1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ 3 4 #include <linux/completion.h> 5 #include <linux/circ_buf.h> 6 #include <linux/list.h> 7 8 #include <soc/qcom/cmd-db.h> 9 10 #include "a6xx_gmu.h" 11 #include "a6xx_gmu.xml.h" 12 #include "a6xx_gpu.h" 13 14 #define HFI_MSG_ID(val) [val] = #val 15 16 static const char * const a6xx_hfi_msg_id[] = { 17 HFI_MSG_ID(HFI_H2F_MSG_INIT), 18 HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION), 19 HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE), 20 HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE), 21 HFI_MSG_ID(HFI_H2F_MSG_TEST), 22 HFI_MSG_ID(HFI_H2F_MSG_START), 23 HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START), 24 HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE), 25 HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER), 26 }; 27 28 static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, 29 struct a6xx_hfi_queue *queue, u32 *data, u32 dwords) 30 { 31 struct a6xx_hfi_queue_header *header = queue->header; 32 u32 i, hdr, index = header->read_index; 33 34 if (header->read_index == header->write_index) { 35 header->rx_request = 1; 36 return 0; 37 } 38 39 hdr = queue->data[index]; 40 41 queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index; 42 43 /* 44 * If we are to assume that the GMU firmware is in fact a rational actor 45 * and is programmed to not send us a larger response than we expect 46 * then we can also assume that if the header size is unexpectedly large 47 * that it is due to memory corruption and/or hardware failure. In this 48 * case the only reasonable course of action is to BUG() to help harden 49 * the failure. 50 */ 51 52 BUG_ON(HFI_HEADER_SIZE(hdr) > dwords); 53 54 for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) { 55 data[i] = queue->data[index]; 56 index = (index + 1) % header->size; 57 } 58 59 if (!gmu->legacy) 60 index = ALIGN(index, 4) % header->size; 61 62 header->read_index = index; 63 return HFI_HEADER_SIZE(hdr); 64 } 65 66 static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, 67 struct a6xx_hfi_queue *queue, u32 *data, u32 dwords) 68 { 69 struct a6xx_hfi_queue_header *header = queue->header; 70 u32 i, space, index = header->write_index; 71 72 spin_lock(&queue->lock); 73 74 space = CIRC_SPACE(header->write_index, header->read_index, 75 header->size); 76 if (space < dwords) { 77 header->dropped++; 78 spin_unlock(&queue->lock); 79 return -ENOSPC; 80 } 81 82 queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index; 83 84 for (i = 0; i < dwords; i++) { 85 queue->data[index] = data[i]; 86 index = (index + 1) % header->size; 87 } 88 89 /* Cookify any non used data at the end of the write buffer */ 90 if (!gmu->legacy) { 91 for (; index % 4; index = (index + 1) % header->size) 92 queue->data[index] = 0xfafafafa; 93 } 94 95 header->write_index = index; 96 spin_unlock(&queue->lock); 97 98 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01); 99 return 0; 100 } 101 102 static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, 103 u32 *payload, u32 payload_size) 104 { 105 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; 106 u32 val; 107 int ret; 108 109 /* Wait for a response */ 110 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, 111 val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000); 112 113 if (ret) { 114 DRM_DEV_ERROR(gmu->dev, 115 "Message %s id %d timed out waiting for response\n", 116 a6xx_hfi_msg_id[id], seqnum); 117 return -ETIMEDOUT; 118 } 119 120 /* Clear the interrupt */ 121 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 122 A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ); 123 124 for (;;) { 125 struct a6xx_hfi_msg_response resp; 126 127 /* Get the next packet */ 128 ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp, 129 sizeof(resp) >> 2); 130 131 /* If the queue is empty our response never made it */ 132 if (!ret) { 133 DRM_DEV_ERROR(gmu->dev, 134 "The HFI response queue is unexpectedly empty\n"); 135 136 return -ENOENT; 137 } 138 139 if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) { 140 struct a6xx_hfi_msg_error *error = 141 (struct a6xx_hfi_msg_error *) &resp; 142 143 DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n", 144 error->code); 145 continue; 146 } 147 148 if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) { 149 DRM_DEV_ERROR(gmu->dev, 150 "Unexpected message id %d on the response queue\n", 151 HFI_HEADER_SEQNUM(resp.ret_header)); 152 continue; 153 } 154 155 if (resp.error) { 156 DRM_DEV_ERROR(gmu->dev, 157 "Message %s id %d returned error %d\n", 158 a6xx_hfi_msg_id[id], seqnum, resp.error); 159 return -EINVAL; 160 } 161 162 /* All is well, copy over the buffer */ 163 if (payload && payload_size) 164 memcpy(payload, resp.payload, 165 min_t(u32, payload_size, sizeof(resp.payload))); 166 167 return 0; 168 } 169 } 170 171 static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id, 172 void *data, u32 size, u32 *payload, u32 payload_size) 173 { 174 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE]; 175 int ret, dwords = size >> 2; 176 u32 seqnum; 177 178 seqnum = atomic_inc_return(&queue->seqnum) % 0xfff; 179 180 /* First dword of the message is the message header - fill it in */ 181 *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) | 182 (dwords << 8) | id; 183 184 ret = a6xx_hfi_queue_write(gmu, queue, data, dwords); 185 if (ret) { 186 DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n", 187 a6xx_hfi_msg_id[id], seqnum); 188 return ret; 189 } 190 191 return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size); 192 } 193 194 static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state) 195 { 196 struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 }; 197 198 msg.dbg_buffer_addr = (u32) gmu->debug.iova; 199 msg.dbg_buffer_size = (u32) gmu->debug.size; 200 msg.boot_state = boot_state; 201 202 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg), 203 NULL, 0); 204 } 205 206 static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version) 207 { 208 struct a6xx_hfi_msg_fw_version msg = { 0 }; 209 210 /* Currently supporting version 1.10 */ 211 msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17); 212 213 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg), 214 version, sizeof(*version)); 215 } 216 217 static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu) 218 { 219 struct a6xx_hfi_msg_perf_table_v1 msg = { 0 }; 220 int i; 221 222 msg.num_gpu_levels = gmu->nr_gpu_freqs; 223 msg.num_gmu_levels = gmu->nr_gmu_freqs; 224 225 for (i = 0; i < gmu->nr_gpu_freqs; i++) { 226 msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; 227 msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; 228 } 229 230 for (i = 0; i < gmu->nr_gmu_freqs; i++) { 231 msg.cx_votes[i].vote = gmu->cx_arc_votes[i]; 232 msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; 233 } 234 235 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg), 236 NULL, 0); 237 } 238 239 static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu) 240 { 241 struct a6xx_hfi_msg_perf_table msg = { 0 }; 242 int i; 243 244 msg.num_gpu_levels = gmu->nr_gpu_freqs; 245 msg.num_gmu_levels = gmu->nr_gmu_freqs; 246 247 for (i = 0; i < gmu->nr_gpu_freqs; i++) { 248 msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; 249 msg.gx_votes[i].acd = 0xffffffff; 250 msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; 251 } 252 253 for (i = 0; i < gmu->nr_gmu_freqs; i++) { 254 msg.cx_votes[i].vote = gmu->cx_arc_votes[i]; 255 msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; 256 } 257 258 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg), 259 NULL, 0); 260 } 261 262 static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 263 { 264 /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */ 265 msg->bw_level_num = 1; 266 267 msg->ddr_cmds_num = 3; 268 msg->ddr_wait_bitmask = 0x01; 269 270 msg->ddr_cmds_addrs[0] = 0x50000; 271 msg->ddr_cmds_addrs[1] = 0x5003c; 272 msg->ddr_cmds_addrs[2] = 0x5000c; 273 274 msg->ddr_cmds_data[0][0] = 0x40000000; 275 msg->ddr_cmds_data[0][1] = 0x40000000; 276 msg->ddr_cmds_data[0][2] = 0x40000000; 277 278 /* 279 * These are the CX (CNOC) votes - these are used by the GMU but the 280 * votes are known and fixed for the target 281 */ 282 msg->cnoc_cmds_num = 1; 283 msg->cnoc_wait_bitmask = 0x01; 284 285 msg->cnoc_cmds_addrs[0] = 0x5007c; 286 msg->cnoc_cmds_data[0][0] = 0x40000000; 287 msg->cnoc_cmds_data[1][0] = 0x60000001; 288 } 289 290 static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 291 { 292 msg->bw_level_num = 13; 293 294 msg->ddr_cmds_num = 3; 295 msg->ddr_wait_bitmask = 0x0; 296 297 msg->ddr_cmds_addrs[0] = 0x50000; 298 msg->ddr_cmds_addrs[1] = 0x50004; 299 msg->ddr_cmds_addrs[2] = 0x50080; 300 301 msg->ddr_cmds_data[0][0] = 0x40000000; 302 msg->ddr_cmds_data[0][1] = 0x40000000; 303 msg->ddr_cmds_data[0][2] = 0x40000000; 304 msg->ddr_cmds_data[1][0] = 0x6000030c; 305 msg->ddr_cmds_data[1][1] = 0x600000db; 306 msg->ddr_cmds_data[1][2] = 0x60000008; 307 msg->ddr_cmds_data[2][0] = 0x60000618; 308 msg->ddr_cmds_data[2][1] = 0x600001b6; 309 msg->ddr_cmds_data[2][2] = 0x60000008; 310 msg->ddr_cmds_data[3][0] = 0x60000925; 311 msg->ddr_cmds_data[3][1] = 0x60000291; 312 msg->ddr_cmds_data[3][2] = 0x60000008; 313 msg->ddr_cmds_data[4][0] = 0x60000dc1; 314 msg->ddr_cmds_data[4][1] = 0x600003dc; 315 msg->ddr_cmds_data[4][2] = 0x60000008; 316 msg->ddr_cmds_data[5][0] = 0x600010ad; 317 msg->ddr_cmds_data[5][1] = 0x600004ae; 318 msg->ddr_cmds_data[5][2] = 0x60000008; 319 msg->ddr_cmds_data[6][0] = 0x600014c3; 320 msg->ddr_cmds_data[6][1] = 0x600005d4; 321 msg->ddr_cmds_data[6][2] = 0x60000008; 322 msg->ddr_cmds_data[7][0] = 0x6000176a; 323 msg->ddr_cmds_data[7][1] = 0x60000693; 324 msg->ddr_cmds_data[7][2] = 0x60000008; 325 msg->ddr_cmds_data[8][0] = 0x60001f01; 326 msg->ddr_cmds_data[8][1] = 0x600008b5; 327 msg->ddr_cmds_data[8][2] = 0x60000008; 328 msg->ddr_cmds_data[9][0] = 0x60002940; 329 msg->ddr_cmds_data[9][1] = 0x60000b95; 330 msg->ddr_cmds_data[9][2] = 0x60000008; 331 msg->ddr_cmds_data[10][0] = 0x60002f68; 332 msg->ddr_cmds_data[10][1] = 0x60000d50; 333 msg->ddr_cmds_data[10][2] = 0x60000008; 334 msg->ddr_cmds_data[11][0] = 0x60003700; 335 msg->ddr_cmds_data[11][1] = 0x60000f71; 336 msg->ddr_cmds_data[11][2] = 0x60000008; 337 msg->ddr_cmds_data[12][0] = 0x60003fce; 338 msg->ddr_cmds_data[12][1] = 0x600011ea; 339 msg->ddr_cmds_data[12][2] = 0x60000008; 340 341 msg->cnoc_cmds_num = 1; 342 msg->cnoc_wait_bitmask = 0x0; 343 344 msg->cnoc_cmds_addrs[0] = 0x50054; 345 346 msg->cnoc_cmds_data[0][0] = 0x40000000; 347 } 348 349 static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 350 { 351 /* 352 * Send a single "off" entry just to get things running 353 * TODO: bus scaling 354 */ 355 msg->bw_level_num = 1; 356 357 msg->ddr_cmds_num = 3; 358 msg->ddr_wait_bitmask = 0x01; 359 360 msg->ddr_cmds_addrs[0] = 0x50000; 361 msg->ddr_cmds_addrs[1] = 0x5003c; 362 msg->ddr_cmds_addrs[2] = 0x5000c; 363 364 msg->ddr_cmds_data[0][0] = 0x40000000; 365 msg->ddr_cmds_data[0][1] = 0x40000000; 366 msg->ddr_cmds_data[0][2] = 0x40000000; 367 368 /* 369 * These are the CX (CNOC) votes - these are used by the GMU but the 370 * votes are known and fixed for the target 371 */ 372 msg->cnoc_cmds_num = 3; 373 msg->cnoc_wait_bitmask = 0x01; 374 375 msg->cnoc_cmds_addrs[0] = 0x50034; 376 msg->cnoc_cmds_addrs[1] = 0x5007c; 377 msg->cnoc_cmds_addrs[2] = 0x5004c; 378 379 msg->cnoc_cmds_data[0][0] = 0x40000000; 380 msg->cnoc_cmds_data[0][1] = 0x00000000; 381 msg->cnoc_cmds_data[0][2] = 0x40000000; 382 383 msg->cnoc_cmds_data[1][0] = 0x60000001; 384 msg->cnoc_cmds_data[1][1] = 0x20000001; 385 msg->cnoc_cmds_data[1][2] = 0x60000001; 386 } 387 388 static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 389 { 390 /* 391 * Send a single "off" entry just to get things running 392 * TODO: bus scaling 393 */ 394 msg->bw_level_num = 1; 395 396 msg->ddr_cmds_num = 3; 397 msg->ddr_wait_bitmask = 0x01; 398 399 msg->ddr_cmds_addrs[0] = 0x50000; 400 msg->ddr_cmds_addrs[1] = 0x50004; 401 msg->ddr_cmds_addrs[2] = 0x5007c; 402 403 msg->ddr_cmds_data[0][0] = 0x40000000; 404 msg->ddr_cmds_data[0][1] = 0x40000000; 405 msg->ddr_cmds_data[0][2] = 0x40000000; 406 407 /* 408 * These are the CX (CNOC) votes - these are used by the GMU but the 409 * votes are known and fixed for the target 410 */ 411 msg->cnoc_cmds_num = 1; 412 msg->cnoc_wait_bitmask = 0x01; 413 414 msg->cnoc_cmds_addrs[0] = 0x500a4; 415 msg->cnoc_cmds_data[0][0] = 0x40000000; 416 msg->cnoc_cmds_data[1][0] = 0x60000001; 417 } 418 419 static void a690_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 420 { 421 /* 422 * Send a single "off" entry just to get things running 423 * TODO: bus scaling 424 */ 425 msg->bw_level_num = 1; 426 427 msg->ddr_cmds_num = 3; 428 msg->ddr_wait_bitmask = 0x01; 429 430 msg->ddr_cmds_addrs[0] = 0x50004; 431 msg->ddr_cmds_addrs[1] = 0x50000; 432 msg->ddr_cmds_addrs[2] = 0x500ac; 433 434 msg->ddr_cmds_data[0][0] = 0x40000000; 435 msg->ddr_cmds_data[0][1] = 0x40000000; 436 msg->ddr_cmds_data[0][2] = 0x40000000; 437 438 /* 439 * These are the CX (CNOC) votes - these are used by the GMU but the 440 * votes are known and fixed for the target 441 */ 442 msg->cnoc_cmds_num = 1; 443 msg->cnoc_wait_bitmask = 0x01; 444 445 msg->cnoc_cmds_addrs[0] = 0x5003c; 446 msg->cnoc_cmds_data[0][0] = 0x40000000; 447 msg->cnoc_cmds_data[1][0] = 0x60000001; 448 } 449 450 static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 451 { 452 /* 453 * Send a single "off" entry just to get things running 454 * TODO: bus scaling 455 */ 456 msg->bw_level_num = 1; 457 458 msg->ddr_cmds_num = 3; 459 msg->ddr_wait_bitmask = 0x01; 460 461 msg->ddr_cmds_addrs[0] = 0x50004; 462 msg->ddr_cmds_addrs[1] = 0x500a0; 463 msg->ddr_cmds_addrs[2] = 0x50000; 464 465 msg->ddr_cmds_data[0][0] = 0x40000000; 466 msg->ddr_cmds_data[0][1] = 0x40000000; 467 msg->ddr_cmds_data[0][2] = 0x40000000; 468 469 /* 470 * These are the CX (CNOC) votes - these are used by the GMU but the 471 * votes are known and fixed for the target 472 */ 473 msg->cnoc_cmds_num = 1; 474 msg->cnoc_wait_bitmask = 0x01; 475 476 msg->cnoc_cmds_addrs[0] = 0x50070; 477 msg->cnoc_cmds_data[0][0] = 0x40000000; 478 msg->cnoc_cmds_data[1][0] = 0x60000001; 479 } 480 481 static void a663_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 482 { 483 /* 484 * Send a single "off" entry just to get things running 485 * TODO: bus scaling 486 */ 487 msg->bw_level_num = 1; 488 489 msg->ddr_cmds_num = 3; 490 msg->ddr_wait_bitmask = 0x07; 491 492 msg->ddr_cmds_addrs[0] = 0x50004; 493 msg->ddr_cmds_addrs[1] = 0x50000; 494 msg->ddr_cmds_addrs[2] = 0x500b4; 495 496 msg->ddr_cmds_data[0][0] = 0x40000000; 497 msg->ddr_cmds_data[0][1] = 0x40000000; 498 msg->ddr_cmds_data[0][2] = 0x40000000; 499 500 /* 501 * These are the CX (CNOC) votes - these are used by the GMU but the 502 * votes are known and fixed for the target 503 */ 504 msg->cnoc_cmds_num = 1; 505 msg->cnoc_wait_bitmask = 0x01; 506 507 msg->cnoc_cmds_addrs[0] = 0x50058; 508 msg->cnoc_cmds_data[0][0] = 0x40000000; 509 msg->cnoc_cmds_data[1][0] = 0x60000001; 510 } 511 512 static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 513 { 514 /* 515 * Send a single "off" entry just to get things running 516 * TODO: bus scaling 517 */ 518 msg->bw_level_num = 1; 519 520 msg->ddr_cmds_num = 3; 521 msg->ddr_wait_bitmask = 0x07; 522 523 msg->ddr_cmds_addrs[0] = 0x50004; 524 msg->ddr_cmds_addrs[1] = 0x50000; 525 msg->ddr_cmds_addrs[2] = 0x50088; 526 527 msg->ddr_cmds_data[0][0] = 0x40000000; 528 msg->ddr_cmds_data[0][1] = 0x40000000; 529 msg->ddr_cmds_data[0][2] = 0x40000000; 530 531 /* 532 * These are the CX (CNOC) votes - these are used by the GMU but the 533 * votes are known and fixed for the target 534 */ 535 msg->cnoc_cmds_num = 1; 536 msg->cnoc_wait_bitmask = 0x01; 537 538 msg->cnoc_cmds_addrs[0] = 0x5006c; 539 msg->cnoc_cmds_data[0][0] = 0x40000000; 540 msg->cnoc_cmds_data[1][0] = 0x60000001; 541 } 542 543 static void a730_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 544 { 545 msg->bw_level_num = 12; 546 547 msg->ddr_cmds_num = 3; 548 msg->ddr_wait_bitmask = 0x7; 549 550 msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0"); 551 msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0"); 552 msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV"); 553 554 msg->ddr_cmds_data[0][0] = 0x40000000; 555 msg->ddr_cmds_data[0][1] = 0x40000000; 556 msg->ddr_cmds_data[0][2] = 0x40000000; 557 msg->ddr_cmds_data[1][0] = 0x600002e8; 558 msg->ddr_cmds_data[1][1] = 0x600003d0; 559 msg->ddr_cmds_data[1][2] = 0x60000008; 560 msg->ddr_cmds_data[2][0] = 0x6000068d; 561 msg->ddr_cmds_data[2][1] = 0x6000089a; 562 msg->ddr_cmds_data[2][2] = 0x60000008; 563 msg->ddr_cmds_data[3][0] = 0x600007f2; 564 msg->ddr_cmds_data[3][1] = 0x60000a6e; 565 msg->ddr_cmds_data[3][2] = 0x60000008; 566 msg->ddr_cmds_data[4][0] = 0x600009e5; 567 msg->ddr_cmds_data[4][1] = 0x60000cfd; 568 msg->ddr_cmds_data[4][2] = 0x60000008; 569 msg->ddr_cmds_data[5][0] = 0x60000b29; 570 msg->ddr_cmds_data[5][1] = 0x60000ea6; 571 msg->ddr_cmds_data[5][2] = 0x60000008; 572 msg->ddr_cmds_data[6][0] = 0x60001698; 573 msg->ddr_cmds_data[6][1] = 0x60001da8; 574 msg->ddr_cmds_data[6][2] = 0x60000008; 575 msg->ddr_cmds_data[7][0] = 0x600018d2; 576 msg->ddr_cmds_data[7][1] = 0x60002093; 577 msg->ddr_cmds_data[7][2] = 0x60000008; 578 msg->ddr_cmds_data[8][0] = 0x60001e66; 579 msg->ddr_cmds_data[8][1] = 0x600027e6; 580 msg->ddr_cmds_data[8][2] = 0x60000008; 581 msg->ddr_cmds_data[9][0] = 0x600027c2; 582 msg->ddr_cmds_data[9][1] = 0x6000342f; 583 msg->ddr_cmds_data[9][2] = 0x60000008; 584 msg->ddr_cmds_data[10][0] = 0x60002e71; 585 msg->ddr_cmds_data[10][1] = 0x60003cf5; 586 msg->ddr_cmds_data[10][2] = 0x60000008; 587 msg->ddr_cmds_data[11][0] = 0x600030ae; 588 msg->ddr_cmds_data[11][1] = 0x60003fe5; 589 msg->ddr_cmds_data[11][2] = 0x60000008; 590 591 msg->cnoc_cmds_num = 1; 592 msg->cnoc_wait_bitmask = 0x1; 593 594 msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0"); 595 msg->cnoc_cmds_data[0][0] = 0x40000000; 596 msg->cnoc_cmds_data[1][0] = 0x60000001; 597 } 598 599 static void a740_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 600 { 601 msg->bw_level_num = 1; 602 603 msg->ddr_cmds_num = 3; 604 msg->ddr_wait_bitmask = 0x7; 605 606 msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0"); 607 msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0"); 608 msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV"); 609 610 msg->ddr_cmds_data[0][0] = 0x40000000; 611 msg->ddr_cmds_data[0][1] = 0x40000000; 612 msg->ddr_cmds_data[0][2] = 0x40000000; 613 614 /* TODO: add a proper dvfs table */ 615 616 msg->cnoc_cmds_num = 1; 617 msg->cnoc_wait_bitmask = 0x1; 618 619 msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0"); 620 msg->cnoc_cmds_data[0][0] = 0x40000000; 621 msg->cnoc_cmds_data[1][0] = 0x60000001; 622 } 623 624 static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) 625 { 626 /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ 627 msg->bw_level_num = 1; 628 629 msg->ddr_cmds_num = 3; 630 msg->ddr_wait_bitmask = 0x07; 631 632 msg->ddr_cmds_addrs[0] = 0x50000; 633 msg->ddr_cmds_addrs[1] = 0x5005c; 634 msg->ddr_cmds_addrs[2] = 0x5000c; 635 636 msg->ddr_cmds_data[0][0] = 0x40000000; 637 msg->ddr_cmds_data[0][1] = 0x40000000; 638 msg->ddr_cmds_data[0][2] = 0x40000000; 639 640 /* 641 * These are the CX (CNOC) votes. This is used but the values for the 642 * sdm845 GMU are known and fixed so we can hard code them. 643 */ 644 645 msg->cnoc_cmds_num = 3; 646 msg->cnoc_wait_bitmask = 0x05; 647 648 msg->cnoc_cmds_addrs[0] = 0x50034; 649 msg->cnoc_cmds_addrs[1] = 0x5007c; 650 msg->cnoc_cmds_addrs[2] = 0x5004c; 651 652 msg->cnoc_cmds_data[0][0] = 0x40000000; 653 msg->cnoc_cmds_data[0][1] = 0x00000000; 654 msg->cnoc_cmds_data[0][2] = 0x40000000; 655 656 msg->cnoc_cmds_data[1][0] = 0x60000001; 657 msg->cnoc_cmds_data[1][1] = 0x20000001; 658 msg->cnoc_cmds_data[1][2] = 0x60000001; 659 } 660 661 662 static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) 663 { 664 struct a6xx_hfi_msg_bw_table *msg; 665 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 666 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 667 668 if (gmu->bw_table) 669 goto send; 670 671 msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL); 672 if (!msg) 673 return -ENOMEM; 674 675 if (adreno_is_a618(adreno_gpu)) 676 a618_build_bw_table(msg); 677 else if (adreno_is_a619(adreno_gpu)) 678 a619_build_bw_table(msg); 679 else if (adreno_is_a640_family(adreno_gpu)) 680 a640_build_bw_table(msg); 681 else if (adreno_is_a650(adreno_gpu)) 682 a650_build_bw_table(msg); 683 else if (adreno_is_7c3(adreno_gpu)) 684 adreno_7c3_build_bw_table(msg); 685 else if (adreno_is_a660(adreno_gpu)) 686 a660_build_bw_table(msg); 687 else if (adreno_is_a663(adreno_gpu)) 688 a663_build_bw_table(msg); 689 else if (adreno_is_a690(adreno_gpu)) 690 a690_build_bw_table(msg); 691 else if (adreno_is_a730(adreno_gpu)) 692 a730_build_bw_table(msg); 693 else if (adreno_is_a740_family(adreno_gpu)) 694 a740_build_bw_table(msg); 695 else 696 a6xx_build_bw_table(msg); 697 698 gmu->bw_table = msg; 699 700 send: 701 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table, sizeof(*(gmu->bw_table)), 702 NULL, 0); 703 } 704 705 static int a6xx_hfi_send_test(struct a6xx_gmu *gmu) 706 { 707 struct a6xx_hfi_msg_test msg = { 0 }; 708 709 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg), 710 NULL, 0); 711 } 712 713 static int a6xx_hfi_send_start(struct a6xx_gmu *gmu) 714 { 715 struct a6xx_hfi_msg_start msg = { 0 }; 716 717 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg), 718 NULL, 0); 719 } 720 721 static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu) 722 { 723 struct a6xx_hfi_msg_core_fw_start msg = { 0 }; 724 725 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg, 726 sizeof(msg), NULL, 0); 727 } 728 729 int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index) 730 { 731 struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 }; 732 733 msg.ack_type = 1; /* blocking */ 734 msg.freq = index; 735 msg.bw = 0; /* TODO: bus scaling */ 736 737 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg, 738 sizeof(msg), NULL, 0); 739 } 740 741 int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu) 742 { 743 struct a6xx_hfi_prep_slumber_cmd msg = { 0 }; 744 745 /* TODO: should freq and bw fields be non-zero ? */ 746 747 return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg, 748 sizeof(msg), NULL, 0); 749 } 750 751 static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state) 752 { 753 int ret; 754 755 ret = a6xx_hfi_send_gmu_init(gmu, boot_state); 756 if (ret) 757 return ret; 758 759 ret = a6xx_hfi_get_fw_version(gmu, NULL); 760 if (ret) 761 return ret; 762 763 /* 764 * We have to get exchange version numbers per the sequence but at this 765 * point th kernel driver doesn't need to know the exact version of 766 * the GMU firmware 767 */ 768 769 ret = a6xx_hfi_send_perf_table_v1(gmu); 770 if (ret) 771 return ret; 772 773 ret = a6xx_hfi_send_bw_table(gmu); 774 if (ret) 775 return ret; 776 777 /* 778 * Let the GMU know that there won't be any more HFI messages until next 779 * boot 780 */ 781 a6xx_hfi_send_test(gmu); 782 783 return 0; 784 } 785 786 int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) 787 { 788 int ret; 789 790 if (gmu->legacy) 791 return a6xx_hfi_start_v1(gmu, boot_state); 792 793 794 ret = a6xx_hfi_send_perf_table(gmu); 795 if (ret) 796 return ret; 797 798 ret = a6xx_hfi_send_bw_table(gmu); 799 if (ret) 800 return ret; 801 802 ret = a6xx_hfi_send_core_fw_start(gmu); 803 if (ret) 804 return ret; 805 806 /* 807 * Downstream driver sends this in its "a6xx_hw_init" equivalent, 808 * but seems to be no harm in sending it here 809 */ 810 ret = a6xx_hfi_send_start(gmu); 811 if (ret) 812 return ret; 813 814 return 0; 815 } 816 817 void a6xx_hfi_stop(struct a6xx_gmu *gmu) 818 { 819 int i; 820 821 for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { 822 struct a6xx_hfi_queue *queue = &gmu->queues[i]; 823 824 if (!queue->header) 825 continue; 826 827 if (queue->header->read_index != queue->header->write_index) 828 DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i); 829 830 queue->header->read_index = 0; 831 queue->header->write_index = 0; 832 833 memset(&queue->history, 0xff, sizeof(queue->history)); 834 queue->history_idx = 0; 835 } 836 } 837 838 static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue, 839 struct a6xx_hfi_queue_header *header, void *virt, u64 iova, 840 u32 id) 841 { 842 spin_lock_init(&queue->lock); 843 queue->header = header; 844 queue->data = virt; 845 atomic_set(&queue->seqnum, 0); 846 847 memset(&queue->history, 0xff, sizeof(queue->history)); 848 queue->history_idx = 0; 849 850 /* Set up the shared memory header */ 851 header->iova = iova; 852 header->type = 10 << 8 | id; 853 header->status = 1; 854 header->size = SZ_4K >> 2; 855 header->msg_size = 0; 856 header->dropped = 0; 857 header->rx_watermark = 1; 858 header->tx_watermark = 1; 859 header->rx_request = 1; 860 header->tx_request = 0; 861 header->read_index = 0; 862 header->write_index = 0; 863 } 864 865 void a6xx_hfi_init(struct a6xx_gmu *gmu) 866 { 867 struct a6xx_gmu_bo *hfi = &gmu->hfi; 868 struct a6xx_hfi_queue_table_header *table = hfi->virt; 869 struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table); 870 u64 offset; 871 int table_size; 872 873 /* 874 * The table size is the size of the table header plus all of the queue 875 * headers 876 */ 877 table_size = sizeof(*table); 878 table_size += (ARRAY_SIZE(gmu->queues) * 879 sizeof(struct a6xx_hfi_queue_header)); 880 881 table->version = 0; 882 table->size = table_size; 883 /* First queue header is located immediately after the table header */ 884 table->qhdr0_offset = sizeof(*table) >> 2; 885 table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2; 886 table->num_queues = ARRAY_SIZE(gmu->queues); 887 table->active_queues = ARRAY_SIZE(gmu->queues); 888 889 /* Command queue */ 890 offset = SZ_4K; 891 a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset, 892 hfi->iova + offset, 0); 893 894 /* GMU response queue */ 895 offset += SZ_4K; 896 a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset, 897 hfi->iova + offset, gmu->legacy ? 4 : 1); 898 } 899