1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Stanislaw Skowronek 23 */ 24 25 #include <linux/module.h> 26 #include <linux/sched.h> 27 #include <linux/slab.h> 28 #include <linux/string_helpers.h> 29 30 #include <asm/unaligned.h> 31 32 #include <drm/drm_util.h> 33 34 #define ATOM_DEBUG 35 36 #include "atomfirmware.h" 37 #include "atom.h" 38 #include "atom-names.h" 39 #include "atom-bits.h" 40 #include "amdgpu.h" 41 42 #define ATOM_COND_ABOVE 0 43 #define ATOM_COND_ABOVEOREQUAL 1 44 #define ATOM_COND_ALWAYS 2 45 #define ATOM_COND_BELOW 3 46 #define ATOM_COND_BELOWOREQUAL 4 47 #define ATOM_COND_EQUAL 5 48 #define ATOM_COND_NOTEQUAL 6 49 50 #define ATOM_PORT_ATI 0 51 #define ATOM_PORT_PCI 1 52 #define ATOM_PORT_SYSIO 2 53 54 #define ATOM_UNIT_MICROSEC 0 55 #define ATOM_UNIT_MILLISEC 1 56 57 #define PLL_INDEX 2 58 #define PLL_DATA 3 59 60 #define ATOM_CMD_TIMEOUT_SEC 20 61 62 typedef struct { 63 struct atom_context *ctx; 64 uint32_t *ps, *ws; 65 int ps_shift; 66 uint16_t start; 67 unsigned last_jump; 68 unsigned long last_jump_jiffies; 69 bool abort; 70 } atom_exec_context; 71 72 int amdgpu_atom_debug; 73 static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params); 74 int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); 75 76 static uint32_t atom_arg_mask[8] = 77 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, 78 0xFF000000 }; 79 static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 }; 80 81 static int atom_dst_to_src[8][4] = { 82 /* translate destination alignment field to the source alignment encoding */ 83 {0, 0, 0, 0}, 84 {1, 2, 3, 0}, 85 {1, 2, 3, 0}, 86 {1, 2, 3, 0}, 87 {4, 5, 6, 7}, 88 {4, 5, 6, 7}, 89 {4, 5, 6, 7}, 90 {4, 5, 6, 7}, 91 }; 92 static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 }; 93 94 static int debug_depth; 95 #ifdef ATOM_DEBUG 96 static void debug_print_spaces(int n) 97 { 98 while (n--) 99 printk(" "); 100 } 101 102 #define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0) 103 #define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0) 104 #else 105 #define DEBUG(...) do { } while (0) 106 #define SDEBUG(...) do { } while (0) 107 #endif 108 109 static uint32_t atom_iio_execute(struct atom_context *ctx, int base, 110 uint32_t index, uint32_t data) 111 { 112 uint32_t temp = 0xCDCDCDCD; 113 114 while (1) 115 switch (CU8(base)) { 116 case ATOM_IIO_NOP: 117 base++; 118 break; 119 case ATOM_IIO_READ: 120 temp = ctx->card->reg_read(ctx->card, CU16(base + 1)); 121 base += 3; 122 break; 123 case ATOM_IIO_WRITE: 124 ctx->card->reg_write(ctx->card, CU16(base + 1), temp); 125 base += 3; 126 break; 127 case ATOM_IIO_CLEAR: 128 temp &= 129 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 130 CU8(base + 2)); 131 base += 3; 132 break; 133 case ATOM_IIO_SET: 134 temp |= 135 (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 136 2); 137 base += 3; 138 break; 139 case ATOM_IIO_MOVE_INDEX: 140 temp &= 141 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 142 CU8(base + 3)); 143 temp |= 144 ((index >> CU8(base + 2)) & 145 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 146 3); 147 base += 4; 148 break; 149 case ATOM_IIO_MOVE_DATA: 150 temp &= 151 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 152 CU8(base + 3)); 153 temp |= 154 ((data >> CU8(base + 2)) & 155 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 156 3); 157 base += 4; 158 break; 159 case ATOM_IIO_MOVE_ATTR: 160 temp &= 161 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 162 CU8(base + 3)); 163 temp |= 164 ((ctx-> 165 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - 166 CU8 167 (base 168 + 169 1)))) 170 << CU8(base + 3); 171 base += 4; 172 break; 173 case ATOM_IIO_END: 174 return temp; 175 default: 176 pr_info("Unknown IIO opcode\n"); 177 return 0; 178 } 179 } 180 181 static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, 182 int *ptr, uint32_t *saved, int print) 183 { 184 uint32_t idx, val = 0xCDCDCDCD, align, arg; 185 struct atom_context *gctx = ctx->ctx; 186 arg = attr & 7; 187 align = (attr >> 3) & 7; 188 switch (arg) { 189 case ATOM_ARG_REG: 190 idx = U16(*ptr); 191 (*ptr) += 2; 192 if (print) 193 DEBUG("REG[0x%04X]", idx); 194 idx += gctx->reg_block; 195 switch (gctx->io_mode) { 196 case ATOM_IO_MM: 197 val = gctx->card->reg_read(gctx->card, idx); 198 break; 199 case ATOM_IO_PCI: 200 pr_info("PCI registers are not implemented\n"); 201 return 0; 202 case ATOM_IO_SYSIO: 203 pr_info("SYSIO registers are not implemented\n"); 204 return 0; 205 default: 206 if (!(gctx->io_mode & 0x80)) { 207 pr_info("Bad IO mode\n"); 208 return 0; 209 } 210 if (!gctx->iio[gctx->io_mode & 0x7F]) { 211 pr_info("Undefined indirect IO read method %d\n", 212 gctx->io_mode & 0x7F); 213 return 0; 214 } 215 val = 216 atom_iio_execute(gctx, 217 gctx->iio[gctx->io_mode & 0x7F], 218 idx, 0); 219 } 220 break; 221 case ATOM_ARG_PS: 222 idx = U8(*ptr); 223 (*ptr)++; 224 /* get_unaligned_le32 avoids unaligned accesses from atombios 225 * tables, noticed on a DEC Alpha. */ 226 val = get_unaligned_le32((u32 *)&ctx->ps[idx]); 227 if (print) 228 DEBUG("PS[0x%02X,0x%04X]", idx, val); 229 break; 230 case ATOM_ARG_WS: 231 idx = U8(*ptr); 232 (*ptr)++; 233 if (print) 234 DEBUG("WS[0x%02X]", idx); 235 switch (idx) { 236 case ATOM_WS_QUOTIENT: 237 val = gctx->divmul[0]; 238 break; 239 case ATOM_WS_REMAINDER: 240 val = gctx->divmul[1]; 241 break; 242 case ATOM_WS_DATAPTR: 243 val = gctx->data_block; 244 break; 245 case ATOM_WS_SHIFT: 246 val = gctx->shift; 247 break; 248 case ATOM_WS_OR_MASK: 249 val = 1 << gctx->shift; 250 break; 251 case ATOM_WS_AND_MASK: 252 val = ~(1 << gctx->shift); 253 break; 254 case ATOM_WS_FB_WINDOW: 255 val = gctx->fb_base; 256 break; 257 case ATOM_WS_ATTRIBUTES: 258 val = gctx->io_attr; 259 break; 260 case ATOM_WS_REGPTR: 261 val = gctx->reg_block; 262 break; 263 default: 264 val = ctx->ws[idx]; 265 } 266 break; 267 case ATOM_ARG_ID: 268 idx = U16(*ptr); 269 (*ptr) += 2; 270 if (print) { 271 if (gctx->data_block) 272 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block); 273 else 274 DEBUG("ID[0x%04X]", idx); 275 } 276 val = U32(idx + gctx->data_block); 277 break; 278 case ATOM_ARG_FB: 279 idx = U8(*ptr); 280 (*ptr)++; 281 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { 282 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n", 283 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); 284 val = 0; 285 } else 286 val = gctx->scratch[(gctx->fb_base / 4) + idx]; 287 if (print) 288 DEBUG("FB[0x%02X]", idx); 289 break; 290 case ATOM_ARG_IMM: 291 switch (align) { 292 case ATOM_SRC_DWORD: 293 val = U32(*ptr); 294 (*ptr) += 4; 295 if (print) 296 DEBUG("IMM 0x%08X\n", val); 297 return val; 298 case ATOM_SRC_WORD0: 299 case ATOM_SRC_WORD8: 300 case ATOM_SRC_WORD16: 301 val = U16(*ptr); 302 (*ptr) += 2; 303 if (print) 304 DEBUG("IMM 0x%04X\n", val); 305 return val; 306 case ATOM_SRC_BYTE0: 307 case ATOM_SRC_BYTE8: 308 case ATOM_SRC_BYTE16: 309 case ATOM_SRC_BYTE24: 310 val = U8(*ptr); 311 (*ptr)++; 312 if (print) 313 DEBUG("IMM 0x%02X\n", val); 314 return val; 315 } 316 return 0; 317 case ATOM_ARG_PLL: 318 idx = U8(*ptr); 319 (*ptr)++; 320 if (print) 321 DEBUG("PLL[0x%02X]", idx); 322 val = gctx->card->pll_read(gctx->card, idx); 323 break; 324 case ATOM_ARG_MC: 325 idx = U8(*ptr); 326 (*ptr)++; 327 if (print) 328 DEBUG("MC[0x%02X]", idx); 329 val = gctx->card->mc_read(gctx->card, idx); 330 break; 331 } 332 if (saved) 333 *saved = val; 334 val &= atom_arg_mask[align]; 335 val >>= atom_arg_shift[align]; 336 if (print) 337 switch (align) { 338 case ATOM_SRC_DWORD: 339 DEBUG(".[31:0] -> 0x%08X\n", val); 340 break; 341 case ATOM_SRC_WORD0: 342 DEBUG(".[15:0] -> 0x%04X\n", val); 343 break; 344 case ATOM_SRC_WORD8: 345 DEBUG(".[23:8] -> 0x%04X\n", val); 346 break; 347 case ATOM_SRC_WORD16: 348 DEBUG(".[31:16] -> 0x%04X\n", val); 349 break; 350 case ATOM_SRC_BYTE0: 351 DEBUG(".[7:0] -> 0x%02X\n", val); 352 break; 353 case ATOM_SRC_BYTE8: 354 DEBUG(".[15:8] -> 0x%02X\n", val); 355 break; 356 case ATOM_SRC_BYTE16: 357 DEBUG(".[23:16] -> 0x%02X\n", val); 358 break; 359 case ATOM_SRC_BYTE24: 360 DEBUG(".[31:24] -> 0x%02X\n", val); 361 break; 362 } 363 return val; 364 } 365 366 static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr) 367 { 368 uint32_t align = (attr >> 3) & 7, arg = attr & 7; 369 switch (arg) { 370 case ATOM_ARG_REG: 371 case ATOM_ARG_ID: 372 (*ptr) += 2; 373 break; 374 case ATOM_ARG_PLL: 375 case ATOM_ARG_MC: 376 case ATOM_ARG_PS: 377 case ATOM_ARG_WS: 378 case ATOM_ARG_FB: 379 (*ptr)++; 380 break; 381 case ATOM_ARG_IMM: 382 switch (align) { 383 case ATOM_SRC_DWORD: 384 (*ptr) += 4; 385 return; 386 case ATOM_SRC_WORD0: 387 case ATOM_SRC_WORD8: 388 case ATOM_SRC_WORD16: 389 (*ptr) += 2; 390 return; 391 case ATOM_SRC_BYTE0: 392 case ATOM_SRC_BYTE8: 393 case ATOM_SRC_BYTE16: 394 case ATOM_SRC_BYTE24: 395 (*ptr)++; 396 return; 397 } 398 } 399 } 400 401 static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) 402 { 403 return atom_get_src_int(ctx, attr, ptr, NULL, 1); 404 } 405 406 static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) 407 { 408 uint32_t val = 0xCDCDCDCD; 409 410 switch (align) { 411 case ATOM_SRC_DWORD: 412 val = U32(*ptr); 413 (*ptr) += 4; 414 break; 415 case ATOM_SRC_WORD0: 416 case ATOM_SRC_WORD8: 417 case ATOM_SRC_WORD16: 418 val = U16(*ptr); 419 (*ptr) += 2; 420 break; 421 case ATOM_SRC_BYTE0: 422 case ATOM_SRC_BYTE8: 423 case ATOM_SRC_BYTE16: 424 case ATOM_SRC_BYTE24: 425 val = U8(*ptr); 426 (*ptr)++; 427 break; 428 } 429 return val; 430 } 431 432 static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, 433 int *ptr, uint32_t *saved, int print) 434 { 435 return atom_get_src_int(ctx, 436 arg | atom_dst_to_src[(attr >> 3) & 437 7][(attr >> 6) & 3] << 3, 438 ptr, saved, print); 439 } 440 441 static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr) 442 { 443 atom_skip_src_int(ctx, 444 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 445 3] << 3, ptr); 446 } 447 448 static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, 449 int *ptr, uint32_t val, uint32_t saved) 450 { 451 uint32_t align = 452 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val = 453 val, idx; 454 struct atom_context *gctx = ctx->ctx; 455 old_val &= atom_arg_mask[align] >> atom_arg_shift[align]; 456 val <<= atom_arg_shift[align]; 457 val &= atom_arg_mask[align]; 458 saved &= ~atom_arg_mask[align]; 459 val |= saved; 460 switch (arg) { 461 case ATOM_ARG_REG: 462 idx = U16(*ptr); 463 (*ptr) += 2; 464 DEBUG("REG[0x%04X]", idx); 465 idx += gctx->reg_block; 466 switch (gctx->io_mode) { 467 case ATOM_IO_MM: 468 if (idx == 0) 469 gctx->card->reg_write(gctx->card, idx, 470 val << 2); 471 else 472 gctx->card->reg_write(gctx->card, idx, val); 473 break; 474 case ATOM_IO_PCI: 475 pr_info("PCI registers are not implemented\n"); 476 return; 477 case ATOM_IO_SYSIO: 478 pr_info("SYSIO registers are not implemented\n"); 479 return; 480 default: 481 if (!(gctx->io_mode & 0x80)) { 482 pr_info("Bad IO mode\n"); 483 return; 484 } 485 if (!gctx->iio[gctx->io_mode & 0xFF]) { 486 pr_info("Undefined indirect IO write method %d\n", 487 gctx->io_mode & 0x7F); 488 return; 489 } 490 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF], 491 idx, val); 492 } 493 break; 494 case ATOM_ARG_PS: 495 idx = U8(*ptr); 496 (*ptr)++; 497 DEBUG("PS[0x%02X]", idx); 498 ctx->ps[idx] = cpu_to_le32(val); 499 break; 500 case ATOM_ARG_WS: 501 idx = U8(*ptr); 502 (*ptr)++; 503 DEBUG("WS[0x%02X]", idx); 504 switch (idx) { 505 case ATOM_WS_QUOTIENT: 506 gctx->divmul[0] = val; 507 break; 508 case ATOM_WS_REMAINDER: 509 gctx->divmul[1] = val; 510 break; 511 case ATOM_WS_DATAPTR: 512 gctx->data_block = val; 513 break; 514 case ATOM_WS_SHIFT: 515 gctx->shift = val; 516 break; 517 case ATOM_WS_OR_MASK: 518 case ATOM_WS_AND_MASK: 519 break; 520 case ATOM_WS_FB_WINDOW: 521 gctx->fb_base = val; 522 break; 523 case ATOM_WS_ATTRIBUTES: 524 gctx->io_attr = val; 525 break; 526 case ATOM_WS_REGPTR: 527 gctx->reg_block = val; 528 break; 529 default: 530 ctx->ws[idx] = val; 531 } 532 break; 533 case ATOM_ARG_FB: 534 idx = U8(*ptr); 535 (*ptr)++; 536 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { 537 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n", 538 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); 539 } else 540 gctx->scratch[(gctx->fb_base / 4) + idx] = val; 541 DEBUG("FB[0x%02X]", idx); 542 break; 543 case ATOM_ARG_PLL: 544 idx = U8(*ptr); 545 (*ptr)++; 546 DEBUG("PLL[0x%02X]", idx); 547 gctx->card->pll_write(gctx->card, idx, val); 548 break; 549 case ATOM_ARG_MC: 550 idx = U8(*ptr); 551 (*ptr)++; 552 DEBUG("MC[0x%02X]", idx); 553 gctx->card->mc_write(gctx->card, idx, val); 554 return; 555 } 556 switch (align) { 557 case ATOM_SRC_DWORD: 558 DEBUG(".[31:0] <- 0x%08X\n", old_val); 559 break; 560 case ATOM_SRC_WORD0: 561 DEBUG(".[15:0] <- 0x%04X\n", old_val); 562 break; 563 case ATOM_SRC_WORD8: 564 DEBUG(".[23:8] <- 0x%04X\n", old_val); 565 break; 566 case ATOM_SRC_WORD16: 567 DEBUG(".[31:16] <- 0x%04X\n", old_val); 568 break; 569 case ATOM_SRC_BYTE0: 570 DEBUG(".[7:0] <- 0x%02X\n", old_val); 571 break; 572 case ATOM_SRC_BYTE8: 573 DEBUG(".[15:8] <- 0x%02X\n", old_val); 574 break; 575 case ATOM_SRC_BYTE16: 576 DEBUG(".[23:16] <- 0x%02X\n", old_val); 577 break; 578 case ATOM_SRC_BYTE24: 579 DEBUG(".[31:24] <- 0x%02X\n", old_val); 580 break; 581 } 582 } 583 584 static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg) 585 { 586 uint8_t attr = U8((*ptr)++); 587 uint32_t dst, src, saved; 588 int dptr = *ptr; 589 SDEBUG(" dst: "); 590 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 591 SDEBUG(" src: "); 592 src = atom_get_src(ctx, attr, ptr); 593 dst += src; 594 SDEBUG(" dst: "); 595 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 596 } 597 598 static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg) 599 { 600 uint8_t attr = U8((*ptr)++); 601 uint32_t dst, src, saved; 602 int dptr = *ptr; 603 SDEBUG(" dst: "); 604 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 605 SDEBUG(" src: "); 606 src = atom_get_src(ctx, attr, ptr); 607 dst &= src; 608 SDEBUG(" dst: "); 609 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 610 } 611 612 static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) 613 { 614 printk("ATOM BIOS beeped!\n"); 615 } 616 617 static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) 618 { 619 int idx = U8((*ptr)++); 620 int r = 0; 621 622 if (idx < ATOM_TABLE_NAMES_CNT) 623 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); 624 else 625 SDEBUG(" table: %d\n", idx); 626 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) 627 r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); 628 if (r) { 629 ctx->abort = true; 630 } 631 } 632 633 static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) 634 { 635 uint8_t attr = U8((*ptr)++); 636 uint32_t saved; 637 int dptr = *ptr; 638 attr &= 0x38; 639 attr |= atom_def_dst[attr >> 3] << 6; 640 atom_get_dst(ctx, arg, attr, ptr, &saved, 0); 641 SDEBUG(" dst: "); 642 atom_put_dst(ctx, arg, attr, &dptr, 0, saved); 643 } 644 645 static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) 646 { 647 uint8_t attr = U8((*ptr)++); 648 uint32_t dst, src; 649 SDEBUG(" src1: "); 650 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 651 SDEBUG(" src2: "); 652 src = atom_get_src(ctx, attr, ptr); 653 ctx->ctx->cs_equal = (dst == src); 654 ctx->ctx->cs_above = (dst > src); 655 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE", 656 ctx->ctx->cs_above ? "GT" : "LE"); 657 } 658 659 static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) 660 { 661 unsigned count = U8((*ptr)++); 662 SDEBUG(" count: %d\n", count); 663 if (arg == ATOM_UNIT_MICROSEC) 664 udelay(count); 665 else if (!drm_can_sleep()) 666 mdelay(count); 667 else 668 msleep(count); 669 } 670 671 static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) 672 { 673 uint8_t attr = U8((*ptr)++); 674 uint32_t dst, src; 675 SDEBUG(" src1: "); 676 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 677 SDEBUG(" src2: "); 678 src = atom_get_src(ctx, attr, ptr); 679 if (src != 0) { 680 ctx->ctx->divmul[0] = dst / src; 681 ctx->ctx->divmul[1] = dst % src; 682 } else { 683 ctx->ctx->divmul[0] = 0; 684 ctx->ctx->divmul[1] = 0; 685 } 686 } 687 688 static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg) 689 { 690 uint64_t val64; 691 uint8_t attr = U8((*ptr)++); 692 uint32_t dst, src; 693 SDEBUG(" src1: "); 694 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 695 SDEBUG(" src2: "); 696 src = atom_get_src(ctx, attr, ptr); 697 if (src != 0) { 698 val64 = dst; 699 val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32; 700 do_div(val64, src); 701 ctx->ctx->divmul[0] = lower_32_bits(val64); 702 ctx->ctx->divmul[1] = upper_32_bits(val64); 703 } else { 704 ctx->ctx->divmul[0] = 0; 705 ctx->ctx->divmul[1] = 0; 706 } 707 } 708 709 static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) 710 { 711 /* functionally, a nop */ 712 } 713 714 static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) 715 { 716 int execute = 0, target = U16(*ptr); 717 unsigned long cjiffies; 718 719 (*ptr) += 2; 720 switch (arg) { 721 case ATOM_COND_ABOVE: 722 execute = ctx->ctx->cs_above; 723 break; 724 case ATOM_COND_ABOVEOREQUAL: 725 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal; 726 break; 727 case ATOM_COND_ALWAYS: 728 execute = 1; 729 break; 730 case ATOM_COND_BELOW: 731 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal); 732 break; 733 case ATOM_COND_BELOWOREQUAL: 734 execute = !ctx->ctx->cs_above; 735 break; 736 case ATOM_COND_EQUAL: 737 execute = ctx->ctx->cs_equal; 738 break; 739 case ATOM_COND_NOTEQUAL: 740 execute = !ctx->ctx->cs_equal; 741 break; 742 } 743 if (arg != ATOM_COND_ALWAYS) 744 SDEBUG(" taken: %s\n", str_yes_no(execute)); 745 SDEBUG(" target: 0x%04X\n", target); 746 if (execute) { 747 if (ctx->last_jump == (ctx->start + target)) { 748 cjiffies = jiffies; 749 if (time_after(cjiffies, ctx->last_jump_jiffies)) { 750 cjiffies -= ctx->last_jump_jiffies; 751 if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) { 752 DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n", 753 ATOM_CMD_TIMEOUT_SEC); 754 ctx->abort = true; 755 } 756 } else { 757 /* jiffies wrap around we will just wait a little longer */ 758 ctx->last_jump_jiffies = jiffies; 759 } 760 } else { 761 ctx->last_jump = ctx->start + target; 762 ctx->last_jump_jiffies = jiffies; 763 } 764 *ptr = ctx->start + target; 765 } 766 } 767 768 static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) 769 { 770 uint8_t attr = U8((*ptr)++); 771 uint32_t dst, mask, src, saved; 772 int dptr = *ptr; 773 SDEBUG(" dst: "); 774 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 775 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); 776 SDEBUG(" mask: 0x%08x", mask); 777 SDEBUG(" src: "); 778 src = atom_get_src(ctx, attr, ptr); 779 dst &= mask; 780 dst |= src; 781 SDEBUG(" dst: "); 782 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 783 } 784 785 static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg) 786 { 787 uint8_t attr = U8((*ptr)++); 788 uint32_t src, saved; 789 int dptr = *ptr; 790 if (((attr >> 3) & 7) != ATOM_SRC_DWORD) 791 atom_get_dst(ctx, arg, attr, ptr, &saved, 0); 792 else { 793 atom_skip_dst(ctx, arg, attr, ptr); 794 saved = 0xCDCDCDCD; 795 } 796 SDEBUG(" src: "); 797 src = atom_get_src(ctx, attr, ptr); 798 SDEBUG(" dst: "); 799 atom_put_dst(ctx, arg, attr, &dptr, src, saved); 800 } 801 802 static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg) 803 { 804 uint8_t attr = U8((*ptr)++); 805 uint32_t dst, src; 806 SDEBUG(" src1: "); 807 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 808 SDEBUG(" src2: "); 809 src = atom_get_src(ctx, attr, ptr); 810 ctx->ctx->divmul[0] = dst * src; 811 } 812 813 static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg) 814 { 815 uint64_t val64; 816 uint8_t attr = U8((*ptr)++); 817 uint32_t dst, src; 818 SDEBUG(" src1: "); 819 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 820 SDEBUG(" src2: "); 821 src = atom_get_src(ctx, attr, ptr); 822 val64 = (uint64_t)dst * (uint64_t)src; 823 ctx->ctx->divmul[0] = lower_32_bits(val64); 824 ctx->ctx->divmul[1] = upper_32_bits(val64); 825 } 826 827 static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg) 828 { 829 /* nothing */ 830 } 831 832 static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg) 833 { 834 uint8_t attr = U8((*ptr)++); 835 uint32_t dst, src, saved; 836 int dptr = *ptr; 837 SDEBUG(" dst: "); 838 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 839 SDEBUG(" src: "); 840 src = atom_get_src(ctx, attr, ptr); 841 dst |= src; 842 SDEBUG(" dst: "); 843 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 844 } 845 846 static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg) 847 { 848 uint8_t val = U8((*ptr)++); 849 SDEBUG("POST card output: 0x%02X\n", val); 850 } 851 852 static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg) 853 { 854 pr_info("unimplemented!\n"); 855 } 856 857 static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg) 858 { 859 pr_info("unimplemented!\n"); 860 } 861 862 static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg) 863 { 864 pr_info("unimplemented!\n"); 865 } 866 867 static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg) 868 { 869 int idx = U8(*ptr); 870 (*ptr)++; 871 SDEBUG(" block: %d\n", idx); 872 if (!idx) 873 ctx->ctx->data_block = 0; 874 else if (idx == 255) 875 ctx->ctx->data_block = ctx->start; 876 else 877 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx); 878 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block); 879 } 880 881 static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg) 882 { 883 uint8_t attr = U8((*ptr)++); 884 SDEBUG(" fb_base: "); 885 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr); 886 } 887 888 static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg) 889 { 890 int port; 891 switch (arg) { 892 case ATOM_PORT_ATI: 893 port = U16(*ptr); 894 if (port < ATOM_IO_NAMES_CNT) 895 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]); 896 else 897 SDEBUG(" port: %d\n", port); 898 if (!port) 899 ctx->ctx->io_mode = ATOM_IO_MM; 900 else 901 ctx->ctx->io_mode = ATOM_IO_IIO | port; 902 (*ptr) += 2; 903 break; 904 case ATOM_PORT_PCI: 905 ctx->ctx->io_mode = ATOM_IO_PCI; 906 (*ptr)++; 907 break; 908 case ATOM_PORT_SYSIO: 909 ctx->ctx->io_mode = ATOM_IO_SYSIO; 910 (*ptr)++; 911 break; 912 } 913 } 914 915 static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) 916 { 917 ctx->ctx->reg_block = U16(*ptr); 918 (*ptr) += 2; 919 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); 920 } 921 922 static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) 923 { 924 uint8_t attr = U8((*ptr)++), shift; 925 uint32_t saved, dst; 926 int dptr = *ptr; 927 attr &= 0x38; 928 attr |= atom_def_dst[attr >> 3] << 6; 929 SDEBUG(" dst: "); 930 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 931 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); 932 SDEBUG(" shift: %d\n", shift); 933 dst <<= shift; 934 SDEBUG(" dst: "); 935 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 936 } 937 938 static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) 939 { 940 uint8_t attr = U8((*ptr)++), shift; 941 uint32_t saved, dst; 942 int dptr = *ptr; 943 attr &= 0x38; 944 attr |= atom_def_dst[attr >> 3] << 6; 945 SDEBUG(" dst: "); 946 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 947 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); 948 SDEBUG(" shift: %d\n", shift); 949 dst >>= shift; 950 SDEBUG(" dst: "); 951 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 952 } 953 954 static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) 955 { 956 uint8_t attr = U8((*ptr)++), shift; 957 uint32_t saved, dst; 958 int dptr = *ptr; 959 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; 960 SDEBUG(" dst: "); 961 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 962 /* op needs to full dst value */ 963 dst = saved; 964 shift = atom_get_src(ctx, attr, ptr); 965 SDEBUG(" shift: %d\n", shift); 966 dst <<= shift; 967 dst &= atom_arg_mask[dst_align]; 968 dst >>= atom_arg_shift[dst_align]; 969 SDEBUG(" dst: "); 970 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 971 } 972 973 static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) 974 { 975 uint8_t attr = U8((*ptr)++), shift; 976 uint32_t saved, dst; 977 int dptr = *ptr; 978 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; 979 SDEBUG(" dst: "); 980 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 981 /* op needs to full dst value */ 982 dst = saved; 983 shift = atom_get_src(ctx, attr, ptr); 984 SDEBUG(" shift: %d\n", shift); 985 dst >>= shift; 986 dst &= atom_arg_mask[dst_align]; 987 dst >>= atom_arg_shift[dst_align]; 988 SDEBUG(" dst: "); 989 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 990 } 991 992 static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg) 993 { 994 uint8_t attr = U8((*ptr)++); 995 uint32_t dst, src, saved; 996 int dptr = *ptr; 997 SDEBUG(" dst: "); 998 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 999 SDEBUG(" src: "); 1000 src = atom_get_src(ctx, attr, ptr); 1001 dst -= src; 1002 SDEBUG(" dst: "); 1003 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 1004 } 1005 1006 static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg) 1007 { 1008 uint8_t attr = U8((*ptr)++); 1009 uint32_t src, val, target; 1010 SDEBUG(" switch: "); 1011 src = atom_get_src(ctx, attr, ptr); 1012 while (U16(*ptr) != ATOM_CASE_END) 1013 if (U8(*ptr) == ATOM_CASE_MAGIC) { 1014 (*ptr)++; 1015 SDEBUG(" case: "); 1016 val = 1017 atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, 1018 ptr); 1019 target = U16(*ptr); 1020 if (val == src) { 1021 SDEBUG(" target: %04X\n", target); 1022 *ptr = ctx->start + target; 1023 return; 1024 } 1025 (*ptr) += 2; 1026 } else { 1027 pr_info("Bad case\n"); 1028 return; 1029 } 1030 (*ptr) += 2; 1031 } 1032 1033 static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg) 1034 { 1035 uint8_t attr = U8((*ptr)++); 1036 uint32_t dst, src; 1037 SDEBUG(" src1: "); 1038 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 1039 SDEBUG(" src2: "); 1040 src = atom_get_src(ctx, attr, ptr); 1041 ctx->ctx->cs_equal = ((dst & src) == 0); 1042 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE"); 1043 } 1044 1045 static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg) 1046 { 1047 uint8_t attr = U8((*ptr)++); 1048 uint32_t dst, src, saved; 1049 int dptr = *ptr; 1050 SDEBUG(" dst: "); 1051 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 1052 SDEBUG(" src: "); 1053 src = atom_get_src(ctx, attr, ptr); 1054 dst ^= src; 1055 SDEBUG(" dst: "); 1056 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 1057 } 1058 1059 static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) 1060 { 1061 uint8_t val = U8((*ptr)++); 1062 SDEBUG("DEBUG output: 0x%02X\n", val); 1063 } 1064 1065 static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg) 1066 { 1067 uint16_t val = U16(*ptr); 1068 (*ptr) += val + 2; 1069 SDEBUG("PROCESSDS output: 0x%02X\n", val); 1070 } 1071 1072 static struct { 1073 void (*func) (atom_exec_context *, int *, int); 1074 int arg; 1075 } opcode_table[ATOM_OP_CNT] = { 1076 { 1077 NULL, 0}, { 1078 atom_op_move, ATOM_ARG_REG}, { 1079 atom_op_move, ATOM_ARG_PS}, { 1080 atom_op_move, ATOM_ARG_WS}, { 1081 atom_op_move, ATOM_ARG_FB}, { 1082 atom_op_move, ATOM_ARG_PLL}, { 1083 atom_op_move, ATOM_ARG_MC}, { 1084 atom_op_and, ATOM_ARG_REG}, { 1085 atom_op_and, ATOM_ARG_PS}, { 1086 atom_op_and, ATOM_ARG_WS}, { 1087 atom_op_and, ATOM_ARG_FB}, { 1088 atom_op_and, ATOM_ARG_PLL}, { 1089 atom_op_and, ATOM_ARG_MC}, { 1090 atom_op_or, ATOM_ARG_REG}, { 1091 atom_op_or, ATOM_ARG_PS}, { 1092 atom_op_or, ATOM_ARG_WS}, { 1093 atom_op_or, ATOM_ARG_FB}, { 1094 atom_op_or, ATOM_ARG_PLL}, { 1095 atom_op_or, ATOM_ARG_MC}, { 1096 atom_op_shift_left, ATOM_ARG_REG}, { 1097 atom_op_shift_left, ATOM_ARG_PS}, { 1098 atom_op_shift_left, ATOM_ARG_WS}, { 1099 atom_op_shift_left, ATOM_ARG_FB}, { 1100 atom_op_shift_left, ATOM_ARG_PLL}, { 1101 atom_op_shift_left, ATOM_ARG_MC}, { 1102 atom_op_shift_right, ATOM_ARG_REG}, { 1103 atom_op_shift_right, ATOM_ARG_PS}, { 1104 atom_op_shift_right, ATOM_ARG_WS}, { 1105 atom_op_shift_right, ATOM_ARG_FB}, { 1106 atom_op_shift_right, ATOM_ARG_PLL}, { 1107 atom_op_shift_right, ATOM_ARG_MC}, { 1108 atom_op_mul, ATOM_ARG_REG}, { 1109 atom_op_mul, ATOM_ARG_PS}, { 1110 atom_op_mul, ATOM_ARG_WS}, { 1111 atom_op_mul, ATOM_ARG_FB}, { 1112 atom_op_mul, ATOM_ARG_PLL}, { 1113 atom_op_mul, ATOM_ARG_MC}, { 1114 atom_op_div, ATOM_ARG_REG}, { 1115 atom_op_div, ATOM_ARG_PS}, { 1116 atom_op_div, ATOM_ARG_WS}, { 1117 atom_op_div, ATOM_ARG_FB}, { 1118 atom_op_div, ATOM_ARG_PLL}, { 1119 atom_op_div, ATOM_ARG_MC}, { 1120 atom_op_add, ATOM_ARG_REG}, { 1121 atom_op_add, ATOM_ARG_PS}, { 1122 atom_op_add, ATOM_ARG_WS}, { 1123 atom_op_add, ATOM_ARG_FB}, { 1124 atom_op_add, ATOM_ARG_PLL}, { 1125 atom_op_add, ATOM_ARG_MC}, { 1126 atom_op_sub, ATOM_ARG_REG}, { 1127 atom_op_sub, ATOM_ARG_PS}, { 1128 atom_op_sub, ATOM_ARG_WS}, { 1129 atom_op_sub, ATOM_ARG_FB}, { 1130 atom_op_sub, ATOM_ARG_PLL}, { 1131 atom_op_sub, ATOM_ARG_MC}, { 1132 atom_op_setport, ATOM_PORT_ATI}, { 1133 atom_op_setport, ATOM_PORT_PCI}, { 1134 atom_op_setport, ATOM_PORT_SYSIO}, { 1135 atom_op_setregblock, 0}, { 1136 atom_op_setfbbase, 0}, { 1137 atom_op_compare, ATOM_ARG_REG}, { 1138 atom_op_compare, ATOM_ARG_PS}, { 1139 atom_op_compare, ATOM_ARG_WS}, { 1140 atom_op_compare, ATOM_ARG_FB}, { 1141 atom_op_compare, ATOM_ARG_PLL}, { 1142 atom_op_compare, ATOM_ARG_MC}, { 1143 atom_op_switch, 0}, { 1144 atom_op_jump, ATOM_COND_ALWAYS}, { 1145 atom_op_jump, ATOM_COND_EQUAL}, { 1146 atom_op_jump, ATOM_COND_BELOW}, { 1147 atom_op_jump, ATOM_COND_ABOVE}, { 1148 atom_op_jump, ATOM_COND_BELOWOREQUAL}, { 1149 atom_op_jump, ATOM_COND_ABOVEOREQUAL}, { 1150 atom_op_jump, ATOM_COND_NOTEQUAL}, { 1151 atom_op_test, ATOM_ARG_REG}, { 1152 atom_op_test, ATOM_ARG_PS}, { 1153 atom_op_test, ATOM_ARG_WS}, { 1154 atom_op_test, ATOM_ARG_FB}, { 1155 atom_op_test, ATOM_ARG_PLL}, { 1156 atom_op_test, ATOM_ARG_MC}, { 1157 atom_op_delay, ATOM_UNIT_MILLISEC}, { 1158 atom_op_delay, ATOM_UNIT_MICROSEC}, { 1159 atom_op_calltable, 0}, { 1160 atom_op_repeat, 0}, { 1161 atom_op_clear, ATOM_ARG_REG}, { 1162 atom_op_clear, ATOM_ARG_PS}, { 1163 atom_op_clear, ATOM_ARG_WS}, { 1164 atom_op_clear, ATOM_ARG_FB}, { 1165 atom_op_clear, ATOM_ARG_PLL}, { 1166 atom_op_clear, ATOM_ARG_MC}, { 1167 atom_op_nop, 0}, { 1168 atom_op_eot, 0}, { 1169 atom_op_mask, ATOM_ARG_REG}, { 1170 atom_op_mask, ATOM_ARG_PS}, { 1171 atom_op_mask, ATOM_ARG_WS}, { 1172 atom_op_mask, ATOM_ARG_FB}, { 1173 atom_op_mask, ATOM_ARG_PLL}, { 1174 atom_op_mask, ATOM_ARG_MC}, { 1175 atom_op_postcard, 0}, { 1176 atom_op_beep, 0}, { 1177 atom_op_savereg, 0}, { 1178 atom_op_restorereg, 0}, { 1179 atom_op_setdatablock, 0}, { 1180 atom_op_xor, ATOM_ARG_REG}, { 1181 atom_op_xor, ATOM_ARG_PS}, { 1182 atom_op_xor, ATOM_ARG_WS}, { 1183 atom_op_xor, ATOM_ARG_FB}, { 1184 atom_op_xor, ATOM_ARG_PLL}, { 1185 atom_op_xor, ATOM_ARG_MC}, { 1186 atom_op_shl, ATOM_ARG_REG}, { 1187 atom_op_shl, ATOM_ARG_PS}, { 1188 atom_op_shl, ATOM_ARG_WS}, { 1189 atom_op_shl, ATOM_ARG_FB}, { 1190 atom_op_shl, ATOM_ARG_PLL}, { 1191 atom_op_shl, ATOM_ARG_MC}, { 1192 atom_op_shr, ATOM_ARG_REG}, { 1193 atom_op_shr, ATOM_ARG_PS}, { 1194 atom_op_shr, ATOM_ARG_WS}, { 1195 atom_op_shr, ATOM_ARG_FB}, { 1196 atom_op_shr, ATOM_ARG_PLL}, { 1197 atom_op_shr, ATOM_ARG_MC}, { 1198 atom_op_debug, 0}, { 1199 atom_op_processds, 0}, { 1200 atom_op_mul32, ATOM_ARG_PS}, { 1201 atom_op_mul32, ATOM_ARG_WS}, { 1202 atom_op_div32, ATOM_ARG_PS}, { 1203 atom_op_div32, ATOM_ARG_WS}, 1204 }; 1205 1206 static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params) 1207 { 1208 int base = CU16(ctx->cmd_table + 4 + 2 * index); 1209 int len, ws, ps, ptr; 1210 unsigned char op; 1211 atom_exec_context ectx; 1212 int ret = 0; 1213 1214 if (!base) 1215 return -EINVAL; 1216 1217 len = CU16(base + ATOM_CT_SIZE_PTR); 1218 ws = CU8(base + ATOM_CT_WS_PTR); 1219 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK; 1220 ptr = base + ATOM_CT_CODE_PTR; 1221 1222 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); 1223 1224 ectx.ctx = ctx; 1225 ectx.ps_shift = ps / 4; 1226 ectx.start = base; 1227 ectx.ps = params; 1228 ectx.abort = false; 1229 ectx.last_jump = 0; 1230 if (ws) 1231 ectx.ws = kcalloc(4, ws, GFP_KERNEL); 1232 else 1233 ectx.ws = NULL; 1234 1235 debug_depth++; 1236 while (1) { 1237 op = CU8(ptr++); 1238 if (op < ATOM_OP_NAMES_CNT) 1239 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); 1240 else 1241 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); 1242 if (ectx.abort) { 1243 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", 1244 base, len, ws, ps, ptr - 1); 1245 ret = -EINVAL; 1246 goto free; 1247 } 1248 1249 if (op < ATOM_OP_CNT && op > 0) 1250 opcode_table[op].func(&ectx, &ptr, 1251 opcode_table[op].arg); 1252 else 1253 break; 1254 1255 if (op == ATOM_OP_EOT) 1256 break; 1257 } 1258 debug_depth--; 1259 SDEBUG("<<\n"); 1260 1261 free: 1262 if (ws) 1263 kfree(ectx.ws); 1264 return ret; 1265 } 1266 1267 int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params) 1268 { 1269 int r; 1270 1271 mutex_lock(&ctx->mutex); 1272 /* reset data block */ 1273 ctx->data_block = 0; 1274 /* reset reg block */ 1275 ctx->reg_block = 0; 1276 /* reset fb window */ 1277 ctx->fb_base = 0; 1278 /* reset io mode */ 1279 ctx->io_mode = ATOM_IO_MM; 1280 /* reset divmul */ 1281 ctx->divmul[0] = 0; 1282 ctx->divmul[1] = 0; 1283 r = amdgpu_atom_execute_table_locked(ctx, index, params); 1284 mutex_unlock(&ctx->mutex); 1285 return r; 1286 } 1287 1288 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; 1289 1290 static void atom_index_iio(struct atom_context *ctx, int base) 1291 { 1292 ctx->iio = kzalloc(2 * 256, GFP_KERNEL); 1293 if (!ctx->iio) 1294 return; 1295 while (CU8(base) == ATOM_IIO_START) { 1296 ctx->iio[CU8(base + 1)] = base + 2; 1297 base += 2; 1298 while (CU8(base) != ATOM_IIO_END) 1299 base += atom_iio_len[CU8(base)]; 1300 base += 3; 1301 } 1302 } 1303 1304 static void atom_get_vbios_name(struct atom_context *ctx) 1305 { 1306 unsigned char *p_rom; 1307 unsigned char str_num; 1308 unsigned short off_to_vbios_str; 1309 unsigned char *c_ptr; 1310 int name_size; 1311 int i; 1312 1313 const char *na = "--N/A--"; 1314 char *back; 1315 1316 p_rom = ctx->bios; 1317 1318 str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS); 1319 if (str_num != 0) { 1320 off_to_vbios_str = 1321 *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START); 1322 1323 c_ptr = (unsigned char *)(p_rom + off_to_vbios_str); 1324 } else { 1325 /* do not know where to find name */ 1326 memcpy(ctx->name, na, 7); 1327 ctx->name[7] = 0; 1328 return; 1329 } 1330 1331 /* 1332 * skip the atombios strings, usually 4 1333 * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type 1334 */ 1335 for (i = 0; i < str_num; i++) { 1336 while (*c_ptr != 0) 1337 c_ptr++; 1338 c_ptr++; 1339 } 1340 1341 /* skip the following 2 chars: 0x0D 0x0A */ 1342 c_ptr += 2; 1343 1344 name_size = strnlen(c_ptr, STRLEN_LONG - 1); 1345 memcpy(ctx->name, c_ptr, name_size); 1346 back = ctx->name + name_size; 1347 while ((*--back) == ' ') 1348 ; 1349 *(back + 1) = '\0'; 1350 } 1351 1352 static void atom_get_vbios_date(struct atom_context *ctx) 1353 { 1354 unsigned char *p_rom; 1355 unsigned char *date_in_rom; 1356 1357 p_rom = ctx->bios; 1358 1359 date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE; 1360 1361 ctx->date[0] = '2'; 1362 ctx->date[1] = '0'; 1363 ctx->date[2] = date_in_rom[6]; 1364 ctx->date[3] = date_in_rom[7]; 1365 ctx->date[4] = '/'; 1366 ctx->date[5] = date_in_rom[0]; 1367 ctx->date[6] = date_in_rom[1]; 1368 ctx->date[7] = '/'; 1369 ctx->date[8] = date_in_rom[3]; 1370 ctx->date[9] = date_in_rom[4]; 1371 ctx->date[10] = ' '; 1372 ctx->date[11] = date_in_rom[9]; 1373 ctx->date[12] = date_in_rom[10]; 1374 ctx->date[13] = date_in_rom[11]; 1375 ctx->date[14] = date_in_rom[12]; 1376 ctx->date[15] = date_in_rom[13]; 1377 ctx->date[16] = '\0'; 1378 } 1379 1380 static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start, 1381 int end, int maxlen) 1382 { 1383 unsigned long str_off; 1384 unsigned char *p_rom; 1385 unsigned short str_len; 1386 1387 str_off = 0; 1388 str_len = strnlen(str, maxlen); 1389 p_rom = ctx->bios; 1390 1391 for (; start <= end; ++start) { 1392 for (str_off = 0; str_off < str_len; ++str_off) { 1393 if (str[str_off] != *(p_rom + start + str_off)) 1394 break; 1395 } 1396 1397 if (str_off == str_len || str[str_off] == 0) 1398 return p_rom + start; 1399 } 1400 return NULL; 1401 } 1402 1403 static void atom_get_vbios_pn(struct atom_context *ctx) 1404 { 1405 unsigned char *p_rom; 1406 unsigned short off_to_vbios_str; 1407 unsigned char *vbios_str; 1408 int count; 1409 1410 off_to_vbios_str = 0; 1411 p_rom = ctx->bios; 1412 1413 if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) { 1414 off_to_vbios_str = 1415 *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START); 1416 1417 vbios_str = (unsigned char *)(p_rom + off_to_vbios_str); 1418 } else { 1419 vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER; 1420 } 1421 1422 if (*vbios_str == 0) { 1423 vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64); 1424 if (vbios_str == NULL) 1425 vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1; 1426 } 1427 if (vbios_str != NULL && *vbios_str == 0) 1428 vbios_str++; 1429 1430 if (vbios_str != NULL) { 1431 count = 0; 1432 while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' && 1433 vbios_str[count] <= 'z') { 1434 ctx->vbios_pn[count] = vbios_str[count]; 1435 count++; 1436 } 1437 1438 ctx->vbios_pn[count] = 0; 1439 } 1440 1441 pr_info("ATOM BIOS: %s\n", ctx->vbios_pn); 1442 } 1443 1444 static void atom_get_vbios_version(struct atom_context *ctx) 1445 { 1446 unsigned short start = 3, end; 1447 unsigned char *vbios_ver; 1448 unsigned char *p_rom; 1449 1450 p_rom = ctx->bios; 1451 /* Search from strings offset if it's present */ 1452 start = *(unsigned short *)(p_rom + 1453 OFFSET_TO_GET_ATOMBIOS_STRING_START); 1454 1455 /* Search till atom rom header start point */ 1456 end = *(unsigned short *)(p_rom + OFFSET_TO_ATOM_ROM_HEADER_POINTER); 1457 1458 /* Use hardcoded offsets, if the offsets are not populated */ 1459 if (end <= start) { 1460 start = 3; 1461 end = 1024; 1462 } 1463 1464 /* find anchor ATOMBIOSBK-AMD */ 1465 vbios_ver = 1466 atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, start, end, 64); 1467 if (vbios_ver != NULL) { 1468 /* skip ATOMBIOSBK-AMD VER */ 1469 vbios_ver += 18; 1470 memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL); 1471 } else { 1472 ctx->vbios_ver_str[0] = '\0'; 1473 } 1474 } 1475 1476 struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) 1477 { 1478 int base; 1479 struct atom_context *ctx = 1480 kzalloc(sizeof(struct atom_context), GFP_KERNEL); 1481 struct _ATOM_ROM_HEADER *atom_rom_header; 1482 struct _ATOM_MASTER_DATA_TABLE *master_table; 1483 struct _ATOM_FIRMWARE_INFO *atom_fw_info; 1484 1485 if (!ctx) 1486 return NULL; 1487 1488 ctx->card = card; 1489 ctx->bios = bios; 1490 1491 if (CU16(0) != ATOM_BIOS_MAGIC) { 1492 pr_info("Invalid BIOS magic\n"); 1493 kfree(ctx); 1494 return NULL; 1495 } 1496 if (strncmp 1497 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC, 1498 strlen(ATOM_ATI_MAGIC))) { 1499 pr_info("Invalid ATI magic\n"); 1500 kfree(ctx); 1501 return NULL; 1502 } 1503 1504 base = CU16(ATOM_ROM_TABLE_PTR); 1505 if (strncmp 1506 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC, 1507 strlen(ATOM_ROM_MAGIC))) { 1508 pr_info("Invalid ATOM magic\n"); 1509 kfree(ctx); 1510 return NULL; 1511 } 1512 1513 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR); 1514 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR); 1515 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4); 1516 if (!ctx->iio) { 1517 amdgpu_atom_destroy(ctx); 1518 return NULL; 1519 } 1520 1521 atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base); 1522 if (atom_rom_header->usMasterDataTableOffset != 0) { 1523 master_table = (struct _ATOM_MASTER_DATA_TABLE *) 1524 CSTR(atom_rom_header->usMasterDataTableOffset); 1525 if (master_table->ListOfDataTables.FirmwareInfo != 0) { 1526 atom_fw_info = (struct _ATOM_FIRMWARE_INFO *) 1527 CSTR(master_table->ListOfDataTables.FirmwareInfo); 1528 ctx->version = atom_fw_info->ulFirmwareRevision; 1529 } 1530 } 1531 1532 atom_get_vbios_name(ctx); 1533 atom_get_vbios_pn(ctx); 1534 atom_get_vbios_date(ctx); 1535 atom_get_vbios_version(ctx); 1536 1537 return ctx; 1538 } 1539 1540 int amdgpu_atom_asic_init(struct atom_context *ctx) 1541 { 1542 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); 1543 uint32_t ps[16]; 1544 int ret; 1545 1546 memset(ps, 0, 64); 1547 1548 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); 1549 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR)); 1550 if (!ps[0] || !ps[1]) 1551 return 1; 1552 1553 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) 1554 return 1; 1555 ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps); 1556 if (ret) 1557 return ret; 1558 1559 memset(ps, 0, 64); 1560 1561 return ret; 1562 } 1563 1564 void amdgpu_atom_destroy(struct atom_context *ctx) 1565 { 1566 kfree(ctx->iio); 1567 kfree(ctx); 1568 } 1569 1570 bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, 1571 uint16_t *size, uint8_t *frev, uint8_t *crev, 1572 uint16_t *data_start) 1573 { 1574 int offset = index * 2 + 4; 1575 int idx = CU16(ctx->data_table + offset); 1576 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); 1577 1578 if (!mdt[index]) 1579 return false; 1580 1581 if (size) 1582 *size = CU16(idx); 1583 if (frev) 1584 *frev = CU8(idx + 2); 1585 if (crev) 1586 *crev = CU8(idx + 3); 1587 *data_start = idx; 1588 return true; 1589 } 1590 1591 bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, 1592 uint8_t *crev) 1593 { 1594 int offset = index * 2 + 4; 1595 int idx = CU16(ctx->cmd_table + offset); 1596 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); 1597 1598 if (!mct[index]) 1599 return false; 1600 1601 if (frev) 1602 *frev = CU8(idx + 2); 1603 if (crev) 1604 *crev = CU8(idx + 3); 1605 return true; 1606 } 1607 1608