1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25 #include <linux/module.h>
26 #include <linux/sched.h>
27 #include <linux/slab.h>
28 #include <linux/string_helpers.h>
29
30 #include <linux/unaligned.h>
31
32 #include <drm/drm_util.h>
33
34 #define ATOM_DEBUG
35
36 #include "atomfirmware.h"
37 #include "atom.h"
38 #include "atom-names.h"
39 #include "atom-bits.h"
40 #include "amdgpu.h"
41
42 #define ATOM_COND_ABOVE 0
43 #define ATOM_COND_ABOVEOREQUAL 1
44 #define ATOM_COND_ALWAYS 2
45 #define ATOM_COND_BELOW 3
46 #define ATOM_COND_BELOWOREQUAL 4
47 #define ATOM_COND_EQUAL 5
48 #define ATOM_COND_NOTEQUAL 6
49
50 #define ATOM_PORT_ATI 0
51 #define ATOM_PORT_PCI 1
52 #define ATOM_PORT_SYSIO 2
53
54 #define ATOM_UNIT_MICROSEC 0
55 #define ATOM_UNIT_MILLISEC 1
56
57 #define PLL_INDEX 2
58 #define PLL_DATA 3
59
60 #define ATOM_CMD_TIMEOUT_SEC 20
61
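/*
 * Execution state for a single command-table invocation.  A rough summary,
 * based on how the fields are used below: ps/ws are the parameter-space and
 * workspace dwords, ps_shift is the dword offset handed to nested CALL_TABLE
 * ops, start is the table base used for jump targets, and
 * last_jump/last_jump_jiffies/abort implement the stuck-loop watchdog.
 */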
62 typedef struct {
63 struct atom_context *ctx;
64 uint32_t *ps, *ws;
65 int ps_size, ws_size;
66 int ps_shift;
67 uint16_t start;
68 unsigned last_jump;
69 unsigned long last_jump_jiffies;
70 bool abort;
71 } atom_exec_context;
72
73 int amdgpu_atom_debug;
74 static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
75 int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
76
77 static uint32_t atom_arg_mask[8] =
78 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
79 0xFF000000 };
80 static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
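/*
 * Example of how the mask/shift pair is used: for alignment ATOM_SRC_WORD8
 * the mask is 0x00FFFF00 and the shift is 8, so a raw value of 0x12345678
 * yields (0x12345678 & 0x00FFFF00) >> 8 == 0x3456, i.e. bits [23:8] of the
 * source operand.
 */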
81
82 static int atom_dst_to_src[8][4] = {
83 /* translate destination alignment field to the source alignment encoding */
84 {0, 0, 0, 0},
85 {1, 2, 3, 0},
86 {1, 2, 3, 0},
87 {1, 2, 3, 0},
88 {4, 5, 6, 7},
89 {4, 5, 6, 7},
90 {4, 5, 6, 7},
91 {4, 5, 6, 7},
92 };
93 static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
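/*
 * Note on the two tables above: atom_dst_to_src re-encodes a destination
 * (alignment, size-selector) pair as a source alignment so the common
 * atom_get_src_int()/atom_put_dst() paths can be reused, and atom_def_dst
 * picks the default size selector for a given alignment.
 */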
94
95 static int debug_depth;
96 #ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
98 {
99 while (n--)
100 printk(" ");
101 }
102
103 #define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
104 #define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
105 #else
106 #define DEBUG(...) do { } while (0)
107 #define SDEBUG(...) do { } while (0)
108 #endif
109
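/*
 * Interpreter for the BIOS "indirect IO" (IIO) micro-programs: each program
 * is a byte-coded sequence (ATOM_IIO_READ/WRITE/CLEAR/SET/MOVE_*/END) that
 * shuffles bit-fields between a temporary, the supplied index/data values
 * and MMIO registers.  The programs are located up front by atom_index_iio()
 * and selected through ctx->io_mode.
 */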
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
112 {
113 uint32_t temp = 0xCDCDCDCD;
114
115 while (1)
116 switch (CU8(base)) {
117 case ATOM_IIO_NOP:
118 base++;
119 break;
120 case ATOM_IIO_READ:
121 temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
122 base += 3;
123 break;
124 case ATOM_IIO_WRITE:
125 ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
126 base += 3;
127 break;
128 case ATOM_IIO_CLEAR:
129 temp &=
130 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
131 CU8(base + 2));
132 base += 3;
133 break;
134 case ATOM_IIO_SET:
135 temp |=
136 (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
137 2);
138 base += 3;
139 break;
140 case ATOM_IIO_MOVE_INDEX:
141 temp &=
142 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
143 CU8(base + 3));
144 temp |=
145 ((index >> CU8(base + 2)) &
146 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
147 3);
148 base += 4;
149 break;
150 case ATOM_IIO_MOVE_DATA:
151 temp &=
152 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
153 CU8(base + 3));
154 temp |=
155 ((data >> CU8(base + 2)) &
156 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
157 3);
158 base += 4;
159 break;
160 case ATOM_IIO_MOVE_ATTR:
161 temp &=
162 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
163 CU8(base + 3));
164 temp |=
165 ((ctx->
166 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
167 CU8
168 (base
169 +
170 1))))
171 << CU8(base + 3);
172 base += 4;
173 break;
174 case ATOM_IIO_END:
175 return temp;
176 default:
177 pr_info("Unknown IIO opcode\n");
178 return 0;
179 }
180 }
181
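/*
 * Decode a source operand.  The low three bits of @attr select the operand
 * type (register, parameter space, workspace, data table, frame-buffer
 * scratch, immediate, PLL or MC), bits [5:3] select the alignment, and the
 * alignment is then applied via atom_arg_mask/atom_arg_shift above.
 */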
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
184 {
185 uint32_t idx, val = 0xCDCDCDCD, align, arg;
186 struct atom_context *gctx = ctx->ctx;
187 arg = attr & 7;
188 align = (attr >> 3) & 7;
189 switch (arg) {
190 case ATOM_ARG_REG:
191 idx = U16(*ptr);
192 (*ptr) += 2;
193 if (print)
194 DEBUG("REG[0x%04X]", idx);
195 idx += gctx->reg_block;
196 switch (gctx->io_mode) {
197 case ATOM_IO_MM:
198 val = gctx->card->reg_read(gctx->card, idx);
199 break;
200 case ATOM_IO_PCI:
201 pr_info("PCI registers are not implemented\n");
202 return 0;
203 case ATOM_IO_SYSIO:
204 pr_info("SYSIO registers are not implemented\n");
205 return 0;
206 default:
207 if (!(gctx->io_mode & 0x80)) {
208 pr_info("Bad IO mode\n");
209 return 0;
210 }
211 if (!gctx->iio[gctx->io_mode & 0x7F]) {
212 pr_info("Undefined indirect IO read method %d\n",
213 gctx->io_mode & 0x7F);
214 return 0;
215 }
216 val =
217 atom_iio_execute(gctx,
218 gctx->iio[gctx->io_mode & 0x7F],
219 idx, 0);
220 }
221 break;
222 case ATOM_ARG_PS:
223 idx = U8(*ptr);
224 (*ptr)++;
225 /* get_unaligned_le32 avoids unaligned accesses from atombios
226 * tables, noticed on a DEC Alpha. */
227 if (idx < ctx->ps_size)
228 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
229 else
230 pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
231 if (print)
232 DEBUG("PS[0x%02X,0x%04X]", idx, val);
233 break;
234 case ATOM_ARG_WS:
235 idx = U8(*ptr);
236 (*ptr)++;
237 if (print)
238 DEBUG("WS[0x%02X]", idx);
239 switch (idx) {
240 case ATOM_WS_QUOTIENT:
241 val = gctx->divmul[0];
242 break;
243 case ATOM_WS_REMAINDER:
244 val = gctx->divmul[1];
245 break;
246 case ATOM_WS_DATAPTR:
247 val = gctx->data_block;
248 break;
249 case ATOM_WS_SHIFT:
250 val = gctx->shift;
251 break;
252 case ATOM_WS_OR_MASK:
253 val = 1 << gctx->shift;
254 break;
255 case ATOM_WS_AND_MASK:
256 val = ~(1 << gctx->shift);
257 break;
258 case ATOM_WS_FB_WINDOW:
259 val = gctx->fb_base;
260 break;
261 case ATOM_WS_ATTRIBUTES:
262 val = gctx->io_attr;
263 break;
264 case ATOM_WS_REGPTR:
265 val = gctx->reg_block;
266 break;
267 default:
268 if (idx < ctx->ws_size)
269 val = ctx->ws[idx];
270 else
271 pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
272 }
273 break;
274 case ATOM_ARG_ID:
275 idx = U16(*ptr);
276 (*ptr) += 2;
277 if (print) {
278 if (gctx->data_block)
279 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
280 else
281 DEBUG("ID[0x%04X]", idx);
282 }
283 val = U32(idx + gctx->data_block);
284 break;
285 case ATOM_ARG_FB:
286 idx = U8(*ptr);
287 (*ptr)++;
288 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
289 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
290 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
291 val = 0;
292 } else
293 val = gctx->scratch[(gctx->fb_base / 4) + idx];
294 if (print)
295 DEBUG("FB[0x%02X]", idx);
296 break;
297 case ATOM_ARG_IMM:
298 switch (align) {
299 case ATOM_SRC_DWORD:
300 val = U32(*ptr);
301 (*ptr) += 4;
302 if (print)
303 DEBUG("IMM 0x%08X\n", val);
304 break;
305 case ATOM_SRC_WORD0:
306 case ATOM_SRC_WORD8:
307 case ATOM_SRC_WORD16:
308 val = U16(*ptr);
309 (*ptr) += 2;
310 if (print)
311 DEBUG("IMM 0x%04X\n", val);
312 break;
313 case ATOM_SRC_BYTE0:
314 case ATOM_SRC_BYTE8:
315 case ATOM_SRC_BYTE16:
316 case ATOM_SRC_BYTE24:
317 val = U8(*ptr);
318 (*ptr)++;
319 if (print)
320 DEBUG("IMM 0x%02X\n", val);
321 break;
322 }
323 return val;
324 case ATOM_ARG_PLL:
325 idx = U8(*ptr);
326 (*ptr)++;
327 if (print)
328 DEBUG("PLL[0x%02X]", idx);
329 val = gctx->card->pll_read(gctx->card, idx);
330 break;
331 case ATOM_ARG_MC:
332 idx = U8(*ptr);
333 (*ptr)++;
334 if (print)
335 DEBUG("MC[0x%02X]", idx);
336 val = gctx->card->mc_read(gctx->card, idx);
337 break;
338 }
339 if (saved)
340 *saved = val;
341 val &= atom_arg_mask[align];
342 val >>= atom_arg_shift[align];
343 if (print)
344 switch (align) {
345 case ATOM_SRC_DWORD:
346 DEBUG(".[31:0] -> 0x%08X\n", val);
347 break;
348 case ATOM_SRC_WORD0:
349 DEBUG(".[15:0] -> 0x%04X\n", val);
350 break;
351 case ATOM_SRC_WORD8:
352 DEBUG(".[23:8] -> 0x%04X\n", val);
353 break;
354 case ATOM_SRC_WORD16:
355 DEBUG(".[31:16] -> 0x%04X\n", val);
356 break;
357 case ATOM_SRC_BYTE0:
358 DEBUG(".[7:0] -> 0x%02X\n", val);
359 break;
360 case ATOM_SRC_BYTE8:
361 DEBUG(".[15:8] -> 0x%02X\n", val);
362 break;
363 case ATOM_SRC_BYTE16:
364 DEBUG(".[23:16] -> 0x%02X\n", val);
365 break;
366 case ATOM_SRC_BYTE24:
367 DEBUG(".[31:24] -> 0x%02X\n", val);
368 break;
369 }
370 return val;
371 }
372
static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
374 {
375 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
376 switch (arg) {
377 case ATOM_ARG_REG:
378 case ATOM_ARG_ID:
379 (*ptr) += 2;
380 break;
381 case ATOM_ARG_PLL:
382 case ATOM_ARG_MC:
383 case ATOM_ARG_PS:
384 case ATOM_ARG_WS:
385 case ATOM_ARG_FB:
386 (*ptr)++;
387 break;
388 case ATOM_ARG_IMM:
389 switch (align) {
390 case ATOM_SRC_DWORD:
391 (*ptr) += 4;
392 return;
393 case ATOM_SRC_WORD0:
394 case ATOM_SRC_WORD8:
395 case ATOM_SRC_WORD16:
396 (*ptr) += 2;
397 return;
398 case ATOM_SRC_BYTE0:
399 case ATOM_SRC_BYTE8:
400 case ATOM_SRC_BYTE16:
401 case ATOM_SRC_BYTE24:
402 (*ptr)++;
403 return;
404 }
405 }
406 }
407
static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
409 {
410 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
411 }
412
static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
414 {
415 uint32_t val = 0xCDCDCDCD;
416
417 switch (align) {
418 case ATOM_SRC_DWORD:
419 val = U32(*ptr);
420 (*ptr) += 4;
421 break;
422 case ATOM_SRC_WORD0:
423 case ATOM_SRC_WORD8:
424 case ATOM_SRC_WORD16:
425 val = U16(*ptr);
426 (*ptr) += 2;
427 break;
428 case ATOM_SRC_BYTE0:
429 case ATOM_SRC_BYTE8:
430 case ATOM_SRC_BYTE16:
431 case ATOM_SRC_BYTE24:
432 val = U8(*ptr);
433 (*ptr)++;
434 break;
435 }
436 return val;
437 }
438
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
441 {
442 return atom_get_src_int(ctx,
443 arg | atom_dst_to_src[(attr >> 3) &
444 7][(attr >> 6) & 3] << 3,
445 ptr, saved, print);
446 }
447
static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
449 {
450 atom_skip_src_int(ctx,
451 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
452 3] << 3, ptr);
453 }
454
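/*
 * Write a destination operand.  This is a read-modify-write: @saved holds
 * the full value previously fetched by atom_get_dst(), the bits selected by
 * the alignment are replaced with @val, and the merged value is written back
 * through the same operand type used on the read path.
 */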
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
457 {
458 uint32_t align =
459 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
460 val, idx;
461 struct atom_context *gctx = ctx->ctx;
462 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
463 val <<= atom_arg_shift[align];
464 val &= atom_arg_mask[align];
465 saved &= ~atom_arg_mask[align];
466 val |= saved;
467 switch (arg) {
468 case ATOM_ARG_REG:
469 idx = U16(*ptr);
470 (*ptr) += 2;
471 DEBUG("REG[0x%04X]", idx);
472 idx += gctx->reg_block;
473 switch (gctx->io_mode) {
474 case ATOM_IO_MM:
475 if (idx == 0)
476 gctx->card->reg_write(gctx->card, idx,
477 val << 2);
478 else
479 gctx->card->reg_write(gctx->card, idx, val);
480 break;
481 case ATOM_IO_PCI:
482 pr_info("PCI registers are not implemented\n");
483 return;
484 case ATOM_IO_SYSIO:
485 pr_info("SYSIO registers are not implemented\n");
486 return;
487 default:
488 if (!(gctx->io_mode & 0x80)) {
489 pr_info("Bad IO mode\n");
490 return;
491 }
492 if (!gctx->iio[gctx->io_mode & 0xFF]) {
493 pr_info("Undefined indirect IO write method %d\n",
494 gctx->io_mode & 0x7F);
495 return;
496 }
497 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
498 idx, val);
499 }
500 break;
501 case ATOM_ARG_PS:
502 idx = U8(*ptr);
503 (*ptr)++;
504 DEBUG("PS[0x%02X]", idx);
505 if (idx >= ctx->ps_size) {
506 pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
507 return;
508 }
509 ctx->ps[idx] = cpu_to_le32(val);
510 break;
511 case ATOM_ARG_WS:
512 idx = U8(*ptr);
513 (*ptr)++;
514 DEBUG("WS[0x%02X]", idx);
515 switch (idx) {
516 case ATOM_WS_QUOTIENT:
517 gctx->divmul[0] = val;
518 break;
519 case ATOM_WS_REMAINDER:
520 gctx->divmul[1] = val;
521 break;
522 case ATOM_WS_DATAPTR:
523 gctx->data_block = val;
524 break;
525 case ATOM_WS_SHIFT:
526 gctx->shift = val;
527 break;
528 case ATOM_WS_OR_MASK:
529 case ATOM_WS_AND_MASK:
530 break;
531 case ATOM_WS_FB_WINDOW:
532 gctx->fb_base = val;
533 break;
534 case ATOM_WS_ATTRIBUTES:
535 gctx->io_attr = val;
536 break;
537 case ATOM_WS_REGPTR:
538 gctx->reg_block = val;
539 break;
540 default:
541 if (idx >= ctx->ws_size) {
542 pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
543 return;
544 }
545 ctx->ws[idx] = val;
546 }
547 break;
548 case ATOM_ARG_FB:
549 idx = U8(*ptr);
550 (*ptr)++;
551 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
552 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
553 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
554 } else
555 gctx->scratch[(gctx->fb_base / 4) + idx] = val;
556 DEBUG("FB[0x%02X]", idx);
557 break;
558 case ATOM_ARG_PLL:
559 idx = U8(*ptr);
560 (*ptr)++;
561 DEBUG("PLL[0x%02X]", idx);
562 gctx->card->pll_write(gctx->card, idx, val);
563 break;
564 case ATOM_ARG_MC:
565 idx = U8(*ptr);
566 (*ptr)++;
567 DEBUG("MC[0x%02X]", idx);
568 gctx->card->mc_write(gctx->card, idx, val);
569 return;
570 }
571 switch (align) {
572 case ATOM_SRC_DWORD:
573 DEBUG(".[31:0] <- 0x%08X\n", old_val);
574 break;
575 case ATOM_SRC_WORD0:
576 DEBUG(".[15:0] <- 0x%04X\n", old_val);
577 break;
578 case ATOM_SRC_WORD8:
579 DEBUG(".[23:8] <- 0x%04X\n", old_val);
580 break;
581 case ATOM_SRC_WORD16:
582 DEBUG(".[31:16] <- 0x%04X\n", old_val);
583 break;
584 case ATOM_SRC_BYTE0:
585 DEBUG(".[7:0] <- 0x%02X\n", old_val);
586 break;
587 case ATOM_SRC_BYTE8:
588 DEBUG(".[15:8] <- 0x%02X\n", old_val);
589 break;
590 case ATOM_SRC_BYTE16:
591 DEBUG(".[23:16] <- 0x%02X\n", old_val);
592 break;
593 case ATOM_SRC_BYTE24:
594 DEBUG(".[31:24] <- 0x%02X\n", old_val);
595 break;
596 }
597 }
598
static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
600 {
601 uint8_t attr = U8((*ptr)++);
602 uint32_t dst, src, saved;
603 int dptr = *ptr;
604 SDEBUG(" dst: ");
605 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
606 SDEBUG(" src: ");
607 src = atom_get_src(ctx, attr, ptr);
608 dst += src;
609 SDEBUG(" dst: ");
610 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
611 }
612
static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
614 {
615 uint8_t attr = U8((*ptr)++);
616 uint32_t dst, src, saved;
617 int dptr = *ptr;
618 SDEBUG(" dst: ");
619 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
620 SDEBUG(" src: ");
621 src = atom_get_src(ctx, attr, ptr);
622 dst &= src;
623 SDEBUG(" dst: ");
624 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
625 }
626
static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
628 {
629 printk("ATOM BIOS beeped!\n");
630 }
631
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
633 {
634 int idx = U8((*ptr)++);
635 int r = 0;
636
637 if (idx < ATOM_TABLE_NAMES_CNT)
638 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
639 else
640 SDEBUG(" table: %d\n", idx);
641 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
642 r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
643 if (r) {
644 ctx->abort = true;
645 }
646 }
647
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
649 {
650 uint8_t attr = U8((*ptr)++);
651 uint32_t saved;
652 int dptr = *ptr;
653 attr &= 0x38;
654 attr |= atom_def_dst[attr >> 3] << 6;
655 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
656 SDEBUG(" dst: ");
657 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
658 }
659
static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
661 {
662 uint8_t attr = U8((*ptr)++);
663 uint32_t dst, src;
664 SDEBUG(" src1: ");
665 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
666 SDEBUG(" src2: ");
667 src = atom_get_src(ctx, attr, ptr);
668 ctx->ctx->cs_equal = (dst == src);
669 ctx->ctx->cs_above = (dst > src);
670 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
671 ctx->ctx->cs_above ? "GT" : "LE");
672 }
673
static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
675 {
676 unsigned count = U8((*ptr)++);
677 SDEBUG(" count: %d\n", count);
678 if (arg == ATOM_UNIT_MICROSEC)
679 udelay(count);
680 else if (!drm_can_sleep())
681 mdelay(count);
682 else
683 msleep(count);
684 }
685
static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
687 {
688 uint8_t attr = U8((*ptr)++);
689 uint32_t dst, src;
690 SDEBUG(" src1: ");
691 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
692 SDEBUG(" src2: ");
693 src = atom_get_src(ctx, attr, ptr);
694 if (src != 0) {
695 ctx->ctx->divmul[0] = dst / src;
696 ctx->ctx->divmul[1] = dst % src;
697 } else {
698 ctx->ctx->divmul[0] = 0;
699 ctx->ctx->divmul[1] = 0;
700 }
701 }
702
static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
704 {
705 uint64_t val64;
706 uint8_t attr = U8((*ptr)++);
707 uint32_t dst, src;
708 SDEBUG(" src1: ");
709 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
710 SDEBUG(" src2: ");
711 src = atom_get_src(ctx, attr, ptr);
712 if (src != 0) {
713 val64 = dst;
714 val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
715 do_div(val64, src);
716 ctx->ctx->divmul[0] = lower_32_bits(val64);
717 ctx->ctx->divmul[1] = upper_32_bits(val64);
718 } else {
719 ctx->ctx->divmul[0] = 0;
720 ctx->ctx->divmul[1] = 0;
721 }
722 }
723
static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
725 {
726 /* functionally, a nop */
727 }
728
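/*
 * Conditional/unconditional jumps.  Repeated jumps to the same target are
 * timed with jiffies so a malformed table that spins in place is aborted
 * after ATOM_CMD_TIMEOUT_SEC seconds instead of hanging the caller.
 */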
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
730 {
731 int execute = 0, target = U16(*ptr);
732 unsigned long cjiffies;
733
734 (*ptr) += 2;
735 switch (arg) {
736 case ATOM_COND_ABOVE:
737 execute = ctx->ctx->cs_above;
738 break;
739 case ATOM_COND_ABOVEOREQUAL:
740 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
741 break;
742 case ATOM_COND_ALWAYS:
743 execute = 1;
744 break;
745 case ATOM_COND_BELOW:
746 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
747 break;
748 case ATOM_COND_BELOWOREQUAL:
749 execute = !ctx->ctx->cs_above;
750 break;
751 case ATOM_COND_EQUAL:
752 execute = ctx->ctx->cs_equal;
753 break;
754 case ATOM_COND_NOTEQUAL:
755 execute = !ctx->ctx->cs_equal;
756 break;
757 }
758 if (arg != ATOM_COND_ALWAYS)
759 SDEBUG(" taken: %s\n", str_yes_no(execute));
760 SDEBUG(" target: 0x%04X\n", target);
761 if (execute) {
762 if (ctx->last_jump == (ctx->start + target)) {
763 cjiffies = jiffies;
764 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
765 cjiffies -= ctx->last_jump_jiffies;
766 if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
				DRM_ERROR("atombios stuck in loop for more than %d secs, aborting\n",
					  ATOM_CMD_TIMEOUT_SEC);
769 ctx->abort = true;
770 }
771 } else {
				/* jiffies wrapped around; just wait a little longer */
773 ctx->last_jump_jiffies = jiffies;
774 }
775 } else {
776 ctx->last_jump = ctx->start + target;
777 ctx->last_jump_jiffies = jiffies;
778 }
779 *ptr = ctx->start + target;
780 }
781 }
782
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
784 {
785 uint8_t attr = U8((*ptr)++);
786 uint32_t dst, mask, src, saved;
787 int dptr = *ptr;
788 SDEBUG(" dst: ");
789 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
790 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
791 SDEBUG(" mask: 0x%08x", mask);
792 SDEBUG(" src: ");
793 src = atom_get_src(ctx, attr, ptr);
794 dst &= mask;
795 dst |= src;
796 SDEBUG(" dst: ");
797 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
798 }
799
static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
801 {
802 uint8_t attr = U8((*ptr)++);
803 uint32_t src, saved;
804 int dptr = *ptr;
805 if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
806 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
807 else {
808 atom_skip_dst(ctx, arg, attr, ptr);
809 saved = 0xCDCDCDCD;
810 }
811 SDEBUG(" src: ");
812 src = atom_get_src(ctx, attr, ptr);
813 SDEBUG(" dst: ");
814 atom_put_dst(ctx, arg, attr, &dptr, src, saved);
815 }
816
static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
818 {
819 uint8_t attr = U8((*ptr)++);
820 uint32_t dst, src;
821 SDEBUG(" src1: ");
822 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
823 SDEBUG(" src2: ");
824 src = atom_get_src(ctx, attr, ptr);
825 ctx->ctx->divmul[0] = dst * src;
826 }
827
static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
829 {
830 uint64_t val64;
831 uint8_t attr = U8((*ptr)++);
832 uint32_t dst, src;
833 SDEBUG(" src1: ");
834 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
835 SDEBUG(" src2: ");
836 src = atom_get_src(ctx, attr, ptr);
837 val64 = (uint64_t)dst * (uint64_t)src;
838 ctx->ctx->divmul[0] = lower_32_bits(val64);
839 ctx->ctx->divmul[1] = upper_32_bits(val64);
840 }
841
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
843 {
844 /* nothing */
845 }
846
static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
848 {
849 uint8_t attr = U8((*ptr)++);
850 uint32_t dst, src, saved;
851 int dptr = *ptr;
852 SDEBUG(" dst: ");
853 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
854 SDEBUG(" src: ");
855 src = atom_get_src(ctx, attr, ptr);
856 dst |= src;
857 SDEBUG(" dst: ");
858 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
859 }
860
static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
862 {
863 uint8_t val = U8((*ptr)++);
864 SDEBUG("POST card output: 0x%02X\n", val);
865 }
866
static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
868 {
869 pr_info("unimplemented!\n");
870 }
871
static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
873 {
874 pr_info("unimplemented!\n");
875 }
876
static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
878 {
879 pr_info("unimplemented!\n");
880 }
881
static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
883 {
884 int idx = U8(*ptr);
885 (*ptr)++;
886 SDEBUG(" block: %d\n", idx);
887 if (!idx)
888 ctx->ctx->data_block = 0;
889 else if (idx == 255)
890 ctx->ctx->data_block = ctx->start;
891 else
892 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
893 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
894 }
895
static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
897 {
898 uint8_t attr = U8((*ptr)++);
899 SDEBUG(" fb_base: ");
900 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
901 }
902
static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
904 {
905 int port;
906 switch (arg) {
907 case ATOM_PORT_ATI:
908 port = U16(*ptr);
909 if (port < ATOM_IO_NAMES_CNT)
910 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
911 else
912 SDEBUG(" port: %d\n", port);
913 if (!port)
914 ctx->ctx->io_mode = ATOM_IO_MM;
915 else
916 ctx->ctx->io_mode = ATOM_IO_IIO | port;
917 (*ptr) += 2;
918 break;
919 case ATOM_PORT_PCI:
920 ctx->ctx->io_mode = ATOM_IO_PCI;
921 (*ptr)++;
922 break;
923 case ATOM_PORT_SYSIO:
924 ctx->ctx->io_mode = ATOM_IO_SYSIO;
925 (*ptr)++;
926 break;
927 }
928 }
929
static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
931 {
932 ctx->ctx->reg_block = U16(*ptr);
933 (*ptr) += 2;
934 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
935 }
936
static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
938 {
939 uint8_t attr = U8((*ptr)++), shift;
940 uint32_t saved, dst;
941 int dptr = *ptr;
942 attr &= 0x38;
943 attr |= atom_def_dst[attr >> 3] << 6;
944 SDEBUG(" dst: ");
945 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
946 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
947 SDEBUG(" shift: %d\n", shift);
948 dst <<= shift;
949 SDEBUG(" dst: ");
950 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
951 }
952
static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
954 {
955 uint8_t attr = U8((*ptr)++), shift;
956 uint32_t saved, dst;
957 int dptr = *ptr;
958 attr &= 0x38;
959 attr |= atom_def_dst[attr >> 3] << 6;
960 SDEBUG(" dst: ");
961 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
962 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
963 SDEBUG(" shift: %d\n", shift);
964 dst >>= shift;
965 SDEBUG(" dst: ");
966 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
967 }
968
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
970 {
971 uint8_t attr = U8((*ptr)++), shift;
972 uint32_t saved, dst;
973 int dptr = *ptr;
974 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
975 SDEBUG(" dst: ");
976 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
978 dst = saved;
979 shift = atom_get_src(ctx, attr, ptr);
980 SDEBUG(" shift: %d\n", shift);
981 dst <<= shift;
982 dst &= atom_arg_mask[dst_align];
983 dst >>= atom_arg_shift[dst_align];
984 SDEBUG(" dst: ");
985 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
986 }
987
static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
989 {
990 uint8_t attr = U8((*ptr)++), shift;
991 uint32_t saved, dst;
992 int dptr = *ptr;
993 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
994 SDEBUG(" dst: ");
995 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
997 dst = saved;
998 shift = atom_get_src(ctx, attr, ptr);
999 SDEBUG(" shift: %d\n", shift);
1000 dst >>= shift;
1001 dst &= atom_arg_mask[dst_align];
1002 dst >>= atom_arg_shift[dst_align];
1003 SDEBUG(" dst: ");
1004 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1005 }
1006
static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
1008 {
1009 uint8_t attr = U8((*ptr)++);
1010 uint32_t dst, src, saved;
1011 int dptr = *ptr;
1012 SDEBUG(" dst: ");
1013 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1014 SDEBUG(" src: ");
1015 src = atom_get_src(ctx, attr, ptr);
1016 dst -= src;
1017 SDEBUG(" dst: ");
1018 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1019 }
1020
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
1022 {
1023 uint8_t attr = U8((*ptr)++);
1024 uint32_t src, val, target;
1025 SDEBUG(" switch: ");
1026 src = atom_get_src(ctx, attr, ptr);
1027 while (U16(*ptr) != ATOM_CASE_END)
1028 if (U8(*ptr) == ATOM_CASE_MAGIC) {
1029 (*ptr)++;
1030 SDEBUG(" case: ");
1031 val =
1032 atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
1033 ptr);
1034 target = U16(*ptr);
1035 if (val == src) {
1036 SDEBUG(" target: %04X\n", target);
1037 *ptr = ctx->start + target;
1038 return;
1039 }
1040 (*ptr) += 2;
1041 } else {
1042 pr_info("Bad case\n");
1043 return;
1044 }
1045 (*ptr) += 2;
1046 }
1047
static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1049 {
1050 uint8_t attr = U8((*ptr)++);
1051 uint32_t dst, src;
1052 SDEBUG(" src1: ");
1053 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1054 SDEBUG(" src2: ");
1055 src = atom_get_src(ctx, attr, ptr);
1056 ctx->ctx->cs_equal = ((dst & src) == 0);
1057 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1058 }
1059
static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1061 {
1062 uint8_t attr = U8((*ptr)++);
1063 uint32_t dst, src, saved;
1064 int dptr = *ptr;
1065 SDEBUG(" dst: ");
1066 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1067 SDEBUG(" src: ");
1068 src = atom_get_src(ctx, attr, ptr);
1069 dst ^= src;
1070 SDEBUG(" dst: ");
1071 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1072 }
1073
static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1075 {
1076 uint8_t val = U8((*ptr)++);
1077 SDEBUG("DEBUG output: 0x%02X\n", val);
1078 }
1079
static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1081 {
1082 uint16_t val = U16(*ptr);
1083 (*ptr) += val + 2;
1084 SDEBUG("PROCESSDS output: 0x%02X\n", val);
1085 }
1086
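/*
 * Dispatch table indexed by the opcode byte.  Most arithmetic/logic ops
 * appear six times in a row, once per destination operand type
 * (REG/PS/WS/FB/PLL/MC); entry 0 is unused and EOT terminates a table.
 */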
1087 static struct {
1088 void (*func) (atom_exec_context *, int *, int);
1089 int arg;
1090 } opcode_table[ATOM_OP_CNT] = {
1091 {
1092 NULL, 0}, {
1093 atom_op_move, ATOM_ARG_REG}, {
1094 atom_op_move, ATOM_ARG_PS}, {
1095 atom_op_move, ATOM_ARG_WS}, {
1096 atom_op_move, ATOM_ARG_FB}, {
1097 atom_op_move, ATOM_ARG_PLL}, {
1098 atom_op_move, ATOM_ARG_MC}, {
1099 atom_op_and, ATOM_ARG_REG}, {
1100 atom_op_and, ATOM_ARG_PS}, {
1101 atom_op_and, ATOM_ARG_WS}, {
1102 atom_op_and, ATOM_ARG_FB}, {
1103 atom_op_and, ATOM_ARG_PLL}, {
1104 atom_op_and, ATOM_ARG_MC}, {
1105 atom_op_or, ATOM_ARG_REG}, {
1106 atom_op_or, ATOM_ARG_PS}, {
1107 atom_op_or, ATOM_ARG_WS}, {
1108 atom_op_or, ATOM_ARG_FB}, {
1109 atom_op_or, ATOM_ARG_PLL}, {
1110 atom_op_or, ATOM_ARG_MC}, {
1111 atom_op_shift_left, ATOM_ARG_REG}, {
1112 atom_op_shift_left, ATOM_ARG_PS}, {
1113 atom_op_shift_left, ATOM_ARG_WS}, {
1114 atom_op_shift_left, ATOM_ARG_FB}, {
1115 atom_op_shift_left, ATOM_ARG_PLL}, {
1116 atom_op_shift_left, ATOM_ARG_MC}, {
1117 atom_op_shift_right, ATOM_ARG_REG}, {
1118 atom_op_shift_right, ATOM_ARG_PS}, {
1119 atom_op_shift_right, ATOM_ARG_WS}, {
1120 atom_op_shift_right, ATOM_ARG_FB}, {
1121 atom_op_shift_right, ATOM_ARG_PLL}, {
1122 atom_op_shift_right, ATOM_ARG_MC}, {
1123 atom_op_mul, ATOM_ARG_REG}, {
1124 atom_op_mul, ATOM_ARG_PS}, {
1125 atom_op_mul, ATOM_ARG_WS}, {
1126 atom_op_mul, ATOM_ARG_FB}, {
1127 atom_op_mul, ATOM_ARG_PLL}, {
1128 atom_op_mul, ATOM_ARG_MC}, {
1129 atom_op_div, ATOM_ARG_REG}, {
1130 atom_op_div, ATOM_ARG_PS}, {
1131 atom_op_div, ATOM_ARG_WS}, {
1132 atom_op_div, ATOM_ARG_FB}, {
1133 atom_op_div, ATOM_ARG_PLL}, {
1134 atom_op_div, ATOM_ARG_MC}, {
1135 atom_op_add, ATOM_ARG_REG}, {
1136 atom_op_add, ATOM_ARG_PS}, {
1137 atom_op_add, ATOM_ARG_WS}, {
1138 atom_op_add, ATOM_ARG_FB}, {
1139 atom_op_add, ATOM_ARG_PLL}, {
1140 atom_op_add, ATOM_ARG_MC}, {
1141 atom_op_sub, ATOM_ARG_REG}, {
1142 atom_op_sub, ATOM_ARG_PS}, {
1143 atom_op_sub, ATOM_ARG_WS}, {
1144 atom_op_sub, ATOM_ARG_FB}, {
1145 atom_op_sub, ATOM_ARG_PLL}, {
1146 atom_op_sub, ATOM_ARG_MC}, {
1147 atom_op_setport, ATOM_PORT_ATI}, {
1148 atom_op_setport, ATOM_PORT_PCI}, {
1149 atom_op_setport, ATOM_PORT_SYSIO}, {
1150 atom_op_setregblock, 0}, {
1151 atom_op_setfbbase, 0}, {
1152 atom_op_compare, ATOM_ARG_REG}, {
1153 atom_op_compare, ATOM_ARG_PS}, {
1154 atom_op_compare, ATOM_ARG_WS}, {
1155 atom_op_compare, ATOM_ARG_FB}, {
1156 atom_op_compare, ATOM_ARG_PLL}, {
1157 atom_op_compare, ATOM_ARG_MC}, {
1158 atom_op_switch, 0}, {
1159 atom_op_jump, ATOM_COND_ALWAYS}, {
1160 atom_op_jump, ATOM_COND_EQUAL}, {
1161 atom_op_jump, ATOM_COND_BELOW}, {
1162 atom_op_jump, ATOM_COND_ABOVE}, {
1163 atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1164 atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1165 atom_op_jump, ATOM_COND_NOTEQUAL}, {
1166 atom_op_test, ATOM_ARG_REG}, {
1167 atom_op_test, ATOM_ARG_PS}, {
1168 atom_op_test, ATOM_ARG_WS}, {
1169 atom_op_test, ATOM_ARG_FB}, {
1170 atom_op_test, ATOM_ARG_PLL}, {
1171 atom_op_test, ATOM_ARG_MC}, {
1172 atom_op_delay, ATOM_UNIT_MILLISEC}, {
1173 atom_op_delay, ATOM_UNIT_MICROSEC}, {
1174 atom_op_calltable, 0}, {
1175 atom_op_repeat, 0}, {
1176 atom_op_clear, ATOM_ARG_REG}, {
1177 atom_op_clear, ATOM_ARG_PS}, {
1178 atom_op_clear, ATOM_ARG_WS}, {
1179 atom_op_clear, ATOM_ARG_FB}, {
1180 atom_op_clear, ATOM_ARG_PLL}, {
1181 atom_op_clear, ATOM_ARG_MC}, {
1182 atom_op_nop, 0}, {
1183 atom_op_eot, 0}, {
1184 atom_op_mask, ATOM_ARG_REG}, {
1185 atom_op_mask, ATOM_ARG_PS}, {
1186 atom_op_mask, ATOM_ARG_WS}, {
1187 atom_op_mask, ATOM_ARG_FB}, {
1188 atom_op_mask, ATOM_ARG_PLL}, {
1189 atom_op_mask, ATOM_ARG_MC}, {
1190 atom_op_postcard, 0}, {
1191 atom_op_beep, 0}, {
1192 atom_op_savereg, 0}, {
1193 atom_op_restorereg, 0}, {
1194 atom_op_setdatablock, 0}, {
1195 atom_op_xor, ATOM_ARG_REG}, {
1196 atom_op_xor, ATOM_ARG_PS}, {
1197 atom_op_xor, ATOM_ARG_WS}, {
1198 atom_op_xor, ATOM_ARG_FB}, {
1199 atom_op_xor, ATOM_ARG_PLL}, {
1200 atom_op_xor, ATOM_ARG_MC}, {
1201 atom_op_shl, ATOM_ARG_REG}, {
1202 atom_op_shl, ATOM_ARG_PS}, {
1203 atom_op_shl, ATOM_ARG_WS}, {
1204 atom_op_shl, ATOM_ARG_FB}, {
1205 atom_op_shl, ATOM_ARG_PLL}, {
1206 atom_op_shl, ATOM_ARG_MC}, {
1207 atom_op_shr, ATOM_ARG_REG}, {
1208 atom_op_shr, ATOM_ARG_PS}, {
1209 atom_op_shr, ATOM_ARG_WS}, {
1210 atom_op_shr, ATOM_ARG_FB}, {
1211 atom_op_shr, ATOM_ARG_PLL}, {
1212 atom_op_shr, ATOM_ARG_MC}, {
1213 atom_op_debug, 0}, {
1214 atom_op_processds, 0}, {
1215 atom_op_mul32, ATOM_ARG_PS}, {
1216 atom_op_mul32, ATOM_ARG_WS}, {
1217 atom_op_div32, ATOM_ARG_PS}, {
1218 atom_op_div32, ATOM_ARG_WS},
1219 };
1220
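/*
 * Run one command table.  The table header supplies the code length, the
 * workspace size in dwords and the parameter-space size in bytes; the
 * parameter space is a window into the caller's buffer, shifted for nested
 * CALL_TABLE ops so each callee sees its own parameters.
 */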
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
1222 {
1223 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1224 int len, ws, ps, ptr;
1225 unsigned char op;
1226 atom_exec_context ectx;
1227 int ret = 0;
1228
1229 if (!base)
1230 return -EINVAL;
1231
1232 len = CU16(base + ATOM_CT_SIZE_PTR);
1233 ws = CU8(base + ATOM_CT_WS_PTR);
1234 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1235 ptr = base + ATOM_CT_CODE_PTR;
1236
1237 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1238
1239 ectx.ctx = ctx;
1240 ectx.ps_shift = ps / 4;
1241 ectx.start = base;
1242 ectx.ps = params;
1243 ectx.ps_size = params_size;
1244 ectx.abort = false;
1245 ectx.last_jump = 0;
1246 ectx.last_jump_jiffies = 0;
1247 if (ws) {
1248 ectx.ws = kcalloc(4, ws, GFP_KERNEL);
1249 ectx.ws_size = ws;
1250 } else {
1251 ectx.ws = NULL;
1252 ectx.ws_size = 0;
1253 }
1254
1255 debug_depth++;
1256 while (1) {
1257 op = CU8(ptr++);
1258 if (op < ATOM_OP_NAMES_CNT)
1259 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1260 else
1261 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1262 if (ectx.abort) {
1263 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1264 base, len, ws, ps, ptr - 1);
1265 ret = -EINVAL;
1266 goto free;
1267 }
1268
1269 if (op < ATOM_OP_CNT && op > 0)
1270 opcode_table[op].func(&ectx, &ptr,
1271 opcode_table[op].arg);
1272 else
1273 break;
1274
1275 if (op == ATOM_OP_EOT)
1276 break;
1277 }
1278 debug_depth--;
1279 SDEBUG("<<\n");
1280
1281 free:
1282 if (ws)
1283 kfree(ectx.ws);
1284 return ret;
1285 }
1286
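/*
 * Public entry point.  A rough sketch of typical use elsewhere in the
 * driver (the args union and table name are illustrative only):
 *
 *	union set_voltage args = { ... };
 *	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
 *
 *	amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
 *				  (uint32_t *)&args, sizeof(args));
 */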
int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
1288 {
1289 int r;
1290
1291 mutex_lock(&ctx->mutex);
1292 /* reset data block */
1293 ctx->data_block = 0;
1294 /* reset reg block */
1295 ctx->reg_block = 0;
1296 /* reset fb window */
1297 ctx->fb_base = 0;
1298 /* reset io mode */
1299 ctx->io_mode = ATOM_IO_MM;
1300 /* reset divmul */
1301 ctx->divmul[0] = 0;
1302 ctx->divmul[1] = 0;
1303 r = amdgpu_atom_execute_table_locked(ctx, index, params, params_size);
1304 mutex_unlock(&ctx->mutex);
1305 return r;
1306 }
1307
1308 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1309
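/*
 * Scan the IIO data table once at parse time and remember, per port id,
 * where each ATOM_IIO_START program begins, so atom_iio_execute() can be
 * dispatched directly from ctx->iio[] later on.
 */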
static void atom_index_iio(struct atom_context *ctx, int base)
1311 {
1312 ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1313 if (!ctx->iio)
1314 return;
1315 while (CU8(base) == ATOM_IIO_START) {
1316 ctx->iio[CU8(base + 1)] = base + 2;
1317 base += 2;
1318 while (CU8(base) != ATOM_IIO_END)
1319 base += atom_iio_len[CU8(base)];
1320 base += 3;
1321 }
1322 }
1323
static void atom_get_vbios_name(struct atom_context *ctx)
1325 {
1326 unsigned char *p_rom;
1327 unsigned char str_num;
1328 unsigned short off_to_vbios_str;
1329 unsigned char *c_ptr;
1330 int name_size;
1331 int i;
1332
1333 const char *na = "--N/A--";
1334 char *back;
1335
1336 p_rom = ctx->bios;
1337
1338 str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
1339 if (str_num != 0) {
1340 off_to_vbios_str =
1341 *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
1342
1343 c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
1344 } else {
		/* we do not know where to find the name */
1346 memcpy(ctx->name, na, 7);
1347 ctx->name[7] = 0;
1348 return;
1349 }
1350
1351 /*
1352 * skip the atombios strings, usually 4
1353 * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
1354 */
1355 for (i = 0; i < str_num; i++) {
1356 while (*c_ptr != 0)
1357 c_ptr++;
1358 c_ptr++;
1359 }
1360
1361 /* skip the following 2 chars: 0x0D 0x0A */
1362 c_ptr += 2;
1363
1364 name_size = strnlen(c_ptr, STRLEN_LONG - 1);
1365 memcpy(ctx->name, c_ptr, name_size);
1366 back = ctx->name + name_size;
1367 while ((*--back) == ' ')
1368 ;
1369 *(back + 1) = '\0';
1370 }
1371
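/*
 * Build the human-readable build date.  The ROM stores it roughly as
 * "MM/DD/YY HH:MM" at OFFSET_TO_VBIOS_DATE; the copy below rearranges it
 * into "20YY/MM/DD HH:MM" in ctx->date (assuming a 20xx build year).
 */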
static void atom_get_vbios_date(struct atom_context *ctx)
1373 {
1374 unsigned char *p_rom;
1375 unsigned char *date_in_rom;
1376
1377 p_rom = ctx->bios;
1378
1379 date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;
1380
1381 ctx->date[0] = '2';
1382 ctx->date[1] = '0';
1383 ctx->date[2] = date_in_rom[6];
1384 ctx->date[3] = date_in_rom[7];
1385 ctx->date[4] = '/';
1386 ctx->date[5] = date_in_rom[0];
1387 ctx->date[6] = date_in_rom[1];
1388 ctx->date[7] = '/';
1389 ctx->date[8] = date_in_rom[3];
1390 ctx->date[9] = date_in_rom[4];
1391 ctx->date[10] = ' ';
1392 ctx->date[11] = date_in_rom[9];
1393 ctx->date[12] = date_in_rom[10];
1394 ctx->date[13] = date_in_rom[11];
1395 ctx->date[14] = date_in_rom[12];
1396 ctx->date[15] = date_in_rom[13];
1397 ctx->date[16] = '\0';
1398 }
1399
static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
					   int end, int maxlen)
1402 {
1403 unsigned long str_off;
1404 unsigned char *p_rom;
1405 unsigned short str_len;
1406
1407 str_off = 0;
1408 str_len = strnlen(str, maxlen);
1409 p_rom = ctx->bios;
1410
1411 for (; start <= end; ++start) {
1412 for (str_off = 0; str_off < str_len; ++str_off) {
1413 if (str[str_off] != *(p_rom + start + str_off))
1414 break;
1415 }
1416
1417 if (str_off == str_len || str[str_off] == 0)
1418 return p_rom + start;
1419 }
1420 return NULL;
1421 }
1422
static void atom_get_vbios_pn(struct atom_context *ctx)
1424 {
1425 unsigned char *p_rom;
1426 unsigned short off_to_vbios_str;
1427 unsigned char *vbios_str;
1428 int count;
1429
1430 off_to_vbios_str = 0;
1431 p_rom = ctx->bios;
1432
1433 if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
1434 off_to_vbios_str =
1435 *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
1436
1437 vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
1438 } else {
1439 vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
1440 }
1441
1442 if (*vbios_str == 0) {
1443 vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
1444 if (vbios_str == NULL)
1445 vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
1446 }
1447 if (vbios_str != NULL && *vbios_str == 0)
1448 vbios_str++;
1449
1450 if (vbios_str != NULL) {
1451 count = 0;
1452 while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
1453 vbios_str[count] <= 'z') {
1454 ctx->vbios_pn[count] = vbios_str[count];
1455 count++;
1456 }
1457
1458 ctx->vbios_pn[count] = 0;
1459 }
1460
1461 pr_info("ATOM BIOS: %s\n", ctx->vbios_pn);
1462 }
1463
static void atom_get_vbios_version(struct atom_context *ctx)
1465 {
1466 unsigned short start = 3, end;
1467 unsigned char *vbios_ver;
1468 unsigned char *p_rom;
1469
1470 p_rom = ctx->bios;
1471 /* Search from strings offset if it's present */
1472 start = *(unsigned short *)(p_rom +
1473 OFFSET_TO_GET_ATOMBIOS_STRING_START);
1474
1475 /* Search till atom rom header start point */
1476 end = *(unsigned short *)(p_rom + OFFSET_TO_ATOM_ROM_HEADER_POINTER);
1477
	/* Use hardcoded offsets if the offsets are not populated */
1479 if (end <= start) {
1480 start = 3;
1481 end = 1024;
1482 }
1483
1484 /* find anchor ATOMBIOSBK-AMD */
1485 vbios_ver =
1486 atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, start, end, 64);
1487 if (vbios_ver != NULL) {
1488 /* skip ATOMBIOSBK-AMD VER */
1489 vbios_ver += 18;
1490 memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
1491 } else {
1492 ctx->vbios_ver_str[0] = '\0';
1493 }
1494 }
1495
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1497 {
1498 int base;
1499 struct atom_context *ctx =
1500 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1501 struct _ATOM_ROM_HEADER *atom_rom_header;
1502 struct _ATOM_MASTER_DATA_TABLE *master_table;
1503 struct _ATOM_FIRMWARE_INFO *atom_fw_info;
1504
1505 if (!ctx)
1506 return NULL;
1507
1508 ctx->card = card;
1509 ctx->bios = bios;
1510
1511 if (CU16(0) != ATOM_BIOS_MAGIC) {
1512 pr_info("Invalid BIOS magic\n");
1513 kfree(ctx);
1514 return NULL;
1515 }
1516 if (strncmp
1517 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1518 strlen(ATOM_ATI_MAGIC))) {
1519 pr_info("Invalid ATI magic\n");
1520 kfree(ctx);
1521 return NULL;
1522 }
1523
1524 base = CU16(ATOM_ROM_TABLE_PTR);
1525 if (strncmp
1526 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1527 strlen(ATOM_ROM_MAGIC))) {
1528 pr_info("Invalid ATOM magic\n");
1529 kfree(ctx);
1530 return NULL;
1531 }
1532
1533 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1534 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1535 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1536 if (!ctx->iio) {
1537 amdgpu_atom_destroy(ctx);
1538 return NULL;
1539 }
1540
1541 atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
1542 if (atom_rom_header->usMasterDataTableOffset != 0) {
1543 master_table = (struct _ATOM_MASTER_DATA_TABLE *)
1544 CSTR(atom_rom_header->usMasterDataTableOffset);
1545 if (master_table->ListOfDataTables.FirmwareInfo != 0) {
1546 atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
1547 CSTR(master_table->ListOfDataTables.FirmwareInfo);
1548 ctx->version = atom_fw_info->ulFirmwareRevision;
1549 }
1550 }
1551
1552 atom_get_vbios_name(ctx);
1553 atom_get_vbios_pn(ctx);
1554 atom_get_vbios_date(ctx);
1555 atom_get_vbios_version(ctx);
1556
1557 return ctx;
1558 }
1559
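/*
 * Run the ASIC_Init command table.  ps[0]/ps[1] are seeded with the default
 * engine and memory clocks read from the firmware info table; if either is
 * missing, or the init table itself is absent, we bail out early.
 */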
int amdgpu_atom_asic_init(struct atom_context *ctx)
1561 {
1562 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1563 uint32_t ps[16];
1564 int ret;
1565
1566 memset(ps, 0, 64);
1567
1568 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1569 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1570 if (!ps[0] || !ps[1])
1571 return 1;
1572
1573 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1574 return 1;
1575 ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
1576 if (ret)
1577 return ret;
1578
1579 memset(ps, 0, 64);
1580
1581 return ret;
1582 }
1583
void amdgpu_atom_destroy(struct atom_context *ctx)
1585 {
1586 kfree(ctx->iio);
1587 kfree(ctx);
1588 }
1589
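/*
 * Look up a data table by index and report its size and revision.  A minimal
 * sketch of the usual calling pattern (the firmware_info union is just an
 * example consumer):
 *
 *	uint16_t size, data_offset;
 *	uint8_t frev, crev;
 *
 *	if (amdgpu_atom_parse_data_header(ctx, index, &size,
 *					  &frev, &crev, &data_offset))
 *		fw_info = (union firmware_info *)(ctx->bios + data_offset);
 */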
bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
				   uint16_t *size, uint8_t *frev, uint8_t *crev,
				   uint16_t *data_start)
1593 {
1594 int offset = index * 2 + 4;
1595 int idx = CU16(ctx->data_table + offset);
1596 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1597
1598 if (!mdt[index])
1599 return false;
1600
1601 if (size)
1602 *size = CU16(idx);
1603 if (frev)
1604 *frev = CU8(idx + 2);
1605 if (crev)
1606 *crev = CU8(idx + 3);
1607 *data_start = idx;
1608 return true;
1609 }
1610
bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
				  uint8_t *crev)
1613 {
1614 int offset = index * 2 + 4;
1615 int idx = CU16(ctx->cmd_table + offset);
1616 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1617
1618 if (!mct[index])
1619 return false;
1620
1621 if (frev)
1622 *frev = CU8(idx + 2);
1623 if (crev)
1624 *crev = CU8(idx + 3);
1625 return true;
1626 }
1627
1628