/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <linux/unaligned.h>

#include <drm/drm_device.h>
#include <drm/drm_util.h>

#define ATOM_DEBUG

#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
#include "radeon.h"

#define ATOM_COND_ABOVE		0
#define ATOM_COND_ABOVEOREQUAL	1
#define ATOM_COND_ALWAYS	2
#define ATOM_COND_BELOW		3
#define ATOM_COND_BELOWOREQUAL	4
#define ATOM_COND_EQUAL		5
#define ATOM_COND_NOTEQUAL	6

#define ATOM_PORT_ATI	0
#define ATOM_PORT_PCI	1
#define ATOM_PORT_SYSIO	2

#define ATOM_UNIT_MICROSEC	0
#define ATOM_UNIT_MILLISEC	1

#define PLL_INDEX	2
#define PLL_DATA	3

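/*
 * Per-invocation interpreter state: the shared atom_context, the caller's
 * parameter space (ps) and a per-table workspace (ws), plus the bookkeeping
 * used to detect BIOS tables that loop forever (last_jump, last_jump_jiffies,
 * abort).
 */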
typedef struct {
	struct atom_context *ctx;
	uint32_t *ps, *ws;
	int ps_size, ws_size;
	int ps_shift;
	uint16_t start;
	unsigned last_jump;
	unsigned long last_jump_jiffies;
	bool abort;
} atom_exec_context;

int atom_debug = 0;
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);

static uint32_t atom_arg_mask[8] = {
	0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
	0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
};
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };

static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };

static int debug_depth = 0;
#ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}

#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif

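/*
 * Execute one indirect IO (IIO) program from the BIOS. IIO programs describe
 * accesses to indirect register spaces as a tiny bytecode; atom_index_iio()
 * records where each program starts and this routine interprets it.
 */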
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	struct radeon_device *rdev = ctx->card->dev->dev_private;
	uint32_t temp = 0xCDCDCDCD;

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			if (rdev->family == CHIP_RV515)
				(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((index >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((data >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((ctx->io_attr >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			pr_info("Unknown IIO opcode\n");
			return 0;
		}
}

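/*
 * Decode a source operand. 'attr' packs the argument type (register, PS, WS,
 * data table, framebuffer scratch, immediate, PLL or MC) in its low 3 bits
 * and the alignment (which byte/word of the 32-bit value is used) in the
 * next 3. Optionally returns the unmasked value through 'saved' so callers
 * can do a read-modify-write later.
 */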
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;
	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return 0;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return 0;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				pr_info("Undefined indirect IO read method %d\n",
					gctx->io_mode & 0x7F);
				return 0;
			}
			val = atom_iio_execute(gctx,
					       gctx->iio[gctx->io_mode & 0x7F],
					       idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		if (idx < ctx->ps_size)
			val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		else
			pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			if (idx < ctx->ws_size)
				val = ctx->ws[idx];
			else
				pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
		}
		break;
	case ATOM_ARG_ID:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}

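/*
 * Advance *ptr past a source-style operand without evaluating it; used when
 * only the size of the encoded operand matters.
 */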
static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
	switch (arg) {
	case ATOM_ARG_REG:
	case ATOM_ARG_ID:
		(*ptr) += 2;
		break;
	case ATOM_ARG_PLL:
	case ATOM_ARG_MC:
	case ATOM_ARG_PS:
	case ATOM_ARG_WS:
	case ATOM_ARG_FB:
		(*ptr)++;
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			(*ptr) += 4;
			return;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			(*ptr) += 2;
			return;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			(*ptr)++;
			return;
		}
		return;
	}
}

static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}

static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
{
	uint32_t val = 0xCDCDCDCD;

	switch (align) {
	case ATOM_SRC_DWORD:
		val = U32(*ptr);
		(*ptr) += 4;
		break;
	case ATOM_SRC_WORD0:
	case ATOM_SRC_WORD8:
	case ATOM_SRC_WORD16:
		val = U16(*ptr);
		(*ptr) += 2;
		break;
	case ATOM_SRC_BYTE0:
	case ATOM_SRC_BYTE8:
	case ATOM_SRC_BYTE16:
	case ATOM_SRC_BYTE24:
		val = U8(*ptr);
		(*ptr)++;
		break;
	}
	return val;
}

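/*
 * Destination operands reuse the source decoder: atom_dst_to_src[] maps the
 * destination alignment/size fields onto the equivalent source alignment
 * encoding before handing off to atom_get_src_int()/atom_skip_src_int().
 */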
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3,
				ptr, saved, print);
}

static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3, ptr);
}

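/*
 * Write a destination operand back. The new value is shifted and masked into
 * the field selected by the alignment and merged with the previously read raw
 * value ('saved') so the untouched bits of the destination are preserved.
 */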
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
{
	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	uint32_t old_val = val, idx;
	struct atom_context *gctx = ctx->ctx;
	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
	val <<= atom_arg_shift[align];
	val &= atom_arg_mask[align];
	saved &= ~atom_arg_mask[align];
	val |= saved;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			if (idx == 0)
				gctx->card->reg_write(gctx->card, idx,
						      val << 2);
			else
				gctx->card->reg_write(gctx->card, idx, val);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return;
			}
			if (!gctx->iio[gctx->io_mode & 0xFF]) {
				pr_info("Undefined indirect IO write method %d\n",
					gctx->io_mode & 0x7F);
				return;
			}
			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
					 idx, val);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PS[0x%02X]", idx);
		if (idx >= ctx->ps_size) {
			pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
			return;
		}
		ctx->ps[idx] = cpu_to_le32(val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			gctx->divmul[0] = val;
			break;
		case ATOM_WS_REMAINDER:
			gctx->divmul[1] = val;
			break;
		case ATOM_WS_DATAPTR:
			gctx->data_block = val;
			break;
		case ATOM_WS_SHIFT:
			gctx->shift = val;
			break;
		case ATOM_WS_OR_MASK:
		case ATOM_WS_AND_MASK:
			break;
		case ATOM_WS_FB_WINDOW:
			gctx->fb_base = val;
			break;
		case ATOM_WS_ATTRIBUTES:
			gctx->io_attr = val;
			break;
		case ATOM_WS_REGPTR:
			gctx->reg_block = val;
			break;
		default:
			if (idx >= ctx->ws_size) {
				pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
				return;
			}
			ctx->ws[idx] = val;
		}
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		} else
			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
		DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PLL[0x%02X]", idx);
		gctx->card->pll_write(gctx->card, idx, val);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("MC[0x%02X]", idx);
		gctx->card->mc_write(gctx->card, idx, val);
		return;
	}
	switch (align) {
	case ATOM_SRC_DWORD:
		DEBUG(".[31:0] <- 0x%08X\n", old_val);
		break;
	case ATOM_SRC_WORD0:
		DEBUG(".[15:0] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD8:
		DEBUG(".[23:8] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD16:
		DEBUG(".[31:16] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_BYTE0:
		DEBUG(".[7:0] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE8:
		DEBUG(".[15:8] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE16:
		DEBUG(".[23:16] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE24:
		DEBUG(".[31:24] <- 0x%02X\n", old_val);
		break;
	}
}

static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst += src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
{
	printk("ATOM BIOS beeped!\n");
}

static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	int r = 0;

	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
	if (r) {
		ctx->abort = true;
	}
}

static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t saved;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}

static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = (dst == src);
	ctx->ctx->cs_above = (dst > src);
	SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
	       ctx->ctx->cs_above ? "GT" : "LE");
}

static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
	unsigned count = U8((*ptr)++);
	SDEBUG(" count: %d\n", count);
	if (arg == ATOM_UNIT_MICROSEC)
		udelay(count);
	else if (!drm_can_sleep())
		mdelay(count);
	else
		msleep(count);
}

static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		ctx->ctx->divmul[0] = dst / src;
		ctx->ctx->divmul[1] = dst % src;
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}

static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}

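/*
 * Conditional and unconditional jumps. Repeated jumps back to the same target
 * are timed with jiffies; if a table keeps spinning on one branch for more
 * than five seconds the interpreter sets ctx->abort and bails out rather than
 * hanging the machine.
 */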
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", str_yes_no(execute));
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > 5000)) {
					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
					ctx->abort = true;
				}
			} else {
				/* jiffies wrapped around; just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}

static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, mask, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
	SDEBUG(" mask: 0x%08x", mask);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= mask;
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;
	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}

static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->divmul[0] = dst * src;
}

static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}

static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("POST card output: 0x%02X\n", val);
}

static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);
	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}

static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}

static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;
	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}

static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}

static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst -= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			pr_info("Bad case\n");
			return;
		}
	(*ptr) += 2;
}

static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = ((dst & src) == 0);
	SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}

static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst ^= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

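/*
 * Opcode dispatch table: the index is the raw AtomBIOS opcode value; each
 * entry supplies the handler plus the argument type or condition it operates
 * on. Opcode 0 is invalid and terminates execution.
 */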
static struct {
	void (*func) (atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{ NULL, 0 },
	{ atom_op_move, ATOM_ARG_REG },
	{ atom_op_move, ATOM_ARG_PS },
	{ atom_op_move, ATOM_ARG_WS },
	{ atom_op_move, ATOM_ARG_FB },
	{ atom_op_move, ATOM_ARG_PLL },
	{ atom_op_move, ATOM_ARG_MC },
	{ atom_op_and, ATOM_ARG_REG },
	{ atom_op_and, ATOM_ARG_PS },
	{ atom_op_and, ATOM_ARG_WS },
	{ atom_op_and, ATOM_ARG_FB },
	{ atom_op_and, ATOM_ARG_PLL },
	{ atom_op_and, ATOM_ARG_MC },
	{ atom_op_or, ATOM_ARG_REG },
	{ atom_op_or, ATOM_ARG_PS },
	{ atom_op_or, ATOM_ARG_WS },
	{ atom_op_or, ATOM_ARG_FB },
	{ atom_op_or, ATOM_ARG_PLL },
	{ atom_op_or, ATOM_ARG_MC },
	{ atom_op_shift_left, ATOM_ARG_REG },
	{ atom_op_shift_left, ATOM_ARG_PS },
	{ atom_op_shift_left, ATOM_ARG_WS },
	{ atom_op_shift_left, ATOM_ARG_FB },
	{ atom_op_shift_left, ATOM_ARG_PLL },
	{ atom_op_shift_left, ATOM_ARG_MC },
	{ atom_op_shift_right, ATOM_ARG_REG },
	{ atom_op_shift_right, ATOM_ARG_PS },
	{ atom_op_shift_right, ATOM_ARG_WS },
	{ atom_op_shift_right, ATOM_ARG_FB },
	{ atom_op_shift_right, ATOM_ARG_PLL },
	{ atom_op_shift_right, ATOM_ARG_MC },
	{ atom_op_mul, ATOM_ARG_REG },
	{ atom_op_mul, ATOM_ARG_PS },
	{ atom_op_mul, ATOM_ARG_WS },
	{ atom_op_mul, ATOM_ARG_FB },
	{ atom_op_mul, ATOM_ARG_PLL },
	{ atom_op_mul, ATOM_ARG_MC },
	{ atom_op_div, ATOM_ARG_REG },
	{ atom_op_div, ATOM_ARG_PS },
	{ atom_op_div, ATOM_ARG_WS },
	{ atom_op_div, ATOM_ARG_FB },
	{ atom_op_div, ATOM_ARG_PLL },
	{ atom_op_div, ATOM_ARG_MC },
	{ atom_op_add, ATOM_ARG_REG },
	{ atom_op_add, ATOM_ARG_PS },
	{ atom_op_add, ATOM_ARG_WS },
	{ atom_op_add, ATOM_ARG_FB },
	{ atom_op_add, ATOM_ARG_PLL },
	{ atom_op_add, ATOM_ARG_MC },
	{ atom_op_sub, ATOM_ARG_REG },
	{ atom_op_sub, ATOM_ARG_PS },
	{ atom_op_sub, ATOM_ARG_WS },
	{ atom_op_sub, ATOM_ARG_FB },
	{ atom_op_sub, ATOM_ARG_PLL },
	{ atom_op_sub, ATOM_ARG_MC },
	{ atom_op_setport, ATOM_PORT_ATI },
	{ atom_op_setport, ATOM_PORT_PCI },
	{ atom_op_setport, ATOM_PORT_SYSIO },
	{ atom_op_setregblock, 0 },
	{ atom_op_setfbbase, 0 },
	{ atom_op_compare, ATOM_ARG_REG },
	{ atom_op_compare, ATOM_ARG_PS },
	{ atom_op_compare, ATOM_ARG_WS },
	{ atom_op_compare, ATOM_ARG_FB },
	{ atom_op_compare, ATOM_ARG_PLL },
	{ atom_op_compare, ATOM_ARG_MC },
	{ atom_op_switch, 0 },
	{ atom_op_jump, ATOM_COND_ALWAYS },
	{ atom_op_jump, ATOM_COND_EQUAL },
	{ atom_op_jump, ATOM_COND_BELOW },
	{ atom_op_jump, ATOM_COND_ABOVE },
	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
	{ atom_op_jump, ATOM_COND_NOTEQUAL },
	{ atom_op_test, ATOM_ARG_REG },
	{ atom_op_test, ATOM_ARG_PS },
	{ atom_op_test, ATOM_ARG_WS },
	{ atom_op_test, ATOM_ARG_FB },
	{ atom_op_test, ATOM_ARG_PLL },
	{ atom_op_test, ATOM_ARG_MC },
	{ atom_op_delay, ATOM_UNIT_MILLISEC },
	{ atom_op_delay, ATOM_UNIT_MICROSEC },
	{ atom_op_calltable, 0 },
	{ atom_op_repeat, 0 },
	{ atom_op_clear, ATOM_ARG_REG },
	{ atom_op_clear, ATOM_ARG_PS },
	{ atom_op_clear, ATOM_ARG_WS },
	{ atom_op_clear, ATOM_ARG_FB },
	{ atom_op_clear, ATOM_ARG_PLL },
	{ atom_op_clear, ATOM_ARG_MC },
	{ atom_op_nop, 0 },
	{ atom_op_eot, 0 },
	{ atom_op_mask, ATOM_ARG_REG },
	{ atom_op_mask, ATOM_ARG_PS },
	{ atom_op_mask, ATOM_ARG_WS },
	{ atom_op_mask, ATOM_ARG_FB },
	{ atom_op_mask, ATOM_ARG_PLL },
	{ atom_op_mask, ATOM_ARG_MC },
	{ atom_op_postcard, 0 },
	{ atom_op_beep, 0 },
	{ atom_op_savereg, 0 },
	{ atom_op_restorereg, 0 },
	{ atom_op_setdatablock, 0 },
	{ atom_op_xor, ATOM_ARG_REG },
	{ atom_op_xor, ATOM_ARG_PS },
	{ atom_op_xor, ATOM_ARG_WS },
	{ atom_op_xor, ATOM_ARG_FB },
	{ atom_op_xor, ATOM_ARG_PLL },
	{ atom_op_xor, ATOM_ARG_MC },
	{ atom_op_shl, ATOM_ARG_REG },
	{ atom_op_shl, ATOM_ARG_PS },
	{ atom_op_shl, ATOM_ARG_WS },
	{ atom_op_shl, ATOM_ARG_FB },
	{ atom_op_shl, ATOM_ARG_PLL },
	{ atom_op_shl, ATOM_ARG_MC },
	{ atom_op_shr, ATOM_ARG_REG },
	{ atom_op_shr, ATOM_ARG_PS },
	{ atom_op_shr, ATOM_ARG_WS },
	{ atom_op_shr, ATOM_ARG_FB },
	{ atom_op_shr, ATOM_ARG_PLL },
	{ atom_op_shr, ATOM_ARG_MC },
	{ atom_op_debug, 0 },
};

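/*
 * Core interpreter loop: look up the command table, allocate its workspace,
 * then fetch and dispatch opcodes until ATOM_OP_EOT, an invalid opcode, or an
 * abort request (a failing CALL_TABLE or a runaway jump loop) ends execution.
 */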
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
	int base = CU16(ctx->cmd_table + 4 + 2 * index);
	int len, ws, ps, ptr;
	unsigned char op;
	atom_exec_context ectx;
	int ret = 0;

	if (!base)
		return -EINVAL;

	len = CU16(base + ATOM_CT_SIZE_PTR);
	ws = CU8(base + ATOM_CT_WS_PTR);
	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
	ptr = base + ATOM_CT_CODE_PTR;

	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);

	ectx.ctx = ctx;
	ectx.ps_shift = ps / 4;
	ectx.start = base;
	ectx.ps = params;
	ectx.ps_size = params_size;
	ectx.abort = false;
	ectx.last_jump = 0;
	if (ws) {
		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
		ectx.ws_size = ws;
	} else {
		ectx.ws = NULL;
		ectx.ws_size = 0;
	}

	debug_depth++;
	while (1) {
		op = CU8(ptr++);
		if (op < ATOM_OP_NAMES_CNT)
			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
		else
			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
		if (ectx.abort) {
			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
				  base, len, ws, ps, ptr - 1);
			ret = -EINVAL;
			goto free;
		}

		if (op < ATOM_OP_CNT && op > 0)
			opcode_table[op].func(&ectx, &ptr,
					      opcode_table[op].arg);
		else
			break;

		if (op == ATOM_OP_EOT)
			break;
	}
	debug_depth--;
	SDEBUG("<<\n");

free:
	kfree(ectx.ws);
	return ret;
}

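/*
 * Public entry points. The "scratch_unlocked" variant takes only ctx->mutex
 * and resets the interpreter state (data block, register block, fb window,
 * io mode, divmul) before running the table; atom_execute_table() wraps it
 * with ctx->scratch_mutex so use of the scratch space is serialised as well.
 */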
int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
	int r;

	mutex_lock(&ctx->mutex);
	/* reset data block */
	ctx->data_block = 0;
	/* reset reg block */
	ctx->reg_block = 0;
	/* reset fb window */
	ctx->fb_base = 0;
	/* reset io mode */
	ctx->io_mode = ATOM_IO_MM;
	/* reset divmul */
	ctx->divmul[0] = 0;
	ctx->divmul[1] = 0;
	r = atom_execute_table_locked(ctx, index, params, params_size);
	mutex_unlock(&ctx->mutex);
	return r;
}

int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
	int r;
	mutex_lock(&ctx->scratch_mutex);
	r = atom_execute_table_scratch_unlocked(ctx, index, params, params_size);
	mutex_unlock(&ctx->scratch_mutex);
	return r;
}

static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };

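/*
 * Scan the BIOS IIO data block and record, per port number, the offset of its
 * IIO program so atom_iio_execute() can find it later; atom_iio_len[] gives
 * the length of each IIO opcode while skipping over a program.
 */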
static void atom_index_iio(struct atom_context *ctx, int base)
{
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		base += 3;
	}
}

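/*
 * Create an atom_context for a BIOS image: verify the BIOS, ATI and ATOM ROM
 * signatures, locate the master command and data tables, index the IIO
 * programs, and log the (not necessarily NUL-terminated) BIOS name string.
 */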
struct atom_context *atom_parse(struct card_info *card, void *bios)
{
	int base;
	struct atom_context *ctx =
	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
	char *str;
	char name[512];
	int i;

	if (!ctx)
		return NULL;

	ctx->card = card;
	ctx->bios = bios;

	if (CU16(0) != ATOM_BIOS_MAGIC) {
		pr_info("Invalid BIOS magic\n");
		kfree(ctx);
		return NULL;
	}
	if (strncmp
	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
	     strlen(ATOM_ATI_MAGIC))) {
		pr_info("Invalid ATI magic\n");
		kfree(ctx);
		return NULL;
	}

	base = CU16(ATOM_ROM_TABLE_PTR);
	if (strncmp
	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
	     strlen(ATOM_ROM_MAGIC))) {
		pr_info("Invalid ATOM magic\n");
		kfree(ctx);
		return NULL;
	}

	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
	if (!ctx->iio) {
		atom_destroy(ctx);
		return NULL;
	}

	str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
	while (*str && ((*str == '\n') || (*str == '\r')))
		str++;
	/* name string isn't always 0 terminated */
	for (i = 0; i < 511; i++) {
		name[i] = str[i];
		if (name[i] < '.' || name[i] > 'z') {
			name[i] = 0;
			break;
		}
	}
	pr_info("ATOM BIOS: %s\n", name);

	return ctx;
}

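/*
 * Run the ASIC_Init command table with the default engine and memory clocks
 * taken from the firmware info table; pre-R600 parts additionally get the
 * SPDFANCNTL table run.
 */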
int atom_asic_init(struct atom_context *ctx)
{
	struct radeon_device *rdev = ctx->card->dev->dev_private;
	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
	uint32_t ps[16];
	int ret;

	memset(ps, 0, 64);

	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
	if (!ps[0] || !ps[1])
		return 1;

	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
		return 1;
	ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
	if (ret)
		return ret;

	memset(ps, 0, 64);

	if (rdev->family < CHIP_R600) {
		if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
			atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps, 16);
	}
	return ret;
}

void atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}

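/*
 * Look up an entry in the master data/command table: return false if the
 * table is absent, otherwise report its size (data tables only), format
 * revision, content revision and, for data tables, the offset of its
 * contents.
 */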
bool atom_parse_data_header(struct atom_context *ctx, int index,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint16_t *data_start)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->data_table + offset);
	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);

	if (!mdt[index])
		return false;

	if (size)
		*size = CU16(idx);
	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	*data_start = idx;
	return true;
}

bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
			   uint8_t *crev)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->cmd_table + offset);
	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);

	if (!mct[index])
		return false;

	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	return true;
}

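/*
 * Allocate a scratch buffer standing in for the VRAM area the firmware
 * requests via the VRAM_UsageByFirmware data table, defaulting to 20KB when
 * the table is missing; ATOM FB reads and writes are serviced from this
 * buffer by the interpreter.
 */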
int atom_allocate_fb_scratch(struct atom_context *ctx)
{
	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
	uint16_t data_offset;
	int usage_bytes = 0;
	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;

	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);

		DRM_DEBUG("atom firmware requested %08x %dkb\n",
			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));

		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}