xref: /linux/drivers/gpu/drm/amd/amdgpu/atom.c (revision 8dcbc611f0fcbcc196dc96e0f833181df0d80242)
1  /*
2   * Copyright 2008 Advanced Micro Devices, Inc.
3   *
4   * Permission is hereby granted, free of charge, to any person obtaining a
5   * copy of this software and associated documentation files (the "Software"),
6   * to deal in the Software without restriction, including without limitation
7   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8   * and/or sell copies of the Software, and to permit persons to whom the
9   * Software is furnished to do so, subject to the following conditions:
10   *
11   * The above copyright notice and this permission notice shall be included in
12   * all copies or substantial portions of the Software.
13   *
14   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17   * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20   * OTHER DEALINGS IN THE SOFTWARE.
21   *
22   * Author: Stanislaw Skowronek
23   */
24  
25  #include <linux/module.h>
26  #include <linux/sched.h>
27  #include <linux/slab.h>
28  #include <asm/unaligned.h>
29  
30  #include <drm/drm_util.h>
31  
32  #define ATOM_DEBUG
33  
34  #include "atom.h"
35  #include "atom-names.h"
36  #include "atom-bits.h"
37  #include "amdgpu.h"
38  
39  #define ATOM_COND_ABOVE		0
40  #define ATOM_COND_ABOVEOREQUAL	1
41  #define ATOM_COND_ALWAYS	2
42  #define ATOM_COND_BELOW		3
43  #define ATOM_COND_BELOWOREQUAL	4
44  #define ATOM_COND_EQUAL		5
45  #define ATOM_COND_NOTEQUAL	6
46  
47  #define ATOM_PORT_ATI	0
48  #define ATOM_PORT_PCI	1
49  #define ATOM_PORT_SYSIO	2
50  
51  #define ATOM_UNIT_MICROSEC	0
52  #define ATOM_UNIT_MILLISEC	1
53  
54  #define PLL_INDEX	2
55  #define PLL_DATA	3
56  
57  #define ATOM_CMD_TIMEOUT_SEC	20
58  
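/*
 * Per-invocation execution state for one command table run.  ps points at
 * the caller-supplied parameter space and ws at a scratch workspace
 * allocated for the table; ps_shift is the dword offset at which a nested
 * CALL_TABLE finds its own parameters.  last_jump/last_jump_jiffies track
 * repeated backward jumps so a stuck table can be aborted after a timeout.
 */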
59  typedef struct {
60  	struct atom_context *ctx;
61  	uint32_t *ps, *ws;
62  	int ps_shift;
63  	uint16_t start;
64  	unsigned last_jump;
65  	unsigned long last_jump_jiffies;
66  	bool abort;
67  } atom_exec_context;
68  
69  int amdgpu_atom_debug;
70  static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
71  int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
72  
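/*
 * Mask and shift applied for each source/destination alignment encoding
 * (DWORD, WORD0/8/16, BYTE0/8/16/24) to extract the addressed sub-field
 * from a 32-bit value.
 */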
73  static uint32_t atom_arg_mask[8] =
74  	{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
75  	  0xFF000000 };
76  static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
77  
78  static int atom_dst_to_src[8][4] = {
79  	/* translate destination alignment field to the source alignment encoding */
80  	{0, 0, 0, 0},
81  	{1, 2, 3, 0},
82  	{1, 2, 3, 0},
83  	{1, 2, 3, 0},
84  	{4, 5, 6, 7},
85  	{4, 5, 6, 7},
86  	{4, 5, 6, 7},
87  	{4, 5, 6, 7},
88  };
89  static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
90  
91  static int debug_depth;
92  #ifdef ATOM_DEBUG
93  static void debug_print_spaces(int n)
94  {
95  	while (n--)
96  		printk("   ");
97  }
98  
99  #define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
100  #define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
101  #else
102  #define DEBUG(...) do { } while (0)
103  #define SDEBUG(...) do { } while (0)
104  #endif
105  
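/*
 * Run one indirect-IO (IIO) program from the BIOS IIO table.  "base" is the
 * byte offset of the program in the BIOS image; "index" and "data" carry the
 * register index and write data supplied by the caller.  The program issues
 * the underlying ioreg accesses and returns the assembled result.
 */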
106  static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
107  				 uint32_t index, uint32_t data)
108  {
109  	uint32_t temp = 0xCDCDCDCD;
110  
111  	while (1)
112  		switch (CU8(base)) {
113  		case ATOM_IIO_NOP:
114  			base++;
115  			break;
116  		case ATOM_IIO_READ:
117  			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
118  			base += 3;
119  			break;
120  		case ATOM_IIO_WRITE:
121  			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
122  			base += 3;
123  			break;
		case ATOM_IIO_CLEAR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				  CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				CU8(base + 2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				  CU8(base + 3));
			temp |= ((index >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
				CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				  CU8(base + 3));
			temp |= ((data >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
				CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
				  CU8(base + 3));
			temp |= ((ctx->io_attr >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
				CU8(base + 3);
			base += 4;
			break;
170  		case ATOM_IIO_END:
171  			return temp;
172  		default:
173  			pr_info("Unknown IIO opcode\n");
174  			return 0;
175  		}
176  }
177  
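/*
 * Decode and fetch a source operand.  "attr" selects the argument type
 * (register, parameter space, workspace, data table, frame buffer scratch,
 * immediate, PLL or MC register) and the alignment; *ptr is advanced past
 * the operand bytes.  If "saved" is non-NULL the full, unmasked value is
 * stored there so atom_put_dst() can later merge a result back into the
 * untouched bits.
 */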
178  static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
179  				 int *ptr, uint32_t *saved, int print)
180  {
181  	uint32_t idx, val = 0xCDCDCDCD, align, arg;
182  	struct atom_context *gctx = ctx->ctx;
183  	arg = attr & 7;
184  	align = (attr >> 3) & 7;
185  	switch (arg) {
186  	case ATOM_ARG_REG:
187  		idx = U16(*ptr);
188  		(*ptr) += 2;
189  		if (print)
190  			DEBUG("REG[0x%04X]", idx);
191  		idx += gctx->reg_block;
192  		switch (gctx->io_mode) {
193  		case ATOM_IO_MM:
194  			val = gctx->card->reg_read(gctx->card, idx);
195  			break;
196  		case ATOM_IO_PCI:
197  			pr_info("PCI registers are not implemented\n");
198  			return 0;
199  		case ATOM_IO_SYSIO:
200  			pr_info("SYSIO registers are not implemented\n");
201  			return 0;
202  		default:
203  			if (!(gctx->io_mode & 0x80)) {
204  				pr_info("Bad IO mode\n");
205  				return 0;
206  			}
207  			if (!gctx->iio[gctx->io_mode & 0x7F]) {
208  				pr_info("Undefined indirect IO read method %d\n",
209  					gctx->io_mode & 0x7F);
210  				return 0;
211  			}
212  			val =
213  			    atom_iio_execute(gctx,
214  					     gctx->iio[gctx->io_mode & 0x7F],
215  					     idx, 0);
216  		}
217  		break;
218  	case ATOM_ARG_PS:
219  		idx = U8(*ptr);
220  		(*ptr)++;
221  		/* get_unaligned_le32 avoids unaligned accesses from atombios
222  		 * tables, noticed on a DEC Alpha. */
223  		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
224  		if (print)
225  			DEBUG("PS[0x%02X,0x%04X]", idx, val);
226  		break;
227  	case ATOM_ARG_WS:
228  		idx = U8(*ptr);
229  		(*ptr)++;
230  		if (print)
231  			DEBUG("WS[0x%02X]", idx);
232  		switch (idx) {
233  		case ATOM_WS_QUOTIENT:
234  			val = gctx->divmul[0];
235  			break;
236  		case ATOM_WS_REMAINDER:
237  			val = gctx->divmul[1];
238  			break;
239  		case ATOM_WS_DATAPTR:
240  			val = gctx->data_block;
241  			break;
242  		case ATOM_WS_SHIFT:
243  			val = gctx->shift;
244  			break;
245  		case ATOM_WS_OR_MASK:
246  			val = 1 << gctx->shift;
247  			break;
248  		case ATOM_WS_AND_MASK:
249  			val = ~(1 << gctx->shift);
250  			break;
251  		case ATOM_WS_FB_WINDOW:
252  			val = gctx->fb_base;
253  			break;
254  		case ATOM_WS_ATTRIBUTES:
255  			val = gctx->io_attr;
256  			break;
257  		case ATOM_WS_REGPTR:
258  			val = gctx->reg_block;
259  			break;
260  		default:
261  			val = ctx->ws[idx];
262  		}
263  		break;
264  	case ATOM_ARG_ID:
265  		idx = U16(*ptr);
266  		(*ptr) += 2;
267  		if (print) {
268  			if (gctx->data_block)
269  				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
270  			else
271  				DEBUG("ID[0x%04X]", idx);
272  		}
273  		val = U32(idx + gctx->data_block);
274  		break;
275  	case ATOM_ARG_FB:
276  		idx = U8(*ptr);
277  		(*ptr)++;
278  		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
279  			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
280  				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
281  			val = 0;
282  		} else
283  			val = gctx->scratch[(gctx->fb_base / 4) + idx];
284  		if (print)
285  			DEBUG("FB[0x%02X]", idx);
286  		break;
287  	case ATOM_ARG_IMM:
288  		switch (align) {
289  		case ATOM_SRC_DWORD:
290  			val = U32(*ptr);
291  			(*ptr) += 4;
292  			if (print)
293  				DEBUG("IMM 0x%08X\n", val);
294  			return val;
295  		case ATOM_SRC_WORD0:
296  		case ATOM_SRC_WORD8:
297  		case ATOM_SRC_WORD16:
298  			val = U16(*ptr);
299  			(*ptr) += 2;
300  			if (print)
301  				DEBUG("IMM 0x%04X\n", val);
302  			return val;
303  		case ATOM_SRC_BYTE0:
304  		case ATOM_SRC_BYTE8:
305  		case ATOM_SRC_BYTE16:
306  		case ATOM_SRC_BYTE24:
307  			val = U8(*ptr);
308  			(*ptr)++;
309  			if (print)
310  				DEBUG("IMM 0x%02X\n", val);
311  			return val;
312  		}
313  		return 0;
314  	case ATOM_ARG_PLL:
315  		idx = U8(*ptr);
316  		(*ptr)++;
317  		if (print)
318  			DEBUG("PLL[0x%02X]", idx);
319  		val = gctx->card->pll_read(gctx->card, idx);
320  		break;
321  	case ATOM_ARG_MC:
322  		idx = U8(*ptr);
323  		(*ptr)++;
324  		if (print)
325  			DEBUG("MC[0x%02X]", idx);
326  		val = gctx->card->mc_read(gctx->card, idx);
327  		break;
328  	}
329  	if (saved)
330  		*saved = val;
331  	val &= atom_arg_mask[align];
332  	val >>= atom_arg_shift[align];
333  	if (print)
334  		switch (align) {
335  		case ATOM_SRC_DWORD:
336  			DEBUG(".[31:0] -> 0x%08X\n", val);
337  			break;
338  		case ATOM_SRC_WORD0:
339  			DEBUG(".[15:0] -> 0x%04X\n", val);
340  			break;
341  		case ATOM_SRC_WORD8:
342  			DEBUG(".[23:8] -> 0x%04X\n", val);
343  			break;
344  		case ATOM_SRC_WORD16:
345  			DEBUG(".[31:16] -> 0x%04X\n", val);
346  			break;
347  		case ATOM_SRC_BYTE0:
348  			DEBUG(".[7:0] -> 0x%02X\n", val);
349  			break;
350  		case ATOM_SRC_BYTE8:
351  			DEBUG(".[15:8] -> 0x%02X\n", val);
352  			break;
353  		case ATOM_SRC_BYTE16:
354  			DEBUG(".[23:16] -> 0x%02X\n", val);
355  			break;
356  		case ATOM_SRC_BYTE24:
357  			DEBUG(".[31:24] -> 0x%02X\n", val);
358  			break;
359  		}
360  	return val;
361  }
362  
363  static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
364  {
365  	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
366  	switch (arg) {
367  	case ATOM_ARG_REG:
368  	case ATOM_ARG_ID:
369  		(*ptr) += 2;
370  		break;
371  	case ATOM_ARG_PLL:
372  	case ATOM_ARG_MC:
373  	case ATOM_ARG_PS:
374  	case ATOM_ARG_WS:
375  	case ATOM_ARG_FB:
376  		(*ptr)++;
377  		break;
378  	case ATOM_ARG_IMM:
379  		switch (align) {
380  		case ATOM_SRC_DWORD:
381  			(*ptr) += 4;
382  			return;
383  		case ATOM_SRC_WORD0:
384  		case ATOM_SRC_WORD8:
385  		case ATOM_SRC_WORD16:
386  			(*ptr) += 2;
387  			return;
388  		case ATOM_SRC_BYTE0:
389  		case ATOM_SRC_BYTE8:
390  		case ATOM_SRC_BYTE16:
391  		case ATOM_SRC_BYTE24:
392  			(*ptr)++;
393  			return;
394  		}
395  		return;
396  	}
397  }
398  
399  static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
400  {
401  	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
402  }
403  
404  static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
405  {
406  	uint32_t val = 0xCDCDCDCD;
407  
408  	switch (align) {
409  	case ATOM_SRC_DWORD:
410  		val = U32(*ptr);
411  		(*ptr) += 4;
412  		break;
413  	case ATOM_SRC_WORD0:
414  	case ATOM_SRC_WORD8:
415  	case ATOM_SRC_WORD16:
416  		val = U16(*ptr);
417  		(*ptr) += 2;
418  		break;
419  	case ATOM_SRC_BYTE0:
420  	case ATOM_SRC_BYTE8:
421  	case ATOM_SRC_BYTE16:
422  	case ATOM_SRC_BYTE24:
423  		val = U8(*ptr);
424  		(*ptr)++;
425  		break;
426  	}
427  	return val;
428  }
429  
430  static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
431  			     int *ptr, uint32_t *saved, int print)
432  {
433  	return atom_get_src_int(ctx,
434  				arg | atom_dst_to_src[(attr >> 3) &
435  						      7][(attr >> 6) & 3] << 3,
436  				ptr, saved, print);
437  }
438  
439  static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
440  {
441  	atom_skip_src_int(ctx,
442  			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
443  								 3] << 3, ptr);
444  }
445  
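/*
 * Decode a destination operand and write "val" to it, merging the new bits
 * into "saved" (the full previous value returned by atom_get_dst()) so that
 * bits outside the addressed sub-field are preserved.
 */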
446  static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
447  			 int *ptr, uint32_t val, uint32_t saved)
448  {
	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	uint32_t old_val = val, idx;
452  	struct atom_context *gctx = ctx->ctx;
453  	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
454  	val <<= atom_arg_shift[align];
455  	val &= atom_arg_mask[align];
456  	saved &= ~atom_arg_mask[align];
457  	val |= saved;
458  	switch (arg) {
459  	case ATOM_ARG_REG:
460  		idx = U16(*ptr);
461  		(*ptr) += 2;
462  		DEBUG("REG[0x%04X]", idx);
463  		idx += gctx->reg_block;
464  		switch (gctx->io_mode) {
465  		case ATOM_IO_MM:
466  			if (idx == 0)
467  				gctx->card->reg_write(gctx->card, idx,
468  						      val << 2);
469  			else
470  				gctx->card->reg_write(gctx->card, idx, val);
471  			break;
472  		case ATOM_IO_PCI:
473  			pr_info("PCI registers are not implemented\n");
474  			return;
475  		case ATOM_IO_SYSIO:
476  			pr_info("SYSIO registers are not implemented\n");
477  			return;
478  		default:
479  			if (!(gctx->io_mode & 0x80)) {
480  				pr_info("Bad IO mode\n");
481  				return;
482  			}
483  			if (!gctx->iio[gctx->io_mode & 0x7F]) {
484  				pr_info("Undefined indirect IO write method %d\n",
485  					gctx->io_mode & 0x7F);
486  				return;
487  			}
488  			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0x7F],
489  					 idx, val);
490  		}
491  		break;
492  	case ATOM_ARG_PS:
493  		idx = U8(*ptr);
494  		(*ptr)++;
495  		DEBUG("PS[0x%02X]", idx);
496  		ctx->ps[idx] = cpu_to_le32(val);
497  		break;
498  	case ATOM_ARG_WS:
499  		idx = U8(*ptr);
500  		(*ptr)++;
501  		DEBUG("WS[0x%02X]", idx);
502  		switch (idx) {
503  		case ATOM_WS_QUOTIENT:
504  			gctx->divmul[0] = val;
505  			break;
506  		case ATOM_WS_REMAINDER:
507  			gctx->divmul[1] = val;
508  			break;
509  		case ATOM_WS_DATAPTR:
510  			gctx->data_block = val;
511  			break;
512  		case ATOM_WS_SHIFT:
513  			gctx->shift = val;
514  			break;
515  		case ATOM_WS_OR_MASK:
516  		case ATOM_WS_AND_MASK:
517  			break;
518  		case ATOM_WS_FB_WINDOW:
519  			gctx->fb_base = val;
520  			break;
521  		case ATOM_WS_ATTRIBUTES:
522  			gctx->io_attr = val;
523  			break;
524  		case ATOM_WS_REGPTR:
525  			gctx->reg_block = val;
526  			break;
527  		default:
528  			ctx->ws[idx] = val;
529  		}
530  		break;
531  	case ATOM_ARG_FB:
532  		idx = U8(*ptr);
533  		(*ptr)++;
534  		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
535  			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
536  				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
537  		} else
538  			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
539  		DEBUG("FB[0x%02X]", idx);
540  		break;
541  	case ATOM_ARG_PLL:
542  		idx = U8(*ptr);
543  		(*ptr)++;
544  		DEBUG("PLL[0x%02X]", idx);
545  		gctx->card->pll_write(gctx->card, idx, val);
546  		break;
547  	case ATOM_ARG_MC:
548  		idx = U8(*ptr);
549  		(*ptr)++;
550  		DEBUG("MC[0x%02X]", idx);
551  		gctx->card->mc_write(gctx->card, idx, val);
552  		return;
553  	}
554  	switch (align) {
555  	case ATOM_SRC_DWORD:
556  		DEBUG(".[31:0] <- 0x%08X\n", old_val);
557  		break;
558  	case ATOM_SRC_WORD0:
559  		DEBUG(".[15:0] <- 0x%04X\n", old_val);
560  		break;
561  	case ATOM_SRC_WORD8:
562  		DEBUG(".[23:8] <- 0x%04X\n", old_val);
563  		break;
564  	case ATOM_SRC_WORD16:
565  		DEBUG(".[31:16] <- 0x%04X\n", old_val);
566  		break;
567  	case ATOM_SRC_BYTE0:
568  		DEBUG(".[7:0] <- 0x%02X\n", old_val);
569  		break;
570  	case ATOM_SRC_BYTE8:
571  		DEBUG(".[15:8] <- 0x%02X\n", old_val);
572  		break;
573  	case ATOM_SRC_BYTE16:
574  		DEBUG(".[23:16] <- 0x%02X\n", old_val);
575  		break;
576  	case ATOM_SRC_BYTE24:
577  		DEBUG(".[31:24] <- 0x%02X\n", old_val);
578  		break;
579  	}
580  }
581  
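/*
 * Opcode handlers.  Each handler is called with the execution context, a
 * pointer into the table's bytecode (advanced as operands are consumed) and
 * the "arg" value from opcode_table[], which encodes the destination
 * argument type or a condition/port/unit code for that opcode.
 */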
582  static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
583  {
584  	uint8_t attr = U8((*ptr)++);
585  	uint32_t dst, src, saved;
586  	int dptr = *ptr;
587  	SDEBUG("   dst: ");
588  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
589  	SDEBUG("   src: ");
590  	src = atom_get_src(ctx, attr, ptr);
591  	dst += src;
592  	SDEBUG("   dst: ");
593  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
594  }
595  
596  static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
597  {
598  	uint8_t attr = U8((*ptr)++);
599  	uint32_t dst, src, saved;
600  	int dptr = *ptr;
601  	SDEBUG("   dst: ");
602  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
603  	SDEBUG("   src: ");
604  	src = atom_get_src(ctx, attr, ptr);
605  	dst &= src;
606  	SDEBUG("   dst: ");
607  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
608  }
609  
610  static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
611  {
612  	pr_info("ATOM BIOS beeped!\n");
613  }
614  
615  static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
616  {
617  	int idx = U8((*ptr)++);
618  	int r = 0;
619  
620  	if (idx < ATOM_TABLE_NAMES_CNT)
621  		SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
622  	else
623  		SDEBUG("   table: %d\n", idx);
624  	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
625  		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
626  	if (r) {
627  		ctx->abort = true;
628  	}
629  }
630  
631  static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
632  {
633  	uint8_t attr = U8((*ptr)++);
634  	uint32_t saved;
635  	int dptr = *ptr;
636  	attr &= 0x38;
637  	attr |= atom_def_dst[attr >> 3] << 6;
638  	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
639  	SDEBUG("   dst: ");
640  	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
641  }
642  
643  static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
644  {
645  	uint8_t attr = U8((*ptr)++);
646  	uint32_t dst, src;
647  	SDEBUG("   src1: ");
648  	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
649  	SDEBUG("   src2: ");
650  	src = atom_get_src(ctx, attr, ptr);
651  	ctx->ctx->cs_equal = (dst == src);
652  	ctx->ctx->cs_above = (dst > src);
653  	SDEBUG("   result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
654  	       ctx->ctx->cs_above ? "GT" : "LE");
655  }
656  
657  static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
658  {
659  	unsigned count = U8((*ptr)++);
660  	SDEBUG("   count: %d\n", count);
661  	if (arg == ATOM_UNIT_MICROSEC)
662  		udelay(count);
663  	else if (!drm_can_sleep())
664  		mdelay(count);
665  	else
666  		msleep(count);
667  }
668  
669  static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
670  {
671  	uint8_t attr = U8((*ptr)++);
672  	uint32_t dst, src;
673  	SDEBUG("   src1: ");
674  	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
675  	SDEBUG("   src2: ");
676  	src = atom_get_src(ctx, attr, ptr);
677  	if (src != 0) {
678  		ctx->ctx->divmul[0] = dst / src;
679  		ctx->ctx->divmul[1] = dst % src;
680  	} else {
681  		ctx->ctx->divmul[0] = 0;
682  		ctx->ctx->divmul[1] = 0;
683  	}
684  }
685  
686  static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
687  {
688  	uint64_t val64;
689  	uint8_t attr = U8((*ptr)++);
690  	uint32_t dst, src;
691  	SDEBUG("   src1: ");
692  	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
693  	SDEBUG("   src2: ");
694  	src = atom_get_src(ctx, attr, ptr);
695  	if (src != 0) {
696  		val64 = dst;
697  		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
698  		do_div(val64, src);
699  		ctx->ctx->divmul[0] = lower_32_bits(val64);
700  		ctx->ctx->divmul[1] = upper_32_bits(val64);
701  	} else {
702  		ctx->ctx->divmul[0] = 0;
703  		ctx->ctx->divmul[1] = 0;
704  	}
705  }
706  
707  static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
708  {
709  	/* functionally, a nop */
710  }
711  
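/*
 * Conditional and unconditional jumps.  Repeated backward jumps to the same
 * target are timed with jiffies; if a table keeps looping on one target for
 * more than ATOM_CMD_TIMEOUT_SEC the interpreter sets ctx->abort so a buggy
 * BIOS table cannot hang the kernel.
 */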
712  static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
713  {
714  	int execute = 0, target = U16(*ptr);
715  	unsigned long cjiffies;
716  
717  	(*ptr) += 2;
718  	switch (arg) {
719  	case ATOM_COND_ABOVE:
720  		execute = ctx->ctx->cs_above;
721  		break;
722  	case ATOM_COND_ABOVEOREQUAL:
723  		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
724  		break;
725  	case ATOM_COND_ALWAYS:
726  		execute = 1;
727  		break;
728  	case ATOM_COND_BELOW:
729  		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
730  		break;
731  	case ATOM_COND_BELOWOREQUAL:
732  		execute = !ctx->ctx->cs_above;
733  		break;
734  	case ATOM_COND_EQUAL:
735  		execute = ctx->ctx->cs_equal;
736  		break;
737  	case ATOM_COND_NOTEQUAL:
738  		execute = !ctx->ctx->cs_equal;
739  		break;
740  	}
741  	if (arg != ATOM_COND_ALWAYS)
742  		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
743  	SDEBUG("   target: 0x%04X\n", target);
744  	if (execute) {
745  		if (ctx->last_jump == (ctx->start + target)) {
746  			cjiffies = jiffies;
747  			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
748  				cjiffies -= ctx->last_jump_jiffies;
749  				if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
750  					DRM_ERROR("atombios stuck in loop for more than %d secs, aborting\n",
751  						  ATOM_CMD_TIMEOUT_SEC);
752  					ctx->abort = true;
753  				}
754  			} else {
755  				/* jiffies wrapped around, just wait a little longer */
756  				ctx->last_jump_jiffies = jiffies;
757  			}
758  		} else {
759  			ctx->last_jump = ctx->start + target;
760  			ctx->last_jump_jiffies = jiffies;
761  		}
762  		*ptr = ctx->start + target;
763  	}
764  }
765  
766  static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
767  {
768  	uint8_t attr = U8((*ptr)++);
769  	uint32_t dst, mask, src, saved;
770  	int dptr = *ptr;
771  	SDEBUG("   dst: ");
772  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
773  	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
774  	SDEBUG("   mask: 0x%08x", mask);
775  	SDEBUG("   src: ");
776  	src = atom_get_src(ctx, attr, ptr);
777  	dst &= mask;
778  	dst |= src;
779  	SDEBUG("   dst: ");
780  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
781  }
782  
783  static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
784  {
785  	uint8_t attr = U8((*ptr)++);
786  	uint32_t src, saved;
787  	int dptr = *ptr;
788  	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
789  		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
790  	else {
791  		atom_skip_dst(ctx, arg, attr, ptr);
792  		saved = 0xCDCDCDCD;
793  	}
794  	SDEBUG("   src: ");
795  	src = atom_get_src(ctx, attr, ptr);
796  	SDEBUG("   dst: ");
797  	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
798  }
799  
800  static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
801  {
802  	uint8_t attr = U8((*ptr)++);
803  	uint32_t dst, src;
804  	SDEBUG("   src1: ");
805  	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
806  	SDEBUG("   src2: ");
807  	src = atom_get_src(ctx, attr, ptr);
808  	ctx->ctx->divmul[0] = dst * src;
809  }
810  
811  static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
812  {
813  	uint64_t val64;
814  	uint8_t attr = U8((*ptr)++);
815  	uint32_t dst, src;
816  	SDEBUG("   src1: ");
817  	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
818  	SDEBUG("   src2: ");
819  	src = atom_get_src(ctx, attr, ptr);
820  	val64 = (uint64_t)dst * (uint64_t)src;
821  	ctx->ctx->divmul[0] = lower_32_bits(val64);
822  	ctx->ctx->divmul[1] = upper_32_bits(val64);
823  }
824  
825  static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
826  {
827  	/* nothing */
828  }
829  
830  static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
831  {
832  	uint8_t attr = U8((*ptr)++);
833  	uint32_t dst, src, saved;
834  	int dptr = *ptr;
835  	SDEBUG("   dst: ");
836  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
837  	SDEBUG("   src: ");
838  	src = atom_get_src(ctx, attr, ptr);
839  	dst |= src;
840  	SDEBUG("   dst: ");
841  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
842  }
843  
844  static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
845  {
846  	uint8_t val = U8((*ptr)++);
847  	SDEBUG("POST card output: 0x%02X\n", val);
848  }
849  
850  static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
851  {
852  	pr_info("unimplemented!\n");
853  }
854  
855  static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
856  {
857  	pr_info("unimplemented!\n");
858  }
859  
860  static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
861  {
862  	pr_info("unimplemented!\n");
863  }
864  
865  static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
866  {
867  	int idx = U8(*ptr);
868  	(*ptr)++;
869  	SDEBUG("   block: %d\n", idx);
870  	if (!idx)
871  		ctx->ctx->data_block = 0;
872  	else if (idx == 255)
873  		ctx->ctx->data_block = ctx->start;
874  	else
875  		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
876  	SDEBUG("   base: 0x%04X\n", ctx->ctx->data_block);
877  }
878  
879  static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
880  {
881  	uint8_t attr = U8((*ptr)++);
882  	SDEBUG("   fb_base: ");
883  	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
884  }
885  
886  static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
887  {
888  	int port;
889  	switch (arg) {
890  	case ATOM_PORT_ATI:
891  		port = U16(*ptr);
892  		if (port < ATOM_IO_NAMES_CNT)
893  			SDEBUG("   port: %d (%s)\n", port, atom_io_names[port]);
894  		else
895  			SDEBUG("   port: %d\n", port);
896  		if (!port)
897  			ctx->ctx->io_mode = ATOM_IO_MM;
898  		else
899  			ctx->ctx->io_mode = ATOM_IO_IIO | port;
900  		(*ptr) += 2;
901  		break;
902  	case ATOM_PORT_PCI:
903  		ctx->ctx->io_mode = ATOM_IO_PCI;
904  		(*ptr)++;
905  		break;
906  	case ATOM_PORT_SYSIO:
907  		ctx->ctx->io_mode = ATOM_IO_SYSIO;
908  		(*ptr)++;
909  		break;
910  	}
911  }
912  
913  static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
914  {
915  	ctx->ctx->reg_block = U16(*ptr);
916  	(*ptr) += 2;
917  	SDEBUG("   base: 0x%04X\n", ctx->ctx->reg_block);
918  }
919  
920  static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
921  {
922  	uint8_t attr = U8((*ptr)++), shift;
923  	uint32_t saved, dst;
924  	int dptr = *ptr;
925  	attr &= 0x38;
926  	attr |= atom_def_dst[attr >> 3] << 6;
927  	SDEBUG("   dst: ");
928  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
929  	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
930  	SDEBUG("   shift: %d\n", shift);
931  	dst <<= shift;
932  	SDEBUG("   dst: ");
933  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
934  }
935  
936  static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
937  {
938  	uint8_t attr = U8((*ptr)++), shift;
939  	uint32_t saved, dst;
940  	int dptr = *ptr;
941  	attr &= 0x38;
942  	attr |= atom_def_dst[attr >> 3] << 6;
943  	SDEBUG("   dst: ");
944  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
945  	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
946  	SDEBUG("   shift: %d\n", shift);
947  	dst >>= shift;
948  	SDEBUG("   dst: ");
949  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
950  }
951  
952  static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
953  {
954  	uint8_t attr = U8((*ptr)++), shift;
955  	uint32_t saved, dst;
956  	int dptr = *ptr;
957  	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
958  	SDEBUG("   dst: ");
959  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
960  	/* op needs the full dst value */
961  	dst = saved;
962  	shift = atom_get_src(ctx, attr, ptr);
963  	SDEBUG("   shift: %d\n", shift);
964  	dst <<= shift;
965  	dst &= atom_arg_mask[dst_align];
966  	dst >>= atom_arg_shift[dst_align];
967  	SDEBUG("   dst: ");
968  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
969  }
970  
971  static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
972  {
973  	uint8_t attr = U8((*ptr)++), shift;
974  	uint32_t saved, dst;
975  	int dptr = *ptr;
976  	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
977  	SDEBUG("   dst: ");
978  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
979  	/* op needs the full dst value */
980  	dst = saved;
981  	shift = atom_get_src(ctx, attr, ptr);
982  	SDEBUG("   shift: %d\n", shift);
983  	dst >>= shift;
984  	dst &= atom_arg_mask[dst_align];
985  	dst >>= atom_arg_shift[dst_align];
986  	SDEBUG("   dst: ");
987  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
988  }
989  
990  static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
991  {
992  	uint8_t attr = U8((*ptr)++);
993  	uint32_t dst, src, saved;
994  	int dptr = *ptr;
995  	SDEBUG("   dst: ");
996  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
997  	SDEBUG("   src: ");
998  	src = atom_get_src(ctx, attr, ptr);
999  	dst -= src;
1000  	SDEBUG("   dst: ");
1001  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1002  }
1003  
1004  static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
1005  {
1006  	uint8_t attr = U8((*ptr)++);
1007  	uint32_t src, val, target;
1008  	SDEBUG("   switch: ");
1009  	src = atom_get_src(ctx, attr, ptr);
1010  	while (U16(*ptr) != ATOM_CASE_END)
1011  		if (U8(*ptr) == ATOM_CASE_MAGIC) {
1012  			(*ptr)++;
1013  			SDEBUG("   case: ");
1014  			val =
1015  			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
1016  					 ptr);
1017  			target = U16(*ptr);
1018  			if (val == src) {
1019  				SDEBUG("   target: %04X\n", target);
1020  				*ptr = ctx->start + target;
1021  				return;
1022  			}
1023  			(*ptr) += 2;
1024  		} else {
1025  			pr_info("Bad case\n");
1026  			return;
1027  		}
1028  	(*ptr) += 2;
1029  }
1030  
1031  static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1032  {
1033  	uint8_t attr = U8((*ptr)++);
1034  	uint32_t dst, src;
1035  	SDEBUG("   src1: ");
1036  	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1037  	SDEBUG("   src2: ");
1038  	src = atom_get_src(ctx, attr, ptr);
1039  	ctx->ctx->cs_equal = ((dst & src) == 0);
1040  	SDEBUG("   result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1041  }
1042  
1043  static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1044  {
1045  	uint8_t attr = U8((*ptr)++);
1046  	uint32_t dst, src, saved;
1047  	int dptr = *ptr;
1048  	SDEBUG("   dst: ");
1049  	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1050  	SDEBUG("   src: ");
1051  	src = atom_get_src(ctx, attr, ptr);
1052  	dst ^= src;
1053  	SDEBUG("   dst: ");
1054  	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1055  }
1056  
1057  static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1058  {
1059  	uint8_t val = U8((*ptr)++);
1060  	SDEBUG("DEBUG output: 0x%02X\n", val);
1061  }
1062  
1063  static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1064  {
1065  	uint16_t val = U16(*ptr);
1066  	(*ptr) += val + 2;
1067  	SDEBUG("PROCESSDS output: 0x%02X\n", val);
1068  }
1069  
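/*
 * Dispatch table indexed by opcode byte: handler plus the argument or
 * condition code passed to it.  Entry 0 is unused; opcode 0 (or any opcode
 * outside the table) terminates execution.
 */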
1070  static struct {
1071  	void (*func) (atom_exec_context *, int *, int);
1072  	int arg;
1073  } opcode_table[ATOM_OP_CNT] = {
1074  	{
1075  	NULL, 0}, {
1076  	atom_op_move, ATOM_ARG_REG}, {
1077  	atom_op_move, ATOM_ARG_PS}, {
1078  	atom_op_move, ATOM_ARG_WS}, {
1079  	atom_op_move, ATOM_ARG_FB}, {
1080  	atom_op_move, ATOM_ARG_PLL}, {
1081  	atom_op_move, ATOM_ARG_MC}, {
1082  	atom_op_and, ATOM_ARG_REG}, {
1083  	atom_op_and, ATOM_ARG_PS}, {
1084  	atom_op_and, ATOM_ARG_WS}, {
1085  	atom_op_and, ATOM_ARG_FB}, {
1086  	atom_op_and, ATOM_ARG_PLL}, {
1087  	atom_op_and, ATOM_ARG_MC}, {
1088  	atom_op_or, ATOM_ARG_REG}, {
1089  	atom_op_or, ATOM_ARG_PS}, {
1090  	atom_op_or, ATOM_ARG_WS}, {
1091  	atom_op_or, ATOM_ARG_FB}, {
1092  	atom_op_or, ATOM_ARG_PLL}, {
1093  	atom_op_or, ATOM_ARG_MC}, {
1094  	atom_op_shift_left, ATOM_ARG_REG}, {
1095  	atom_op_shift_left, ATOM_ARG_PS}, {
1096  	atom_op_shift_left, ATOM_ARG_WS}, {
1097  	atom_op_shift_left, ATOM_ARG_FB}, {
1098  	atom_op_shift_left, ATOM_ARG_PLL}, {
1099  	atom_op_shift_left, ATOM_ARG_MC}, {
1100  	atom_op_shift_right, ATOM_ARG_REG}, {
1101  	atom_op_shift_right, ATOM_ARG_PS}, {
1102  	atom_op_shift_right, ATOM_ARG_WS}, {
1103  	atom_op_shift_right, ATOM_ARG_FB}, {
1104  	atom_op_shift_right, ATOM_ARG_PLL}, {
1105  	atom_op_shift_right, ATOM_ARG_MC}, {
1106  	atom_op_mul, ATOM_ARG_REG}, {
1107  	atom_op_mul, ATOM_ARG_PS}, {
1108  	atom_op_mul, ATOM_ARG_WS}, {
1109  	atom_op_mul, ATOM_ARG_FB}, {
1110  	atom_op_mul, ATOM_ARG_PLL}, {
1111  	atom_op_mul, ATOM_ARG_MC}, {
1112  	atom_op_div, ATOM_ARG_REG}, {
1113  	atom_op_div, ATOM_ARG_PS}, {
1114  	atom_op_div, ATOM_ARG_WS}, {
1115  	atom_op_div, ATOM_ARG_FB}, {
1116  	atom_op_div, ATOM_ARG_PLL}, {
1117  	atom_op_div, ATOM_ARG_MC}, {
1118  	atom_op_add, ATOM_ARG_REG}, {
1119  	atom_op_add, ATOM_ARG_PS}, {
1120  	atom_op_add, ATOM_ARG_WS}, {
1121  	atom_op_add, ATOM_ARG_FB}, {
1122  	atom_op_add, ATOM_ARG_PLL}, {
1123  	atom_op_add, ATOM_ARG_MC}, {
1124  	atom_op_sub, ATOM_ARG_REG}, {
1125  	atom_op_sub, ATOM_ARG_PS}, {
1126  	atom_op_sub, ATOM_ARG_WS}, {
1127  	atom_op_sub, ATOM_ARG_FB}, {
1128  	atom_op_sub, ATOM_ARG_PLL}, {
1129  	atom_op_sub, ATOM_ARG_MC}, {
1130  	atom_op_setport, ATOM_PORT_ATI}, {
1131  	atom_op_setport, ATOM_PORT_PCI}, {
1132  	atom_op_setport, ATOM_PORT_SYSIO}, {
1133  	atom_op_setregblock, 0}, {
1134  	atom_op_setfbbase, 0}, {
1135  	atom_op_compare, ATOM_ARG_REG}, {
1136  	atom_op_compare, ATOM_ARG_PS}, {
1137  	atom_op_compare, ATOM_ARG_WS}, {
1138  	atom_op_compare, ATOM_ARG_FB}, {
1139  	atom_op_compare, ATOM_ARG_PLL}, {
1140  	atom_op_compare, ATOM_ARG_MC}, {
1141  	atom_op_switch, 0}, {
1142  	atom_op_jump, ATOM_COND_ALWAYS}, {
1143  	atom_op_jump, ATOM_COND_EQUAL}, {
1144  	atom_op_jump, ATOM_COND_BELOW}, {
1145  	atom_op_jump, ATOM_COND_ABOVE}, {
1146  	atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1147  	atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1148  	atom_op_jump, ATOM_COND_NOTEQUAL}, {
1149  	atom_op_test, ATOM_ARG_REG}, {
1150  	atom_op_test, ATOM_ARG_PS}, {
1151  	atom_op_test, ATOM_ARG_WS}, {
1152  	atom_op_test, ATOM_ARG_FB}, {
1153  	atom_op_test, ATOM_ARG_PLL}, {
1154  	atom_op_test, ATOM_ARG_MC}, {
1155  	atom_op_delay, ATOM_UNIT_MILLISEC}, {
1156  	atom_op_delay, ATOM_UNIT_MICROSEC}, {
1157  	atom_op_calltable, 0}, {
1158  	atom_op_repeat, 0}, {
1159  	atom_op_clear, ATOM_ARG_REG}, {
1160  	atom_op_clear, ATOM_ARG_PS}, {
1161  	atom_op_clear, ATOM_ARG_WS}, {
1162  	atom_op_clear, ATOM_ARG_FB}, {
1163  	atom_op_clear, ATOM_ARG_PLL}, {
1164  	atom_op_clear, ATOM_ARG_MC}, {
1165  	atom_op_nop, 0}, {
1166  	atom_op_eot, 0}, {
1167  	atom_op_mask, ATOM_ARG_REG}, {
1168  	atom_op_mask, ATOM_ARG_PS}, {
1169  	atom_op_mask, ATOM_ARG_WS}, {
1170  	atom_op_mask, ATOM_ARG_FB}, {
1171  	atom_op_mask, ATOM_ARG_PLL}, {
1172  	atom_op_mask, ATOM_ARG_MC}, {
1173  	atom_op_postcard, 0}, {
1174  	atom_op_beep, 0}, {
1175  	atom_op_savereg, 0}, {
1176  	atom_op_restorereg, 0}, {
1177  	atom_op_setdatablock, 0}, {
1178  	atom_op_xor, ATOM_ARG_REG}, {
1179  	atom_op_xor, ATOM_ARG_PS}, {
1180  	atom_op_xor, ATOM_ARG_WS}, {
1181  	atom_op_xor, ATOM_ARG_FB}, {
1182  	atom_op_xor, ATOM_ARG_PLL}, {
1183  	atom_op_xor, ATOM_ARG_MC}, {
1184  	atom_op_shl, ATOM_ARG_REG}, {
1185  	atom_op_shl, ATOM_ARG_PS}, {
1186  	atom_op_shl, ATOM_ARG_WS}, {
1187  	atom_op_shl, ATOM_ARG_FB}, {
1188  	atom_op_shl, ATOM_ARG_PLL}, {
1189  	atom_op_shl, ATOM_ARG_MC}, {
1190  	atom_op_shr, ATOM_ARG_REG}, {
1191  	atom_op_shr, ATOM_ARG_PS}, {
1192  	atom_op_shr, ATOM_ARG_WS}, {
1193  	atom_op_shr, ATOM_ARG_FB}, {
1194  	atom_op_shr, ATOM_ARG_PLL}, {
1195  	atom_op_shr, ATOM_ARG_MC}, {
1196  	atom_op_debug, 0}, {
1197  	atom_op_processds, 0}, {
1198  	atom_op_mul32, ATOM_ARG_PS}, {
1199  	atom_op_mul32, ATOM_ARG_WS}, {
1200  	atom_op_div32, ATOM_ARG_PS}, {
1201  	atom_op_div32, ATOM_ARG_WS},
1202  };
1203  
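/*
 * Execute one command table with the context mutex already held.  The table
 * header supplies the bytecode length, workspace size and parameter space
 * size; execution stops at an EOT opcode, an unknown opcode, or when a
 * handler flags an abort (stuck loop or failed nested call).  Returns
 * -EINVAL if the table does not exist or execution was aborted.
 */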
1204  static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
1205  {
1206  	int base = CU16(ctx->cmd_table + 4 + 2 * index);
1207  	int len, ws, ps, ptr;
1208  	unsigned char op;
1209  	atom_exec_context ectx;
1210  	int ret = 0;
1211  
1212  	if (!base)
1213  		return -EINVAL;
1214  
1215  	len = CU16(base + ATOM_CT_SIZE_PTR);
1216  	ws = CU8(base + ATOM_CT_WS_PTR);
1217  	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1218  	ptr = base + ATOM_CT_CODE_PTR;
1219  
1220  	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1221  
1222  	ectx.ctx = ctx;
1223  	ectx.ps_shift = ps / 4;
1224  	ectx.start = base;
1225  	ectx.ps = params;
1226  	ectx.abort = false;
1227  	ectx.last_jump = 0;
	if (ws) {
		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
		if (!ectx.ws)
			return -ENOMEM;
	} else {
		ectx.ws = NULL;
	}
1232  
1233  	debug_depth++;
1234  	while (1) {
1235  		op = CU8(ptr++);
1236  		if (op < ATOM_OP_NAMES_CNT)
1237  			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1238  		else
1239  			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1240  		if (ectx.abort) {
1241  			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1242  				base, len, ws, ps, ptr - 1);
1243  			ret = -EINVAL;
1244  			goto free;
1245  		}
1246  
1247  		if (op < ATOM_OP_CNT && op > 0)
1248  			opcode_table[op].func(&ectx, &ptr,
1249  					      opcode_table[op].arg);
1250  		else
1251  			break;
1252  
1253  		if (op == ATOM_OP_EOT)
1254  			break;
1255  	}
1256  	debug_depth--;
1257  	SDEBUG("<<\n");
1258  
1259  free:
1260  	if (ws)
1261  		kfree(ectx.ws);
1262  	return ret;
1263  }
1264  
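/*
 * Public entry point: serialize on the context mutex, reset the interpreter
 * state that tables expect to start from (data/reg blocks, fb window, IO
 * mode, divmul) and run the requested command table.
 */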
1265  int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
1266  {
1267  	int r;
1268  
1269  	mutex_lock(&ctx->mutex);
1270  	/* reset data block */
1271  	ctx->data_block = 0;
1272  	/* reset reg block */
1273  	ctx->reg_block = 0;
1274  	/* reset fb window */
1275  	ctx->fb_base = 0;
1276  	/* reset io mode */
1277  	ctx->io_mode = ATOM_IO_MM;
1278  	/* reset divmul */
1279  	ctx->divmul[0] = 0;
1280  	ctx->divmul[1] = 0;
1281  	r = amdgpu_atom_execute_table_locked(ctx, index, params);
1282  	mutex_unlock(&ctx->mutex);
1283  	return r;
1284  }
1285  
1286  static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1287  
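/*
 * Build an index of the indirect-IO programs: iio[port id] holds the byte
 * offset of the program body for that port, so atom_iio_execute() can be
 * started directly from an IO mode value.
 */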
1288  static void atom_index_iio(struct atom_context *ctx, int base)
1289  {
1290  	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1291  	if (!ctx->iio)
1292  		return;
1293  	while (CU8(base) == ATOM_IIO_START) {
1294  		ctx->iio[CU8(base + 1)] = base + 2;
1295  		base += 2;
1296  		while (CU8(base) != ATOM_IIO_END)
1297  			base += atom_iio_len[CU8(base)];
1298  		base += 3;
1299  	}
1300  }
1301  
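/*
 * Validate the BIOS image (BIOS, ATI and ATOM ROM signatures), locate the
 * master command and data tables, index the indirect-IO programs and record
 * the VBIOS part number string.  Returns a new interpreter context, or NULL
 * on any failure.
 */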
1302  struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1303  {
1304  	int base;
1305  	struct atom_context *ctx =
1306  	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1307  	char *str;
1308  	u16 idx;
1309  
1310  	if (!ctx)
1311  		return NULL;
1312  
1313  	ctx->card = card;
1314  	ctx->bios = bios;
1315  
1316  	if (CU16(0) != ATOM_BIOS_MAGIC) {
1317  		pr_info("Invalid BIOS magic\n");
1318  		kfree(ctx);
1319  		return NULL;
1320  	}
1321  	if (strncmp
1322  	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1323  	     strlen(ATOM_ATI_MAGIC))) {
1324  		pr_info("Invalid ATI magic\n");
1325  		kfree(ctx);
1326  		return NULL;
1327  	}
1328  
1329  	base = CU16(ATOM_ROM_TABLE_PTR);
1330  	if (strncmp
1331  	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1332  	     strlen(ATOM_ROM_MAGIC))) {
1333  		pr_info("Invalid ATOM magic\n");
1334  		kfree(ctx);
1335  		return NULL;
1336  	}
1337  
1338  	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1339  	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1340  	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1341  	if (!ctx->iio) {
1342  		amdgpu_atom_destroy(ctx);
1343  		return NULL;
1344  	}
1345  
1346  	idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
1347  	if (idx == 0)
1348  		idx = 0x80;
1349  
1350  	str = CSTR(idx);
1351  	if (*str != '\0') {
1352  		pr_info("ATOM BIOS: %s\n", str);
1353  		strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
1354  	}
1355  
1356  
1357  	return ctx;
1358  }
1359  
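/*
 * Run the ASIC_INIT command table with the default SCLK/MCLK from the
 * firmware info table as parameters.  Returns a positive value if the
 * clocks or the table are missing, otherwise the result of table execution.
 */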
1360  int amdgpu_atom_asic_init(struct atom_context *ctx)
1361  {
1362  	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1363  	uint32_t ps[16];
1364  	int ret;
1365  
1366  	memset(ps, 0, 64);
1367  
1368  	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1369  	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1370  	if (!ps[0] || !ps[1])
1371  		return 1;
1372  
1373  	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1374  		return 1;
1375  	ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1376  	if (ret)
1377  		return ret;
1378  
1379  	memset(ps, 0, 64);
1380  
1381  	return ret;
1382  }
1383  
1384  void amdgpu_atom_destroy(struct atom_context *ctx)
1385  {
1386  	kfree(ctx->iio);
1387  	kfree(ctx);
1388  }
1389  
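/*
 * Look up a data table by index in the master data table and report its
 * size, format/content revisions and start offset.  Returns false if the
 * table is not present in this BIOS.
 */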
1390  bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1391  			    uint16_t *size, uint8_t *frev, uint8_t *crev,
1392  			    uint16_t *data_start)
1393  {
1394  	int offset = index * 2 + 4;
1395  	int idx = CU16(ctx->data_table + offset);
1396  	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1397  
1398  	if (!mdt[index])
1399  		return false;
1400  
1401  	if (size)
1402  		*size = CU16(idx);
1403  	if (frev)
1404  		*frev = CU8(idx + 2);
1405  	if (crev)
1406  		*crev = CU8(idx + 3);
1407  	*data_start = idx;
1408  	return true;
1409  }
1410  
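/*
 * Look up a command table by index in the master command table and report
 * its format/content revisions.  Returns false if the table is not present
 * in this BIOS.
 */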
1411  bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
1412  			   uint8_t *crev)
1413  {
1414  	int offset = index * 2 + 4;
1415  	int idx = CU16(ctx->cmd_table + offset);
1416  	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1417  
1418  	if (!mct[index])
1419  		return false;
1420  
1421  	if (frev)
1422  		*frev = CU8(idx + 2);
1423  	if (crev)
1424  		*crev = CU8(idx + 3);
1425  	return true;
1426  }
1427  
1428