xref: /linux/drivers/gpu/drm/ast/ast_2500.c (revision face6a3615a649456eb4549f6d474221d877d604)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */

#include <linux/delay.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_print.h>

#include "ast_drv.h"
#include "ast_post.h"

/*
 * POST
 */

/*
 * AST2500 DRAM settings modules
 */

#define REGTBL_NUM           17
#define REGIDX_010           0
#define REGIDX_014           1
#define REGIDX_018           2
#define REGIDX_020           3
#define REGIDX_024           4
#define REGIDX_02C           5
#define REGIDX_030           6
#define REGIDX_214           7
#define REGIDX_2E0           8
#define REGIDX_2E4           9
#define REGIDX_2E8           10
#define REGIDX_2EC           11
#define REGIDX_2F0           12
#define REGIDX_2F4           13
#define REGIDX_2F8           14
#define REGIDX_RFC           15
#define REGIDX_PLL           16

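/*
 * Each timing table below is indexed by the REGIDX_* values above. The first
 * fifteen entries are written to the MCR register whose offset is named in
 * the trailing comment (base 0x1E6E0000), the tRFC entry packs one byte per
 * supported DRAM density (consumed by check_dram_size_2500()), and the PLL
 * entry is written to MCR 0x120 by the DDR3/DDR4 init paths below.
 */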
static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
	0x64604D38,		     /* 0x010 */
	0x29690599,		     /* 0x014 */
	0x00000300,		     /* 0x018 */
	0x00000000,		     /* 0x020 */
	0x00000000,		     /* 0x024 */
	0x02181E70,		     /* 0x02C */
	0x00000040,		     /* 0x030 */
	0x00000024,		     /* 0x214 */
	0x02001300,		     /* 0x2E0 */
	0x0E0000A0,		     /* 0x2E4 */
	0x000E001B,		     /* 0x2E8 */
	0x35B8C105,		     /* 0x2EC */
	0x08090408,		     /* 0x2F0 */
	0x9B000800,		     /* 0x2F4 */
	0x0E400A00,		     /* 0x2F8 */
	0x9971452F,		     /* tRFC  */
	0x000071C1		     /* PLL   */
};

static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
	0x63604E37,		     /* 0x010 */
	0xE97AFA99,		     /* 0x014 */
	0x00019000,		     /* 0x018 */
	0x08000000,		     /* 0x020 */
	0x00000400,		     /* 0x024 */
	0x00000410,		     /* 0x02C */
	0x00000101,		     /* 0x030 */
	0x00000024,		     /* 0x214 */
	0x03002900,		     /* 0x2E0 */
	0x0E0000A0,		     /* 0x2E4 */
	0x000E001C,		     /* 0x2E8 */
	0x35B8C106,		     /* 0x2EC */
	0x08080607,		     /* 0x2F0 */
	0x9B000900,		     /* 0x2F4 */
	0x0E400A00,		     /* 0x2F8 */
	0x99714545,		     /* tRFC  */
	0x000071C1		     /* PLL   */
};

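/* Polling iterations (not microseconds) used while waiting for DDR PHY init */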
#define TIMEOUT              5000000

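/*
 * Recover the AHB bus before POST: clear any bus-lock condition, arm WDT#1
 * when the ARM-ICE fast-reset strap is set, wait for the SCU to accept its
 * unlock key, and finally clear the fast-reset flag again.
 */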
void ast_2500_patch_ahb(void __iomem *regs)
{
	u32 data;

	/* Clear bus lock condition */
	__ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
	__ast_moutdwm(regs, 0x1e600084, 0x00010000);
	__ast_moutdwm(regs, 0x1e600088, 0x00000000);
	__ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);

	data = __ast_mindwm(regs, 0x1e6e2070);
	if (data & 0x08000000) { /* check fast reset */
		/*
		 * If "fast reset" is enabled for the ARM-ICE debugger,
		 * then the WDT needs to be enabled as well:
		 * WDT04 is the WDT#1 reload reg.
		 * WDT08 is the WDT#1 counter restart reg, used to avoid a system deadlock.
		 * WDT0C is the WDT#1 control reg:
		 *	[6:5]:= 01: full chip
		 *	[4]:= 1: 1MHz clock source
		 *	[1]:= 1: WDT is cleared and disabled after a timeout occurs
		 *	[0]:= 1: WDT enable
		 */
		__ast_moutdwm(regs, 0x1E785004, 0x00000010);
		__ast_moutdwm(regs, 0x1E785008, 0x00004755);
		__ast_moutdwm(regs, 0x1E78500c, 0x00000033);
		udelay(1000);
	}

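	/*
	 * Write the SCU protection key (0x1688A8A8) until the SCU reports
	 * itself unlocked (reads back as 1).
	 */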
	do {
		__ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
		data = __ast_mindwm(regs, 0x1e6e2000);
	} while (data != 1);

	__ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
}

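/*
 * DRAM self-test helpers: program the MCR test address-range and pattern
 * registers (0x74/0x7C, assumed per the shared mmc_test helpers) and run the
 * controller's built-in burst and single-access tests.
 */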
static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
{
	return mmc_test(ast, datagen, 0x85);
}

static bool cbr_test_2500(struct ast_device *ast)
{
	ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
	ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
	if (!mmc_test_burst(ast, 0))
		return false;
	if (!mmc_test_single_2500(ast, 0))
		return false;
	return true;
}

static bool ddr_test_2500(struct ast_device *ast)
{
	ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
	ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
	if (!mmc_test_burst(ast, 0))
		return false;
	if (!mmc_test_burst(ast, 1))
		return false;
	if (!mmc_test_burst(ast, 2))
		return false;
	if (!mmc_test_burst(ast, 3))
		return false;
	if (!mmc_test_single_2500(ast, 0))
		return false;
	return true;
}

static void ddr_init_common_2500(struct ast_device *ast)
{
	ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
	ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
	ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF);
	ast_moutdwm(ast, 0x1E6E0040, 0x88448844);
	ast_moutdwm(ast, 0x1E6E0044, 0x24422288);
	ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
	ast_moutdwm(ast, 0x1E6E004C, 0x22222222);
	ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
	ast_moutdwm(ast, 0x1E6E0208, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0218, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0220, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0228, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0230, 0x00000000);
	ast_moutdwm(ast, 0x1E6E02A8, 0x00000000);
	ast_moutdwm(ast, 0x1E6E02B0, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0240, 0x86000000);
	ast_moutdwm(ast, 0x1E6E0244, 0x00008600);
	ast_moutdwm(ast, 0x1E6E0248, 0x80000000);
	ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
}

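/*
 * Request DDR PHY (DFI) initialization through MCR 0x60 and poll until the
 * request bit clears with no error bits (0x000A0000) set in MCR 0x300; retry
 * on failure, then write 0x6 to MCR 0x60 (presumably switching the PHY to
 * normal operation).
 */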
static void ddr_phy_init_2500(struct ast_device *ast)
{
	u32 data, pass, timecnt;

	pass = 0;
	ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
	while (!pass) {
		for (timecnt = 0; timecnt < TIMEOUT; timecnt++) {
			data = ast_mindwm(ast, 0x1E6E0060) & 0x1;
			if (!data)
				break;
		}
		if (timecnt != TIMEOUT) {
			data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000;
			if (!data)
				pass = 1;
		}
		if (!pass) {
			ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
			udelay(10); /* delay 10 us */
			ast_moutdwm(ast, 0x1E6E0060, 0x00000005);
		}
	}

	ast_moutdwm(ast, 0x1E6E0060, 0x00000006);
}

/*
 * Check DRAM Size
 * 1Gb : 0x80000000 ~ 0x87FFFFFF
 * 2Gb : 0x80000000 ~ 0x8FFFFFFF
 * 4Gb : 0x80000000 ~ 0x9FFFFFFF
 * 8Gb : 0x80000000 ~ 0xBFFFFFFF
 */
static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
{
	u32 reg_04, reg_14;

	reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc;
	reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00;

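	/*
	 * Write a distinct pattern near the top of each candidate range,
	 * largest first; on smaller devices the higher addresses alias and
	 * get overwritten, so the largest pattern that survives identifies
	 * the density, selecting the size bits and the per-density tRFC byte.
	 */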
	ast_moutdwm(ast, 0xA0100000, 0x41424344);
	ast_moutdwm(ast, 0x90100000, 0x35363738);
	ast_moutdwm(ast, 0x88100000, 0x292A2B2C);
	ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);

	/* Check 8Gbit */
	if (ast_mindwm(ast, 0xA0100000) == 0x41424344) {
		reg_04 |= 0x03;
		reg_14 |= (tRFC >> 24) & 0xFF;
		/* Check 4Gbit */
	} else if (ast_mindwm(ast, 0x90100000) == 0x35363738) {
		reg_04 |= 0x02;
		reg_14 |= (tRFC >> 16) & 0xFF;
		/* Check 2Gbit */
	} else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) {
		reg_04 |= 0x01;
		reg_14 |= (tRFC >> 8) & 0xFF;
	} else {
		reg_14 |= tRFC & 0xFF;
	}
	ast_moutdwm(ast, 0x1E6E0004, reg_04);
	ast_moutdwm(ast, 0x1E6E0014, reg_14);
}

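/*
 * Enable the graphics-memory cache: request cache initialization via
 * MCR 0x04 bit 12, wait for the done flag (bit 19), then leave the cache
 * enabled via bit 10 (bit naming inferred from this sequence).
 */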
static void enable_cache_2500(struct ast_device *ast)
{
	u32 reg_04, data;

	reg_04 = ast_mindwm(ast, 0x1E6E0004);
	ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000);

	do
		data = ast_mindwm(ast, 0x1E6E0004);
	while (!(data & 0x80000));
	ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
}

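/*
 * Program the memory PLL: reset the MMC, clear its parameter registers, then
 * select the M-PLL setting for a 25 MHz or 24 MHz reference clock according
 * to the SCU70 strap (bit 23).
 */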
static void set_mpll_2500(struct ast_device *ast)
{
	u32 addr, data, param;

	/* Reset MMC */
	ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
	ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
	for (addr = 0x1e6e0004; addr < 0x1e6e0090;) {
		ast_moutdwm(ast, addr, 0x0);
		addr += 4;
	}
	ast_moutdwm(ast, 0x1E6E0034, 0x00020000);

	ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
	data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000;
	if (data) {
		/* CLKIN = 25MHz */
		param = 0x930023E0;
		ast_moutdwm(ast, 0x1E6E2160, 0x00011320);
	} else {
		/* CLKIN = 24MHz */
		param = 0x93002400;
	}
	ast_moutdwm(ast, 0x1E6E2020, param);
	udelay(100);
}

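/*
 * Reset the memory controller through the watchdog registers at
 * 0x1E78504x/5x (reload, restart and control), wait for the reset to take
 * effect, then unlock the MMC again with what is assumed to be its
 * protection key (0xFC600309).
 */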
static void reset_mmc_2500(struct ast_device *ast)
{
	ast_moutdwm(ast, 0x1E78505C, 0x00000004);
	ast_moutdwm(ast, 0x1E785044, 0x00000001);
	ast_moutdwm(ast, 0x1E785048, 0x00004755);
	ast_moutdwm(ast, 0x1E78504C, 0x00000013);
	mdelay(100);
	ast_moutdwm(ast, 0x1E785054, 0x00000077);
	ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
}

static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
	ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
	ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
	ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
	ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
	ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]);	     /* MODEREG4/6 */
	ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]);	     /* MODEREG5 */
	ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
	ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]);	     /* MODEREG1/3 */

	/* DDR PHY Setting */
	ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE);
	ast_moutdwm(ast, 0x1E6E0204, 0x00001001);
	ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
	ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
	ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
	ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
	ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
	ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
	ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
	ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
	ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
	ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
	ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
	ast_moutdwm(ast, 0x1E6E02C0, 0x00000006);

	/* Controller Setting */
	ast_moutdwm(ast, 0x1E6E0034, 0x00020091);

	/* Wait for DDR PHY init to complete */
	ddr_phy_init_2500(ast);

	ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
	ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
	ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);

	check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
	enable_cache_2500(ast);
	ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
	ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}

static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
	u32 data, data2, pass, retrycnt;
	u32 ddr_vref, phy_vref;
	u32 min_ddr_vref = 0, min_phy_vref = 0;
	u32 max_ddr_vref = 0, max_phy_vref = 0;

	ast_moutdwm(ast, 0x1E6E0004, 0x00000313);
	ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]);
	ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]);
	ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]);
	ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]);	     /* MODEREG4/6 */
	ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]);	     /* MODEREG5 */
	ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */
	ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]);	     /* MODEREG1/3 */

	/* DDR PHY Setting */
	ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE);
	ast_moutdwm(ast, 0x1E6E0204, 0x09002000);
	ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B);
	ast_moutdwm(ast, 0x1E6E0210, 0x20000000);
	ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]);
	ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]);
	ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]);
	ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]);
	ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]);
	ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]);
	ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]);
	ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]);
	ast_moutdwm(ast, 0x1E6E0290, 0x00100008);
	ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C);
	ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E);

	/* Controller Setting */
	ast_moutdwm(ast, 0x1E6E0034, 0x0001A991);

	/* Train PHY Vref first */
	pass = 0;

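	/*
	 * Sweep the PHY-side Vref (0x40-0x7F in MCR 0x2CC), re-running DFI
	 * init and the calibration test for each value, and keep the setting
	 * with the best margin reported in MCR 0x3D0 (interpreted here as a
	 * read-margin readout).
	 */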
	for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
		max_phy_vref = 0x0;
		pass = 0;
		ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06);
		for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) {
			ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
			ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
			ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8));
			/* Fire DFI Init */
			ddr_phy_init_2500(ast);
			ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
			if (cbr_test_2500(ast)) {
				pass++;
				data = ast_mindwm(ast, 0x1E6E03D0);
				data2 = data >> 8;
				data  = data & 0xff;
				if (data > data2)
					data = data2;
				if (max_phy_vref < data) {
					max_phy_vref = data;
					min_phy_vref = phy_vref;
				}
			} else if (pass > 0) {
				break;
			}
		}
	}
	ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8));

	/* Train DDR Vref next */
	pass = 0;

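	/*
	 * Sweep the DRAM-side Vref (0x00-0x3F in MCR 0x2C0[15:8]) the same
	 * way, record the passing window, and program its midpoint before the
	 * final DFI init below.
	 */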
	for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) {
		min_ddr_vref = 0xFF;
		max_ddr_vref = 0x0;
		pass = 0;
		for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) {
			ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
			ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
			ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));
			/* Fire DFI Init */
			ddr_phy_init_2500(ast);
			ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
			if (cbr_test_2500(ast)) {
				pass++;
				if (min_ddr_vref > ddr_vref)
					min_ddr_vref = ddr_vref;
				if (max_ddr_vref < ddr_vref)
					max_ddr_vref = ddr_vref;
			} else if (pass != 0) {
				break;
			}
		}
	}

	ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
	ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1;
	ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8));

	/* Wait for DDR PHY init to complete */
	ddr_phy_init_2500(ast);

	ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]);
	ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81);
	ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93);

	check_dram_size_2500(ast, ddr_table[REGIDX_RFC]);
	enable_cache_2500(ast);
	ast_moutdwm(ast, 0x1E6E001C, 0x00000008);
	ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}

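/*
 * Full DRAM bring-up: program the MPLL, reset the memory controller, apply
 * the common settings and the DDR3- or DDR4-specific sequence (selected by
 * SCU70 bit 24), and retry up to five times until the memory test passes.
 */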
static bool ast_dram_init_2500(struct ast_device *ast)
{
	u32 data;
	u32 max_tries = 5;

	do {
		if (max_tries-- == 0)
			return false;
		set_mpll_2500(ast);
		reset_mmc_2500(ast);
		ddr_init_common_2500(ast);

		data = ast_mindwm(ast, 0x1E6E2070);
		if (data & 0x01000000)
			ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table);
		else
			ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table);
	} while (!ddr_test_2500(ast));

	ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41);

	/* Patch code */
	data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF;
	ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000);

	return true;
}

static void ast_post_chip_2500(struct ast_device *ast)
{
	struct drm_device *dev = &ast->base;
	u32 temp;
	u8 reg;

	reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
	if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) { /* vga only */
		/* Clear bus lock condition */
		ast_2500_patch_ahb(ast->regs);

		/* Disable watchdog */
		ast_moutdwm(ast, 0x1E78502C, 0x00000000);
		ast_moutdwm(ast, 0x1E78504C, 0x00000000);

		/*
		 * Reset the USB port to work around the "unknown USB device" issue.
		 * SCU90 is Multi-function Pin Control #5:
		 *	[29]:= 1: enable USB2.0 Host port#1 (the mutually shared
		 *	       USB2.0 hub port).
		 * SCU94 is Multi-function Pin Control #6:
		 *	[14:13]:= 1x: USB2.0 Host2 controller
		 * SCU70 is the Hardware Strap reg:
		 *	[23]:= 1: CLKIN is 25MHz and USBCK1 is 24/48 MHz (determined
		 *	       by [18]: 0 (24 MHz) / 1 (48 MHz))
		 * SCU7C is the write-one-to-clear reg for SCU70:
		 *	[23]:= writing 1 clears SCU70[23] to 0.
		 */
		ast_moutdwm(ast, 0x1E6E2090, 0x20000000);
		ast_moutdwm(ast, 0x1E6E2094, 0x00004000);
		if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) {
			ast_moutdwm(ast, 0x1E6E207C, 0x00800000);
			mdelay(100);
			ast_moutdwm(ast, 0x1E6E2070, 0x00800000);
		}
		/* Modify eSPI reset pin */
		temp = ast_mindwm(ast, 0x1E6E2070);
		if (temp & 0x02000000)
			ast_moutdwm(ast, 0x1E6E207C, 0x00004000);

		/* Slow down CPU/AHB CLK in VGA only mode */
		temp = ast_read32(ast, 0x12008);
		temp |= 0x73;
		ast_write32(ast, 0x12008, temp);

		if (!ast_dram_init_2500(ast))
			drm_err(dev, "DRAM init failed!\n");

		temp = ast_mindwm(ast, 0x1e6e2040);
		ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
	}

	/* wait ready */
	do {
		reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
	} while ((reg & 0x40) == 0);
}

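/*
 * POST entry point for AST2500/AST2510: program the default extended
 * registers, then either run the full chip POST through the P2A bridge or,
 * without P2A, only enable the DVO output when a SIL164 transmitter is
 * present.
 */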
int ast_2500_post(struct ast_device *ast)
{
	ast_2300_set_def_ext_reg(ast);

	if (ast->config_mode == ast_use_p2a) {
		ast_post_chip_2500(ast);
	} else {
		if (ast->tx_chip == AST_TX_SIL164) {
			/* Enable DVO */
			ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
		}
	}

	return 0;
}

/*
 * Mode setting
 */

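/*
 * Pixel-clock PLL parameters, indexed by the VBIOS clock index named in each
 * trailing comment (frequencies in MHz, e.g. VCLK25_175 = 25.175 MHz).
 */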
const struct ast_vbios_dclk_info ast_2500_dclk_table[] = {
	{0x2c, 0xe7, 0x03},			/* 00: VCLK25_175	*/
	{0x95, 0x62, 0x03},			/* 01: VCLK28_322	*/
	{0x67, 0x63, 0x01},			/* 02: VCLK31_5		*/
	{0x76, 0x63, 0x01},			/* 03: VCLK36		*/
	{0xee, 0x67, 0x01},			/* 04: VCLK40		*/
	{0x82, 0x62, 0x01},			/* 05: VCLK49_5		*/
	{0xc6, 0x64, 0x01},			/* 06: VCLK50		*/
	{0x94, 0x62, 0x01},			/* 07: VCLK56_25	*/
	{0x80, 0x64, 0x00},			/* 08: VCLK65		*/
	{0x7b, 0x63, 0x00},			/* 09: VCLK75		*/
	{0x67, 0x62, 0x00},			/* 0a: VCLK78_75	*/
	{0x7c, 0x62, 0x00},			/* 0b: VCLK94_5		*/
	{0x8e, 0x62, 0x00},			/* 0c: VCLK108		*/
	{0x85, 0x24, 0x00},			/* 0d: VCLK135		*/
	{0x67, 0x22, 0x00},			/* 0e: VCLK157_5	*/
	{0x6a, 0x22, 0x00},			/* 0f: VCLK162		*/
	{0x4d, 0x4c, 0x80},			/* 10: VCLK154		*/
	{0x68, 0x6f, 0x80},			/* 11: VCLK83.5		*/
	{0x28, 0x49, 0x80},			/* 12: VCLK106.5	*/
	{0x37, 0x49, 0x80},			/* 13: VCLK146.25	*/
	{0x1f, 0x45, 0x80},			/* 14: VCLK148.5	*/
	{0x47, 0x6c, 0x80},			/* 15: VCLK71		*/
	{0x25, 0x65, 0x80},			/* 16: VCLK88.75	*/
	{0x58, 0x01, 0x42},			/* 17: VCLK119		*/
	{0x32, 0x67, 0x80},			/* 18: VCLK85_5		*/
	{0x6a, 0x6d, 0x80},			/* 19: VCLK97_75	*/
	{0x44, 0x20, 0x43},			/* 1a: VCLK118_25	*/
};

/*
 * Device initialization
 */

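/*
 * Widescreen support: the AST2510 variant is always treated as supporting
 * WSXGA+ and Full HD; other variants rely on the generation-2100 detection
 * helpers, as does WUXGA support.
 */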
static void ast_2500_detect_widescreen(struct ast_device *ast)
{
	if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST2510) {
		ast->support_wsxga_p = true;
		ast->support_fullhd = true;
	}
	if (__ast_2100_detect_wuxga(ast))
		ast->support_wuxga = true;
}

static const struct ast_device_quirks ast_2500_device_quirks = {
	.crtc_mem_req_threshold_low = 96,
	.crtc_mem_req_threshold_high = 120,
	.crtc_hsync_precatch_needed = true,
};

struct drm_device *ast_2500_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post)
{
	struct drm_device *dev;
	struct ast_device *ast;
	int ret;

	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
	if (IS_ERR(ast))
		return ERR_CAST(ast);
	dev = &ast->base;

	ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2500_device_quirks);

	ast->dclk_table = ast_2500_dclk_table;

	ast_2300_detect_tx_chip(ast);

	if (need_post) {
		ret = ast_post_gpu(ast);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = ast_mm_init(ast);
	if (ret)
		return ERR_PTR(ret);

	/* map reserved buffer */
	ast->dp501_fw_buf = NULL;
	if (ast->vram_size < pci_resource_len(pdev, 0)) {
		ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
		if (!ast->dp501_fw_buf)
			drm_info(dev, "failed to map reserved buffer!\n");
	}

	ast_2500_detect_widescreen(ast);

	ret = ast_mode_config_init(ast);
	if (ret)
		return ERR_PTR(ret);

	return dev;
}
676