1 /*
2  * Copyright (C) 2012 Samsung Electronics Co.Ltd
3  * Authors:
4  *	Eunchul Kim <chulspro.kim@samsung.com>
5  *	Jinyoung Jeon <jy0.jeon@samsung.com>
6  *	Sangmin Lee <lsmin.lee@samsung.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/clk.h>
18 #include <linux/pm_runtime.h>
19 #include <plat/map-base.h>
20 
21 #include <drm/drmP.h>
22 #include <drm/exynos_drm.h>
23 #include "regs-gsc.h"
24 #include "exynos_drm_ipp.h"
25 #include "exynos_drm_gsc.h"
26 
27 /*
28  * GSC stands for General SCaler and
29  * supports image scaling/rotation and input/output DMA operations.
30  * The input DMA reads image data from memory.
31  * The output DMA writes image data to memory.
32  * GSC also supports image rotation and image effect functions.
33  *
34  * M2M operation : supports crop/scale/rotation/csc and so on.
35  * Memory ----> GSC H/W ----> Memory.
36  * Writeback operation : supports cloned screen with FIMD.
37  * FIMD ----> GSC H/W ----> Memory.
38  * Output operation : supports direct display using local path.
39  * Memory ----> GSC H/W ----> FIMD, Mixer.
40  */
41 
42 /*
43  * TODO
44  * 1. check suspend/resume api if needed.
45  * 2. need to check use case platform_device_id.
46  * 3. check src/dst size width, height.
47  * 4. add check_prepare api for correct register setup.
48  * 5. need to add supported list in prop_list.
49  * 6. check prescaler/scaler optimization.
50  */
51 
52 #define GSC_MAX_DEVS	4
53 #define GSC_MAX_SRC		4
54 #define GSC_MAX_DST		16
55 #define GSC_RESET_TIMEOUT	50
56 #define GSC_BUF_STOP	1
57 #define GSC_BUF_START	2
58 #define GSC_REG_SZ		16
59 #define GSC_WIDTH_ITU_709	1280
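/*
 * Scaling ratios below are in 16.16 fixed point (ratio = (src << 16) / dst),
 * so 65536 means 1:1; e.g. 74898 ~= 65536 * 8 / 7, i.e. an 8:7 downscale.
 */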
60 #define GSC_SC_UP_MAX_RATIO		65536
61 #define GSC_SC_DOWN_RATIO_7_8		74898
62 #define GSC_SC_DOWN_RATIO_6_8		87381
63 #define GSC_SC_DOWN_RATIO_5_8		104857
64 #define GSC_SC_DOWN_RATIO_4_8		131072
65 #define GSC_SC_DOWN_RATIO_3_8		174762
66 #define GSC_SC_DOWN_RATIO_2_8		262144
67 #define GSC_REFRESH_MIN	12
68 #define GSC_REFRESH_MAX	60
69 #define GSC_CROP_MAX	8192
70 #define GSC_CROP_MIN	32
71 #define GSC_SCALE_MAX	4224
72 #define GSC_SCALE_MIN	32
73 #define GSC_COEF_RATIO	7
74 #define GSC_COEF_PHASE	9
75 #define GSC_COEF_ATTR	16
76 #define GSC_COEF_H_8T	8
77 #define GSC_COEF_V_4T	4
78 #define GSC_COEF_DEPTH	3
79 
80 #define get_gsc_context(dev)	platform_get_drvdata(to_platform_device(dev))
81 #define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
82 					struct gsc_context, ippdrv)
83 #define gsc_read(offset)		readl(ctx->regs + (offset))
84 #define gsc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
85 
86 /*
87  * A structure of scaler.
88  *
89  * @range: narrow, wide.
90  * @pre_shfactor: pre scaler shift factor.
91  * @pre_hratio: horizontal ratio of the prescaler.
92  * @pre_vratio: vertical ratio of the prescaler.
93  * @main_hratio: the main scaler's horizontal ratio.
94  * @main_vratio: the main scaler's vertical ratio.
95  */
96 struct gsc_scaler {
97 	bool	range;
98 	u32	pre_shfactor;
99 	u32	pre_hratio;
100 	u32	pre_vratio;
101 	unsigned long main_hratio;
102 	unsigned long main_vratio;
103 };
104 
105 /*
106  * A structure of scaler capability.
107  *
108  * see the user manual, section 49.2 (features).
109  * @tile_w: tile mode or rotation width.
110  * @tile_h: tile mode or rotation height.
111  * @w: other cases width.
112  * @h: other cases height.
113  */
114 struct gsc_capability {
115 	/* tile or rotation */
116 	u32	tile_w;
117 	u32	tile_h;
118 	/* other cases */
119 	u32	w;
120 	u32	h;
121 };
122 
123 /*
124  * A structure of gsc context.
125  *
126  * @ippdrv: ipp driver structure, set up at initialization.
127  * @regs_res: register resources.
128  * @regs: memory mapped io registers.
129  * @lock: locking of operations.
130  * @gsc_clk: gsc gate clock.
131  * @sc: scaler information.
132  * @id: gsc id.
133  * @irq: irq number.
134  * @rotation: whether the source is rotated by 90 or 270 degrees.
135  * @suspended: whether the gate clock is disabled (device suspended).
136  */
137 struct gsc_context {
138 	struct exynos_drm_ippdrv	ippdrv;
139 	struct resource	*regs_res;
140 	void __iomem	*regs;
141 	struct mutex	lock;
142 	struct clk	*gsc_clk;
143 	struct gsc_scaler	sc;
144 	int	id;
145 	int	irq;
146 	bool	rotation;
147 	bool	suspended;
148 };
149 
150 /* 8-tap Filter Coefficient */
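/*
 * Horizontal polyphase filter: one coefficient set per downscale-ratio range,
 * 16 coefficient rows (phases) per set, 8 taps per row; taps sum to 128.
 */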
151 static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
152 	{	/* Ratio <= 65536 (~8:8) */
153 		{  0,  0,   0, 128,   0,   0,  0,  0 },
154 		{ -1,  2,  -6, 127,   7,  -2,  1,  0 },
155 		{ -1,  4, -12, 125,  16,  -5,  1,  0 },
156 		{ -1,  5, -15, 120,  25,  -8,  2,  0 },
157 		{ -1,  6, -18, 114,  35, -10,  3, -1 },
158 		{ -1,  6, -20, 107,  46, -13,  4, -1 },
159 		{ -2,  7, -21,  99,  57, -16,  5, -1 },
160 		{ -1,  6, -20,  89,  68, -18,  5, -1 },
161 		{ -1,  6, -20,  79,  79, -20,  6, -1 },
162 		{ -1,  5, -18,  68,  89, -20,  6, -1 },
163 		{ -1,  5, -16,  57,  99, -21,  7, -2 },
164 		{ -1,  4, -13,  46, 107, -20,  6, -1 },
165 		{ -1,  3, -10,  35, 114, -18,  6, -1 },
166 		{  0,  2,  -8,  25, 120, -15,  5, -1 },
167 		{  0,  1,  -5,  16, 125, -12,  4, -1 },
168 		{  0,  1,  -2,   7, 127,  -6,  2, -1 }
169 	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
170 		{  3, -8,  14, 111,  13,  -8,  3,  0 },
171 		{  2, -6,   7, 112,  21, -10,  3, -1 },
172 		{  2, -4,   1, 110,  28, -12,  4, -1 },
173 		{  1, -2,  -3, 106,  36, -13,  4, -1 },
174 		{  1, -1,  -7, 103,  44, -15,  4, -1 },
175 		{  1,  1, -11,  97,  53, -16,  4, -1 },
176 		{  0,  2, -13,  91,  61, -16,  4, -1 },
177 		{  0,  3, -15,  85,  69, -17,  4, -1 },
178 		{  0,  3, -16,  77,  77, -16,  3,  0 },
179 		{ -1,  4, -17,  69,  85, -15,  3,  0 },
180 		{ -1,  4, -16,  61,  91, -13,  2,  0 },
181 		{ -1,  4, -16,  53,  97, -11,  1,  1 },
182 		{ -1,  4, -15,  44, 103,  -7, -1,  1 },
183 		{ -1,  4, -13,  36, 106,  -3, -2,  1 },
184 		{ -1,  4, -12,  28, 110,   1, -4,  2 },
185 		{ -1,  3, -10,  21, 112,   7, -6,  2 }
186 	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
187 		{ 2, -11,  25,  96, 25, -11,   2,  0 },
188 		{ 2, -10,  19,  96, 31, -12,   2,  0 },
189 		{ 2,  -9,  14,  94, 37, -12,   2,  0 },
190 		{ 2,  -8,  10,  92, 43, -12,   1,  0 },
191 		{ 2,  -7,   5,  90, 49, -12,   1,  0 },
192 		{ 2,  -5,   1,  86, 55, -12,   0,  1 },
193 		{ 2,  -4,  -2,  82, 61, -11,  -1,  1 },
194 		{ 1,  -3,  -5,  77, 67,  -9,  -1,  1 },
195 		{ 1,  -2,  -7,  72, 72,  -7,  -2,  1 },
196 		{ 1,  -1,  -9,  67, 77,  -5,  -3,  1 },
197 		{ 1,  -1, -11,  61, 82,  -2,  -4,  2 },
198 		{ 1,   0, -12,  55, 86,   1,  -5,  2 },
199 		{ 0,   1, -12,  49, 90,   5,  -7,  2 },
200 		{ 0,   1, -12,  43, 92,  10,  -8,  2 },
201 		{ 0,   2, -12,  37, 94,  14,  -9,  2 },
202 		{ 0,   2, -12,  31, 96,  19, -10,  2 }
203 	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
204 		{ -1,  -8, 33,  80, 33,  -8,  -1,  0 },
205 		{ -1,  -8, 28,  80, 37,  -7,  -2,  1 },
206 		{  0,  -8, 24,  79, 41,  -7,  -2,  1 },
207 		{  0,  -8, 20,  78, 46,  -6,  -3,  1 },
208 		{  0,  -8, 16,  76, 50,  -4,  -3,  1 },
209 		{  0,  -7, 13,  74, 54,  -3,  -4,  1 },
210 		{  1,  -7, 10,  71, 58,  -1,  -5,  1 },
211 		{  1,  -6,  6,  68, 62,   1,  -5,  1 },
212 		{  1,  -6,  4,  65, 65,   4,  -6,  1 },
213 		{  1,  -5,  1,  62, 68,   6,  -6,  1 },
214 		{  1,  -5, -1,  58, 71,  10,  -7,  1 },
215 		{  1,  -4, -3,  54, 74,  13,  -7,  0 },
216 		{  1,  -3, -4,  50, 76,  16,  -8,  0 },
217 		{  1,  -3, -6,  46, 78,  20,  -8,  0 },
218 		{  1,  -2, -7,  41, 79,  24,  -8,  0 },
219 		{  1,  -2, -7,  37, 80,  28,  -8, -1 }
220 	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
221 		{ -3,   0, 35,  64, 35,   0,  -3,  0 },
222 		{ -3,  -1, 32,  64, 38,   1,  -3,  0 },
223 		{ -2,  -2, 29,  63, 41,   2,  -3,  0 },
224 		{ -2,  -3, 27,  63, 43,   4,  -4,  0 },
225 		{ -2,  -3, 24,  61, 46,   6,  -4,  0 },
226 		{ -2,  -3, 21,  60, 49,   7,  -4,  0 },
227 		{ -1,  -4, 19,  59, 51,   9,  -4, -1 },
228 		{ -1,  -4, 16,  57, 53,  12,  -4, -1 },
229 		{ -1,  -4, 14,  55, 55,  14,  -4, -1 },
230 		{ -1,  -4, 12,  53, 57,  16,  -4, -1 },
231 		{ -1,  -4,  9,  51, 59,  19,  -4, -1 },
232 		{  0,  -4,  7,  49, 60,  21,  -3, -2 },
233 		{  0,  -4,  6,  46, 61,  24,  -3, -2 },
234 		{  0,  -4,  4,  43, 63,  27,  -3, -2 },
235 		{  0,  -3,  2,  41, 63,  29,  -2, -2 },
236 		{  0,  -3,  1,  38, 64,  32,  -1, -3 }
237 	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
238 		{ -1,   8, 33,  48, 33,   8,  -1,  0 },
239 		{ -1,   7, 31,  49, 35,   9,  -1, -1 },
240 		{ -1,   6, 30,  49, 36,  10,  -1, -1 },
241 		{ -1,   5, 28,  48, 38,  12,  -1, -1 },
242 		{ -1,   4, 26,  48, 39,  13,   0, -1 },
243 		{ -1,   3, 24,  47, 41,  15,   0, -1 },
244 		{ -1,   2, 23,  47, 42,  16,   0, -1 },
245 		{ -1,   2, 21,  45, 43,  18,   1, -1 },
246 		{ -1,   1, 19,  45, 45,  19,   1, -1 },
247 		{ -1,   1, 18,  43, 45,  21,   2, -1 },
248 		{ -1,   0, 16,  42, 47,  23,   2, -1 },
249 		{ -1,   0, 15,  41, 47,  24,   3, -1 },
250 		{ -1,   0, 13,  39, 48,  26,   4, -1 },
251 		{ -1,  -1, 12,  38, 48,  28,   5, -1 },
252 		{ -1,  -1, 10,  36, 49,  30,   6, -1 },
253 		{ -1,  -1,  9,  35, 49,  31,   7, -1 }
254 	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
255 		{  2,  13, 30,  38, 30,  13,   2,  0 },
256 		{  2,  12, 29,  38, 30,  14,   3,  0 },
257 		{  2,  11, 28,  38, 31,  15,   3,  0 },
258 		{  2,  10, 26,  38, 32,  16,   4,  0 },
259 		{  1,  10, 26,  37, 33,  17,   4,  0 },
260 		{  1,   9, 24,  37, 34,  18,   5,  0 },
261 		{  1,   8, 24,  37, 34,  19,   5,  0 },
262 		{  1,   7, 22,  36, 35,  20,   6,  1 },
263 		{  1,   6, 21,  36, 36,  21,   6,  1 },
264 		{  1,   6, 20,  35, 36,  22,   7,  1 },
265 		{  0,   5, 19,  34, 37,  24,   8,  1 },
266 		{  0,   5, 18,  34, 37,  24,   9,  1 },
267 		{  0,   4, 17,  33, 37,  26,  10,  1 },
268 		{  0,   4, 16,  32, 38,  26,  10,  2 },
269 		{  0,   3, 15,  31, 38,  28,  11,  2 },
270 		{  0,   3, 14,  30, 38,  29,  12,  2 }
271 	}
272 };
273 
274 /* 4-tap Filter Coefficient */
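/*
 * Vertical polyphase filter: same layout as h_coef_8t but with 4 taps
 * per row; taps again sum to 128.
 */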
275 static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
276 	{	/* Ratio <= 65536 (~8:8) */
277 		{  0, 128,   0,  0 },
278 		{ -4, 127,   5,  0 },
279 		{ -6, 124,  11, -1 },
280 		{ -8, 118,  19, -1 },
281 		{ -8, 111,  27, -2 },
282 		{ -8, 102,  37, -3 },
283 		{ -8,  92,  48, -4 },
284 		{ -7,  81,  59, -5 },
285 		{ -6,  70,  70, -6 },
286 		{ -5,  59,  81, -7 },
287 		{ -4,  48,  92, -8 },
288 		{ -3,  37, 102, -8 },
289 		{ -2,  27, 111, -8 },
290 		{ -1,  19, 118, -8 },
291 		{ -1,  11, 124, -6 },
292 		{  0,   5, 127, -4 }
293 	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
294 		{  8, 112,   8,  0 },
295 		{  4, 111,  14, -1 },
296 		{  1, 109,  20, -2 },
297 		{ -2, 105,  27, -2 },
298 		{ -3, 100,  34, -3 },
299 		{ -5,  93,  43, -3 },
300 		{ -5,  86,  51, -4 },
301 		{ -5,  77,  60, -4 },
302 		{ -5,  69,  69, -5 },
303 		{ -4,  60,  77, -5 },
304 		{ -4,  51,  86, -5 },
305 		{ -3,  43,  93, -5 },
306 		{ -3,  34, 100, -3 },
307 		{ -2,  27, 105, -2 },
308 		{ -2,  20, 109,  1 },
309 		{ -1,  14, 111,  4 }
310 	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
311 		{ 16,  96,  16,  0 },
312 		{ 12,  97,  21, -2 },
313 		{  8,  96,  26, -2 },
314 		{  5,  93,  32, -2 },
315 		{  2,  89,  39, -2 },
316 		{  0,  84,  46, -2 },
317 		{ -1,  79,  53, -3 },
318 		{ -2,  73,  59, -2 },
319 		{ -2,  66,  66, -2 },
320 		{ -2,  59,  73, -2 },
321 		{ -3,  53,  79, -1 },
322 		{ -2,  46,  84,  0 },
323 		{ -2,  39,  89,  2 },
324 		{ -2,  32,  93,  5 },
325 		{ -2,  26,  96,  8 },
326 		{ -2,  21,  97, 12 }
327 	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
328 		{ 22,  84,  22,  0 },
329 		{ 18,  85,  26, -1 },
330 		{ 14,  84,  31, -1 },
331 		{ 11,  82,  36, -1 },
332 		{  8,  79,  42, -1 },
333 		{  6,  76,  47, -1 },
334 		{  4,  72,  52,  0 },
335 		{  2,  68,  58,  0 },
336 		{  1,  63,  63,  1 },
337 		{  0,  58,  68,  2 },
338 		{  0,  52,  72,  4 },
339 		{ -1,  47,  76,  6 },
340 		{ -1,  42,  79,  8 },
341 		{ -1,  36,  82, 11 },
342 		{ -1,  31,  84, 14 },
343 		{ -1,  26,  85, 18 }
344 	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
345 		{ 26,  76,  26,  0 },
346 		{ 22,  76,  30,  0 },
347 		{ 19,  75,  34,  0 },
348 		{ 16,  73,  38,  1 },
349 		{ 13,  71,  43,  1 },
350 		{ 10,  69,  47,  2 },
351 		{  8,  66,  51,  3 },
352 		{  6,  63,  55,  4 },
353 		{  5,  59,  59,  5 },
354 		{  4,  55,  63,  6 },
355 		{  3,  51,  66,  8 },
356 		{  2,  47,  69, 10 },
357 		{  1,  43,  71, 13 },
358 		{  1,  38,  73, 16 },
359 		{  0,  34,  75, 19 },
360 		{  0,  30,  76, 22 }
361 	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
362 		{ 29,  70,  29,  0 },
363 		{ 26,  68,  32,  2 },
364 		{ 23,  67,  36,  2 },
365 		{ 20,  66,  39,  3 },
366 		{ 17,  65,  43,  3 },
367 		{ 15,  63,  46,  4 },
368 		{ 12,  61,  50,  5 },
369 		{ 10,  58,  53,  7 },
370 		{  8,  56,  56,  8 },
371 		{  7,  53,  58, 10 },
372 		{  5,  50,  61, 12 },
373 		{  4,  46,  63, 15 },
374 		{  3,  43,  65, 17 },
375 		{  3,  39,  66, 20 },
376 		{  2,  36,  67, 23 },
377 		{  2,  32,  68, 26 }
378 	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
379 		{ 32,  64,  32,  0 },
380 		{ 28,  63,  34,  3 },
381 		{ 25,  62,  37,  4 },
382 		{ 22,  62,  40,  4 },
383 		{ 19,  61,  43,  5 },
384 		{ 17,  59,  46,  6 },
385 		{ 15,  58,  48,  7 },
386 		{ 13,  55,  51,  9 },
387 		{ 11,  53,  53, 11 },
388 		{  9,  51,  55, 13 },
389 		{  7,  48,  58, 15 },
390 		{  6,  46,  59, 17 },
391 		{  5,  43,  61, 19 },
392 		{  4,  40,  62, 22 },
393 		{  4,  37,  62, 25 },
394 		{  3,  34,  63, 28 }
395 	}
396 };
397 
398 static int gsc_sw_reset(struct gsc_context *ctx)
399 {
400 	u32 cfg;
401 	int count = GSC_RESET_TIMEOUT;
402 
403 	DRM_DEBUG_KMS("%s\n", __func__);
404 
405 	/* s/w reset */
406 	cfg = (GSC_SW_RESET_SRESET);
407 	gsc_write(cfg, GSC_SW_RESET);
408 
409 	/* wait s/w reset complete */
410 	while (count--) {
411 		cfg = gsc_read(GSC_SW_RESET);
412 		if (!cfg)
413 			break;
414 		usleep_range(1000, 2000);
415 	}
416 
417 	if (cfg) {
418 		DRM_ERROR("failed to reset gsc h/w.\n");
419 		return -EBUSY;
420 	}
421 
422 	/* reset sequence */
423 	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
424 	cfg |= (GSC_IN_BASE_ADDR_MASK |
425 		GSC_IN_BASE_ADDR_PINGPONG(0));
426 	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
427 	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
428 	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
429 
430 	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
431 	cfg |= (GSC_OUT_BASE_ADDR_MASK |
432 		GSC_OUT_BASE_ADDR_PINGPONG(0));
433 	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
434 	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
435 	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
436 
437 	return 0;
438 }
439 
440 static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
441 {
442 	u32 gscblk_cfg;
443 
444 	DRM_DEBUG_KMS("%s\n", __func__);
445 
446 	gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
447 
448 	if (enable)
449 		gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
450 				GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
451 				GSC_BLK_SW_RESET_WB_DEST(ctx->id);
452 	else
453 		gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
454 
455 	writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
456 }
457 
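/*
 * GSC_IRQ_OR_MASK and GSC_IRQ_FRMDONE_MASK are interrupt mask bits: a set
 * bit masks (disables) the interrupt, so overflow/done == true clears the
 * corresponding mask bit to enable that interrupt source.
 */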
458 static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
459 		bool overflow, bool done)
460 {
461 	u32 cfg;
462 
463 	DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]done[%d]\n", __func__,
464 			enable, overflow, done);
465 
466 	cfg = gsc_read(GSC_IRQ);
467 	cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
468 
469 	if (enable)
470 		cfg |= GSC_IRQ_ENABLE;
471 	else
472 		cfg &= ~GSC_IRQ_ENABLE;
473 
474 	if (overflow)
475 		cfg &= ~GSC_IRQ_OR_MASK;
476 	else
477 		cfg |= GSC_IRQ_OR_MASK;
478 
479 	if (done)
480 		cfg &= ~GSC_IRQ_FRMDONE_MASK;
481 	else
482 		cfg |= GSC_IRQ_FRMDONE_MASK;
483 
484 	gsc_write(cfg, GSC_IRQ);
485 }
486 
487 
488 static int gsc_src_set_fmt(struct device *dev, u32 fmt)
489 {
490 	struct gsc_context *ctx = get_gsc_context(dev);
491 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
492 	u32 cfg;
493 
494 	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
495 
496 	cfg = gsc_read(GSC_IN_CON);
497 	cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
498 		 GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
499 		 GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
500 		 GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
501 
502 	switch (fmt) {
503 	case DRM_FORMAT_RGB565:
504 		cfg |= GSC_IN_RGB565;
505 		break;
506 	case DRM_FORMAT_XRGB8888:
507 		cfg |= GSC_IN_XRGB8888;
508 		break;
509 	case DRM_FORMAT_BGRX8888:
510 		cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
511 		break;
512 	case DRM_FORMAT_YUYV:
513 		cfg |= (GSC_IN_YUV422_1P |
514 			GSC_IN_YUV422_1P_ORDER_LSB_Y |
515 			GSC_IN_CHROMA_ORDER_CBCR);
516 		break;
517 	case DRM_FORMAT_YVYU:
518 		cfg |= (GSC_IN_YUV422_1P |
519 			GSC_IN_YUV422_1P_ORDER_LSB_Y |
520 			GSC_IN_CHROMA_ORDER_CRCB);
521 		break;
522 	case DRM_FORMAT_UYVY:
523 		cfg |= (GSC_IN_YUV422_1P |
524 			GSC_IN_YUV422_1P_OEDER_LSB_C |
525 			GSC_IN_CHROMA_ORDER_CBCR);
526 		break;
527 	case DRM_FORMAT_VYUY:
528 		cfg |= (GSC_IN_YUV422_1P |
529 			GSC_IN_YUV422_1P_OEDER_LSB_C |
530 			GSC_IN_CHROMA_ORDER_CRCB);
531 		break;
532 	case DRM_FORMAT_NV21:
533 	case DRM_FORMAT_NV61:
534 		cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
535 			GSC_IN_YUV420_2P);
536 		break;
537 	case DRM_FORMAT_YUV422:
538 		cfg |= GSC_IN_YUV422_3P;
539 		break;
540 	case DRM_FORMAT_YUV420:
541 	case DRM_FORMAT_YVU420:
542 		cfg |= GSC_IN_YUV420_3P;
543 		break;
544 	case DRM_FORMAT_NV12:
545 	case DRM_FORMAT_NV16:
546 		cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
547 			GSC_IN_YUV420_2P);
548 		break;
549 	case DRM_FORMAT_NV12MT:
550 		cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
551 		break;
552 	default:
553 		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
554 		return -EINVAL;
555 	}
556 
557 	gsc_write(cfg, GSC_IN_CON);
558 
559 	return 0;
560 }
561 
562 static int gsc_src_set_transf(struct device *dev,
563 		enum drm_exynos_degree degree,
564 		enum drm_exynos_flip flip, bool *swap)
565 {
566 	struct gsc_context *ctx = get_gsc_context(dev);
567 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
568 	u32 cfg;
569 
570 	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
571 		degree, flip);
572 
573 	cfg = gsc_read(GSC_IN_CON);
574 	cfg &= ~GSC_IN_ROT_MASK;
575 
576 	switch (degree) {
577 	case EXYNOS_DRM_DEGREE_0:
578 		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
579 			cfg |= GSC_IN_ROT_XFLIP;
580 		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
581 			cfg |= GSC_IN_ROT_YFLIP;
582 		break;
583 	case EXYNOS_DRM_DEGREE_90:
584 		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
585 			cfg |= GSC_IN_ROT_90_XFLIP;
586 		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
587 			cfg |= GSC_IN_ROT_90_YFLIP;
588 		else
589 			cfg |= GSC_IN_ROT_90;
590 		break;
591 	case EXYNOS_DRM_DEGREE_180:
592 		cfg |= GSC_IN_ROT_180;
593 		break;
594 	case EXYNOS_DRM_DEGREE_270:
595 		cfg |= GSC_IN_ROT_270;
596 		break;
597 	default:
598 		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
599 		return -EINVAL;
600 	}
601 
602 	gsc_write(cfg, GSC_IN_CON);
603 
604 	ctx->rotation = cfg &
605 		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
606 	*swap = ctx->rotation;
607 
608 	return 0;
609 }
610 
611 static int gsc_src_set_size(struct device *dev, int swap,
612 		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
613 {
614 	struct gsc_context *ctx = get_gsc_context(dev);
615 	struct drm_exynos_pos img_pos = *pos;
616 	struct gsc_scaler *sc = &ctx->sc;
617 	u32 cfg;
618 
619 	DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
620 		__func__, swap, pos->x, pos->y, pos->w, pos->h);
621 
622 	if (swap) {
623 		img_pos.w = pos->h;
624 		img_pos.h = pos->w;
625 	}
626 
627 	/* pixel offset */
628 	cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
629 		GSC_SRCIMG_OFFSET_Y(img_pos.y));
630 	gsc_write(cfg, GSC_SRCIMG_OFFSET);
631 
632 	/* cropped size */
633 	cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
634 		GSC_CROPPED_HEIGHT(img_pos.h));
635 	gsc_write(cfg, GSC_CROPPED_SIZE);
636 
637 	DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
638 		__func__, sz->hsize, sz->vsize);
639 
640 	/* original size */
641 	cfg = gsc_read(GSC_SRCIMG_SIZE);
642 	cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
643 		GSC_SRCIMG_WIDTH_MASK);
644 
645 	cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
646 		GSC_SRCIMG_HEIGHT(sz->vsize));
647 
648 	gsc_write(cfg, GSC_SRCIMG_SIZE);
649 
650 	cfg = gsc_read(GSC_IN_CON);
651 	cfg &= ~GSC_IN_RGB_TYPE_MASK;
652 
653 	DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
654 		__func__, pos->w, sc->range);
655 
656 	if (pos->w >= GSC_WIDTH_ITU_709)
657 		if (sc->range)
658 			cfg |= GSC_IN_RGB_HD_WIDE;
659 		else
660 			cfg |= GSC_IN_RGB_HD_NARROW;
661 	else
662 		if (sc->range)
663 			cfg |= GSC_IN_RGB_SD_WIDE;
664 		else
665 			cfg |= GSC_IN_RGB_SD_NARROW;
666 
667 	gsc_write(cfg, GSC_IN_CON);
668 
669 	return 0;
670 }
671 
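/*
 * Each bit of GSC_IN_BASE_ADDR_*_MASK masks one source buffer slot:
 * enqueue clears the buffer's bit, dequeue sets it.
 */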
672 static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
673 		enum drm_exynos_ipp_buf_type buf_type)
674 {
675 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
676 	bool masked;
677 	u32 cfg;
678 	u32 mask = 0x00000001 << buf_id;
679 
680 	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
681 		buf_id, buf_type);
682 
683 	/* mask register set */
684 	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
685 
686 	switch (buf_type) {
687 	case IPP_BUF_ENQUEUE:
688 		masked = false;
689 		break;
690 	case IPP_BUF_DEQUEUE:
691 		masked = true;
692 		break;
693 	default:
694 		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
695 		return -EINVAL;
696 	}
697 
698 	/* sequence id */
699 	cfg &= ~mask;
700 	cfg |= masked << buf_id;
701 	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
702 	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
703 	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
704 
705 	return 0;
706 }
707 
708 static int gsc_src_set_addr(struct device *dev,
709 		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
710 		enum drm_exynos_ipp_buf_type buf_type)
711 {
712 	struct gsc_context *ctx = get_gsc_context(dev);
713 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
714 	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
715 	struct drm_exynos_ipp_property *property;
716 
717 	if (!c_node) {
718 		DRM_ERROR("failed to get c_node.\n");
719 		return -EFAULT;
720 	}
721 
722 	property = &c_node->property;
723 	if (!property) {
724 		DRM_ERROR("failed to get property.\n");
725 		return -EFAULT;
726 	}
727 
728 	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
729 		property->prop_id, buf_id, buf_type);
730 
731 	if (buf_id > GSC_MAX_SRC) {
732 		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
733 		return -EINVAL;
734 	}
735 
736 	/* address register set */
737 	switch (buf_type) {
738 	case IPP_BUF_ENQUEUE:
739 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
740 			GSC_IN_BASE_ADDR_Y(buf_id));
741 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
742 			GSC_IN_BASE_ADDR_CB(buf_id));
743 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
744 			GSC_IN_BASE_ADDR_CR(buf_id));
745 		break;
746 	case IPP_BUF_DEQUEUE:
747 		gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
748 		gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
749 		gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
750 		break;
751 	default:
752 		/* bypass */
753 		break;
754 	}
755 
756 	return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
757 }
758 
759 static struct exynos_drm_ipp_ops gsc_src_ops = {
760 	.set_fmt = gsc_src_set_fmt,
761 	.set_transf = gsc_src_set_transf,
762 	.set_size = gsc_src_set_size,
763 	.set_addr = gsc_src_set_addr,
764 };
765 
766 static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
767 {
768 	struct gsc_context *ctx = get_gsc_context(dev);
769 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
770 	u32 cfg;
771 
772 	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
773 
774 	cfg = gsc_read(GSC_OUT_CON);
775 	cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
776 		 GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
777 		 GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
778 		 GSC_OUT_GLOBAL_ALPHA_MASK);
779 
780 	switch (fmt) {
781 	case DRM_FORMAT_RGB565:
782 		cfg |= GSC_OUT_RGB565;
783 		break;
784 	case DRM_FORMAT_XRGB8888:
785 		cfg |= GSC_OUT_XRGB8888;
786 		break;
787 	case DRM_FORMAT_BGRX8888:
788 		cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
789 		break;
790 	case DRM_FORMAT_YUYV:
791 		cfg |= (GSC_OUT_YUV422_1P |
792 			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
793 			GSC_OUT_CHROMA_ORDER_CBCR);
794 		break;
795 	case DRM_FORMAT_YVYU:
796 		cfg |= (GSC_OUT_YUV422_1P |
797 			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
798 			GSC_OUT_CHROMA_ORDER_CRCB);
799 		break;
800 	case DRM_FORMAT_UYVY:
801 		cfg |= (GSC_OUT_YUV422_1P |
802 			GSC_OUT_YUV422_1P_OEDER_LSB_C |
803 			GSC_OUT_CHROMA_ORDER_CBCR);
804 		break;
805 	case DRM_FORMAT_VYUY:
806 		cfg |= (GSC_OUT_YUV422_1P |
807 			GSC_OUT_YUV422_1P_OEDER_LSB_C |
808 			GSC_OUT_CHROMA_ORDER_CRCB);
809 		break;
810 	case DRM_FORMAT_NV21:
811 	case DRM_FORMAT_NV61:
812 		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
813 		break;
814 	case DRM_FORMAT_YUV422:
815 	case DRM_FORMAT_YUV420:
816 	case DRM_FORMAT_YVU420:
817 		cfg |= GSC_OUT_YUV420_3P;
818 		break;
819 	case DRM_FORMAT_NV12:
820 	case DRM_FORMAT_NV16:
821 		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
822 			GSC_OUT_YUV420_2P);
823 		break;
824 	case DRM_FORMAT_NV12MT:
825 		cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
826 		break;
827 	default:
828 		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
829 		return -EINVAL;
830 	}
831 
832 	gsc_write(cfg, GSC_OUT_CON);
833 
834 	return 0;
835 }
836 
837 static int gsc_dst_set_transf(struct device *dev,
838 		enum drm_exynos_degree degree,
839 		enum drm_exynos_flip flip, bool *swap)
840 {
841 	struct gsc_context *ctx = get_gsc_context(dev);
842 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
843 	u32 cfg;
844 
845 	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
846 		degree, flip);
847 
848 	cfg = gsc_read(GSC_IN_CON);
849 	cfg &= ~GSC_IN_ROT_MASK;
850 
851 	switch (degree) {
852 	case EXYNOS_DRM_DEGREE_0:
853 		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
854 			cfg |= GSC_IN_ROT_XFLIP;
855 		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
856 			cfg |= GSC_IN_ROT_YFLIP;
857 		break;
858 	case EXYNOS_DRM_DEGREE_90:
859 		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
860 			cfg |= GSC_IN_ROT_90_XFLIP;
861 		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
862 			cfg |= GSC_IN_ROT_90_YFLIP;
863 		else
864 			cfg |= GSC_IN_ROT_90;
865 		break;
866 	case EXYNOS_DRM_DEGREE_180:
867 		cfg |= GSC_IN_ROT_180;
868 		break;
869 	case EXYNOS_DRM_DEGREE_270:
870 		cfg |= GSC_IN_ROT_270;
871 		break;
872 	default:
873 		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
874 		return -EINVAL;
875 	}
876 
877 	gsc_write(cfg, GSC_IN_CON);
878 
879 	ctx->rotation = cfg &
880 		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
881 	*swap = ctx->rotation;
882 
883 	return 0;
884 }
885 
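/*
 * Pick the pre-scaler decimation factor (1, 2 or 4) so the main scaler only
 * covers the remaining ratio; downscaling by 8:1 or more is rejected.
 */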
886 static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
887 {
888 	DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
889 
890 	if (src >= dst * 8) {
891 		DRM_ERROR("failed to make ratio and shift.\n");
892 		return -EINVAL;
893 	} else if (src >= dst * 4)
894 		*ratio = 4;
895 	else if (src >= dst * 2)
896 		*ratio = 2;
897 	else
898 		*ratio = 1;
899 
900 	return 0;
901 }
902 
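/* The pre-scaler shift factor is log2(pre_hratio * pre_vratio). */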
903 static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
904 {
905 	if (hratio == 4 && vratio == 4)
906 		*shfactor = 4;
907 	else if ((hratio == 4 && vratio == 2) ||
908 		 (hratio == 2 && vratio == 4))
909 		*shfactor = 3;
910 	else if ((hratio == 4 && vratio == 1) ||
911 		 (hratio == 1 && vratio == 4) ||
912 		 (hratio == 2 && vratio == 2))
913 		*shfactor = 2;
914 	else if (hratio == 1 && vratio == 1)
915 		*shfactor = 0;
916 	else
917 		*shfactor = 1;
918 }
919 
920 static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
921 		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
922 {
923 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
924 	u32 cfg;
925 	u32 src_w, src_h, dst_w, dst_h;
926 	int ret = 0;
927 
928 	src_w = src->w;
929 	src_h = src->h;
930 
931 	if (ctx->rotation) {
932 		dst_w = dst->h;
933 		dst_h = dst->w;
934 	} else {
935 		dst_w = dst->w;
936 		dst_h = dst->h;
937 	}
938 
939 	ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
940 	if (ret) {
941 		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
942 		return ret;
943 	}
944 
945 	ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
946 	if (ret) {
947 		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
948 		return ret;
949 	}
950 
951 	DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
952 		__func__, sc->pre_hratio, sc->pre_vratio);
953 
954 	sc->main_hratio = (src_w << 16) / dst_w;
955 	sc->main_vratio = (src_h << 16) / dst_h;
956 
957 	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
958 		__func__, sc->main_hratio, sc->main_vratio);
959 
960 	gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
961 		&sc->pre_shfactor);
962 
963 	DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
964 		sc->pre_shfactor);
965 
966 	cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
967 		GSC_PRESC_H_RATIO(sc->pre_hratio) |
968 		GSC_PRESC_V_RATIO(sc->pre_vratio));
969 	gsc_write(cfg, GSC_PRE_SCALE_RATIO);
970 
971 	return ret;
972 }
973 
974 static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
975 {
976 	int i, j, k, sc_ratio;
977 
978 	if (main_hratio <= GSC_SC_UP_MAX_RATIO)
979 		sc_ratio = 0;
980 	else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
981 		sc_ratio = 1;
982 	else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
983 		sc_ratio = 2;
984 	else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
985 		sc_ratio = 3;
986 	else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
987 		sc_ratio = 4;
988 	else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
989 		sc_ratio = 5;
990 	else
991 		sc_ratio = 6;
992 
993 	for (i = 0; i < GSC_COEF_PHASE; i++)
994 		for (j = 0; j < GSC_COEF_H_8T; j++)
995 			for (k = 0; k < GSC_COEF_DEPTH; k++)
996 				gsc_write(h_coef_8t[sc_ratio][i][j],
997 					GSC_HCOEF(i, j, k));
998 }
999 
1000 static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
1001 {
1002 	int i, j, k, sc_ratio;
1003 
1004 	if (main_vratio <= GSC_SC_UP_MAX_RATIO)
1005 		sc_ratio = 0;
1006 	else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
1007 		sc_ratio = 1;
1008 	else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
1009 		sc_ratio = 2;
1010 	else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
1011 		sc_ratio = 3;
1012 	else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
1013 		sc_ratio = 4;
1014 	else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
1015 		sc_ratio = 5;
1016 	else
1017 		sc_ratio = 6;
1018 
1019 	for (i = 0; i < GSC_COEF_PHASE; i++)
1020 		for (j = 0; j < GSC_COEF_V_4T; j++)
1021 			for (k = 0; k < GSC_COEF_DEPTH; k++)
1022 				gsc_write(v_coef_4t[sc_ratio][i][j],
1023 					GSC_VCOEF(i, j, k));
1024 }
1025 
1026 static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
1027 {
1028 	u32 cfg;
1029 
1030 	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
1031 		__func__, sc->main_hratio, sc->main_vratio);
1032 
1033 	gsc_set_h_coef(ctx, sc->main_hratio);
1034 	cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
1035 	gsc_write(cfg, GSC_MAIN_H_RATIO);
1036 
1037 	gsc_set_v_coef(ctx, sc->main_vratio);
1038 	cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
1039 	gsc_write(cfg, GSC_MAIN_V_RATIO);
1040 }
1041 
1042 static int gsc_dst_set_size(struct device *dev, int swap,
1043 		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
1044 {
1045 	struct gsc_context *ctx = get_gsc_context(dev);
1046 	struct drm_exynos_pos img_pos = *pos;
1047 	struct gsc_scaler *sc = &ctx->sc;
1048 	u32 cfg;
1049 
1050 	DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
1051 		__func__, swap, pos->x, pos->y, pos->w, pos->h);
1052 
1053 	if (swap) {
1054 		img_pos.w = pos->h;
1055 		img_pos.h = pos->w;
1056 	}
1057 
1058 	/* pixel offset */
1059 	cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
1060 		GSC_DSTIMG_OFFSET_Y(pos->y));
1061 	gsc_write(cfg, GSC_DSTIMG_OFFSET);
1062 
1063 	/* scaled size */
1064 	cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
1065 	gsc_write(cfg, GSC_SCALED_SIZE);
1066 
1067 	DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
1068 		__func__, sz->hsize, sz->vsize);
1069 
1070 	/* original size */
1071 	cfg = gsc_read(GSC_DSTIMG_SIZE);
1072 	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
1073 		GSC_DSTIMG_WIDTH_MASK);
1074 	cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
1075 		GSC_DSTIMG_HEIGHT(sz->vsize));
1076 	gsc_write(cfg, GSC_DSTIMG_SIZE);
1077 
1078 	cfg = gsc_read(GSC_OUT_CON);
1079 	cfg &= ~GSC_OUT_RGB_TYPE_MASK;
1080 
1081 	DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
1082 		__func__, pos->w, sc->range);
1083 
1084 	if (pos->w >= GSC_WIDTH_ITU_709)
1085 		if (sc->range)
1086 			cfg |= GSC_OUT_RGB_HD_WIDE;
1087 		else
1088 			cfg |= GSC_OUT_RGB_HD_NARROW;
1089 	else
1090 		if (sc->range)
1091 			cfg |= GSC_OUT_RGB_SD_WIDE;
1092 		else
1093 			cfg |= GSC_OUT_RGB_SD_NARROW;
1094 
1095 	gsc_write(cfg, GSC_OUT_CON);
1096 
1097 	return 0;
1098 }
1099 
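/* Count the destination buffer slots currently enqueued (i.e. not masked). */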
1100 static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
1101 {
1102 	u32 cfg, i, buf_num = GSC_REG_SZ;
1103 	u32 mask = 0x00000001;
1104 
1105 	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1106 
1107 	for (i = 0; i < GSC_REG_SZ; i++)
1108 		if (cfg & (mask << i))
1109 			buf_num--;
1110 
1111 	DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
1112 
1113 	return buf_num;
1114 }
1115 
1116 static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
1117 		enum drm_exynos_ipp_buf_type buf_type)
1118 {
1119 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1120 	bool masked;
1121 	u32 cfg;
1122 	u32 mask = 0x00000001 << buf_id;
1123 	int ret = 0;
1124 
1125 	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
1126 		buf_id, buf_type);
1127 
1128 	mutex_lock(&ctx->lock);
1129 
1130 	/* mask register set */
1131 	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1132 
1133 	switch (buf_type) {
1134 	case IPP_BUF_ENQUEUE:
1135 		masked = false;
1136 		break;
1137 	case IPP_BUF_DEQUEUE:
1138 		masked = true;
1139 		break;
1140 	default:
1141 		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
1142 		ret =  -EINVAL;
1143 		goto err_unlock;
1144 	}
1145 
1146 	/* sequence id */
1147 	cfg &= ~mask;
1148 	cfg |= masked << buf_id;
1149 	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
1150 	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
1151 	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
1152 
1153 	/* interrupt enable */
1154 	if (buf_type == IPP_BUF_ENQUEUE &&
1155 	    gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
1156 		gsc_handle_irq(ctx, true, false, true);
1157 
1158 	/* interrupt disable */
1159 	if (buf_type == IPP_BUF_DEQUEUE &&
1160 	    gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
1161 		gsc_handle_irq(ctx, false, false, true);
1162 
1163 err_unlock:
1164 	mutex_unlock(&ctx->lock);
1165 	return ret;
1166 }
1167 
1168 static int gsc_dst_set_addr(struct device *dev,
1169 		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
1170 		enum drm_exynos_ipp_buf_type buf_type)
1171 {
1172 	struct gsc_context *ctx = get_gsc_context(dev);
1173 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1174 	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1175 	struct drm_exynos_ipp_property *property;
1176 
1177 	if (!c_node) {
1178 		DRM_ERROR("failed to get c_node.\n");
1179 		return -EFAULT;
1180 	}
1181 
1182 	property = &c_node->property;
1183 	if (!property) {
1184 		DRM_ERROR("failed to get property.\n");
1185 		return -EFAULT;
1186 	}
1187 
1188 	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1189 		property->prop_id, buf_id, buf_type);
1190 
1191 	if (buf_id > GSC_MAX_DST) {
1192 		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
1193 		return -EINVAL;
1194 	}
1195 
1196 	/* address register set */
1197 	switch (buf_type) {
1198 	case IPP_BUF_ENQUEUE:
1199 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
1200 			GSC_OUT_BASE_ADDR_Y(buf_id));
1201 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1202 			GSC_OUT_BASE_ADDR_CB(buf_id));
1203 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1204 			GSC_OUT_BASE_ADDR_CR(buf_id));
1205 		break;
1206 	case IPP_BUF_DEQUEUE:
1207 		gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
1208 		gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
1209 		gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
1210 		break;
1211 	default:
1212 		/* bypass */
1213 		break;
1214 	}
1215 
1216 	return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
1217 }
1218 
1219 static struct exynos_drm_ipp_ops gsc_dst_ops = {
1220 	.set_fmt = gsc_dst_set_fmt,
1221 	.set_transf = gsc_dst_set_transf,
1222 	.set_size = gsc_dst_set_size,
1223 	.set_addr = gsc_dst_set_addr,
1224 };
1225 
1226 static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1227 {
1228 	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1229 
1230 	if (enable) {
1231 		clk_enable(ctx->gsc_clk);
1232 		ctx->suspended = false;
1233 	} else {
1234 		clk_disable(ctx->gsc_clk);
1235 		ctx->suspended = true;
1236 	}
1237 
1238 	return 0;
1239 }
1240 
1241 static int gsc_get_src_buf_index(struct gsc_context *ctx)
1242 {
1243 	u32 cfg, curr_index, i;
1244 	u32 buf_id = GSC_MAX_SRC;
1245 	int ret;
1246 
1247 	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1248 
1249 	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
1250 	curr_index = GSC_IN_CURR_GET_INDEX(cfg);
1251 
1252 	for (i = curr_index; i < GSC_MAX_SRC; i++) {
1253 		if (!((cfg >> i) & 0x1)) {
1254 			buf_id = i;
1255 			break;
1256 		}
1257 	}
1258 
1259 	if (buf_id == GSC_MAX_SRC) {
1260 		DRM_ERROR("failed to get in buffer index.\n");
1261 		return -EINVAL;
1262 	}
1263 
1264 	ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1265 	if (ret < 0) {
1266 		DRM_ERROR("failed to dequeue.\n");
1267 		return ret;
1268 	}
1269 
1270 	DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1271 		curr_index, buf_id);
1272 
1273 	return buf_id;
1274 }
1275 
1276 static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1277 {
1278 	u32 cfg, curr_index, i;
1279 	u32 buf_id = GSC_MAX_DST;
1280 	int ret;
1281 
1282 	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1283 
1284 	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1285 	curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
1286 
1287 	for (i = curr_index; i < GSC_MAX_DST; i++) {
1288 		if (!((cfg >> i) & 0x1)) {
1289 			buf_id = i;
1290 			break;
1291 		}
1292 	}
1293 
1294 	if (buf_id == GSC_MAX_DST) {
1295 		DRM_ERROR("failed to get out buffer index.\n");
1296 		return -EINVAL;
1297 	}
1298 
1299 	ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1300 	if (ret < 0) {
1301 		DRM_ERROR("failed to dequeue.\n");
1302 		return ret;
1303 	}
1304 
1305 	DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1306 		curr_index, buf_id);
1307 
1308 	return buf_id;
1309 }
1310 
1311 static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1312 {
1313 	struct gsc_context *ctx = dev_id;
1314 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1315 	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1316 	struct drm_exynos_ipp_event_work *event_work =
1317 		c_node->event_work;
1318 	u32 status;
1319 	int buf_id[EXYNOS_DRM_OPS_MAX];
1320 
1321 	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1322 
1323 	status = gsc_read(GSC_IRQ);
1324 	if (status & GSC_IRQ_STATUS_OR_IRQ) {
1325 		dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
1326 			ctx->id, status);
1327 		return IRQ_NONE;
1328 	}
1329 
1330 	if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
1331 		dev_dbg(ippdrv->dev, "frame done occurred at %d, status 0x%x.\n",
1332 			ctx->id, status);
1333 
1334 		buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
1335 		if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
1336 			return IRQ_HANDLED;
1337 
1338 		buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
1339 		if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
1340 			return IRQ_HANDLED;
1341 
1342 		DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
1343 			buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
1344 
1345 		event_work->ippdrv = ippdrv;
1346 		event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
1347 			buf_id[EXYNOS_DRM_OPS_SRC];
1348 		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
1349 			buf_id[EXYNOS_DRM_OPS_DST];
1350 		queue_work(ippdrv->event_workq,
1351 			(struct work_struct *)event_work);
1352 	}
1353 
1354 	return IRQ_HANDLED;
1355 }
1356 
1357 static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1358 {
1359 	struct drm_exynos_ipp_prop_list *prop_list;
1360 
1361 	DRM_DEBUG_KMS("%s\n", __func__);
1362 
1363 	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1364 	if (!prop_list) {
1365 		DRM_ERROR("failed to alloc property list.\n");
1366 		return -ENOMEM;
1367 	}
1368 
1369 	prop_list->version = 1;
1370 	prop_list->writeback = 1;
1371 	prop_list->refresh_min = GSC_REFRESH_MIN;
1372 	prop_list->refresh_max = GSC_REFRESH_MAX;
1373 	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1374 				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1375 	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1376 				(1 << EXYNOS_DRM_DEGREE_90) |
1377 				(1 << EXYNOS_DRM_DEGREE_180) |
1378 				(1 << EXYNOS_DRM_DEGREE_270);
1379 	prop_list->csc = 1;
1380 	prop_list->crop = 1;
1381 	prop_list->crop_max.hsize = GSC_CROP_MAX;
1382 	prop_list->crop_max.vsize = GSC_CROP_MAX;
1383 	prop_list->crop_min.hsize = GSC_CROP_MIN;
1384 	prop_list->crop_min.vsize = GSC_CROP_MIN;
1385 	prop_list->scale = 1;
1386 	prop_list->scale_max.hsize = GSC_SCALE_MAX;
1387 	prop_list->scale_max.vsize = GSC_SCALE_MAX;
1388 	prop_list->scale_min.hsize = GSC_SCALE_MIN;
1389 	prop_list->scale_min.vsize = GSC_SCALE_MIN;
1390 
1391 	ippdrv->prop_list = prop_list;
1392 
1393 	return 0;
1394 }
1395 
1396 static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
1397 {
1398 	switch (flip) {
1399 	case EXYNOS_DRM_FLIP_NONE:
1400 	case EXYNOS_DRM_FLIP_VERTICAL:
1401 	case EXYNOS_DRM_FLIP_HORIZONTAL:
1402 	case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
1403 		return true;
1404 	default:
1405 		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
1406 		return false;
1407 	}
1408 }
1409 
1410 static int gsc_ippdrv_check_property(struct device *dev,
1411 		struct drm_exynos_ipp_property *property)
1412 {
1413 	struct gsc_context *ctx = get_gsc_context(dev);
1414 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1415 	struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
1416 	struct drm_exynos_ipp_config *config;
1417 	struct drm_exynos_pos *pos;
1418 	struct drm_exynos_sz *sz;
1419 	bool swap;
1420 	int i;
1421 
1422 	DRM_DEBUG_KMS("%s\n", __func__);
1423 
1424 	for_each_ipp_ops(i) {
1425 		if ((i == EXYNOS_DRM_OPS_SRC) &&
1426 			(property->cmd == IPP_CMD_WB))
1427 			continue;
1428 
1429 		config = &property->config[i];
1430 		pos = &config->pos;
1431 		sz = &config->sz;
1432 
1433 		/* check for flip */
1434 		if (!gsc_check_drm_flip(config->flip)) {
1435 			DRM_ERROR("invalid flip.\n");
1436 			goto err_property;
1437 		}
1438 
1439 		/* check for degree */
1440 		switch (config->degree) {
1441 		case EXYNOS_DRM_DEGREE_90:
1442 		case EXYNOS_DRM_DEGREE_270:
1443 			swap = true;
1444 			break;
1445 		case EXYNOS_DRM_DEGREE_0:
1446 		case EXYNOS_DRM_DEGREE_180:
1447 			swap = false;
1448 			break;
1449 		default:
1450 			DRM_ERROR("invalid degree.\n");
1451 			goto err_property;
1452 		}
1453 
1454 		/* check for buffer bound */
1455 		if ((pos->x + pos->w > sz->hsize) ||
1456 			(pos->y + pos->h > sz->vsize)) {
1457 			DRM_ERROR("out of buf bound.\n");
1458 			goto err_property;
1459 		}
1460 
1461 		/* check for crop */
1462 		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
1463 			if (swap) {
1464 				if ((pos->h < pp->crop_min.hsize) ||
1465 					(sz->vsize > pp->crop_max.hsize) ||
1466 					(pos->w < pp->crop_min.vsize) ||
1467 					(sz->hsize > pp->crop_max.vsize)) {
1468 					DRM_ERROR("out of crop size.\n");
1469 					goto err_property;
1470 				}
1471 			} else {
1472 				if ((pos->w < pp->crop_min.hsize) ||
1473 					(sz->hsize > pp->crop_max.hsize) ||
1474 					(pos->h < pp->crop_min.vsize) ||
1475 					(sz->vsize > pp->crop_max.vsize)) {
1476 					DRM_ERROR("out of crop size.\n");
1477 					goto err_property;
1478 				}
1479 			}
1480 		}
1481 
1482 		/* check for scale */
1483 		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
1484 			if (swap) {
1485 				if ((pos->h < pp->scale_min.hsize) ||
1486 					(sz->vsize > pp->scale_max.hsize) ||
1487 					(pos->w < pp->scale_min.vsize) ||
1488 					(sz->hsize > pp->scale_max.vsize)) {
1489 					DRM_ERROR("out of scale size.\n");
1490 					goto err_property;
1491 				}
1492 			} else {
1493 				if ((pos->w < pp->scale_min.hsize) ||
1494 					(sz->hsize > pp->scale_max.hsize) ||
1495 					(pos->h < pp->scale_min.vsize) ||
1496 					(sz->vsize > pp->scale_max.vsize)) {
1497 					DRM_ERROR("out of scale size.\n");
1498 					goto err_property;
1499 				}
1500 			}
1501 		}
1502 	}
1503 
1504 	return 0;
1505 
1506 err_property:
1507 	for_each_ipp_ops(i) {
1508 		if ((i == EXYNOS_DRM_OPS_SRC) &&
1509 			(property->cmd == IPP_CMD_WB))
1510 			continue;
1511 
1512 		config = &property->config[i];
1513 		pos = &config->pos;
1514 		sz = &config->sz;
1515 
1516 		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
1517 			i ? "dst" : "src", config->flip, config->degree,
1518 			pos->x, pos->y, pos->w, pos->h,
1519 			sz->hsize, sz->vsize);
1520 	}
1521 
1522 	return -EINVAL;
1523 }
1524 
1525 
1526 static int gsc_ippdrv_reset(struct device *dev)
1527 {
1528 	struct gsc_context *ctx = get_gsc_context(dev);
1529 	struct gsc_scaler *sc = &ctx->sc;
1530 	int ret;
1531 
1532 	DRM_DEBUG_KMS("%s\n", __func__);
1533 
1534 	/* reset h/w block */
1535 	ret = gsc_sw_reset(ctx);
1536 	if (ret < 0) {
1537 		dev_err(dev, "failed to reset hardware.\n");
1538 		return ret;
1539 	}
1540 
1541 	/* scaler setting */
1542 	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1543 	sc->range = true;
1544 
1545 	return 0;
1546 }
1547 
1548 static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1549 {
1550 	struct gsc_context *ctx = get_gsc_context(dev);
1551 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1552 	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1553 	struct drm_exynos_ipp_property *property;
1554 	struct drm_exynos_ipp_config *config;
1555 	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX];
1556 	struct drm_exynos_ipp_set_wb set_wb;
1557 	u32 cfg;
1558 	int ret, i;
1559 
1560 	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1561 
1562 	if (!c_node) {
1563 		DRM_ERROR("failed to get c_node.\n");
1564 		return -EINVAL;
1565 	}
1566 
1567 	property = &c_node->property;
1568 	if (!property) {
1569 		DRM_ERROR("failed to get property.\n");
1570 		return -EINVAL;
1571 	}
1572 
1573 	gsc_handle_irq(ctx, true, false, true);
1574 
1575 	for_each_ipp_ops(i) {
1576 		config = &property->config[i];
1577 		img_pos[i] = config->pos;
1578 	}
1579 
1580 	switch (cmd) {
1581 	case IPP_CMD_M2M:
1582 		/* enable one shot */
1583 		cfg = gsc_read(GSC_ENABLE);
1584 		cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
1585 			GSC_ENABLE_CLK_GATE_MODE_MASK);
1586 		cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
1587 		gsc_write(cfg, GSC_ENABLE);
1588 
1589 		/* src dma memory */
1590 		cfg = gsc_read(GSC_IN_CON);
1591 		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1592 		cfg |= GSC_IN_PATH_MEMORY;
1593 		gsc_write(cfg, GSC_IN_CON);
1594 
1595 		/* dst dma memory */
1596 		cfg = gsc_read(GSC_OUT_CON);
1597 		cfg |= GSC_OUT_PATH_MEMORY;
1598 		gsc_write(cfg, GSC_OUT_CON);
1599 		break;
1600 	case IPP_CMD_WB:
1601 		set_wb.enable = 1;
1602 		set_wb.refresh = property->refresh_rate;
1603 		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1604 		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1605 
1606 		/* src local path */
1607 		cfg = gsc_read(GSC_IN_CON);
1608 		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1609 		cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
1610 		gsc_write(cfg, GSC_IN_CON);
1611 
1612 		/* dst dma memory */
1613 		cfg = gsc_read(GSC_OUT_CON);
1614 		cfg |= GSC_OUT_PATH_MEMORY;
1615 		gsc_write(cfg, GSC_OUT_CON);
1616 		break;
1617 	case IPP_CMD_OUTPUT:
1618 		/* src dma memory */
1619 		cfg = gsc_read(GSC_IN_CON);
1620 		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1621 		cfg |= GSC_IN_PATH_MEMORY;
1622 		gsc_write(cfg, GSC_IN_CON);
1623 
1624 		/* dst local path */
1625 		cfg = gsc_read(GSC_OUT_CON);
1626 		cfg |= GSC_OUT_PATH_MEMORY;
1627 		gsc_write(cfg, GSC_OUT_CON);
1628 		break;
1629 	default:
1630 		ret = -EINVAL;
1631 		dev_err(dev, "invalid operations.\n");
1632 		return ret;
1633 	}
1634 
1635 	ret = gsc_set_prescaler(ctx, &ctx->sc,
1636 		&img_pos[EXYNOS_DRM_OPS_SRC],
1637 		&img_pos[EXYNOS_DRM_OPS_DST]);
1638 	if (ret) {
1639 		dev_err(dev, "failed to set prescaler.\n");
1640 		return ret;
1641 	}
1642 
1643 	gsc_set_scaler(ctx, &ctx->sc);
1644 
1645 	cfg = gsc_read(GSC_ENABLE);
1646 	cfg |= GSC_ENABLE_ON;
1647 	gsc_write(cfg, GSC_ENABLE);
1648 
1649 	return 0;
1650 }
1651 
1652 static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1653 {
1654 	struct gsc_context *ctx = get_gsc_context(dev);
1655 	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1656 	u32 cfg;
1657 
1658 	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1659 
1660 	switch (cmd) {
1661 	case IPP_CMD_M2M:
1662 		/* bypass */
1663 		break;
1664 	case IPP_CMD_WB:
1665 		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1666 		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1667 		break;
1668 	case IPP_CMD_OUTPUT:
1669 	default:
1670 		dev_err(dev, "invalid operations.\n");
1671 		break;
1672 	}
1673 
1674 	gsc_handle_irq(ctx, false, false, true);
1675 
1676 	/* reset sequence */
1677 	gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
1678 	gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
1679 	gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
1680 
1681 	cfg = gsc_read(GSC_ENABLE);
1682 	cfg &= ~GSC_ENABLE_ON;
1683 	gsc_write(cfg, GSC_ENABLE);
1684 }
1685 
1686 static int __devinit gsc_probe(struct platform_device *pdev)
1687 {
1688 	struct device *dev = &pdev->dev;
1689 	struct gsc_context *ctx;
1690 	struct resource *res;
1691 	struct exynos_drm_ippdrv *ippdrv;
1692 	int ret;
1693 
1694 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1695 	if (!ctx)
1696 		return -ENOMEM;
1697 
1698 	/* clock control */
1699 	ctx->gsc_clk = clk_get(dev, "gscl");
1700 	if (IS_ERR(ctx->gsc_clk)) {
1701 		dev_err(dev, "failed to get gsc clock.\n");
1702 		ret = PTR_ERR(ctx->gsc_clk);
1703 		goto err_ctx;
1704 	}
1705 
1706 	/* resource memory */
1707 	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1708 	if (!ctx->regs_res) {
1709 		dev_err(dev, "failed to find registers.\n");
1710 		ret = -ENOENT;
1711 		goto err_clk;
1712 	}
1713 
1714 	ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
1715 	if (!ctx->regs) {
1716 		dev_err(dev, "failed to map registers.\n");
1717 		ret = -ENXIO;
1718 		goto err_clk;
1719 	}
1720 
1721 	/* resource irq */
1722 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1723 	if (!res) {
1724 		dev_err(dev, "failed to request irq resource.\n");
1725 		ret = -ENOENT;
1726 		goto err_get_regs;
1727 	}
1728 
1729 	ctx->irq = res->start;
1730 	ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
1731 		IRQF_ONESHOT, "drm_gsc", ctx);
1732 	if (ret < 0) {
1733 		dev_err(dev, "failed to request irq.\n");
1734 		goto err_get_regs;
1735 	}
1736 
1737 	/* context initialization */
1738 	ctx->id = pdev->id;
1739 
1740 	ippdrv = &ctx->ippdrv;
1741 	ippdrv->dev = dev;
1742 	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
1743 	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
1744 	ippdrv->check_property = gsc_ippdrv_check_property;
1745 	ippdrv->reset = gsc_ippdrv_reset;
1746 	ippdrv->start = gsc_ippdrv_start;
1747 	ippdrv->stop = gsc_ippdrv_stop;
1748 	ret = gsc_init_prop_list(ippdrv);
1749 	if (ret < 0) {
1750 		dev_err(dev, "failed to init property list.\n");
1751 		goto err_get_irq;
1752 	}
1753 
1754 	DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
1755 		(int)ippdrv);
1756 
1757 	mutex_init(&ctx->lock);
1758 	platform_set_drvdata(pdev, ctx);
1759 
1760 	pm_runtime_set_active(dev);
1761 	pm_runtime_enable(dev);
1762 
1763 	ret = exynos_drm_ippdrv_register(ippdrv);
1764 	if (ret < 0) {
1765 		dev_err(dev, "failed to register drm gsc device.\n");
1766 		goto err_ippdrv_register;
1767 	}
1768 
1769 	dev_info(&pdev->dev, "drm gsc registered successfully.\n");
1770 
1771 	return 0;
1772 
1773 err_ippdrv_register:
1774 	devm_kfree(dev, ippdrv->prop_list);
1775 	pm_runtime_disable(dev);
1776 err_get_irq:
1777 	free_irq(ctx->irq, ctx);
1778 err_get_regs:
1779 	devm_iounmap(dev, ctx->regs);
1780 err_clk:
1781 	clk_put(ctx->gsc_clk);
1782 err_ctx:
1783 	devm_kfree(dev, ctx);
1784 	return ret;
1785 }
1786 
1787 static int __devexit gsc_remove(struct platform_device *pdev)
1788 {
1789 	struct device *dev = &pdev->dev;
1790 	struct gsc_context *ctx = get_gsc_context(dev);
1791 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1792 
1793 	devm_kfree(dev, ippdrv->prop_list);
1794 	exynos_drm_ippdrv_unregister(ippdrv);
1795 	mutex_destroy(&ctx->lock);
1796 
1797 	pm_runtime_set_suspended(dev);
1798 	pm_runtime_disable(dev);
1799 
1800 	free_irq(ctx->irq, ctx);
1801 	devm_iounmap(dev, ctx->regs);
1802 
1803 	clk_put(ctx->gsc_clk);
1804 
1805 	devm_kfree(dev, ctx);
1806 
1807 	return 0;
1808 }
1809 
1810 #ifdef CONFIG_PM_SLEEP
1811 static int gsc_suspend(struct device *dev)
1812 {
1813 	struct gsc_context *ctx = get_gsc_context(dev);
1814 
1815 	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1816 
1817 	if (pm_runtime_suspended(dev))
1818 		return 0;
1819 
1820 	return gsc_clk_ctrl(ctx, false);
1821 }
1822 
1823 static int gsc_resume(struct device *dev)
1824 {
1825 	struct gsc_context *ctx = get_gsc_context(dev);
1826 
1827 	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1828 
1829 	if (!pm_runtime_suspended(dev))
1830 		return gsc_clk_ctrl(ctx, true);
1831 
1832 	return 0;
1833 }
1834 #endif
1835 
1836 #ifdef CONFIG_PM_RUNTIME
1837 static int gsc_runtime_suspend(struct device *dev)
1838 {
1839 	struct gsc_context *ctx = get_gsc_context(dev);
1840 
1841 	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1842 
1843 	return  gsc_clk_ctrl(ctx, false);
1844 }
1845 
1846 static int gsc_runtime_resume(struct device *dev)
1847 {
1848 	struct gsc_context *ctx = get_gsc_context(dev);
1849 
1850 	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1851 
1852 	return  gsc_clk_ctrl(ctx, true);
1853 }
1854 #endif
1855 
1856 static const struct dev_pm_ops gsc_pm_ops = {
1857 	SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
1858 	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
1859 };
1860 
1861 struct platform_driver gsc_driver = {
1862 	.probe		= gsc_probe,
1863 	.remove		= __devexit_p(gsc_remove),
1864 	.driver		= {
1865 		.name	= "exynos-drm-gsc",
1866 		.owner	= THIS_MODULE,
1867 		.pm	= &gsc_pm_ops,
1868 	},
1869 };
1870 
1871