1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2015 MediaTek Inc.
4 */
5
6 #include <drm/drm_blend.h>
7 #include <drm/drm_fourcc.h>
8 #include <drm/drm_framebuffer.h>
9
10 #include <linux/clk.h>
11 #include <linux/component.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/platform_device.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/soc/mediatek/mtk-cmdq.h>
17
18 #include "mtk_crtc.h"
19 #include "mtk_ddp_comp.h"
20 #include "mtk_disp_drv.h"
21 #include "mtk_drm_drv.h"
22
23 #define DISP_REG_OVL_INTEN 0x0004
24 #define OVL_FME_CPL_INT BIT(1)
25 #define DISP_REG_OVL_INTSTA 0x0008
26 #define DISP_REG_OVL_EN 0x000c
27 #define DISP_REG_OVL_RST 0x0014
28 #define DISP_REG_OVL_ROI_SIZE 0x0020
29 #define DISP_REG_OVL_DATAPATH_CON 0x0024
30 #define OVL_LAYER_SMI_ID_EN BIT(0)
31 #define OVL_BGCLR_SEL_IN BIT(2)
32 #define OVL_LAYER_AFBC_EN(n) BIT(4+n)
33 #define DISP_REG_OVL_ROI_BGCLR 0x0028
34 #define DISP_REG_OVL_SRC_CON 0x002c
35 #define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n))
36 #define DISP_REG_OVL_SRC_SIZE(n) (0x0038 + 0x20 * (n))
37 #define DISP_REG_OVL_OFFSET(n) (0x003c + 0x20 * (n))
38 #define DISP_REG_OVL_PITCH_MSB(n) (0x0040 + 0x20 * (n))
39 #define OVL_PITCH_MSB_2ND_SUBBUF BIT(16)
40 #define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n))
41 #define OVL_CONST_BLEND BIT(28)
42 #define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n))
43 #define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n))
44 #define DISP_REG_OVL_ADDR_MT2701 0x0040
45 #define DISP_REG_OVL_CLRFMT_EXT 0x02d0
46 #define OVL_CON_CLRFMT_BIT_DEPTH_MASK(n) (GENMASK(1, 0) << (4 * (n)))
47 #define OVL_CON_CLRFMT_BIT_DEPTH(depth, n) ((depth) << (4 * (n)))
48 #define OVL_CON_CLRFMT_8_BIT (0)
49 #define OVL_CON_CLRFMT_10_BIT (1)
50 #define DISP_REG_OVL_ADDR_MT8173 0x0f40
51 #define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
52 #define DISP_REG_OVL_HDR_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n) + 0x04)
53 #define DISP_REG_OVL_HDR_PITCH(ovl, n) ((ovl)->data->addr + 0x20 * (n) + 0x08)
54
55 #define GMC_THRESHOLD_BITS 16
56 #define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
57 #define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
58
59 #define OVL_CON_CLRFMT_MAN BIT(23)
60 #define OVL_CON_BYTE_SWAP BIT(24)
61
62 /* OVL_CON_RGB_SWAP works only if OVL_CON_CLRFMT_MAN is enabled */
63 #define OVL_CON_RGB_SWAP BIT(25)
64
65 #define OVL_CON_CLRFMT_RGB (1 << 12)
66 #define OVL_CON_CLRFMT_ARGB8888 (2 << 12)
67 #define OVL_CON_CLRFMT_RGBA8888 (3 << 12)
68 #define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
69 #define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
70 #define OVL_CON_CLRFMT_UYVY (4 << 12)
71 #define OVL_CON_CLRFMT_YUYV (5 << 12)
72 #define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
73 #define OVL_CON_CLRFMT_PARGB8888 ((3 << 12) | OVL_CON_CLRFMT_MAN)
74 #define OVL_CON_CLRFMT_PABGR8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_RGB_SWAP)
75 #define OVL_CON_CLRFMT_PBGRA8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_BYTE_SWAP)
76 #define OVL_CON_CLRFMT_PRGBA8888 (OVL_CON_CLRFMT_PABGR8888 | OVL_CON_BYTE_SWAP)
77 #define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
78 0 : OVL_CON_CLRFMT_RGB)
79 #define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
80 OVL_CON_CLRFMT_RGB : 0)
81 #define OVL_CON_AEN BIT(8)
82 #define OVL_CON_ALPHA 0xff
83 #define OVL_CON_VIRT_FLIP BIT(9)
84 #define OVL_CON_HORZ_FLIP BIT(10)
85
86 #define OVL_COLOR_ALPHA GENMASK(31, 24)
87
is_10bit_rgb(u32 fmt)88 static inline bool is_10bit_rgb(u32 fmt)
89 {
90 switch (fmt) {
91 case DRM_FORMAT_XRGB2101010:
92 case DRM_FORMAT_ARGB2101010:
93 case DRM_FORMAT_RGBX1010102:
94 case DRM_FORMAT_RGBA1010102:
95 case DRM_FORMAT_XBGR2101010:
96 case DRM_FORMAT_ABGR2101010:
97 case DRM_FORMAT_BGRX1010102:
98 case DRM_FORMAT_BGRA1010102:
99 return true;
100 }
101 return false;
102 }
103
/*
 * Plane formats supported by the OVL on MT8173-generation hardware:
 * 8-bit RGB(A) variants plus packed YUV422.
 * NOTE(review): entry order presumably matters to plane initialization
 * (first entry used as default) — confirm against mtk_plane_init().
 */
static const u32 mt8173_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
};
117
/*
 * Plane formats supported by the OVL on MT8195-generation hardware:
 * superset of mt8173_formats, adding the 10-bit RGB variants handled
 * via DISP_REG_OVL_CLRFMT_EXT (see mtk_ovl_set_bit_depth()).
 */
static const u32 mt8195_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_BGRX1010102,
	DRM_FORMAT_BGRA1010102,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_RGBX1010102,
	DRM_FORMAT_RGBA1010102,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
};
141
/**
 * struct mtk_disp_ovl_data - SoC-specific OVL configuration
 * @addr: offset of the first layer address register (see DISP_REG_OVL_ADDR())
 * @gmc_bits: width of the RDMA GMC threshold fields (8 or 10 on known SoCs)
 * @layer_nr: number of overlay layers this OVL instance provides
 * @fmt_rgb565_is_0: true when the hardware encodes RGB565 as 0 and RGB888
 *                   as OVL_CON_CLRFMT_RGB (the encoding is swapped otherwise)
 * @smi_id_en: OVL_LAYER_SMI_ID_EN must be toggled around engine start/stop
 * @supports_afbc: hardware can scan out AFBC-compressed buffers
 * @blend_modes: bitmask of supported DRM_MODE_BLEND_* modes
 * @formats: table of supported DRM fourcc formats
 * @num_formats: number of entries in @formats
 * @supports_clrfmt_ext: has DISP_REG_OVL_CLRFMT_EXT for per-layer bit depth
 */
struct mtk_disp_ovl_data {
	unsigned int addr;
	unsigned int gmc_bits;
	unsigned int layer_nr;
	bool fmt_rgb565_is_0;
	bool smi_id_en;
	bool supports_afbc;
	const u32 blend_modes;
	const u32 *formats;
	size_t num_formats;
	bool supports_clrfmt_ext;
};
154
/*
 * struct mtk_disp_ovl - DISP_OVL driver structure
 * @crtc: associated crtc to report vblank events to
 * @clk: clock gating this OVL hardware block
 * @regs: mapped MMIO register base of the OVL
 * @cmdq_reg: CMDQ (GCE) client register description used by mtk_ddp_write*()
 * @data: platform data
 * @vblank_cb: callback invoked from the frame-completion interrupt
 * @vblank_cb_data: opaque argument passed to @vblank_cb
 */
struct mtk_disp_ovl {
	struct drm_crtc *crtc;
	struct clk *clk;
	void __iomem *regs;
	struct cmdq_client_reg cmdq_reg;
	const struct mtk_disp_ovl_data *data;
	void (*vblank_cb)(void *data);
	void *vblank_cb_data;
};
169
mtk_disp_ovl_irq_handler(int irq,void * dev_id)170 static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
171 {
172 struct mtk_disp_ovl *priv = dev_id;
173
174 /* Clear frame completion interrupt */
175 writel(0x0, priv->regs + DISP_REG_OVL_INTSTA);
176
177 if (!priv->vblank_cb)
178 return IRQ_NONE;
179
180 priv->vblank_cb(priv->vblank_cb_data);
181
182 return IRQ_HANDLED;
183 }
184
mtk_ovl_register_vblank_cb(struct device * dev,void (* vblank_cb)(void *),void * vblank_cb_data)185 void mtk_ovl_register_vblank_cb(struct device *dev,
186 void (*vblank_cb)(void *),
187 void *vblank_cb_data)
188 {
189 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
190
191 ovl->vblank_cb = vblank_cb;
192 ovl->vblank_cb_data = vblank_cb_data;
193 }
194
mtk_ovl_unregister_vblank_cb(struct device * dev)195 void mtk_ovl_unregister_vblank_cb(struct device *dev)
196 {
197 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
198
199 ovl->vblank_cb = NULL;
200 ovl->vblank_cb_data = NULL;
201 }
202
/* Unmask the frame-completion interrupt used as the vblank source */
void mtk_ovl_enable_vblank(struct device *dev)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	/* Clear any stale status before unmasking, to avoid a spurious IRQ */
	writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
	writel_relaxed(OVL_FME_CPL_INT, ovl->regs + DISP_REG_OVL_INTEN);
}
210
/* Mask all OVL interrupts, stopping vblank delivery */
void mtk_ovl_disable_vblank(struct device *dev)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
}
217
mtk_ovl_get_blend_modes(struct device * dev)218 u32 mtk_ovl_get_blend_modes(struct device *dev)
219 {
220 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
221
222 return ovl->data->blend_modes;
223 }
224
mtk_ovl_get_formats(struct device * dev)225 const u32 *mtk_ovl_get_formats(struct device *dev)
226 {
227 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
228
229 return ovl->data->formats;
230 }
231
mtk_ovl_get_num_formats(struct device * dev)232 size_t mtk_ovl_get_num_formats(struct device *dev)
233 {
234 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
235
236 return ovl->data->num_formats;
237 }
238
mtk_ovl_clk_enable(struct device * dev)239 int mtk_ovl_clk_enable(struct device *dev)
240 {
241 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
242
243 return clk_prepare_enable(ovl->clk);
244 }
245
mtk_ovl_clk_disable(struct device * dev)246 void mtk_ovl_clk_disable(struct device *dev)
247 {
248 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
249
250 clk_disable_unprepare(ovl->clk);
251 }
252
mtk_ovl_start(struct device * dev)253 void mtk_ovl_start(struct device *dev)
254 {
255 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
256
257 if (ovl->data->smi_id_en) {
258 unsigned int reg;
259
260 reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
261 reg = reg | OVL_LAYER_SMI_ID_EN;
262 writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
263 }
264 writel_relaxed(0x1, ovl->regs + DISP_REG_OVL_EN);
265 }
266
mtk_ovl_stop(struct device * dev)267 void mtk_ovl_stop(struct device *dev)
268 {
269 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
270
271 writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_EN);
272 if (ovl->data->smi_id_en) {
273 unsigned int reg;
274
275 reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
276 reg = reg & ~OVL_LAYER_SMI_ID_EN;
277 writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
278 }
279 }
280
/* Set or clear the per-layer AFBC decode bit in DATAPATH_CON */
static void mtk_ovl_set_afbc(struct mtk_disp_ovl *ovl, struct cmdq_pkt *cmdq_pkt,
			     int idx, bool enabled)
{
	unsigned int val = enabled ? OVL_LAYER_AFBC_EN(idx) : 0;

	mtk_ddp_write_mask(cmdq_pkt, val, &ovl->cmdq_reg, ovl->regs,
			   DISP_REG_OVL_DATAPATH_CON, OVL_LAYER_AFBC_EN(idx));
}
288
/* Select 8- or 10-bit color depth for layer @idx on hardware that has it */
static void mtk_ovl_set_bit_depth(struct device *dev, int idx, u32 format,
				  struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
	unsigned int depth;

	/* Only hardware with the CLRFMT_EXT register can select bit depth */
	if (!ovl->data->supports_clrfmt_ext)
		return;

	depth = is_10bit_rgb(format) ? OVL_CON_CLRFMT_10_BIT :
				       OVL_CON_CLRFMT_8_BIT;

	mtk_ddp_write_mask(cmdq_pkt, OVL_CON_CLRFMT_BIT_DEPTH(depth, idx),
			   &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_CLRFMT_EXT,
			   OVL_CON_CLRFMT_BIT_DEPTH_MASK(idx));
}
305
/*
 * Program the OVL region of interest and background color, then pulse the
 * engine soft reset. @vrefresh and @bpc are accepted for interface symmetry
 * with other DDP components but are not used by this hardware.
 */
void mtk_ovl_config(struct device *dev, unsigned int w,
		    unsigned int h, unsigned int vrefresh,
		    unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	/* Only program the ROI when the size is valid (both dims non-zero) */
	if (w != 0 && h != 0)
		mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs,
				      DISP_REG_OVL_ROI_SIZE);

	/*
	 * The background color must be opaque black (ARGB),
	 * otherwise the alpha blending will have no effect
	 */
	mtk_ddp_write_relaxed(cmdq_pkt, OVL_COLOR_ALPHA, &ovl->cmdq_reg,
			      ovl->regs, DISP_REG_OVL_ROI_BGCLR);

	/* Pulse the soft-reset bit (assert then deassert) */
	mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
	mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
}
326
mtk_ovl_layer_nr(struct device * dev)327 unsigned int mtk_ovl_layer_nr(struct device *dev)
328 {
329 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
330
331 return ovl->data->layer_nr;
332 }
333
mtk_ovl_supported_rotations(struct device * dev)334 unsigned int mtk_ovl_supported_rotations(struct device *dev)
335 {
336 return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
337 DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
338 }
339
/*
 * Validate a plane state for this OVL. Rejects rotation bits the hardware
 * cannot perform, and any rotation/reflection of YUV framebuffers.
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
			struct mtk_plane_state *mtk_state)
{
	struct drm_plane_state *state = &mtk_state->base;

	/* check if any unsupported rotation is set */
	if (state->rotation & ~mtk_ovl_supported_rotations(dev))
		return -EINVAL;

	/*
	 * TODO: Rotating/reflecting YUV buffers is not supported at this time.
	 * Only RGB[AX] variants are supported.
	 * Since DRM_MODE_ROTATE_0 means "no rotation", we should not
	 * reject layers with this property.
	 */
	if (state->fb->format->is_yuv && (state->rotation & ~DRM_MODE_ROTATE_0))
		return -EINVAL;

	return 0;
}
360
/*
 * Enable overlay layer @idx: start its RDMA engine, program the GMC
 * (memory request throttling) thresholds, then select the layer as an
 * input source.
 */
void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
		      struct cmdq_pkt *cmdq_pkt)
{
	unsigned int gmc_thrshd_l;
	unsigned int gmc_thrshd_h;
	unsigned int gmc_value;
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs,
		      DISP_REG_OVL_RDMA_CTRL(idx));
	/*
	 * Scale the 16-bit reference thresholds down to the field width
	 * this SoC actually implements (ovl->data->gmc_bits).
	 */
	gmc_thrshd_l = GMC_THRESHOLD_LOW >>
		      (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
	gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
		      (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
	/*
	 * Register layout differs by threshold width: 10-bit hardware packs
	 * two 16-bit high-threshold fields, 8-bit hardware packs four byte
	 * fields as low/low/high/high.
	 */
	if (ovl->data->gmc_bits == 10)
		gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
	else
		gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
			    gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
	mtk_ddp_write(cmdq_pkt, gmc_value,
		      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RDMA_GMC(idx));
	/* Finally enable the layer in the source selection register */
	mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &ovl->cmdq_reg, ovl->regs,
			   DISP_REG_OVL_SRC_CON, BIT(idx));
}
385
mtk_ovl_layer_off(struct device * dev,unsigned int idx,struct cmdq_pkt * cmdq_pkt)386 void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
387 struct cmdq_pkt *cmdq_pkt)
388 {
389 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
390
391 mtk_ddp_write_mask(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
392 DISP_REG_OVL_SRC_CON, BIT(idx));
393 mtk_ddp_write(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
394 DISP_REG_OVL_RDMA_CTRL(idx));
395 }
396
/*
 * Translate the plane's pending DRM fourcc into the OVL_CON color format
 * bits, selecting the premultiplied-alpha variant when the plane's blend
 * mode requests it and the hardware advertises premultiplied support.
 */
static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
					struct mtk_plane_state *state)
{
	unsigned int fmt = state->pending.format;
	unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE;

	/*
	 * For the platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet
	 * and supports premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888.
	 *
	 * Check blend_modes in the driver data to see if premultiplied mode is supported.
	 * If not, use coverage mode instead to set it to the supported color formats.
	 *
	 * Current DRM assumption is that alpha is default premultiplied, so the bitmask of
	 * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init()
	 * will get an error return from drm_plane_create_blend_mode_property() and
	 * state->base.pixel_blend_mode should not be used.
	 */
	if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI))
		blend_mode = state->base.pixel_blend_mode;

	switch (fmt) {
	default:
	case DRM_FORMAT_RGB565:
		return OVL_CON_CLRFMT_RGB565(ovl);
	case DRM_FORMAT_BGR565:
		return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP;
	case DRM_FORMAT_RGB888:
		return OVL_CON_CLRFMT_RGB888(ovl);
	case DRM_FORMAT_BGR888:
		return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP;
	/*
	 * The 10-bit variants share the 8888 CON encodings below; the extra
	 * bit depth is programmed separately via DISP_REG_OVL_CLRFMT_EXT in
	 * mtk_ovl_set_bit_depth().
	 */
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_RGBA1010102:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_RGBA8888 :
		       OVL_CON_CLRFMT_PRGBA8888;
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_BGRA8888 :
		       OVL_CON_CLRFMT_PBGRA8888;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_ARGB8888 :
		       OVL_CON_CLRFMT_PARGB8888;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_ABGR8888 :
		       OVL_CON_CLRFMT_PABGR8888;
	case DRM_FORMAT_UYVY:
		return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
	case DRM_FORMAT_YUYV:
		return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB;
	}
}
462
mtk_ovl_afbc_layer_config(struct mtk_disp_ovl * ovl,unsigned int idx,struct mtk_plane_pending_state * pending,struct cmdq_pkt * cmdq_pkt)463 static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl,
464 unsigned int idx,
465 struct mtk_plane_pending_state *pending,
466 struct cmdq_pkt *cmdq_pkt)
467 {
468 unsigned int pitch_msb = pending->pitch >> 16;
469 unsigned int hdr_pitch = pending->hdr_pitch;
470 unsigned int hdr_addr = pending->hdr_addr;
471
472 if (pending->modifier != DRM_FORMAT_MOD_LINEAR) {
473 mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
474 DISP_REG_OVL_HDR_ADDR(ovl, idx));
475 mtk_ddp_write_relaxed(cmdq_pkt,
476 OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb,
477 &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
478 mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
479 DISP_REG_OVL_HDR_PITCH(ovl, idx));
480 } else {
481 mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb,
482 &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
483 }
484 }
485
/*
 * Fully (re)program overlay layer @idx from the plane's pending state:
 * format/blend control word, pitch, size, offset and buffer address,
 * plus AFBC and bit-depth setup where supported. A disabled pending
 * state turns the layer off instead.
 */
void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
			  struct mtk_plane_state *state,
			  struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
	struct mtk_plane_pending_state *pending = &state->pending;
	unsigned int addr = pending->addr;
	unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0);
	unsigned int fmt = pending->format;
	unsigned int rotation = pending->rotation;
	unsigned int offset = (pending->y << 16) | pending->x;
	unsigned int src_size = (pending->height << 16) | pending->width;
	unsigned int blend_mode = state->base.pixel_blend_mode;
	unsigned int ignore_pixel_alpha = 0;
	unsigned int con;

	if (!pending->enable) {
		mtk_ovl_layer_off(dev, idx, cmdq_pkt);
		return;
	}

	con = mtk_ovl_fmt_convert(ovl, state);
	if (state->base.fb) {
		/* Plane-global alpha lives in the low byte of the CON word */
		con |= state->base.alpha & OVL_CON_ALPHA;

		/*
		 * For blend_modes supported SoCs, always enable alpha blending.
		 * For blend_modes unsupported SoCs, enable alpha blending when has_alpha is set.
		 */
		if (blend_mode || state->base.fb->format->has_alpha)
			con |= OVL_CON_AEN;

		/*
		 * Although the alpha channel can be ignored, CONST_BLD must be enabled
		 * for XRGB format, otherwise OVL will still read the value from memory.
		 * For RGB888 related formats, whether CONST_BLD is enabled or not won't
		 * affect the result. Therefore we use !has_alpha as the condition.
		 */
		if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha)
			ignore_pixel_alpha = OVL_CONST_BLEND;
	}

	/*
	 * Treat rotate 180 as flip x + flip y, and XOR the original rotation value
	 * to flip x + flip y to support both in the same time.
	 */
	if (rotation & DRM_MODE_ROTATE_180)
		rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;

	/* Vertical flip: scan from the last line, so point at the last row */
	if (rotation & DRM_MODE_REFLECT_Y) {
		con |= OVL_CON_VIRT_FLIP;
		addr += (pending->height - 1) * pending->pitch;
	}

	/* Horizontal flip: point at the end of the first row */
	if (rotation & DRM_MODE_REFLECT_X) {
		con |= OVL_CON_HORZ_FLIP;
		addr += pending->pitch - 1;
	}

	if (ovl->data->supports_afbc)
		mtk_ovl_set_afbc(ovl, cmdq_pkt, idx,
				 pending->modifier != DRM_FORMAT_MOD_LINEAR);

	mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_CON(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha,
			      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_SRC_SIZE(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_OFFSET(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_ADDR(ovl, idx));

	if (ovl->data->supports_afbc)
		mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt);

	mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
	mtk_ovl_layer_on(dev, idx, cmdq_pkt);
}
566
mtk_ovl_bgclr_in_on(struct device * dev)567 void mtk_ovl_bgclr_in_on(struct device *dev)
568 {
569 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
570 unsigned int reg;
571
572 reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
573 reg = reg | OVL_BGCLR_SEL_IN;
574 writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
575 }
576
mtk_ovl_bgclr_in_off(struct device * dev)577 void mtk_ovl_bgclr_in_off(struct device *dev)
578 {
579 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
580 unsigned int reg;
581
582 reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
583 reg = reg & ~OVL_BGCLR_SEL_IN;
584 writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
585 }
586
/* Component bind: nothing to set up here; all init happens in probe */
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
			     void *data)
{
	return 0;
}
592
/* Component unbind: nothing to tear down; cleanup happens in remove */
static void mtk_disp_ovl_unbind(struct device *dev, struct device *master,
				void *data)
{
}
597
/* Hooks for the kernel component framework (registered in probe) */
static const struct component_ops mtk_disp_ovl_component_ops = {
	.bind = mtk_disp_ovl_bind,
	.unbind = mtk_disp_ovl_unbind,
};
602
/*
 * Probe: allocate driver state, acquire the clock, MMIO region and IRQ,
 * enable runtime PM, and register with the component framework.
 * All resources except pm_runtime/component are device-managed.
 */
static int mtk_disp_ovl_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_disp_ovl *priv;
	struct resource *res;
	int irq;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk))
		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "failed to get ovl clk\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->regs))
		return dev_err_probe(dev, PTR_ERR(priv->regs),
				     "failed to ioremap ovl\n");
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* The GCE client register is optional; failure is non-fatal */
	ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
	if (ret)
		dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif

	priv->data = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, priv);

	ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
			       IRQF_TRIGGER_NONE, dev_name(dev), priv);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Failed to request irq %d\n", irq);

	pm_runtime_enable(dev);

	ret = component_add(dev, &mtk_disp_ovl_component_ops);
	if (ret) {
		/* Undo the only non-devres step taken above */
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "Failed to add component\n");
	}

	return 0;
}
653
/* Remove: tear down the non-devres probe steps in reverse order */
static void mtk_disp_ovl_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
	pm_runtime_disable(&pdev->dev);
}
659
/* MT2701: 4 layers, 8-bit GMC fields, RGB565 encoded as OVL_CON_CLRFMT_RGB */
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT2701,
	.gmc_bits = 8,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = false,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8173: 4 layers, 8-bit GMC fields, RGB565 encoded as 0 */
static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 8,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8183 main OVL: 4 layers, 10-bit GMC fields */
static const struct mtk_disp_ovl_data mt8183_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8183 2L variant: same as above but only 2 layers */
static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 2,
	.fmt_rgb565_is_0 = true,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8192 main OVL: adds SMI ID control and blend-mode selection */
static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.smi_id_en = true,
	.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
		       BIT(DRM_MODE_BLEND_COVERAGE) |
		       BIT(DRM_MODE_BLEND_PIXEL_NONE),
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8192 2L variant: same as above but only 2 layers */
static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 2,
	.fmt_rgb565_is_0 = true,
	.smi_id_en = true,
	.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
		       BIT(DRM_MODE_BLEND_COVERAGE) |
		       BIT(DRM_MODE_BLEND_PIXEL_NONE),
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8195: adds AFBC, 10-bit formats and the CLRFMT_EXT depth register */
static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.smi_id_en = true,
	.supports_afbc = true,
	.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
		       BIT(DRM_MODE_BLEND_COVERAGE) |
		       BIT(DRM_MODE_BLEND_PIXEL_NONE),
	.formats = mt8195_formats,
	.num_formats = ARRAY_SIZE(mt8195_formats),
	.supports_clrfmt_ext = true,
};
736
/* Devicetree match table binding each SoC compatible to its OVL config */
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
	{ .compatible = "mediatek,mt2701-disp-ovl",
	  .data = &mt2701_ovl_driver_data},
	{ .compatible = "mediatek,mt8173-disp-ovl",
	  .data = &mt8173_ovl_driver_data},
	{ .compatible = "mediatek,mt8183-disp-ovl",
	  .data = &mt8183_ovl_driver_data},
	{ .compatible = "mediatek,mt8183-disp-ovl-2l",
	  .data = &mt8183_ovl_2l_driver_data},
	{ .compatible = "mediatek,mt8192-disp-ovl",
	  .data = &mt8192_ovl_driver_data},
	{ .compatible = "mediatek,mt8192-disp-ovl-2l",
	  .data = &mt8192_ovl_2l_driver_data},
	{ .compatible = "mediatek,mt8195-disp-ovl",
	  .data = &mt8195_ovl_driver_data},
	{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
755
/* Platform driver; registered by the top-level mtk_drm_drv module */
struct platform_driver mtk_disp_ovl_driver = {
	.probe = mtk_disp_ovl_probe,
	.remove = mtk_disp_ovl_remove,
	.driver = {
		.name = "mediatek-disp-ovl",
		.of_match_table = mtk_disp_ovl_driver_dt_match,
	},
};
764