// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
 */
#include <linux/io.h>
#include "ipu-prv.h"

struct ipu_vdi {
	void __iomem *base;
	u32 module;
	spinlock_t lock;
	int use_count;
	struct ipu_soc *ipu;
};

/* VDI Register Offsets */
#define VDI_FSIZE	0x0000
#define VDI_C		0x0004

/* VDI Register Fields */
#define VDI_C_CH_420			(0 << 1)
#define VDI_C_CH_422			(1 << 1)
#define VDI_C_MOT_SEL_MASK		(0x3 << 2)
#define VDI_C_MOT_SEL_FULL		(2 << 2)
#define VDI_C_MOT_SEL_LOW		(1 << 2)
#define VDI_C_MOT_SEL_MED		(0 << 2)
#define VDI_C_BURST_SIZE1_4		(3 << 4)
#define VDI_C_BURST_SIZE2_4		(3 << 8)
#define VDI_C_BURST_SIZE3_4		(3 << 12)
#define VDI_C_BURST_SIZE_MASK		0xF
#define VDI_C_BURST_SIZE1_OFFSET	4
#define VDI_C_BURST_SIZE2_OFFSET	8
#define VDI_C_BURST_SIZE3_OFFSET	12
#define VDI_C_VWM1_SET_1		(0 << 16)
#define VDI_C_VWM1_SET_2		(1 << 16)
#define VDI_C_VWM1_CLR_2		(1 << 19)
#define VDI_C_VWM3_SET_1		(0 << 22)
#define VDI_C_VWM3_SET_2		(1 << 22)
#define VDI_C_VWM3_CLR_2		(1 << 25)
#define VDI_C_TOP_FIELD_MAN_1		(1 << 30)
#define VDI_C_TOP_FIELD_AUTO_1		(1 << 31)

static inline u32 ipu_vdi_read(struct ipu_vdi *vdi, unsigned int offset)
{
	return readl(vdi->base + offset);
}

static inline void ipu_vdi_write(struct ipu_vdi *vdi, u32 value,
				 unsigned int offset)
{
	writel(value, vdi->base + offset);
}

void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
{
	bool top_field_0 = false;
	unsigned long flags;
	u32 reg;

	switch (field) {
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_TOP:
		top_field_0 = true;
		break;
	case V4L2_FIELD_INTERLACED_BT:
	case V4L2_FIELD_SEQ_BT:
	case V4L2_FIELD_BOTTOM:
		top_field_0 = false;
		break;
	default:
		top_field_0 = (std & V4L2_STD_525_60) ? true : false;
		break;
	}

	spin_lock_irqsave(&vdi->lock, flags);

	reg = ipu_vdi_read(vdi, VDI_C);
	if (top_field_0)
		reg &= ~(VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1);
	else
		reg |= VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1;
	ipu_vdi_write(vdi, reg, VDI_C);

	spin_unlock_irqrestore(&vdi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_vdi_set_field_order);

void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&vdi->lock, flags);

	reg = ipu_vdi_read(vdi, VDI_C);

	reg &= ~VDI_C_MOT_SEL_MASK;

	switch (motion_sel) {
	case MED_MOTION:
		reg |= VDI_C_MOT_SEL_MED;
		break;
	case HIGH_MOTION:
		reg |= VDI_C_MOT_SEL_FULL;
		break;
	default:
		reg |= VDI_C_MOT_SEL_LOW;
		break;
	}

	ipu_vdi_write(vdi, reg, VDI_C);

	spin_unlock_irqrestore(&vdi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_vdi_set_motion);

void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
{
	unsigned long flags;
	u32 pixel_fmt, reg;

	spin_lock_irqsave(&vdi->lock, flags);

	reg = ((yres - 1) << 16) | (xres - 1);
	ipu_vdi_write(vdi, reg, VDI_FSIZE);

	/*
	 * Full motion, only vertical filter is used.
	 * Burst size is 4 accesses.
	 */
	if (code == MEDIA_BUS_FMT_UYVY8_2X8 ||
	    code == MEDIA_BUS_FMT_UYVY8_1X16 ||
	    code == MEDIA_BUS_FMT_YUYV8_2X8 ||
	    code == MEDIA_BUS_FMT_YUYV8_1X16)
		pixel_fmt = VDI_C_CH_422;
	else
		pixel_fmt = VDI_C_CH_420;

	reg = ipu_vdi_read(vdi, VDI_C);
	reg |= pixel_fmt;
	reg |= VDI_C_BURST_SIZE2_4;
	reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_CLR_2;
	reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_CLR_2;
	ipu_vdi_write(vdi, reg, VDI_C);

	spin_unlock_irqrestore(&vdi->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_vdi_setup);

int ipu_vdi_enable(struct ipu_vdi *vdi)
{
	unsigned long flags;

	spin_lock_irqsave(&vdi->lock, flags);

	if (!vdi->use_count)
		ipu_module_enable(vdi->ipu, vdi->module);

	vdi->use_count++;

	spin_unlock_irqrestore(&vdi->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_vdi_enable);

int ipu_vdi_disable(struct ipu_vdi *vdi)
{
	unsigned long flags;

	spin_lock_irqsave(&vdi->lock, flags);

	if (vdi->use_count) {
		if (!--vdi->use_count)
			ipu_module_disable(vdi->ipu, vdi->module);
	}

	spin_unlock_irqrestore(&vdi->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_vdi_disable);

struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu)
{
	return ipu->vdi_priv;
}
EXPORT_SYMBOL_GPL(ipu_vdi_get);

void ipu_vdi_put(struct ipu_vdi *vdi)
{
}
EXPORT_SYMBOL_GPL(ipu_vdi_put);

int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
		 unsigned long base, u32 module)
{
	struct ipu_vdi *vdi;

	vdi = devm_kzalloc(dev, sizeof(*vdi), GFP_KERNEL);
	if (!vdi)
		return -ENOMEM;

	ipu->vdi_priv = vdi;

	spin_lock_init(&vdi->lock);
	vdi->module = module;
	vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
	if (!vdi->base)
		return -ENOMEM;

	dev_dbg(dev, "VDI base: 0x%08lx remapped to %p\n", base, vdi->base);
	vdi->ipu = ipu;

	return 0;
}

void ipu_vdi_exit(struct ipu_soc *ipu)
{
}