1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017,2020 Intel Corporation
4  *
5  * Based partially on Intel IPU4 driver written by
6  *  Sakari Ailus <sakari.ailus@linux.intel.com>
7  *  Samu Onkalo
8  *  Jouni Högander <jouni.hogander@intel.com>
9  *  Jouni Ukkonen
10  *  Antti Laakso <antti.laakso@intel.com>
11  * et al.
12  */
13 
14 #include <linux/bitops.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/mm.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/pfn.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/property.h>
24 #include <linux/vmalloc.h>
25 
26 #include <media/ipu-bridge.h>
27 #include <media/v4l2-ctrls.h>
28 #include <media/v4l2-device.h>
29 #include <media/v4l2-event.h>
30 #include <media/v4l2-fwnode.h>
31 #include <media/v4l2-mc.h>
32 #include <media/v4l2-ioctl.h>
33 #include <media/videobuf2-dma-sg.h>
34 
35 #include "ipu3-cio2.h"
36 
37 struct ipu3_cio2_fmt {
38 	u32 mbus_code;
39 	u32 fourcc;
40 	u8 mipicode;
41 	u8 bpp;
42 };
43 
44 /*
45  * These are raw formats used in Intel's third generation of
46  * Image Processing Unit known as IPU3.
47  * 10-bit raw Bayer, packed: every 32-byte block carries 25 pixels
48  * and the last 6 LSBs of the block are unused.
49  */
50 static const struct ipu3_cio2_fmt formats[] = {
51 	{	/* put default entry at beginning */
52 		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
53 		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
54 		.mipicode	= 0x2b,
55 		.bpp		= 10,
56 	}, {
57 		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
58 		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
59 		.mipicode	= 0x2b,
60 		.bpp		= 10,
61 	}, {
62 		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
63 		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
64 		.mipicode	= 0x2b,
65 		.bpp		= 10,
66 	}, {
67 		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
68 		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
69 		.mipicode	= 0x2b,
70 		.bpp		= 10,
71 	}, {
72 		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
73 		.fourcc		= V4L2_PIX_FMT_IPU3_Y10,
74 		.mipicode	= 0x2b,
75 		.bpp		= 10,
76 	},
77 };
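/*
 * Worked example of the packing described above (a sketch derived from the
 * comment, not from the datasheet): 25 pixels x 10 bits = 250 bits, while a
 * 32-byte block holds 32 x 8 = 256 bits, so 256 - 250 = 6 trailing LSBs of
 * each block carry no pixel data.
 */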
78 
79 /*
80  * cio2_find_format - look up a color format by fourcc and/or media bus code
81  * @pixelformat: fourcc to match, ignored if null
82  * @mbus_code: media bus code to match, ignored if null
83  */
84 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
85 						    const u32 *mbus_code)
86 {
87 	unsigned int i;
88 
89 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
90 		if (pixelformat && *pixelformat != formats[i].fourcc)
91 			continue;
92 		if (mbus_code && *mbus_code != formats[i].mbus_code)
93 			continue;
94 
95 		return &formats[i];
96 	}
97 
98 	return NULL;
99 }
100 
101 static inline u32 cio2_bytesperline(const unsigned int width)
102 {
103 	/*
104 	 * 64 bytes for every 50 pixels; the line length
105 	 * in bytes is a multiple of 64 (line end alignment).
106 	 */
107 	return DIV_ROUND_UP(width, 50) * 64;
108 }
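/*
 * Worked example (illustrative values only): for the 4224-pixel maximum
 * width, DIV_ROUND_UP(4224, 50) = 85 blocks of 64 bytes, so bytesperline is
 * 85 * 64 = 5440, which is 64-byte aligned and large enough for the
 * 4224 * 10 / 8 = 5280 bytes of packed pixel data.
 */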
109 
110 /**************** FBPT operations ****************/
111 
112 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
113 {
114 	struct device *dev = &cio2->pci_dev->dev;
115 
116 	if (cio2->dummy_lop) {
117 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
118 				  cio2->dummy_lop_bus_addr);
119 		cio2->dummy_lop = NULL;
120 	}
121 	if (cio2->dummy_page) {
122 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
123 				  cio2->dummy_page_bus_addr);
124 		cio2->dummy_page = NULL;
125 	}
126 }
127 
128 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
129 {
130 	struct device *dev = &cio2->pci_dev->dev;
131 	unsigned int i;
132 
133 	cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
134 					      &cio2->dummy_page_bus_addr,
135 					      GFP_KERNEL);
136 	cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
137 					     &cio2->dummy_lop_bus_addr,
138 					     GFP_KERNEL);
139 	if (!cio2->dummy_page || !cio2->dummy_lop) {
140 		cio2_fbpt_exit_dummy(cio2);
141 		return -ENOMEM;
142 	}
143 	/*
144 	 * A List of Pointers (LOP) holds 1024 32-bit pointers, one per 4 KB page.
145 	 * Initialize each entry to the dummy_page bus base address.
146 	 */
147 	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
148 		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
149 
150 	return 0;
151 }
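/*
 * Sizing note (derived from the constants above, not from documentation):
 * one LOP page holds CIO2_LOP_ENTRIES (1024) page-frame numbers, so a single
 * LOP can map 1024 * 4 KiB = 4 MiB of buffer memory; the dummy LOP simply
 * maps all of it onto the single dummy page.
 */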
152 
153 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
154 				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
155 {
156 	/*
157 	 * The CPU first initializes some fields in the FBPT and then sets
158 	 * the VALID bit. This barrier ensures that the DMA (device) does
159 	 * not see the VALID bit set before the other fields have been
160 	 * initialized; otherwise it could lead to havoc.
161 	 */
162 	dma_wmb();
163 
164 	/*
165 	 * Request interrupts for start and completion.
166 	 * The VALID bit applies only to the first entry.
167 	 */
168 	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
169 		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
170 }
171 
172 /* Initialize FBPT entries to point to the dummy frame */
173 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
174 				       struct cio2_fbpt_entry
175 				       entry[CIO2_MAX_LOPS])
176 {
177 	unsigned int i;
178 
179 	entry[0].first_entry.first_page_offset = 0;
180 	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
181 	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
182 
183 	for (i = 0; i < CIO2_MAX_LOPS; i++)
184 		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
185 
186 	cio2_fbpt_entry_enable(cio2, entry);
187 }
188 
189 /* Initialize FBPT entries to point to a given buffer */
190 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
191 				     struct cio2_buffer *b,
192 				     struct cio2_fbpt_entry
193 				     entry[CIO2_MAX_LOPS])
194 {
195 	struct vb2_buffer *vb = &b->vbb.vb2_buf;
196 	unsigned int length = vb->planes[0].length;
197 	int remaining, i;
198 
199 	entry[0].first_entry.first_page_offset = b->offset;
200 	remaining = length + entry[0].first_entry.first_page_offset;
201 	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
202 	/*
203 	 * last_page_available_bytes has the offset of the last byte in the
204 	 * last page which is still accessible by DMA. DMA cannot access
205 	 * beyond this point. Valid range for this is from 0 to 4095.
206 	 * 0 indicates 1st byte in the page is DMA accessible.
207 	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
208 	 * is available for DMA transfer.
209 	 */
210 	remaining = offset_in_page(remaining) ?: PAGE_SIZE;
211 	entry[1].second_entry.last_page_available_bytes = remaining - 1;
212 	/* Fill FBPT */
213 	remaining = length;
214 	i = 0;
215 	while (remaining > 0) {
216 		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
217 		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
218 		entry++;
219 		i++;
220 	}
221 
222 	/*
223 	 * The first unused FBPT entry must still point to a valid LOP
224 	 */
225 	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
226 
227 	cio2_fbpt_entry_enable(cio2, entry);
228 }
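/*
 * Worked example for last_page_available_bytes (hypothetical numbers): with
 * offset 0 and a 10000-byte plane, num_of_pages = PFN_UP(10000) = 3,
 * offset_in_page(10000) = 10000 - 2 * 4096 = 1808, so
 * last_page_available_bytes = 1807, i.e. bytes 0..1807 of the third page may
 * be written by DMA.
 */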
229 
230 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
231 {
232 	struct device *dev = &cio2->pci_dev->dev;
233 
234 	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
235 				     GFP_KERNEL);
236 	if (!q->fbpt)
237 		return -ENOMEM;
238 
239 	return 0;
240 }
241 
242 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
243 {
244 	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
245 }
246 
247 /**************** CSI2 hardware setup ****************/
248 
249 /*
250  * The CSI2 receiver has several parameters affecting
251  * the receiver timings. These depend on the MIPI bus frequency
252  * F in Hz (sensor transmitter rate) as follows:
253  *     register value = (A/1e9 + B * UI) / COUNT_ACC
254  * where
255  *      UI = 1 / (2 * F) in seconds
256  *      COUNT_ACC = counter accuracy
257  *      For IPU3, COUNT_ACC = 0.0625 ns (6.25e-11 s)
258  *
259  * A and B are coefficients from the table below,
260  * depending whether the register minimum or maximum value is
261  * calculated.
262  *                                     Minimum     Maximum
263  * Clock lane                          A     B     A     B
264  * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
265  * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
266  * Data lanes
267  * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
268  * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
269  * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
270  * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
271  * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
272  * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
273  * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
274  * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
275  *
276  * We use the minimum values of both A and B.
277  */
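/*
 * Worked example of the formula above (illustrative only, assuming a
 * 400 MHz link frequency): UI = 1 / (2 * 400e6) = 1.25 ns and
 * COUNT_ACC = 0.0625 ns, so the minimum clock-lane settle value is
 * (95 - 8 * 1.25) / 0.0625 = 1360, matching what cio2_rx_timing() below
 * computes as accinv * (a + b * uiinv / freq) = 16 * (95 - 10).
 */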
278 
279 /*
280  * shift for keeping value range suitable for 32-bit integer arithmetic
281  */
282 #define LIMIT_SHIFT	8
283 
284 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
285 {
286 	const u32 accinv = 16; /* inverse of the counter resolution */
287 	const u32 uiinv = 500000000; /* 1e9 / 2 */
288 	s32 r;
289 
290 	freq >>= LIMIT_SHIFT;
291 
292 	if (WARN_ON(freq <= 0 || freq > S32_MAX))
293 		return def;
294 	/*
295 	 * b could be 0, -2 or -8, so |accinv * b| is always
296 	 * at most 128 (1 << 7) and thus |r| < 500000000.
297 	 */
298 	r = accinv * b * (uiinv >> LIMIT_SHIFT);
299 	r = r / (s32)freq;
300 	/* max value of a is 95 */
301 	r += accinv * a;
302 
303 	return r;
304 };
305 
306 /* Calculate the termination enable and settle delays for clock and data lane HS Rx */
307 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
308 				 struct cio2_csi2_timing *timing,
309 				 unsigned int bpp, unsigned int lanes)
310 {
311 	struct device *dev = &cio2->pci_dev->dev;
312 	s64 freq;
313 
314 	if (!q->sensor)
315 		return -ENODEV;
316 
317 	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
318 	if (freq < 0) {
319 		dev_err(dev, "error %lld, invalid link_freq\n", freq);
320 		return freq;
321 	}
322 
323 	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
324 					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
325 					    freq,
326 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
327 	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
328 					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
329 					    freq,
330 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
331 	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
332 					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
333 					    freq,
334 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
335 	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
336 					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
337 					    freq,
338 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
339 
340 	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
341 	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
342 	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
343 	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
344 
345 	return 0;
346 };
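/*
 * Note on the v4l2_get_link_freq() call above (an interpretation of the call
 * site, not taken from the helper's kerneldoc): if the sensor exposes a
 * LINK_FREQ control its value is used directly; otherwise the frequency is
 * derived from PIXEL_RATE scaled by bpp / (2 * lanes), i.e. bits per pixel
 * over double-data-rate lanes.
 */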
347 
348 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
349 {
350 	static const int NUM_VCS = 4;
351 	static const int SID;	/* Stream id */
352 	static const int ENTRY;
353 	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
354 					CIO2_FBPT_SUBENTRY_UNIT);
355 	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
356 	const struct ipu3_cio2_fmt *fmt;
357 	void __iomem *const base = cio2->base;
358 	u8 lanes, csi2bus = q->csi2.port;
359 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
360 	struct cio2_csi2_timing timing = { 0 };
361 	int i, r;
362 
363 	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
364 	if (!fmt)
365 		return -EINVAL;
366 
367 	lanes = q->csi2.lanes;
368 
369 	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
370 	if (r)
371 		return r;
372 
373 	writel(timing.clk_termen, q->csi_rx_base +
374 		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
375 	writel(timing.clk_settle, q->csi_rx_base +
376 		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
377 
378 	for (i = 0; i < lanes; i++) {
379 		writel(timing.dat_termen, q->csi_rx_base +
380 			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
381 		writel(timing.dat_settle, q->csi_rx_base +
382 			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
383 	}
384 
385 	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
386 	       CIO2_PBM_WMCTRL1_MID1_2CK |
387 	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
388 	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
389 	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
390 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
391 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
392 	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
393 	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
394 	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
395 	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
396 	       CIO2_PBM_ARB_CTRL_LE_EN |
397 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
398 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
399 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
400 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
401 	       base + CIO2_REG_PBM_ARB_CTRL);
402 	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
403 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
404 	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
405 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
406 
407 	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
408 	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
409 
410 	/* Configure MIPI backend */
411 	for (i = 0; i < NUM_VCS; i++)
412 		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
413 
414 	/* There are 16 short packet LUT entries */
415 	for (i = 0; i < 16; i++)
416 		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
417 		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
418 	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
419 	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
420 
421 	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
422 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
423 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
424 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
425 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
426 	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
427 
428 	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
429 	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
430 	       base + CIO2_REG_INT_EN);
431 
432 	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
433 	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
434 	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
435 	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
436 	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
437 	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
438 	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
439 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
440 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
441 	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
442 
443 	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
444 	writel(CIO2_CGC_PRIM_TGE |
445 	       CIO2_CGC_SIDE_TGE |
446 	       CIO2_CGC_XOSC_TGE |
447 	       CIO2_CGC_D3I3_TGE |
448 	       CIO2_CGC_CSI2_INTERFRAME_TGE |
449 	       CIO2_CGC_CSI2_PORT_DCGE |
450 	       CIO2_CGC_SIDE_DCGE |
451 	       CIO2_CGC_PRIM_DCGE |
452 	       CIO2_CGC_ROSC_DCGE |
453 	       CIO2_CGC_XOSC_DCGE |
454 	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
455 	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
456 	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
457 	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
458 	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
459 	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
460 	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
461 	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
462 	       base + CIO2_REG_LTRVAL01);
463 	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
464 	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
465 	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
466 	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
467 	       base + CIO2_REG_LTRVAL23);
468 
469 	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
470 		writel(0, base + CIO2_REG_CDMABA(i));
471 		writel(0, base + CIO2_REG_CDMAC0(i));
472 		writel(0, base + CIO2_REG_CDMAC1(i));
473 	}
474 
475 	/* Enable DMA */
476 	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
477 
478 	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
479 	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
480 	       CIO2_CDMAC0_DMA_INTR_ON_FE |
481 	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
482 	       CIO2_CDMAC0_DMA_EN |
483 	       CIO2_CDMAC0_DMA_INTR_ON_FS |
484 	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
485 
486 	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
487 	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
488 
489 	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
490 
491 	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
492 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
493 	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
494 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
495 	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
496 
497 	/* Clear interrupts */
498 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
499 	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
500 	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
501 	writel(~0, base + CIO2_REG_INT_STS);
502 
503 	/* Enable devices, starting from the last device in the pipe */
504 	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
505 	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
506 
507 	return 0;
508 }
509 
510 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
511 {
512 	struct device *dev = &cio2->pci_dev->dev;
513 	void __iomem *const base = cio2->base;
514 	unsigned int i;
515 	u32 value;
516 	int ret;
517 
518 	/* Disable CSI receiver and MIPI backend devices */
519 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
520 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
521 	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
522 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
523 
524 	/* Halt DMA */
525 	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
526 	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
527 				 value, value & CIO2_CDMAC0_DMA_HALTED,
528 				 4000, 2000000);
529 	if (ret)
530 		dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
531 
532 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
533 		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
534 		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
535 		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
536 		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
537 	}
538 }
539 
540 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
541 {
542 	struct device *dev = &cio2->pci_dev->dev;
543 	struct cio2_queue *q = cio2->cur_queue;
544 	struct cio2_fbpt_entry *entry;
545 	u64 ns = ktime_get_ns();
546 
547 	if (dma_chan >= CIO2_QUEUES) {
548 		dev_err(dev, "bad DMA channel %i\n", dma_chan);
549 		return;
550 	}
551 
552 	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
553 	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
554 		dev_warn(dev, "no ready buffers found on DMA channel %u\n",
555 			 dma_chan);
556 		return;
557 	}
558 
559 	/* Find out which buffer(s) are ready */
560 	do {
561 		struct cio2_buffer *b;
562 
563 		b = q->bufs[q->bufs_first];
564 		if (b) {
565 			unsigned int received = entry[1].second_entry.num_of_bytes;
566 			unsigned long payload =
567 				vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
568 
569 			q->bufs[q->bufs_first] = NULL;
570 			atomic_dec(&q->bufs_queued);
571 			dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
572 
573 			b->vbb.vb2_buf.timestamp = ns;
574 			b->vbb.field = V4L2_FIELD_NONE;
575 			b->vbb.sequence = atomic_read(&q->frame_sequence);
576 			if (payload != received)
577 				dev_warn(dev,
578 					 "payload length is %lu, received %u\n",
579 					 payload, received);
580 			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
581 		}
582 		atomic_inc(&q->frame_sequence);
583 		cio2_fbpt_entry_init_dummy(cio2, entry);
584 		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
585 		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
586 	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
587 }
588 
589 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
590 {
591 	/*
592 	 * For the user space camera control algorithms it is essential
593 	 * to know when the reception of a frame has begun. That's often
594 	 * the best timing information to get from the hardware.
595 	 */
596 	struct v4l2_event event = {
597 		.type = V4L2_EVENT_FRAME_SYNC,
598 		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
599 	};
600 
601 	v4l2_event_queue(q->subdev.devnode, &event);
602 }
603 
604 static const char *const cio2_irq_errs[] = {
605 	"single packet header error corrected",
606 	"multiple packet header errors detected",
607 	"payload checksum (CRC) error",
608 	"fifo overflow",
609 	"reserved short packet data type detected",
610 	"reserved long packet data type detected",
611 	"incomplete long packet detected",
612 	"frame sync error",
613 	"line sync error",
614 	"DPHY start of transmission error",
615 	"DPHY synchronization error",
616 	"escape mode error",
617 	"escape mode trigger event",
618 	"escape mode ultra-low power state for data lane(s)",
619 	"escape mode ultra-low power state exit for clock lane",
620 	"inter-frame short packet discarded",
621 	"inter-frame long packet discarded",
622 	"non-matching Long Packet stalled",
623 };
624 
625 static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
626 {
627 	unsigned long csi2_status = status;
628 	unsigned int i;
629 
630 	for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
631 		dev_err(dev, "CSI-2 receiver port %i: %s\n",
632 			port, cio2_irq_errs[i]);
633 
634 	if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
635 		dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
636 			 csi2_status, port);
637 }
638 
639 static const char *const cio2_port_errs[] = {
640 	"ECC recoverable",
641 	"DPHY not recoverable",
642 	"ECC not recoverable",
643 	"CRC error",
644 	"INTERFRAMEDATA",
645 	"PKT2SHORT",
646 	"PKT2LONG",
647 };
648 
649 static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
650 {
651 	unsigned long port_status = status;
652 	unsigned int i;
653 
654 	for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
655 		dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
656 }
657 
658 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
659 {
660 	struct device *dev = &cio2->pci_dev->dev;
661 	void __iomem *const base = cio2->base;
662 
663 	if (int_status & CIO2_INT_IOOE) {
664 		/*
665 		 * Interrupt on Output Error:
666 		 * 1) SRAM is full and FS received, or
667 		 * 2) An invalid bit detected by DMA.
668 		 */
669 		u32 oe_status, oe_clear;
670 
671 		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
672 		oe_status = oe_clear;
673 
674 		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
675 			dev_err(dev, "DMA output error: 0x%x\n",
676 				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
677 				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
678 			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
679 		}
680 		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
681 			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
682 				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
683 				>> CIO2_INT_EXT_OE_OES_SHIFT);
684 			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
685 		}
686 		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
687 		if (oe_status)
688 			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
689 				 oe_status);
690 		int_status &= ~CIO2_INT_IOOE;
691 	}
692 
693 	if (int_status & CIO2_INT_IOC_MASK) {
694 		/* DMA IO done -- frame ready */
695 		u32 clr = 0;
696 		unsigned int d;
697 
698 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
699 			if (int_status & CIO2_INT_IOC(d)) {
700 				clr |= CIO2_INT_IOC(d);
701 				cio2_buffer_done(cio2, d);
702 			}
703 		int_status &= ~clr;
704 	}
705 
706 	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
707 		/* DMA IO starts or reached specified line */
708 		u32 clr = 0;
709 		unsigned int d;
710 
711 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
712 			if (int_status & CIO2_INT_IOS_IOLN(d)) {
713 				clr |= CIO2_INT_IOS_IOLN(d);
714 				if (d == CIO2_DMA_CHAN)
715 					cio2_queue_event_sof(cio2,
716 							     cio2->cur_queue);
717 			}
718 		int_status &= ~clr;
719 	}
720 
721 	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
722 		/* CSI2 receiver (error) interrupt */
723 		unsigned int port;
724 		u32 ie_status;
725 
726 		ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
727 
728 		for (port = 0; port < CIO2_NUM_PORTS; port++) {
729 			u32 port_status = (ie_status >> (port * 8)) & 0xff;
730 
731 			cio2_irq_log_port_errs(dev, port, port_status);
732 
733 			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
734 				void __iomem *csi_rx_base =
735 						base + CIO2_REG_PIPE_BASE(port);
736 				u32 csi2_status;
737 
738 				csi2_status = readl(csi_rx_base +
739 						CIO2_REG_IRQCTRL_STATUS);
740 
741 				cio2_irq_log_irq_errs(dev, port, csi2_status);
742 
743 				writel(csi2_status,
744 				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
745 			}
746 		}
747 
748 		writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
749 
750 		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
751 	}
752 
753 	if (int_status)
754 		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
755 }
756 
757 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
758 {
759 	struct cio2_device *cio2 = cio2_ptr;
760 	void __iomem *const base = cio2->base;
761 	struct device *dev = &cio2->pci_dev->dev;
762 	u32 int_status;
763 
764 	int_status = readl(base + CIO2_REG_INT_STS);
765 	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
766 	if (!int_status)
767 		return IRQ_NONE;
768 
769 	do {
770 		writel(int_status, base + CIO2_REG_INT_STS);
771 		cio2_irq_handle_once(cio2, int_status);
772 		int_status = readl(base + CIO2_REG_INT_STS);
773 		if (int_status)
774 			dev_dbg(dev, "pending status 0x%x\n", int_status);
775 	} while (int_status);
776 
777 	return IRQ_HANDLED;
778 }
779 
780 /**************** Videobuf2 interface ****************/
781 
782 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
783 					enum vb2_buffer_state state)
784 {
785 	unsigned int i;
786 
787 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
788 		if (q->bufs[i]) {
789 			atomic_dec(&q->bufs_queued);
790 			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
791 					state);
792 			q->bufs[i] = NULL;
793 		}
794 	}
795 }
796 
797 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
798 				unsigned int *num_buffers,
799 				unsigned int *num_planes,
800 				unsigned int sizes[],
801 				struct device *alloc_devs[])
802 {
803 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
804 	struct device *dev = &cio2->pci_dev->dev;
805 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
806 	unsigned int i;
807 
808 	if (*num_planes && *num_planes < q->format.num_planes)
809 		return -EINVAL;
810 
811 	for (i = 0; i < q->format.num_planes; ++i) {
812 		if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
813 			return -EINVAL;
814 		sizes[i] = q->format.plane_fmt[i].sizeimage;
815 		alloc_devs[i] = dev;
816 	}
817 
818 	*num_planes = q->format.num_planes;
819 	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
820 
821 	/* Initialize buffer queue */
822 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
823 		q->bufs[i] = NULL;
824 		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
825 	}
826 	atomic_set(&q->bufs_queued, 0);
827 	q->bufs_first = 0;
828 	q->bufs_next = 0;
829 
830 	return 0;
831 }
832 
833 /* Called after each buffer is allocated */
834 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
835 {
836 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
837 	struct device *dev = &cio2->pci_dev->dev;
838 	struct cio2_buffer *b = to_cio2_buffer(vb);
839 	unsigned int pages = PFN_UP(vb->planes[0].length);
840 	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
841 	struct sg_table *sg;
842 	struct sg_dma_page_iter sg_iter;
843 	unsigned int i, j;
844 
845 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
846 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
847 			vb->planes[0].length);
848 		return -ENOSPC;		/* Should never happen */
849 	}
850 
851 	memset(b->lop, 0, sizeof(b->lop));
852 	/* Allocate LOP table */
853 	for (i = 0; i < lops; i++) {
854 		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
855 					       &b->lop_bus_addr[i], GFP_KERNEL);
856 		if (!b->lop[i])
857 			goto fail;
858 	}
859 
860 	/* Fill LOP */
861 	sg = vb2_dma_sg_plane_desc(vb, 0);
862 	if (!sg)
863 		return -ENOMEM;
864 
865 	if (sg->nents && sg->sgl)
866 		b->offset = sg->sgl->offset;
867 
868 	i = j = 0;
869 	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
870 		if (!pages--)
871 			break;
872 		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
873 		j++;
874 		if (j == CIO2_LOP_ENTRIES) {
875 			i++;
876 			j = 0;
877 		}
878 	}
879 
880 	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
881 	return 0;
882 fail:
883 	while (i--)
884 		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
885 	return -ENOMEM;
886 }
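/*
 * Worked example of the LOP sizing above (hypothetical frame, reusing the
 * earlier bytesperline arithmetic): a 4224x3136 10-bit frame needs
 * 5440 * 3136 = 17059840 bytes = 4165 pages, so with the extra trailing
 * dummy entry lops = DIV_ROUND_UP(4166, 1024) = 5 LOP pages are allocated.
 */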
887 
888 /* Transfer buffer ownership to cio2 */
889 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
890 {
891 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
892 	struct device *dev = &cio2->pci_dev->dev;
893 	struct cio2_queue *q =
894 		container_of(vb->vb2_queue, struct cio2_queue, vbq);
895 	struct cio2_buffer *b = to_cio2_buffer(vb);
896 	struct cio2_fbpt_entry *entry;
897 	unsigned long flags;
898 	unsigned int i, j, next = q->bufs_next;
899 	int bufs_queued = atomic_inc_return(&q->bufs_queued);
900 	u32 fbpt_rp;
901 
902 	dev_dbg(dev, "queue buffer %d\n", vb->index);
903 
904 	/*
905 	 * This code queues the buffer to the CIO2 DMA engine, which starts
906 	 * running once streaming has started. It is possible that this code
907 	 * gets preempted due to increased CPU load, in which case the driver
908 	 * does not get an opportunity to queue new buffers to the CIO2 DMA
909 	 * engine. When the DMA engine encounters an FBPT entry without the
910 	 * VALID bit set, it halts, and restarting the DMA engine and the
911 	 * sensor is then required to continue streaming.
912 	 * This is undesirable but also highly unlikely, given that the DMA
913 	 * engine has to process 32 FBPT entries before it can run into an
914 	 * FBPT entry without the VALID bit set. We mitigate this further
915 	 * by disabling interrupts for the duration of this queueing.
916 	 */
917 	local_irq_save(flags);
918 
919 	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
920 		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
921 		   & CIO2_CDMARI_FBPT_RP_MASK;
922 
923 	/*
924 	 * fbpt_rp is the fbpt entry that the dma is currently working
925 	 * on, but since it could jump to the next entry at any time,
926 	 * assume that we might already be there.
927 	 */
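	/*
	 * Illustration with hypothetical values: if the read pointer reads 5,
	 * the engine is assumed to possibly be at entry 6 already, and when
	 * the queue had drained the new buffer is placed at entry 7 so the
	 * DMA never catches an entry that is still being written.
	 */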
928 	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
929 
930 	if (bufs_queued <= 1 || fbpt_rp == next)
931 		/* Buffers were drained */
932 		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
933 
934 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
935 		/*
936 		 * We have allocated CIO2_MAX_BUFFERS circularly for the
937 		 * hw, while the user has requested a queue of N buffers. The
938 		 * driver ensures N <= CIO2_MAX_BUFFERS and guarantees that
939 		 * whenever the user queues a buffer, a free FBPT slot exists.
940 		 */
941 		if (!q->bufs[next]) {
942 			q->bufs[next] = b;
943 			entry = &q->fbpt[next * CIO2_MAX_LOPS];
944 			cio2_fbpt_entry_init_buf(cio2, b, entry);
945 			local_irq_restore(flags);
946 			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
947 			for (j = 0; j < vb->num_planes; j++)
948 				vb2_set_plane_payload(vb, j,
949 					q->format.plane_fmt[j].sizeimage);
950 			return;
951 		}
952 
953 		dev_dbg(dev, "entry %i was full!\n", next);
954 		next = (next + 1) % CIO2_MAX_BUFFERS;
955 	}
956 
957 	local_irq_restore(flags);
958 	dev_err(dev, "error: all cio2 entries were full!\n");
959 	atomic_dec(&q->bufs_queued);
960 	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
961 }
962 
963 /* Called when each buffer is freed */
964 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
965 {
966 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
967 	struct device *dev = &cio2->pci_dev->dev;
968 	struct cio2_buffer *b = to_cio2_buffer(vb);
969 	unsigned int i;
970 
971 	/* Free LOP table */
972 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
973 		if (b->lop[i])
974 			dma_free_coherent(dev, PAGE_SIZE,
975 					  b->lop[i], b->lop_bus_addr[i]);
976 	}
977 }
978 
979 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
980 {
981 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
982 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
983 	struct device *dev = &cio2->pci_dev->dev;
984 	int r;
985 
986 	cio2->cur_queue = q;
987 	atomic_set(&q->frame_sequence, 0);
988 
989 	r = pm_runtime_resume_and_get(dev);
990 	if (r < 0) {
991 		dev_info(dev, "failed to set power %d\n", r);
992 		return r;
993 	}
994 
995 	r = video_device_pipeline_start(&q->vdev, &q->pipe);
996 	if (r)
997 		goto fail_pipeline;
998 
999 	r = cio2_hw_init(cio2, q);
1000 	if (r)
1001 		goto fail_hw;
1002 
1003 	/* Start streaming on sensor */
1004 	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1005 	if (r)
1006 		goto fail_csi2_subdev;
1007 
1008 	cio2->streaming = true;
1009 
1010 	return 0;
1011 
1012 fail_csi2_subdev:
1013 	cio2_hw_exit(cio2, q);
1014 fail_hw:
1015 	video_device_pipeline_stop(&q->vdev);
1016 fail_pipeline:
1017 	dev_dbg(dev, "failed to start streaming (%d)\n", r);
1018 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1019 	pm_runtime_put(dev);
1020 
1021 	return r;
1022 }
1023 
1024 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1025 {
1026 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1027 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1028 	struct device *dev = &cio2->pci_dev->dev;
1029 
1030 	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1031 		dev_err(dev, "failed to stop sensor streaming\n");
1032 
1033 	cio2_hw_exit(cio2, q);
1034 	synchronize_irq(cio2->pci_dev->irq);
1035 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1036 	video_device_pipeline_stop(&q->vdev);
1037 	pm_runtime_put(dev);
1038 	cio2->streaming = false;
1039 }
1040 
1041 static const struct vb2_ops cio2_vb2_ops = {
1042 	.buf_init = cio2_vb2_buf_init,
1043 	.buf_queue = cio2_vb2_buf_queue,
1044 	.buf_cleanup = cio2_vb2_buf_cleanup,
1045 	.queue_setup = cio2_vb2_queue_setup,
1046 	.start_streaming = cio2_vb2_start_streaming,
1047 	.stop_streaming = cio2_vb2_stop_streaming,
1048 	.wait_prepare = vb2_ops_wait_prepare,
1049 	.wait_finish = vb2_ops_wait_finish,
1050 };
1051 
1052 /**************** V4L2 interface ****************/
1053 
1054 static int cio2_v4l2_querycap(struct file *file, void *fh,
1055 			      struct v4l2_capability *cap)
1056 {
1057 	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1058 	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1059 
1060 	return 0;
1061 }
1062 
1063 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1064 			      struct v4l2_fmtdesc *f)
1065 {
1066 	if (f->index >= ARRAY_SIZE(formats))
1067 		return -EINVAL;
1068 
1069 	f->pixelformat = formats[f->index].fourcc;
1070 
1071 	return 0;
1072 }
1073 
1074 /* The format is validated in cio2_video_link_validate() */
1075 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1076 {
1077 	struct cio2_queue *q = file_to_cio2_queue(file);
1078 
1079 	f->fmt.pix_mp = q->format;
1080 
1081 	return 0;
1082 }
1083 
1084 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1085 {
1086 	const struct ipu3_cio2_fmt *fmt;
1087 	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1088 
1089 	fmt = cio2_find_format(&mpix->pixelformat, NULL);
1090 	if (!fmt)
1091 		fmt = &formats[0];
1092 
1093 	/* Only supports up to 4224x3136 */
1094 	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1095 		mpix->width = CIO2_IMAGE_MAX_WIDTH;
1096 	if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1097 		mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1098 
1099 	mpix->num_planes = 1;
1100 	mpix->pixelformat = fmt->fourcc;
1101 	mpix->colorspace = V4L2_COLORSPACE_RAW;
1102 	mpix->field = V4L2_FIELD_NONE;
1103 	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1104 	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1105 							mpix->height;
1106 
1107 	/* use default */
1108 	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1109 	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1110 	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1111 
1112 	return 0;
1113 }
1114 
1115 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1116 {
1117 	struct cio2_queue *q = file_to_cio2_queue(file);
1118 
1119 	cio2_v4l2_try_fmt(file, fh, f);
1120 	q->format = f->fmt.pix_mp;
1121 
1122 	return 0;
1123 }
1124 
1125 static int
1126 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1127 {
1128 	if (input->index > 0)
1129 		return -EINVAL;
1130 
1131 	strscpy(input->name, "camera", sizeof(input->name));
1132 	input->type = V4L2_INPUT_TYPE_CAMERA;
1133 
1134 	return 0;
1135 }
1136 
1137 static int
1138 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1139 {
1140 	*input = 0;
1141 
1142 	return 0;
1143 }
1144 
1145 static int
1146 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1147 {
1148 	return input == 0 ? 0 : -EINVAL;
1149 }
1150 
1151 static const struct v4l2_file_operations cio2_v4l2_fops = {
1152 	.owner = THIS_MODULE,
1153 	.unlocked_ioctl = video_ioctl2,
1154 	.open = v4l2_fh_open,
1155 	.release = vb2_fop_release,
1156 	.poll = vb2_fop_poll,
1157 	.mmap = vb2_fop_mmap,
1158 };
1159 
1160 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1161 	.vidioc_querycap = cio2_v4l2_querycap,
1162 	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1163 	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1164 	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1165 	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1166 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
1167 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
1168 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1169 	.vidioc_querybuf = vb2_ioctl_querybuf,
1170 	.vidioc_qbuf = vb2_ioctl_qbuf,
1171 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
1172 	.vidioc_streamon = vb2_ioctl_streamon,
1173 	.vidioc_streamoff = vb2_ioctl_streamoff,
1174 	.vidioc_expbuf = vb2_ioctl_expbuf,
1175 	.vidioc_enum_input = cio2_video_enum_input,
1176 	.vidioc_g_input	= cio2_video_g_input,
1177 	.vidioc_s_input	= cio2_video_s_input,
1178 };
1179 
1180 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1181 				       struct v4l2_fh *fh,
1182 				       struct v4l2_event_subscription *sub)
1183 {
1184 	if (sub->type != V4L2_EVENT_FRAME_SYNC)
1185 		return -EINVAL;
1186 
1187 	/* Line number. For now only zero accepted. */
1188 	if (sub->id != 0)
1189 		return -EINVAL;
1190 
1191 	return v4l2_event_subscribe(fh, sub, 0, NULL);
1192 }
1193 
1194 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1195 {
1196 	struct v4l2_mbus_framefmt *format;
1197 	const struct v4l2_mbus_framefmt fmt_default = {
1198 		.width = 1936,
1199 		.height = 1096,
1200 		.code = formats[0].mbus_code,
1201 		.field = V4L2_FIELD_NONE,
1202 		.colorspace = V4L2_COLORSPACE_RAW,
1203 		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1204 		.quantization = V4L2_QUANTIZATION_DEFAULT,
1205 		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
1206 	};
1207 
1208 	/* Initialize try_fmt */
1209 	format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SINK);
1210 	*format = fmt_default;
1211 
1212 	/* same as sink */
1213 	format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SOURCE);
1214 	*format = fmt_default;
1215 
1216 	return 0;
1217 }
1218 
1219 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1220 			       struct v4l2_subdev_state *sd_state,
1221 			       struct v4l2_subdev_format *fmt)
1222 {
1223 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1224 
1225 	mutex_lock(&q->subdev_lock);
1226 
1227 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1228 		fmt->format = *v4l2_subdev_state_get_format(sd_state,
1229 							    fmt->pad);
1230 	else
1231 		fmt->format = q->subdev_fmt;
1232 
1233 	mutex_unlock(&q->subdev_lock);
1234 
1235 	return 0;
1236 }
1237 
1238 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1239 			       struct v4l2_subdev_state *sd_state,
1240 			       struct v4l2_subdev_format *fmt)
1241 {
1242 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1243 	struct v4l2_mbus_framefmt *mbus;
1244 	u32 mbus_code = fmt->format.code;
1245 	unsigned int i;
1246 
1247 	/*
1248 	 * Only allow setting sink pad format;
1249 	 * source always propagates from sink
1250 	 */
1251 	if (fmt->pad == CIO2_PAD_SOURCE)
1252 		return cio2_subdev_get_fmt(sd, sd_state, fmt);
1253 
1254 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1255 		mbus = v4l2_subdev_state_get_format(sd_state, fmt->pad);
1256 	else
1257 		mbus = &q->subdev_fmt;
1258 
1259 	fmt->format.code = formats[0].mbus_code;
1260 
1261 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
1262 		if (formats[i].mbus_code == mbus_code) {
1263 			fmt->format.code = mbus_code;
1264 			break;
1265 		}
1266 	}
1267 
1268 	fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1269 	fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1270 	fmt->format.field = V4L2_FIELD_NONE;
1271 
1272 	mutex_lock(&q->subdev_lock);
1273 	*mbus = fmt->format;
1274 	mutex_unlock(&q->subdev_lock);
1275 
1276 	return 0;
1277 }
1278 
1279 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1280 				      struct v4l2_subdev_state *sd_state,
1281 				      struct v4l2_subdev_mbus_code_enum *code)
1282 {
1283 	if (code->index >= ARRAY_SIZE(formats))
1284 		return -EINVAL;
1285 
1286 	code->code = formats[code->index].mbus_code;
1287 	return 0;
1288 }
1289 
1290 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1291 						struct v4l2_subdev_format *fmt)
1292 {
1293 	if (is_media_entity_v4l2_subdev(pad->entity)) {
1294 		struct v4l2_subdev *sd =
1295 			media_entity_to_v4l2_subdev(pad->entity);
1296 
1297 		memset(fmt, 0, sizeof(*fmt));
1298 		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1299 		fmt->pad = pad->index;
1300 		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1301 	}
1302 
1303 	return -EINVAL;
1304 }
1305 
1306 static int cio2_video_link_validate(struct media_link *link)
1307 {
1308 	struct media_entity *entity = link->sink->entity;
1309 	struct video_device *vd = media_entity_to_video_device(entity);
1310 	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1311 	struct cio2_device *cio2 = video_get_drvdata(vd);
1312 	struct device *dev = &cio2->pci_dev->dev;
1313 	struct v4l2_subdev_format source_fmt;
1314 	int ret;
1315 
1316 	if (!media_pad_remote_pad_first(entity->pads)) {
1317 		dev_info(dev, "video node %s pad not connected\n", vd->name);
1318 		return -ENOTCONN;
1319 	}
1320 
1321 	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1322 	if (ret < 0)
1323 		return 0;
1324 
1325 	if (source_fmt.format.width != q->format.width ||
1326 	    source_fmt.format.height != q->format.height) {
1327 		dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
1328 			q->format.width, q->format.height,
1329 			source_fmt.format.width, source_fmt.format.height);
1330 		return -EINVAL;
1331 	}
1332 
1333 	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1334 		return -EINVAL;
1335 
1336 	return 0;
1337 }
1338 
1339 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1340 	.subscribe_event = cio2_subdev_subscribe_event,
1341 	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
1342 };
1343 
1344 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1345 	.open = cio2_subdev_open,
1346 };
1347 
1348 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1349 	.link_validate = v4l2_subdev_link_validate_default,
1350 	.get_fmt = cio2_subdev_get_fmt,
1351 	.set_fmt = cio2_subdev_set_fmt,
1352 	.enum_mbus_code = cio2_subdev_enum_mbus_code,
1353 };
1354 
1355 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1356 	.core = &cio2_subdev_core_ops,
1357 	.pad = &cio2_subdev_pad_ops,
1358 };
1359 
1360 /******* V4L2 sub-device asynchronous registration callbacks***********/
1361 
1362 struct sensor_async_subdev {
1363 	struct v4l2_async_connection asd;
1364 	struct csi2_bus_info csi2;
1365 };
1366 
1367 #define to_sensor_asd(__asd)	\
1368 	container_of_const(__asd, struct sensor_async_subdev, asd)
1369 
1370 /* The .bound() notifier callback when a match is found */
1371 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1372 			       struct v4l2_subdev *sd,
1373 			       struct v4l2_async_connection *asd)
1374 {
1375 	struct cio2_device *cio2 = to_cio2_device(notifier);
1376 	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1377 	struct cio2_queue *q;
1378 	int ret;
1379 
1380 	if (cio2->queue[s_asd->csi2.port].sensor)
1381 		return -EBUSY;
1382 
1383 	ret = ipu_bridge_instantiate_vcm(sd->dev);
1384 	if (ret)
1385 		return ret;
1386 
1387 	q = &cio2->queue[s_asd->csi2.port];
1388 
1389 	q->csi2 = s_asd->csi2;
1390 	q->sensor = sd;
1391 	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1392 
1393 	return 0;
1394 }
1395 
1396 /* The .unbind callback */
1397 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1398 				 struct v4l2_subdev *sd,
1399 				 struct v4l2_async_connection *asd)
1400 {
1401 	struct cio2_device *cio2 = to_cio2_device(notifier);
1402 	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1403 
1404 	cio2->queue[s_asd->csi2.port].sensor = NULL;
1405 }
1406 
1407 /* .complete() is called after all subdevices have been located */
1408 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1409 {
1410 	struct cio2_device *cio2 = to_cio2_device(notifier);
1411 	struct sensor_async_subdev *s_asd;
1412 	struct v4l2_async_connection *asd;
1413 	struct cio2_queue *q;
1414 	int ret;
1415 
1416 	list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
1417 		s_asd = to_sensor_asd(asd);
1418 		q = &cio2->queue[s_asd->csi2.port];
1419 
1420 		ret = v4l2_create_fwnode_links_to_pad(asd->sd,
1421 						      &q->subdev_pads[CIO2_PAD_SINK], 0);
1422 		if (ret)
1423 			return ret;
1424 	}
1425 
1426 	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1427 }
1428 
1429 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1430 	.bound = cio2_notifier_bound,
1431 	.unbind = cio2_notifier_unbind,
1432 	.complete = cio2_notifier_complete,
1433 };
1434 
1435 static int cio2_parse_firmware(struct cio2_device *cio2)
1436 {
1437 	struct device *dev = &cio2->pci_dev->dev;
1438 	unsigned int i;
1439 	int ret;
1440 
1441 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
1442 		struct v4l2_fwnode_endpoint vep = {
1443 			.bus_type = V4L2_MBUS_CSI2_DPHY
1444 		};
1445 		struct sensor_async_subdev *s_asd;
1446 		struct fwnode_handle *ep;
1447 
1448 		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
1449 						FWNODE_GRAPH_ENDPOINT_NEXT);
1450 		if (!ep)
1451 			continue;
1452 
1453 		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1454 		if (ret)
1455 			goto err_parse;
1456 
1457 		s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
1458 							struct
1459 							sensor_async_subdev);
1460 		if (IS_ERR(s_asd)) {
1461 			ret = PTR_ERR(s_asd);
1462 			goto err_parse;
1463 		}
1464 
1465 		s_asd->csi2.port = vep.base.port;
1466 		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1467 
1468 		fwnode_handle_put(ep);
1469 
1470 		continue;
1471 
1472 err_parse:
1473 		fwnode_handle_put(ep);
1474 		return ret;
1475 	}
1476 
1477 	/*
1478 	 * Proceed even without sensors connected to allow the device to
1479 	 * suspend.
1480 	 */
1481 	cio2->notifier.ops = &cio2_async_ops;
1482 	ret = v4l2_async_nf_register(&cio2->notifier);
1483 	if (ret)
1484 		dev_err(dev, "failed to register async notifier : %d\n", ret);
1485 
1486 	return ret;
1487 }
1488 
1489 /**************** Queue initialization ****************/
1490 static const struct media_entity_operations cio2_media_ops = {
1491 	.link_validate = v4l2_subdev_link_validate,
1492 };
1493 
1494 static const struct media_entity_operations cio2_video_entity_ops = {
1495 	.link_validate = cio2_video_link_validate,
1496 };
1497 
1498 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1499 {
1500 	static const u32 default_width = 1936;
1501 	static const u32 default_height = 1096;
1502 	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1503 	struct device *dev = &cio2->pci_dev->dev;
1504 	struct video_device *vdev = &q->vdev;
1505 	struct vb2_queue *vbq = &q->vbq;
1506 	struct v4l2_subdev *subdev = &q->subdev;
1507 	struct v4l2_mbus_framefmt *fmt;
1508 	int r;
1509 
1510 	/* Initialize miscellaneous variables */
1511 	mutex_init(&q->lock);
1512 	mutex_init(&q->subdev_lock);
1513 
1514 	/* Initialize formats to default values */
1515 	fmt = &q->subdev_fmt;
1516 	fmt->width = default_width;
1517 	fmt->height = default_height;
1518 	fmt->code = dflt_fmt.mbus_code;
1519 	fmt->field = V4L2_FIELD_NONE;
1520 
1521 	q->format.width = default_width;
1522 	q->format.height = default_height;
1523 	q->format.pixelformat = dflt_fmt.fourcc;
1524 	q->format.colorspace = V4L2_COLORSPACE_RAW;
1525 	q->format.field = V4L2_FIELD_NONE;
1526 	q->format.num_planes = 1;
1527 	q->format.plane_fmt[0].bytesperline =
1528 				cio2_bytesperline(q->format.width);
1529 	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1530 						q->format.height;
1531 
1532 	/* Initialize fbpt */
1533 	r = cio2_fbpt_init(cio2, q);
1534 	if (r)
1535 		goto fail_fbpt;
1536 
1537 	/* Initialize media entities */
1538 	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1539 		MEDIA_PAD_FL_MUST_CONNECT;
1540 	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1541 	subdev->entity.ops = &cio2_media_ops;
1542 	subdev->internal_ops = &cio2_subdev_internal_ops;
1543 	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1544 	if (r) {
1545 		dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
1546 		goto fail_subdev_media_entity;
1547 	}
1548 
1549 	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1550 	vdev->entity.ops = &cio2_video_entity_ops;
1551 	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1552 	if (r) {
1553 		dev_err(dev, "failed initialize videodev media entity (%d)\n",
1554 			r);
1555 		goto fail_vdev_media_entity;
1556 	}
1557 
1558 	/* Initialize subdev */
1559 	v4l2_subdev_init(subdev, &cio2_subdev_ops);
1560 	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1561 	subdev->owner = THIS_MODULE;
1562 	subdev->dev = dev;
1563 	snprintf(subdev->name, sizeof(subdev->name),
1564 		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1565 	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1566 	v4l2_set_subdevdata(subdev, cio2);
1567 	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1568 	if (r) {
1569 		dev_err(dev, "failed initialize subdev (%d)\n", r);
1570 		goto fail_subdev;
1571 	}
1572 
1573 	/* Initialize vbq */
1574 	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1575 	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1576 	vbq->ops = &cio2_vb2_ops;
1577 	vbq->mem_ops = &vb2_dma_sg_memops;
1578 	vbq->buf_struct_size = sizeof(struct cio2_buffer);
1579 	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1580 	vbq->min_queued_buffers = 1;
1581 	vbq->drv_priv = cio2;
1582 	vbq->lock = &q->lock;
1583 	r = vb2_queue_init(vbq);
1584 	if (r) {
1585 		dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
1586 		goto fail_subdev;
1587 	}
1588 
1589 	/* Initialize vdev */
1590 	snprintf(vdev->name, sizeof(vdev->name),
1591 		 "%s %td", CIO2_NAME, q - cio2->queue);
1592 	vdev->release = video_device_release_empty;
1593 	vdev->fops = &cio2_v4l2_fops;
1594 	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1595 	vdev->lock = &cio2->lock;
1596 	vdev->v4l2_dev = &cio2->v4l2_dev;
1597 	vdev->queue = &q->vbq;
1598 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1599 	video_set_drvdata(vdev, cio2);
1600 	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1601 	if (r) {
1602 		dev_err(dev, "failed to register video device (%d)\n", r);
1603 		goto fail_vdev;
1604 	}
1605 
1606 	/* Create link from CIO2 subdev to output node */
1607 	r = media_create_pad_link(
1608 		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1609 		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1610 	if (r)
1611 		goto fail_link;
1612 
1613 	return 0;
1614 
1615 fail_link:
1616 	vb2_video_unregister_device(&q->vdev);
1617 fail_vdev:
1618 	v4l2_device_unregister_subdev(subdev);
1619 fail_subdev:
1620 	media_entity_cleanup(&vdev->entity);
1621 fail_vdev_media_entity:
1622 	media_entity_cleanup(&subdev->entity);
1623 fail_subdev_media_entity:
1624 	cio2_fbpt_exit(q, dev);
1625 fail_fbpt:
1626 	mutex_destroy(&q->subdev_lock);
1627 	mutex_destroy(&q->lock);
1628 
1629 	return r;
1630 }
1631 
1632 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1633 {
1634 	vb2_video_unregister_device(&q->vdev);
1635 	media_entity_cleanup(&q->vdev.entity);
1636 	v4l2_device_unregister_subdev(&q->subdev);
1637 	media_entity_cleanup(&q->subdev.entity);
1638 	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1639 	mutex_destroy(&q->subdev_lock);
1640 	mutex_destroy(&q->lock);
1641 }
1642 
1643 static int cio2_queues_init(struct cio2_device *cio2)
1644 {
1645 	int i, r;
1646 
1647 	for (i = 0; i < CIO2_QUEUES; i++) {
1648 		r = cio2_queue_init(cio2, &cio2->queue[i]);
1649 		if (r)
1650 			break;
1651 	}
1652 
1653 	if (i == CIO2_QUEUES)
1654 		return 0;
1655 
1656 	for (i--; i >= 0; i--)
1657 		cio2_queue_exit(cio2, &cio2->queue[i]);
1658 
1659 	return r;
1660 }
1661 
1662 static void cio2_queues_exit(struct cio2_device *cio2)
1663 {
1664 	unsigned int i;
1665 
1666 	for (i = 0; i < CIO2_QUEUES; i++)
1667 		cio2_queue_exit(cio2, &cio2->queue[i]);
1668 }
1669 
1670 /**************** PCI interface ****************/
1671 
1672 static int cio2_pci_probe(struct pci_dev *pci_dev,
1673 			  const struct pci_device_id *id)
1674 {
1675 	struct device *dev = &pci_dev->dev;
1676 	struct cio2_device *cio2;
1677 	int r;
1678 
1679 	/*
1680 	 * On some platforms no connections to sensors are defined in firmware.
1681 	 * If the device has no endpoints, we can try to build them as
1682 	 * software_nodes parsed from SSDB.
1683 	 */
1684 	r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
1685 	if (r)
1686 		return r;
1687 
1688 	cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
1689 	if (!cio2)
1690 		return -ENOMEM;
1691 	cio2->pci_dev = pci_dev;
1692 
1693 	r = pcim_enable_device(pci_dev);
1694 	if (r) {
1695 		dev_err(dev, "failed to enable device (%d)\n", r);
1696 		return r;
1697 	}
1698 
1699 	dev_info(dev, "device 0x%x (rev: 0x%x)\n",
1700 		 pci_dev->device, pci_dev->revision);
1701 
1702 	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1703 	if (r) {
1704 		dev_err(dev, "failed to remap I/O memory (%d)\n", r);
1705 		return -ENODEV;
1706 	}
1707 
1708 	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1709 
1710 	pci_set_drvdata(pci_dev, cio2);
1711 
1712 	pci_set_master(pci_dev);
1713 
1714 	r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
1715 	if (r) {
1716 		dev_err(dev, "failed to set DMA mask (%d)\n", r);
1717 		return -ENODEV;
1718 	}
1719 
1720 	r = pci_enable_msi(pci_dev);
1721 	if (r) {
1722 		dev_err(dev, "failed to enable MSI (%d)\n", r);
1723 		return r;
1724 	}
1725 
1726 	r = cio2_fbpt_init_dummy(cio2);
1727 	if (r)
1728 		return r;
1729 
1730 	mutex_init(&cio2->lock);
1731 
1732 	cio2->media_dev.dev = dev;
1733 	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1734 		sizeof(cio2->media_dev.model));
1735 	cio2->media_dev.hw_revision = 0;
1736 
1737 	media_device_init(&cio2->media_dev);
1738 	r = media_device_register(&cio2->media_dev);
1739 	if (r < 0)
1740 		goto fail_mutex_destroy;
1741 
1742 	cio2->v4l2_dev.mdev = &cio2->media_dev;
1743 	r = v4l2_device_register(dev, &cio2->v4l2_dev);
1744 	if (r) {
1745 		dev_err(dev, "failed to register V4L2 device (%d)\n", r);
1746 		goto fail_media_device_unregister;
1747 	}
1748 
1749 	r = cio2_queues_init(cio2);
1750 	if (r)
1751 		goto fail_v4l2_device_unregister;
1752 
1753 	v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
1754 
1755 	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
1756 			     CIO2_NAME, cio2);
1757 	if (r) {
1758 		dev_err(dev, "failed to request IRQ (%d)\n", r);
1759 		goto fail_clean_notifier;
1760 	}
1761 
1762 	/* Register notifier for the subdevices we care about */
1763 	r = cio2_parse_firmware(cio2);
1764 	if (r)
1765 		goto fail_clean_notifier;
1766 
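	/*
	 * The PCI core increments the runtime PM usage count around probe;
	 * drop that reference without idling and allow runtime PM so the
	 * device can suspend when idle. cio2_pci_remove() balances this with
	 * pm_runtime_forbid() and pm_runtime_get_noresume().
	 */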
1767 	pm_runtime_put_noidle(dev);
1768 	pm_runtime_allow(dev);
1769 
1770 	return 0;
1771 
1772 fail_clean_notifier:
1773 	v4l2_async_nf_unregister(&cio2->notifier);
1774 	v4l2_async_nf_cleanup(&cio2->notifier);
1775 	cio2_queues_exit(cio2);
1776 fail_v4l2_device_unregister:
1777 	v4l2_device_unregister(&cio2->v4l2_dev);
1778 fail_media_device_unregister:
1779 	media_device_unregister(&cio2->media_dev);
1780 	media_device_cleanup(&cio2->media_dev);
1781 fail_mutex_destroy:
1782 	mutex_destroy(&cio2->lock);
1783 	cio2_fbpt_exit_dummy(cio2);
1784 
1785 	return r;
1786 }
1787 
1788 static void cio2_pci_remove(struct pci_dev *pci_dev)
1789 {
1790 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1791 
1792 	media_device_unregister(&cio2->media_dev);
1793 	v4l2_async_nf_unregister(&cio2->notifier);
1794 	v4l2_async_nf_cleanup(&cio2->notifier);
1795 	cio2_queues_exit(cio2);
1796 	cio2_fbpt_exit_dummy(cio2);
1797 	v4l2_device_unregister(&cio2->v4l2_dev);
1798 	media_device_cleanup(&cio2->media_dev);
1799 	mutex_destroy(&cio2->lock);
1800 
1801 	pm_runtime_forbid(&pci_dev->dev);
1802 	pm_runtime_get_noresume(&pci_dev->dev);
1803 }
1804 
1805 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1806 {
1807 	struct pci_dev *pci_dev = to_pci_dev(dev);
1808 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1809 	void __iomem *const base = cio2->base;
1810 
1811 	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1812 	dev_dbg(dev, "cio2 runtime suspend.\n");
1813 
1814 	return 0;
1815 }
1816 
1817 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1818 {
1819 	struct pci_dev *pci_dev = to_pci_dev(dev);
1820 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1821 	void __iomem *const base = cio2->base;
1822 
1823 	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1824 	dev_dbg(dev, "cio2 runtime resume.\n");
1825 
1826 	return 0;
1827 }
1828 
1829 /*
1830  * Helper function to rotate the elements of a circular buffer left by
1831  * "start" positions so that the element at index "start" becomes the first.
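 * "start" must be non-zero and smaller than "elems"; e.g. with elems = 5
 * and start = 2, {0 1 2 3 4} becomes {2 3 4 0 1}.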
1832  */
1833 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1834 {
1835 	struct {
1836 		size_t begin, end;
1837 	} arr[2] = {
1838 		{ 0, start - 1 },
1839 		{ start, elems - 1 },
1840 	};
1841 
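/* Number of elements in a chunk; "begin" and "end" are both inclusive. */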
1842 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1843 
1844 	/* Loop as long as we have out-of-place entries */
1845 	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1846 		size_t size0, i;
1847 
1848 		/*
1849 		 * Find the number of entries that can be arranged on this
1850 		 * iteration.
1851 		 */
1852 		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1853 
1854 		/* Swap the entries in two parts of the array. */
1855 		for (i = 0; i < size0; i++) {
1856 			u8 *d = ptr + elem_size * (arr[1].begin + i);
1857 			u8 *s = ptr + elem_size * (arr[0].begin + i);
1858 			size_t j;
1859 
1860 			for (j = 0; j < elem_size; j++)
1861 				swap(d[j], s[j]);
1862 		}
1863 
1864 		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1865 			/* The end of the first array remains unarranged. */
1866 			arr[0].begin += size0;
1867 		} else {
1868 			/*
1869 			 * The first array is fully arranged so we proceed
1870 			 * handling the next one.
1871 			 */
1872 			arr[0].begin = arr[1].begin;
1873 			arr[0].end = arr[1].begin + size0 - 1;
1874 			arr[1].begin += size0;
1875 		}
1876 	}
1877 }
1878 
1879 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1880 {
1881 	unsigned int i, j;
1882 
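	/* Find the first queued buffer, searching forward from bufs_first. */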
1883 	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1884 		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1885 		if (q->bufs[j])
1886 			break;
1887 
1888 	if (i == CIO2_MAX_BUFFERS)
1889 		return;
1890 
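	/*
	 * If that buffer is not already at FBPT entry 0, rotate both the FBPT
	 * and the bufs[] array left by "j" so that it lands at index 0.
	 */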
1891 	if (j) {
1892 		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1893 			CIO2_MAX_BUFFERS, j);
1894 		arrange(q->bufs, sizeof(struct cio2_buffer *),
1895 			CIO2_MAX_BUFFERS, j);
1896 	}
1897 
1898 	/*
1899 	 * DMA clears the valid bit when accessing the buffer.
1900 	 * When stopping the stream in the suspend callback, some of the buffers
1901 	 * may be in an invalid state. After resume, when DMA meets the invalid
1902 	 * buffer, it will halt and stop receiving new data.
1903 	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1904 	 */
1905 	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1906 		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1907 }
1908 
1909 static int __maybe_unused cio2_suspend(struct device *dev)
1910 {
1911 	struct pci_dev *pci_dev = to_pci_dev(dev);
1912 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1913 	struct cio2_queue *q = cio2->cur_queue;
1914 	int r;
1915 
1916 	dev_dbg(dev, "cio2 suspend\n");
1917 	if (!cio2->streaming)
1918 		return 0;
1919 
1920 	/* Stop stream */
1921 	r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
1922 	if (r) {
1923 		dev_err(dev, "failed to stop sensor streaming\n");
1924 		return r;
1925 	}
1926 
1927 	cio2_hw_exit(cio2, q);
1928 	synchronize_irq(pci_dev->irq);
1929 
1930 	pm_runtime_force_suspend(dev);
1931 
1932 	/*
1933 	 * Upon resume, hw starts to process the fbpt entries from the beginning,
1934 	 * so relocate the queued buffers to the fbpt head before suspend.
1935 	 */
1936 	cio2_fbpt_rearrange(cio2, q);
1937 	q->bufs_first = 0;
1938 	q->bufs_next = 0;
1939 
1940 	return 0;
1941 }
1942 
1943 static int __maybe_unused cio2_resume(struct device *dev)
1944 {
1945 	struct cio2_device *cio2 = dev_get_drvdata(dev);
1946 	struct cio2_queue *q = cio2->cur_queue;
1947 	int r;
1948 
1949 	dev_dbg(dev, "cio2 resume\n");
1950 	if (!cio2->streaming)
1951 		return 0;
1952 	/* Start stream */
1953 	r = pm_runtime_force_resume(dev);
1954 	if (r < 0) {
1955 		dev_err(dev, "failed to set power (%d)\n", r);
1956 		return r;
1957 	}
1958 
1959 	r = cio2_hw_init(cio2, q);
1960 	if (r) {
1961 		dev_err(dev, "failed to init cio2 hw\n");
1962 		return r;
1963 	}
1964 
1965 	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1966 	if (r) {
1967 		dev_err(dev, "failed to start sensor streaming\n");
1968 		cio2_hw_exit(cio2, q);
1969 	}
1970 
1971 	return r;
1972 }
1973 
1974 static const struct dev_pm_ops cio2_pm_ops = {
1975 	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
1976 	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
1977 };
1978 
1979 static const struct pci_device_id cio2_pci_id_table[] = {
1980 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
1981 	{ }
1982 };
1983 
1984 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
1985 
1986 static struct pci_driver cio2_pci_driver = {
1987 	.name = CIO2_NAME,
1988 	.id_table = cio2_pci_id_table,
1989 	.probe = cio2_pci_probe,
1990 	.remove = cio2_pci_remove,
1991 	.driver = {
1992 		.pm = &cio2_pm_ops,
1993 	},
1994 };
1995 
1996 module_pci_driver(cio2_pci_driver);
1997 
1998 MODULE_AUTHOR("Tuukka Toivonen");
1999 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2000 MODULE_AUTHOR("Jian Xu Zheng");
2001 MODULE_AUTHOR("Yuning Pu");
2002 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2003 MODULE_LICENSE("GPL v2");
2004 MODULE_DESCRIPTION("IPU3 CIO2 driver");
2005 MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
2006