xref: /linux/drivers/i2c/busses/i2c-imx-lpi2c.c (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * This is the i.MX low power I2C (LPI2C) controller driver.
4  *
5  * Copyright 2016 Freescale Semiconductor, Inc.
6  */
7 
8 #include <linux/clk.h>
9 #include <linux/completion.h>
10 #include <linux/delay.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/err.h>
14 #include <linux/errno.h>
15 #include <linux/i2c.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/pinctrl/consumer.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 
28 #define DRIVER_NAME "imx-lpi2c"
29 
30 #define LPI2C_PARAM	0x04	/* i2c RX/TX FIFO size */
31 #define LPI2C_MCR	0x10	/* i2c control register */
32 #define LPI2C_MSR	0x14	/* i2c status register */
33 #define LPI2C_MIER	0x18	/* i2c interrupt enable */
34 #define LPI2C_MDER	0x1C	/* i2c DMA enable */
35 #define LPI2C_MCFGR0	0x20	/* i2c master configuration */
36 #define LPI2C_MCFGR1	0x24	/* i2c master configuration */
37 #define LPI2C_MCFGR2	0x28	/* i2c master configuration */
38 #define LPI2C_MCFGR3	0x2C	/* i2c master configuration */
39 #define LPI2C_MCCR0	0x48	/* i2c master clk configuration */
40 #define LPI2C_MCCR1	0x50	/* i2c master clk configuration */
41 #define LPI2C_MFCR	0x58	/* i2c master FIFO control */
42 #define LPI2C_MFSR	0x5C	/* i2c master FIFO status */
43 #define LPI2C_MTDR	0x60	/* i2c master TX data register */
44 #define LPI2C_MRDR	0x70	/* i2c master RX data register */
45 
46 #define LPI2C_SCR	0x110	/* i2c target control register */
47 #define LPI2C_SSR	0x114	/* i2c target status register */
48 #define LPI2C_SIER	0x118	/* i2c target interrupt enable */
49 #define LPI2C_SDER	0x11C	/* i2c target DMA enable */
50 #define LPI2C_SCFGR0	0x120	/* i2c target configuration */
51 #define LPI2C_SCFGR1	0x124	/* i2c target configuration */
52 #define LPI2C_SCFGR2	0x128	/* i2c target configuration */
53 #define LPI2C_SAMR	0x140	/* i2c target address match */
54 #define LPI2C_SASR	0x150	/* i2c target address status */
55 #define LPI2C_STAR	0x154	/* i2c target transmit ACK */
56 #define LPI2C_STDR	0x160	/* i2c target transmit data */
57 #define LPI2C_SRDR	0x170	/* i2c target receive data */
58 #define LPI2C_SRDROR	0x178	/* i2c target receive data read only */
59 
60 /* i2c command */
61 #define TRAN_DATA	0x00
62 #define RECV_DATA	0x01
63 #define GEN_STOP	0x02
64 #define RECV_DISCARD	0x03
65 #define GEN_START	0x04
66 #define START_NACK	0x05
67 #define START_HIGH	0x06
68 #define START_HIGH_NACK	0x07
69 
70 #define MCR_MEN		BIT(0)
71 #define MCR_RST		BIT(1)
72 #define MCR_DOZEN	BIT(2)
73 #define MCR_DBGEN	BIT(3)
74 #define MCR_RTF		BIT(8)
75 #define MCR_RRF		BIT(9)
76 #define MSR_TDF		BIT(0)
77 #define MSR_RDF		BIT(1)
78 #define MSR_SDF		BIT(9)
79 #define MSR_NDF		BIT(10)
80 #define MSR_ALF		BIT(11)
81 #define MSR_MBF		BIT(24)
82 #define MSR_BBF		BIT(25)
83 #define MIER_TDIE	BIT(0)
84 #define MIER_RDIE	BIT(1)
85 #define MIER_SDIE	BIT(9)
86 #define MIER_NDIE	BIT(10)
87 #define MCFGR1_AUTOSTOP	BIT(8)
88 #define MCFGR1_IGNACK	BIT(9)
89 #define MRDR_RXEMPTY	BIT(14)
90 #define MDER_TDDE	BIT(0)
91 #define MDER_RDDE	BIT(1)
92 
93 #define SCR_SEN		BIT(0)
94 #define SCR_RST		BIT(1)
95 #define SCR_FILTEN	BIT(4)
96 #define SCR_RTF		BIT(8)
97 #define SCR_RRF		BIT(9)
98 #define SSR_TDF		BIT(0)
99 #define SSR_RDF		BIT(1)
100 #define SSR_AVF		BIT(2)
101 #define SSR_TAF		BIT(3)
102 #define SSR_RSF		BIT(8)
103 #define SSR_SDF		BIT(9)
104 #define SSR_BEF		BIT(10)
105 #define SSR_FEF		BIT(11)
106 #define SSR_SBF		BIT(24)
107 #define SSR_BBF		BIT(25)
108 #define SSR_CLEAR_BITS	(SSR_RSF | SSR_SDF | SSR_BEF | SSR_FEF)
109 #define SIER_TDIE	BIT(0)
110 #define SIER_RDIE	BIT(1)
111 #define SIER_AVIE	BIT(2)
112 #define SIER_TAIE	BIT(3)
113 #define SIER_RSIE	BIT(8)
114 #define SIER_SDIE	BIT(9)
115 #define SIER_BEIE	BIT(10)
116 #define SIER_FEIE	BIT(11)
117 #define SIER_AM0F	BIT(12)
118 #define SCFGR1_RXSTALL	BIT(1)
119 #define SCFGR1_TXDSTALL	BIT(2)
120 #define SCFGR2_FILTSDA_SHIFT	24
121 #define SCFGR2_FILTSCL_SHIFT	16
122 #define SCFGR2_CLKHOLD(x)	(x)
123 #define SCFGR2_FILTSDA(x)	((x) << SCFGR2_FILTSDA_SHIFT)
124 #define SCFGR2_FILTSCL(x)	((x) << SCFGR2_FILTSCL_SHIFT)
125 #define SASR_READ_REQ	0x1
126 #define SLAVE_INT_FLAG	(SIER_TDIE | SIER_RDIE | SIER_AVIE | \
127 			 SIER_SDIE | SIER_BEIE)
128 
129 #define I2C_CLK_RATIO	2
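/* A single receive command word transfers at most 256 bytes (DATA[7:0] + 1) */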
130 #define CHUNK_DATA	256
131 
132 #define I2C_PM_TIMEOUT		10 /* ms */
133 #define I2C_DMA_THRESHOLD	8 /* bytes */
134 
135 enum lpi2c_imx_mode {
136 	STANDARD,	/* 100+Kbps */
137 	FAST,		/* 400+Kbps */
138 	FAST_PLUS,	/* 1.0+Mbps */
139 	HS,		/* 3.4+Mbps */
140 	ULTRA_FAST,	/* 5.0+Mbps */
141 };
142 
143 enum lpi2c_imx_pincfg {
144 	TWO_PIN_OD,
145 	TWO_PIN_OO,
146 	TWO_PIN_PP,
147 	FOUR_PIN_PP,
148 };
149 
150 struct lpi2c_imx_dma {
151 	bool		using_pio_mode;
152 	u8		rx_cmd_buf_len;
153 	u8		*dma_buf;
154 	u16		*rx_cmd_buf;
155 	unsigned int	dma_len;
156 	unsigned int	tx_burst_num;
157 	unsigned int	rx_burst_num;
158 	unsigned long	dma_msg_flag;
159 	resource_size_t	phy_addr;
160 	dma_addr_t	dma_tx_addr;
161 	dma_addr_t	dma_addr;
162 	enum dma_data_direction dma_data_dir;
163 	enum dma_transfer_direction dma_transfer_dir;
164 	struct dma_chan	*chan_tx;
165 	struct dma_chan	*chan_rx;
166 };
167 
168 struct lpi2c_imx_struct {
169 	struct i2c_adapter	adapter;
170 	int			num_clks;
171 	struct clk_bulk_data	*clks;
172 	void __iomem		*base;
173 	__u8			*rx_buf;
174 	__u8			*tx_buf;
175 	struct completion	complete;
176 	unsigned long		rate_per;
177 	unsigned int		msglen;
178 	unsigned int		delivered;
179 	unsigned int		block_data;
180 	unsigned int		bitrate;
181 	unsigned int		txfifosize;
182 	unsigned int		rxfifosize;
183 	enum lpi2c_imx_mode	mode;
184 	struct i2c_bus_recovery_info rinfo;
185 	bool			can_use_dma;
186 	struct lpi2c_imx_dma	*dma;
187 	struct i2c_client	*target;
188 };
189 
190 static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
191 			      unsigned int enable)
192 {
193 	writel(enable, lpi2c_imx->base + LPI2C_MIER);
194 }
195 
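/*
 * Wait for the bus busy or master busy flag to be set after a START has been
 * queued, reporting arbitration loss (-EAGAIN) or a timeout after 500 ms.
 */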
196 static int lpi2c_imx_bus_busy(struct lpi2c_imx_struct *lpi2c_imx)
197 {
198 	unsigned long orig_jiffies = jiffies;
199 	unsigned int temp;
200 
201 	while (1) {
202 		temp = readl(lpi2c_imx->base + LPI2C_MSR);
203 
204 		/* check for arbitration lost, clear if set */
205 		if (temp & MSR_ALF) {
206 			writel(temp, lpi2c_imx->base + LPI2C_MSR);
207 			return -EAGAIN;
208 		}
209 
210 		if (temp & (MSR_BBF | MSR_MBF))
211 			break;
212 
213 		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
214 			dev_dbg(&lpi2c_imx->adapter.dev, "timeout waiting for bus busy\n");
215 			if (lpi2c_imx->adapter.bus_recovery_info)
216 				i2c_recover_bus(&lpi2c_imx->adapter);
217 			return -ETIMEDOUT;
218 		}
219 		schedule();
220 	}
221 
222 	return 0;
223 }
224 
225 static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx)
226 {
227 	unsigned int bitrate = lpi2c_imx->bitrate;
228 	enum lpi2c_imx_mode mode;
229 
230 	if (bitrate < I2C_MAX_FAST_MODE_FREQ)
231 		mode = STANDARD;
232 	else if (bitrate < I2C_MAX_FAST_MODE_PLUS_FREQ)
233 		mode = FAST;
234 	else if (bitrate < I2C_MAX_HIGH_SPEED_MODE_FREQ)
235 		mode = FAST_PLUS;
236 	else if (bitrate < I2C_MAX_ULTRA_FAST_MODE_FREQ)
237 		mode = HS;
238 	else
239 		mode = ULTRA_FAST;
240 
241 	lpi2c_imx->mode = mode;
242 }
243 
244 static int lpi2c_imx_start(struct lpi2c_imx_struct *lpi2c_imx,
245 			   struct i2c_msg *msgs)
246 {
247 	unsigned int temp;
248 
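	/* Reset the RX/TX FIFOs and clear the pending (write-1-to-clear) status flags */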
249 	temp = readl(lpi2c_imx->base + LPI2C_MCR);
250 	temp |= MCR_RRF | MCR_RTF;
251 	writel(temp, lpi2c_imx->base + LPI2C_MCR);
252 	writel(0x7f00, lpi2c_imx->base + LPI2C_MSR);
253 
254 	temp = i2c_8bit_addr_from_msg(msgs) | (GEN_START << 8);
255 	writel(temp, lpi2c_imx->base + LPI2C_MTDR);
256 
257 	return lpi2c_imx_bus_busy(lpi2c_imx);
258 }
259 
260 static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
261 {
262 	unsigned long orig_jiffies = jiffies;
263 	unsigned int temp;
264 
265 	writel(GEN_STOP << 8, lpi2c_imx->base + LPI2C_MTDR);
266 
267 	do {
268 		temp = readl(lpi2c_imx->base + LPI2C_MSR);
269 		if (temp & MSR_SDF)
270 			break;
271 
272 		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
273 			dev_dbg(&lpi2c_imx->adapter.dev, "stop timeout\n");
274 			if (lpi2c_imx->adapter.bus_recovery_info)
275 				i2c_recover_bus(&lpi2c_imx->adapter);
276 			break;
277 		}
278 		schedule();
279 
280 	} while (1);
281 }
282 
283 /* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
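/*
 * Worked example (illustrative, assuming a 24 MHz peripheral clock and a
 * 100 kHz bus with filt = 2): the prescale loop below settles at prescale = 2,
 * clk_cycle = 24000000 / (4 * 100000) - 3 - 1 = 56, giving clkhi = 19,
 * clklo = 37, sethold = 19 and datavd = 9, all written to MCCR0.
 */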
284 static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
285 {
286 	u8 prescale, filt, sethold, datavd;
287 	unsigned int clk_rate, clk_cycle, clkhi, clklo;
288 	enum lpi2c_imx_pincfg pincfg;
289 	unsigned int temp;
290 
291 	lpi2c_imx_set_mode(lpi2c_imx);
292 
293 	clk_rate = lpi2c_imx->rate_per;
294 
295 	if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
296 		filt = 0;
297 	else
298 		filt = 2;
299 
300 	for (prescale = 0; prescale <= 7; prescale++) {
301 		clk_cycle = clk_rate / ((1 << prescale) * lpi2c_imx->bitrate)
302 			    - 3 - (filt >> 1);
303 		clkhi = DIV_ROUND_UP(clk_cycle, I2C_CLK_RATIO + 1);
304 		clklo = clk_cycle - clkhi;
305 		if (clklo < 64)
306 			break;
307 	}
308 
309 	if (prescale > 7)
310 		return -EINVAL;
311 
312 	/* set MCFGR1: PINCFG, PRESCALE, IGNACK */
313 	if (lpi2c_imx->mode == ULTRA_FAST)
314 		pincfg = TWO_PIN_OO;
315 	else
316 		pincfg = TWO_PIN_OD;
317 	temp = prescale | pincfg << 24;
318 
319 	if (lpi2c_imx->mode == ULTRA_FAST)
320 		temp |= MCFGR1_IGNACK;
321 
322 	writel(temp, lpi2c_imx->base + LPI2C_MCFGR1);
323 
324 	/* set MCFGR2: FILTSDA, FILTSCL */
325 	temp = (filt << 16) | (filt << 24);
326 	writel(temp, lpi2c_imx->base + LPI2C_MCFGR2);
327 
328 	/* set MCCR: DATAVD, SETHOLD, CLKHI, CLKLO */
329 	sethold = clkhi;
330 	datavd = clkhi >> 1;
331 	temp = datavd << 24 | sethold << 16 | clkhi << 8 | clklo;
332 
333 	if (lpi2c_imx->mode == HS)
334 		writel(temp, lpi2c_imx->base + LPI2C_MCCR1);
335 	else
336 		writel(temp, lpi2c_imx->base + LPI2C_MCCR0);
337 
338 	return 0;
339 }
340 
341 static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
342 {
343 	unsigned int temp;
344 	int ret;
345 
346 	ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
347 	if (ret < 0)
348 		return ret;
349 
350 	temp = MCR_RST;
351 	writel(temp, lpi2c_imx->base + LPI2C_MCR);
352 	writel(0, lpi2c_imx->base + LPI2C_MCR);
353 
354 	ret = lpi2c_imx_config(lpi2c_imx);
355 	if (ret)
356 		goto rpm_put;
357 
358 	temp = readl(lpi2c_imx->base + LPI2C_MCR);
359 	temp |= MCR_MEN;
360 	writel(temp, lpi2c_imx->base + LPI2C_MCR);
361 
362 	return 0;
363 
364 rpm_put:
365 	pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent);
366 	pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);
367 
368 	return ret;
369 }
370 
371 static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
372 {
373 	u32 temp;
374 
375 	temp = readl(lpi2c_imx->base + LPI2C_MCR);
376 	temp &= ~MCR_MEN;
377 	writel(temp, lpi2c_imx->base + LPI2C_MCR);
378 
379 	pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent);
380 	pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);
381 
382 	return 0;
383 }
384 
385 static int lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
386 {
387 	unsigned long time_left;
388 
389 	time_left = wait_for_completion_timeout(&lpi2c_imx->complete, HZ);
390 
391 	return time_left ? 0 : -ETIMEDOUT;
392 }
393 
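/*
 * Wait for the TX FIFO to drain, bailing out early if a NACK is flagged or if
 * the FIFO has not emptied within 500 ms.
 */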
394 static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx)
395 {
396 	unsigned long orig_jiffies = jiffies;
397 	u32 txcnt;
398 
399 	do {
400 		txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
401 
402 		if (readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) {
403 			dev_dbg(&lpi2c_imx->adapter.dev, "NDF detected\n");
404 			return -EIO;
405 		}
406 
407 		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
408 			dev_dbg(&lpi2c_imx->adapter.dev, "txfifo empty timeout\n");
409 			if (lpi2c_imx->adapter.bus_recovery_info)
410 				i2c_recover_bus(&lpi2c_imx->adapter);
411 			return -ETIMEDOUT;
412 		}
413 		schedule();
414 
415 	} while (txcnt);
416 
417 	return 0;
418 }
419 
420 static void lpi2c_imx_set_tx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
421 {
422 	writel(lpi2c_imx->txfifosize >> 1, lpi2c_imx->base + LPI2C_MFCR);
423 }
424 
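/*
 * Keep the RX watermark at half the FIFO depth while more than half a FIFO of
 * data is still expected; drop it to zero for the tail so that every remaining
 * byte raises RDF.
 */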
425 static void lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
426 {
427 	unsigned int temp, remaining;
428 
429 	remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
430 
431 	if (remaining > (lpi2c_imx->rxfifosize >> 1))
432 		temp = lpi2c_imx->rxfifosize >> 1;
433 	else
434 		temp = 0;
435 
436 	writel(temp << 16, lpi2c_imx->base + LPI2C_MFCR);
437 }
438 
439 static void lpi2c_imx_write_txfifo(struct lpi2c_imx_struct *lpi2c_imx)
440 {
441 	unsigned int data, txcnt;
442 
443 	txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
444 
445 	while (txcnt < lpi2c_imx->txfifosize) {
446 		if (lpi2c_imx->delivered == lpi2c_imx->msglen)
447 			break;
448 
449 		data = lpi2c_imx->tx_buf[lpi2c_imx->delivered++];
450 		writel(data, lpi2c_imx->base + LPI2C_MTDR);
451 		txcnt++;
452 	}
453 
454 	if (lpi2c_imx->delivered < lpi2c_imx->msglen)
455 		lpi2c_imx_intctrl(lpi2c_imx, MIER_TDIE | MIER_NDIE);
456 	else
457 		complete(&lpi2c_imx->complete);
458 }
459 
460 static void lpi2c_imx_read_rxfifo(struct lpi2c_imx_struct *lpi2c_imx)
461 {
462 	unsigned int blocklen, remaining;
463 	unsigned int temp, data;
464 
465 	do {
466 		data = readl(lpi2c_imx->base + LPI2C_MRDR);
467 		if (data & MRDR_RXEMPTY)
468 			break;
469 
470 		lpi2c_imx->rx_buf[lpi2c_imx->delivered++] = data & 0xff;
471 	} while (1);
472 
473 	/*
474 	 * In an SMBus block data read the first byte holds the length of the
475 	 * remaining packet; add it to the expected message length.
476 	 */
477 	if (lpi2c_imx->block_data) {
478 		blocklen = lpi2c_imx->rx_buf[0];
479 		lpi2c_imx->msglen += blocklen;
480 	}
481 
482 	remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
483 
484 	if (!remaining) {
485 		complete(&lpi2c_imx->complete);
486 		return;
487 	}
488 
489 	/* not finished, still waiting for rx data */
490 	lpi2c_imx_set_rx_watermark(lpi2c_imx);
491 
492 	/* multiple receive commands */
493 	if (lpi2c_imx->block_data) {
494 		lpi2c_imx->block_data = 0;
495 		temp = remaining;
496 		temp |= (RECV_DATA << 8);
497 		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
498 	} else if (!(lpi2c_imx->delivered & 0xff)) {
499 		temp = (remaining > CHUNK_DATA ? CHUNK_DATA : remaining) - 1;
500 		temp |= (RECV_DATA << 8);
501 		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
502 	}
503 
504 	lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE);
505 }
506 
507 static void lpi2c_imx_write(struct lpi2c_imx_struct *lpi2c_imx,
508 			    struct i2c_msg *msgs)
509 {
510 	lpi2c_imx->tx_buf = msgs->buf;
511 	lpi2c_imx_set_tx_watermark(lpi2c_imx);
512 	lpi2c_imx_write_txfifo(lpi2c_imx);
513 }
514 
515 static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx,
516 			   struct i2c_msg *msgs)
517 {
518 	unsigned int temp;
519 
520 	lpi2c_imx->rx_buf = msgs->buf;
521 	lpi2c_imx->block_data = msgs->flags & I2C_M_RECV_LEN;
522 
523 	lpi2c_imx_set_rx_watermark(lpi2c_imx);
524 	temp = msgs->len > CHUNK_DATA ? CHUNK_DATA - 1 : msgs->len - 1;
525 	temp |= (RECV_DATA << 8);
526 	writel(temp, lpi2c_imx->base + LPI2C_MTDR);
527 
528 	lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
529 }
530 
531 static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
532 {
533 	if (!lpi2c_imx->can_use_dma)
534 		return false;
535 
536 	/*
537 	 * For messages shorter than I2C_DMA_THRESHOLD bytes, PIO (CPU) mode
538 	 * is used directly, as the DMA setup overhead would hurt performance.
539 	 */
540 	return msg->len >= I2C_DMA_THRESHOLD;
541 }
542 
543 static int lpi2c_imx_pio_xfer(struct lpi2c_imx_struct *lpi2c_imx,
544 			      struct i2c_msg *msg)
545 {
546 	reinit_completion(&lpi2c_imx->complete);
547 
548 	if (msg->flags & I2C_M_RD)
549 		lpi2c_imx_read(lpi2c_imx, msg);
550 	else
551 		lpi2c_imx_write(lpi2c_imx, msg);
552 
553 	return lpi2c_imx_pio_msg_complete(lpi2c_imx);
554 }
555 
556 static int lpi2c_imx_dma_timeout_calculate(struct lpi2c_imx_struct *lpi2c_imx)
557 {
558 	unsigned long time = 0;
559 
560 	time = 8 * lpi2c_imx->dma->dma_len * 1000 / lpi2c_imx->bitrate;
561 
562 	/* Add extra second for scheduler related activities */
563 	time += 1;
564 
565 	/* Scale the estimate up generously and convert to jiffies */
566 	return msecs_to_jiffies(time * MSEC_PER_SEC);
567 }
568 
569 static int lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct *lpi2c_imx)
570 {
571 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
572 	u16 rx_remain = dma->dma_len;
573 	int cmd_num;
574 	u16 temp;
575 
576 	/*
577 	 * Calculate the number of rx command words via the DMA TX channel
578 	 * writing into command register based on the i2c msg len, and build
579 	 * the rx command words buffer.
580 	 */
581 	cmd_num = DIV_ROUND_UP(rx_remain, CHUNK_DATA);
582 	dma->rx_cmd_buf = kcalloc(cmd_num, sizeof(u16), GFP_KERNEL);
583 	dma->rx_cmd_buf_len = cmd_num * sizeof(u16);
584 
585 	if (!dma->rx_cmd_buf) {
586 		dev_err(&lpi2c_imx->adapter.dev, "Alloc RX cmd buffer failed\n");
587 		return -ENOMEM;
588 	}
589 
590 	for (int i = 0; i < cmd_num; i++) {
591 		temp = rx_remain > CHUNK_DATA ? CHUNK_DATA - 1 : rx_remain - 1;
592 		temp |= (RECV_DATA << 8);
593 		rx_remain -= CHUNK_DATA;
594 		dma->rx_cmd_buf[i] = temp;
595 	}
596 
597 	return 0;
598 }
599 
600 static int lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
601 {
602 	unsigned long time_left, time;
603 
604 	time = lpi2c_imx_dma_timeout_calculate(lpi2c_imx);
605 	time_left = wait_for_completion_timeout(&lpi2c_imx->complete, time);
606 	if (time_left == 0) {
607 		dev_err(&lpi2c_imx->adapter.dev, "I/O Error in DMA Data Transfer\n");
608 		return -ETIMEDOUT;
609 	}
610 
611 	return 0;
612 }
613 
614 static void lpi2c_dma_unmap(struct lpi2c_imx_dma *dma)
615 {
616 	struct dma_chan *chan = dma->dma_data_dir == DMA_FROM_DEVICE
617 				? dma->chan_rx : dma->chan_tx;
618 
619 	dma_unmap_single(chan->device->dev, dma->dma_addr,
620 			 dma->dma_len, dma->dma_data_dir);
621 
622 	dma->dma_data_dir = DMA_NONE;
623 }
624 
625 static void lpi2c_cleanup_rx_cmd_dma(struct lpi2c_imx_dma *dma)
626 {
627 	dmaengine_terminate_sync(dma->chan_tx);
628 	dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
629 			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
630 }
631 
632 static void lpi2c_cleanup_dma(struct lpi2c_imx_dma *dma)
633 {
634 	if (dma->dma_data_dir == DMA_FROM_DEVICE)
635 		dmaengine_terminate_sync(dma->chan_rx);
636 	else if (dma->dma_data_dir == DMA_TO_DEVICE)
637 		dmaengine_terminate_sync(dma->chan_tx);
638 
639 	lpi2c_dma_unmap(dma);
640 }
641 
642 static void lpi2c_dma_callback(void *data)
643 {
644 	struct lpi2c_imx_struct *lpi2c_imx = (struct lpi2c_imx_struct *)data;
645 
646 	complete(&lpi2c_imx->complete);
647 }
648 
649 static int lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct *lpi2c_imx)
650 {
651 	struct dma_async_tx_descriptor *rx_cmd_desc;
652 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
653 	struct dma_chan *txchan = dma->chan_tx;
654 	dma_cookie_t cookie;
655 
656 	dma->dma_tx_addr = dma_map_single(txchan->device->dev,
657 					  dma->rx_cmd_buf, dma->rx_cmd_buf_len,
658 					  DMA_TO_DEVICE);
659 	if (dma_mapping_error(txchan->device->dev, dma->dma_tx_addr)) {
660 		dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
661 		return -EINVAL;
662 	}
663 
664 	rx_cmd_desc = dmaengine_prep_slave_single(txchan, dma->dma_tx_addr,
665 						  dma->rx_cmd_buf_len, DMA_MEM_TO_DEV,
666 						  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
667 	if (!rx_cmd_desc) {
668 		dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
669 		goto desc_prepare_err_exit;
670 	}
671 
672 	cookie = dmaengine_submit(rx_cmd_desc);
673 	if (dma_submit_error(cookie)) {
674 		dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
675 		goto submit_err_exit;
676 	}
677 
678 	dma_async_issue_pending(txchan);
679 
680 	return 0;
681 
682 desc_prepare_err_exit:
683 	dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
684 			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
685 	return -EINVAL;
686 
687 submit_err_exit:
688 	dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
689 			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
690 	dmaengine_desc_free(rx_cmd_desc);
691 	return -EINVAL;
692 }
693 
694 static int lpi2c_dma_submit(struct lpi2c_imx_struct *lpi2c_imx)
695 {
696 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
697 	struct dma_async_tx_descriptor *desc;
698 	struct dma_chan *chan;
699 	dma_cookie_t cookie;
700 
701 	if (dma->dma_msg_flag & I2C_M_RD) {
702 		chan = dma->chan_rx;
703 		dma->dma_data_dir = DMA_FROM_DEVICE;
704 		dma->dma_transfer_dir = DMA_DEV_TO_MEM;
705 	} else {
706 		chan = dma->chan_tx;
707 		dma->dma_data_dir = DMA_TO_DEVICE;
708 		dma->dma_transfer_dir = DMA_MEM_TO_DEV;
709 	}
710 
711 	dma->dma_addr = dma_map_single(chan->device->dev,
712 				       dma->dma_buf, dma->dma_len, dma->dma_data_dir);
713 	if (dma_mapping_error(chan->device->dev, dma->dma_addr)) {
714 		dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
715 		return -EINVAL;
716 	}
717 
718 	desc = dmaengine_prep_slave_single(chan, dma->dma_addr,
719 					   dma->dma_len, dma->dma_transfer_dir,
720 					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
721 	if (!desc) {
722 		dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
723 		goto desc_prepare_err_exit;
724 	}
725 
726 	reinit_completion(&lpi2c_imx->complete);
727 	desc->callback = lpi2c_dma_callback;
728 	desc->callback_param = lpi2c_imx;
729 
730 	cookie = dmaengine_submit(desc);
731 	if (dma_submit_error(cookie)) {
732 		dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
733 		goto submit_err_exit;
734 	}
735 
736 	/* Can't switch to PIO mode once the DMA transfer has started */
737 	dma->using_pio_mode = false;
738 
739 	dma_async_issue_pending(chan);
740 
741 	return 0;
742 
743 desc_prepare_err_exit:
744 	lpi2c_dma_unmap(dma);
745 	return -EINVAL;
746 
747 submit_err_exit:
748 	lpi2c_dma_unmap(dma);
749 	dmaengine_desc_free(desc);
750 	return -EINVAL;
751 }
752 
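/*
 * Pick the largest burst size that divides the transfer length evenly and does
 * not exceed half the FIFO depth. Illustrative example: for a 16-entry FIFO
 * and a 100 byte transfer this returns 5, the largest divisor of 100 that is
 * no bigger than 8.
 */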
753 static int lpi2c_imx_find_max_burst_num(unsigned int fifosize, unsigned int len)
754 {
755 	unsigned int i;
756 
757 	for (i = fifosize / 2; i > 0; i--)
758 		if (!(len % i))
759 			break;
760 
761 	return i;
762 }
763 
764 /*
765  * For the best DMA efficiency, the TX/RX burst numbers should be derived
766  * from the FIFO depth.
767  */
768 static void lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct *lpi2c_imx)
769 {
770 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
771 	unsigned int cmd_num;
772 
773 	if (dma->dma_msg_flag & I2C_M_RD) {
774 		/*
775 		 * One RX cmd word can trigger DMA receive no more than 256 bytes.
776 		 * The number of RX cmd words should be calculated based on the data
777 		 * length.
778 		 */
779 		cmd_num = DIV_ROUND_UP(dma->dma_len, CHUNK_DATA);
780 		dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
781 								 cmd_num);
782 		dma->rx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->rxfifosize,
783 								 dma->dma_len);
784 	} else {
785 		dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
786 								 dma->dma_len);
787 	}
788 }
789 
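/*
 * For a read, both channels are configured: the TX channel feeds 16-bit
 * receive-command words into MTDR while the RX channel drains MRDR one byte at
 * a time. For a write, only the TX channel is needed.
 */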
790 static int lpi2c_dma_config(struct lpi2c_imx_struct *lpi2c_imx)
791 {
792 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
793 	struct dma_slave_config rx = {}, tx = {};
794 	int ret;
795 
796 	lpi2c_imx_dma_burst_num_calculate(lpi2c_imx);
797 
798 	if (dma->dma_msg_flag & I2C_M_RD) {
799 		tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
800 		tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
801 		tx.dst_maxburst = dma->tx_burst_num;
802 		tx.direction = DMA_MEM_TO_DEV;
803 		ret = dmaengine_slave_config(dma->chan_tx, &tx);
804 		if (ret < 0)
805 			return ret;
806 
807 		rx.src_addr = dma->phy_addr + LPI2C_MRDR;
808 		rx.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
809 		rx.src_maxburst = dma->rx_burst_num;
810 		rx.direction = DMA_DEV_TO_MEM;
811 		ret = dmaengine_slave_config(dma->chan_rx, &rx);
812 		if (ret < 0)
813 			return ret;
814 	} else {
815 		tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
816 		tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
817 		tx.dst_maxburst = dma->tx_burst_num;
818 		tx.direction = DMA_MEM_TO_DEV;
819 		ret = dmaengine_slave_config(dma->chan_tx, &tx);
820 		if (ret < 0)
821 			return ret;
822 	}
823 
824 	return 0;
825 }
826 
827 static void lpi2c_dma_enable(struct lpi2c_imx_struct *lpi2c_imx)
828 {
829 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
830 	/*
831 	 * The TX DMA request is triggered when the number of words in
832 	 * the transmit FIFO is equal to or less than the TX watermark.
833 	 * The RX DMA request is triggered when the number of words in
834 	 * the receive FIFO is greater than the RX watermark.
835 	 * To make the DMA requests fire, the TX watermark should be set
836 	 * equal to the DMA TX burst number while the RX watermark should
837 	 * be set one below the DMA RX burst number.
838 	 */
839 	if (dma->dma_msg_flag & I2C_M_RD) {
840 		/* Set I2C TX/RX watermark */
841 		writel(dma->tx_burst_num | (dma->rx_burst_num - 1) << 16,
842 		       lpi2c_imx->base + LPI2C_MFCR);
843 		/* Enable I2C DMA TX/RX function */
844 		writel(MDER_TDDE | MDER_RDDE, lpi2c_imx->base + LPI2C_MDER);
845 	} else {
846 		/* Set I2C TX watermark */
847 		writel(dma->tx_burst_num, lpi2c_imx->base + LPI2C_MFCR);
848 		/* Enable I2C DMA TX function */
849 		writel(MDER_TDDE, lpi2c_imx->base + LPI2C_MDER);
850 	}
851 
852 	/* Enable the NACK detected interrupt */
853 	lpi2c_imx_intctrl(lpi2c_imx, MIER_NDIE);
854 }
855 
856 /*
857  * When the LPI2C is in TX DMA mode, a single DMA TX channel writes the
858  * data words into the TX FIFO, but RX DMA mode is different.
859  *
860  * The LPI2C MTDR register combines a command field and a transmit data
861  * field: bits 8-10 hold the command and bits 0-7 hold the transmit
862  * data. When the LPI2C master needs to read data, a receive command
863  * word must be written to MTDR with RECV_DATA in the command field and
864  * the number of bytes to read minus one in the data field, which makes
865  * the master receive (DATA[7:0] + 1) bytes.
866  * When the length of the data to be read exceeds 256 bytes, such a
867  * receive command word has to be written to the TX FIFO multiple
868  * times.
869  *
870  * So in RX DMA mode the TX channel must also be configured to send the
871  * RX command words, and those command words must be prepared in
872  * advance, before the transfer starts.
873  */
874 static int lpi2c_imx_dma_xfer(struct lpi2c_imx_struct *lpi2c_imx,
875 			      struct i2c_msg *msg)
876 {
877 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
878 	int ret;
879 
880 	/* If DMA setup fails before the transfer has started, PIO mode can be used instead. */
881 	dma->using_pio_mode = true;
882 
883 	dma->dma_len = msg->len;
884 	dma->dma_msg_flag = msg->flags;
885 	dma->dma_buf = i2c_get_dma_safe_msg_buf(msg, I2C_DMA_THRESHOLD);
886 	if (!dma->dma_buf)
887 		return -ENOMEM;
888 
889 	ret = lpi2c_dma_config(lpi2c_imx);
890 	if (ret) {
891 		dev_err(&lpi2c_imx->adapter.dev, "Failed to configure DMA (%d)\n", ret);
892 		goto disable_dma;
893 	}
894 
895 	lpi2c_dma_enable(lpi2c_imx);
896 
897 	ret = lpi2c_dma_submit(lpi2c_imx);
898 	if (ret) {
899 		dev_err(&lpi2c_imx->adapter.dev, "DMA submission failed (%d)\n", ret);
900 		goto disable_dma;
901 	}
902 
903 	if (dma->dma_msg_flag & I2C_M_RD) {
904 		ret = lpi2c_imx_alloc_rx_cmd_buf(lpi2c_imx);
905 		if (ret)
906 			goto disable_cleanup_data_dma;
907 
908 		ret = lpi2c_dma_rx_cmd_submit(lpi2c_imx);
909 		if (ret)
910 			goto disable_cleanup_data_dma;
911 	}
912 
913 	ret = lpi2c_imx_dma_msg_complete(lpi2c_imx);
914 	if (ret)
915 		goto disable_cleanup_all_dma;
916 
917 	/* If a NACK was received during the transfer, clean up all DMA transfers */
918 	if ((readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) && !ret) {
919 		ret = -EIO;
920 		goto disable_cleanup_all_dma;
921 	}
922 
923 	if (dma->dma_msg_flag & I2C_M_RD)
924 		dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
925 				 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
926 	lpi2c_dma_unmap(dma);
927 
928 	goto disable_dma;
929 
930 disable_cleanup_all_dma:
931 	if (dma->dma_msg_flag & I2C_M_RD)
932 		lpi2c_cleanup_rx_cmd_dma(dma);
933 disable_cleanup_data_dma:
934 	lpi2c_cleanup_dma(dma);
935 disable_dma:
936 	/* Disable I2C DMA function */
937 	writel(0, lpi2c_imx->base + LPI2C_MDER);
938 
939 	if (dma->dma_msg_flag & I2C_M_RD)
940 		kfree(dma->rx_cmd_buf);
941 
942 	if (ret)
943 		i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, false);
944 	else
945 		i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, true);
946 
947 	return ret;
948 }
949 
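/*
 * Master transfer callback: enable the controller, issue START plus the
 * address for each message, move the payload by DMA or PIO (falling back to
 * PIO if DMA setup fails), then issue STOP and disable the controller again.
 */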
950 static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
951 			  struct i2c_msg *msgs, int num)
952 {
953 	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(adapter);
954 	unsigned int temp;
955 	int i, result;
956 
957 	result = lpi2c_imx_master_enable(lpi2c_imx);
958 	if (result)
959 		return result;
960 
961 	for (i = 0; i < num; i++) {
962 		result = lpi2c_imx_start(lpi2c_imx, &msgs[i]);
963 		if (result)
964 			goto disable;
965 
966 		/* SMBus Quick command: zero-length message, just address then STOP */
967 		if (num == 1 && msgs[0].len == 0)
968 			goto stop;
969 
970 		lpi2c_imx->rx_buf = NULL;
971 		lpi2c_imx->tx_buf = NULL;
972 		lpi2c_imx->delivered = 0;
973 		lpi2c_imx->msglen = msgs[i].len;
974 		init_completion(&lpi2c_imx->complete);
975 
976 		if (is_use_dma(lpi2c_imx, &msgs[i])) {
977 			result = lpi2c_imx_dma_xfer(lpi2c_imx, &msgs[i]);
978 			if (result && lpi2c_imx->dma->using_pio_mode)
979 				result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
980 		} else {
981 			result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
982 		}
983 
984 		if (result)
985 			goto stop;
986 
987 		if (!(msgs[i].flags & I2C_M_RD)) {
988 			result = lpi2c_imx_txfifo_empty(lpi2c_imx);
989 			if (result)
990 				goto stop;
991 		}
992 	}
993 
994 stop:
995 	lpi2c_imx_stop(lpi2c_imx);
996 
997 	temp = readl(lpi2c_imx->base + LPI2C_MSR);
998 	if ((temp & MSR_NDF) && !result)
999 		result = -EIO;
1000 
1001 disable:
1002 	lpi2c_imx_master_disable(lpi2c_imx);
1003 
1004 	dev_dbg(&lpi2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__,
1005 		(result < 0) ? "error" : "success msg",
1006 		(result < 0) ? result : num);
1007 
1008 	return (result < 0) ? result : num;
1009 }
1010 
1011 static irqreturn_t lpi2c_imx_target_isr(struct lpi2c_imx_struct *lpi2c_imx,
1012 					u32 ssr, u32 sier_filter)
1013 {
1014 	u8 value;
1015 	u32 sasr;
1016 
1017 	/* Bit error detected (e.g. arbitration lost) */
1018 	if (sier_filter & SSR_BEF) {
1019 		writel(0, lpi2c_imx->base + LPI2C_SIER);
1020 		return IRQ_HANDLED;
1021 	}
1022 
1023 	/* Address detected */
1024 	if (sier_filter & SSR_AVF) {
1025 		sasr = readl(lpi2c_imx->base + LPI2C_SASR);
1026 		if (SASR_READ_REQ & sasr) {
1027 			/* Read request */
1028 			i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_REQUESTED, &value);
1029 			writel(value, lpi2c_imx->base + LPI2C_STDR);
1030 			goto ret;
1031 		} else {
1032 			/* Write request */
1033 			i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_REQUESTED, &value);
1034 		}
1035 	}
1036 
1037 	if (sier_filter & SSR_SDF)
1038 		/* STOP */
1039 		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_STOP, &value);
1040 
1041 	if (sier_filter & SSR_TDF) {
1042 		/* Target send data */
1043 		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_PROCESSED, &value);
1044 		writel(value, lpi2c_imx->base + LPI2C_STDR);
1045 	}
1046 
1047 	if (sier_filter & SSR_RDF) {
1048 		/* Target receive data */
1049 		value = readl(lpi2c_imx->base + LPI2C_SRDR);
1050 		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_RECEIVED, &value);
1051 	}
1052 
1053 ret:
1054 	/* Clear SSR */
1055 	writel(ssr & SSR_CLEAR_BITS, lpi2c_imx->base + LPI2C_SSR);
1056 	return IRQ_HANDLED;
1057 }
1058 
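/*
 * Master interrupt: mask further interrupts, then either complete the transfer
 * on NACK, drain the RX FIFO or refill the TX FIFO, depending on which enabled
 * status flag fired.
 */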
1059 static irqreturn_t lpi2c_imx_master_isr(struct lpi2c_imx_struct *lpi2c_imx)
1060 {
1061 	unsigned int enabled;
1062 	unsigned int temp;
1063 
1064 	enabled = readl(lpi2c_imx->base + LPI2C_MIER);
1065 
1066 	lpi2c_imx_intctrl(lpi2c_imx, 0);
1067 	temp = readl(lpi2c_imx->base + LPI2C_MSR);
1068 	temp &= enabled;
1069 
1070 	if (temp & MSR_NDF)
1071 		complete(&lpi2c_imx->complete);
1072 	else if (temp & MSR_RDF)
1073 		lpi2c_imx_read_rxfifo(lpi2c_imx);
1074 	else if (temp & MSR_TDF)
1075 		lpi2c_imx_write_txfifo(lpi2c_imx);
1076 
1077 	return IRQ_HANDLED;
1078 }
1079 
1080 static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
1081 {
1082 	struct lpi2c_imx_struct *lpi2c_imx = dev_id;
1083 
1084 	if (lpi2c_imx->target) {
1085 		u32 scr = readl(lpi2c_imx->base + LPI2C_SCR);
1086 		u32 ssr = readl(lpi2c_imx->base + LPI2C_SSR);
1087 		u32 sier_filter = ssr & readl(lpi2c_imx->base + LPI2C_SIER);
1088 
1089 		/*
1090 		 * The target is enabled and an interrupt has been triggered.
1091 		 * Enter the target's irq handler.
1092 		 */
1093 		if ((scr & SCR_SEN) && sier_filter)
1094 			return lpi2c_imx_target_isr(lpi2c_imx, ssr, sier_filter);
1095 	}
1096 
1097 	/*
1098 	 * Otherwise the interrupt has been triggered by the master.
1099 	 * Enter the master's irq handler.
1100 	 */
1101 	return lpi2c_imx_master_isr(lpi2c_imx);
1102 }
1103 
1104 static void lpi2c_imx_target_init(struct lpi2c_imx_struct *lpi2c_imx)
1105 {
1106 	u32 temp;
1107 
1108 	/* reset target module */
1109 	writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
1110 	writel(0, lpi2c_imx->base + LPI2C_SCR);
1111 
1112 	/* Set target address */
1113 	writel((lpi2c_imx->target->addr << 1), lpi2c_imx->base + LPI2C_SAMR);
1114 
1115 	writel(SCFGR1_RXSTALL | SCFGR1_TXDSTALL, lpi2c_imx->base + LPI2C_SCFGR1);
1116 
1117 	/*
1118 	 * set SCFGR2: FILTSDA, FILTSCL and CLKHOLD
1119 	 *
1120 	 * FILTSCL/FILTSDA can eliminate signal skew. They should generally be
1121 	 * set to the same value and to at least 50 ns.
1122 	 *
1123 	 * CLKHOLD is only used when clock stretching is enabled, but it
1124 	 * extends the clock stretching to ensure there is an additional delay
1125 	 * between the target driving SDA and the target releasing the SCL pin.
1126 	 *
1127 	 * The CLKHOLD setting is crucial for the LPI2C target. When the master
1128 	 * reads data from the target and there is a delay between two bytes of
1129 	 * one message, caused by CPU idle, excessive load or anything else,
1130 	 * the interval between the target driving SDA and releasing SCL can
1131 	 * become too short. The LPI2C master then mistakes it for a STOP
1132 	 * condition, which results in an arbitration failure. Setting CLKHOLD
1133 	 * avoids this issue.
1134 	 *
1135 	 * To ensure the LPI2C target works correctly at bus speeds as low as
1136 	 * 100 kHz, CLKHOLD should be set to 3; this value is also compatible
1137 	 * with higher bus frequencies such as 400 kHz and 1 MHz.
1138 	 */
1139 	temp = SCFGR2_FILTSDA(2) | SCFGR2_FILTSCL(2) | SCFGR2_CLKHOLD(3);
1140 	writel(temp, lpi2c_imx->base + LPI2C_SCFGR2);
1141 
1142 	/*
1143 	 * Enable module:
1144 	 * SCR_FILTEN enables the digital filter and output delay counter for
1145 	 * LPI2C target mode, so it needs to be asserted whenever the SDA/SCL
1146 	 * filters and CLKHOLD are used.
1147 	 */
1148 	writel(SCR_SEN | SCR_FILTEN, lpi2c_imx->base + LPI2C_SCR);
1149 
1150 	/* Enable interrupt from i2c module */
1151 	writel(SLAVE_INT_FLAG, lpi2c_imx->base + LPI2C_SIER);
1152 }
1153 
1154 static int lpi2c_imx_register_target(struct i2c_client *client)
1155 {
1156 	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
1157 	int ret;
1158 
1159 	if (lpi2c_imx->target)
1160 		return -EBUSY;
1161 
1162 	lpi2c_imx->target = client;
1163 
1164 	ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
1165 	if (ret < 0) {
1166 		dev_err(&lpi2c_imx->adapter.dev, "failed to resume i2c controller");
1167 		return ret;
1168 	}
1169 
1170 	lpi2c_imx_target_init(lpi2c_imx);
1171 
1172 	return 0;
1173 }
1174 
1175 static int lpi2c_imx_unregister_target(struct i2c_client *client)
1176 {
1177 	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
1178 	int ret;
1179 
1180 	if (!lpi2c_imx->target)
1181 		return -EINVAL;
1182 
1183 	/* Reset target address. */
1184 	writel(0, lpi2c_imx->base + LPI2C_SAMR);
1185 
1186 	writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
1187 	writel(0, lpi2c_imx->base + LPI2C_SCR);
1188 
1189 	lpi2c_imx->target = NULL;
1190 
1191 	ret = pm_runtime_put_sync(lpi2c_imx->adapter.dev.parent);
1192 	if (ret < 0)
1193 		dev_err(&lpi2c_imx->adapter.dev, "failed to suspend i2c controller");
1194 
1195 	return ret;
1196 }
1197 
1198 static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
1199 				  struct platform_device *pdev)
1200 {
1201 	struct i2c_bus_recovery_info *bri = &lpi2c_imx->rinfo;
1202 
1203 	bri->pinctrl = devm_pinctrl_get(&pdev->dev);
1204 	if (IS_ERR(bri->pinctrl))
1205 		return PTR_ERR(bri->pinctrl);
1206 
1207 	lpi2c_imx->adapter.bus_recovery_info = bri;
1208 
1209 	return 0;
1210 }
1211 
1212 static void dma_exit(struct device *dev, struct lpi2c_imx_dma *dma)
1213 {
1214 	if (dma->chan_rx)
1215 		dma_release_channel(dma->chan_rx);
1216 
1217 	if (dma->chan_tx)
1218 		dma_release_channel(dma->chan_tx);
1219 
1220 	devm_kfree(dev, dma);
1221 }
1222 
1223 static int lpi2c_dma_init(struct device *dev, dma_addr_t phy_addr)
1224 {
1225 	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1226 	struct lpi2c_imx_dma *dma;
1227 	int ret;
1228 
1229 	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
1230 	if (!dma)
1231 		return -ENOMEM;
1232 
1233 	dma->phy_addr = phy_addr;
1234 
1235 	/* Prepare for TX DMA: */
1236 	dma->chan_tx = dma_request_chan(dev, "tx");
1237 	if (IS_ERR(dma->chan_tx)) {
1238 		ret = PTR_ERR(dma->chan_tx);
1239 		if (ret != -ENODEV && ret != -EPROBE_DEFER)
1240 			dev_err(dev, "can't request DMA tx channel (%d)\n", ret);
1241 		dma->chan_tx = NULL;
1242 		goto dma_exit;
1243 	}
1244 
1245 	/* Prepare for RX DMA: */
1246 	dma->chan_rx = dma_request_chan(dev, "rx");
1247 	if (IS_ERR(dma->chan_rx)) {
1248 		ret = PTR_ERR(dma->chan_rx);
1249 		if (ret != -ENODEV && ret != -EPROBE_DEFER)
1250 			dev_err(dev, "can't request DMA rx channel (%d)\n", ret);
1251 		dma->chan_rx = NULL;
1252 		goto dma_exit;
1253 	}
1254 
1255 	lpi2c_imx->can_use_dma = true;
1256 	lpi2c_imx->dma = dma;
1257 	return 0;
1258 
1259 dma_exit:
1260 	dma_exit(dev, dma);
1261 	return ret;
1262 }
1263 
1264 static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
1265 {
1266 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
1267 		I2C_FUNC_SMBUS_READ_BLOCK_DATA;
1268 }
1269 
1270 static const struct i2c_algorithm lpi2c_imx_algo = {
1271 	.master_xfer	= lpi2c_imx_xfer,
1272 	.functionality	= lpi2c_imx_func,
1273 	.reg_target	= lpi2c_imx_register_target,
1274 	.unreg_target	= lpi2c_imx_unregister_target,
1275 };
1276 
1277 static const struct of_device_id lpi2c_imx_of_match[] = {
1278 	{ .compatible = "fsl,imx7ulp-lpi2c" },
1279 	{ }
1280 };
1281 MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
1282 
1283 static int lpi2c_imx_probe(struct platform_device *pdev)
1284 {
1285 	struct lpi2c_imx_struct *lpi2c_imx;
1286 	struct resource *res;
1287 	dma_addr_t phy_addr;
1288 	unsigned int temp;
1289 	int irq, ret;
1290 
1291 	lpi2c_imx = devm_kzalloc(&pdev->dev, sizeof(*lpi2c_imx), GFP_KERNEL);
1292 	if (!lpi2c_imx)
1293 		return -ENOMEM;
1294 
1295 	lpi2c_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1296 	if (IS_ERR(lpi2c_imx->base))
1297 		return PTR_ERR(lpi2c_imx->base);
1298 
1299 	irq = platform_get_irq(pdev, 0);
1300 	if (irq < 0)
1301 		return irq;
1302 
1303 	lpi2c_imx->adapter.owner	= THIS_MODULE;
1304 	lpi2c_imx->adapter.algo		= &lpi2c_imx_algo;
1305 	lpi2c_imx->adapter.dev.parent	= &pdev->dev;
1306 	lpi2c_imx->adapter.dev.of_node	= pdev->dev.of_node;
1307 	strscpy(lpi2c_imx->adapter.name, pdev->name,
1308 		sizeof(lpi2c_imx->adapter.name));
1309 	phy_addr = (dma_addr_t)res->start;
1310 
1311 	ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
1312 	if (ret < 0)
1313 		return dev_err_probe(&pdev->dev, ret, "can't get I2C peripheral clock\n");
1314 	lpi2c_imx->num_clks = ret;
1315 
1316 	ret = of_property_read_u32(pdev->dev.of_node,
1317 				   "clock-frequency", &lpi2c_imx->bitrate);
1318 	if (ret)
1319 		lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
1320 
1321 	ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, IRQF_NO_SUSPEND,
1322 			       pdev->name, lpi2c_imx);
1323 	if (ret)
1324 		return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", irq);
1325 
1326 	i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
1327 	platform_set_drvdata(pdev, lpi2c_imx);
1328 
1329 	ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
1330 	if (ret)
1331 		return ret;
1332 
1333 	/*
1334 	 * Lock the peripheral clock rate so that it does not have to be
1335 	 * queried again on every transfer.
1336 	 */
1337 	ret = devm_clk_rate_exclusive_get(&pdev->dev, lpi2c_imx->clks[0].clk);
1338 	if (ret)
1339 		return dev_err_probe(&pdev->dev, ret,
1340 				     "can't lock I2C peripheral clock rate\n");
1341 
1342 	lpi2c_imx->rate_per = clk_get_rate(lpi2c_imx->clks[0].clk);
1343 	if (!lpi2c_imx->rate_per)
1344 		return dev_err_probe(&pdev->dev, -EINVAL,
1345 				     "can't get I2C peripheral clock rate\n");
1346 
1347 	pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT);
1348 	pm_runtime_use_autosuspend(&pdev->dev);
1349 	pm_runtime_get_noresume(&pdev->dev);
1350 	pm_runtime_set_active(&pdev->dev);
1351 	pm_runtime_enable(&pdev->dev);
1352 
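	/* The PARAM register encodes the TX/RX FIFO depths as powers of two */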
1353 	temp = readl(lpi2c_imx->base + LPI2C_PARAM);
1354 	lpi2c_imx->txfifosize = 1 << (temp & 0x0f);
1355 	lpi2c_imx->rxfifosize = 1 << ((temp >> 8) & 0x0f);
1356 
1357 	/* Init optional bus recovery function */
1358 	ret = lpi2c_imx_init_recovery_info(lpi2c_imx, pdev);
1359 	/* Give it another chance if the pinctrl it uses is not ready yet */
1360 	if (ret == -EPROBE_DEFER)
1361 		goto rpm_disable;
1362 
1363 	/* Init DMA */
1364 	ret = lpi2c_dma_init(&pdev->dev, phy_addr);
1365 	if (ret) {
1366 		if (ret == -EPROBE_DEFER)
1367 			goto rpm_disable;
1368 		dev_info(&pdev->dev, "use pio mode\n");
1369 	}
1370 
1371 	ret = i2c_add_adapter(&lpi2c_imx->adapter);
1372 	if (ret)
1373 		goto rpm_disable;
1374 
1375 	pm_runtime_mark_last_busy(&pdev->dev);
1376 	pm_runtime_put_autosuspend(&pdev->dev);
1377 
1378 	dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n");
1379 
1380 	return 0;
1381 
1382 rpm_disable:
1383 	pm_runtime_put(&pdev->dev);
1384 	pm_runtime_disable(&pdev->dev);
1385 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1386 
1387 	return ret;
1388 }
1389 
1390 static void lpi2c_imx_remove(struct platform_device *pdev)
1391 {
1392 	struct lpi2c_imx_struct *lpi2c_imx = platform_get_drvdata(pdev);
1393 
1394 	i2c_del_adapter(&lpi2c_imx->adapter);
1395 
1396 	pm_runtime_disable(&pdev->dev);
1397 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1398 }
1399 
1400 static int __maybe_unused lpi2c_runtime_suspend(struct device *dev)
1401 {
1402 	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1403 
1404 	clk_bulk_disable(lpi2c_imx->num_clks, lpi2c_imx->clks);
1405 	pinctrl_pm_select_sleep_state(dev);
1406 
1407 	return 0;
1408 }
1409 
1410 static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
1411 {
1412 	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1413 	int ret;
1414 
1415 	pinctrl_pm_select_default_state(dev);
1416 	ret = clk_bulk_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
1417 	if (ret) {
1418 		dev_err(dev, "failed to enable I2C clock, ret=%d\n", ret);
1419 		return ret;
1420 	}
1421 
1422 	return 0;
1423 }
1424 
1425 static int __maybe_unused lpi2c_suspend_noirq(struct device *dev)
1426 {
1427 	return pm_runtime_force_suspend(dev);
1428 }
1429 
1430 static int __maybe_unused lpi2c_resume_noirq(struct device *dev)
1431 {
1432 	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1433 	int ret;
1434 
1435 	ret = pm_runtime_force_resume(dev);
1436 	if (ret)
1437 		return ret;
1438 
1439 	/*
1440 	 * If the I2C module powers down during system suspend,
1441 	 * the register values will be lost. Therefore, reinitialize
1442 	 * the target when the system resumes.
1443 	 */
1444 	if (lpi2c_imx->target)
1445 		lpi2c_imx_target_init(lpi2c_imx);
1446 
1447 	return 0;
1448 }
1449 
1450 static int lpi2c_suspend(struct device *dev)
1451 {
1452 	/*
1453 	 * Some I2C devices may need the I2C controller to remain active
1454 	 * during resume_noirq() or suspend_noirq(). If the controller is
1455 	 * autosuspended, there is no way to wake it up once runtime PM is
1456 	 * disabled (in suspend_late()).
1457 	 *
1458 	 * During system resume, the I2C controller will be available only
1459 	 * after runtime PM is re-enabled (in resume_early()). However, this
1460 	 * may be too late for some devices.
1461 	 *
1462 	 * Wake up the controller in the suspend() callback while runtime PM
1463 	 * is still enabled. The I2C controller will remain available until
1464 	 * the suspend_noirq() callback (pm_runtime_force_suspend()) is
1465 	 * called. During resume, the I2C controller can be restored by the
1466 	 * resume_noirq() callback (pm_runtime_force_resume()).
1467 	 *
1468 	 * Finally, the resume() callback re-enables autosuspend, ensuring
1469 	 * the I2C controller remains available until the system enters
1470 	 * suspend_noirq() and again as soon as resume_noirq() has run.
1471 	 */
1472 	return pm_runtime_resume_and_get(dev);
1473 }
1474 
1475 static int lpi2c_resume(struct device *dev)
1476 {
1477 	pm_runtime_mark_last_busy(dev);
1478 	pm_runtime_put_autosuspend(dev);
1479 
1480 	return 0;
1481 }
1482 
1483 static const struct dev_pm_ops lpi2c_pm_ops = {
1484 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(lpi2c_suspend_noirq,
1485 				      lpi2c_resume_noirq)
1486 	SYSTEM_SLEEP_PM_OPS(lpi2c_suspend, lpi2c_resume)
1487 	SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend,
1488 			   lpi2c_runtime_resume, NULL)
1489 };
1490 
1491 static struct platform_driver lpi2c_imx_driver = {
1492 	.probe = lpi2c_imx_probe,
1493 	.remove = lpi2c_imx_remove,
1494 	.driver = {
1495 		.name = DRIVER_NAME,
1496 		.of_match_table = lpi2c_imx_of_match,
1497 		.pm = &lpi2c_pm_ops,
1498 	},
1499 };
1500 
1501 module_platform_driver(lpi2c_imx_driver);
1502 
1503 MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
1504 MODULE_DESCRIPTION("I2C adapter driver for LPI2C bus");
1505 MODULE_LICENSE("GPL");
1506