xref: /linux/drivers/i2c/busses/i2c-imx-lpi2c.c (revision d3b402c5a2d47f51eb0581da1a7b142f82cb10d1)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * This is i.MX low power i2c controller driver.
4  *
5  * Copyright 2016 Freescale Semiconductor, Inc.
6  */
7 
8 #include <linux/bitfield.h>
9 #include <linux/clk.h>
10 #include <linux/completion.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmaengine.h>
14 #include <linux/err.h>
15 #include <linux/errno.h>
16 #include <linux/i2c.h>
17 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/pinctrl/consumer.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 
30 #define DRIVER_NAME "imx-lpi2c"
31 
32 #define LPI2C_PARAM	0x04	/* i2c RX/TX FIFO size */
#define LPI2C_MCR	0x10	/* i2c control register */
34 #define LPI2C_MSR	0x14	/* i2c status register */
35 #define LPI2C_MIER	0x18	/* i2c interrupt enable */
36 #define LPI2C_MDER	0x1C	/* i2c DMA enable */
37 #define LPI2C_MCFGR0	0x20	/* i2c master configuration */
38 #define LPI2C_MCFGR1	0x24	/* i2c master configuration */
39 #define LPI2C_MCFGR2	0x28	/* i2c master configuration */
40 #define LPI2C_MCFGR3	0x2C	/* i2c master configuration */
41 #define LPI2C_MCCR0	0x48	/* i2c master clk configuration */
42 #define LPI2C_MCCR1	0x50	/* i2c master clk configuration */
43 #define LPI2C_MFCR	0x58	/* i2c master FIFO control */
44 #define LPI2C_MFSR	0x5C	/* i2c master FIFO status */
45 #define LPI2C_MTDR	0x60	/* i2c master TX data register */
46 #define LPI2C_MRDR	0x70	/* i2c master RX data register */
47 
48 #define LPI2C_SCR	0x110	/* i2c target control register */
49 #define LPI2C_SSR	0x114	/* i2c target status register */
50 #define LPI2C_SIER	0x118	/* i2c target interrupt enable */
51 #define LPI2C_SDER	0x11C	/* i2c target DMA enable */
52 #define LPI2C_SCFGR0	0x120	/* i2c target configuration */
53 #define LPI2C_SCFGR1	0x124	/* i2c target configuration */
54 #define LPI2C_SCFGR2	0x128	/* i2c target configuration */
55 #define LPI2C_SAMR	0x140	/* i2c target address match */
56 #define LPI2C_SASR	0x150	/* i2c target address status */
57 #define LPI2C_STAR	0x154	/* i2c target transmit ACK */
58 #define LPI2C_STDR	0x160	/* i2c target transmit data */
59 #define LPI2C_SRDR	0x170	/* i2c target receive data */
60 #define LPI2C_SRDROR	0x178	/* i2c target receive data read only */
61 
62 /* i2c command */
63 #define TRAN_DATA	0X00
64 #define RECV_DATA	0X01
65 #define GEN_STOP	0X02
66 #define RECV_DISCARD	0X03
67 #define GEN_START	0X04
68 #define START_NACK	0X05
69 #define START_HIGH	0X06
70 #define START_HIGH_NACK	0X07
71 
72 #define MCR_MEN		BIT(0)
73 #define MCR_RST		BIT(1)
74 #define MCR_DOZEN	BIT(2)
75 #define MCR_DBGEN	BIT(3)
76 #define MCR_RTF		BIT(8)
77 #define MCR_RRF		BIT(9)
78 #define MSR_TDF		BIT(0)
79 #define MSR_RDF		BIT(1)
80 #define MSR_SDF		BIT(9)
81 #define MSR_NDF		BIT(10)
82 #define MSR_ALF		BIT(11)
83 #define MSR_MBF		BIT(24)
84 #define MSR_BBF		BIT(25)
85 #define MIER_TDIE	BIT(0)
86 #define MIER_RDIE	BIT(1)
87 #define MIER_SDIE	BIT(9)
88 #define MIER_NDIE	BIT(10)
89 #define MCFGR1_AUTOSTOP	BIT(8)
90 #define MCFGR1_IGNACK	BIT(9)
91 #define MRDR_RXEMPTY	BIT(14)
92 #define MDER_TDDE	BIT(0)
93 #define MDER_RDDE	BIT(1)
94 #define MSR_RDF_ASSERTED(x) FIELD_GET(MSR_RDF, (x))
95 
96 #define SCR_SEN		BIT(0)
97 #define SCR_RST		BIT(1)
98 #define SCR_FILTEN	BIT(4)
99 #define SCR_RTF		BIT(8)
100 #define SCR_RRF		BIT(9)
101 #define SSR_TDF		BIT(0)
102 #define SSR_RDF		BIT(1)
103 #define SSR_AVF		BIT(2)
104 #define SSR_TAF		BIT(3)
105 #define SSR_RSF		BIT(8)
106 #define SSR_SDF		BIT(9)
107 #define SSR_BEF		BIT(10)
108 #define SSR_FEF		BIT(11)
109 #define SSR_SBF		BIT(24)
110 #define SSR_BBF		BIT(25)
111 #define SSR_CLEAR_BITS	(SSR_RSF | SSR_SDF | SSR_BEF | SSR_FEF)
112 #define SIER_TDIE	BIT(0)
113 #define SIER_RDIE	BIT(1)
114 #define SIER_AVIE	BIT(2)
115 #define SIER_TAIE	BIT(3)
116 #define SIER_RSIE	BIT(8)
117 #define SIER_SDIE	BIT(9)
118 #define SIER_BEIE	BIT(10)
119 #define SIER_FEIE	BIT(11)
120 #define SIER_AM0F	BIT(12)
121 #define SCFGR1_RXSTALL	BIT(1)
122 #define SCFGR1_TXDSTALL	BIT(2)
123 #define SCFGR2_FILTSDA_SHIFT	24
124 #define SCFGR2_FILTSCL_SHIFT	16
125 #define SCFGR2_CLKHOLD(x)	(x)
126 #define SCFGR2_FILTSDA(x)	((x) << SCFGR2_FILTSDA_SHIFT)
127 #define SCFGR2_FILTSCL(x)	((x) << SCFGR2_FILTSCL_SHIFT)
128 #define SASR_READ_REQ	0x1
129 #define SLAVE_INT_FLAG	(SIER_TDIE | SIER_RDIE | SIER_AVIE | \
130 			 SIER_SDIE | SIER_BEIE)
131 
132 #define I2C_CLK_RATIO	2
133 #define CHUNK_DATA	256
134 
135 #define I2C_PM_TIMEOUT		10 /* ms */
136 #define I2C_PM_LONG_TIMEOUT_MS	1000 /* Avoid dead lock caused by big clock prepare lock */
137 #define I2C_DMA_THRESHOLD	8 /* bytes */
138 
139 enum lpi2c_imx_mode {
140 	STANDARD,	/* 100+Kbps */
141 	FAST,		/* 400+Kbps */
142 	FAST_PLUS,	/* 1.0+Mbps */
143 	HS,		/* 3.4+Mbps */
144 	ULTRA_FAST,	/* 5.0+Mbps */
145 };
146 
147 enum lpi2c_imx_pincfg {
148 	TWO_PIN_OD,
149 	TWO_PIN_OO,
150 	TWO_PIN_PP,
151 	FOUR_PIN_PP,
152 };
153 
/* Per-SoC quirk flags */
struct imx_lpi2c_hwdata {
	bool	need_request_free_irq;		/* Needed by irqsteer */
	bool	need_prepare_unprepare_clk;	/* Needed by LPCG */
};

/* State for DMA-assisted transfers */
struct lpi2c_imx_dma {
	bool		using_pio_mode;	/* true until DMA actually started */
	u8		rx_cmd_buf_len;	/* size of rx_cmd_buf in bytes */
	u8		*dma_buf;	/* msg data buffer mapped for DMA */
	u16		*rx_cmd_buf;	/* RECV_DATA command words for reads */
	unsigned int	dma_len;	/* msg data length in bytes */
	unsigned int	tx_burst_num;	/* DMA TX burst size */
	unsigned int	rx_burst_num;	/* DMA RX burst size */
	unsigned long	dma_msg_flag;	/* i2c_msg flags (I2C_M_RD etc.) */
	resource_size_t	phy_addr;	/* physical base of register block */
	dma_addr_t	dma_tx_addr;	/* DMA mapping of rx_cmd_buf */
	dma_addr_t	dma_addr;	/* DMA mapping of dma_buf */
	enum dma_data_direction dma_data_dir;
	enum dma_transfer_direction dma_transfer_dir;
	struct dma_chan	*chan_tx;
	struct dma_chan	*chan_rx;
};

/* Driver state for one LPI2C controller instance */
struct lpi2c_imx_struct {
	struct i2c_adapter	adapter;
	int			num_clks;	/* entries in clks[] */
	struct clk_bulk_data	*clks;
	void __iomem		*base;		/* mapped register window */
	__u8			*rx_buf;	/* current receive buffer */
	__u8			*tx_buf;	/* current transmit buffer */
	struct completion	complete;	/* signalled when msg done */
	unsigned long		rate_per;	/* functional clock rate (Hz) */
	unsigned int		msglen;		/* length of current msg */
	unsigned int		delivered;	/* bytes transferred so far */
	unsigned int		block_data;	/* SMBus block read in flight */
	unsigned int		bitrate;	/* requested bus speed (Hz) */
	unsigned int		txfifosize;	/* TX FIFO depth */
	unsigned int		rxfifosize;	/* RX FIFO depth */
	enum lpi2c_imx_mode	mode;
	struct i2c_bus_recovery_info rinfo;
	bool			can_use_dma;
	struct lpi2c_imx_dma	*dma;
	struct i2c_client	*target;	/* registered target-mode client */
	int			irq;
	const struct imx_lpi2c_hwdata *hwdata;
};
200 
/* i.MX7ULP: no quirks needed */
static const struct imx_lpi2c_hwdata imx7ulp_lpi2c_hwdata = {
};

/* i.MX8QXP: needs IRQ re-request (irqsteer) and clock prepare (LPCG) */
static const struct imx_lpi2c_hwdata imx8qxp_lpi2c_hwdata = {
	.need_request_free_irq		= true,
	.need_prepare_unprepare_clk	= true,
};

/* i.MX8QM: same quirks as i.MX8QXP */
static const struct imx_lpi2c_hwdata imx8qm_lpi2c_hwdata = {
	.need_request_free_irq		= true,
	.need_prepare_unprepare_clk	= true,
};
213 
/*
 * Poll LPI2C_MSR until @cond becomes true, with a 500 ms timeout.
 * Selects the atomic-context poll variant when @atomic is set; @val
 * receives the last MSR value read.
 * NOTE: relies on an `lpi2c_imx` pointer being in scope at the
 * expansion site (deliberate, matches all call sites in this file).
 */
#define lpi2c_imx_read_msr_poll_timeout(atomic, val, cond)                    \
	(atomic ? readl_poll_timeout_atomic(lpi2c_imx->base + LPI2C_MSR, val, \
					    cond, 0, 500000) :                \
		  readl_poll_timeout(lpi2c_imx->base + LPI2C_MSR, val, cond,  \
				     0, 500000))
219 
/* Program the master interrupt enable register (MIER) with @enable mask */
static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
			      unsigned int enable)
{
	writel(enable, lpi2c_imx->base + LPI2C_MIER);
}
225 
/*
 * Wait until the bus reports arbitration lost, bus busy, or master busy.
 * An arbitration-lost condition is cleared and mapped to -EAGAIN; a poll
 * timeout triggers bus recovery (when wired up) and yields -ETIMEDOUT.
 */
static int lpi2c_imx_bus_busy(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int status;
	int ret;

	ret = lpi2c_imx_read_msr_poll_timeout(atomic, status,
					      status & (MSR_ALF | MSR_BBF | MSR_MBF));

	/* arbitration lost: write the flag back to clear it */
	if (status & MSR_ALF) {
		writel(status, lpi2c_imx->base + LPI2C_MSR);
		return -EAGAIN;
	}

	/* neither busy nor owned within the timeout */
	if (ret) {
		dev_dbg(&lpi2c_imx->adapter.dev, "bus not work\n");
		if (lpi2c_imx->adapter.bus_recovery_info)
			i2c_recover_bus(&lpi2c_imx->adapter);
		return -ETIMEDOUT;
	}

	return 0;
}
250 
lpi2c_imx_txfifo_cnt(struct lpi2c_imx_struct * lpi2c_imx)251 static u32 lpi2c_imx_txfifo_cnt(struct lpi2c_imx_struct *lpi2c_imx)
252 {
253 	return readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
254 }
255 
lpi2c_imx_set_mode(struct lpi2c_imx_struct * lpi2c_imx)256 static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx)
257 {
258 	unsigned int bitrate = lpi2c_imx->bitrate;
259 	enum lpi2c_imx_mode mode;
260 
261 	if (bitrate < I2C_MAX_FAST_MODE_FREQ)
262 		mode = STANDARD;
263 	else if (bitrate < I2C_MAX_FAST_MODE_PLUS_FREQ)
264 		mode = FAST;
265 	else if (bitrate < I2C_MAX_HIGH_SPEED_MODE_FREQ)
266 		mode = FAST_PLUS;
267 	else if (bitrate < I2C_MAX_ULTRA_FAST_MODE_FREQ)
268 		mode = HS;
269 	else
270 		mode = ULTRA_FAST;
271 
272 	lpi2c_imx->mode = mode;
273 }
274 
/*
 * Flush both FIFOs, clear stale status flags, queue a START command with
 * the target address, then wait for the bus to become owned.
 */
static int lpi2c_imx_start(struct lpi2c_imx_struct *lpi2c_imx,
			   struct i2c_msg *msgs, bool atomic)
{
	unsigned int val;

	val = readl(lpi2c_imx->base + LPI2C_MCR) | MCR_RRF | MCR_RTF;
	writel(val, lpi2c_imx->base + LPI2C_MCR);
	/* clear all write-1-to-clear status bits */
	writel(0x7f00, lpi2c_imx->base + LPI2C_MSR);

	val = (GEN_START << 8) | i2c_8bit_addr_from_msg(msgs);
	writel(val, lpi2c_imx->base + LPI2C_MTDR);

	return lpi2c_imx_bus_busy(lpi2c_imx, atomic);
}
290 
/* Queue a STOP command and wait for the STOP-detected flag in MSR */
static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int status;
	int ret;

	writel(GEN_STOP << 8, lpi2c_imx->base + LPI2C_MTDR);

	ret = lpi2c_imx_read_msr_poll_timeout(atomic, status, status & MSR_SDF);
	if (ret) {
		dev_dbg(&lpi2c_imx->adapter.dev, "stop timeout\n");
		if (lpi2c_imx->adapter.bus_recovery_info)
			i2c_recover_bus(&lpi2c_imx->adapter);
	}
}
306 
/* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
{
	u8 prescale, filt, sethold, datavd;
	unsigned int clk_rate, clk_cycle, clkhi, clklo;
	enum lpi2c_imx_pincfg pincfg;
	unsigned int temp;

	lpi2c_imx_set_mode(lpi2c_imx);

	clk_rate = lpi2c_imx->rate_per;

	/* glitch filter is disabled for HS and ultra-fast modes */
	if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
		filt = 0;
	else
		filt = 2;

	/*
	 * Find the smallest prescaler that keeps the computed low period
	 * (clklo) below 64 functional-clock cycles.
	 */
	for (prescale = 0; prescale <= 7; prescale++) {
		clk_cycle = clk_rate / ((1 << prescale) * lpi2c_imx->bitrate)
			    - 3 - (filt >> 1);
		clkhi = DIV_ROUND_UP(clk_cycle, I2C_CLK_RATIO + 1);
		clklo = clk_cycle - clkhi;
		if (clklo < 64)
			break;
	}

	/* no prescaler can reach the requested bitrate from this clock */
	if (prescale > 7)
		return -EINVAL;

	/* set MCFGR1: PINCFG, PRESCALE, IGNACK */
	if (lpi2c_imx->mode == ULTRA_FAST)
		pincfg = TWO_PIN_OO;	/* output-only: no ACK/NAK phase */
	else
		pincfg = TWO_PIN_OD;
	temp = prescale | pincfg << 24;

	if (lpi2c_imx->mode == ULTRA_FAST)
		temp |= MCFGR1_IGNACK;

	writel(temp, lpi2c_imx->base + LPI2C_MCFGR1);

	/* set MCFGR2: FILTSDA, FILTSCL */
	temp = (filt << 16) | (filt << 24);
	writel(temp, lpi2c_imx->base + LPI2C_MCFGR2);

	/* set MCCR: DATAVD, SETHOLD, CLKHI, CLKLO */
	sethold = clkhi;
	datavd = clkhi >> 1;
	temp = datavd << 24 | sethold << 16 | clkhi << 8 | clklo;

	/* HS timings go to MCCR1; every other mode uses MCCR0 */
	if (lpi2c_imx->mode == HS)
		writel(temp, lpi2c_imx->base + LPI2C_MCCR1);
	else
		writel(temp, lpi2c_imx->base + LPI2C_MCCR0);

	return 0;
}
364 
lpi2c_imx_master_enable(struct lpi2c_imx_struct * lpi2c_imx)365 static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
366 {
367 	unsigned int temp;
368 	int ret;
369 
370 	ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
371 	if (ret < 0)
372 		return ret;
373 
374 	temp = MCR_RST;
375 	writel(temp, lpi2c_imx->base + LPI2C_MCR);
376 	writel(0, lpi2c_imx->base + LPI2C_MCR);
377 
378 	ret = lpi2c_imx_config(lpi2c_imx);
379 	if (ret)
380 		goto rpm_put;
381 
382 	temp = readl(lpi2c_imx->base + LPI2C_MCR);
383 	temp |= MCR_MEN;
384 	writel(temp, lpi2c_imx->base + LPI2C_MCR);
385 
386 	return 0;
387 
388 rpm_put:
389 	pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);
390 
391 	return ret;
392 }
393 
lpi2c_imx_master_disable(struct lpi2c_imx_struct * lpi2c_imx)394 static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
395 {
396 	u32 temp;
397 
398 	temp = readl(lpi2c_imx->base + LPI2C_MCR);
399 	temp &= ~MCR_MEN;
400 	writel(temp, lpi2c_imx->base + LPI2C_MCR);
401 
402 	pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);
403 
404 	return 0;
405 }
406 
lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct * lpi2c_imx)407 static int lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
408 {
409 	unsigned long time_left;
410 
411 	time_left = wait_for_completion_timeout(&lpi2c_imx->complete, HZ);
412 
413 	return time_left ? 0 : -ETIMEDOUT;
414 }
415 
/*
 * Wait until the TX FIFO has drained or a NACK is flagged.  A NACK
 * always aborts with -EIO; a drain timeout triggers bus recovery (when
 * available) and returns -ETIMEDOUT.
 */
static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int status;
	int ret;

	ret = lpi2c_imx_read_msr_poll_timeout(atomic, status,
					      (status & MSR_NDF) || !lpi2c_imx_txfifo_cnt(lpi2c_imx));

	/* NACK takes precedence even when the poll also timed out */
	if (status & MSR_NDF) {
		dev_dbg(&lpi2c_imx->adapter.dev, "NDF detected\n");
		return -EIO;
	}

	if (ret) {
		dev_dbg(&lpi2c_imx->adapter.dev, "txfifo empty timeout\n");
		if (lpi2c_imx->adapter.bus_recovery_info)
			i2c_recover_bus(&lpi2c_imx->adapter);
		return -ETIMEDOUT;
	}

	return 0;
}
438 
/* Set the TX watermark to half the FIFO depth (MFCR low half) */
static void lpi2c_imx_set_tx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
{
	writel(lpi2c_imx->txfifosize >> 1, lpi2c_imx->base + LPI2C_MFCR);
}
443 
lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct * lpi2c_imx)444 static void lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
445 {
446 	unsigned int temp, remaining;
447 
448 	remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
449 
450 	if (remaining > (lpi2c_imx->rxfifosize >> 1))
451 		temp = lpi2c_imx->rxfifosize >> 1;
452 	else
453 		temp = 0;
454 
455 	writel(temp << 16, lpi2c_imx->base + LPI2C_MFCR);
456 }
457 
/*
 * Push message bytes into the TX FIFO until it is full or the message is
 * exhausted.  Returns true when the whole message has been queued.  In
 * interrupt mode, re-arms TDIE/NDIE while data remains and signals the
 * completion once everything is queued.
 */
static bool lpi2c_imx_write_txfifo(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int level = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;

	while (level < lpi2c_imx->txfifosize &&
	       lpi2c_imx->delivered < lpi2c_imx->msglen) {
		writel(lpi2c_imx->tx_buf[lpi2c_imx->delivered++],
		       lpi2c_imx->base + LPI2C_MTDR);
		level++;
	}

	/* more to send: wait for the next TX-ready/NACK interrupt */
	if (lpi2c_imx->delivered < lpi2c_imx->msglen) {
		if (!atomic)
			lpi2c_imx_intctrl(lpi2c_imx, MIER_TDIE | MIER_NDIE);
		return false;
	}

	if (!atomic)
		complete(&lpi2c_imx->complete);

	return true;
}
484 
/*
 * Drain the RX FIFO into the message buffer.  Returns true when the whole
 * message has been received (completing the waiter in interrupt mode);
 * otherwise updates the RX watermark, queues the next receive command at
 * each 256-byte chunk boundary, and re-arms the RX interrupt.
 */
static bool lpi2c_imx_read_rxfifo(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int remaining;
	unsigned int temp, data;

	/* pop bytes until the hardware reports the FIFO empty */
	do {
		data = readl(lpi2c_imx->base + LPI2C_MRDR);
		if (data & MRDR_RXEMPTY)
			break;

		lpi2c_imx->rx_buf[lpi2c_imx->delivered++] = data & 0xff;
	} while (1);

	remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;

	if (!remaining) {
		if (!atomic)
			complete(&lpi2c_imx->complete);
		return true;
	}

	/* not finished, still waiting for rx data */
	lpi2c_imx_set_rx_watermark(lpi2c_imx);

	/*
	 * multiple receive commands: one RECV_DATA command covers at most
	 * CHUNK_DATA (256) bytes, so a fresh command word is queued each
	 * time a 256-byte boundary of delivered data is crossed
	 */
	if (!(lpi2c_imx->delivered & 0xff)) {
		temp = (remaining > CHUNK_DATA ? CHUNK_DATA : remaining) - 1;
		temp |= (RECV_DATA << 8);
		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
	}

	if (!atomic)
		lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE);

	return false;
}
521 
/* Start a PIO transmit: prime the FIFO; the IRQ handler sends the rest */
static void lpi2c_imx_write(struct lpi2c_imx_struct *lpi2c_imx,
			    struct i2c_msg *msgs)
{
	lpi2c_imx->tx_buf = msgs->buf;
	lpi2c_imx_set_tx_watermark(lpi2c_imx);
	lpi2c_imx_write_txfifo(lpi2c_imx, false);
}
529 
lpi2c_imx_write_atomic(struct lpi2c_imx_struct * lpi2c_imx,struct i2c_msg * msgs)530 static int lpi2c_imx_write_atomic(struct lpi2c_imx_struct *lpi2c_imx,
531 				  struct i2c_msg *msgs)
532 {
533 	u32 temp;
534 	int err;
535 
536 	lpi2c_imx->tx_buf = msgs->buf;
537 
538 	err = lpi2c_imx_read_msr_poll_timeout(true, temp,
539 					      (temp & MSR_NDF) ||
540 					      lpi2c_imx_write_txfifo(lpi2c_imx, true));
541 
542 	if (temp & MSR_NDF)
543 		return -EIO;
544 
545 	return err;
546 }
547 
lpi2c_SMBus_block_read_length_byte(struct lpi2c_imx_struct * lpi2c_imx)548 static unsigned int lpi2c_SMBus_block_read_length_byte(struct lpi2c_imx_struct *lpi2c_imx)
549 {
550 	unsigned int data;
551 
552 	data = readl(lpi2c_imx->base + LPI2C_MRDR);
553 	lpi2c_imx->rx_buf[lpi2c_imx->delivered++] = data & 0xff;
554 
555 	return data;
556 }
557 
/*
 * Prepare a master receive: set the RX watermark and queue the first
 * receive command word.  For SMBus block reads (I2C_M_RECV_LEN) the
 * length byte is fetched synchronously here so the follow-up receive
 * command can be sized, and msglen/msgs->len are extended by the
 * reported block length.
 *
 * Returns 0 on success, -EPROTO for an invalid block length, or a
 * poll-timeout error while waiting for the length byte.
 */
static int lpi2c_imx_read_init(struct lpi2c_imx_struct *lpi2c_imx,
			       struct i2c_msg *msgs)
{
	unsigned int temp, val, block_len;
	int ret;

	lpi2c_imx->rx_buf = msgs->buf;
	lpi2c_imx->block_data = msgs->flags & I2C_M_RECV_LEN;

	lpi2c_imx_set_rx_watermark(lpi2c_imx);

	if (!lpi2c_imx->block_data) {
		/* plain read: one command word covers up to CHUNK_DATA bytes */
		temp = msgs->len > CHUNK_DATA ? CHUNK_DATA - 1 : msgs->len - 1;
		temp |= (RECV_DATA << 8);
		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
	} else {
		/*
		 * The LPI2C controller automatically sends a NACK after the last byte of a
		 * receive command, unless the next command in MTDR is also a receive command.
		 * If MTDR is empty when a receive completes, a NACK is sent by default.
		 *
		 * To comply with the SMBus block read spec, we start with a 2-byte read:
		 * The first byte in RXFIFO is the block length. Once this byte arrives, the
		 * controller immediately updates MTDR with the next read command, ensuring
		 * continuous ACK instead of NACK.
		 *
		 * The second byte is the first block data byte. Therefore, the subsequent
		 * read command should request (block_len - 1) bytes, since one data byte
		 * has already been read.
		 */

		writel((RECV_DATA << 8) | 0x01, lpi2c_imx->base + LPI2C_MTDR);

		ret = readl_poll_timeout(lpi2c_imx->base + LPI2C_MSR, val,
					 MSR_RDF_ASSERTED(val), 1, 1000);
		if (ret) {
			dev_err(&lpi2c_imx->adapter.dev, "SMBus read count failed %d\n", ret);
			return ret;
		}

		/* Read block length byte and confirm this SMBus transfer meets protocol */
		block_len = lpi2c_SMBus_block_read_length_byte(lpi2c_imx);
		if (block_len == 0 || block_len > I2C_SMBUS_BLOCK_MAX) {
			dev_err(&lpi2c_imx->adapter.dev, "Invalid SMBus block read length\n");
			return -EPROTO;
		}

		/*
		 * When block_len shows more bytes need to be read, update second read command to
		 * keep MTDR non-empty and ensuring continuous ACKs. Only update command register
		 * here. All block bytes will be read out at IRQ handler or lpi2c_imx_read_atomic()
		 * function.
		 */
		if (block_len > 1)
			writel((RECV_DATA << 8) | (block_len - 2), lpi2c_imx->base + LPI2C_MTDR);

		lpi2c_imx->msglen += block_len;
		msgs->len += block_len;
	}

	return 0;
}
620 
lpi2c_imx_read_chunk_atomic(struct lpi2c_imx_struct * lpi2c_imx)621 static bool lpi2c_imx_read_chunk_atomic(struct lpi2c_imx_struct *lpi2c_imx)
622 {
623 	u32 rxcnt;
624 
625 	rxcnt = (readl(lpi2c_imx->base + LPI2C_MFSR) >> 16) & 0xFF;
626 	if (!rxcnt)
627 		return false;
628 
629 	if (!lpi2c_imx_read_rxfifo(lpi2c_imx, true))
630 		return false;
631 
632 	return true;
633 }
634 
lpi2c_imx_read_atomic(struct lpi2c_imx_struct * lpi2c_imx,struct i2c_msg * msgs)635 static int lpi2c_imx_read_atomic(struct lpi2c_imx_struct *lpi2c_imx,
636 				 struct i2c_msg *msgs)
637 {
638 	u32 temp;
639 	int tmo_us;
640 
641 	tmo_us = 1000000;
642 	do {
643 		if (lpi2c_imx_read_chunk_atomic(lpi2c_imx))
644 			return 0;
645 
646 		temp = readl(lpi2c_imx->base + LPI2C_MSR);
647 
648 		if (temp & MSR_NDF)
649 			return -EIO;
650 
651 		udelay(100);
652 		tmo_us -= 100;
653 	} while (tmo_us > 0);
654 
655 	return -ETIMEDOUT;
656 }
657 
is_use_dma(struct lpi2c_imx_struct * lpi2c_imx,struct i2c_msg * msg)658 static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
659 {
660 	if (!lpi2c_imx->can_use_dma)
661 		return false;
662 
663 	/* DMA is not suitable for SMBus block read */
664 	if (msg->flags & I2C_M_RECV_LEN)
665 		return false;
666 
667 	/*
668 	 * A system-wide suspend or resume transition is in progress. LPI2C should use PIO to
669 	 * transfer data to avoid issue caused by no ready DMA HW resource.
670 	 */
671 	if (pm_suspend_in_progress())
672 		return false;
673 
674 	/*
675 	 * When the length of data is less than I2C_DMA_THRESHOLD,
676 	 * cpu mode is used directly to avoid low performance.
677 	 */
678 	return !(msg->len < I2C_DMA_THRESHOLD);
679 }
680 
lpi2c_imx_pio_xfer(struct lpi2c_imx_struct * lpi2c_imx,struct i2c_msg * msg)681 static int lpi2c_imx_pio_xfer(struct lpi2c_imx_struct *lpi2c_imx,
682 			      struct i2c_msg *msg)
683 {
684 	int ret;
685 
686 	reinit_completion(&lpi2c_imx->complete);
687 
688 	if (msg->flags & I2C_M_RD) {
689 		ret = lpi2c_imx_read_init(lpi2c_imx, msg);
690 		if (ret)
691 			return ret;
692 		lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
693 	} else {
694 		lpi2c_imx_write(lpi2c_imx, msg);
695 	}
696 
697 	return lpi2c_imx_pio_msg_complete(lpi2c_imx);
698 }
699 
lpi2c_imx_pio_xfer_atomic(struct lpi2c_imx_struct * lpi2c_imx,struct i2c_msg * msg)700 static int lpi2c_imx_pio_xfer_atomic(struct lpi2c_imx_struct *lpi2c_imx,
701 				     struct i2c_msg *msg)
702 {
703 	int ret;
704 
705 	if (msg->flags & I2C_M_RD) {
706 		ret = lpi2c_imx_read_init(lpi2c_imx, msg);
707 		if (ret)
708 			return ret;
709 		return lpi2c_imx_read_atomic(lpi2c_imx, msg);
710 	}
711 
712 	return lpi2c_imx_write_atomic(lpi2c_imx, msg);
713 }
714 
/*
 * Compute a generous jiffies timeout for a DMA transfer.
 *
 * 8 * dma_len * 1000 / bitrate is the on-wire time in milliseconds.
 * NOTE(review): the result is then fed to secs_to_jiffies(), i.e.
 * interpreted as seconds, which gives a ~1000x safety margin, and the
 * "+= 1" adds one unit of that — not literally one extra second as an
 * earlier comment claimed.  Confirm the intended units before
 * tightening this.
 */
static int lpi2c_imx_dma_timeout_calculate(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned long time = 0;

	time = 8 * lpi2c_imx->dma->dma_len * 1000 / lpi2c_imx->bitrate;

	/* Extra headroom for scheduler related activities */
	time += 1;

	return secs_to_jiffies(time);
}
727 
lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct * lpi2c_imx)728 static int lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct *lpi2c_imx)
729 {
730 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
731 	u16 rx_remain = dma->dma_len;
732 	int cmd_num;
733 	u16 temp;
734 
735 	/*
736 	 * Calculate the number of rx command words via the DMA TX channel
737 	 * writing into command register based on the i2c msg len, and build
738 	 * the rx command words buffer.
739 	 */
740 	cmd_num = DIV_ROUND_UP(rx_remain, CHUNK_DATA);
741 	dma->rx_cmd_buf = kcalloc(cmd_num, sizeof(u16), GFP_KERNEL);
742 	dma->rx_cmd_buf_len = cmd_num * sizeof(u16);
743 
744 	if (!dma->rx_cmd_buf) {
745 		dev_err(&lpi2c_imx->adapter.dev, "Alloc RX cmd buffer failed\n");
746 		return -ENOMEM;
747 	}
748 
749 	for (int i = 0; i < cmd_num ; i++) {
750 		temp = rx_remain > CHUNK_DATA ? CHUNK_DATA - 1 : rx_remain - 1;
751 		temp |= (RECV_DATA << 8);
752 		rx_remain -= CHUNK_DATA;
753 		dma->rx_cmd_buf[i] = temp;
754 	}
755 
756 	return 0;
757 }
758 
lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct * lpi2c_imx)759 static int lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
760 {
761 	unsigned long time_left, time;
762 
763 	time = lpi2c_imx_dma_timeout_calculate(lpi2c_imx);
764 	time_left = wait_for_completion_timeout(&lpi2c_imx->complete, time);
765 	if (time_left == 0) {
766 		dev_err(&lpi2c_imx->adapter.dev, "I/O Error in DMA Data Transfer\n");
767 		return -ETIMEDOUT;
768 	}
769 
770 	return 0;
771 }
772 
lpi2c_dma_unmap(struct lpi2c_imx_dma * dma)773 static void lpi2c_dma_unmap(struct lpi2c_imx_dma *dma)
774 {
775 	struct dma_chan *chan = dma->dma_data_dir == DMA_FROM_DEVICE
776 				? dma->chan_rx : dma->chan_tx;
777 
778 	dma_unmap_single(chan->device->dev, dma->dma_addr,
779 			 dma->dma_len, dma->dma_data_dir);
780 
781 	dma->dma_data_dir = DMA_NONE;
782 }
783 
/* Stop the TX channel feeding RX command words and drop its mapping */
static void lpi2c_cleanup_rx_cmd_dma(struct lpi2c_imx_dma *dma)
{
	dmaengine_terminate_sync(dma->chan_tx);
	dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
}
790 
lpi2c_cleanup_dma(struct lpi2c_imx_dma * dma)791 static void lpi2c_cleanup_dma(struct lpi2c_imx_dma *dma)
792 {
793 	if (dma->dma_data_dir == DMA_FROM_DEVICE)
794 		dmaengine_terminate_sync(dma->chan_rx);
795 	else if (dma->dma_data_dir == DMA_TO_DEVICE)
796 		dmaengine_terminate_sync(dma->chan_tx);
797 
798 	lpi2c_dma_unmap(dma);
799 }
800 
/* DMA completion callback: wake the waiter in lpi2c_imx_dma_msg_complete() */
static void lpi2c_dma_callback(void *data)
{
	struct lpi2c_imx_struct *lpi2c_imx = (struct lpi2c_imx_struct *)data;

	complete(&lpi2c_imx->complete);
}
807 
lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct * lpi2c_imx)808 static int lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct *lpi2c_imx)
809 {
810 	struct dma_async_tx_descriptor *rx_cmd_desc;
811 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
812 	struct dma_chan *txchan = dma->chan_tx;
813 	dma_cookie_t cookie;
814 
815 	dma->dma_tx_addr = dma_map_single(txchan->device->dev,
816 					  dma->rx_cmd_buf, dma->rx_cmd_buf_len,
817 					  DMA_TO_DEVICE);
818 	if (dma_mapping_error(txchan->device->dev, dma->dma_tx_addr)) {
819 		dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
820 		return -EINVAL;
821 	}
822 
823 	rx_cmd_desc = dmaengine_prep_slave_single(txchan, dma->dma_tx_addr,
824 						  dma->rx_cmd_buf_len, DMA_MEM_TO_DEV,
825 						  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
826 	if (!rx_cmd_desc) {
827 		dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
828 		goto desc_prepare_err_exit;
829 	}
830 
831 	cookie = dmaengine_submit(rx_cmd_desc);
832 	if (dma_submit_error(cookie)) {
833 		dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
834 		goto submit_err_exit;
835 	}
836 
837 	dma_async_issue_pending(txchan);
838 
839 	return 0;
840 
841 desc_prepare_err_exit:
842 	dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
843 			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
844 	return -EINVAL;
845 
846 submit_err_exit:
847 	dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
848 			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
849 	dmaengine_desc_free(rx_cmd_desc);
850 	return -EINVAL;
851 }
852 
/*
 * Map the message data buffer and submit its descriptor on the channel
 * matching the transfer direction (RX channel for reads, TX for writes).
 * On success the DMA completion callback signals lpi2c_imx->complete; on
 * any failure the mapping is torn down and -EINVAL returned so the
 * caller can fall back to PIO.
 */
static int lpi2c_dma_submit(struct lpi2c_imx_struct *lpi2c_imx)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	/* pick channel and directions from the message's read/write flag */
	if (dma->dma_msg_flag & I2C_M_RD) {
		chan = dma->chan_rx;
		dma->dma_data_dir = DMA_FROM_DEVICE;
		dma->dma_transfer_dir = DMA_DEV_TO_MEM;
	} else {
		chan = dma->chan_tx;
		dma->dma_data_dir = DMA_TO_DEVICE;
		dma->dma_transfer_dir = DMA_MEM_TO_DEV;
	}

	dma->dma_addr = dma_map_single(chan->device->dev,
				       dma->dma_buf, dma->dma_len, dma->dma_data_dir);
	if (dma_mapping_error(chan->device->dev, dma->dma_addr)) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_single(chan, dma->dma_addr,
					   dma->dma_len, dma->dma_transfer_dir,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
		goto desc_prepare_err_exit;
	}

	/* arm the completion before submission so the callback can't race it */
	reinit_completion(&lpi2c_imx->complete);
	desc->callback = lpi2c_dma_callback;
	desc->callback_param = lpi2c_imx;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
		goto submit_err_exit;
	}

	/* Can't switch to PIO mode when DMA have started transfer */
	dma->using_pio_mode = false;

	dma_async_issue_pending(chan);

	return 0;

desc_prepare_err_exit:
	lpi2c_dma_unmap(dma);
	return -EINVAL;

submit_err_exit:
	lpi2c_dma_unmap(dma);
	dmaengine_desc_free(desc);
	return -EINVAL;
}
911 
/*
 * Largest burst size not exceeding half the FIFO depth that divides @len
 * evenly; falls back to 1 when no larger divisor exists (and 0 when the
 * FIFO is too small to hold any burst).
 */
static int lpi2c_imx_find_max_burst_num(unsigned int fifosize, unsigned int len)
{
	unsigned int burst;

	for (burst = fifosize / 2; burst > 1; burst--)
		if (len % burst == 0)
			return burst;

	return burst;
}
922 
923 /*
924  * For a highest DMA efficiency, tx/rx burst number should be calculated according
925  * to the FIFO depth.
926  */
lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct * lpi2c_imx)927 static void lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct *lpi2c_imx)
928 {
929 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
930 	unsigned int cmd_num;
931 
932 	if (dma->dma_msg_flag & I2C_M_RD) {
933 		/*
934 		 * One RX cmd word can trigger DMA receive no more than 256 bytes.
935 		 * The number of RX cmd words should be calculated based on the data
936 		 * length.
937 		 */
938 		cmd_num = DIV_ROUND_UP(dma->dma_len, CHUNK_DATA);
939 		dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
940 								 cmd_num);
941 		dma->rx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->rxfifosize,
942 								 dma->dma_len);
943 	} else {
944 		dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
945 								 dma->dma_len);
946 	}
947 }
948 
lpi2c_dma_config(struct lpi2c_imx_struct * lpi2c_imx)949 static int lpi2c_dma_config(struct lpi2c_imx_struct *lpi2c_imx)
950 {
951 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
952 	struct dma_slave_config rx = {}, tx = {};
953 	int ret;
954 
955 	lpi2c_imx_dma_burst_num_calculate(lpi2c_imx);
956 
957 	if (dma->dma_msg_flag & I2C_M_RD) {
958 		tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
959 		tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
960 		tx.dst_maxburst = dma->tx_burst_num;
961 		tx.direction = DMA_MEM_TO_DEV;
962 		ret = dmaengine_slave_config(dma->chan_tx, &tx);
963 		if (ret < 0)
964 			return ret;
965 
966 		rx.src_addr = dma->phy_addr + LPI2C_MRDR;
967 		rx.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
968 		rx.src_maxburst = dma->rx_burst_num;
969 		rx.direction = DMA_DEV_TO_MEM;
970 		ret = dmaengine_slave_config(dma->chan_rx, &rx);
971 		if (ret < 0)
972 			return ret;
973 	} else {
974 		tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
975 		tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
976 		tx.dst_maxburst = dma->tx_burst_num;
977 		tx.direction = DMA_MEM_TO_DEV;
978 		ret = dmaengine_slave_config(dma->chan_tx, &tx);
979 		if (ret < 0)
980 			return ret;
981 	}
982 
983 	return 0;
984 }
985 
lpi2c_dma_enable(struct lpi2c_imx_struct * lpi2c_imx)986 static void lpi2c_dma_enable(struct lpi2c_imx_struct *lpi2c_imx)
987 {
988 	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
989 	/*
990 	 * TX interrupt will be triggered when the number of words in
991 	 * the transmit FIFO is equal or less than TX watermark.
992 	 * RX interrupt will be triggered when the number of words in
993 	 * the receive FIFO is greater than RX watermark.
994 	 * In order to trigger the DMA interrupt, TX watermark should be
995 	 * set equal to the DMA TX burst number but RX watermark should
996 	 * be set less than the DMA RX burst number.
997 	 */
998 	if (dma->dma_msg_flag & I2C_M_RD) {
999 		/* Set I2C TX/RX watermark */
1000 		writel(dma->tx_burst_num | (dma->rx_burst_num - 1) << 16,
1001 		       lpi2c_imx->base + LPI2C_MFCR);
1002 		/* Enable I2C DMA TX/RX function */
1003 		writel(MDER_TDDE | MDER_RDDE, lpi2c_imx->base + LPI2C_MDER);
1004 	} else {
1005 		/* Set I2C TX watermark */
1006 		writel(dma->tx_burst_num, lpi2c_imx->base + LPI2C_MFCR);
1007 		/* Enable I2C DMA TX function */
1008 		writel(MDER_TDDE, lpi2c_imx->base + LPI2C_MDER);
1009 	}
1010 
1011 	/* Enable NACK detected */
1012 	lpi2c_imx_intctrl(lpi2c_imx, MIER_NDIE);
1013 };
1014 
1015 /*
1016  * When lpi2c is in TX DMA mode we can use one DMA TX channel to write
1017  * data word into TXFIFO, but in RX DMA mode it is different.
1018  *
1019  * The LPI2C MTDR register is a command data and transmit data register.
1020  * Bits 8-10 are the command data field and Bits 0-7 are the transmit
1021  * data field. When the LPI2C master needs to read data, the number of
1022  * bytes to read should be set in the command field and RECV_DATA should
1023  * be set into the command data field to receive (DATA[7:0] + 1) bytes.
1024  * The recv data command word is made of RECV_DATA in the command data
1025  * field and the number of bytes to read in transmit data field. When the
1026  * length of data to be read exceeds 256 bytes, recv data command word
1027  * needs to be written to TXFIFO multiple times.
1028  *
 * So when in RX DMA mode, the TX channel must also be configured to
1030  * send RX command words and the RX command word must be set in advance
1031  * before transmitting.
1032  */
/*
 * Execute one i2c_msg via DMA.
 *
 * Cleanup is layered with gotos: the further the transfer progressed,
 * the more DMA state must be unwound (rx-cmd descriptor, data
 * descriptor, controller DMA enable bits, bounce buffer). If a failure
 * happens while dma->using_pio_mode is still true, the caller falls
 * back to a PIO retry of the same message.
 *
 * Returns 0 on success, -ENOMEM/-EIO/submit errors otherwise.
 */
static int lpi2c_imx_dma_xfer(struct lpi2c_imx_struct *lpi2c_imx,
			      struct i2c_msg *msg)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	int ret;

	/* When DMA mode fails before transferring, CPU mode can be used. */
	dma->using_pio_mode = true;

	dma->dma_len = msg->len;
	dma->dma_msg_flag = msg->flags;
	/* May return a bounce buffer; released at the end in both paths */
	dma->dma_buf = i2c_get_dma_safe_msg_buf(msg, I2C_DMA_THRESHOLD);
	if (!dma->dma_buf)
		return -ENOMEM;

	ret = lpi2c_dma_config(lpi2c_imx);
	if (ret) {
		dev_err(&lpi2c_imx->adapter.dev, "Failed to configure DMA (%d)\n", ret);
		goto disable_dma;
	}

	lpi2c_dma_enable(lpi2c_imx);

	ret = lpi2c_dma_submit(lpi2c_imx);
	if (ret) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA submission failed (%d)\n", ret);
		goto disable_dma;
	}

	/*
	 * Reads additionally need the TX channel to stream "recv data"
	 * command words into MTDR (see the block comment above).
	 */
	if (dma->dma_msg_flag & I2C_M_RD) {
		ret = lpi2c_imx_alloc_rx_cmd_buf(lpi2c_imx);
		if (ret)
			goto disable_cleanup_data_dma;

		ret = lpi2c_dma_rx_cmd_submit(lpi2c_imx);
		if (ret)
			goto disable_cleanup_data_dma;
	}

	ret = lpi2c_imx_dma_msg_complete(lpi2c_imx);
	if (ret)
		goto disable_cleanup_all_dma;

	/* When encountering NACK in transfer, clean up all DMA transfers */
	/* NOTE(review): ret is always 0 here, so the "!ret" test is redundant */
	if ((readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) && !ret) {
		ret = -EIO;
		goto disable_cleanup_all_dma;
	}

	/* Success: unmap the rx-cmd buffer (reads only) and the data buffer */
	if (dma->dma_msg_flag & I2C_M_RD)
		dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
				 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
	lpi2c_dma_unmap(dma);

	goto disable_dma;

disable_cleanup_all_dma:
	if (dma->dma_msg_flag & I2C_M_RD)
		lpi2c_cleanup_rx_cmd_dma(dma);
disable_cleanup_data_dma:
	lpi2c_cleanup_dma(dma);
disable_dma:
	/* Disable I2C DMA function */
	writel(0, lpi2c_imx->base + LPI2C_MDER);

	if (dma->dma_msg_flag & I2C_M_RD)
		kfree(dma->rx_cmd_buf);

	/* Copy data back to msg only on success (xferred == true) */
	if (ret)
		i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, false);
	else
		i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, true);

	return ret;
}
1108 
/*
 * Core transfer loop shared by the sleeping and atomic .xfer paths.
 *
 * For each message: issue a (repeated) START, run the payload via DMA
 * or PIO, and for writes wait until the TX FIFO drains. A STOP is
 * always issued via the "stop" label, after which NACK status is
 * folded into the result.
 *
 * Returns the number of messages transferred, or a negative errno.
 */
static int lpi2c_imx_xfer_common(struct i2c_adapter *adapter,
				 struct i2c_msg *msgs, int num, bool atomic)
{
	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(adapter);
	unsigned int temp;
	int i, result;

	result = lpi2c_imx_master_enable(lpi2c_imx);
	if (result)
		return result;

	for (i = 0; i < num; i++) {
		result = lpi2c_imx_start(lpi2c_imx, &msgs[i], atomic);
		if (result)
			goto disable;

		/* quick smbus */
		if (num == 1 && msgs[0].len == 0)
			goto stop;

		/* Reset per-message transfer state */
		lpi2c_imx->rx_buf = NULL;
		lpi2c_imx->tx_buf = NULL;
		lpi2c_imx->delivered = 0;
		lpi2c_imx->msglen = msgs[i].len;

		if (atomic) {
			result = lpi2c_imx_pio_xfer_atomic(lpi2c_imx, &msgs[i]);
		} else {
			init_completion(&lpi2c_imx->complete);

			if (is_use_dma(lpi2c_imx, &msgs[i])) {
				result = lpi2c_imx_dma_xfer(lpi2c_imx, &msgs[i]);
				/* DMA failed before moving data: retry in PIO mode */
				if (result && lpi2c_imx->dma->using_pio_mode)
					result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
			} else {
				result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
			}
		}

		if (result)
			goto stop;

		/* Writes: make sure everything left the TX FIFO before STOP */
		if (!(msgs[i].flags & I2C_M_RD)) {
			result = lpi2c_imx_txfifo_empty(lpi2c_imx, atomic);
			if (result)
				goto stop;
		}
	}

stop:
	lpi2c_imx_stop(lpi2c_imx, atomic);

	/* A NACK overrides an otherwise clean result */
	temp = readl(lpi2c_imx->base + LPI2C_MSR);
	if ((temp & MSR_NDF) && !result)
		result = -EIO;

disable:
	lpi2c_imx_master_disable(lpi2c_imx);

	dev_dbg(&lpi2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__,
		(result < 0) ? "error" : "success msg",
		(result < 0) ? result : num);

	return (result < 0) ? result : num;
}
1174 
lpi2c_imx_xfer(struct i2c_adapter * adapter,struct i2c_msg * msgs,int num)1175 static int lpi2c_imx_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
1176 {
1177 	return lpi2c_imx_xfer_common(adapter, msgs, num, false);
1178 }
1179 
lpi2c_imx_xfer_atomic(struct i2c_adapter * adapter,struct i2c_msg * msgs,int num)1180 static int lpi2c_imx_xfer_atomic(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
1181 {
1182 	return lpi2c_imx_xfer_common(adapter, msgs, num, true);
1183 }
1184 
/*
 * Target (slave) mode interrupt handler.
 *
 * @ssr:         raw target status register value
 * @sier_filter: status bits that are both pending and enabled
 *
 * Dispatches the pending events to the I2C slave backend in priority
 * order, then write-1-clears the handled status bits. Event order
 * matters: address match is handled before STOP/TX/RX, and a read
 * request short-circuits straight to the status clear via "goto ret".
 */
static irqreturn_t lpi2c_imx_target_isr(struct lpi2c_imx_struct *lpi2c_imx,
					u32 ssr, u32 sier_filter)
{
	u8 value;
	u32 sasr;

	/* Arbitration lost */
	if (sier_filter & SSR_BEF) {
		/* Mask all target interrupts; status is not cleared here */
		writel(0, lpi2c_imx->base + LPI2C_SIER);
		return IRQ_HANDLED;
	}

	/* Address detected */
	if (sier_filter & SSR_AVF) {
		sasr = readl(lpi2c_imx->base + LPI2C_SASR);
		if (SASR_READ_REQ & sasr) {
			/* Read request: backend fills "value", push first byte */
			i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_REQUESTED, &value);
			writel(value, lpi2c_imx->base + LPI2C_STDR);
			goto ret;
		} else {
			/* Write request */
			i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_REQUESTED, &value);
		}
	}

	if (sier_filter & SSR_SDF)
		/* STOP ("value" is unused for this event) */
		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_STOP, &value);

	if (sier_filter & SSR_TDF) {
		/* Target send data */
		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, lpi2c_imx->base + LPI2C_STDR);
	}

	if (sier_filter & SSR_RDF) {
		/* Target receive data */
		value = readl(lpi2c_imx->base + LPI2C_SRDR);
		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_RECEIVED, &value);
	}

ret:
	/* Clear SSR (write-1-to-clear) */
	writel(ssr & SSR_CLEAR_BITS, lpi2c_imx->base + LPI2C_SSR);
	return IRQ_HANDLED;
}
1232 
lpi2c_imx_master_isr(struct lpi2c_imx_struct * lpi2c_imx)1233 static irqreturn_t lpi2c_imx_master_isr(struct lpi2c_imx_struct *lpi2c_imx)
1234 {
1235 	unsigned int enabled;
1236 	unsigned int temp;
1237 
1238 	enabled = readl(lpi2c_imx->base + LPI2C_MIER);
1239 
1240 	lpi2c_imx_intctrl(lpi2c_imx, 0);
1241 	temp = readl(lpi2c_imx->base + LPI2C_MSR);
1242 	temp &= enabled;
1243 
1244 	if (temp & MSR_NDF)
1245 		complete(&lpi2c_imx->complete);
1246 	else if (temp & MSR_RDF)
1247 		lpi2c_imx_read_rxfifo(lpi2c_imx, false);
1248 	else if (temp & MSR_TDF)
1249 		lpi2c_imx_write_txfifo(lpi2c_imx, false);
1250 
1251 	return IRQ_HANDLED;
1252 }
1253 
lpi2c_imx_isr(int irq,void * dev_id)1254 static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
1255 {
1256 	struct lpi2c_imx_struct *lpi2c_imx = dev_id;
1257 
1258 	if (lpi2c_imx->target) {
1259 		u32 scr = readl(lpi2c_imx->base + LPI2C_SCR);
1260 		u32 ssr = readl(lpi2c_imx->base + LPI2C_SSR);
1261 		u32 sier_filter = ssr & readl(lpi2c_imx->base + LPI2C_SIER);
1262 
1263 		/*
1264 		 * The target is enabled and an interrupt has been triggered.
1265 		 * Enter the target's irq handler.
1266 		 */
1267 		if ((scr & SCR_SEN) && sier_filter)
1268 			return lpi2c_imx_target_isr(lpi2c_imx, ssr, sier_filter);
1269 	}
1270 
1271 	/*
1272 	 * Otherwise the interrupt has been triggered by the master.
1273 	 * Enter the master's irq handler.
1274 	 */
1275 	return lpi2c_imx_master_isr(lpi2c_imx);
1276 }
1277 
/*
 * Program the target (slave) block: reset it, set the target address,
 * configure clock stretching / filters, then enable the block and its
 * interrupts. The register write order below is deliberate.
 */
static void lpi2c_imx_target_init(struct lpi2c_imx_struct *lpi2c_imx)
{
	u32 temp;

	/* reset target module */
	writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
	writel(0, lpi2c_imx->base + LPI2C_SCR);

	/* Set target address (7-bit address goes into bits [7:1]) */
	writel((lpi2c_imx->target->addr << 1), lpi2c_imx->base + LPI2C_SAMR);

	/* Stall the bus (clock stretch) on RX full and TX data needed */
	writel(SCFGR1_RXSTALL | SCFGR1_TXDSTALL, lpi2c_imx->base + LPI2C_SCFGR1);

	/*
	 * set SCFGR2: FILTSDA, FILTSCL and CLKHOLD
	 *
	 * FILTSCL/FILTSDA can eliminate signal skew. It should generally be
	 * set to the same value and should be set >= 50ns.
	 *
	 * CLKHOLD is only used when clock stretching is enabled, but it will
	 * extend the clock stretching to ensure there is an additional delay
	 * between the target driving SDA and the target releasing the SCL pin.
	 *
	 * CLKHOLD setting is crucial for lpi2c target. When master read data
	 * from target, if there is a delay caused by cpu idle, excessive load,
	 * or other delays between two bytes in one message transmission, it
	 * will cause a short interval time between the driving SDA signal and
	 * releasing SCL signal. The lpi2c master will mistakenly think it is a stop
	 * signal resulting in an arbitration failure. This issue can be avoided
	 * by setting CLKHOLD.
	 *
	 * In order to ensure lpi2c function normally when the lpi2c speed is as
	 * low as 100kHz, CLKHOLD should be set to 3 and it is also compatible with
	 * higher clock frequency like 400kHz and 1MHz.
	 */
	temp = SCFGR2_FILTSDA(2) | SCFGR2_FILTSCL(2) | SCFGR2_CLKHOLD(3);
	writel(temp, lpi2c_imx->base + LPI2C_SCFGR2);

	/*
	 * Enable module:
	 * SCR_FILTEN can enable digital filter and output delay counter for LPI2C
	 * target mode. So SCR_FILTEN need be asserted when enable SDA/SCL FILTER
	 * and CLKHOLD.
	 */
	writel(SCR_SEN | SCR_FILTEN, lpi2c_imx->base + LPI2C_SCR);

	/* Enable interrupt from i2c module */
	writel(SLAVE_INT_FLAG, lpi2c_imx->base + LPI2C_SIER);
}
1327 
lpi2c_imx_register_target(struct i2c_client * client)1328 static int lpi2c_imx_register_target(struct i2c_client *client)
1329 {
1330 	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
1331 	int ret;
1332 
1333 	if (lpi2c_imx->target)
1334 		return -EBUSY;
1335 
1336 	lpi2c_imx->target = client;
1337 
1338 	ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
1339 	if (ret < 0) {
1340 		dev_err(&lpi2c_imx->adapter.dev, "failed to resume i2c controller");
1341 		return ret;
1342 	}
1343 
1344 	lpi2c_imx_target_init(lpi2c_imx);
1345 
1346 	return 0;
1347 }
1348 
lpi2c_imx_unregister_target(struct i2c_client * client)1349 static int lpi2c_imx_unregister_target(struct i2c_client *client)
1350 {
1351 	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
1352 	int ret;
1353 
1354 	if (!lpi2c_imx->target)
1355 		return -EINVAL;
1356 
1357 	/* Reset target address. */
1358 	writel(0, lpi2c_imx->base + LPI2C_SAMR);
1359 
1360 	writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
1361 	writel(0, lpi2c_imx->base + LPI2C_SCR);
1362 
1363 	lpi2c_imx->target = NULL;
1364 
1365 	ret = pm_runtime_put_sync(lpi2c_imx->adapter.dev.parent);
1366 	if (ret < 0)
1367 		dev_err(&lpi2c_imx->adapter.dev, "failed to suspend i2c controller");
1368 
1369 	return ret;
1370 }
1371 
/*
 * Optional bus recovery setup: populate bus_recovery_info with the
 * device's pinctrl handle. The caller (probe) only treats
 * -EPROBE_DEFER as fatal; other errors simply leave recovery disabled.
 */
static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
				  struct platform_device *pdev)
{
	struct i2c_bus_recovery_info *bri = &lpi2c_imx->rinfo;

	bri->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(bri->pinctrl))
		return PTR_ERR(bri->pinctrl);

	lpi2c_imx->adapter.bus_recovery_info = bri;

	return 0;
}
1385 
/*
 * Release whichever DMA channels were acquired and free the DMA
 * bookkeeping struct. Safe to call with one or both channels NULL
 * (partial-init error path in lpi2c_dma_init()).
 */
static void dma_exit(struct device *dev, struct lpi2c_imx_dma *dma)
{
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);

	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);

	devm_kfree(dev, dma);
}
1396 
/*
 * Request the TX and RX DMA channels and attach them to the driver.
 *
 * On success, lpi2c_imx->dma is populated and can_use_dma is set. On
 * failure everything acquired so far is released; -ENODEV/-EPROBE_DEFER
 * are expected cases (no DMA wired up / provider not ready) and are not
 * logged as errors.
 */
static int lpi2c_dma_init(struct device *dev, dma_addr_t phy_addr)
{
	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
	struct lpi2c_imx_dma *dma;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->phy_addr = phy_addr;

	/* Prepare for TX DMA: */
	dma->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		if (ret != -ENODEV && ret != -EPROBE_DEFER)
			dev_err(dev, "can't request DMA tx channel (%d)\n", ret);
		dma->chan_tx = NULL;
		goto err_dma_exit;
	}

	/* Prepare for RX DMA: */
	dma->chan_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		if (ret != -ENODEV && ret != -EPROBE_DEFER)
			dev_err(dev, "can't request DMA rx channel (%d)\n", ret);
		dma->chan_rx = NULL;
		goto err_dma_exit;
	}

	lpi2c_imx->can_use_dma = true;
	lpi2c_imx->dma = dma;
	return 0;

err_dma_exit:
	/* Label renamed from "dma_exit" to avoid shadowing the helper's name */
	dma_exit(dev, dma);
	return ret;
}
1437 
/*
 * Advertise supported functionality: plain I2C, emulated SMBus
 * commands, and SMBus block reads.
 */
static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
		I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}
1443 
/* I2C algorithm: master transfer paths plus optional target (slave) mode */
static const struct i2c_algorithm lpi2c_imx_algo = {
	.xfer = lpi2c_imx_xfer,
	.xfer_atomic = lpi2c_imx_xfer_atomic,
	.functionality = lpi2c_imx_func,
	.reg_target = lpi2c_imx_register_target,
	.unreg_target = lpi2c_imx_unregister_target,
};
1451 
/* Per-SoC hwdata attached to each compatible selects PM/IRQ quirks */
static const struct of_device_id lpi2c_imx_of_match[] = {
	{ .compatible = "fsl,imx7ulp-lpi2c", .data = &imx7ulp_lpi2c_hwdata,},
	{ .compatible = "fsl,imx8qxp-lpi2c", .data = &imx8qxp_lpi2c_hwdata,},
	{ .compatible = "fsl,imx8qm-lpi2c", .data = &imx8qm_lpi2c_hwdata,},
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
1459 
/*
 * Probe: map registers, acquire clocks and IRQ, set up runtime PM,
 * read FIFO geometry, and register the adapter. Bus recovery and DMA
 * are optional; their failure (other than -EPROBE_DEFER) falls back to
 * PIO without recovery.
 */
static int lpi2c_imx_probe(struct platform_device *pdev)
{
	struct lpi2c_imx_struct *lpi2c_imx;
	struct resource *res;
	dma_addr_t phy_addr;
	unsigned int temp;
	int ret;

	lpi2c_imx = devm_kzalloc(&pdev->dev, sizeof(*lpi2c_imx), GFP_KERNEL);
	if (!lpi2c_imx)
		return -ENOMEM;

	lpi2c_imx->hwdata = of_device_get_match_data(&pdev->dev);
	if (!lpi2c_imx->hwdata)
		return -ENODEV;

	lpi2c_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(lpi2c_imx->base))
		return PTR_ERR(lpi2c_imx->base);

	lpi2c_imx->irq = platform_get_irq(pdev, 0);
	if (lpi2c_imx->irq < 0)
		return lpi2c_imx->irq;

	lpi2c_imx->adapter.owner	= THIS_MODULE;
	lpi2c_imx->adapter.algo		= &lpi2c_imx_algo;
	lpi2c_imx->adapter.dev.parent	= &pdev->dev;
	lpi2c_imx->adapter.dev.of_node	= pdev->dev.of_node;
	strscpy(lpi2c_imx->adapter.name, pdev->name,
		sizeof(lpi2c_imx->adapter.name));
	/* Physical register base; needed later for DMA slave addresses */
	phy_addr = (dma_addr_t)res->start;

	ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "can't get I2C peripheral clock\n");
	lpi2c_imx->num_clks = ret;

	/* Optional DT property; fall back to 100 kHz standard mode */
	ret = of_property_read_u32(pdev->dev.of_node,
				   "clock-frequency", &lpi2c_imx->bitrate);
	if (ret)
		lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;

	ret = devm_request_irq(&pdev->dev, lpi2c_imx->irq, lpi2c_imx_isr, IRQF_NO_SUSPEND,
			       pdev->name, lpi2c_imx);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", lpi2c_imx->irq);

	i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
	platform_set_drvdata(pdev, lpi2c_imx);

	ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
	if (ret)
		return ret;

	/*
	 * Lock the parent clock rate to avoid getting parent clock upon
	 * each transfer
	 */
	/*
	 * NOTE(review): the two early returns below exit with the bulk
	 * clocks still prepared/enabled — looks like a leak on these
	 * probe-failure paths; confirm against runtime-PM expectations.
	 */
	ret = devm_clk_rate_exclusive_get(&pdev->dev, lpi2c_imx->clks[0].clk);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "can't lock I2C peripheral clock rate\n");

	lpi2c_imx->rate_per = clk_get_rate(lpi2c_imx->clks[0].clk);
	if (!lpi2c_imx->rate_per)
		return dev_err_probe(&pdev->dev, -EINVAL,
				     "can't get I2C peripheral clock rate\n");

	if (lpi2c_imx->hwdata->need_prepare_unprepare_clk)
		pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_LONG_TIMEOUT_MS);
	else
		pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT);

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* FIFO depths are encoded as log2 values in the PARAM register */
	temp = readl(lpi2c_imx->base + LPI2C_PARAM);
	lpi2c_imx->txfifosize = 1 << (temp & 0x0f);
	lpi2c_imx->rxfifosize = 1 << ((temp >> 8) & 0x0f);

	/* Init optional bus recovery function */
	ret = lpi2c_imx_init_recovery_info(lpi2c_imx, pdev);
	/* Give it another chance if pinctrl used is not ready yet */
	if (ret == -EPROBE_DEFER)
		goto rpm_disable;

	/* Init DMA */
	ret = lpi2c_dma_init(&pdev->dev, phy_addr);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto rpm_disable;
		dev_info(&pdev->dev, "use pio mode\n");
	}

	ret = i2c_add_adapter(&lpi2c_imx->adapter);
	if (ret)
		goto rpm_disable;

	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n");

	return 0;

rpm_disable:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return ret;
}
1573 
/*
 * Unbind: delete the adapter first so no new transfers can start, then
 * tear down the runtime-PM setup done in probe.
 */
static void lpi2c_imx_remove(struct platform_device *pdev)
{
	struct lpi2c_imx_struct *lpi2c_imx = platform_get_drvdata(pdev);

	i2c_del_adapter(&lpi2c_imx->adapter);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
}
1583 
lpi2c_runtime_suspend(struct device * dev)1584 static int __maybe_unused lpi2c_runtime_suspend(struct device *dev)
1585 {
1586 	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1587 	bool need_prepare_unprepare_clk = lpi2c_imx->hwdata->need_prepare_unprepare_clk;
1588 	bool need_request_free_irq = lpi2c_imx->hwdata->need_request_free_irq;
1589 
1590 	if (need_request_free_irq)
1591 		devm_free_irq(dev, lpi2c_imx->irq, lpi2c_imx);
1592 
1593 	if (need_prepare_unprepare_clk)
1594 		clk_bulk_disable_unprepare(lpi2c_imx->num_clks, lpi2c_imx->clks);
1595 	else
1596 		clk_bulk_disable(lpi2c_imx->num_clks, lpi2c_imx->clks);
1597 	pinctrl_pm_select_sleep_state(dev);
1598 
1599 	return 0;
1600 }
1601 
lpi2c_runtime_resume(struct device * dev)1602 static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
1603 {
1604 	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1605 	bool need_prepare_unprepare_clk = lpi2c_imx->hwdata->need_prepare_unprepare_clk;
1606 	bool need_request_free_irq = lpi2c_imx->hwdata->need_request_free_irq;
1607 	int ret;
1608 
1609 	pinctrl_pm_select_default_state(dev);
1610 	if (need_prepare_unprepare_clk) {
1611 		ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
1612 		if (ret) {
1613 			dev_err(dev, "failed to enable I2C clock, ret=%d\n", ret);
1614 			return ret;
1615 		}
1616 	} else {
1617 		ret = clk_bulk_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
1618 		if (ret) {
1619 			dev_err(dev, "failed to enable clock %d\n", ret);
1620 			return ret;
1621 		}
1622 	}
1623 
1624 	if (need_request_free_irq) {
1625 		ret = devm_request_irq(dev, lpi2c_imx->irq, lpi2c_imx_isr, IRQF_NO_SUSPEND,
1626 				       dev_name(dev), lpi2c_imx);
1627 		if (ret) {
1628 			dev_err(dev, "can't claim irq %d\n", lpi2c_imx->irq);
1629 			return ret;
1630 		}
1631 	}
1632 
1633 	return 0;
1634 }
1635 
lpi2c_suspend_noirq(struct device * dev)1636 static int __maybe_unused lpi2c_suspend_noirq(struct device *dev)
1637 {
1638 	return pm_runtime_force_suspend(dev);
1639 }
1640 
lpi2c_resume_noirq(struct device * dev)1641 static int __maybe_unused lpi2c_resume_noirq(struct device *dev)
1642 {
1643 	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1644 	int ret;
1645 
1646 	ret = pm_runtime_force_resume(dev);
1647 	if (ret)
1648 		return ret;
1649 
1650 	/*
1651 	 * If the I2C module powers down during system suspend,
1652 	 * the register values will be lost. Therefore, reinitialize
1653 	 * the target when the system resumes.
1654 	 */
1655 	if (lpi2c_imx->target)
1656 		lpi2c_imx_target_init(lpi2c_imx);
1657 
1658 	return 0;
1659 }
1660 
static int lpi2c_suspend(struct device *dev)
{
	/*
	 * Some I2C devices may need the I2C controller to remain active
	 * during resume_noirq() or suspend_noirq(). If the controller is
	 * autosuspended, there is no way to wake it up once runtime PM is
	 * disabled (in suspend_late()).
	 *
	 * During system resume, the I2C controller will be available only
	 * after runtime PM is re-enabled (in resume_early()). However, this
	 * may be too late for some devices.
	 *
	 * Wake up the controller in the suspend() callback while runtime PM
	 * is still enabled. The I2C controller will remain available until
	 * the suspend_noirq() callback (pm_runtime_force_suspend()) is
	 * called. During resume, the I2C controller can be restored by the
	 * resume_noirq() callback (pm_runtime_force_resume()).
	 *
	 * Finally, the resume() callback re-enables autosuspend, ensuring
	 * the I2C controller remains available until the system enters
	 * suspend_noirq() and after it leaves resume_noirq().
	 */
	return pm_runtime_resume_and_get(dev);
}
1685 
/*
 * System resume: drop the reference taken in lpi2c_suspend() and
 * re-enable autosuspend.
 */
static int lpi2c_resume(struct device *dev)
{
	pm_runtime_put_autosuspend(dev);

	return 0;
}
1692 
/*
 * PM hookup: noirq system-sleep ops force runtime suspend/resume late
 * in the sleep sequence; the regular sleep ops keep the controller
 * awake between suspend() and suspend_noirq() (see lpi2c_suspend());
 * runtime ops gate clocks, pinctrl state and (optionally) the IRQ.
 */
static const struct dev_pm_ops lpi2c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(lpi2c_suspend_noirq,
				      lpi2c_resume_noirq)
	SYSTEM_SLEEP_PM_OPS(lpi2c_suspend, lpi2c_resume)
	SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend,
			   lpi2c_runtime_resume, NULL)
};
1700 
/* Platform driver glue: matched via the OF table above */
static struct platform_driver lpi2c_imx_driver = {
	.probe = lpi2c_imx_probe,
	.remove = lpi2c_imx_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = lpi2c_imx_of_match,
		.pm = &lpi2c_pm_ops,
	},
};
1710 
/* Standard module registration boilerplate */
module_platform_driver(lpi2c_imx_driver);

MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_DESCRIPTION("I2C adapter driver for LPI2C bus");
MODULE_LICENSE("GPL");
1716