1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * This is i.MX low power i2c controller driver.
4 *
5 * Copyright 2016 Freescale Semiconductor, Inc.
6 */
7
8 #include <linux/clk.h>
9 #include <linux/completion.h>
10 #include <linux/delay.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/err.h>
14 #include <linux/errno.h>
15 #include <linux/i2c.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/sched.h>
27 #include <linux/slab.h>
28
29 #define DRIVER_NAME "imx-lpi2c"
30
31 #define LPI2C_PARAM 0x04 /* i2c RX/TX FIFO size */
#define LPI2C_MCR	0x10	/* i2c control register */
33 #define LPI2C_MSR 0x14 /* i2c status register */
34 #define LPI2C_MIER 0x18 /* i2c interrupt enable */
35 #define LPI2C_MDER 0x1C /* i2c DMA enable */
36 #define LPI2C_MCFGR0 0x20 /* i2c master configuration */
37 #define LPI2C_MCFGR1 0x24 /* i2c master configuration */
38 #define LPI2C_MCFGR2 0x28 /* i2c master configuration */
39 #define LPI2C_MCFGR3 0x2C /* i2c master configuration */
40 #define LPI2C_MCCR0 0x48 /* i2c master clk configuration */
41 #define LPI2C_MCCR1 0x50 /* i2c master clk configuration */
42 #define LPI2C_MFCR 0x58 /* i2c master FIFO control */
43 #define LPI2C_MFSR 0x5C /* i2c master FIFO status */
44 #define LPI2C_MTDR 0x60 /* i2c master TX data register */
45 #define LPI2C_MRDR 0x70 /* i2c master RX data register */
46
47 #define LPI2C_SCR 0x110 /* i2c target control register */
48 #define LPI2C_SSR 0x114 /* i2c target status register */
49 #define LPI2C_SIER 0x118 /* i2c target interrupt enable */
50 #define LPI2C_SDER 0x11C /* i2c target DMA enable */
51 #define LPI2C_SCFGR0 0x120 /* i2c target configuration */
52 #define LPI2C_SCFGR1 0x124 /* i2c target configuration */
53 #define LPI2C_SCFGR2 0x128 /* i2c target configuration */
54 #define LPI2C_SAMR 0x140 /* i2c target address match */
55 #define LPI2C_SASR 0x150 /* i2c target address status */
56 #define LPI2C_STAR 0x154 /* i2c target transmit ACK */
57 #define LPI2C_STDR 0x160 /* i2c target transmit data */
58 #define LPI2C_SRDR 0x170 /* i2c target receive data */
59 #define LPI2C_SRDROR 0x178 /* i2c target receive data read only */
60
61 /* i2c command */
62 #define TRAN_DATA 0X00
63 #define RECV_DATA 0X01
64 #define GEN_STOP 0X02
65 #define RECV_DISCARD 0X03
66 #define GEN_START 0X04
67 #define START_NACK 0X05
68 #define START_HIGH 0X06
69 #define START_HIGH_NACK 0X07
70
71 #define MCR_MEN BIT(0)
72 #define MCR_RST BIT(1)
73 #define MCR_DOZEN BIT(2)
74 #define MCR_DBGEN BIT(3)
75 #define MCR_RTF BIT(8)
76 #define MCR_RRF BIT(9)
77 #define MSR_TDF BIT(0)
78 #define MSR_RDF BIT(1)
79 #define MSR_SDF BIT(9)
80 #define MSR_NDF BIT(10)
81 #define MSR_ALF BIT(11)
82 #define MSR_MBF BIT(24)
83 #define MSR_BBF BIT(25)
84 #define MIER_TDIE BIT(0)
85 #define MIER_RDIE BIT(1)
86 #define MIER_SDIE BIT(9)
87 #define MIER_NDIE BIT(10)
88 #define MCFGR1_AUTOSTOP BIT(8)
89 #define MCFGR1_IGNACK BIT(9)
90 #define MRDR_RXEMPTY BIT(14)
91 #define MDER_TDDE BIT(0)
92 #define MDER_RDDE BIT(1)
93
94 #define SCR_SEN BIT(0)
95 #define SCR_RST BIT(1)
96 #define SCR_FILTEN BIT(4)
97 #define SCR_RTF BIT(8)
98 #define SCR_RRF BIT(9)
99 #define SSR_TDF BIT(0)
100 #define SSR_RDF BIT(1)
101 #define SSR_AVF BIT(2)
102 #define SSR_TAF BIT(3)
103 #define SSR_RSF BIT(8)
104 #define SSR_SDF BIT(9)
105 #define SSR_BEF BIT(10)
106 #define SSR_FEF BIT(11)
107 #define SSR_SBF BIT(24)
108 #define SSR_BBF BIT(25)
109 #define SSR_CLEAR_BITS (SSR_RSF | SSR_SDF | SSR_BEF | SSR_FEF)
110 #define SIER_TDIE BIT(0)
111 #define SIER_RDIE BIT(1)
112 #define SIER_AVIE BIT(2)
113 #define SIER_TAIE BIT(3)
114 #define SIER_RSIE BIT(8)
115 #define SIER_SDIE BIT(9)
116 #define SIER_BEIE BIT(10)
117 #define SIER_FEIE BIT(11)
118 #define SIER_AM0F BIT(12)
119 #define SCFGR1_RXSTALL BIT(1)
120 #define SCFGR1_TXDSTALL BIT(2)
121 #define SCFGR2_FILTSDA_SHIFT 24
122 #define SCFGR2_FILTSCL_SHIFT 16
123 #define SCFGR2_CLKHOLD(x) (x)
124 #define SCFGR2_FILTSDA(x) ((x) << SCFGR2_FILTSDA_SHIFT)
125 #define SCFGR2_FILTSCL(x) ((x) << SCFGR2_FILTSCL_SHIFT)
126 #define SASR_READ_REQ 0x1
127 #define SLAVE_INT_FLAG (SIER_TDIE | SIER_RDIE | SIER_AVIE | \
128 SIER_SDIE | SIER_BEIE)
129
130 #define I2C_CLK_RATIO 2
131 #define CHUNK_DATA 256
132
133 #define I2C_PM_TIMEOUT 10 /* ms */
134 #define I2C_DMA_THRESHOLD 8 /* bytes */
135
136 enum lpi2c_imx_mode {
137 STANDARD, /* 100+Kbps */
138 FAST, /* 400+Kbps */
139 FAST_PLUS, /* 1.0+Mbps */
140 HS, /* 3.4+Mbps */
141 ULTRA_FAST, /* 5.0+Mbps */
142 };
143
144 enum lpi2c_imx_pincfg {
145 TWO_PIN_OD,
146 TWO_PIN_OO,
147 TWO_PIN_PP,
148 FOUR_PIN_PP,
149 };
150
/* Per-transfer DMA state for the LPI2C master. */
struct lpi2c_imx_dma {
	bool using_pio_mode;		/* true until DMA actually starts; allows PIO fallback */
	u8 rx_cmd_buf_len;		/* size in bytes of rx_cmd_buf */
	u8 *dma_buf;			/* DMA-safe (possibly bounced) copy of the msg buffer */
	u16 *rx_cmd_buf;		/* RX command words fed to MTDR via the TX channel */
	unsigned int dma_len;		/* transfer length in bytes */
	unsigned int tx_burst_num;	/* DMA TX burst size (words) */
	unsigned int rx_burst_num;	/* DMA RX burst size (words) */
	unsigned long dma_msg_flag;	/* i2c_msg flags of the current message (I2C_M_RD, ...) */
	resource_size_t phy_addr;	/* physical base of the controller registers */
	dma_addr_t dma_tx_addr;		/* mapping of rx_cmd_buf (TX channel) */
	dma_addr_t dma_addr;		/* mapping of dma_buf (data channel) */
	enum dma_data_direction dma_data_dir;		/* DMA_NONE when nothing is mapped */
	enum dma_transfer_direction dma_transfer_dir;
	struct dma_chan *chan_tx;
	struct dma_chan *chan_rx;
};
168
/* Driver state for one LPI2C controller instance. */
struct lpi2c_imx_struct {
	struct i2c_adapter adapter;
	int num_clks;			/* entries in clks[] */
	struct clk_bulk_data *clks;
	void __iomem *base;		/* mapped register window */
	__u8 *rx_buf;			/* current PIO RX destination */
	__u8 *tx_buf;			/* current PIO TX source */
	struct completion complete;	/* transfer-done signal (ISR or DMA callback) */
	unsigned long rate_per;		/* functional clock rate in Hz (used for divider calc) */
	unsigned int msglen;		/* length of the in-flight message */
	unsigned int delivered;		/* bytes moved so far for the in-flight message */
	unsigned int block_data;	/* SMBus block read pending (I2C_M_RECV_LEN) */
	unsigned int bitrate;		/* requested bus speed in bit/s */
	unsigned int txfifosize;	/* HW TX FIFO depth (words) */
	unsigned int rxfifosize;	/* HW RX FIFO depth (words) */
	enum lpi2c_imx_mode mode;	/* speed class derived from bitrate */
	struct i2c_bus_recovery_info rinfo;
	bool can_use_dma;		/* DMA channels acquired (set at probe; not in view) */
	struct lpi2c_imx_dma *dma;
	struct i2c_client *target;	/* registered i2c target (slave) client */
};
190
/*
 * Poll MSR until @cond holds or 500 ms elapse, using the atomic
 * (busy-wait) or sleeping iopoll variant depending on @atomic.
 * Evaluates to 0 on success or -ETIMEDOUT; @val holds the last MSR
 * value read either way. Expects a local "lpi2c_imx" in scope.
 */
#define lpi2c_imx_read_msr_poll_timeout(atomic, val, cond) \
	(atomic ? readl_poll_timeout_atomic(lpi2c_imx->base + LPI2C_MSR, val, \
					    cond, 0, 500000) : \
		  readl_poll_timeout(lpi2c_imx->base + LPI2C_MSR, val, cond, \
				     0, 500000))
196
/* Write the master interrupt-enable mask (MIER); 0 disables all. */
static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
			      unsigned int enable)
{
	writel(enable, lpi2c_imx->base + LPI2C_MIER);
}
202
/*
 * Wait (up to 500 ms) for the bus/master to become busy after a START,
 * or for arbitration loss.
 *
 * Returns 0 once MBF or BBF is observed, -EAGAIN on arbitration lost
 * (ALF, which is also write-1-cleared here), or -ETIMEDOUT after
 * attempting bus recovery when a recovery method is registered.
 */
static int lpi2c_imx_bus_busy(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int temp;
	int err;

	err = lpi2c_imx_read_msr_poll_timeout(atomic, temp,
					      temp & (MSR_ALF | MSR_BBF | MSR_MBF));

	/* check for arbitration lost, clear if set */
	if (temp & MSR_ALF) {
		writel(temp, lpi2c_imx->base + LPI2C_MSR);
		return -EAGAIN;
	}

	/* check for bus not busy */
	if (err) {
		dev_dbg(&lpi2c_imx->adapter.dev, "bus not work\n");
		if (lpi2c_imx->adapter.bus_recovery_info)
			i2c_recover_bus(&lpi2c_imx->adapter);
		return -ETIMEDOUT;
	}

	return 0;
}
227
lpi2c_imx_txfifo_cnt(struct lpi2c_imx_struct * lpi2c_imx)228 static u32 lpi2c_imx_txfifo_cnt(struct lpi2c_imx_struct *lpi2c_imx)
229 {
230 return readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
231 }
232
lpi2c_imx_set_mode(struct lpi2c_imx_struct * lpi2c_imx)233 static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx)
234 {
235 unsigned int bitrate = lpi2c_imx->bitrate;
236 enum lpi2c_imx_mode mode;
237
238 if (bitrate < I2C_MAX_FAST_MODE_FREQ)
239 mode = STANDARD;
240 else if (bitrate < I2C_MAX_FAST_MODE_PLUS_FREQ)
241 mode = FAST;
242 else if (bitrate < I2C_MAX_HIGH_SPEED_MODE_FREQ)
243 mode = FAST_PLUS;
244 else if (bitrate < I2C_MAX_ULTRA_FAST_MODE_FREQ)
245 mode = HS;
246 else
247 mode = ULTRA_FAST;
248
249 lpi2c_imx->mode = mode;
250 }
251
/*
 * Kick off a transfer: flush both FIFOs, clear all latched status
 * flags, push a START + address command word into MTDR, then wait for
 * the bus to become busy (or report arbitration loss / timeout).
 */
static int lpi2c_imx_start(struct lpi2c_imx_struct *lpi2c_imx,
			   struct i2c_msg *msgs, bool atomic)
{
	unsigned int temp;

	temp = readl(lpi2c_imx->base + LPI2C_MCR);
	temp |= MCR_RRF | MCR_RTF;
	writel(temp, lpi2c_imx->base + LPI2C_MCR);
	/* write-1-to-clear all latched MSR status bits */
	writel(0x7f00, lpi2c_imx->base + LPI2C_MSR);

	temp = i2c_8bit_addr_from_msg(msgs) | (GEN_START << 8);
	writel(temp, lpi2c_imx->base + LPI2C_MTDR);

	return lpi2c_imx_bus_busy(lpi2c_imx, atomic);
}
267
/*
 * Queue a STOP command and wait (up to 500 ms) for the stop-detect
 * flag (SDF). On timeout, bus recovery is attempted when available;
 * the error is logged but intentionally not propagated.
 */
static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int temp;
	int err;

	writel(GEN_STOP << 8, lpi2c_imx->base + LPI2C_MTDR);

	err = lpi2c_imx_read_msr_poll_timeout(atomic, temp, temp & MSR_SDF);

	if (err) {
		dev_dbg(&lpi2c_imx->adapter.dev, "stop timeout\n");
		if (lpi2c_imx->adapter.bus_recovery_info)
			i2c_recover_bus(&lpi2c_imx->adapter);
	}
}
283
/* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
/*
 * Compute and program the clock dividers and pin configuration for the
 * selected speed mode.
 *
 * The prescaler search picks the smallest 2^prescale such that the low
 * period fits in the CLKLO field (< 64). HS timings go to MCCR1, all
 * other modes to MCCR0. Returns -EINVAL if no prescaler fits.
 */
static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
{
	u8 prescale, filt, sethold, datavd;
	unsigned int clk_rate, clk_cycle, clkhi, clklo;
	enum lpi2c_imx_pincfg pincfg;
	unsigned int temp;

	lpi2c_imx_set_mode(lpi2c_imx);

	clk_rate = lpi2c_imx->rate_per;

	/* glitch filter disabled for the fastest modes */
	if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
		filt = 0;
	else
		filt = 2;

	for (prescale = 0; prescale <= 7; prescale++) {
		clk_cycle = clk_rate / ((1 << prescale) * lpi2c_imx->bitrate)
			    - 3 - (filt >> 1);
		clkhi = DIV_ROUND_UP(clk_cycle, I2C_CLK_RATIO + 1);
		clklo = clk_cycle - clkhi;
		if (clklo < 64)
			break;
	}

	if (prescale > 7)
		return -EINVAL;

	/* set MCFGR1: PINCFG, PRESCALE, IGNACK */
	if (lpi2c_imx->mode == ULTRA_FAST)
		pincfg = TWO_PIN_OO;
	else
		pincfg = TWO_PIN_OD;
	temp = prescale | pincfg << 24;

	/* ultra-fast mode ignores NACK (MCFGR1_IGNACK) */
	if (lpi2c_imx->mode == ULTRA_FAST)
		temp |= MCFGR1_IGNACK;

	writel(temp, lpi2c_imx->base + LPI2C_MCFGR1);

	/* set MCFGR2: FILTSDA, FILTSCL */
	temp = (filt << 16) | (filt << 24);
	writel(temp, lpi2c_imx->base + LPI2C_MCFGR2);

	/* set MCCR: DATAVD, SETHOLD, CLKHI, CLKLO */
	sethold = clkhi;
	datavd = clkhi >> 1;
	temp = datavd << 24 | sethold << 16 | clkhi << 8 | clklo;

	if (lpi2c_imx->mode == HS)
		writel(temp, lpi2c_imx->base + LPI2C_MCCR1);
	else
		writel(temp, lpi2c_imx->base + LPI2C_MCCR0);

	return 0;
}
341
/*
 * Runtime-resume the controller, soft-reset it, program clocking via
 * lpi2c_imx_config(), and set the master-enable bit.
 *
 * On success the device is left runtime-active; the matching put is
 * done in lpi2c_imx_master_disable() (or on the error path here).
 */
static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned int temp;
	int ret;

	ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
	if (ret < 0)
		return ret;

	/* pulse the software reset */
	temp = MCR_RST;
	writel(temp, lpi2c_imx->base + LPI2C_MCR);
	writel(0, lpi2c_imx->base + LPI2C_MCR);

	ret = lpi2c_imx_config(lpi2c_imx);
	if (ret)
		goto rpm_put;

	temp = readl(lpi2c_imx->base + LPI2C_MCR);
	temp |= MCR_MEN;
	writel(temp, lpi2c_imx->base + LPI2C_MCR);

	return 0;

rpm_put:
	pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);

	return ret;
}
370
lpi2c_imx_master_disable(struct lpi2c_imx_struct * lpi2c_imx)371 static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
372 {
373 u32 temp;
374
375 temp = readl(lpi2c_imx->base + LPI2C_MCR);
376 temp &= ~MCR_MEN;
377 writel(temp, lpi2c_imx->base + LPI2C_MCR);
378
379 pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);
380
381 return 0;
382 }
383
lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct * lpi2c_imx)384 static int lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
385 {
386 unsigned long time_left;
387
388 time_left = wait_for_completion_timeout(&lpi2c_imx->complete, HZ);
389
390 return time_left ? 0 : -ETIMEDOUT;
391 }
392
/*
 * Wait until the TX FIFO drains or a NACK is flagged.
 *
 * Returns 0 when the FIFO is empty, -EIO if the target NACKed (NDF),
 * or -ETIMEDOUT after attempting bus recovery when available.
 */
static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int temp;
	int err;

	err = lpi2c_imx_read_msr_poll_timeout(atomic, temp,
					      (temp & MSR_NDF) || !lpi2c_imx_txfifo_cnt(lpi2c_imx));

	/* NACK takes precedence over the timeout result */
	if (temp & MSR_NDF) {
		dev_dbg(&lpi2c_imx->adapter.dev, "NDF detected\n");
		return -EIO;
	}

	if (err) {
		dev_dbg(&lpi2c_imx->adapter.dev, "txfifo empty timeout\n");
		if (lpi2c_imx->adapter.bus_recovery_info)
			i2c_recover_bus(&lpi2c_imx->adapter);
		return -ETIMEDOUT;
	}

	return 0;
}
415
/* TX watermark = half the FIFO depth (low 16 bits of MFCR). */
static void lpi2c_imx_set_tx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
{
	writel(lpi2c_imx->txfifosize >> 1, lpi2c_imx->base + LPI2C_MFCR);
}
420
lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct * lpi2c_imx)421 static void lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
422 {
423 unsigned int temp, remaining;
424
425 remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
426
427 if (remaining > (lpi2c_imx->rxfifosize >> 1))
428 temp = lpi2c_imx->rxfifosize >> 1;
429 else
430 temp = 0;
431
432 writel(temp << 16, lpi2c_imx->base + LPI2C_MFCR);
433 }
434
/*
 * Fill the TX FIFO from the current message buffer.
 *
 * Returns true when the whole message has been queued (and, in
 * non-atomic mode, signals the completion); returns false when data
 * remains (non-atomic mode re-arms TDIE/NDIE so the ISR continues).
 */
static bool lpi2c_imx_write_txfifo(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int data, txcnt;

	txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;

	while (txcnt < lpi2c_imx->txfifosize) {
		if (lpi2c_imx->delivered == lpi2c_imx->msglen)
			break;

		data = lpi2c_imx->tx_buf[lpi2c_imx->delivered++];
		writel(data, lpi2c_imx->base + LPI2C_MTDR);
		txcnt++;
	}

	if (lpi2c_imx->delivered < lpi2c_imx->msglen) {
		if (!atomic)
			lpi2c_imx_intctrl(lpi2c_imx, MIER_TDIE | MIER_NDIE);
		return false;
	}

	if (!atomic)
		complete(&lpi2c_imx->complete);

	return true;
}
461
/*
 * Drain the RX FIFO into the message buffer and, when needed, queue
 * follow-up receive commands.
 *
 * Returns true when the message is complete (signalling the completion
 * in non-atomic mode), false when more bytes are expected (non-atomic
 * mode re-arms RDIE so the ISR continues).
 */
static bool lpi2c_imx_read_rxfifo(struct lpi2c_imx_struct *lpi2c_imx, bool atomic)
{
	unsigned int blocklen, remaining;
	unsigned int temp, data;

	/* MRDR reads back with RXEMPTY set once the FIFO is exhausted */
	do {
		data = readl(lpi2c_imx->base + LPI2C_MRDR);
		if (data & MRDR_RXEMPTY)
			break;

		lpi2c_imx->rx_buf[lpi2c_imx->delivered++] = data & 0xff;
	} while (1);

	/*
	 * First byte is the length of remaining packet in the SMBus block
	 * data read. Add it to msgs->len.
	 */
	if (lpi2c_imx->block_data) {
		blocklen = lpi2c_imx->rx_buf[0];
		lpi2c_imx->msglen += blocklen;
	}

	remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;

	if (!remaining) {
		if (!atomic)
			complete(&lpi2c_imx->complete);
		return true;
	}

	/* not finished, still waiting for rx data */
	lpi2c_imx_set_rx_watermark(lpi2c_imx);

	/* multiple receive commands */
	if (lpi2c_imx->block_data) {
		lpi2c_imx->block_data = 0;
		temp = remaining;
		temp |= (RECV_DATA << 8);
		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
	} else if (!(lpi2c_imx->delivered & 0xff)) {
		/* at a 256-byte chunk boundary: request the next chunk */
		temp = (remaining > CHUNK_DATA ? CHUNK_DATA : remaining) - 1;
		temp |= (RECV_DATA << 8);
		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
	}

	if (!atomic)
		lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE);

	return false;
}
512
/* Start a PIO write: set the TX watermark and push the first chunk. */
static void lpi2c_imx_write(struct lpi2c_imx_struct *lpi2c_imx,
			    struct i2c_msg *msgs)
{
	lpi2c_imx->tx_buf = msgs->buf;
	lpi2c_imx_set_tx_watermark(lpi2c_imx);
	lpi2c_imx_write_txfifo(lpi2c_imx, false);
}
520
/*
 * Polling-mode write: the poll condition itself tops up the TX FIFO
 * (lpi2c_imx_write_txfifo is side-effecting) until the whole message
 * is queued or a NACK (NDF) is seen.
 *
 * Returns 0 on success, -EIO on NACK, or the poll-timeout error.
 */
static int lpi2c_imx_write_atomic(struct lpi2c_imx_struct *lpi2c_imx,
				  struct i2c_msg *msgs)
{
	u32 temp;
	int err;

	lpi2c_imx->tx_buf = msgs->buf;

	err = lpi2c_imx_read_msr_poll_timeout(true, temp,
					      (temp & MSR_NDF) ||
					      lpi2c_imx_write_txfifo(lpi2c_imx, true));

	if (temp & MSR_NDF)
		return -EIO;

	return err;
}
538
/*
 * Prepare an RX transfer: record the destination buffer and SMBus
 * block-read mode, set the RX watermark, and queue the first receive
 * command (at most CHUNK_DATA bytes; the command field carries N - 1).
 */
static void lpi2c_imx_read_init(struct lpi2c_imx_struct *lpi2c_imx,
				struct i2c_msg *msgs)
{
	unsigned int temp;

	lpi2c_imx->rx_buf = msgs->buf;
	lpi2c_imx->block_data = msgs->flags & I2C_M_RECV_LEN;

	lpi2c_imx_set_rx_watermark(lpi2c_imx);
	temp = msgs->len > CHUNK_DATA ? CHUNK_DATA - 1 : msgs->len - 1;
	temp |= (RECV_DATA << 8);
	writel(temp, lpi2c_imx->base + LPI2C_MTDR);
}
552
lpi2c_imx_read_chunk_atomic(struct lpi2c_imx_struct * lpi2c_imx)553 static bool lpi2c_imx_read_chunk_atomic(struct lpi2c_imx_struct *lpi2c_imx)
554 {
555 u32 rxcnt;
556
557 rxcnt = (readl(lpi2c_imx->base + LPI2C_MFSR) >> 16) & 0xFF;
558 if (!rxcnt)
559 return false;
560
561 if (!lpi2c_imx_read_rxfifo(lpi2c_imx, true))
562 return false;
563
564 return true;
565 }
566
/*
 * Polling-mode read: spin in 100 us steps for up to one second,
 * draining RX FIFO chunks until the message completes. Returns 0 on
 * completion, -EIO on NACK (NDF), -ETIMEDOUT otherwise.
 */
static int lpi2c_imx_read_atomic(struct lpi2c_imx_struct *lpi2c_imx,
				 struct i2c_msg *msgs)
{
	u32 temp;
	int tmo_us;

	tmo_us = 1000000;
	do {
		if (lpi2c_imx_read_chunk_atomic(lpi2c_imx))
			return 0;

		temp = readl(lpi2c_imx->base + LPI2C_MSR);

		if (temp & MSR_NDF)
			return -EIO;

		udelay(100);
		tmo_us -= 100;
	} while (tmo_us > 0);

	return -ETIMEDOUT;
}
589
is_use_dma(struct lpi2c_imx_struct * lpi2c_imx,struct i2c_msg * msg)590 static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
591 {
592 if (!lpi2c_imx->can_use_dma)
593 return false;
594
595 /*
596 * A system-wide suspend or resume transition is in progress. LPI2C should use PIO to
597 * transfer data to avoid issue caused by no ready DMA HW resource.
598 */
599 if (pm_suspend_in_progress())
600 return false;
601
602 /*
603 * When the length of data is less than I2C_DMA_THRESHOLD,
604 * cpu mode is used directly to avoid low performance.
605 */
606 return !(msg->len < I2C_DMA_THRESHOLD);
607 }
608
lpi2c_imx_pio_xfer(struct lpi2c_imx_struct * lpi2c_imx,struct i2c_msg * msg)609 static int lpi2c_imx_pio_xfer(struct lpi2c_imx_struct *lpi2c_imx,
610 struct i2c_msg *msg)
611 {
612 reinit_completion(&lpi2c_imx->complete);
613
614 if (msg->flags & I2C_M_RD) {
615 lpi2c_imx_read_init(lpi2c_imx, msg);
616 lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
617 } else {
618 lpi2c_imx_write(lpi2c_imx, msg);
619 }
620
621 return lpi2c_imx_pio_msg_complete(lpi2c_imx);
622 }
623
lpi2c_imx_pio_xfer_atomic(struct lpi2c_imx_struct * lpi2c_imx,struct i2c_msg * msg)624 static int lpi2c_imx_pio_xfer_atomic(struct lpi2c_imx_struct *lpi2c_imx,
625 struct i2c_msg *msg)
626 {
627 if (msg->flags & I2C_M_RD) {
628 lpi2c_imx_read_init(lpi2c_imx, msg);
629 return lpi2c_imx_read_atomic(lpi2c_imx, msg);
630 }
631
632 return lpi2c_imx_write_atomic(lpi2c_imx, msg);
633 }
634
/*
 * Rough upper bound, in jiffies, for a DMA transfer of dma_len bytes
 * at the configured bitrate (8 bits per byte), plus one extra unit of
 * slack for scheduler-related latency.
 *
 * NOTE(review): "time" is computed as 8 * len * 1000 / bitrate (which
 * looks like milliseconds) but is passed to secs_to_jiffies() as if it
 * were seconds, producing a very generous timeout — confirm the
 * intended scale before tightening it.
 */
static int lpi2c_imx_dma_timeout_calculate(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned long time = 0;

	time = 8 * lpi2c_imx->dma->dma_len * 1000 / lpi2c_imx->bitrate;

	/* Add extra second for scheduler related activities */
	time += 1;

	return secs_to_jiffies(time);
}
647
lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct * lpi2c_imx)648 static int lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct *lpi2c_imx)
649 {
650 struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
651 u16 rx_remain = dma->dma_len;
652 int cmd_num;
653 u16 temp;
654
655 /*
656 * Calculate the number of rx command words via the DMA TX channel
657 * writing into command register based on the i2c msg len, and build
658 * the rx command words buffer.
659 */
660 cmd_num = DIV_ROUND_UP(rx_remain, CHUNK_DATA);
661 dma->rx_cmd_buf = kcalloc(cmd_num, sizeof(u16), GFP_KERNEL);
662 dma->rx_cmd_buf_len = cmd_num * sizeof(u16);
663
664 if (!dma->rx_cmd_buf) {
665 dev_err(&lpi2c_imx->adapter.dev, "Alloc RX cmd buffer failed\n");
666 return -ENOMEM;
667 }
668
669 for (int i = 0; i < cmd_num ; i++) {
670 temp = rx_remain > CHUNK_DATA ? CHUNK_DATA - 1 : rx_remain - 1;
671 temp |= (RECV_DATA << 8);
672 rx_remain -= CHUNK_DATA;
673 dma->rx_cmd_buf[i] = temp;
674 }
675
676 return 0;
677 }
678
lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct * lpi2c_imx)679 static int lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
680 {
681 unsigned long time_left, time;
682
683 time = lpi2c_imx_dma_timeout_calculate(lpi2c_imx);
684 time_left = wait_for_completion_timeout(&lpi2c_imx->complete, time);
685 if (time_left == 0) {
686 dev_err(&lpi2c_imx->adapter.dev, "I/O Error in DMA Data Transfer\n");
687 return -ETIMEDOUT;
688 }
689
690 return 0;
691 }
692
lpi2c_dma_unmap(struct lpi2c_imx_dma * dma)693 static void lpi2c_dma_unmap(struct lpi2c_imx_dma *dma)
694 {
695 struct dma_chan *chan = dma->dma_data_dir == DMA_FROM_DEVICE
696 ? dma->chan_rx : dma->chan_tx;
697
698 dma_unmap_single(chan->device->dev, dma->dma_addr,
699 dma->dma_len, dma->dma_data_dir);
700
701 dma->dma_data_dir = DMA_NONE;
702 }
703
/* Abort the in-flight RX-command transfer and unmap the command buffer. */
static void lpi2c_cleanup_rx_cmd_dma(struct lpi2c_imx_dma *dma)
{
	dmaengine_terminate_sync(dma->chan_tx);
	dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
}
710
lpi2c_cleanup_dma(struct lpi2c_imx_dma * dma)711 static void lpi2c_cleanup_dma(struct lpi2c_imx_dma *dma)
712 {
713 if (dma->dma_data_dir == DMA_FROM_DEVICE)
714 dmaengine_terminate_sync(dma->chan_rx);
715 else if (dma->dma_data_dir == DMA_TO_DEVICE)
716 dmaengine_terminate_sync(dma->chan_tx);
717
718 lpi2c_dma_unmap(dma);
719 }
720
lpi2c_dma_callback(void * data)721 static void lpi2c_dma_callback(void *data)
722 {
723 struct lpi2c_imx_struct *lpi2c_imx = (struct lpi2c_imx_struct *)data;
724
725 complete(&lpi2c_imx->complete);
726 }
727
lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct * lpi2c_imx)728 static int lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct *lpi2c_imx)
729 {
730 struct dma_async_tx_descriptor *rx_cmd_desc;
731 struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
732 struct dma_chan *txchan = dma->chan_tx;
733 dma_cookie_t cookie;
734
735 dma->dma_tx_addr = dma_map_single(txchan->device->dev,
736 dma->rx_cmd_buf, dma->rx_cmd_buf_len,
737 DMA_TO_DEVICE);
738 if (dma_mapping_error(txchan->device->dev, dma->dma_tx_addr)) {
739 dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
740 return -EINVAL;
741 }
742
743 rx_cmd_desc = dmaengine_prep_slave_single(txchan, dma->dma_tx_addr,
744 dma->rx_cmd_buf_len, DMA_MEM_TO_DEV,
745 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
746 if (!rx_cmd_desc) {
747 dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
748 goto desc_prepare_err_exit;
749 }
750
751 cookie = dmaengine_submit(rx_cmd_desc);
752 if (dma_submit_error(cookie)) {
753 dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
754 goto submit_err_exit;
755 }
756
757 dma_async_issue_pending(txchan);
758
759 return 0;
760
761 desc_prepare_err_exit:
762 dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
763 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
764 return -EINVAL;
765
766 submit_err_exit:
767 dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
768 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
769 dmaengine_desc_free(rx_cmd_desc);
770 return -EINVAL;
771 }
772
/*
 * Map the message data buffer and submit it on the appropriate DMA
 * channel (RX channel for reads, TX channel for writes), attaching
 * the completion callback.
 *
 * Returns 0 on success or -EINVAL on any setup failure, in which case
 * the caller can still fall back to PIO (using_pio_mode is only
 * cleared after a successful submit).
 */
static int lpi2c_dma_submit(struct lpi2c_imx_struct *lpi2c_imx)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	if (dma->dma_msg_flag & I2C_M_RD) {
		chan = dma->chan_rx;
		dma->dma_data_dir = DMA_FROM_DEVICE;
		dma->dma_transfer_dir = DMA_DEV_TO_MEM;
	} else {
		chan = dma->chan_tx;
		dma->dma_data_dir = DMA_TO_DEVICE;
		dma->dma_transfer_dir = DMA_MEM_TO_DEV;
	}

	dma->dma_addr = dma_map_single(chan->device->dev,
				       dma->dma_buf, dma->dma_len, dma->dma_data_dir);
	if (dma_mapping_error(chan->device->dev, dma->dma_addr)) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_single(chan, dma->dma_addr,
					   dma->dma_len, dma->dma_transfer_dir,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
		goto desc_prepare_err_exit;
	}

	reinit_completion(&lpi2c_imx->complete);
	desc->callback = lpi2c_dma_callback;
	desc->callback_param = lpi2c_imx;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
		goto submit_err_exit;
	}

	/* Can't switch to PIO mode when DMA have started transfer */
	dma->using_pio_mode = false;

	dma_async_issue_pending(chan);

	return 0;

desc_prepare_err_exit:
	lpi2c_dma_unmap(dma);
	return -EINVAL;

submit_err_exit:
	lpi2c_dma_unmap(dma);
	dmaengine_desc_free(desc);
	return -EINVAL;
}
831
/*
 * Pick the largest burst size, no bigger than half the FIFO depth,
 * that evenly divides the transfer length. Returns 0 only when the
 * FIFO depth is below 2 (no candidate exists).
 */
static int lpi2c_imx_find_max_burst_num(unsigned int fifosize, unsigned int len)
{
	unsigned int burst = fifosize / 2;

	while (burst > 0 && (len % burst))
		burst--;

	return burst;
}
842
843 /*
844 * For a highest DMA efficiency, tx/rx burst number should be calculated according
845 * to the FIFO depth.
846 */
lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct * lpi2c_imx)847 static void lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct *lpi2c_imx)
848 {
849 struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
850 unsigned int cmd_num;
851
852 if (dma->dma_msg_flag & I2C_M_RD) {
853 /*
854 * One RX cmd word can trigger DMA receive no more than 256 bytes.
855 * The number of RX cmd words should be calculated based on the data
856 * length.
857 */
858 cmd_num = DIV_ROUND_UP(dma->dma_len, CHUNK_DATA);
859 dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
860 cmd_num);
861 dma->rx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->rxfifosize,
862 dma->dma_len);
863 } else {
864 dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
865 dma->dma_len);
866 }
867 }
868
/*
 * Configure the DMA slave channels for the upcoming transfer. For a
 * read both channels are set up: the TX channel writes 16-bit RX
 * command words to MTDR while the RX channel drains 8-bit data from
 * MRDR. For a write only the TX channel is configured (8-bit data to
 * MTDR). Returns 0 or the dmaengine_slave_config() error.
 */
static int lpi2c_dma_config(struct lpi2c_imx_struct *lpi2c_imx)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	struct dma_slave_config rx = {}, tx = {};
	int ret;

	lpi2c_imx_dma_burst_num_calculate(lpi2c_imx);

	if (dma->dma_msg_flag & I2C_M_RD) {
		tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
		tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		tx.dst_maxburst = dma->tx_burst_num;
		tx.direction = DMA_MEM_TO_DEV;
		ret = dmaengine_slave_config(dma->chan_tx, &tx);
		if (ret < 0)
			return ret;

		rx.src_addr = dma->phy_addr + LPI2C_MRDR;
		rx.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		rx.src_maxburst = dma->rx_burst_num;
		rx.direction = DMA_DEV_TO_MEM;
		ret = dmaengine_slave_config(dma->chan_rx, &rx);
		if (ret < 0)
			return ret;
	} else {
		tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
		tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		tx.dst_maxburst = dma->tx_burst_num;
		tx.direction = DMA_MEM_TO_DEV;
		ret = dmaengine_slave_config(dma->chan_tx, &tx);
		if (ret < 0)
			return ret;
	}

	return 0;
}
905
lpi2c_dma_enable(struct lpi2c_imx_struct * lpi2c_imx)906 static void lpi2c_dma_enable(struct lpi2c_imx_struct *lpi2c_imx)
907 {
908 struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
909 /*
910 * TX interrupt will be triggered when the number of words in
911 * the transmit FIFO is equal or less than TX watermark.
912 * RX interrupt will be triggered when the number of words in
913 * the receive FIFO is greater than RX watermark.
914 * In order to trigger the DMA interrupt, TX watermark should be
915 * set equal to the DMA TX burst number but RX watermark should
916 * be set less than the DMA RX burst number.
917 */
918 if (dma->dma_msg_flag & I2C_M_RD) {
919 /* Set I2C TX/RX watermark */
920 writel(dma->tx_burst_num | (dma->rx_burst_num - 1) << 16,
921 lpi2c_imx->base + LPI2C_MFCR);
922 /* Enable I2C DMA TX/RX function */
923 writel(MDER_TDDE | MDER_RDDE, lpi2c_imx->base + LPI2C_MDER);
924 } else {
925 /* Set I2C TX watermark */
926 writel(dma->tx_burst_num, lpi2c_imx->base + LPI2C_MFCR);
927 /* Enable I2C DMA TX function */
928 writel(MDER_TDDE, lpi2c_imx->base + LPI2C_MDER);
929 }
930
931 /* Enable NACK detected */
932 lpi2c_imx_intctrl(lpi2c_imx, MIER_NDIE);
933 };
934
935 /*
936 * When lpi2c is in TX DMA mode we can use one DMA TX channel to write
937 * data word into TXFIFO, but in RX DMA mode it is different.
938 *
939 * The LPI2C MTDR register is a command data and transmit data register.
940 * Bits 8-10 are the command data field and Bits 0-7 are the transmit
941 * data field. When the LPI2C master needs to read data, the number of
942 * bytes to read should be set in the command field and RECV_DATA should
943 * be set into the command data field to receive (DATA[7:0] + 1) bytes.
944 * The recv data command word is made of RECV_DATA in the command data
945 * field and the number of bytes to read in transmit data field. When the
946 * length of data to be read exceeds 256 bytes, recv data command word
947 * needs to be written to TXFIFO multiple times.
948 *
949 * So when in RX DMA mode, the TX channel also must to be configured to
950 * send RX command words and the RX command word must be set in advance
951 * before transmitting.
952 */
lpi2c_imx_dma_xfer(struct lpi2c_imx_struct * lpi2c_imx,struct i2c_msg * msg)953 static int lpi2c_imx_dma_xfer(struct lpi2c_imx_struct *lpi2c_imx,
954 struct i2c_msg *msg)
955 {
956 struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
957 int ret;
958
959 /* When DMA mode fails before transferring, CPU mode can be used. */
960 dma->using_pio_mode = true;
961
962 dma->dma_len = msg->len;
963 dma->dma_msg_flag = msg->flags;
964 dma->dma_buf = i2c_get_dma_safe_msg_buf(msg, I2C_DMA_THRESHOLD);
965 if (!dma->dma_buf)
966 return -ENOMEM;
967
968 ret = lpi2c_dma_config(lpi2c_imx);
969 if (ret) {
970 dev_err(&lpi2c_imx->adapter.dev, "Failed to configure DMA (%d)\n", ret);
971 goto disable_dma;
972 }
973
974 lpi2c_dma_enable(lpi2c_imx);
975
976 ret = lpi2c_dma_submit(lpi2c_imx);
977 if (ret) {
978 dev_err(&lpi2c_imx->adapter.dev, "DMA submission failed (%d)\n", ret);
979 goto disable_dma;
980 }
981
982 if (dma->dma_msg_flag & I2C_M_RD) {
983 ret = lpi2c_imx_alloc_rx_cmd_buf(lpi2c_imx);
984 if (ret)
985 goto disable_cleanup_data_dma;
986
987 ret = lpi2c_dma_rx_cmd_submit(lpi2c_imx);
988 if (ret)
989 goto disable_cleanup_data_dma;
990 }
991
992 ret = lpi2c_imx_dma_msg_complete(lpi2c_imx);
993 if (ret)
994 goto disable_cleanup_all_dma;
995
996 /* When encountering NACK in transfer, clean up all DMA transfers */
997 if ((readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) && !ret) {
998 ret = -EIO;
999 goto disable_cleanup_all_dma;
1000 }
1001
1002 if (dma->dma_msg_flag & I2C_M_RD)
1003 dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
1004 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
1005 lpi2c_dma_unmap(dma);
1006
1007 goto disable_dma;
1008
1009 disable_cleanup_all_dma:
1010 if (dma->dma_msg_flag & I2C_M_RD)
1011 lpi2c_cleanup_rx_cmd_dma(dma);
1012 disable_cleanup_data_dma:
1013 lpi2c_cleanup_dma(dma);
1014 disable_dma:
1015 /* Disable I2C DMA function */
1016 writel(0, lpi2c_imx->base + LPI2C_MDER);
1017
1018 if (dma->dma_msg_flag & I2C_M_RD)
1019 kfree(dma->rx_cmd_buf);
1020
1021 if (ret)
1022 i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, false);
1023 else
1024 i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, true);
1025
1026 return ret;
1027 }
1028
/*
 * lpi2c_imx_xfer_common - execute a series of i2c_msg transfers.
 *
 * Shared backend for the normal and atomic .xfer callbacks. Returns the
 * number of messages transferred on success, or a negative errno.
 * Messages are sent back to back with repeated START; STOP is issued once
 * at the end (or on the first error).
 */
static int lpi2c_imx_xfer_common(struct i2c_adapter *adapter,
				 struct i2c_msg *msgs, int num, bool atomic)
{
	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(adapter);
	unsigned int temp;
	int i, result;

	result = lpi2c_imx_master_enable(lpi2c_imx);
	if (result)
		return result;

	for (i = 0; i < num; i++) {
		result = lpi2c_imx_start(lpi2c_imx, &msgs[i], atomic);
		if (result)
			goto disable;

		/* quick smbus: address-only probe, go straight to STOP */
		if (num == 1 && msgs[0].len == 0)
			goto stop;

		lpi2c_imx->rx_buf = NULL;
		lpi2c_imx->tx_buf = NULL;
		lpi2c_imx->delivered = 0;
		lpi2c_imx->msglen = msgs[i].len;

		if (atomic) {
			result = lpi2c_imx_pio_xfer_atomic(lpi2c_imx, &msgs[i]);
		} else {
			init_completion(&lpi2c_imx->complete);

			if (is_use_dma(lpi2c_imx, &msgs[i])) {
				result = lpi2c_imx_dma_xfer(lpi2c_imx, &msgs[i]);
				/*
				 * If DMA failed before any data moved,
				 * fall back to PIO for this message.
				 */
				if (result && lpi2c_imx->dma->using_pio_mode)
					result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
			} else {
				result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
			}
		}

		if (result)
			goto stop;

		/* For writes, wait until the TX FIFO has fully drained. */
		if (!(msgs[i].flags & I2C_M_RD)) {
			result = lpi2c_imx_txfifo_empty(lpi2c_imx, atomic);
			if (result)
				goto stop;
		}
	}

stop:
	lpi2c_imx_stop(lpi2c_imx, atomic);

	/* A NACK detected by the controller turns a "success" into -EIO. */
	temp = readl(lpi2c_imx->base + LPI2C_MSR);
	if ((temp & MSR_NDF) && !result)
		result = -EIO;

disable:
	lpi2c_imx_master_disable(lpi2c_imx);

	dev_dbg(&lpi2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__,
		(result < 0) ? "error" : "success msg",
		(result < 0) ? result : num);

	return (result < 0) ? result : num;
}
1094
lpi2c_imx_xfer(struct i2c_adapter * adapter,struct i2c_msg * msgs,int num)1095 static int lpi2c_imx_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
1096 {
1097 return lpi2c_imx_xfer_common(adapter, msgs, num, false);
1098 }
1099
lpi2c_imx_xfer_atomic(struct i2c_adapter * adapter,struct i2c_msg * msgs,int num)1100 static int lpi2c_imx_xfer_atomic(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
1101 {
1102 return lpi2c_imx_xfer_common(adapter, msgs, num, true);
1103 }
1104
/*
 * lpi2c_imx_target_isr - service a target (slave) mode interrupt.
 *
 * @ssr:         raw target status register value.
 * @sier_filter: status bits that are both set and interrupt-enabled.
 *
 * Dispatches the pending events to the i2c slave backend and clears the
 * handled status bits. Always returns IRQ_HANDLED.
 */
static irqreturn_t lpi2c_imx_target_isr(struct lpi2c_imx_struct *lpi2c_imx,
					u32 ssr, u32 sier_filter)
{
	u8 value;
	u32 sasr;

	/*
	 * Arbitration lost (bit-error flag): mask all target interrupts
	 * and bail out without touching SSR.
	 */
	if (sier_filter & SSR_BEF) {
		writel(0, lpi2c_imx->base + LPI2C_SIER);
		return IRQ_HANDLED;
	}

	/* Address detected */
	if (sier_filter & SSR_AVF) {
		sasr = readl(lpi2c_imx->base + LPI2C_SASR);
		if (SASR_READ_REQ & sasr) {
			/* Read request: push the first byte immediately. */
			i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_REQUESTED, &value);
			writel(value, lpi2c_imx->base + LPI2C_STDR);
			/* Skip the other event checks for this interrupt. */
			goto ret;
		} else {
			/* Write request */
			i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_REQUESTED, &value);
		}
	}

	if (sier_filter & SSR_SDF)
		/* STOP condition seen on the bus */
		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_STOP, &value);

	if (sier_filter & SSR_TDF) {
		/* Master keeps reading: supply the next byte. */
		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, lpi2c_imx->base + LPI2C_STDR);
	}

	if (sier_filter & SSR_RDF) {
		/* A byte arrived from the master: hand it to the backend. */
		value = readl(lpi2c_imx->base + LPI2C_SRDR);
		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_RECEIVED, &value);
	}

ret:
	/* Clear SSR (write-1-to-clear bits only) */
	writel(ssr & SSR_CLEAR_BITS, lpi2c_imx->base + LPI2C_SSR);
	return IRQ_HANDLED;
}
1152
lpi2c_imx_master_isr(struct lpi2c_imx_struct * lpi2c_imx)1153 static irqreturn_t lpi2c_imx_master_isr(struct lpi2c_imx_struct *lpi2c_imx)
1154 {
1155 unsigned int enabled;
1156 unsigned int temp;
1157
1158 enabled = readl(lpi2c_imx->base + LPI2C_MIER);
1159
1160 lpi2c_imx_intctrl(lpi2c_imx, 0);
1161 temp = readl(lpi2c_imx->base + LPI2C_MSR);
1162 temp &= enabled;
1163
1164 if (temp & MSR_NDF)
1165 complete(&lpi2c_imx->complete);
1166 else if (temp & MSR_RDF)
1167 lpi2c_imx_read_rxfifo(lpi2c_imx, false);
1168 else if (temp & MSR_TDF)
1169 lpi2c_imx_write_txfifo(lpi2c_imx, false);
1170
1171 return IRQ_HANDLED;
1172 }
1173
lpi2c_imx_isr(int irq,void * dev_id)1174 static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
1175 {
1176 struct lpi2c_imx_struct *lpi2c_imx = dev_id;
1177
1178 if (lpi2c_imx->target) {
1179 u32 scr = readl(lpi2c_imx->base + LPI2C_SCR);
1180 u32 ssr = readl(lpi2c_imx->base + LPI2C_SSR);
1181 u32 sier_filter = ssr & readl(lpi2c_imx->base + LPI2C_SIER);
1182
1183 /*
1184 * The target is enabled and an interrupt has been triggered.
1185 * Enter the target's irq handler.
1186 */
1187 if ((scr & SCR_SEN) && sier_filter)
1188 return lpi2c_imx_target_isr(lpi2c_imx, ssr, sier_filter);
1189 }
1190
1191 /*
1192 * Otherwise the interrupt has been triggered by the master.
1193 * Enter the master's irq handler.
1194 */
1195 return lpi2c_imx_master_isr(lpi2c_imx);
1196 }
1197
lpi2c_imx_target_init(struct lpi2c_imx_struct * lpi2c_imx)1198 static void lpi2c_imx_target_init(struct lpi2c_imx_struct *lpi2c_imx)
1199 {
1200 u32 temp;
1201
1202 /* reset target module */
1203 writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
1204 writel(0, lpi2c_imx->base + LPI2C_SCR);
1205
1206 /* Set target address */
1207 writel((lpi2c_imx->target->addr << 1), lpi2c_imx->base + LPI2C_SAMR);
1208
1209 writel(SCFGR1_RXSTALL | SCFGR1_TXDSTALL, lpi2c_imx->base + LPI2C_SCFGR1);
1210
1211 /*
1212 * set SCFGR2: FILTSDA, FILTSCL and CLKHOLD
1213 *
1214 * FILTSCL/FILTSDA can eliminate signal skew. It should generally be
1215 * set to the same value and should be set >= 50ns.
1216 *
1217 * CLKHOLD is only used when clock stretching is enabled, but it will
1218 * extend the clock stretching to ensure there is an additional delay
1219 * between the target driving SDA and the target releasing the SCL pin.
1220 *
1221 * CLKHOLD setting is crucial for lpi2c target. When master read data
1222 * from target, if there is a delay caused by cpu idle, excessive load,
1223 * or other delays between two bytes in one message transmission, it
1224 * will cause a short interval time between the driving SDA signal and
1225 * releasing SCL signal. The lpi2c master will mistakenly think it is a stop
1226 * signal resulting in an arbitration failure. This issue can be avoided
1227 * by setting CLKHOLD.
1228 *
1229 * In order to ensure lpi2c function normally when the lpi2c speed is as
1230 * low as 100kHz, CLKHOLD should be set to 3 and it is also compatible with
1231 * higher clock frequency like 400kHz and 1MHz.
1232 */
1233 temp = SCFGR2_FILTSDA(2) | SCFGR2_FILTSCL(2) | SCFGR2_CLKHOLD(3);
1234 writel(temp, lpi2c_imx->base + LPI2C_SCFGR2);
1235
1236 /*
1237 * Enable module:
1238 * SCR_FILTEN can enable digital filter and output delay counter for LPI2C
1239 * target mode. So SCR_FILTEN need be asserted when enable SDA/SCL FILTER
1240 * and CLKHOLD.
1241 */
1242 writel(SCR_SEN | SCR_FILTEN, lpi2c_imx->base + LPI2C_SCR);
1243
1244 /* Enable interrupt from i2c module */
1245 writel(SLAVE_INT_FLAG, lpi2c_imx->base + LPI2C_SIER);
1246 }
1247
lpi2c_imx_register_target(struct i2c_client * client)1248 static int lpi2c_imx_register_target(struct i2c_client *client)
1249 {
1250 struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
1251 int ret;
1252
1253 if (lpi2c_imx->target)
1254 return -EBUSY;
1255
1256 lpi2c_imx->target = client;
1257
1258 ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
1259 if (ret < 0) {
1260 dev_err(&lpi2c_imx->adapter.dev, "failed to resume i2c controller");
1261 return ret;
1262 }
1263
1264 lpi2c_imx_target_init(lpi2c_imx);
1265
1266 return 0;
1267 }
1268
lpi2c_imx_unregister_target(struct i2c_client * client)1269 static int lpi2c_imx_unregister_target(struct i2c_client *client)
1270 {
1271 struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
1272 int ret;
1273
1274 if (!lpi2c_imx->target)
1275 return -EINVAL;
1276
1277 /* Reset target address. */
1278 writel(0, lpi2c_imx->base + LPI2C_SAMR);
1279
1280 writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
1281 writel(0, lpi2c_imx->base + LPI2C_SCR);
1282
1283 lpi2c_imx->target = NULL;
1284
1285 ret = pm_runtime_put_sync(lpi2c_imx->adapter.dev.parent);
1286 if (ret < 0)
1287 dev_err(&lpi2c_imx->adapter.dev, "failed to suspend i2c controller");
1288
1289 return ret;
1290 }
1291
lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct * lpi2c_imx,struct platform_device * pdev)1292 static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
1293 struct platform_device *pdev)
1294 {
1295 struct i2c_bus_recovery_info *bri = &lpi2c_imx->rinfo;
1296
1297 bri->pinctrl = devm_pinctrl_get(&pdev->dev);
1298 if (IS_ERR(bri->pinctrl))
1299 return PTR_ERR(bri->pinctrl);
1300
1301 lpi2c_imx->adapter.bus_recovery_info = bri;
1302
1303 return 0;
1304 }
1305
dma_exit(struct device * dev,struct lpi2c_imx_dma * dma)1306 static void dma_exit(struct device *dev, struct lpi2c_imx_dma *dma)
1307 {
1308 if (dma->chan_rx)
1309 dma_release_channel(dma->chan_rx);
1310
1311 if (dma->chan_tx)
1312 dma_release_channel(dma->chan_tx);
1313
1314 devm_kfree(dev, dma);
1315 }
1316
lpi2c_dma_init(struct device * dev,dma_addr_t phy_addr)1317 static int lpi2c_dma_init(struct device *dev, dma_addr_t phy_addr)
1318 {
1319 struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1320 struct lpi2c_imx_dma *dma;
1321 int ret;
1322
1323 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
1324 if (!dma)
1325 return -ENOMEM;
1326
1327 dma->phy_addr = phy_addr;
1328
1329 /* Prepare for TX DMA: */
1330 dma->chan_tx = dma_request_chan(dev, "tx");
1331 if (IS_ERR(dma->chan_tx)) {
1332 ret = PTR_ERR(dma->chan_tx);
1333 if (ret != -ENODEV && ret != -EPROBE_DEFER)
1334 dev_err(dev, "can't request DMA tx channel (%d)\n", ret);
1335 dma->chan_tx = NULL;
1336 goto dma_exit;
1337 }
1338
1339 /* Prepare for RX DMA: */
1340 dma->chan_rx = dma_request_chan(dev, "rx");
1341 if (IS_ERR(dma->chan_rx)) {
1342 ret = PTR_ERR(dma->chan_rx);
1343 if (ret != -ENODEV && ret != -EPROBE_DEFER)
1344 dev_err(dev, "can't request DMA rx channel (%d)\n", ret);
1345 dma->chan_rx = NULL;
1346 goto dma_exit;
1347 }
1348
1349 lpi2c_imx->can_use_dma = true;
1350 lpi2c_imx->dma = dma;
1351 return 0;
1352
1353 dma_exit:
1354 dma_exit(dev, dma);
1355 return ret;
1356 }
1357
lpi2c_imx_func(struct i2c_adapter * adapter)1358 static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
1359 {
1360 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
1361 I2C_FUNC_SMBUS_READ_BLOCK_DATA;
1362 }
1363
/* Bus algorithm: master transfers (normal and atomic) plus target hooks. */
static const struct i2c_algorithm lpi2c_imx_algo = {
	.xfer = lpi2c_imx_xfer,
	.xfer_atomic = lpi2c_imx_xfer_atomic,
	.functionality = lpi2c_imx_func,
	.reg_target = lpi2c_imx_register_target,
	.unreg_target = lpi2c_imx_unregister_target,
};

/* Device tree match table. */
static const struct of_device_id lpi2c_imx_of_match[] = {
	{ .compatible = "fsl,imx7ulp-lpi2c" },
	{ }
};
MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
1377
/*
 * lpi2c_imx_probe - driver probe: map registers, claim IRQ and clocks,
 * set up runtime PM, optional bus recovery and DMA, then register the
 * adapter. Returns 0 on success or a negative errno.
 */
static int lpi2c_imx_probe(struct platform_device *pdev)
{
	struct lpi2c_imx_struct *lpi2c_imx;
	struct resource *res;
	dma_addr_t phy_addr;
	unsigned int temp;
	int irq, ret;

	lpi2c_imx = devm_kzalloc(&pdev->dev, sizeof(*lpi2c_imx), GFP_KERNEL);
	if (!lpi2c_imx)
		return -ENOMEM;

	lpi2c_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(lpi2c_imx->base))
		return PTR_ERR(lpi2c_imx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Fill in the adapter before any registration path can use it. */
	lpi2c_imx->adapter.owner = THIS_MODULE;
	lpi2c_imx->adapter.algo = &lpi2c_imx_algo;
	lpi2c_imx->adapter.dev.parent = &pdev->dev;
	lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
	strscpy(lpi2c_imx->adapter.name, pdev->name,
		sizeof(lpi2c_imx->adapter.name));
	/* Physical register base, needed later for DMA slave config. */
	phy_addr = (dma_addr_t)res->start;

	ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "can't get I2C peripheral clock\n");
	lpi2c_imx->num_clks = ret;

	/* Optional DT property; default to standard mode (100kHz). */
	ret = of_property_read_u32(pdev->dev.of_node,
				   "clock-frequency", &lpi2c_imx->bitrate);
	if (ret)
		lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;

	ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, IRQF_NO_SUSPEND,
			       pdev->name, lpi2c_imx);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", irq);

	i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
	platform_set_drvdata(pdev, lpi2c_imx);

	ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
	if (ret)
		return ret;

	/*
	 * Lock the parent clock rate to avoid getting parent clock upon
	 * each transfer
	 */
	/*
	 * NOTE(review): from here on, error returns that bypass rpm_disable
	 * leave the bulk clocks prepared/enabled — confirm whether a
	 * clk_bulk_disable_unprepare() is owed on these paths.
	 */
	ret = devm_clk_rate_exclusive_get(&pdev->dev, lpi2c_imx->clks[0].clk);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "can't lock I2C peripheral clock rate\n");

	lpi2c_imx->rate_per = clk_get_rate(lpi2c_imx->clks[0].clk);
	if (!lpi2c_imx->rate_per)
		return dev_err_probe(&pdev->dev, -EINVAL,
				     "can't get I2C peripheral clock rate\n");

	/* Runtime PM: start active (clocks are on) with autosuspend. */
	pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* FIFO depths are encoded as log2 in the PARAM register. */
	temp = readl(lpi2c_imx->base + LPI2C_PARAM);
	lpi2c_imx->txfifosize = 1 << (temp & 0x0f);
	lpi2c_imx->rxfifosize = 1 << ((temp >> 8) & 0x0f);

	/* Init optional bus recovery function */
	ret = lpi2c_imx_init_recovery_info(lpi2c_imx, pdev);
	/* Give it another chance if pinctrl used is not ready yet */
	if (ret == -EPROBE_DEFER)
		goto rpm_disable;

	/* Init DMA; anything but a probe deferral falls back to PIO. */
	ret = lpi2c_dma_init(&pdev->dev, phy_addr);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto rpm_disable;
		dev_info(&pdev->dev, "use pio mode\n");
	}

	ret = i2c_add_adapter(&lpi2c_imx->adapter);
	if (ret)
		goto rpm_disable;

	/* Drop the initial reference; autosuspend may now kick in. */
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n");

	return 0;

rpm_disable:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return ret;
}
1483
lpi2c_imx_remove(struct platform_device * pdev)1484 static void lpi2c_imx_remove(struct platform_device *pdev)
1485 {
1486 struct lpi2c_imx_struct *lpi2c_imx = platform_get_drvdata(pdev);
1487
1488 i2c_del_adapter(&lpi2c_imx->adapter);
1489
1490 pm_runtime_disable(&pdev->dev);
1491 pm_runtime_dont_use_autosuspend(&pdev->dev);
1492 }
1493
lpi2c_runtime_suspend(struct device * dev)1494 static int __maybe_unused lpi2c_runtime_suspend(struct device *dev)
1495 {
1496 struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1497
1498 clk_bulk_disable(lpi2c_imx->num_clks, lpi2c_imx->clks);
1499 pinctrl_pm_select_sleep_state(dev);
1500
1501 return 0;
1502 }
1503
lpi2c_runtime_resume(struct device * dev)1504 static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
1505 {
1506 struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1507 int ret;
1508
1509 pinctrl_pm_select_default_state(dev);
1510 ret = clk_bulk_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
1511 if (ret) {
1512 dev_err(dev, "failed to enable I2C clock, ret=%d\n", ret);
1513 return ret;
1514 }
1515
1516 return 0;
1517 }
1518
lpi2c_suspend_noirq(struct device * dev)1519 static int __maybe_unused lpi2c_suspend_noirq(struct device *dev)
1520 {
1521 return pm_runtime_force_suspend(dev);
1522 }
1523
lpi2c_resume_noirq(struct device * dev)1524 static int __maybe_unused lpi2c_resume_noirq(struct device *dev)
1525 {
1526 struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
1527 int ret;
1528
1529 ret = pm_runtime_force_resume(dev);
1530 if (ret)
1531 return ret;
1532
1533 /*
1534 * If the I2C module powers down during system suspend,
1535 * the register values will be lost. Therefore, reinitialize
1536 * the target when the system resumes.
1537 */
1538 if (lpi2c_imx->target)
1539 lpi2c_imx_target_init(lpi2c_imx);
1540
1541 return 0;
1542 }
1543
/*
 * System suspend: take a runtime PM reference while runtime PM is still
 * enabled.
 *
 * Some I2C client devices need the controller during suspend_noirq() or
 * resume_noirq(). If the controller were left autosuspended, nothing
 * could wake it once runtime PM is disabled in suspend_late(), and on
 * resume it would only come back after resume_early() — too late for
 * those clients.
 *
 * Taking the reference here keeps the controller available until
 * lpi2c_suspend_noirq() (pm_runtime_force_suspend()) runs; on the way
 * back, lpi2c_resume_noirq() (pm_runtime_force_resume()) restores it,
 * and lpi2c_resume() finally re-enables autosuspend. The controller is
 * therefore usable everywhere between the noirq phases.
 */
static int lpi2c_suspend(struct device *dev)
{
	return pm_runtime_resume_and_get(dev);
}
1568
/* System resume: drop the reference taken in lpi2c_suspend(). */
static int lpi2c_resume(struct device *dev)
{
	pm_runtime_put_autosuspend(dev);

	return 0;
}
1575
/* PM callbacks: noirq + regular system sleep hooks and runtime PM. */
static const struct dev_pm_ops lpi2c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(lpi2c_suspend_noirq,
				      lpi2c_resume_noirq)
	SYSTEM_SLEEP_PM_OPS(lpi2c_suspend, lpi2c_resume)
	SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend,
			   lpi2c_runtime_resume, NULL)
};

static struct platform_driver lpi2c_imx_driver = {
	.probe = lpi2c_imx_probe,
	.remove = lpi2c_imx_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = lpi2c_imx_of_match,
		.pm = &lpi2c_pm_ops,
	},
};

module_platform_driver(lpi2c_imx_driver);

MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_DESCRIPTION("I2C adapter driver for LPI2C bus");
MODULE_LICENSE("GPL");
1599