1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Synopsys DesignWare I2C adapter driver (master only).
4 *
5 * Based on the TI DAVINCI I2C adapter driver.
6 *
7 * Copyright (C) 2006 Texas Instruments.
8 * Copyright (C) 2007 MontaVista Software Inc.
9 * Copyright (C) 2009 Provigent Ltd.
10 */
11
12 #define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
13
14 #include <linux/delay.h>
15 #include <linux/err.h>
16 #include <linux/errno.h>
17 #include <linux/export.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/i2c.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/module.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/regmap.h>
26 #include <linux/reset.h>
27
28 #include "i2c-designware-core.h"
29
30 #define AMD_TIMEOUT_MIN_US 25
31 #define AMD_TIMEOUT_MAX_US 250
32 #define AMD_MASTERCFG_MASK GENMASK(15, 0)
33
/*
 * i2c_dw_set_timings_master() - Compute SCL HCNT/LCNT values for all speeds
 * @dev: controller context
 *
 * Reads DW_IC_COMP_PARAM_1 (under the bus lock) to learn the supported
 * speed modes, then fills in dev->ss_*/fs_*/hs_* high/low count pairs for
 * Standard, Fast (or Fast Plus) and High Speed modes unless firmware/ACPI
 * already provided them. Falls back from High Speed to Fast mode when the
 * IP does not support it.
 *
 * Return: 0 on success or a negative errno from lock/register access.
 */
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_SS_SCL_HCNT,
					ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_SS_SCL_LCNT,
					ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. Only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check are Fast Mode Plus parameters available. Calculate
		 * SCL timing parameters for Fast Mode Plus if not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_FS_SCL_HCNT,
						ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_FS_SCL_LCNT,
						ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * needed also in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_FS_SCL_HCNT,
					ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_FS_SCL_LCNT,
					ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check is high speed possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			u32 t_high, t_low;

			/*
			 * The legal values stated in the databook for bus
			 * capacitance are only 100pF and 400pF.
			 * If dev->bus_capacitance_pF is greater than or equals
			 * to 400, t_high and t_low are assumed to be
			 * appropriate values for 400pF, otherwise 100pF.
			 */
			if (dev->bus_capacitance_pF >= 400) {
				/* assume bus capacitance is 400pF */
				t_high = dev->clk_freq_optimized ? 160 : 120;
				t_low = 320;
			} else {
				/* assume bus capacitance is 100pF */
				t_high = 60;
				t_low = dev->clk_freq_optimized ? 120 : 160;
			}

			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_HS_SCL_HCNT,
						ic_clk,
						t_high,
						sda_falling_time,
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_HS_SCL_LCNT,
						ic_clk,
						t_low,
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}
187
/*
 * i2c_dw_xfer_init() - Program the controller for the current transfer
 * @dev: controller context
 *
 * Disables the adapter, selects master mode, sets the target address (and
 * 10-bit addressing when the message requests it), then re-enables the
 * adapter with interrupts masked until the hardware is ready. The exact
 * register write order below matters; do not reorder.
 */
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	i2c_dw_set_mode(dev, DW_IC_MASTER);

	/* If the slave address is ten bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of IC_TAR register.
		 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	__i2c_dw_write_intr_mask(dev, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}
234
235 /*
236 * This function waits for the controller to be idle before disabling I2C
237 * When the controller is not in the IDLE state, the MST_ACTIVITY bit
238 * (IC_STATUS[5]) is set.
239 *
240 * Values:
241 * 0x1 (ACTIVE): Controller not idle
242 * 0x0 (IDLE): Controller is idle
243 *
244 * The function is called after completing the current transfer.
245 *
246 * Returns:
247 * False when the controller is in the IDLE state.
248 * True when the controller is in the ACTIVE state.
249 */
i2c_dw_is_controller_active(struct dw_i2c_dev * dev)250 static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
251 {
252 u32 status;
253
254 regmap_read(dev->map, DW_IC_STATUS, &status);
255 if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
256 return false;
257
258 return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
259 !(status & DW_IC_STATUS_MASTER_ACTIVITY),
260 1100, 20000) != 0;
261 }
262
i2c_dw_check_stopbit(struct dw_i2c_dev * dev)263 static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
264 {
265 u32 val;
266 int ret;
267
268 ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
269 !(val & DW_IC_INTR_STOP_DET),
270 1100, 20000);
271 if (ret)
272 dev_err(dev->dev, "i2c timeout error %d\n", ret);
273
274 return ret;
275 }
276
/* Wait for a free bus, then verify the STOP condition completed. */
static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int ret = i2c_dw_wait_bus_not_busy(dev);

	return ret ? ret : i2c_dw_check_stopbit(dev);
}
287
288 /*
289 * Initiate and continue master read/write transaction with polling
290 * based transfer routine afterward write messages into the Tx buffer.
291 */
static int amd_i2c_dw_xfer_quirk(struct dw_i2c_dev *dev, struct i2c_msg *msgs, int num_msgs)
{
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/* Resume the device; released automatically when pm goes out of scope */
	PM_RUNTIME_ACQUIRE_AUTOSUSPEND(dev->dev, pm);
	if (PM_RUNTIME_ACQUIRE_ERR(&pm))
		return -ENXIO;

	/*
	 * In order to enable the interrupt for UCSI i.e. AMD NAVI GPU card,
	 * it is mandatory to set the right value in specific register
	 * (offset:0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	dev->msg_write_idx = 0;
	i2c_dw_xfer_init(dev);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		/* For writes, raise TX threshold to the message length */
		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with stop bit enable.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			/* Last byte of the last message: request a STOP */
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to hardware bug, need to write the same command twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers so we check it always
					 * when read/write the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					/* Drain all received bytes into the caller's buffer */
					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				/* Write one data byte, then pace the hardware */
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}
367
368 /*
369 * Initiate (and continue) low level master read/write transaction.
370 * This function is only called from i2c_dw_isr(), and pumping i2c_msg
371 * messages into the tx buffer. Even if the size of i2c_msg data is
372 * longer than the size of the tx buffer, it handles everything.
373 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/*
			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
					(dev->msg_write_idx > 0))
				need_restart = true;
		}

		/* Compute free space in the TX and RX FIFOs */
		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);	/* STOP */

			if (need_restart) {
				cmd |= BIT(10);	/* RESTART */
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				/* Queue a read command (bit 8 = read) */
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		/* Remember progress for the next TX_EMPTY interrupt */
		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid the
		 * bogus interrupts flood.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If i2c_msg index search is completed, we don't need TX_EMPTY
	 * interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	__i2c_dw_write_intr_mask(dev, intr_mask);
}
487
488 static u8
i2c_dw_recv_len(struct dw_i2c_dev * dev,u8 len)489 i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
490 {
491 struct i2c_msg *msgs = dev->msgs;
492 u32 flags = msgs[dev->msg_read_idx].flags;
493 unsigned int intr_mask;
494
495 /*
496 * Adjust the buffer length and mask the flag
497 * after receiving the first byte.
498 */
499 len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
500 dev->tx_buf_len = len - min(len, dev->rx_outstanding);
501 msgs[dev->msg_read_idx].len = len;
502 msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
503
504 /*
505 * Received buffer length, re-enable TX_EMPTY interrupt
506 * to resume the SMBUS transaction.
507 */
508 __i2c_dw_read_intr_mask(dev, &intr_mask);
509 intr_mask |= DW_IC_INTR_TX_EMPTY;
510 __i2c_dw_write_intr_mask(dev, intr_mask);
511
512 return len;
513 }
514
/*
 * i2c_dw_read() - Drain the RX FIFO into the current read message(s)
 * @dev: controller context
 *
 * Called from interrupt/polling context on RX_FULL. Copies available bytes
 * into the active I2C_M_RD message, handling the SMBus block-read length
 * byte via i2c_dw_recv_len(). Saves partial progress in dev->rx_buf/
 * rx_buf_len and sets STATUS_READ_IN_PROGRESS when more data is expected.
 */
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		u32 flags = msgs[dev->msg_read_idx].flags;
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(flags & I2C_M_RD))
			continue;

		/* Resume a partially-read message or start a fresh one */
		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
				 * detected from the registers, the controller can be
				 * disabled if the STOP bit is set. But it is only set
				 * after receiving block data response length in
				 * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read
				 * another byte with STOP bit set when the block data
				 * response length is invalid to complete the transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			/* FIFO drained but message incomplete; resume later */
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}
572
/*
 * i2c_dw_read_clear_intrbits() - Read pending interrupts and clear them
 * @dev: controller context
 *
 * Returns the active interrupt status bits, masked either by hardware
 * (IC_INTR_STAT) or by the driver's software mask when in polling mode.
 * Each pending source is acknowledged through its dedicated IC_CLR_*
 * register; TX_ABRT additionally snapshots the abort source first since
 * reading IC_CLR_TX_ABRT wipes it.
 */
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	if (!(dev->flags & ACCESS_POLLING)) {
		regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
	} else {
		regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
		stat &= dev->sw_mask;
	}

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts, triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	/* Defer STOP_DET clearing while read data is still outstanding */
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}
633
/*
 * i2c_dw_process_transfer() - Advance the transfer state machine
 * @dev: controller context
 * @stat: interrupt status bits from i2c_dw_read_clear_intrbits()
 *
 * Handles aborts, feeds/drains the FIFOs and completes dev->cmd_complete
 * once the transfer is finished (STOP/abort/error with no RX outstanding).
 * Runs from the ISR or from the polling wait loop.
 */
static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
{
	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/* Abort if we detect a STOP in the middle of a read or a write */
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    (dev->status & (STATUS_READ_IN_PROGRESS | STATUS_WRITE_IN_PROGRESS))) {
		dev_err(dev->dev, "spurious STOP detected\n");
		dev->rx_outstanding = 0;
		dev->msg_err = -EIO;
	}

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		__i2c_dw_read_intr_mask(dev, &stat);
		__i2c_dw_write_intr_mask(dev, 0);
		__i2c_dw_write_intr_mask(dev, stat);
	}
}
680
681 /*
682 * Interrupt service routine. This gets called whenever an I2C master interrupt
683 * occurs.
684 */
irqreturn_t i2c_dw_isr_master(struct dw_i2c_dev *dev)
{
	unsigned int stat, enabled;

	/*
	 * Filter spurious interrupts: adapter disabled, nothing but the
	 * ACTIVITY bit pending, device runtime-suspended, or all-ones
	 * readback (device likely powered down / unreachable).
	 */
	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt in driver point of view. State
		 * variables are either unset or stale so acknowledge and
		 * disable interrupts for suppressing further interrupts if
		 * interrupt really came from this HW (E.g. firmware has left
		 * the HW active).
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		return IRQ_HANDLED;
	}

	i2c_dw_process_transfer(dev, stat);

	return IRQ_HANDLED;
}
715
/*
 * i2c_dw_wait_transfer() - Wait for the current transfer to complete
 * @dev: controller context
 *
 * In interrupt mode, simply waits on dev->cmd_complete. In polling mode
 * (ACCESS_POLLING), drives the state machine by hand: repeatedly read the
 * interrupt bits and call i2c_dw_process_transfer() until completion or
 * the adapter timeout elapses.
 *
 * Return: 0 on completion, -ETIMEDOUT otherwise.
 */
static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
{
	unsigned long timeout = dev->adapter.timeout;
	unsigned int stat;
	int ret;

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
	} else {
		/* Convert the relative timeout into an absolute deadline */
		timeout += jiffies;
		do {
			ret = try_wait_for_completion(&dev->cmd_complete);
			if (ret)
				break;

			stat = i2c_dw_read_clear_intrbits(dev);
			if (stat)
				i2c_dw_process_transfer(dev, stat);
			else
				/* Try save some power */
				usleep_range(3, 25);
		} while (time_before(jiffies, timeout));
	}

	return ret ? 0 : -ETIMEDOUT;
}
742
743 /*
744 * Prepare controller for a transaction, start the transfer of the @msgs
745 * and wait for completion, either a STOP or a error.
746 * Return: 0 or a negative error code.
747 */
static int
__i2c_dw_xfer_one_part(struct dw_i2c_dev *dev, struct i2c_msg *msgs, size_t num)
{
	int ret;

	/* Reset all per-transfer bookkeeping before touching the HW */
	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		return ret;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	ret = i2c_dw_wait_transfer(dev);
	if (ret) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init(dev);
		return ret;
	}

	/*
	 * This happens rarely (~1:500) and is hard to reproduce. Debug trace
	 * showed that IC_STATUS had value of 0x23 when STOP_DET occurred,
	 * if disable IC_ENABLE.ENABLE immediately that can result in
	 * IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check if
	 * controller is still ACTIVE before disabling I2C.
	 */
	if (i2c_dw_is_controller_active(dev))
		dev_err(dev->dev, "controller active\n");

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err)
		return dev->msg_err;

	/* No error */
	if (likely(!dev->cmd_err && !dev->status))
		return 0;

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT)
		return i2c_dw_handle_tx_abort(dev);

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	return -EIO;
}
818
819 /*
820 * Verify that the message at index @idx can be processed as part
821 * of a single transaction. The @msgs array contains the messages
822 * of the transaction. The message is checked against its predecessor
823 * to ensure that it respects the limitation of the controller.
824 * Return: true if the message can be processed, false otherwise.
825 */
826 static bool
i2c_dw_msg_is_valid(struct dw_i2c_dev * dev,const struct i2c_msg * msgs,size_t idx)827 i2c_dw_msg_is_valid(struct dw_i2c_dev *dev, const struct i2c_msg *msgs, size_t idx)
828 {
829 /*
830 * The first message of a transaction is valid,
831 * no constraints from a previous message.
832 */
833 if (!idx)
834 return true;
835
836 /*
837 * We cannot change the target address during a transaction, so make
838 * sure the address is identical to the one of the previous message.
839 */
840 if (msgs[idx - 1].addr != msgs[idx].addr) {
841 dev_err(dev->dev, "invalid target address\n");
842 return false;
843 }
844
845 /*
846 * Make sure we don't need explicit RESTART between two messages
847 * in the same direction for controllers that cannot emit them.
848 */
849 if (!dev->emptyfifo_hold_master &&
850 (msgs[idx - 1].flags & I2C_M_RD) == (msgs[idx].flags & I2C_M_RD)) {
851 dev_err(dev->dev, "cannot emit RESTART\n");
852 return false;
853 }
854
855 return true;
856 }
857
/*
 * i2c_dw_xfer_common() - Run an i2c transaction, splitting on I2C_M_STOP
 * @dev: controller context
 * @msgs: messages to transfer
 * @num: number of messages
 *
 * Acquires runtime PM and the bus lock, validates consecutive messages,
 * and hands each STOP-terminated group to __i2c_dw_xfer_one_part().
 * Switches back to slave mode before releasing the lock.
 *
 * Return: @num on success or a negative errno.
 */
static int
i2c_dw_xfer_common(struct dw_i2c_dev *dev, struct i2c_msg msgs[], int num)
{
	struct i2c_msg *msgs_part;
	size_t cnt;
	int ret;

	dev_dbg(dev->dev, "msgs: %d\n", num);

	/* Resume the device; released automatically when pm goes out of scope */
	PM_RUNTIME_ACQUIRE_AUTOSUSPEND(dev->dev, pm);
	if (PM_RUNTIME_ACQUIRE_ERR(&pm))
		return -ENXIO;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * If the I2C_M_STOP is present in some the messages,
	 * we do one transaction for each part up to the STOP.
	 */
	for (msgs_part = msgs; msgs_part < msgs + num; msgs_part += cnt) {
		/*
		 * Count the messages in a transaction, up to a STOP or
		 * the end of the msgs. The last if below guarantees that
		 * we check all messages and that msg_parts and cnt are
		 * in-bounds of msgs and num.
		 */
		for (cnt = 1; ; cnt++) {
			if (!i2c_dw_msg_is_valid(dev, msgs_part, cnt - 1)) {
				ret = -EINVAL;
				break;
			}

			if ((msgs_part[cnt - 1].flags & I2C_M_STOP) ||
			    (msgs_part + cnt == msgs + num))
				break;
		}
		if (ret < 0)
			break;

		/* transfer one part up to a STOP */
		ret = __i2c_dw_xfer_one_part(dev, msgs_part, cnt);
		if (ret < 0)
			break;
	}

	/* Hand the controller back to slave mode between transactions */
	i2c_dw_set_mode(dev, DW_IC_SLAVE);

	i2c_dw_release_lock(dev);

	if (ret < 0)
		return ret;
	return num;
}
913
i2c_dw_xfer(struct i2c_adapter * adap,struct i2c_msg * msgs,int num)914 int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
915 {
916 struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
917
918 if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
919 return amd_i2c_dw_xfer_quirk(dev, msgs, num);
920
921 return i2c_dw_xfer_common(dev, msgs, num);
922 }
923
i2c_dw_configure_master(struct dw_i2c_dev * dev)924 void i2c_dw_configure_master(struct dw_i2c_dev *dev)
925 {
926 struct i2c_timings *t = &dev->timings;
927
928 dev->functionality |= I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
929
930 /* amd_i2c_dw_xfer_quirk() does not implement protocol mangling */
931 if ((dev->flags & MODEL_MASK) != MODEL_AMD_NAVI_GPU)
932 dev->functionality |= I2C_FUNC_PROTOCOL_MANGLING;
933
934 dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
935 DW_IC_CON_RESTART_EN;
936
937 dev->mode = DW_IC_MASTER;
938
939 switch (t->bus_freq_hz) {
940 case I2C_MAX_STANDARD_MODE_FREQ:
941 dev->master_cfg |= DW_IC_CON_SPEED_STD;
942 break;
943 case I2C_MAX_HIGH_SPEED_MODE_FREQ:
944 dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
945 break;
946 default:
947 dev->master_cfg |= DW_IC_CON_SPEED_FAST;
948 }
949 }
950 EXPORT_SYMBOL_GPL(i2c_dw_configure_master);
951
/* Quiesce the controller before GPIO bus recovery: disable, assert reset,
 * then gate the clock. Mirrored by i2c_dw_unprepare_recovery().
 */
static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}
960
/* Bring the controller back after GPIO bus recovery: ungate the clock,
 * release reset, then fully re-initialize the controller.
 */
static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init(dev);
}
969
i2c_dw_init_recovery_info(struct dw_i2c_dev * dev)970 static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
971 {
972 struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
973 struct i2c_adapter *adap = &dev->adapter;
974 struct gpio_desc *gpio;
975
976 gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
977 if (IS_ERR_OR_NULL(gpio))
978 return PTR_ERR_OR_ZERO(gpio);
979
980 rinfo->scl_gpiod = gpio;
981
982 gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
983 if (IS_ERR(gpio))
984 return PTR_ERR(gpio);
985 rinfo->sda_gpiod = gpio;
986
987 rinfo->pinctrl = devm_pinctrl_get(dev->dev);
988 if (IS_ERR(rinfo->pinctrl)) {
989 if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
990 return PTR_ERR(rinfo->pinctrl);
991
992 rinfo->pinctrl = NULL;
993 dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
994 } else if (!rinfo->pinctrl) {
995 dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
996 }
997
998 rinfo->recover_bus = i2c_generic_scl_recovery;
999 rinfo->prepare_recovery = i2c_dw_prepare_recovery;
1000 rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
1001 adap->bus_recovery_info = rinfo;
1002
1003 dev_info(dev->dev, "running with GPIO recovery mode! scl%s",
1004 rinfo->sda_gpiod ? ",sda" : "");
1005
1006 return 0;
1007 }
1008
/*
 * i2c_dw_probe_master() - Master-mode probe-time setup
 * @dev: controller context
 *
 * Initializes the completion used for transfer signaling, computes SCL
 * timings, preserves a BIOS-enabled bus-clear feature bit from DW_IC_CON,
 * and registers GPIO bus-recovery info.
 *
 * Return: 0 on success or a negative errno.
 */
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms BIOS advertises the bus clear feature
	 * and enables the SCL/SDA stuck low. SMU FW does the
	 * bus recovery process. Driver should not ignore this BIOS
	 * advertisement of bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	return i2c_dw_init_recovery_info(dev);
}
1041
1042 MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
1043 MODULE_LICENSE("GPL");
1044 MODULE_IMPORT_NS("I2C_DW_COMMON");
1045