1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Synopsys DesignWare I2C adapter driver (master only).
4 *
5 * Based on the TI DAVINCI I2C adapter driver.
6 *
7 * Copyright (C) 2006 Texas Instruments.
8 * Copyright (C) 2007 MontaVista Software Inc.
9 * Copyright (C) 2009 Provigent Ltd.
10 */
11
12 #define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
13
14 #include <linux/delay.h>
15 #include <linux/err.h>
16 #include <linux/errno.h>
17 #include <linux/export.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/i2c.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/module.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/regmap.h>
26 #include <linux/reset.h>
27
28 #include "i2c-designware-core.h"
29
30 #define AMD_TIMEOUT_MIN_US 25
31 #define AMD_TIMEOUT_MAX_US 250
32 #define AMD_MASTERCFG_MASK GENMASK(15, 0)
33
/*
 * Program the Tx/Rx FIFO trigger levels and the master configuration
 * register. Called from i2c_dw_init_master() while the adapter is disabled.
 */
static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}
43
/*
 * Compute and cache the SCL high/low count values (and SDA hold time) for
 * every speed mode the controller may use: standard, fast/fast mode plus,
 * and high speed. Counts that were already provided (e.g. by firmware or
 * platform code) are kept as-is; only missing ones are calculated from the
 * input clock rate and the configured falling times.
 *
 * Return: 0 on success, or a negative errno on lock/register access failure.
 */
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_SS_SCL_HCNT,
					ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_SS_SCL_LCNT,
					ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. Only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check are Fast Mode Plus parameters available. Calculate
		 * SCL timing parameters for Fast Mode Plus if not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_FS_SCL_HCNT,
						ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_FS_SCL_LCNT,
						ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * needed also in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_FS_SCL_HCNT,
					ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_FS_SCL_LCNT,
					ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check is high speed possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			/* HW lacks high speed support: downgrade to fast mode */
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			u32 t_high, t_low;

			/*
			 * The legal values stated in the databook for bus
			 * capacitance are only 100pF and 400pF.
			 * If dev->bus_capacitance_pF is greater than or equals
			 * to 400, t_high and t_low are assumed to be
			 * appropriate values for 400pF, otherwise 100pF.
			 */
			if (dev->bus_capacitance_pF >= 400) {
				/* assume bus capacitance is 400pF */
				t_high = dev->clk_freq_optimized ? 160 : 120;
				t_low = 320;
			} else {
				/* assume bus capacitance is 100pF */
				t_high = 60;
				t_low = dev->clk_freq_optimized ? 120 : 160;
			}

			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_HS_SCL_HCNT,
						ic_clk,
						t_high,
						sda_falling_time,
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_HS_SCL_LCNT,
						ic_clk,
						t_low,
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}
201
202 /**
203 * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
204 * @dev: device private data
205 *
206 * This functions configures and enables the I2C master.
207 * This function is called during I2C init function, and in case of timeout at
208 * run time.
209 *
210 * Return: 0 on success, or negative errno otherwise.
211 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	/* Serialize hardware access (shared with other bus users, if any) */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter; timing registers must not be written while enabled */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}
246
/*
 * Prepare the controller for the message at dev->msg_write_idx: program the
 * addressing mode and target address while the adapter is disabled, then
 * re-enable the adapter and unmask the master interrupts.
 */
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is ten bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of IC_TAR register.
		 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	__i2c_dw_write_intr_mask(dev, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}
291
292 /*
293 * This function waits for the controller to be idle before disabling I2C
294 * When the controller is not in the IDLE state, the MST_ACTIVITY bit
295 * (IC_STATUS[5]) is set.
296 *
297 * Values:
298 * 0x1 (ACTIVE): Controller not idle
299 * 0x0 (IDLE): Controller is idle
300 *
301 * The function is called after completing the current transfer.
302 *
303 * Returns:
304 * False when the controller is in the IDLE state.
305 * True when the controller is in the ACTIVE state.
306 */
i2c_dw_is_controller_active(struct dw_i2c_dev * dev)307 static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
308 {
309 u32 status;
310
311 regmap_read(dev->map, DW_IC_STATUS, &status);
312 if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
313 return false;
314
315 return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
316 !(status & DW_IC_STATUS_MASTER_ACTIVITY),
317 1100, 20000) != 0;
318 }
319
i2c_dw_check_stopbit(struct dw_i2c_dev * dev)320 static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
321 {
322 u32 val;
323 int ret;
324
325 ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
326 !(val & DW_IC_INTR_STOP_DET),
327 1100, 20000);
328 if (ret)
329 dev_err(dev->dev, "i2c timeout error %d\n", ret);
330
331 return ret;
332 }
333
/* Wait for a free bus, then make sure the STOP condition went out. */
static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int ret = i2c_dw_wait_bus_not_busy(dev);

	return ret ? ret : i2c_dw_check_stopbit(dev);
}
344
345 /*
346 * Initiate and continue master read/write transaction with polling
347 * based transfer routine afterward write messages into the Tx buffer.
348 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI i.e. AMD NAVI GPU card,
	 * it is mandatory to set the right value in specific register
	 * (offset:0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	dev->msg_write_idx = 0;
	i2c_dw_xfer_init(dev);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with stop bit enable.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			/* BIT(9) of IC_DATA_CMD requests a STOP after this byte */
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to hardware bug, need to write the same command twice. */
				/* 0x100 is the read-command bit in IC_DATA_CMD */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers so we check it always
					 * when read/write the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					/* Drain the received bytes into the message buffer */
					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}
421
422 /*
423 * Initiate (and continue) low level master read/write transaction.
424 * This function is only called from i2c_dw_isr(), and pumping i2c_msg
425 * messages into the tx buffer. Even if the size of i2c_msg data is
426 * longer than the size of the tx buffer, it handles everything.
427 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/*
			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
			    (dev->msg_write_idx > 0))
				need_restart = true;
		}

		/* Room left in the Tx FIFO */
		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		/* Room left in the Rx FIFO */
		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);	/* STOP after this byte */

			if (need_restart) {
				cmd |= BIT(10);	/* RESTART before this byte */
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				/* 0x100 is the read-command bit in IC_DATA_CMD */
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		/* Remember where we stopped so the next TX_EMPTY resumes here */
		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid the
		 * bogus interrupts flood.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If i2c_msg index search is completed, we don't need TX_EMPTY
	 * interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	__i2c_dw_write_intr_mask(dev, intr_mask);
}
554
/*
 * Handle the length byte of an SMBus block read: extend the message length
 * by the received count (plus a PEC byte when requested), clear
 * I2C_M_RECV_LEN, and re-enable TX_EMPTY so i2c_dw_xfer_msg() can issue the
 * remaining read commands. Returns the adjusted message length.
 */
static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;
	unsigned int intr_mask;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	/* Only the not-yet-queued part still needs read commands */
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * Received buffer length, re-enable TX_EMPTY interrupt
	 * to resume the SMBUS transaction.
	 */
	__i2c_dw_read_intr_mask(dev, &intr_mask);
	intr_mask |= DW_IC_INTR_TX_EMPTY;
	__i2c_dw_write_intr_mask(dev, intr_mask);

	return len;
}
581
582 static void
i2c_dw_read(struct dw_i2c_dev * dev)583 i2c_dw_read(struct dw_i2c_dev *dev)
584 {
585 struct i2c_msg *msgs = dev->msgs;
586 unsigned int rx_valid;
587
588 for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
589 unsigned int tmp;
590 u32 len;
591 u8 *buf;
592
593 if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
594 continue;
595
596 if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
597 len = msgs[dev->msg_read_idx].len;
598 buf = msgs[dev->msg_read_idx].buf;
599 } else {
600 len = dev->rx_buf_len;
601 buf = dev->rx_buf;
602 }
603
604 regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);
605
606 for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
607 u32 flags = msgs[dev->msg_read_idx].flags;
608
609 regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
610 tmp &= DW_IC_DATA_CMD_DAT;
611 /* Ensure length byte is a valid value */
612 if (flags & I2C_M_RECV_LEN) {
613 /*
614 * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
615 * detected from the registers, the controller can be
616 * disabled if the STOP bit is set. But it is only set
617 * after receiving block data response length in
618 * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read
619 * another byte with STOP bit set when the block data
620 * response length is invalid to complete the transaction.
621 */
622 if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
623 tmp = 1;
624
625 len = i2c_dw_recv_len(dev, tmp);
626 }
627 *buf++ = tmp;
628 dev->rx_outstanding--;
629 }
630
631 if (len > 0) {
632 dev->status |= STATUS_READ_IN_PROGRESS;
633 dev->rx_buf_len = len;
634 dev->rx_buf = buf;
635 return;
636 } else
637 dev->status &= ~STATUS_READ_IN_PROGRESS;
638 }
639 }
640
/*
 * Read the pending (enabled) interrupt status and acknowledge each pending
 * source individually via its IC_CLR_* register. Returns the status bits
 * that were pending. TX_ABRT additionally preserves the abort source in
 * dev->abort_source before the clear wipes it.
 */
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	if (!(dev->flags & ACCESS_POLLING)) {
		regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
	} else {
		/* Polling mode: apply the software mask to the raw status */
		regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
		stat &= dev->sw_mask;
	}

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts, triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	/* STOP_DET is only acknowledged once all expected Rx data is in */
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}
701
/*
 * Act on the already-cleared interrupt status bits: handle Tx aborts,
 * drain the Rx FIFO, refill the Tx FIFO, and complete the transfer when
 * a STOP (or abort/error) has been seen and no Rx data is outstanding.
 */
static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
{
	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		__i2c_dw_read_intr_mask(dev, &stat);
		__i2c_dw_write_intr_mask(dev, 0);
		__i2c_dw_write_intr_mask(dev, stat);
	}
}
740
741 /*
742 * Interrupt service routine. This gets called whenever an I2C master interrupt
743 * occurs.
744 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	/* Not our interrupt: adapter disabled or only ACTIVITY pending */
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	/*
	 * NOTE(review): all-ones status presumably indicates the device is
	 * inaccessible (e.g. powered down) — confirm against the HW manual.
	 */
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt in driver point of view. State
		 * variables are either unset or stale so acknowledge and
		 * disable interrupts for suppressing further interrupts if
		 * interrupt really came from this HW (E.g. firmware has left
		 * the HW active).
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		return IRQ_HANDLED;
	}

	i2c_dw_process_transfer(dev, stat);

	return IRQ_HANDLED;
}
776
/*
 * Wait for the current transfer to finish. In interrupt mode, sleep on the
 * completion; in polling mode (ACCESS_POLLING), read and process the
 * interrupt status ourselves until the completion fires or time runs out.
 *
 * Return: 0 when the transfer completed, -ETIMEDOUT otherwise.
 */
static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
{
	unsigned long timeout = dev->adapter.timeout;
	unsigned int stat;
	int ret;

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
	} else {
		timeout += jiffies;
		do {
			ret = try_wait_for_completion(&dev->cmd_complete);
			if (ret)
				break;

			stat = i2c_dw_read_clear_intrbits(dev);
			if (stat)
				i2c_dw_process_transfer(dev, stat);
			else
				/* Try save some power */
				usleep_range(3, 25);
		} while (time_before(jiffies, timeout));
	}

	return ret ? 0 : -ETIMEDOUT;
}
803
804 /*
805 * Prepare controller for a transaction and call i2c_dw_xfer_msg.
806 */
807 static int
i2c_dw_xfer(struct i2c_adapter * adap,struct i2c_msg msgs[],int num)808 i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
809 {
810 struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
811 int ret;
812
813 dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
814
815 pm_runtime_get_sync(dev->dev);
816
817 switch (dev->flags & MODEL_MASK) {
818 case MODEL_AMD_NAVI_GPU:
819 ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
820 goto done_nolock;
821 default:
822 break;
823 }
824
825 reinit_completion(&dev->cmd_complete);
826 dev->msgs = msgs;
827 dev->msgs_num = num;
828 dev->cmd_err = 0;
829 dev->msg_write_idx = 0;
830 dev->msg_read_idx = 0;
831 dev->msg_err = 0;
832 dev->status = 0;
833 dev->abort_source = 0;
834 dev->rx_outstanding = 0;
835
836 ret = i2c_dw_acquire_lock(dev);
837 if (ret)
838 goto done_nolock;
839
840 ret = i2c_dw_wait_bus_not_busy(dev);
841 if (ret < 0)
842 goto done;
843
844 /* Start the transfers */
845 i2c_dw_xfer_init(dev);
846
847 /* Wait for tx to complete */
848 ret = i2c_dw_wait_transfer(dev);
849 if (ret) {
850 dev_err(dev->dev, "controller timed out\n");
851 /* i2c_dw_init_master() implicitly disables the adapter */
852 i2c_recover_bus(&dev->adapter);
853 i2c_dw_init_master(dev);
854 goto done;
855 }
856
857 /*
858 * This happens rarely (~1:500) and is hard to reproduce. Debug trace
859 * showed that IC_STATUS had value of 0x23 when STOP_DET occurred,
860 * if disable IC_ENABLE.ENABLE immediately that can result in
861 * IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check if
862 * controller is still ACTIVE before disabling I2C.
863 */
864 if (i2c_dw_is_controller_active(dev))
865 dev_err(dev->dev, "controller active\n");
866
867 /*
868 * We must disable the adapter before returning and signaling the end
869 * of the current transfer. Otherwise the hardware might continue
870 * generating interrupts which in turn causes a race condition with
871 * the following transfer. Needs some more investigation if the
872 * additional interrupts are a hardware bug or this driver doesn't
873 * handle them correctly yet.
874 */
875 __i2c_dw_disable_nowait(dev);
876
877 if (dev->msg_err) {
878 ret = dev->msg_err;
879 goto done;
880 }
881
882 /* No error */
883 if (likely(!dev->cmd_err && !dev->status)) {
884 ret = num;
885 goto done;
886 }
887
888 /* We have an error */
889 if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
890 ret = i2c_dw_handle_tx_abort(dev);
891 goto done;
892 }
893
894 if (dev->status)
895 dev_err(dev->dev,
896 "transfer terminated early - interrupt latency too high?\n");
897
898 ret = -EIO;
899
900 done:
901 i2c_dw_release_lock(dev);
902
903 done_nolock:
904 pm_runtime_mark_last_busy(dev->dev);
905 pm_runtime_put_autosuspend(dev->dev);
906
907 return ret;
908 }
909
/* Transfer callbacks handed to the I2C core */
static const struct i2c_algorithm i2c_dw_algo = {
	.xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

/* Zero-length messages are not supported by this controller */
static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};
918
i2c_dw_configure_master(struct dw_i2c_dev * dev)919 void i2c_dw_configure_master(struct dw_i2c_dev *dev)
920 {
921 struct i2c_timings *t = &dev->timings;
922
923 dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
924
925 dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
926 DW_IC_CON_RESTART_EN;
927
928 dev->mode = DW_IC_MASTER;
929
930 switch (t->bus_freq_hz) {
931 case I2C_MAX_STANDARD_MODE_FREQ:
932 dev->master_cfg |= DW_IC_CON_SPEED_STD;
933 break;
934 case I2C_MAX_HIGH_SPEED_MODE_FREQ:
935 dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
936 break;
937 default:
938 dev->master_cfg |= DW_IC_CON_SPEED_FAST;
939 }
940 }
941 EXPORT_SYMBOL_GPL(i2c_dw_configure_master);
942
/*
 * Quiesce the controller before GPIO bus recovery: disable the adapter,
 * assert its reset and gate the clock so the SCL/SDA lines can be driven
 * by the recovery GPIOs.
 */
static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}
951
/*
 * Bring the controller back after GPIO bus recovery: ungate the clock,
 * deassert reset and reinitialize the master (mirror image of
 * i2c_dw_prepare_recovery()).
 */
static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}
960
i2c_dw_init_recovery_info(struct dw_i2c_dev * dev)961 static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
962 {
963 struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
964 struct i2c_adapter *adap = &dev->adapter;
965 struct gpio_desc *gpio;
966
967 gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
968 if (IS_ERR_OR_NULL(gpio))
969 return PTR_ERR_OR_ZERO(gpio);
970
971 rinfo->scl_gpiod = gpio;
972
973 gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
974 if (IS_ERR(gpio))
975 return PTR_ERR(gpio);
976 rinfo->sda_gpiod = gpio;
977
978 rinfo->pinctrl = devm_pinctrl_get(dev->dev);
979 if (IS_ERR(rinfo->pinctrl)) {
980 if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
981 return PTR_ERR(rinfo->pinctrl);
982
983 rinfo->pinctrl = NULL;
984 dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
985 } else if (!rinfo->pinctrl) {
986 dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
987 }
988
989 rinfo->recover_bus = i2c_generic_scl_recovery;
990 rinfo->prepare_recovery = i2c_dw_prepare_recovery;
991 rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
992 adap->bus_recovery_info = rinfo;
993
994 dev_info(dev->dev, "running with GPIO recovery mode! scl%s",
995 rinfo->sda_gpiod ? ",sda" : "");
996
997 return 0;
998 }
999
/*
 * Probe-time setup for master mode: regmap and timing initialization,
 * FIFO sizing, controller init, optional IRQ request (skipped in polling
 * mode), GPIO recovery setup and finally adapter registration.
 *
 * Return: 0 on success, or a negative errno on any setup failure.
 */
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	/* Re-init hook used e.g. after transfer timeouts */
	dev->init = i2c_dw_init_master;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms BIOS advertises the bus clear feature
	 * and enables the SCL/SDA stuck low. SMU FW does the
	 * bus recovery process. Driver should not ignore this BIOS
	 * advertisement of bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	if (!adap->name[0])
		scnprintf(adap->name, sizeof(adap->name),
			  "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
		irq_flags = IRQF_NO_SUSPEND;
	} else {
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
	}

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Mask all interrupts before the IRQ handler is installed */
	__i2c_dw_write_intr_mask(dev, 0);
	i2c_dw_release_lock(dev);

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
				       irq_flags, dev_name(dev->dev), dev);
		if (ret) {
			dev_err(dev->dev, "failure requesting irq %i: %d\n",
				dev->irq, ret);
			return ret;
		}
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment PM usage count during adapter registration in order to
	 * avoid possible spurious runtime suspend when adapter device is
	 * registered to the device core and immediate resume in case bus has
	 * registered I2C slaves that do I2C transfers in their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);
1097
1098 MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
1099 MODULE_LICENSE("GPL");
1100 MODULE_IMPORT_NS("I2C_DW_COMMON");
1101