xref: /linux/drivers/i2c/busses/i2c-designware-master.c (revision b0249c0d41b306ddd79de58ca7fea543ab5e7a2e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Synopsys DesignWare I2C adapter driver (master only).
4  *
5  * Based on the TI DAVINCI I2C adapter driver.
6  *
7  * Copyright (C) 2006 Texas Instruments.
8  * Copyright (C) 2007 MontaVista Software Inc.
9  * Copyright (C) 2009 Provigent Ltd.
10  */
11 
12 #define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"
13 
14 #include <linux/delay.h>
15 #include <linux/err.h>
16 #include <linux/errno.h>
17 #include <linux/export.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/i2c.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/module.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/regmap.h>
26 #include <linux/reset.h>
27 
28 #include "i2c-designware-core.h"
29 
30 #define AMD_TIMEOUT_MIN_US	25
31 #define AMD_TIMEOUT_MAX_US	250
32 #define AMD_MASTERCFG_MASK	GENMASK(15, 0)
33 
/*
 * i2c_dw_set_timings_master() - Derive SCL HCNT/LCNT values for master mode
 * @dev: device private data
 *
 * Reads IC_COMP_PARAM_1 to learn the controller capabilities and fills in
 * the standard, fast (or fast mode plus) and, when configured, high speed
 * mode SCL counts — unless they were already provided (e.g. by firmware,
 * in which case the non-zero values are kept). Falls back from high speed
 * to fast mode when the IP block does not support high speed.
 *
 * Returns 0 on success or a negative errno on register access failure.
 */
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	/* Bus/semaphore lock only needs to be held around the register read */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns, spec default */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns, spec default */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_SS_SCL_HCNT,
					ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_SS_SCL_LCNT,
					ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. Only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check are Fast Mode Plus parameters available. Calculate
		 * SCL timing parameters for Fast Mode Plus if not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_FS_SCL_HCNT,
						ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_FS_SCL_LCNT,
						ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * needed also in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_FS_SCL_HCNT,
					ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_FS_SCL_LCNT,
					ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check is high speed possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			u32 t_high, t_low;

			/*
			 * The legal values stated in the databook for bus
			 * capacitance are only 100pF and 400pF.
			 * If dev->bus_capacitance_pF is greater than or equals
			 * to 400, t_high and t_low are assumed to be
			 * appropriate values for 400pF, otherwise 100pF.
			 */
			if (dev->bus_capacitance_pF >= 400) {
				/* assume bus capacitance is 400pF */
				t_high = dev->clk_freq_optimized ? 160 : 120;
				t_low = 320;
			} else {
				/* assume bus capacitance is 100pF */
				t_high = 60;
				t_low = dev->clk_freq_optimized ? 120 : 160;
			}

			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_HS_SCL_HCNT,
						ic_clk,
						t_high,
						sda_falling_time,
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_HS_SCL_LCNT,
						ic_clk,
						t_low,
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}
187 
/*
 * i2c_dw_xfer_init() - Program the controller for the upcoming transfer
 * @dev: device private data
 *
 * Disables the adapter, selects master mode, configures 7/10-bit
 * addressing, writes the target address and re-enables the adapter with
 * the master interrupt mask armed. Callers must have set dev->msgs and
 * dev->msg_write_idx beforehand. The exact register write order below is
 * deliberate (the adapter must be disabled while IC_CON/IC_TAR change).
 */
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	i2c_dw_set_mode(dev, DW_IC_MASTER);

	/* If the slave address is ten bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of IC_TAR register.
		 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	__i2c_dw_write_intr_mask(dev, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}
234 
235 /*
236  * This function waits for the controller to be idle before disabling I2C
237  * When the controller is not in the IDLE state, the MST_ACTIVITY bit
238  * (IC_STATUS[5]) is set.
239  *
240  * Values:
241  * 0x1 (ACTIVE): Controller not idle
242  * 0x0 (IDLE): Controller is idle
243  *
244  * The function is called after completing the current transfer.
245  *
246  * Returns:
247  * False when the controller is in the IDLE state.
248  * True when the controller is in the ACTIVE state.
249  */
250 static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
251 {
252 	u32 status;
253 
254 	regmap_read(dev->map, DW_IC_STATUS, &status);
255 	if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
256 		return false;
257 
258 	return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
259 				       !(status & DW_IC_STATUS_MASTER_ACTIVITY),
260 				       1100, 20000) != 0;
261 }
262 
263 static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
264 {
265 	u32 val;
266 	int ret;
267 
268 	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
269 				       !(val & DW_IC_INTR_STOP_DET),
270 					1100, 20000);
271 	if (ret)
272 		dev_err(dev->dev, "i2c timeout error %d\n", ret);
273 
274 	return ret;
275 }
276 
/*
 * Combined bus state check: first wait for the bus to become idle, then
 * verify the STOP condition has been handled. Returns 0 when both checks
 * pass, otherwise the first failing check's error code.
 */
static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_wait_bus_not_busy(dev);

	return ret ? ret : i2c_dw_check_stopbit(dev);
}
287 
/*
 * amd_i2c_dw_xfer_quirk() - Polling based transfer routine for AMD NAVI GPU
 * @dev: device private data
 * @msgs: messages to transfer
 * @num_msgs: number of messages
 *
 * Initiates and continues the master read/write transaction by polling,
 * writing the messages straight into the TX FIFO. Used instead of the
 * interrupt driven path because of hardware quirks on the NAVI GPU I2C
 * block (see the doubled command write below).
 *
 * Returns 0 on success or a negative errno.
 */
static int amd_i2c_dw_xfer_quirk(struct dw_i2c_dev *dev, struct i2c_msg *msgs, int num_msgs)
{
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/* Keep the device runtime-active for the whole transfer */
	ACQUIRE(pm_runtime_active_auto_try, pm)(dev->dev);
	if (ACQUIRE_ERR(pm_runtime_active_auto_try, &pm))
		return -ENXIO;

	/*
	 * In order to enable the interrupt for UCSI i.e. AMD NAVI GPU card,
	 * it is mandatory to set the right value in specific register
	 * (offset:0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	dev->msg_write_idx = 0;
	i2c_dw_xfer_init(dev);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		/* TX threshold only matters for write messages */
		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with stop bit enable.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			/* STOP (BIT(9)) only on the last byte of the last message */
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to hardware bug, need to write the same command twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers so we check it always
					 * when read/write the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					/* Drain the whole RX buffer in one go */
					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				/* Give the slow NAVI I2C block time to consume the byte */
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}
367 
/*
 * Initiate (and continue) low level master read/write transaction.
 * This function is only called from i2c_dw_isr(), and pumping i2c_msg
 * messages into the tx buffer.  Even if the size of i2c_msg data is
 * longer than the size of the tx buffer, it handles everything.
 *
 * Progress is tracked across calls via dev->msg_write_idx, dev->tx_buf /
 * dev->tx_buf_len and the STATUS_WRITE_IN_PROGRESS flag; on return the
 * interrupt mask is updated to match the remaining work.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/*
			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
					(dev->msg_write_idx > 0))
				need_restart = true;
		}

		/* Free TX FIFO entries available for commands */
		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		/* Free RX FIFO entries available for read data */
		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);	/* STOP bit in IC_DATA_CMD */

			if (need_restart) {
				cmd |= BIT(10);	/* RESTART bit in IC_DATA_CMD */
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				/* 0x100 = read command in IC_DATA_CMD */
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		/* Remember where to resume on the next TX_EMPTY interrupt */
		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid the
		 * bogus interrupts flood.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If i2c_msg index search is completed, we don't need TX_EMPTY
	 * interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	/* On error, silence everything; completion comes via TX_ABRT path */
	if (dev->msg_err)
		intr_mask = 0;

	__i2c_dw_write_intr_mask(dev, intr_mask);
}
500 
/*
 * i2c_dw_recv_len() - Adjust an I2C_M_RECV_LEN message after the length byte
 * @dev: device private data
 * @len: block length byte just received from the slave
 *
 * Grows the message length by the trailing PEC byte (when requested) plus
 * the length byte itself, clears I2C_M_RECV_LEN so this adjustment happens
 * only once, and re-enables TX_EMPTY so i2c_dw_xfer_msg() can queue the
 * remaining read commands. Returns the adjusted total length.
 */
static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;
	unsigned int intr_mask;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	/* Read commands already queued (rx_outstanding) need no re-issue */
	dev->tx_buf_len = len - min(len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * Received buffer length, re-enable TX_EMPTY interrupt
	 * to resume the SMBUS transaction.
	 */
	__i2c_dw_read_intr_mask(dev, &intr_mask);
	intr_mask |= DW_IC_INTR_TX_EMPTY;
	__i2c_dw_write_intr_mask(dev, intr_mask);

	return len;
}
527 
/*
 * i2c_dw_read() - Drain the RX FIFO into the current read message(s)
 * @dev: device private data
 *
 * Walks the message list from dev->msg_read_idx, copying as many bytes as
 * the RX FIFO currently holds. When a message cannot be completed yet, the
 * position is saved in dev->rx_buf / dev->rx_buf_len together with
 * STATUS_READ_IN_PROGRESS and the function returns, to be resumed on the
 * next RX_FULL interrupt.
 */
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		u32 flags = msgs[dev->msg_read_idx].flags;
		unsigned int tmp;
		u32 len;
		u8 *buf;

		/* Only read messages consume RX data */
		if (!(flags & I2C_M_RD))
			continue;

		/* Resume a partially filled message, or start a fresh one */
		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
				 * detected from the registers, the controller can be
				 * disabled if the STOP bit is set. But it is only set
				 * after receiving block data response length in
				 * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read
				 * another byte with STOP bit set when the block data
				 * response length is invalid to complete the transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			/* FIFO empty before the message completed; resume later */
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}
585 
/*
 * i2c_dw_read_clear_intrbits() - Read and acknowledge pending interrupts
 * @dev: device private data
 *
 * Returns the masked interrupt status and clears each pending source
 * individually via its IC_CLR_* register. In polling mode the raw status
 * is filtered through the software mask (dev->sw_mask) instead of the
 * hardware IC_INTR_STAT.
 */
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	if (!(dev->flags & ACCESS_POLLING)) {
		regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
	} else {
		regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
		stat &= dev->sw_mask;
	}

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts, triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	/*
	 * Defer the STOP_DET ack while read data is still outstanding and
	 * RX_FULL has not fired, so the stop is not lost before the FIFO
	 * has been drained.
	 */
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}
646 
/*
 * i2c_dw_process_transfer() - Advance the transfer according to @stat
 * @dev: device private data
 * @stat: interrupt status bits already acknowledged by
 *        i2c_dw_read_clear_intrbits()
 *
 * Handles aborts, drains RX on RX_FULL, refills TX on TX_EMPTY, and
 * completes dev->cmd_complete once a stop/abort is seen with no read data
 * outstanding. Shared by the ISR and the polling loop.
 */
static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
{
	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	     (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		__i2c_dw_read_intr_mask(dev, &stat);
		__i2c_dw_write_intr_mask(dev, 0);
		__i2c_dw_write_intr_mask(dev, stat);
	}
}
685 
/*
 * Interrupt service routine. This gets called whenever an I2C master interrupt
 * occurs.
 *
 * Returns IRQ_NONE when the interrupt clearly did not originate from this
 * controller (adapter disabled, only ACTIVITY pending, device runtime
 * suspended, or the all-ones read typical of a powered-down device), so a
 * shared IRQ line is not starved.
 */
irqreturn_t i2c_dw_isr_master(struct dw_i2c_dev *dev)
{
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	/* stat == all-ones usually means the device is powered off the bus */
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt in driver point of view. State
		 * variables are either unset or stale so acknowledge and
		 * disable interrupts for suppressing further interrupts if
		 * interrupt really came from this HW (E.g. firmware has left
		 * the HW active).
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		return IRQ_HANDLED;
	}

	i2c_dw_process_transfer(dev, stat);

	return IRQ_HANDLED;
}
720 
/*
 * i2c_dw_wait_transfer() - Wait for the current transfer to finish
 * @dev: device private data
 *
 * In interrupt mode simply waits on dev->cmd_complete. In polling mode
 * (ACCESS_POLLING) the interrupt status is serviced by hand until the
 * completion fires or the adapter timeout elapses.
 *
 * Returns 0 on completion or -ETIMEDOUT.
 */
static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
{
	unsigned long timeout = dev->adapter.timeout;
	unsigned int stat;
	int ret;

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
	} else {
		/* Turn the relative timeout into an absolute jiffies deadline */
		timeout += jiffies;
		do {
			ret = try_wait_for_completion(&dev->cmd_complete);
			if (ret)
				break;

			stat = i2c_dw_read_clear_intrbits(dev);
			if (stat)
				i2c_dw_process_transfer(dev, stat);
			else
				/* Try save some power */
				usleep_range(3, 25);
		} while (time_before(jiffies, timeout));
	}

	return ret ? 0 : -ETIMEDOUT;
}
747 
/*
 * Prepare controller for a transaction and call i2c_dw_xfer_msg.
 *
 * Resets the per-transfer bookkeeping, kicks off the transfer via
 * i2c_dw_xfer_init(), waits for completion, then disables the adapter and
 * translates hardware/driver state into the i2c-core return convention
 * (number of messages on success, negative errno on failure).
 */
static int
i2c_dw_xfer_common(struct dw_i2c_dev *dev, struct i2c_msg msgs[], int num)
{
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	/* Reset all per-transfer state before the hardware is touched */
	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	ret = i2c_dw_wait_transfer(dev);
	if (ret) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init(dev);
		goto done;
	}

	/*
	 * This happens rarely (~1:500) and is hard to reproduce. Debug trace
	 * showed that IC_STATUS had value of 0x23 when STOP_DET occurred,
	 * if disable IC_ENABLE.ENABLE immediately that can result in
	 * IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check if
	 * controller is still ACTIVE before disabling I2C.
	 */
	if (i2c_dw_is_controller_active(dev))
		dev_err(dev->dev, "controller active\n");

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	/*
	 * NOTE(review): the controller is switched back to slave mode after
	 * every transfer — presumably to idle in slave role on multi-role
	 * setups; confirm against i2c_dw_set_mode() semantics.
	 */
	i2c_dw_set_mode(dev, DW_IC_SLAVE);

	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}
845 
846 int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
847 {
848 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
849 
850 	if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
851 		return amd_i2c_dw_xfer_quirk(dev, msgs, num);
852 
853 	return i2c_dw_xfer_common(dev, msgs, num);
854 }
855 
856 void i2c_dw_configure_master(struct dw_i2c_dev *dev)
857 {
858 	struct i2c_timings *t = &dev->timings;
859 
860 	dev->functionality |= I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
861 
862 	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
863 			  DW_IC_CON_RESTART_EN;
864 
865 	dev->mode = DW_IC_MASTER;
866 
867 	switch (t->bus_freq_hz) {
868 	case I2C_MAX_STANDARD_MODE_FREQ:
869 		dev->master_cfg |= DW_IC_CON_SPEED_STD;
870 		break;
871 	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
872 		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
873 		break;
874 	default:
875 		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
876 	}
877 }
878 EXPORT_SYMBOL_GPL(i2c_dw_configure_master);
879 
/*
 * i2c_dw_prepare_recovery() - Quiesce the controller before GPIO recovery
 * @adap: the adapter being recovered
 *
 * Disables the adapter, asserts its reset and gates the clock so the
 * SCL/SDA lines can be driven directly via GPIO. Mirrored by
 * i2c_dw_unprepare_recovery(); the call order here is deliberate.
 */
static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}
888 
/*
 * i2c_dw_unprepare_recovery() - Bring the controller back after recovery
 * @adap: the adapter being recovered
 *
 * Reverse of i2c_dw_prepare_recovery(): ungates the clock, deasserts the
 * reset and fully re-initializes the controller.
 */
static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init(dev);
}
897 
/*
 * i2c_dw_init_recovery_info() - Register GPIO based bus recovery, if wired
 * @dev: device private data
 *
 * The "scl" GPIO is mandatory for recovery: when it is absent
 * (devm_gpiod_get_optional() returns NULL) or errors out, the function
 * returns early (0 or the error, via PTR_ERR_OR_ZERO) without registering
 * any recovery callbacks. The "sda" GPIO and pinctrl are optional extras.
 *
 * Returns 0 on success (with or without recovery registered) or a
 * negative errno.
 */
static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
	if (IS_ERR(rinfo->pinctrl)) {
		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
			return PTR_ERR(rinfo->pinctrl);

		/* Non-fatal: recovery may still work without pinctrl */
		rinfo->pinctrl = NULL;
		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
	} else if (!rinfo->pinctrl) {
		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
	}

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with GPIO recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}
936 
/*
 * i2c_dw_probe_master() - Master mode specific probe time setup
 * @dev: device private data
 *
 * Computes the SCL timing parameters, honours a firmware enabled bus
 * clear feature, and registers GPIO bus recovery when available.
 *
 * Returns 0 on success or a negative errno.
 */
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms BIOS advertises the bus clear feature
	 * and enables the SCL/SDA stuck low. SMU FW does the
	 * bus recovery process. Driver should not ignore this BIOS
	 * advertisement of bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Carry the firmware-set bus clear bit over into our master config */
	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	return i2c_dw_init_recovery_info(dev);
}
969 
970 MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
971 MODULE_LICENSE("GPL");
972 MODULE_IMPORT_NS("I2C_DW_COMMON");
973