1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Synopsys DesignWare I2C adapter driver (master only).
4  *
5  * Based on the TI DAVINCI I2C adapter driver.
6  *
7  * Copyright (C) 2006 Texas Instruments.
8  * Copyright (C) 2007 MontaVista Software Inc.
9  * Copyright (C) 2009 Provigent Ltd.
10  */
11 #include <linux/delay.h>
12 #include <linux/err.h>
13 #include <linux/errno.h>
14 #include <linux/export.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/i2c.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/module.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/regmap.h>
22 #include <linux/reset.h>
23 
24 #include "i2c-designware-core.h"
25 
26 #define AMD_TIMEOUT_MIN_US	25
27 #define AMD_TIMEOUT_MAX_US	250
28 #define AMD_MASTERCFG_MASK	GENMASK(15, 0)
29 
30 static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
31 {
32 	/* Configure Tx/Rx FIFO threshold levels */
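	/*
	 * A Tx threshold of half the FIFO depth raises TX_EMPTY once the Tx
	 * FIFO is at most half full; an Rx threshold of 0 raises RX_FULL as
	 * soon as a single byte has been received.
	 */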
33 	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
34 	regmap_write(dev->map, DW_IC_RX_TL, 0);
35 
36 	/* Configure the I2C master */
37 	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
38 }
39 
40 static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
41 {
42 	unsigned int comp_param1;
43 	u32 sda_falling_time, scl_falling_time;
44 	struct i2c_timings *t = &dev->timings;
45 	const char *fp_str = "";
46 	u32 ic_clk;
47 	int ret;
48 
49 	ret = i2c_dw_acquire_lock(dev);
50 	if (ret)
51 		return ret;
52 
53 	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
54 	i2c_dw_release_lock(dev);
55 	if (ret)
56 		return ret;
57 
58 	/* Set standard and fast speed dividers for high/low periods */
59 	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
60 	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */
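	/*
	 * 300 ns is the worst-case signal fall time allowed by the I2C
	 * specification for standard and fast mode.
	 */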
61 
62 	/* Calculate SCL timing parameters for standard mode if not set */
63 	if (!dev->ss_hcnt || !dev->ss_lcnt) {
64 		ic_clk = i2c_dw_clk_rate(dev);
65 		dev->ss_hcnt =
66 			i2c_dw_scl_hcnt(ic_clk,
67 					4000,	/* tHD;STA = tHIGH = 4.0 us */
68 					sda_falling_time,
69 					0,	/* 0: DW default, 1: Ideal */
70 					0);	/* No offset */
71 		dev->ss_lcnt =
72 			i2c_dw_scl_lcnt(ic_clk,
73 					4700,	/* tLOW = 4.7 us */
74 					scl_falling_time,
75 					0);	/* No offset */
76 	}
77 	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
78 		dev->ss_hcnt, dev->ss_lcnt);
79 
80 	/*
81 	 * Set SCL timing parameters for fast mode or fast mode plus. Only
82 	 * difference is the timing parameter values since the registers are
83 	 * the same.
84 	 */
85 	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
86 		/*
87 		 * Check whether Fast Mode Plus parameters are available and
88 		 * calculate the SCL timing parameters for Fast Mode Plus if not.
89 		 */
90 		if (dev->fp_hcnt && dev->fp_lcnt) {
91 			dev->fs_hcnt = dev->fp_hcnt;
92 			dev->fs_lcnt = dev->fp_lcnt;
93 		} else {
94 			ic_clk = i2c_dw_clk_rate(dev);
95 			dev->fs_hcnt =
96 				i2c_dw_scl_hcnt(ic_clk,
97 						260,	/* tHIGH = 260 ns */
98 						sda_falling_time,
99 						0,	/* DW default */
100 						0);	/* No offset */
101 			dev->fs_lcnt =
102 				i2c_dw_scl_lcnt(ic_clk,
103 						500,	/* tLOW = 500 ns */
104 						scl_falling_time,
105 						0);	/* No offset */
106 		}
107 		fp_str = " Plus";
108 	}
109 	/*
110 	 * Calculate SCL timing parameters for fast mode if not set. They are
111 	 * also needed in high speed mode.
112 	 */
113 	if (!dev->fs_hcnt || !dev->fs_lcnt) {
114 		ic_clk = i2c_dw_clk_rate(dev);
115 		dev->fs_hcnt =
116 			i2c_dw_scl_hcnt(ic_clk,
117 					600,	/* tHD;STA = tHIGH = 0.6 us */
118 					sda_falling_time,
119 					0,	/* 0: DW default, 1: Ideal */
120 					0);	/* No offset */
121 		dev->fs_lcnt =
122 			i2c_dw_scl_lcnt(ic_clk,
123 					1300,	/* tLOW = 1.3 us */
124 					scl_falling_time,
125 					0);	/* No offset */
126 	}
127 	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
128 		fp_str, dev->fs_hcnt, dev->fs_lcnt);
129 
130 	/* Check whether high speed is possible and fall back to fast mode if not */
131 	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
132 		DW_IC_CON_SPEED_HIGH) {
133 		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
134 			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
135 			dev_err(dev->dev, "High Speed not supported!\n");
136 			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
137 			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
138 			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
139 			dev->hs_hcnt = 0;
140 			dev->hs_lcnt = 0;
141 		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
142 			ic_clk = i2c_dw_clk_rate(dev);
143 			dev->hs_hcnt =
144 				i2c_dw_scl_hcnt(ic_clk,
145 						160,	/* tHIGH = 160 ns */
146 						sda_falling_time,
147 						0,	/* DW default */
148 						0);	/* No offset */
149 			dev->hs_lcnt =
150 				i2c_dw_scl_lcnt(ic_clk,
151 						320,	/* tLOW = 320 ns */
152 						scl_falling_time,
153 						0);	/* No offset */
154 		}
155 		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
156 			dev->hs_hcnt, dev->hs_lcnt);
157 	}
158 
159 	ret = i2c_dw_set_sda_hold(dev);
160 	if (ret)
161 		return ret;
162 
163 	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
164 	return 0;
165 }
166 
167 /**
168  * i2c_dw_init_master() - Initialize the designware I2C master hardware
169  * @dev: device private data
170  *
171  * This function configures and enables the I2C master.
172  * It is called during I2C initialization and again at run time when a
173  * transfer times out.
174  */
175 static int i2c_dw_init_master(struct dw_i2c_dev *dev)
176 {
177 	int ret;
178 
179 	ret = i2c_dw_acquire_lock(dev);
180 	if (ret)
181 		return ret;
182 
183 	/* Disable the adapter */
184 	__i2c_dw_disable(dev);
185 
186 	/* Write standard speed timing parameters */
187 	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
188 	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);
189 
190 	/* Write fast mode/fast mode plus timing parameters */
191 	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
192 	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);
193 
194 	/* Write high speed timing parameters if supported */
195 	if (dev->hs_hcnt && dev->hs_lcnt) {
196 		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
197 		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
198 	}
199 
200 	/* Write SDA hold time if supported */
201 	if (dev->sda_hold_time)
202 		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);
203 
204 	i2c_dw_configure_fifo_master(dev);
205 	i2c_dw_release_lock(dev);
206 
207 	return 0;
208 }
209 
210 static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
211 {
212 	struct i2c_msg *msgs = dev->msgs;
213 	u32 ic_con = 0, ic_tar = 0;
214 	unsigned int dummy;
215 
216 	/* Disable the adapter */
217 	__i2c_dw_disable(dev);
218 
219 	/* If the slave address is a 10-bit address, enable 10BITADDR */
220 	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
221 		ic_con = DW_IC_CON_10BITADDR_MASTER;
222 		/*
223 		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
224 		 * mode has to be enabled via bit 12 of IC_TAR register.
225 		 * We always set it since I2C_DYNAMIC_TAR_UPDATE can't be
226 		 * detected from the registers.
227 		 */
228 		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
229 	}
230 
231 	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
232 			   ic_con);
233 
234 	/*
235 	 * Set the slave (target) address and enable 10-bit addressing mode
236 	 * if applicable.
237 	 */
238 	regmap_write(dev->map, DW_IC_TAR,
239 		     msgs[dev->msg_write_idx].addr | ic_tar);
240 
241 	/* Enforce disabled interrupts (due to HW issues) */
242 	regmap_write(dev->map, DW_IC_INTR_MASK, 0);
243 
244 	/* Enable the adapter */
245 	__i2c_dw_enable(dev);
246 
247 	/* Dummy read to avoid the register getting stuck on Bay Trail */
248 	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);
249 
250 	/* Clear and enable interrupts */
251 	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
252 	regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
253 }
254 
255 static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
256 {
257 	u32 val;
258 	int ret;
259 
260 	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
261 				       !(val & DW_IC_INTR_STOP_DET),
262 					1100, 20000);
263 	if (ret)
264 		dev_err(dev->dev, "i2c timeout error %d\n", ret);
265 
266 	return ret;
267 }
268 
269 static int i2c_dw_status(struct dw_i2c_dev *dev)
270 {
271 	int status;
272 
273 	status = i2c_dw_wait_bus_not_busy(dev);
274 	if (status)
275 		return status;
276 
277 	return i2c_dw_check_stopbit(dev);
278 }
279 
280 /*
281  * Initiate and continue a master read/write transaction with a polling
282  * based transfer routine, writing the messages into the Tx buffer.
283  */
284 static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
285 {
286 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
287 	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
288 	int cmd = 0, status;
289 	u8 *tx_buf;
290 	unsigned int val;
291 
292 	/*
293 	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
294 	 * card, it is mandatory to set the right value in a specific register
295 	 * (offset 0x474) as per the hardware IP specification.
296 	 */
297 	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);
298 
299 	dev->msgs = msgs;
300 	dev->msgs_num = num_msgs;
301 	i2c_dw_xfer_init(dev);
302 	regmap_write(dev->map, DW_IC_INTR_MASK, 0);
303 
304 	/* Initiate messages read/write transaction */
305 	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
306 		tx_buf = msgs[msg_wrt_idx].buf;
307 		buf_len = msgs[msg_wrt_idx].len;
308 
309 		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
310 			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
311 		/*
312 		 * Initiate the i2c read/write transaction one buffer length at a
313 		 * time and poll for the bus busy status. For the last message,
314 		 * update the command with the stop bit enabled.
315 		 */
316 		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
317 			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
318 				cmd |= BIT(9);
319 
320 			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
321 				/* Due to a hardware bug, the same command must be written twice. */
322 				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
323 				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
324 				if (cmd) {
325 					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
326 					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
327 					/*
328 					 * Need to check the stop bit. However, it cannot be
329 					 * detected from the registers, so we always check it
330 					 * when reading/writing the last byte.
331 					 */
332 					status = i2c_dw_status(dev);
333 					if (status)
334 						return status;
335 
336 					for (data_idx = 0; data_idx < buf_len; data_idx++) {
337 						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
338 						tx_buf[data_idx] = val;
339 					}
340 					status = i2c_dw_check_stopbit(dev);
341 					if (status)
342 						return status;
343 				}
344 			} else {
345 				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
346 				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
347 			}
348 		}
349 		status = i2c_dw_check_stopbit(dev);
350 		if (status)
351 			return status;
352 	}
353 
354 	return 0;
355 }
356 
357 static int i2c_dw_poll_tx_empty(struct dw_i2c_dev *dev)
358 {
359 	u32 val;
360 
361 	return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
362 					val & DW_IC_INTR_TX_EMPTY,
363 					100, 1000);
364 }
365 
366 static int i2c_dw_poll_rx_full(struct dw_i2c_dev *dev)
367 {
368 	u32 val;
369 
370 	return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
371 					val & DW_IC_INTR_RX_FULL,
372 					100, 1000);
373 }
374 
375 static int txgbe_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
376 				   int num_msgs)
377 {
378 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
379 	int msg_idx, buf_len, data_idx, ret;
380 	unsigned int val, stop = 0;
381 	u8 *buf;
382 
383 	dev->msgs = msgs;
384 	dev->msgs_num = num_msgs;
385 	i2c_dw_xfer_init(dev);
386 	regmap_write(dev->map, DW_IC_INTR_MASK, 0);
387 
388 	for (msg_idx = 0; msg_idx < num_msgs; msg_idx++) {
389 		buf = msgs[msg_idx].buf;
390 		buf_len = msgs[msg_idx].len;
391 
392 		for (data_idx = 0; data_idx < buf_len; data_idx++) {
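			/*
			 * Bit 9 of IC_DATA_CMD makes the controller issue a STOP
			 * after this byte; set it only for the final byte of the
			 * final message.
			 */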
393 			if (msg_idx == num_msgs - 1 && data_idx == buf_len - 1)
394 				stop |= BIT(9);
395 
396 			if (msgs[msg_idx].flags & I2C_M_RD) {
397 				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | stop);
398 
399 				ret = i2c_dw_poll_rx_full(dev);
400 				if (ret)
401 					return ret;
402 
403 				regmap_read(dev->map, DW_IC_DATA_CMD, &val);
404 				buf[data_idx] = val;
405 			} else {
406 				ret = i2c_dw_poll_tx_empty(dev);
407 				if (ret)
408 					return ret;
409 
410 				regmap_write(dev->map, DW_IC_DATA_CMD,
411 					     buf[data_idx] | stop);
412 			}
413 		}
414 	}
415 
416 	return num_msgs;
417 }
418 
419 /*
420  * Initiate (and continue) low level master read/write transaction.
421  * This function is only called from i2c_dw_isr(), and it pumps i2c_msg
422  * messages into the Tx buffer.  Even when the i2c_msg data is longer
423  * than the Tx buffer, it handles everything.
424  */
425 static void
426 i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
427 {
428 	struct i2c_msg *msgs = dev->msgs;
429 	u32 intr_mask;
430 	int tx_limit, rx_limit;
431 	u32 addr = msgs[dev->msg_write_idx].addr;
432 	u32 buf_len = dev->tx_buf_len;
433 	u8 *buf = dev->tx_buf;
434 	bool need_restart = false;
435 	unsigned int flr;
436 
437 	intr_mask = DW_IC_INTR_MASTER_MASK;
438 
439 	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
440 		u32 flags = msgs[dev->msg_write_idx].flags;
441 
442 		/*
443 		 * The target address is programmed only once in
444 		 * i2c_dw_xfer_init(), so all messages must use the same
445 		 * address; reject the transfer otherwise.
446 		 */
447 		if (msgs[dev->msg_write_idx].addr != addr) {
448 			dev_err(dev->dev,
449 				"%s: invalid target address\n", __func__);
450 			dev->msg_err = -EINVAL;
451 			break;
452 		}
453 
454 		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
455 			/* new i2c_msg */
456 			buf = msgs[dev->msg_write_idx].buf;
457 			buf_len = msgs[dev->msg_write_idx].len;
458 
459 			/*
460 			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and IC_RESTART_EN are
461 			 * set, we must manually set the restart bit between messages.
462 			 */
463 			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
464 					(dev->msg_write_idx > 0))
465 				need_restart = true;
466 		}
467 
468 		regmap_read(dev->map, DW_IC_TXFLR, &flr);
469 		tx_limit = dev->tx_fifo_depth - flr;
470 
471 		regmap_read(dev->map, DW_IC_RXFLR, &flr);
472 		rx_limit = dev->rx_fifo_depth - flr;
473 
474 		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
475 			u32 cmd = 0;
476 
477 			/*
478 			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
479 			 * manually set the stop bit. However, it cannot be
480 			 * detected from the registers, so we always set it
481 			 * when writing/reading the last byte.
482 			 */
483 
484 			/*
485 			 * i2c-core always sets the buffer length of
486 			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
487 			 * be adjusted when receiving the first byte.
488 			 * Thus we can't stop the transaction here.
489 			 */
490 			if (dev->msg_write_idx == dev->msgs_num - 1 &&
491 			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
492 				cmd |= BIT(9);
493 
494 			if (need_restart) {
495 				cmd |= BIT(10);
496 				need_restart = false;
497 			}
498 
499 			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
500 
501 				/* Avoid rx buffer overrun */
502 				if (dev->rx_outstanding >= dev->rx_fifo_depth)
503 					break;
504 
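				/*
				 * Bit 8 (0x100) of IC_DATA_CMD queues a read command;
				 * one byte is clocked in per queued read.
				 */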
505 				regmap_write(dev->map, DW_IC_DATA_CMD,
506 					     cmd | 0x100);
507 				rx_limit--;
508 				dev->rx_outstanding++;
509 			} else {
510 				regmap_write(dev->map, DW_IC_DATA_CMD,
511 					     cmd | *buf++);
512 			}
513 			tx_limit--; buf_len--;
514 		}
515 
516 		dev->tx_buf = buf;
517 		dev->tx_buf_len = buf_len;
518 
519 		/*
520 		 * Because we don't know the buffer length in the
521 		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
522 		 * the transaction here.
523 		 */
524 		if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
525 			/* more bytes to be written */
526 			dev->status |= STATUS_WRITE_IN_PROGRESS;
527 			break;
528 		} else
529 			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
530 	}
531 
532 	/*
533 	 * Once all i2c_msg entries have been processed, the TX_EMPTY
534 	 * interrupt is no longer needed.
535 	 */
536 	if (dev->msg_write_idx == dev->msgs_num)
537 		intr_mask &= ~DW_IC_INTR_TX_EMPTY;
538 
539 	if (dev->msg_err)
540 		intr_mask = 0;
541 
542 	regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask);
543 }
544 
545 static u8
546 i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
547 {
548 	struct i2c_msg *msgs = dev->msgs;
549 	u32 flags = msgs[dev->msg_read_idx].flags;
550 
551 	/*
552 	 * Adjust the buffer length and mask the flag
553 	 * after receiving the first byte.
554 	 */
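	/*
	 * The length byte received from the device counts only the data
	 * bytes, so add one for the length byte itself and one more for the
	 * PEC byte when PEC is in use.
	 */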
555 	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
556 	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
557 	msgs[dev->msg_read_idx].len = len;
558 	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
559 
560 	return len;
561 }
562 
563 static void
564 i2c_dw_read(struct dw_i2c_dev *dev)
565 {
566 	struct i2c_msg *msgs = dev->msgs;
567 	unsigned int rx_valid;
568 
569 	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
570 		unsigned int tmp;
571 		u32 len;
572 		u8 *buf;
573 
574 		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
575 			continue;
576 
577 		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
578 			len = msgs[dev->msg_read_idx].len;
579 			buf = msgs[dev->msg_read_idx].buf;
580 		} else {
581 			len = dev->rx_buf_len;
582 			buf = dev->rx_buf;
583 		}
584 
585 		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);
586 
587 		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
588 			u32 flags = msgs[dev->msg_read_idx].flags;
589 
590 			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
591 			tmp &= DW_IC_DATA_CMD_DAT;
592 			/* Ensure length byte is a valid value */
593 			if (flags & I2C_M_RECV_LEN) {
594 				/*
595 			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
596 			 * detected from the registers, the controller can only be
597 			 * disabled once the STOP bit has been sent. The STOP bit is
598 			 * only queued after the block data response length has been
599 			 * received in the I2C_FUNC_SMBUS_BLOCK_DATA case, so when
600 			 * that length is invalid we read one more byte with the STOP
601 			 * bit set in order to complete the transaction.
602 				 */
603 				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
604 					tmp = 1;
605 
606 				len = i2c_dw_recv_len(dev, tmp);
607 			}
608 			*buf++ = tmp;
609 			dev->rx_outstanding--;
610 		}
611 
612 		if (len > 0) {
613 			dev->status |= STATUS_READ_IN_PROGRESS;
614 			dev->rx_buf_len = len;
615 			dev->rx_buf = buf;
616 			return;
617 		} else
618 			dev->status &= ~STATUS_READ_IN_PROGRESS;
619 	}
620 }
621 
622 /*
623  * Prepare controller for a transaction and call i2c_dw_xfer_msg.
624  */
625 static int
626 i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
627 {
628 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
629 	int ret;
630 
631 	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
632 
633 	pm_runtime_get_sync(dev->dev);
634 
635 	/*
636 	 * Use the polling-based transfer routine for controllers that only
637 	 * support polling mode, since they cannot use the interrupt-based
638 	 * mechanism of the regular DesignWare transfer path.
639 	 */
640 	switch (dev->flags & MODEL_MASK) {
641 	case MODEL_AMD_NAVI_GPU:
642 		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
643 		goto done_nolock;
644 	case MODEL_WANGXUN_SP:
645 		ret = txgbe_i2c_dw_xfer_quirk(adap, msgs, num);
646 		goto done_nolock;
647 	default:
648 		break;
649 	}
650 
651 	reinit_completion(&dev->cmd_complete);
652 	dev->msgs = msgs;
653 	dev->msgs_num = num;
654 	dev->cmd_err = 0;
655 	dev->msg_write_idx = 0;
656 	dev->msg_read_idx = 0;
657 	dev->msg_err = 0;
658 	dev->status = 0;
659 	dev->abort_source = 0;
660 	dev->rx_outstanding = 0;
661 
662 	ret = i2c_dw_acquire_lock(dev);
663 	if (ret)
664 		goto done_nolock;
665 
666 	ret = i2c_dw_wait_bus_not_busy(dev);
667 	if (ret < 0)
668 		goto done;
669 
670 	/* Start the transfers */
671 	i2c_dw_xfer_init(dev);
672 
673 	/* Wait for tx to complete */
674 	if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
675 		dev_err(dev->dev, "controller timed out\n");
676 		/* i2c_dw_init_master() implicitly disables the adapter */
677 		i2c_recover_bus(&dev->adapter);
678 		i2c_dw_init_master(dev);
679 		ret = -ETIMEDOUT;
680 		goto done;
681 	}
682 
683 	/*
684 	 * We must disable the adapter before returning and signaling the end
685 	 * of the current transfer. Otherwise the hardware might continue
686 	 * generating interrupts which in turn causes a race condition with
687 	 * the following transfer.  It needs more investigation whether the
688 	 * additional interrupts are a hardware bug or this driver doesn't
689 	 * handle them correctly yet.
690 	 */
691 	__i2c_dw_disable_nowait(dev);
692 
693 	if (dev->msg_err) {
694 		ret = dev->msg_err;
695 		goto done;
696 	}
697 
698 	/* No error */
699 	if (likely(!dev->cmd_err && !dev->status)) {
700 		ret = num;
701 		goto done;
702 	}
703 
704 	/* We have an error */
705 	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
706 		ret = i2c_dw_handle_tx_abort(dev);
707 		goto done;
708 	}
709 
710 	if (dev->status)
711 		dev_err(dev->dev,
712 			"transfer terminated early - interrupt latency too high?\n");
713 
714 	ret = -EIO;
715 
716 done:
717 	i2c_dw_release_lock(dev);
718 
719 done_nolock:
720 	pm_runtime_mark_last_busy(dev->dev);
721 	pm_runtime_put_autosuspend(dev->dev);
722 
723 	return ret;
724 }
725 
726 static const struct i2c_algorithm i2c_dw_algo = {
727 	.master_xfer = i2c_dw_xfer,
728 	.functionality = i2c_dw_func,
729 };
730 
731 static const struct i2c_adapter_quirks i2c_dw_quirks = {
732 	.flags = I2C_AQ_NO_ZERO_LEN,
733 };
734 
735 static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
736 {
737 	unsigned int stat, dummy;
738 
739 	/*
740 	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
741 	 * The unmasked raw version of interrupt status bits is available
742 	 * in the IC_RAW_INTR_STAT register.
743 	 *
744 	 * That is,
745 	 *   stat = readl(IC_INTR_STAT);
746 	 * equals to,
747 	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
748 	 *
749 	 * The raw version might be useful for debugging purposes.
750 	 */
751 	regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
752 
753 	/*
754 	 * Do not use the IC_CLR_INTR register to clear interrupts, or
755 	 * you'll miss some interrupts, triggered during the period from
756 	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
757 	 *
758 	 * Instead, use the separately-prepared IC_CLR_* registers.
759 	 */
760 	if (stat & DW_IC_INTR_RX_UNDER)
761 		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
762 	if (stat & DW_IC_INTR_RX_OVER)
763 		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
764 	if (stat & DW_IC_INTR_TX_OVER)
765 		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
766 	if (stat & DW_IC_INTR_RD_REQ)
767 		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
768 	if (stat & DW_IC_INTR_TX_ABRT) {
769 		/*
770 		 * The IC_TX_ABRT_SOURCE register is cleared whenever
771 		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
772 		 */
773 		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
774 		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
775 	}
776 	if (stat & DW_IC_INTR_RX_DONE)
777 		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
778 	if (stat & DW_IC_INTR_ACTIVITY)
779 		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
780 	if ((stat & DW_IC_INTR_STOP_DET) &&
781 	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
782 		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
783 	if (stat & DW_IC_INTR_START_DET)
784 		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
785 	if (stat & DW_IC_INTR_GEN_CALL)
786 		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);
787 
788 	return stat;
789 }
790 
791 /*
792  * Interrupt service routine. This gets called whenever an I2C master interrupt
793  * occurs.
794  */
795 static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
796 {
797 	struct dw_i2c_dev *dev = dev_id;
798 	unsigned int stat, enabled;
799 
800 	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
801 	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
802 	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
803 		return IRQ_NONE;
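	/*
	 * Reading all ones typically means the device is no longer accessible
	 * (e.g. powered off or its clock gated), so the interrupt cannot be
	 * ours.
	 */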
804 	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
805 		return IRQ_NONE;
806 	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);
807 
808 	stat = i2c_dw_read_clear_intrbits(dev);
809 
810 	if (!(dev->status & STATUS_ACTIVE)) {
811 		/*
812 		 * Unexpected interrupt from the driver's point of view. The
813 		 * state variables are either unset or stale, so acknowledge and
814 		 * disable interrupts to suppress any further ones in case the
815 		 * interrupt really came from this HW (e.g. firmware has left
816 		 * the HW active).
817 		 */
818 		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
819 		return IRQ_HANDLED;
820 	}
821 
822 	if (stat & DW_IC_INTR_TX_ABRT) {
823 		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
824 		dev->status &= ~STATUS_MASK;
825 		dev->rx_outstanding = 0;
826 
827 		/*
828 		 * Anytime TX_ABRT is set, the contents of the tx/rx
829 		 * buffers are flushed. Make sure to skip them.
830 		 */
831 		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
832 		goto tx_aborted;
833 	}
834 
835 	if (stat & DW_IC_INTR_RX_FULL)
836 		i2c_dw_read(dev);
837 
838 	if (stat & DW_IC_INTR_TX_EMPTY)
839 		i2c_dw_xfer_msg(dev);
840 
841 	/*
842 	 * No need to modify or disable the interrupt mask here.
843 	 * i2c_dw_xfer_msg() will take care of it according to
844 	 * the current transmit status.
845 	 */
846 
847 tx_aborted:
848 	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
849 	     (dev->rx_outstanding == 0))
850 		complete(&dev->cmd_complete);
851 	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
852 		/* Workaround to trigger pending interrupt */
853 		regmap_read(dev->map, DW_IC_INTR_MASK, &stat);
854 		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
855 		regmap_write(dev->map, DW_IC_INTR_MASK, stat);
856 	}
857 
858 	return IRQ_HANDLED;
859 }
860 
861 void i2c_dw_configure_master(struct dw_i2c_dev *dev)
862 {
863 	struct i2c_timings *t = &dev->timings;
864 
865 	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
866 
867 	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
868 			  DW_IC_CON_RESTART_EN;
869 
870 	dev->mode = DW_IC_MASTER;
871 
872 	switch (t->bus_freq_hz) {
873 	case I2C_MAX_STANDARD_MODE_FREQ:
874 		dev->master_cfg |= DW_IC_CON_SPEED_STD;
875 		break;
876 	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
877 		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
878 		break;
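	/*
	 * Fast mode and fast mode plus share the same IC_CON speed setting;
	 * they differ only in the SCL timing parameters.
	 */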
879 	default:
880 		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
881 	}
882 }
883 EXPORT_SYMBOL_GPL(i2c_dw_configure_master);
884 
885 static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
886 {
887 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
888 
889 	i2c_dw_disable(dev);
890 	reset_control_assert(dev->rst);
891 	i2c_dw_prepare_clk(dev, false);
892 }
893 
894 static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
895 {
896 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
897 
898 	i2c_dw_prepare_clk(dev, true);
899 	reset_control_deassert(dev->rst);
900 	i2c_dw_init_master(dev);
901 }
902 
903 static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
904 {
905 	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
906 	struct i2c_adapter *adap = &dev->adapter;
907 	struct gpio_desc *gpio;
908 
909 	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
910 	if (IS_ERR_OR_NULL(gpio))
911 		return PTR_ERR_OR_ZERO(gpio);
912 
913 	rinfo->scl_gpiod = gpio;
914 
915 	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
916 	if (IS_ERR(gpio))
917 		return PTR_ERR(gpio);
918 	rinfo->sda_gpiod = gpio;
919 
920 	rinfo->recover_bus = i2c_generic_scl_recovery;
921 	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
922 	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
923 	adap->bus_recovery_info = rinfo;
924 
925 	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
926 		 rinfo->sda_gpiod ? ",sda" : "");
927 
928 	return 0;
929 }
930 
931 static int i2c_dw_poll_adap_quirk(struct dw_i2c_dev *dev)
932 {
933 	struct i2c_adapter *adap = &dev->adapter;
934 	int ret;
935 
936 	pm_runtime_get_noresume(dev->dev);
937 	ret = i2c_add_numbered_adapter(adap);
938 	if (ret)
939 		dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
940 	pm_runtime_put_noidle(dev->dev);
941 
942 	return ret;
943 }
944 
945 static bool i2c_dw_is_model_poll(struct dw_i2c_dev *dev)
946 {
947 	switch (dev->flags & MODEL_MASK) {
948 	case MODEL_AMD_NAVI_GPU:
949 	case MODEL_WANGXUN_SP:
950 		return true;
951 	default:
952 		return false;
953 	}
954 }
955 
956 int i2c_dw_probe_master(struct dw_i2c_dev *dev)
957 {
958 	struct i2c_adapter *adap = &dev->adapter;
959 	unsigned long irq_flags;
960 	unsigned int ic_con;
961 	int ret;
962 
963 	init_completion(&dev->cmd_complete);
964 
965 	dev->init = i2c_dw_init_master;
966 	dev->disable = i2c_dw_disable;
967 
968 	ret = i2c_dw_init_regmap(dev);
969 	if (ret)
970 		return ret;
971 
972 	ret = i2c_dw_set_timings_master(dev);
973 	if (ret)
974 		return ret;
975 
976 	ret = i2c_dw_set_fifo_size(dev);
977 	if (ret)
978 		return ret;
979 
980 	/* Lock the bus for accessing DW_IC_CON */
981 	ret = i2c_dw_acquire_lock(dev);
982 	if (ret)
983 		return ret;
984 
985 	/*
986 	 * On AMD platforms the BIOS advertises the bus clear feature and
987 	 * enables SCL/SDA stuck-low detection; the SMU firmware then performs
988 	 * the bus recovery. The driver should not ignore this BIOS
989 	 * advertisement of the bus clear feature.
990 	 */
991 	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
992 	i2c_dw_release_lock(dev);
993 	if (ret)
994 		return ret;
995 
996 	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
997 		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;
998 
999 	ret = dev->init(dev);
1000 	if (ret)
1001 		return ret;
1002 
1003 	snprintf(adap->name, sizeof(adap->name),
1004 		 "Synopsys DesignWare I2C adapter");
1005 	adap->retries = 3;
1006 	adap->algo = &i2c_dw_algo;
1007 	adap->quirks = &i2c_dw_quirks;
1008 	adap->dev.parent = dev->dev;
1009 	i2c_set_adapdata(adap, dev);
1010 
1011 	if (i2c_dw_is_model_poll(dev))
1012 		return i2c_dw_poll_adap_quirk(dev);
1013 
1014 	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
1015 		irq_flags = IRQF_NO_SUSPEND;
1016 	} else {
1017 		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
1018 	}
1019 
1020 	ret = i2c_dw_acquire_lock(dev);
1021 	if (ret)
1022 		return ret;
1023 
1024 	regmap_write(dev->map, DW_IC_INTR_MASK, 0);
1025 	i2c_dw_release_lock(dev);
1026 
1027 	ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
1028 			       dev_name(dev->dev), dev);
1029 	if (ret) {
1030 		dev_err(dev->dev, "failure requesting irq %i: %d\n",
1031 			dev->irq, ret);
1032 		return ret;
1033 	}
1034 
1035 	ret = i2c_dw_init_recovery_info(dev);
1036 	if (ret)
1037 		return ret;
1038 
1039 	/*
1040 	 * Increment PM usage count during adapter registration in order to
1041 	 * avoid possible spurious runtime suspend when adapter device is
1042 	 * registered to the device core and immediate resume in case bus has
1043 	 * registered I2C slaves that do I2C transfers in their probe.
1044 	 */
1045 	pm_runtime_get_noresume(dev->dev);
1046 	ret = i2c_add_numbered_adapter(adap);
1047 	if (ret)
1048 		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
1049 	pm_runtime_put_noidle(dev->dev);
1050 
1051 	return ret;
1052 }
1053 EXPORT_SYMBOL_GPL(i2c_dw_probe_master);
1054 
1055 MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
1056 MODULE_LICENSE("GPL");
1057