1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Aspeed 24XX/25XX I2C Controller.
4  *
5  *  Copyright (C) 2012-2017 ASPEED Technology Inc.
6  *  Copyright 2017 IBM Corporation
7  *  Copyright 2017 Google, Inc.
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/completion.h>
12 #include <linux/err.h>
13 #include <linux/errno.h>
14 #include <linux/i2c.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/irq.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/of_address.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_platform.h>
24 #include <linux/platform_device.h>
25 #include <linux/reset.h>
26 #include <linux/slab.h>
27 
28 /* I2C Register */
29 #define ASPEED_I2C_FUN_CTRL_REG				0x00
30 #define ASPEED_I2C_AC_TIMING_REG1			0x04
31 #define ASPEED_I2C_AC_TIMING_REG2			0x08
32 #define ASPEED_I2C_INTR_CTRL_REG			0x0c
33 #define ASPEED_I2C_INTR_STS_REG				0x10
34 #define ASPEED_I2C_CMD_REG				0x14
35 #define ASPEED_I2C_DEV_ADDR_REG				0x18
36 #define ASPEED_I2C_BYTE_BUF_REG				0x20
37 
38 /* Global Register Definition */
39 /* 0x00 : I2C Interrupt Status Register  */
40 /* 0x08 : I2C Interrupt Target Assignment  */
41 
42 /* Device Register Definition */
43 /* 0x00 : I2CD Function Control Register  */
44 #define ASPEED_I2CD_MULTI_MASTER_DIS			BIT(15)
45 #define ASPEED_I2CD_SDA_DRIVE_1T_EN			BIT(8)
46 #define ASPEED_I2CD_M_SDA_DRIVE_1T_EN			BIT(7)
47 #define ASPEED_I2CD_M_HIGH_SPEED_EN			BIT(6)
48 #define ASPEED_I2CD_SLAVE_EN				BIT(1)
49 #define ASPEED_I2CD_MASTER_EN				BIT(0)
50 
51 /* 0x04 : I2CD Clock and AC Timing Control Register #1 */
52 #define ASPEED_I2CD_TIME_TBUF_MASK			GENMASK(31, 28)
53 #define ASPEED_I2CD_TIME_THDSTA_MASK			GENMASK(27, 24)
54 #define ASPEED_I2CD_TIME_TACST_MASK			GENMASK(23, 20)
55 #define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT			16
56 #define ASPEED_I2CD_TIME_SCL_HIGH_MASK			GENMASK(19, 16)
57 #define ASPEED_I2CD_TIME_SCL_LOW_SHIFT			12
58 #define ASPEED_I2CD_TIME_SCL_LOW_MASK			GENMASK(15, 12)
59 #define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK		GENMASK(3, 0)
60 #define ASPEED_I2CD_TIME_SCL_REG_MAX			GENMASK(3, 0)
61 /* 0x08 : I2CD Clock and AC Timing Control Register #2 */
62 #define ASPEED_NO_TIMEOUT_CTRL				0
63 
64 /* 0x0c : I2CD Interrupt Control Register &
65  * 0x10 : I2CD Interrupt Status Register
66  *
67  * These share bit definitions, so use the same values for the enable &
68  * status bits.
69  */
70 #define ASPEED_I2CD_INTR_RECV_MASK			0xf000ffff
71 #define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT			BIT(14)
72 #define ASPEED_I2CD_INTR_BUS_RECOVER_DONE		BIT(13)
73 #define ASPEED_I2CD_INTR_SLAVE_MATCH			BIT(7)
74 #define ASPEED_I2CD_INTR_SCL_TIMEOUT			BIT(6)
75 #define ASPEED_I2CD_INTR_ABNORMAL			BIT(5)
76 #define ASPEED_I2CD_INTR_NORMAL_STOP			BIT(4)
77 #define ASPEED_I2CD_INTR_ARBIT_LOSS			BIT(3)
78 #define ASPEED_I2CD_INTR_RX_DONE			BIT(2)
79 #define ASPEED_I2CD_INTR_TX_NAK				BIT(1)
80 #define ASPEED_I2CD_INTR_TX_ACK				BIT(0)
81 #define ASPEED_I2CD_INTR_MASTER_ERRORS					       \
82 		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |			       \
83 		 ASPEED_I2CD_INTR_SCL_TIMEOUT |				       \
84 		 ASPEED_I2CD_INTR_ABNORMAL |				       \
85 		 ASPEED_I2CD_INTR_ARBIT_LOSS)
86 #define ASPEED_I2CD_INTR_ALL						       \
87 		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |			       \
88 		 ASPEED_I2CD_INTR_BUS_RECOVER_DONE |			       \
89 		 ASPEED_I2CD_INTR_SCL_TIMEOUT |				       \
90 		 ASPEED_I2CD_INTR_ABNORMAL |				       \
91 		 ASPEED_I2CD_INTR_NORMAL_STOP |				       \
92 		 ASPEED_I2CD_INTR_ARBIT_LOSS |				       \
93 		 ASPEED_I2CD_INTR_RX_DONE |				       \
94 		 ASPEED_I2CD_INTR_TX_NAK |				       \
95 		 ASPEED_I2CD_INTR_TX_ACK)
96 
97 /* 0x14 : I2CD Command/Status Register   */
98 #define ASPEED_I2CD_SCL_LINE_STS			BIT(18)
99 #define ASPEED_I2CD_SDA_LINE_STS			BIT(17)
100 #define ASPEED_I2CD_BUS_BUSY_STS			BIT(16)
101 #define ASPEED_I2CD_BUS_RECOVER_CMD			BIT(11)
102 
103 /* Command Bit */
104 #define ASPEED_I2CD_M_STOP_CMD				BIT(5)
105 #define ASPEED_I2CD_M_S_RX_CMD_LAST			BIT(4)
106 #define ASPEED_I2CD_M_RX_CMD				BIT(3)
107 #define ASPEED_I2CD_S_TX_CMD				BIT(2)
108 #define ASPEED_I2CD_M_TX_CMD				BIT(1)
109 #define ASPEED_I2CD_M_START_CMD				BIT(0)
110 #define ASPEED_I2CD_MASTER_CMDS_MASK					       \
111 		(ASPEED_I2CD_M_STOP_CMD |				       \
112 		 ASPEED_I2CD_M_S_RX_CMD_LAST |				       \
113 		 ASPEED_I2CD_M_RX_CMD |					       \
114 		 ASPEED_I2CD_M_TX_CMD |					       \
115 		 ASPEED_I2CD_M_START_CMD)
116 
117 /* 0x18 : I2CD Slave Device Address Register   */
118 #define ASPEED_I2CD_DEV_ADDR_MASK			GENMASK(6, 0)
119 
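/*
 * Master transfer states (a rough summary of the flow implemented in
 * aspeed_i2c_master_irq() below):
 *
 *	INACTIVE         no master transfer in progress
 *	PENDING          a start was requested while the slave side was active;
 *	                 the transfer is restarted from the interrupt handler
 *	                 once the bus goes idle again
 *	START            address byte sent, waiting for ACK/NACK
 *	TX_FIRST / TX    first / subsequent data byte of a write
 *	RX_FIRST / RX    first / subsequent data byte of a read
 *	STOP             STOP issued, waiting for the normal stop interrupt
 */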
120 enum aspeed_i2c_master_state {
121 	ASPEED_I2C_MASTER_INACTIVE,
122 	ASPEED_I2C_MASTER_PENDING,
123 	ASPEED_I2C_MASTER_START,
124 	ASPEED_I2C_MASTER_TX_FIRST,
125 	ASPEED_I2C_MASTER_TX,
126 	ASPEED_I2C_MASTER_RX_FIRST,
127 	ASPEED_I2C_MASTER_RX,
128 	ASPEED_I2C_MASTER_STOP,
129 };
130 
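/*
 * Slave states (a rough summary of the handling in aspeed_i2c_slave_irq()
 * below):
 *
 *	INACTIVE              not currently addressed by a remote master
 *	START                 address match seen, waiting for the address byte
 *	READ_REQUESTED /      remote master reads from us: first byte requested /
 *	READ_PROCESSED        previous byte sent, next byte requested
 *	WRITE_REQUESTED /     remote master writes to us: address phase done /
 *	WRITE_RECEIVED        a data byte has been received
 *	STOP                  STOP (or NAK of our last byte) seen, transfer done
 */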
131 enum aspeed_i2c_slave_state {
132 	ASPEED_I2C_SLAVE_INACTIVE,
133 	ASPEED_I2C_SLAVE_START,
134 	ASPEED_I2C_SLAVE_READ_REQUESTED,
135 	ASPEED_I2C_SLAVE_READ_PROCESSED,
136 	ASPEED_I2C_SLAVE_WRITE_REQUESTED,
137 	ASPEED_I2C_SLAVE_WRITE_RECEIVED,
138 	ASPEED_I2C_SLAVE_STOP,
139 };
140 
141 struct aspeed_i2c_bus {
142 	struct i2c_adapter		adap;
143 	struct device			*dev;
144 	void __iomem			*base;
145 	struct reset_control		*rst;
146 	/* Synchronizes I/O mem access to base. */
147 	spinlock_t			lock;
148 	struct completion		cmd_complete;
149 	u32				(*get_clk_reg_val)(struct device *dev,
150 							   u32 divisor);
151 	unsigned long			parent_clk_frequency;
152 	u32				bus_frequency;
153 	/* Transaction state. */
154 	enum aspeed_i2c_master_state	master_state;
155 	struct i2c_msg			*msgs;
156 	size_t				buf_index;
157 	size_t				msgs_index;
158 	size_t				msgs_count;
159 	bool				send_stop;
160 	int				cmd_err;
161 	/* Protected only by i2c_lock_bus */
162 	int				master_xfer_result;
163 	/* Multi-master */
164 	bool				multi_master;
165 #if IS_ENABLED(CONFIG_I2C_SLAVE)
166 	struct i2c_client		*slave;
167 	enum aspeed_i2c_slave_state	slave_state;
168 #endif /* CONFIG_I2C_SLAVE */
169 };
170 
171 static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
172 
173 static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
174 {
175 	unsigned long time_left, flags;
176 	int ret = 0;
177 	u32 command;
178 
179 	spin_lock_irqsave(&bus->lock, flags);
180 	command = readl(bus->base + ASPEED_I2C_CMD_REG);
181 
182 	if (command & ASPEED_I2CD_SDA_LINE_STS) {
183 		/* Bus is idle: no recovery needed. */
184 		if (command & ASPEED_I2CD_SCL_LINE_STS)
185 			goto out;
186 		dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n",
187 			command);
188 
189 		reinit_completion(&bus->cmd_complete);
190 		writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
191 		spin_unlock_irqrestore(&bus->lock, flags);
192 
193 		time_left = wait_for_completion_timeout(
194 				&bus->cmd_complete, bus->adap.timeout);
195 
196 		spin_lock_irqsave(&bus->lock, flags);
197 		if (time_left == 0)
198 			goto reset_out;
199 		else if (bus->cmd_err)
200 			goto reset_out;
201 		/* Recovery failed. */
202 		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
203 			   ASPEED_I2CD_SCL_LINE_STS))
204 			goto reset_out;
205 	/* Bus error. */
206 	} else {
207 		dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n",
208 			command);
209 
210 		reinit_completion(&bus->cmd_complete);
211 		/* Drives 1 to 8 SCL clock cycles until SDA is released. */
212 		writel(ASPEED_I2CD_BUS_RECOVER_CMD,
213 		       bus->base + ASPEED_I2C_CMD_REG);
214 		spin_unlock_irqrestore(&bus->lock, flags);
215 
216 		time_left = wait_for_completion_timeout(
217 				&bus->cmd_complete, bus->adap.timeout);
218 
219 		spin_lock_irqsave(&bus->lock, flags);
220 		if (time_left == 0)
221 			goto reset_out;
222 		else if (bus->cmd_err)
223 			goto reset_out;
224 		/* Recovery failed. */
225 		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
226 			   ASPEED_I2CD_SDA_LINE_STS))
227 			goto reset_out;
228 	}
229 
230 out:
231 	spin_unlock_irqrestore(&bus->lock, flags);
232 
233 	return ret;
234 
235 reset_out:
236 	spin_unlock_irqrestore(&bus->lock, flags);
237 
238 	return aspeed_i2c_reset(bus);
239 }
240 
241 #if IS_ENABLED(CONFIG_I2C_SLAVE)
242 static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
243 {
244 	u32 command, irq_handled = 0;
245 	struct i2c_client *slave = bus->slave;
246 	u8 value;
247 	int ret;
248 
249 	if (!slave)
250 		return 0;
251 
252 	/*
253 	 * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
254 	 * transfers with low enough latency between the nak/stop phase of the current
255 	 * command and the start/address phase of the following command that the
256 	 * interrupts are coalesced by the time we process them.
257 	 */
258 	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
259 		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
260 		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
261 	}
262 
263 	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
264 	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
265 		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
266 		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
267 	}
268 
269 	/* Propagate any stop conditions to the slave implementation. */
270 	if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
271 		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
272 		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
273 	}
274 
275 	/*
276 	 * Now that we've dealt with any potentially coalesced stop conditions,
277 	 * address any start conditions.
278 	 */
279 	if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
280 		irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
281 		bus->slave_state = ASPEED_I2C_SLAVE_START;
282 	}
283 
284 	/*
285 	 * If the slave has been stopped and not started then slave interrupt
286 	 * handling is complete.
287 	 */
288 	if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
289 		return irq_handled;
290 
291 	command = readl(bus->base + ASPEED_I2C_CMD_REG);
292 	dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
293 		irq_status, command);
294 
295 	/* Slave was sent something. */
296 	if (irq_status & ASPEED_I2CD_INTR_RX_DONE) {
297 		value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
298 		/* Handle address frame. */
299 		if (bus->slave_state == ASPEED_I2C_SLAVE_START) {
300 			if (value & 0x1)
301 				bus->slave_state =
302 						ASPEED_I2C_SLAVE_READ_REQUESTED;
303 			else
304 				bus->slave_state =
305 						ASPEED_I2C_SLAVE_WRITE_REQUESTED;
306 		}
307 		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
308 	}
309 
310 	switch (bus->slave_state) {
311 	case ASPEED_I2C_SLAVE_READ_REQUESTED:
312 		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
313 			dev_err(bus->dev, "Unexpected ACK on read request.\n");
314 		bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED;
315 		i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
316 		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
317 		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
318 		break;
319 	case ASPEED_I2C_SLAVE_READ_PROCESSED:
320 		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
321 			dev_err(bus->dev,
322 				"Expected ACK after processed read.\n");
323 			break;
324 		}
325 		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
326 		i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
327 		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
328 		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
329 		break;
330 	case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
331 		bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
332 		ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
333 		/*
334 		 * The slave has already ACKed this address phase, but since the backend
335 		 * driver returned an error, the bus driver should NACK the next incoming byte.
336 		 */
337 		if (ret < 0)
338 			writel(ASPEED_I2CD_M_S_RX_CMD_LAST, bus->base + ASPEED_I2C_CMD_REG);
339 		break;
340 	case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
341 		i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
342 		break;
343 	case ASPEED_I2C_SLAVE_STOP:
344 		/* Stop event handling is done early. Unreachable. */
345 		break;
346 	case ASPEED_I2C_SLAVE_START:
347 		/* Slave was just started. Waiting for the next event. */
348 		break;
349 	default:
350 		dev_err(bus->dev, "unknown slave_state: %d\n",
351 			bus->slave_state);
352 		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
353 		break;
354 	}
355 
356 	return irq_handled;
357 }
358 #endif /* CONFIG_I2C_SLAVE */
359 
360 /* precondition: bus.lock has been acquired. */
361 static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
362 {
363 	u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD;
364 	struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
365 	u8 slave_addr = i2c_8bit_addr_from_msg(msg);
366 
367 #if IS_ENABLED(CONFIG_I2C_SLAVE)
368 	/*
369 	 * If a master command is requested in the middle of a slave session,
370 	 * set the master state to 'pending'; the interrupt handler will start
371 	 * the command once the bus comes back to the idle state.
372 	 */
373 	if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
374 		bus->master_state = ASPEED_I2C_MASTER_PENDING;
375 		return;
376 	}
377 #endif /* CONFIG_I2C_SLAVE */
378 
379 	bus->master_state = ASPEED_I2C_MASTER_START;
380 	bus->buf_index = 0;
381 
382 	if (msg->flags & I2C_M_RD) {
383 		command |= ASPEED_I2CD_M_RX_CMD;
384 		/* Need to let the hardware know to NACK after RX. */
385 		if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
386 			command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
387 	}
388 
389 	writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG);
390 	writel(command, bus->base + ASPEED_I2C_CMD_REG);
391 }
392 
393 /* precondition: bus.lock has been acquired. */
394 static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
395 {
396 	bus->master_state = ASPEED_I2C_MASTER_STOP;
397 	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
398 }
399 
400 /* precondition: bus.lock has been acquired. */
401 static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
402 {
403 	if (bus->msgs_index + 1 < bus->msgs_count) {
404 		bus->msgs_index++;
405 		aspeed_i2c_do_start(bus);
406 	} else {
407 		aspeed_i2c_do_stop(bus);
408 	}
409 }
410 
411 static int aspeed_i2c_is_irq_error(u32 irq_status)
412 {
413 	if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS)
414 		return -EAGAIN;
415 	if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |
416 			  ASPEED_I2CD_INTR_SCL_TIMEOUT))
417 		return -EBUSY;
418 	if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL))
419 		return -EPROTO;
420 
421 	return 0;
422 }
423 
424 static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
425 {
426 	u32 irq_handled = 0, command = 0;
427 	struct i2c_msg *msg;
428 	u8 recv_byte;
429 	int ret;
430 
431 	if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) {
432 		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
433 		irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE;
434 		goto out_complete;
435 	}
436 
437 	/*
438 	 * We encountered an interrupt that reports an error: the hardware
439 	 * should clear the command queue, effectively taking us back to the
440 	 * INACTIVE state.
441 	 */
442 	ret = aspeed_i2c_is_irq_error(irq_status);
443 	if (ret) {
444 		dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
445 			irq_status);
446 		irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
447 		if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
448 			bus->cmd_err = ret;
449 			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
450 			goto out_complete;
451 		}
452 	}
453 
454 	/* Master is not currently active; irq was for someone else. */
455 	if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
456 	    bus->master_state == ASPEED_I2C_MASTER_PENDING)
457 		goto out_no_complete;
458 
459 	/* We are in an invalid state; reset bus to a known state. */
460 	if (!bus->msgs) {
461 		dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n",
462 			irq_status);
463 		bus->cmd_err = -EIO;
464 		if (bus->master_state != ASPEED_I2C_MASTER_STOP &&
465 		    bus->master_state != ASPEED_I2C_MASTER_INACTIVE)
466 			aspeed_i2c_do_stop(bus);
467 		goto out_no_complete;
468 	}
469 	msg = &bus->msgs[bus->msgs_index];
470 
471 	/*
472 	 * START is a special case because we still have to handle a subsequent
473 	 * TX or RX immediately after we handle it, so we handle it here and
474 	 * then update the state and handle the new state below.
475 	 */
476 	if (bus->master_state == ASPEED_I2C_MASTER_START) {
477 #if IS_ENABLED(CONFIG_I2C_SLAVE)
478 		/*
479 		 * If a peer master starts a transfer immediately after this
480 		 * controller queues a master command, clear the queued command
481 		 * and change the state to 'pending'. To keep the pending cases
482 		 * simple, this is handled in software rather than through the
483 		 * hardware command queue.
484 		 */
485 		if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
486 			writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
487 				~ASPEED_I2CD_MASTER_CMDS_MASK,
488 			       bus->base + ASPEED_I2C_CMD_REG);
489 			bus->master_state = ASPEED_I2C_MASTER_PENDING;
490 			dev_dbg(bus->dev,
491 				"master goes pending due to a slave start\n");
492 			goto out_no_complete;
493 		}
494 #endif /* CONFIG_I2C_SLAVE */
495 		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
496 			if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) {
497 				bus->cmd_err = -ENXIO;
498 				bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
499 				goto out_complete;
500 			}
501 			pr_devel("no slave present at %02x\n", msg->addr);
502 			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
503 			bus->cmd_err = -ENXIO;
504 			aspeed_i2c_do_stop(bus);
505 			goto out_no_complete;
506 		}
507 		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
508 		if (msg->len == 0) { /* SMBUS_QUICK */
509 			aspeed_i2c_do_stop(bus);
510 			goto out_no_complete;
511 		}
512 		if (msg->flags & I2C_M_RD)
513 			bus->master_state = ASPEED_I2C_MASTER_RX_FIRST;
514 		else
515 			bus->master_state = ASPEED_I2C_MASTER_TX_FIRST;
516 	}
517 
518 	switch (bus->master_state) {
519 	case ASPEED_I2C_MASTER_TX:
520 		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
521 			dev_dbg(bus->dev, "slave NACKed TX\n");
522 			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
523 			goto error_and_stop;
524 		} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
525 			dev_err(bus->dev, "slave failed to ACK TX\n");
526 			goto error_and_stop;
527 		}
528 		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
529 		fallthrough;
530 	case ASPEED_I2C_MASTER_TX_FIRST:
531 		if (bus->buf_index < msg->len) {
532 			bus->master_state = ASPEED_I2C_MASTER_TX;
533 			writel(msg->buf[bus->buf_index++],
534 			       bus->base + ASPEED_I2C_BYTE_BUF_REG);
535 			writel(ASPEED_I2CD_M_TX_CMD,
536 			       bus->base + ASPEED_I2C_CMD_REG);
537 		} else {
538 			aspeed_i2c_next_msg_or_stop(bus);
539 		}
540 		goto out_no_complete;
541 	case ASPEED_I2C_MASTER_RX_FIRST:
542 		/* RX may not have completed yet (only address cycle) */
543 		if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
544 			goto out_no_complete;
545 		fallthrough;
546 	case ASPEED_I2C_MASTER_RX:
547 		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
548 			dev_err(bus->dev, "master failed to RX\n");
549 			goto error_and_stop;
550 		}
551 		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
552 
553 		recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
554 		msg->buf[bus->buf_index++] = recv_byte;
555 
556 		if (msg->flags & I2C_M_RECV_LEN) {
557 			if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
558 				bus->cmd_err = -EPROTO;
559 				aspeed_i2c_do_stop(bus);
560 				goto out_no_complete;
561 			}
562 			msg->len = recv_byte +
563 					((msg->flags & I2C_CLIENT_PEC) ? 2 : 1);
564 			msg->flags &= ~I2C_M_RECV_LEN;
565 		}
566 
567 		if (bus->buf_index < msg->len) {
568 			bus->master_state = ASPEED_I2C_MASTER_RX;
569 			command = ASPEED_I2CD_M_RX_CMD;
570 			if (bus->buf_index + 1 == msg->len)
571 				command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
572 			writel(command, bus->base + ASPEED_I2C_CMD_REG);
573 		} else {
574 			aspeed_i2c_next_msg_or_stop(bus);
575 		}
576 		goto out_no_complete;
577 	case ASPEED_I2C_MASTER_STOP:
578 		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
579 			dev_err(bus->dev,
580 				"master failed to STOP. irq_status:0x%x\n",
581 				irq_status);
582 			bus->cmd_err = -EIO;
583 			/* Do not STOP as we have already tried. */
584 		} else {
585 			irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
586 		}
587 
588 		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
589 		goto out_complete;
590 	case ASPEED_I2C_MASTER_INACTIVE:
591 		dev_err(bus->dev,
592 			"master received interrupt 0x%08x, but is inactive\n",
593 			irq_status);
594 		bus->cmd_err = -EIO;
595 		/* Do not STOP as we should be inactive. */
596 		goto out_complete;
597 	default:
598 		WARN(1, "unknown master state\n");
599 		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
600 		bus->cmd_err = -EINVAL;
601 		goto out_complete;
602 	}
603 error_and_stop:
604 	bus->cmd_err = -EIO;
605 	aspeed_i2c_do_stop(bus);
606 	goto out_no_complete;
607 out_complete:
608 	bus->msgs = NULL;
609 	if (bus->cmd_err)
610 		bus->master_xfer_result = bus->cmd_err;
611 	else
612 		bus->master_xfer_result = bus->msgs_index + 1;
613 	complete(&bus->cmd_complete);
614 out_no_complete:
615 	return irq_handled;
616 }
617 
618 static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
619 {
620 	struct aspeed_i2c_bus *bus = dev_id;
621 	u32 irq_received, irq_remaining, irq_handled;
622 
623 	spin_lock(&bus->lock);
624 	irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
625 	/* Ack all interrupts except for Rx done */
626 	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
627 	       bus->base + ASPEED_I2C_INTR_STS_REG);
628 	readl(bus->base + ASPEED_I2C_INTR_STS_REG);
629 	irq_received &= ASPEED_I2CD_INTR_RECV_MASK;
630 	irq_remaining = irq_received;
631 
632 #if IS_ENABLED(CONFIG_I2C_SLAVE)
633 	/*
634 	 * In most cases, interrupt bits will be set one by one, although
635 	 * multiple interrupt bits could be set at the same time. It's also
636 	 * possible that master interrupt bits could be set along with slave
637 	 * interrupt bits. Each case needs to be handled using corresponding
638 	 * handlers depending on the current state.
639 	 */
640 	if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE &&
641 	    bus->master_state != ASPEED_I2C_MASTER_PENDING) {
642 		irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
643 		irq_remaining &= ~irq_handled;
644 		if (irq_remaining)
645 			irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining);
646 	} else {
647 		irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
648 		irq_remaining &= ~irq_handled;
649 		if (irq_remaining)
650 			irq_handled |= aspeed_i2c_master_irq(bus,
651 							     irq_remaining);
652 	}
653 
654 	/*
655 	 * Start a pending master command here once the slave operation has
656 	 * completed.
657 	 */
658 	if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
659 	    bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
660 		aspeed_i2c_do_start(bus);
661 #else
662 	irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
663 #endif /* CONFIG_I2C_SLAVE */
664 
665 	irq_remaining &= ~irq_handled;
666 	if (irq_remaining)
667 		dev_err(bus->dev,
668 			"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
669 			irq_received, irq_handled);
670 
671 	/* Ack Rx done */
672 	if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
673 		writel(ASPEED_I2CD_INTR_RX_DONE,
674 		       bus->base + ASPEED_I2C_INTR_STS_REG);
675 		readl(bus->base + ASPEED_I2C_INTR_STS_REG);
676 	}
677 	spin_unlock(&bus->lock);
678 	return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
679 }
680 
681 static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
682 				  struct i2c_msg *msgs, int num)
683 {
684 	struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap);
685 	unsigned long time_left, flags;
686 
687 	spin_lock_irqsave(&bus->lock, flags);
688 	bus->cmd_err = 0;
689 
690 	/* If bus is busy in a single master environment, attempt recovery. */
691 	if (!bus->multi_master &&
692 	    (readl(bus->base + ASPEED_I2C_CMD_REG) &
693 	     ASPEED_I2CD_BUS_BUSY_STS)) {
694 		int ret;
695 
696 		spin_unlock_irqrestore(&bus->lock, flags);
697 		ret = aspeed_i2c_recover_bus(bus);
698 		if (ret)
699 			return ret;
700 		spin_lock_irqsave(&bus->lock, flags);
701 	}
702 
703 	bus->cmd_err = 0;
704 	bus->msgs = msgs;
705 	bus->msgs_index = 0;
706 	bus->msgs_count = num;
707 
708 	reinit_completion(&bus->cmd_complete);
709 	aspeed_i2c_do_start(bus);
710 	spin_unlock_irqrestore(&bus->lock, flags);
711 
712 	time_left = wait_for_completion_timeout(&bus->cmd_complete,
713 						bus->adap.timeout);
714 
715 	if (time_left == 0) {
716 		/*
717 		 * In a multi-master setup, if a timeout occurs, attempt
718 		 * recovery. But if the bus is idle, we still need to reset the
719 		 * i2c controller to clear the remaining interrupts.
720 		 */
721 		if (bus->multi_master &&
722 		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
723 		     ASPEED_I2CD_BUS_BUSY_STS))
724 			aspeed_i2c_recover_bus(bus);
725 		else
726 			aspeed_i2c_reset(bus);
727 
728 		/*
729 		 * If timed out and the state is still pending, drop the pending
730 		 * master command.
731 		 */
732 		spin_lock_irqsave(&bus->lock, flags);
733 		if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
734 			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
735 		spin_unlock_irqrestore(&bus->lock, flags);
736 
737 		return -ETIMEDOUT;
738 	}
739 
740 	return bus->master_xfer_result;
741 }
742 
743 static u32 aspeed_i2c_functionality(struct i2c_adapter *adap)
744 {
745 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
746 }
747 
748 #if IS_ENABLED(CONFIG_I2C_SLAVE)
749 /* precondition: bus.lock has been acquired. */
750 static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
751 {
752 	u32 addr_reg_val, func_ctrl_reg_val;
753 
754 	/*
755 	 * Set slave addr.  Reserved bits can all safely be written with zeros
756 	 * on all of ast2[456]00, so zero everything else to ensure we only
757 	 * enable a single slave address (ast2500 has two, ast2600 has three,
758 	 * the enable bits for which are also in this register) so that we don't
759 	 * end up with additional phantom devices responding on the bus.
760 	 */
761 	addr_reg_val = slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
762 	writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);
763 
764 	/* Turn on slave mode. */
765 	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
766 	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
767 	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
768 
769 	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
770 }
771 
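/*
 * This callback is reached through the generic i2c_slave_register() API. As
 * an illustrative sketch only (my_backend_cb is a hypothetical name), a slave
 * backend driver attaches roughly like this, filling *val with the byte to
 * send back on read events:
 *
 *	static int my_backend_cb(struct i2c_client *client,
 *				 enum i2c_slave_event event, u8 *val)
 *	{
 *		if (event == I2C_SLAVE_READ_REQUESTED ||
 *		    event == I2C_SLAVE_READ_PROCESSED)
 *			*val = 0;
 *		return 0;
 *	}
 *
 *	ret = i2c_slave_register(client, my_backend_cb);
 */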
772 static int aspeed_i2c_reg_slave(struct i2c_client *client)
773 {
774 	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
775 	unsigned long flags;
776 
777 	spin_lock_irqsave(&bus->lock, flags);
778 	if (bus->slave) {
779 		spin_unlock_irqrestore(&bus->lock, flags);
780 		return -EINVAL;
781 	}
782 
783 	__aspeed_i2c_reg_slave(bus, client->addr);
784 
785 	bus->slave = client;
786 	spin_unlock_irqrestore(&bus->lock, flags);
787 
788 	return 0;
789 }
790 
791 static int aspeed_i2c_unreg_slave(struct i2c_client *client)
792 {
793 	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
794 	u32 func_ctrl_reg_val;
795 	unsigned long flags;
796 
797 	spin_lock_irqsave(&bus->lock, flags);
798 	if (!bus->slave) {
799 		spin_unlock_irqrestore(&bus->lock, flags);
800 		return -EINVAL;
801 	}
802 
803 	/* Turn off slave mode. */
804 	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
805 	func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN;
806 	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
807 
808 	bus->slave = NULL;
809 	spin_unlock_irqrestore(&bus->lock, flags);
810 
811 	return 0;
812 }
813 #endif /* CONFIG_I2C_SLAVE */
814 
815 static const struct i2c_algorithm aspeed_i2c_algo = {
816 	.master_xfer	= aspeed_i2c_master_xfer,
817 	.functionality	= aspeed_i2c_functionality,
818 #if IS_ENABLED(CONFIG_I2C_SLAVE)
819 	.reg_slave	= aspeed_i2c_reg_slave,
820 	.unreg_slave	= aspeed_i2c_unreg_slave,
821 #endif /* CONFIG_I2C_SLAVE */
822 };
823 
824 static u32 aspeed_i2c_get_clk_reg_val(struct device *dev,
825 				      u32 clk_high_low_mask,
826 				      u32 divisor)
827 {
828 	u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp;
829 
830 	/*
831 	 * SCL_high and SCL_low represent a value 1 greater than what is stored
832 	 * since a zero divider is meaningless. Thus, the max value each can
833 	 * store is every bit set + 1. Since SCL_high and SCL_low are added
834 	 * together (see below), the max value of both combined is twice the
835 	 * max value of one of them.
836 	 */
837 	clk_high_low_max = (clk_high_low_mask + 1) * 2;
838 
839 	/*
840 	 * The actual clock frequency of SCL is:
841 	 *	SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low))
842 	 *		 = APB_freq / divisor
843 	 * where base_freq is a programmable clock divider; its value is
844 	 *	base_freq = 1 << base_clk_divisor
845 	 * SCL_high is the number of base_freq clock cycles that SCL stays high
846 	 * and SCL_low is the number of base_freq clock cycles that SCL stays
847 	 * low for a period of SCL.
848 	 * The actual register encodes SCL_high and SCL_low with a minimum of 1;
849 	 * thus, the stored values start counting at zero. So
850 	 *	SCL_high = clk_high + 1
851 	 *	SCL_low	 = clk_low + 1
852 	 * Thus,
853 	 *	SCL_freq = APB_freq /
854 	 *		((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1))
855 	 * The documentation recommends clk_high >= clk_high_max / 2 and
856 	 * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint
857 	 * gives us the following solution:
858 	 */
859 	base_clk_divisor = divisor > clk_high_low_max ?
860 			ilog2((divisor - 1) / clk_high_low_max) + 1 : 0;
861 
862 	if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) {
863 		base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK;
864 		clk_low = clk_high_low_mask;
865 		clk_high = clk_high_low_mask;
866 		dev_err(dev,
867 			"clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n",
868 			divisor, (1 << base_clk_divisor) * clk_high_low_max);
869 	} else {
870 		tmp = (divisor + (1 << base_clk_divisor) - 1)
871 				>> base_clk_divisor;
872 		clk_low = tmp / 2;
873 		clk_high = tmp - clk_low;
874 
875 		if (clk_high)
876 			clk_high--;
877 
878 		if (clk_low)
879 			clk_low--;
880 	}
881 
882 
883 	return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT)
884 		& ASPEED_I2CD_TIME_SCL_HIGH_MASK)
885 			| ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT)
886 			   & ASPEED_I2CD_TIME_SCL_LOW_MASK)
887 			| (base_clk_divisor
888 			   & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK);
889 }
890 
891 static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor)
892 {
893 	/*
894 	 * clk_high and clk_low are each 3 bits wide, so each can hold a max
895 	 * value of 8 giving a clk_high_low_max of 16.
896 	 */
897 	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor);
898 }
899 
900 static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
901 {
902 	/*
903 	 * clk_high and clk_low are each 4 bits wide, so each can hold a max
904 	 * value of 16 giving a clk_high_low_max of 32.
905 	 */
906 	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor);
907 }
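/*
 * A rough worked example of the encoding above, assuming a hypothetical
 * 48 MHz APB clock and a requested 400 kHz bus on an ast2500-style
 * controller (4-bit clk_high/clk_low fields); the input values are
 * illustrative only:
 *
 *	divisor          = DIV_ROUND_UP(48000000, 400000)   = 120
 *	clk_high_low_max = (0xf + 1) * 2                     = 32
 *	base_clk_divisor = ilog2((120 - 1) / 32) + 1         = 2
 *	tmp              = (120 + (1 << 2) - 1) >> 2         = 30
 *	clk_low          = 30 / 2 - 1                        = 14
 *	clk_high         = (30 - 15) - 1                     = 14
 *
 * which yields SCL_freq = 48 MHz / ((1 << 2) * (15 + 15)) = 400 kHz and a
 * register value of (14 << 16) | (14 << 12) | 2.
 */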
908 
909 /* precondition: bus.lock has been acquired. */
910 static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
911 {
912 	u32 divisor, clk_reg_val;
913 
914 	divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency);
915 	clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1);
916 	clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK |
917 			ASPEED_I2CD_TIME_THDSTA_MASK |
918 			ASPEED_I2CD_TIME_TACST_MASK);
919 	clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor);
920 	writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1);
921 	writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2);
922 
923 	return 0;
924 }
925 
926 /* precondition: bus.lock has been acquired. */
927 static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
928 			     struct platform_device *pdev)
929 {
930 	u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN;
931 	int ret;
932 
933 	/* Disable everything. */
934 	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
935 
936 	ret = aspeed_i2c_init_clk(bus);
937 	if (ret < 0)
938 		return ret;
939 
940 	if (of_property_read_bool(pdev->dev.of_node, "multi-master"))
941 		bus->multi_master = true;
942 	else
943 		fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS;
944 
945 	/* Enable Master Mode */
946 	writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg,
947 	       bus->base + ASPEED_I2C_FUN_CTRL_REG);
948 
949 #if IS_ENABLED(CONFIG_I2C_SLAVE)
950 	/* If slave has already been registered, re-enable it. */
951 	if (bus->slave)
952 		__aspeed_i2c_reg_slave(bus, bus->slave->addr);
953 #endif /* CONFIG_I2C_SLAVE */
954 
955 	/* Set interrupt generation of I2C controller */
956 	writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);
957 
958 	return 0;
959 }
960 
961 static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus)
962 {
963 	struct platform_device *pdev = to_platform_device(bus->dev);
964 	unsigned long flags;
965 	int ret;
966 
967 	spin_lock_irqsave(&bus->lock, flags);
968 
969 	/* Disable and ack all interrupts. */
970 	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
971 	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
972 
973 	ret = aspeed_i2c_init(bus, pdev);
974 
975 	spin_unlock_irqrestore(&bus->lock, flags);
976 
977 	return ret;
978 }
979 
980 static const struct of_device_id aspeed_i2c_bus_of_table[] = {
981 	{
982 		.compatible = "aspeed,ast2400-i2c-bus",
983 		.data = aspeed_i2c_24xx_get_clk_reg_val,
984 	},
985 	{
986 		.compatible = "aspeed,ast2500-i2c-bus",
987 		.data = aspeed_i2c_25xx_get_clk_reg_val,
988 	},
989 	{
990 		.compatible = "aspeed,ast2600-i2c-bus",
991 		.data = aspeed_i2c_25xx_get_clk_reg_val,
992 	},
993 	{ },
994 };
995 MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);
996 
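/*
 * For reference, an illustrative (not authoritative) device tree node that
 * binds against this driver might look roughly as follows; the unit address,
 * phandles and frequency are made-up placeholders, and the device tree
 * binding documentation remains the authoritative source:
 *
 *	i2c-bus@40 {
 *		compatible = "aspeed,ast2500-i2c-bus";
 *		reg = <0x40 0x40>;
 *		interrupts = <0>;
 *		clocks = <&apb_clk>;
 *		resets = <&i2c_rst>;
 *		bus-frequency = <100000>;
 *		multi-master;
 *	};
 */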
997 static int aspeed_i2c_probe_bus(struct platform_device *pdev)
998 {
999 	const struct of_device_id *match;
1000 	struct aspeed_i2c_bus *bus;
1001 	struct clk *parent_clk;
1002 	int irq, ret;
1003 
1004 	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
1005 	if (!bus)
1006 		return -ENOMEM;
1007 
1008 	bus->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
1009 	if (IS_ERR(bus->base))
1010 		return PTR_ERR(bus->base);
1011 
1012 	parent_clk = devm_clk_get(&pdev->dev, NULL);
1013 	if (IS_ERR(parent_clk))
1014 		return PTR_ERR(parent_clk);
1015 	bus->parent_clk_frequency = clk_get_rate(parent_clk);
1016 	/* We just need the clock rate; we don't actually use the clk object. */
1017 	devm_clk_put(&pdev->dev, parent_clk);
1018 
1019 	bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
1020 	if (IS_ERR(bus->rst)) {
1021 		dev_err(&pdev->dev,
1022 			"missing or invalid reset controller device tree entry\n");
1023 		return PTR_ERR(bus->rst);
1024 	}
1025 	reset_control_deassert(bus->rst);
1026 
1027 	ret = of_property_read_u32(pdev->dev.of_node,
1028 				   "bus-frequency", &bus->bus_frequency);
1029 	if (ret < 0) {
1030 		dev_err(&pdev->dev,
1031 			"Could not read bus-frequency property\n");
1032 		bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
1033 	}
1034 
1035 	match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
1036 	if (!match)
1037 		bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
1038 	else
1039 		bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
1040 				match->data;
1041 
1042 	/* Initialize the I2C adapter */
1043 	spin_lock_init(&bus->lock);
1044 	init_completion(&bus->cmd_complete);
1045 	bus->adap.owner = THIS_MODULE;
1046 	bus->adap.retries = 0;
1047 	bus->adap.algo = &aspeed_i2c_algo;
1048 	bus->adap.dev.parent = &pdev->dev;
1049 	bus->adap.dev.of_node = pdev->dev.of_node;
1050 	strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
1051 	i2c_set_adapdata(&bus->adap, bus);
1052 
1053 	bus->dev = &pdev->dev;
1054 
1055 	/* Clean up any left over interrupt state. */
1056 	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
1057 	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
1058 	/*
1059 	 * bus.lock does not need to be held because the interrupt handler has
1060 	 * not been enabled yet.
1061 	 */
1062 	ret = aspeed_i2c_init(bus, pdev);
1063 	if (ret < 0)
1064 		return ret;
1065 
1066 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1067 	ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
1068 			       0, dev_name(&pdev->dev), bus);
1069 	if (ret < 0)
1070 		return ret;
1071 
1072 	ret = i2c_add_adapter(&bus->adap);
1073 	if (ret < 0)
1074 		return ret;
1075 
1076 	platform_set_drvdata(pdev, bus);
1077 
1078 	dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
1079 		 bus->adap.nr, irq);
1080 
1081 	return 0;
1082 }
1083 
1084 static void aspeed_i2c_remove_bus(struct platform_device *pdev)
1085 {
1086 	struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev);
1087 	unsigned long flags;
1088 
1089 	spin_lock_irqsave(&bus->lock, flags);
1090 
1091 	/* Disable everything. */
1092 	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
1093 	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
1094 
1095 	spin_unlock_irqrestore(&bus->lock, flags);
1096 
1097 	reset_control_assert(bus->rst);
1098 
1099 	i2c_del_adapter(&bus->adap);
1100 }
1101 
1102 static struct platform_driver aspeed_i2c_bus_driver = {
1103 	.probe		= aspeed_i2c_probe_bus,
1104 	.remove_new	= aspeed_i2c_remove_bus,
1105 	.driver		= {
1106 		.name		= "aspeed-i2c-bus",
1107 		.of_match_table	= aspeed_i2c_bus_of_table,
1108 	},
1109 };
1110 module_platform_driver(aspeed_i2c_bus_driver);
1111 
1112 MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>");
1113 MODULE_DESCRIPTION("Aspeed I2C Bus Driver");
1114 MODULE_LICENSE("GPL v2");
1115