// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aspeed 24XX/25XX I2C Controller.
 *
 * Copyright (C) 2012-2017 ASPEED Technology Inc.
 * Copyright 2017 IBM Corporation
 * Copyright 2017 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

/* I2C Register */
#define ASPEED_I2C_FUN_CTRL_REG	0x00
#define ASPEED_I2C_AC_TIMING_REG1	0x04
#define ASPEED_I2C_AC_TIMING_REG2	0x08
#define ASPEED_I2C_INTR_CTRL_REG	0x0c
#define ASPEED_I2C_INTR_STS_REG	0x10
#define ASPEED_I2C_CMD_REG	0x14
#define ASPEED_I2C_DEV_ADDR_REG	0x18
#define ASPEED_I2C_BYTE_BUF_REG	0x20

/* Global Register Definition */
/* 0x00 : I2C Interrupt Status Register */
/* 0x08 : I2C Interrupt Target Assignment */

/* Device Register Definition */
/* 0x00 : I2CD Function Control Register */
#define ASPEED_I2CD_MULTI_MASTER_DIS	BIT(15)
#define ASPEED_I2CD_SDA_DRIVE_1T_EN	BIT(8)
#define ASPEED_I2CD_M_SDA_DRIVE_1T_EN	BIT(7)
#define ASPEED_I2CD_M_HIGH_SPEED_EN	BIT(6)
#define ASPEED_I2CD_SLAVE_EN	BIT(1)
#define ASPEED_I2CD_MASTER_EN	BIT(0)

/* 0x04 : I2CD Clock and AC Timing Control Register #1 */
#define ASPEED_I2CD_TIME_TBUF_MASK	GENMASK(31, 28)
#define ASPEED_I2CD_TIME_THDSTA_MASK	GENMASK(27, 24)
#define ASPEED_I2CD_TIME_TACST_MASK	GENMASK(23, 20)
#define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT	16
#define ASPEED_I2CD_TIME_SCL_HIGH_MASK	GENMASK(19, 16)
#define ASPEED_I2CD_TIME_SCL_LOW_SHIFT	12
#define ASPEED_I2CD_TIME_SCL_LOW_MASK	GENMASK(15, 12)
#define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK	GENMASK(3, 0)
#define ASPEED_I2CD_TIME_SCL_REG_MAX	GENMASK(3, 0)
/* 0x08 : I2CD Clock and AC Timing Control Register #2 */
#define ASPEED_NO_TIMEOUT_CTRL	0

/* 0x0c : I2CD Interrupt Control Register &
 * 0x10 : I2CD Interrupt Status Register
 *
 * These share bit definitions, so use the same values for the enable &
 * status bits.
 */
#define ASPEED_I2CD_INTR_RECV_MASK	0xf000ffff
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT	BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE	BIT(13)
#define ASPEED_I2CD_INTR_SLAVE_MATCH	BIT(7)
#define ASPEED_I2CD_INTR_SCL_TIMEOUT	BIT(6)
#define ASPEED_I2CD_INTR_ABNORMAL	BIT(5)
#define ASPEED_I2CD_INTR_NORMAL_STOP	BIT(4)
#define ASPEED_I2CD_INTR_ARBIT_LOSS	BIT(3)
#define ASPEED_I2CD_INTR_RX_DONE	BIT(2)
#define ASPEED_I2CD_INTR_TX_NAK	BIT(1)
#define ASPEED_I2CD_INTR_TX_ACK	BIT(0)
#define ASPEED_I2CD_INTR_MASTER_ERRORS \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
		 ASPEED_I2CD_INTR_ABNORMAL | \
		 ASPEED_I2CD_INTR_ARBIT_LOSS)
#define ASPEED_I2CD_INTR_ALL \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
		 ASPEED_I2CD_INTR_BUS_RECOVER_DONE | \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
		 ASPEED_I2CD_INTR_ABNORMAL | \
		 ASPEED_I2CD_INTR_NORMAL_STOP | \
		 ASPEED_I2CD_INTR_ARBIT_LOSS | \
		 ASPEED_I2CD_INTR_RX_DONE | \
		 ASPEED_I2CD_INTR_TX_NAK | \
		 ASPEED_I2CD_INTR_TX_ACK)

/* 0x14 : I2CD Command/Status Register */
#define ASPEED_I2CD_SCL_LINE_STS	BIT(18)
#define ASPEED_I2CD_SDA_LINE_STS	BIT(17)
#define ASPEED_I2CD_BUS_BUSY_STS	BIT(16)
#define ASPEED_I2CD_BUS_RECOVER_CMD	BIT(11)

/* Command Bit */
#define ASPEED_I2CD_M_STOP_CMD	BIT(5)
#define ASPEED_I2CD_M_S_RX_CMD_LAST	BIT(4)
#define ASPEED_I2CD_M_RX_CMD	BIT(3)
#define ASPEED_I2CD_S_TX_CMD	BIT(2)
#define ASPEED_I2CD_M_TX_CMD	BIT(1)
#define ASPEED_I2CD_M_START_CMD	BIT(0)
#define ASPEED_I2CD_MASTER_CMDS_MASK \
		(ASPEED_I2CD_M_STOP_CMD | \
		 ASPEED_I2CD_M_S_RX_CMD_LAST | \
		 ASPEED_I2CD_M_RX_CMD | \
		 ASPEED_I2CD_M_TX_CMD | \
		 ASPEED_I2CD_M_START_CMD)

/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK	GENMASK(6, 0)

enum aspeed_i2c_master_state {
	ASPEED_I2C_MASTER_INACTIVE,
	ASPEED_I2C_MASTER_PENDING,
	ASPEED_I2C_MASTER_START,
	ASPEED_I2C_MASTER_TX_FIRST,
	ASPEED_I2C_MASTER_TX,
	ASPEED_I2C_MASTER_RX_FIRST,
	ASPEED_I2C_MASTER_RX,
	ASPEED_I2C_MASTER_STOP,
};

enum aspeed_i2c_slave_state {
	ASPEED_I2C_SLAVE_INACTIVE,
	ASPEED_I2C_SLAVE_START,
	ASPEED_I2C_SLAVE_READ_REQUESTED,
	ASPEED_I2C_SLAVE_READ_PROCESSED,
	ASPEED_I2C_SLAVE_WRITE_REQUESTED,
	ASPEED_I2C_SLAVE_WRITE_RECEIVED,
	ASPEED_I2C_SLAVE_STOP,
};
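
/*
 * Overview of the two state machines above (a descriptive summary of the
 * handlers below, for orientation only; not taken from the hardware
 * datasheet):
 *
 * Master: INACTIVE -> START (start + address queued) -> TX_FIRST/TX or
 * RX_FIRST/RX for the data bytes -> STOP (stop queued) -> INACTIVE. A
 * transfer requested while a slave session is in progress parks in PENDING
 * and is restarted from the interrupt handler once the slave side returns
 * to INACTIVE.
 *
 * Slave: INACTIVE -> START (address match) -> READ_REQUESTED/READ_PROCESSED
 * for master reads, or WRITE_REQUESTED/WRITE_RECEIVED for master writes ->
 * STOP (stop condition or NAK seen) -> INACTIVE.
 */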

struct aspeed_i2c_bus {
	struct i2c_adapter		adap;
	struct device			*dev;
	void __iomem			*base;
	struct reset_control		*rst;
	/* Synchronizes I/O mem access to base. */
	spinlock_t			lock;
	struct completion		cmd_complete;
	u32				(*get_clk_reg_val)(struct device *dev,
							   u32 divisor);
	unsigned long			parent_clk_frequency;
	u32				bus_frequency;
	/* Transaction state. */
	enum aspeed_i2c_master_state	master_state;
	struct i2c_msg			*msgs;
	size_t				buf_index;
	size_t				msgs_index;
	size_t				msgs_count;
	bool				send_stop;
	int				cmd_err;
	/* Protected only by i2c_lock_bus */
	int				master_xfer_result;
	/* Multi-master */
	bool				multi_master;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	struct i2c_client		*slave;
	enum aspeed_i2c_slave_state	slave_state;
#endif /* CONFIG_I2C_SLAVE */
};

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
{
	bus->master_state = ASPEED_I2C_MASTER_STOP;
	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
}

static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
{
	unsigned long time_left, flags;
	int ret = 0;
	u32 command;

	spin_lock_irqsave(&bus->lock, flags);
	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	if (command & ASPEED_I2CD_SDA_LINE_STS) {
		/* Bus is idle: no recovery needed. */
		if (command & ASPEED_I2CD_SCL_LINE_STS)
			goto out;
		dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		aspeed_i2c_do_stop(bus);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SCL_LINE_STS))
			goto reset_out;
	/* Bus error. */
	} else {
		dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		/* Writes 1 to 8 SCL clock cycles until SDA is released. */
		writel(ASPEED_I2CD_BUS_RECOVER_CMD,
		       bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SDA_LINE_STS))
			goto reset_out;
	}

out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;

reset_out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return aspeed_i2c_reset(bus);
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 command, irq_handled = 0;
	struct i2c_client *slave = bus->slave;
	u8 value;
	int ret;

	if (!slave)
		return 0;

	/*
	 * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
	 * transfers with low enough latency between the nak/stop phase of the current
	 * command and the start/address phase of the following command that the
	 * interrupts are coalesced by the time we process them.
	 */
	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}

	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}

	/* Propagate any stop conditions to the slave implementation. */
	if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
	}

	/*
	 * Now that we've dealt with any potentially coalesced stop conditions,
	 * address any start conditions.
	 */
	if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
		irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
		bus->slave_state = ASPEED_I2C_SLAVE_START;
	}

	/*
	 * If the slave has been stopped and not started then slave interrupt
	 * handling is complete.
	 */
	if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		return irq_handled;

	command = readl(bus->base + ASPEED_I2C_CMD_REG);
	dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
		irq_status, command);

	/* Slave was sent something. */
	if (irq_status & ASPEED_I2CD_INTR_RX_DONE) {
		value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		/* Handle address frame. */
		if (bus->slave_state == ASPEED_I2C_SLAVE_START) {
			if (value & 0x1)
				bus->slave_state =
						ASPEED_I2C_SLAVE_READ_REQUESTED;
			else
				bus->slave_state =
						ASPEED_I2C_SLAVE_WRITE_REQUESTED;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
	}

	switch (bus->slave_state) {
	case ASPEED_I2C_SLAVE_READ_REQUESTED:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
			dev_err(bus->dev, "Unexpected ACK on read request.\n");
		bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED;
		i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_READ_PROCESSED:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev,
				"Expected ACK after processed read.\n");
			break;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
		bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
		ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
		/*
		 * The slave has already ACKed this address phase, but since
		 * the backend driver returned an errno, the bus driver should
		 * NACK the next incoming byte.
		 */
		if (ret < 0)
			writel(ASPEED_I2CD_M_S_RX_CMD_LAST, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
		i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
		break;
	case ASPEED_I2C_SLAVE_STOP:
		/* Stop event handling is done early. Unreachable. */
		break;
	case ASPEED_I2C_SLAVE_START:
		/* Slave was just started. Waiting for the next event. */
		break;
	default:
		dev_err(bus->dev, "unknown slave_state: %d\n",
			bus->slave_state);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	}

	return irq_handled;
}
#endif /* CONFIG_I2C_SLAVE */

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
{
	u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD;
	struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
	u8 slave_addr = i2c_8bit_addr_from_msg(msg);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * If it's requested in the middle of a slave session, set the master
	 * state to 'pending'; H/W will then continue handling this master
	 * command when the bus comes back to the idle state.
	 */
	if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
		bus->master_state = ASPEED_I2C_MASTER_PENDING;
		return;
	}
#endif /* CONFIG_I2C_SLAVE */

	bus->master_state = ASPEED_I2C_MASTER_START;
	bus->buf_index = 0;

	if (msg->flags & I2C_M_RD) {
		command |= ASPEED_I2CD_M_RX_CMD;
		/* Need to let the hardware know to NACK after RX. */
		if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
			command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
	}

	writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG);
	writel(command, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
{
	if (bus->msgs_index + 1 < bus->msgs_count) {
		bus->msgs_index++;
		aspeed_i2c_do_start(bus);
	} else {
		aspeed_i2c_do_stop(bus);
	}
}

static int aspeed_i2c_is_irq_error(u32 irq_status)
{
	if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS)
		return -EAGAIN;
	if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |
			  ASPEED_I2CD_INTR_SCL_TIMEOUT))
		return -EBUSY;
	if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL))
		return -EPROTO;

	return 0;
}

static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 irq_handled = 0, command = 0;
	struct i2c_msg *msg;
	u8 recv_byte;
	int ret;

	if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) {
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE;
		goto out_complete;
	}

	/*
	 * We encountered an interrupt that reports an error: the hardware
	 * should clear the command queue effectively taking us back to the
	 * INACTIVE state.
	 */
	ret = aspeed_i2c_is_irq_error(irq_status);
	if (ret) {
		dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
			irq_status);
		irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
		if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
			irq_handled = irq_status;
			bus->cmd_err = ret;
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
			goto out_complete;
		}
	}

	/* Master is not currently active, irq was for someone else. */
	if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
	    bus->master_state == ASPEED_I2C_MASTER_PENDING)
		goto out_no_complete;

	/* We are in an invalid state; reset bus to a known state. */
	if (!bus->msgs) {
		dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n",
			irq_status);
		bus->cmd_err = -EIO;
		if (bus->master_state != ASPEED_I2C_MASTER_STOP &&
		    bus->master_state != ASPEED_I2C_MASTER_INACTIVE)
			aspeed_i2c_do_stop(bus);
		goto out_no_complete;
	}
	msg = &bus->msgs[bus->msgs_index];

	/*
	 * START is a special case because we still have to handle a subsequent
	 * TX or RX immediately after we handle it, so we handle it here and
	 * then update the state and handle the new state below.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_START) {
#if IS_ENABLED(CONFIG_I2C_SLAVE)
		/*
		 * If a peer master starts an xfer immediately after it queues a
		 * master command, clear the queued master command and change
		 * its state to 'pending'. To simplify handling of pending
		 * cases, this uses a S/W solution instead of the H/W command
		 * queue handling.
		 */
		if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
			writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
				~ASPEED_I2CD_MASTER_CMDS_MASK,
			       bus->base + ASPEED_I2C_CMD_REG);
			bus->master_state = ASPEED_I2C_MASTER_PENDING;
			dev_dbg(bus->dev,
				"master goes pending due to a slave start\n");
			goto out_no_complete;
		}
#endif /* CONFIG_I2C_SLAVE */
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) {
				bus->cmd_err = -ENXIO;
				bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
				goto out_complete;
			}
			pr_devel("no slave present at %02x\n", msg->addr);
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			bus->cmd_err = -ENXIO;
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		if (msg->len == 0) { /* SMBUS_QUICK */
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		if (msg->flags & I2C_M_RD)
			bus->master_state = ASPEED_I2C_MASTER_RX_FIRST;
		else
			bus->master_state = ASPEED_I2C_MASTER_TX_FIRST;
	}

	switch (bus->master_state) {
	case ASPEED_I2C_MASTER_TX:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
			dev_dbg(bus->dev, "slave NACKed TX\n");
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			goto error_and_stop;
		} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev, "slave failed to ACK TX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		fallthrough;
	case ASPEED_I2C_MASTER_TX_FIRST:
		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_TX;
			writel(msg->buf[bus->buf_index++],
			       bus->base + ASPEED_I2C_BYTE_BUF_REG);
			writel(ASPEED_I2CD_M_TX_CMD,
			       bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_RX_FIRST:
		/* RX may not have completed yet (only address cycle) */
		if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
			goto out_no_complete;
		fallthrough;
	case ASPEED_I2C_MASTER_RX:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
			dev_err(bus->dev, "master failed to RX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;

		recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		msg->buf[bus->buf_index++] = recv_byte;

		if (msg->flags & I2C_M_RECV_LEN) {
			if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
				bus->cmd_err = -EPROTO;
				aspeed_i2c_do_stop(bus);
				goto out_no_complete;
			}
			msg->len = recv_byte +
					((msg->flags & I2C_CLIENT_PEC) ? 2 : 1);
			msg->flags &= ~I2C_M_RECV_LEN;
		}

		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_RX;
			command = ASPEED_I2CD_M_RX_CMD;
			if (bus->buf_index + 1 == msg->len)
				command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
			writel(command, bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_STOP:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
			dev_err(bus->dev,
				"master failed to STOP. irq_status:0x%x\n",
				irq_status);
			bus->cmd_err = -EIO;
			/* Do not STOP as we have already tried. */
		} else {
			irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		}

		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		goto out_complete;
	case ASPEED_I2C_MASTER_INACTIVE:
		dev_err(bus->dev,
			"master received interrupt 0x%08x, but is inactive\n",
			irq_status);
		bus->cmd_err = -EIO;
		/* Do not STOP as we should be inactive. */
		goto out_complete;
	default:
		WARN(1, "unknown master state\n");
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		bus->cmd_err = -EINVAL;
		goto out_complete;
	}
error_and_stop:
	bus->cmd_err = -EIO;
	aspeed_i2c_do_stop(bus);
	goto out_no_complete;
out_complete:
	bus->msgs = NULL;
	if (bus->cmd_err)
		bus->master_xfer_result = bus->cmd_err;
	else
		bus->master_xfer_result = bus->msgs_index + 1;
	complete(&bus->cmd_complete);
out_no_complete:
	return irq_handled;
}
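
/*
 * Note on the interrupt dispatch convention used below (descriptive summary
 * of this driver's code, not additional hardware behaviour): both
 * aspeed_i2c_master_irq() and aspeed_i2c_slave_irq() return the mask of
 * status bits they actually consumed. aspeed_i2c_bus_irq() clears those bits
 * from irq_remaining and only reports IRQ_HANDLED when every received bit
 * was claimed by one of the two handlers.
 */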

static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
{
	struct aspeed_i2c_bus *bus = dev_id;
	u32 irq_received, irq_remaining, irq_handled;

	spin_lock(&bus->lock);
	irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	/* Ack all interrupts except for Rx done */
	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
	       bus->base + ASPEED_I2C_INTR_STS_REG);
	readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	irq_received &= ASPEED_I2CD_INTR_RECV_MASK;
	irq_remaining = irq_received;

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * In most cases, interrupt bits will be set one by one, although
	 * multiple interrupt bits could be set at the same time. It's also
	 * possible that master interrupt bits could be set along with slave
	 * interrupt bits. Each case needs to be handled using corresponding
	 * handlers depending on the current state.
	 */
	if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE &&
	    bus->master_state != ASPEED_I2C_MASTER_PENDING) {
		irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining);
	} else {
		irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_master_irq(bus,
							     irq_remaining);
	}

	/*
	 * Start a pending master command here if a slave operation has
	 * completed.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
	    bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		aspeed_i2c_do_start(bus);
#else
	irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */

	irq_remaining &= ~irq_handled;
	if (irq_remaining)
		dev_err(bus->dev,
			"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
			irq_received, irq_handled);

	/* Ack Rx done */
	if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
		writel(ASPEED_I2CD_INTR_RX_DONE,
		       bus->base + ASPEED_I2C_INTR_STS_REG);
		readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	}
	spin_unlock(&bus->lock);
	return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
}
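
/*
 * i2c_algorithm master_xfer callback (summary of the code below): returns
 * the number of messages transferred on success or a negative errno. In a
 * single-master configuration a busy bus is recovered before the transfer is
 * started; on timeout the controller is either recovered (multi-master, bus
 * busy) or reset, and -ETIMEDOUT is returned.
 */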

static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
				  struct i2c_msg *msgs, int num)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap);
	unsigned long time_left, flags;

	spin_lock_irqsave(&bus->lock, flags);
	bus->cmd_err = 0;

	/* If bus is busy in a single master environment, attempt recovery. */
	if (!bus->multi_master &&
	    (readl(bus->base + ASPEED_I2C_CMD_REG) &
	     ASPEED_I2CD_BUS_BUSY_STS)) {
		int ret;

		spin_unlock_irqrestore(&bus->lock, flags);
		ret = aspeed_i2c_recover_bus(bus);
		if (ret)
			return ret;
		spin_lock_irqsave(&bus->lock, flags);
	}

	bus->cmd_err = 0;
	bus->msgs = msgs;
	bus->msgs_index = 0;
	bus->msgs_count = num;

	reinit_completion(&bus->cmd_complete);
	aspeed_i2c_do_start(bus);
	spin_unlock_irqrestore(&bus->lock, flags);

	time_left = wait_for_completion_timeout(&bus->cmd_complete,
						bus->adap.timeout);

	if (time_left == 0) {
		/*
		 * In a multi-master setup, if a timeout occurs, attempt
		 * recovery. But if the bus is idle, we still need to reset the
		 * i2c controller to clear the remaining interrupts.
		 */
		if (bus->multi_master &&
		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
		     ASPEED_I2CD_BUS_BUSY_STS))
			aspeed_i2c_recover_bus(bus);
		else
			aspeed_i2c_reset(bus);

		/*
		 * If timed out and the state is still pending, drop the
		 * pending master command.
		 */
		spin_lock_irqsave(&bus->lock, flags);
		if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		spin_unlock_irqrestore(&bus->lock, flags);

		return -ETIMEDOUT;
	}

	return bus->master_xfer_result;
}

static u32 aspeed_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* precondition: bus.lock has been acquired. */
static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
{
	u32 addr_reg_val, func_ctrl_reg_val;

	/*
	 * Set slave addr. Reserved bits can all safely be written with zeros
	 * on all of ast2[456]00, so zero everything else to ensure we only
	 * enable a single slave address (ast2500 has two, ast2600 has three,
	 * the enable bits for which are also in this register) so that we
	 * don't end up with additional phantom devices responding on the bus.
	 */
	addr_reg_val = slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
	writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);

	/* Turn on slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}

static int aspeed_i2c_reg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	__aspeed_i2c_reg_slave(bus, client->addr);

	bus->slave = client;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}

static int aspeed_i2c_unreg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	u32 func_ctrl_reg_val;
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (!bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	/* Turn off slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave = NULL;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}
#endif /* CONFIG_I2C_SLAVE */

static const struct i2c_algorithm aspeed_i2c_algo = {
	.master_xfer	= aspeed_i2c_master_xfer,
	.functionality	= aspeed_i2c_functionality,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	.reg_slave	= aspeed_i2c_reg_slave,
	.unreg_slave	= aspeed_i2c_unreg_slave,
#endif /* CONFIG_I2C_SLAVE */
};

static u32 aspeed_i2c_get_clk_reg_val(struct device *dev,
				      u32 clk_high_low_mask,
				      u32 divisor)
{
	u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp;

	/*
	 * SCL_high and SCL_low represent a value 1 greater than what is stored
	 * since a zero divider is meaningless. Thus, the max value each can
	 * store is every bit set + 1. Since SCL_high and SCL_low are added
	 * together (see below), the max value of both is the max value of one
	 * of them times two.
	 */
	clk_high_low_max = (clk_high_low_mask + 1) * 2;

	/*
	 * The actual clock frequency of SCL is:
	 *	SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low))
	 *		 = APB_freq / divisor
	 * where base_freq is a programmable clock divider; its value is
	 *	base_freq = 1 << base_clk_divisor
	 * SCL_high is the number of base_freq clock cycles that SCL stays high
	 * and SCL_low is the number of base_freq clock cycles that SCL stays
	 * low for a period of SCL.
	 * The actual register has a minimum SCL_high and SCL_low of 1;
	 * thus, they start counting at zero. So
	 *	SCL_high = clk_high + 1
	 *	SCL_low	 = clk_low + 1
	 * Thus,
	 *	SCL_freq = APB_freq /
	 *		((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1))
	 * The documentation recommends clk_high >= clk_high_max / 2 and
	 * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint
	 * gives us the following solution:
	 */
	base_clk_divisor = divisor > clk_high_low_max ?
			ilog2((divisor - 1) / clk_high_low_max) + 1 : 0;

	if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) {
		base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK;
		clk_low = clk_high_low_mask;
		clk_high = clk_high_low_mask;
		dev_err(dev,
			"clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n",
			divisor, (1 << base_clk_divisor) * clk_high_low_max);
	} else {
		tmp = (divisor + (1 << base_clk_divisor) - 1)
				>> base_clk_divisor;
		clk_low = tmp / 2;
		clk_high = tmp - clk_low;

		if (clk_high)
			clk_high--;

		if (clk_low)
			clk_low--;
	}

	return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT)
		& ASPEED_I2CD_TIME_SCL_HIGH_MASK)
			| ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT)
			   & ASPEED_I2CD_TIME_SCL_LOW_MASK)
			| (base_clk_divisor
			   & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK);
}
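
/*
 * Worked example for the divisor calculation above (illustrative only;
 * assumes a 48 MHz APB clock and a 100 kHz bus with 4-bit clk_high/clk_low
 * fields, i.e. the 25xx-style variant below):
 *	divisor          = DIV_ROUND_UP(48000000, 100000) = 480
 *	clk_high_low_max = (0xf + 1) * 2 = 32
 *	base_clk_divisor = ilog2((480 - 1) / 32) + 1 = 4
 *	tmp              = (480 + 15) >> 4 = 30, so clk_low = clk_high = 15,
 *	                   stored as 14 after the minus-one adjustment
 *	SCL_freq         = 48 MHz / ((1 << 4) * (15 + 15)) = 100 kHz
 */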

static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 3 bits wide, so each can hold a max
	 * value of 8 giving a clk_high_low_max of 16.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor);
}

static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 4 bits wide, so each can hold a max
	 * value of 16 giving a clk_high_low_max of 32.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor);
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
{
	u32 divisor, clk_reg_val;

	divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency);
	clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1);
	clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK |
			ASPEED_I2CD_TIME_THDSTA_MASK |
			ASPEED_I2CD_TIME_TACST_MASK);
	clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor);
	writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1);
	writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2);

	return 0;
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
			   struct platform_device *pdev)
{
	u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN;
	int ret;

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	ret = aspeed_i2c_init_clk(bus);
	if (ret < 0)
		return ret;

	if (of_property_read_bool(pdev->dev.of_node, "multi-master"))
		bus->multi_master = true;
	else
		fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS;

	/* Enable Master Mode */
	writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg,
	       bus->base + ASPEED_I2C_FUN_CTRL_REG);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/* If slave has already been registered, re-enable it. */
	if (bus->slave)
		__aspeed_i2c_reg_slave(bus, bus->slave->addr);
#endif /* CONFIG_I2C_SLAVE */

	/* Set interrupt generation of I2C controller */
	writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	return 0;
}

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus)
{
	struct platform_device *pdev = to_platform_device(bus->dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable and ack all interrupts. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);

	ret = aspeed_i2c_init(bus, pdev);

	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;
}

static const struct of_device_id aspeed_i2c_bus_of_table[] = {
	{
		.compatible = "aspeed,ast2400-i2c-bus",
		.data = aspeed_i2c_24xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2500-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2600-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);

static int aspeed_i2c_probe_bus(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct aspeed_i2c_bus *bus;
	struct clk *parent_clk;
	int irq, ret;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	bus->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(bus->base))
		return PTR_ERR(bus->base);

	parent_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(parent_clk))
		return PTR_ERR(parent_clk);
	bus->parent_clk_frequency = clk_get_rate(parent_clk);
	/* We just need the clock rate, we don't actually use the clk object. */
	devm_clk_put(&pdev->dev, parent_clk);

	bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
	if (IS_ERR(bus->rst)) {
		dev_err(&pdev->dev,
			"missing or invalid reset controller device tree entry\n");
		return PTR_ERR(bus->rst);
	}
	reset_control_deassert(bus->rst);

	ret = of_property_read_u32(pdev->dev.of_node,
				   "bus-frequency", &bus->bus_frequency);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not read bus-frequency property\n");
		bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
	}

	match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
	if (!match)
		bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
	else
		bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
				match->data;

	/* Initialize the I2C adapter */
	spin_lock_init(&bus->lock);
	init_completion(&bus->cmd_complete);
	bus->adap.owner = THIS_MODULE;
	bus->adap.retries = 0;
	bus->adap.algo = &aspeed_i2c_algo;
	bus->adap.dev.parent = &pdev->dev;
	bus->adap.dev.of_node = pdev->dev.of_node;
	strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
	i2c_set_adapdata(&bus->adap, bus);

	bus->dev = &pdev->dev;

	/* Clean up any left over interrupt state. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
	/*
	 * bus.lock does not need to be held because the interrupt handler has
	 * not been enabled yet.
	 */
	ret = aspeed_i2c_init(bus, pdev);
	if (ret < 0)
		return ret;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
			       0, dev_name(&pdev->dev), bus);
	if (ret < 0)
		return ret;

	ret = i2c_add_adapter(&bus->adap);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, bus);

	dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
		 bus->adap.nr, irq);

	return 0;
}

static void aspeed_i2c_remove_bus(struct platform_device *pdev)
{
	struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	spin_unlock_irqrestore(&bus->lock, flags);

	reset_control_assert(bus->rst);

	i2c_del_adapter(&bus->adap);
}

static struct platform_driver aspeed_i2c_bus_driver = {
	.probe		= aspeed_i2c_probe_bus,
	.remove_new	= aspeed_i2c_remove_bus,
	.driver		= {
		.name		= "aspeed-i2c-bus",
		.of_match_table	= aspeed_i2c_bus_of_table,
	},
};
module_platform_driver(aspeed_i2c_bus_driver);

MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>");
MODULE_DESCRIPTION("Aspeed I2C Bus Driver");
MODULE_LICENSE("GPL v2");