// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */

#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "i2c-designware-core.h"

#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_SS_SCL_HCNT,
					ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_SS_SCL_LCNT,
					ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. Only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available and
		 * calculate the SCL timing parameters for Fast Mode Plus if
		 * they are not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_FS_SCL_HCNT,
						ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_FS_SCL_LCNT,
						ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * also needed in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_FS_SCL_HCNT,
					ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_FS_SCL_LCNT,
					ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check if high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			u32 t_high, t_low;

			/*
			 * The legal values stated in the databook for bus
			 * capacitance are only 100pF and 400pF.
			 * If dev->bus_capacitance_pF is greater than or equal
			 * to 400, t_high and t_low are assumed to be
			 * appropriate values for 400pF, otherwise 100pF.
			 */
			if (dev->bus_capacitance_pF >= 400) {
				/* assume bus capacitance is 400pF */
				t_high = dev->clk_freq_optimized ? 160 : 120;
				t_low = 320;
			} else {
				/* assume bus capacitance is 100pF */
				t_high = 60;
				t_low = dev->clk_freq_optimized ? 120 : 160;
			}

			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_HS_SCL_HCNT,
						ic_clk,
						t_high,
						sda_falling_time,
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_HS_SCL_LCNT,
						ic_clk,
						t_low,
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}

/**
 * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master.
 * It is called during I2C initialization and in case of a timeout at
 * run time.
 *
 * Return: 0 on success, or negative errno otherwise.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/*
	 * Mask SMBus interrupts to block storms from broken
	 * firmware that leaves IC_SMBUS=1; the handler never
	 * services them.
	 */
	regmap_write(dev->map, DW_IC_SMBUS_INTR_MASK, 0);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is a ten-bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of the IC_TAR register.
		 * We always set it, as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from the registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	__i2c_dw_write_intr_mask(dev, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}

/*
 * This function waits for the controller to be idle before disabling I2C.
 * When the controller is not in the IDLE state, the MST_ACTIVITY bit
 * (IC_STATUS[5]) is set.
 *
 * Values:
 * 0x1 (ACTIVE): Controller not idle
 * 0x0 (IDLE): Controller is idle
 *
 * The function is called after completing the current transfer.
 *
 * Returns:
 * False when the controller is in the IDLE state.
 * True when the controller is in the ACTIVE state.
 */
static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
{
	u32 status;

	regmap_read(dev->map, DW_IC_STATUS, &status);
	if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
		return false;

	return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
					!(status & DW_IC_STATUS_MASTER_ACTIVITY),
					1100, 20000) != 0;
}

static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
				       1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

/*
 * Initiate and continue a master read/write transaction with a
 * polling-based transfer routine that writes the messages into the Tx buffer.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
	 * card, it is mandatory to set the right value in a specific register
	 * (offset 0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	dev->msg_write_idx = 0;
	i2c_dw_xfer_init(dev);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with stop bit enable.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to hardware bug, need to write the same command twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers so we always check it
					 * when reading/writing the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

/*
 * Initiate (and continue) low level master read/write transaction.
 * This function is only called from i2c_dw_isr(), and it pumps i2c_msg
 * messages into the Tx buffer. Even if the size of the i2c_msg data is
 * longer than the size of the Tx buffer, it handles everything.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If the target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/*
			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set the restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
			    (dev->msg_write_idx > 0))
				need_restart = true;
		}

		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid a
		 * flood of bogus interrupts.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If the i2c_msg index search is completed, we don't need the
	 * TX_EMPTY interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	__i2c_dw_write_intr_mask(dev, intr_mask);
}

static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;
	unsigned int intr_mask;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * The buffer length has been received, so re-enable the TX_EMPTY
	 * interrupt to resume the SMBus transaction.
	 */
	__i2c_dw_read_intr_mask(dev, &intr_mask);
	intr_mask |= DW_IC_INTR_TX_EMPTY;
	__i2c_dw_write_intr_mask(dev, intr_mask);

	return len;
}

static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
				 * detected from the registers, the controller can be
				 * disabled if the STOP bit is set. But it is only set
				 * after receiving the block data response length in the
				 * I2C_FUNC_SMBUS_BLOCK_DATA case. So when the block data
				 * response length is invalid, another byte with the STOP
				 * bit set needs to be read to complete the transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	if (!(dev->flags & ACCESS_POLLING)) {
		regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
	} else {
		regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
		stat &= dev->sw_mask;
	}

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
{
	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		__i2c_dw_read_intr_mask(dev, &stat);
		__i2c_dw_write_intr_mask(dev, 0);
		__i2c_dw_write_intr_mask(dev, stat);
	}
}

/*
 * Interrupt service routine. This gets called whenever an I2C master interrupt
 * occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt from the driver's point of view. State
		 * variables are either unset or stale, so acknowledge and
		 * disable interrupts to suppress further interrupts in case
		 * the interrupt really came from this HW (e.g. firmware has
		 * left the HW active).
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		return IRQ_HANDLED;
	}

	i2c_dw_process_transfer(dev, stat);

	return IRQ_HANDLED;
}

static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
{
	unsigned long timeout = dev->adapter.timeout;
	unsigned int stat;
	int ret;

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
	} else {
		timeout += jiffies;
		do {
			ret = try_wait_for_completion(&dev->cmd_complete);
			if (ret)
				break;

			stat = i2c_dw_read_clear_intrbits(dev);
			if (stat)
				i2c_dw_process_transfer(dev, stat);
			else
				/* Try to save some power */
				usleep_range(3, 25);
		} while (time_before(jiffies, timeout));
	}

	return ret ? 0 : -ETIMEDOUT;
}

/*
 * Prepare controller for a transaction and call i2c_dw_xfer_msg.
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	default:
		break;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	ret = i2c_dw_wait_transfer(dev);
	if (ret) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init_master() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		goto done;
	}

	/*
	 * This happens rarely (~1:500) and is hard to reproduce. A debug
	 * trace showed that IC_STATUS had the value 0x23 when STOP_DET
	 * occurred; disabling IC_ENABLE.ENABLE immediately in that state
	 * can result in IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low.
	 * Check whether the controller is still ACTIVE before disabling I2C.
	 */
	if (i2c_dw_is_controller_active(dev))
		dev_err(dev->dev, "controller active\n");

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
	if (IS_ERR(rinfo->pinctrl)) {
		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
			return PTR_ERR(rinfo->pinctrl);

		rinfo->pinctrl = NULL;
		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
	} else if (!rinfo->pinctrl) {
		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
	}

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with GPIO recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms the BIOS advertises the bus clear feature and
	 * enables SCL/SDA stuck low detection; the SMU FW performs the bus
	 * recovery process. The driver should not ignore this BIOS
	 * advertisement of the bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	if (!adap->name[0])
		scnprintf(adap->name, sizeof(adap->name),
			  "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND)
		irq_flags = IRQF_NO_SUSPEND;
	else
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	__i2c_dw_write_intr_mask(dev, 0);
	i2c_dw_release_lock(dev);

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
				       irq_flags, dev_name(dev->dev), dev);
		if (ret)
			return dev_err_probe(dev->dev, ret,
					     "failure requesting irq %i: %d\n",
					     dev->irq, ret);
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment PM usage count during adapter registration in order to
	 * avoid possible spurious runtime suspend when adapter device is
	 * registered to the device core and immediate resume in case bus has
	 * registered I2C slaves that do I2C transfers in their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("I2C_DW_COMMON");