// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#define DEFAULT_SYMBOL_NAMESPACE	I2C_DW

#include "i2c-designware-core.h"

#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_SS_SCL_HCNT,
					ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_SS_SCL_LCNT,
					ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. The only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available.
		 * Calculate SCL timing parameters for Fast Mode Plus if not.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_FS_SCL_HCNT,
						ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_FS_SCL_LCNT,
						ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * also needed in high speed mode.
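	 *
	 * As a rough, illustrative sanity check (not the exact formula used
	 * by the i2c_dw_scl_hcnt()/i2c_dw_scl_lcnt() helpers): the counts
	 * scale with the controller input clock, so with e.g. a 100 MHz
	 * ic_clk the 600 ns tHIGH below corresponds to roughly 60 clock
	 * periods before any per-SoC correction terms or offsets are applied.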
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_FS_SCL_HCNT,
					ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_FS_SCL_LCNT,
					ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check whether high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			u32 t_high, t_low;

			/*
			 * The legal values stated in the databook for bus
			 * capacitance are only 100pF and 400pF.
			 * If dev->bus_capacitance_pF is greater than or equal
			 * to 400, t_high and t_low are assumed to be
			 * appropriate values for 400pF, otherwise 100pF.
			 */
			if (dev->bus_capacitance_pF >= 400) {
				/* assume bus capacitance is 400pF */
				t_high = dev->clk_freq_optimized ? 160 : 120;
				t_low = 320;
			} else {
				/* assume bus capacitance is 100pF */
				t_high = 60;
				t_low = dev->clk_freq_optimized ? 120 : 160;
			}

			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_HS_SCL_HCNT,
						ic_clk,
						t_high,
						sda_falling_time,
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_HS_SCL_LCNT,
						ic_clk,
						t_low,
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}

/**
 * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master.
 * It is called from the I2C init path, and at run time in case of a timeout.
 *
 * Return: 0 on success, or negative errno otherwise.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is a ten-bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of the IC_TAR register.
		 * We always set it as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from the registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	__i2c_dw_write_intr_mask(dev, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}

/*
 * This function waits for the controller to be idle before disabling I2C.
 * When the controller is not in the IDLE state, the MST_ACTIVITY bit
 * (IC_STATUS[5]) is set.
 *
 * Values:
 * 0x1 (ACTIVE): Controller not idle
 * 0x0 (IDLE): Controller is idle
 *
 * The function is called after completing the current transfer.
 *
 * Returns:
 * False when the controller is in the IDLE state.
 * True when the controller is in the ACTIVE state.
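 *
 * For reference, the poll below re-checks MST_ACTIVITY roughly every 1.1 ms
 * and gives up after 20 ms (the regmap_read_poll_timeout() parameters).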
 */
static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
{
	u32 status;

	regmap_read(dev->map, DW_IC_STATUS, &status);
	if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
		return false;

	return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
					!(status & DW_IC_STATUS_MASTER_ACTIVITY),
					1100, 20000) != 0;
}

static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
				       1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

/*
 * Initiate and continue a master read/write transaction with a polling-based
 * transfer routine; afterwards, write the messages into the Tx buffer.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
	 * card, it is mandatory to set the right value in a specific register
	 * (offset: 0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the I2C read/write transaction of buffer length,
		 * and poll for the bus busy status. For the last message
		 * transfer, update the command with the stop bit enabled.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to a hardware bug, the same command needs to be written twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers, so we always check it
					 * when reading/writing the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

/*
 * Initiate (and continue) low-level master read/write transaction.
 * It is only called from i2c_dw_isr(), and pumps i2c_msg messages into the
 * Tx buffer. Even if the i2c_msg data is longer than the Tx buffer, it
 * handles everything.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If the target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/*
			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set the restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
			    (dev->msg_write_idx > 0))
				need_restart = true;
		}

		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set, we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers, so we always set it
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid a
		 * flood of bogus interrupts.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * Once the i2c_msg index search is completed, we don't need the
	 * TX_EMPTY interrupt any more.
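	 *
	 * (Assuming no error below, RX_FULL, TX_ABRT and STOP_DET remain in
	 * the mask, so outstanding reads and the end-of-transfer handling
	 * still reach the interrupt or polling path.)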
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	__i2c_dw_write_intr_mask(dev, intr_mask);
}

static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;
	unsigned int intr_mask;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * The buffer length has been received; re-enable the TX_EMPTY
	 * interrupt to resume the SMBus transaction.
	 */
	__i2c_dw_read_intr_mask(dev, &intr_mask);
	intr_mask |= DW_IC_INTR_TX_EMPTY;
	__i2c_dw_write_intr_mask(dev, intr_mask);

	return len;
}

static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure the length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set (which cannot be
				 * detected from the registers), the controller can only be
				 * disabled once the STOP bit has been set. In the
				 * I2C_FUNC_SMBUS_BLOCK_DATA case the STOP bit is set only
				 * after the block data response length has been received,
				 * so when that length is invalid one more byte has to be
				 * read with the STOP bit set to complete the transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of the interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * is equivalent to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	if (!(dev->flags & ACCESS_POLLING)) {
		regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
	} else {
		regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
		stat &= dev->sw_mask;
	}

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
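	 *
	 * (Each IC_CLR_* read below acknowledges only its own interrupt
	 * source, so events raised in between remain pending rather than
	 * being dropped.)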
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
{
	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		__i2c_dw_read_intr_mask(dev, &stat);
		__i2c_dw_write_intr_mask(dev, 0);
		__i2c_dw_write_intr_mask(dev, stat);
	}
}

/*
 * Interrupt service routine. This gets called whenever an I2C master interrupt
 * occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt from the driver's point of view. State
		 * variables are either unset or stale, so acknowledge and
		 * disable interrupts to suppress further interrupts if the
		 * interrupt really came from this HW (e.g. firmware has left
		 * the HW active).
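		 *
		 * The individual sources were already acknowledged above by
		 * i2c_dw_read_clear_intrbits(); only the interrupt mask is
		 * cleared here.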
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		return IRQ_HANDLED;
	}

	i2c_dw_process_transfer(dev, stat);

	return IRQ_HANDLED;
}

static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
{
	unsigned long timeout = dev->adapter.timeout;
	unsigned int stat;
	int ret;

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
	} else {
		timeout += jiffies;
		do {
			ret = try_wait_for_completion(&dev->cmd_complete);
			if (ret)
				break;

			stat = i2c_dw_read_clear_intrbits(dev);
			if (stat)
				i2c_dw_process_transfer(dev, stat);
			else
				/* Try to save some power */
				usleep_range(3, 25);
		} while (time_before(jiffies, timeout));
	}

	return ret ? 0 : -ETIMEDOUT;
}

/*
 * Prepare the controller for a transaction and call i2c_dw_xfer_msg().
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	default:
		break;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	ret = i2c_dw_wait_transfer(dev);
	if (ret) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init_master() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		goto done;
	}

	/*
	 * This happens rarely (~1:500) and is hard to reproduce. A debug
	 * trace showed that IC_STATUS had the value 0x23 when STOP_DET
	 * occurred; if IC_ENABLE.ENABLE is cleared immediately, that can
	 * result in IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check
	 * whether the controller is still ACTIVE before disabling I2C.
	 */
	if (i2c_dw_is_controller_active(dev))
		dev_err(dev->dev, "controller active\n");

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. It needs some more investigation whether
	 * the additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
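	 *
	 * Note that the non-blocking __i2c_dw_disable_nowait() is used here;
	 * the activity check above already gave the controller time to go
	 * idle.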
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
	if (IS_ERR(rinfo->pinctrl)) {
		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
			return PTR_ERR(rinfo->pinctrl);

		rinfo->pinctrl = NULL;
		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
	} else if (!rinfo->pinctrl) {
		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
	}

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with GPIO recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms the BIOS advertises the bus clear feature and
	 * enables SCL/SDA stuck-low detection; the SMU FW then performs the
	 * bus recovery process. The driver should not ignore this BIOS
	 * advertisement of the bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
		irq_flags = IRQF_NO_SUSPEND;
	} else {
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
	}

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	__i2c_dw_write_intr_mask(dev, 0);
	i2c_dw_release_lock(dev);

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
				       irq_flags, dev_name(dev->dev), dev);
		if (ret) {
			dev_err(dev->dev, "failure requesting irq %i: %d\n",
				dev->irq, ret);
			return ret;
		}
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment the PM usage count during adapter registration in order
	 * to avoid a possible spurious runtime suspend when the adapter
	 * device is registered to the device core, and an immediate resume
	 * in case the bus has registered I2C slaves that do I2C transfers in
	 * their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(I2C_DW_COMMON);