// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */

#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "i2c-designware-core.h"

#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_SS_SCL_HCNT,
					ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_SS_SCL_LCNT,
					ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set the SCL timing parameters for fast mode or fast mode plus. The
	 * only difference is the timing parameter values, since the registers
	 * are the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available.
		 * Calculate the SCL timing parameters for Fast Mode Plus
		 * if they are not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_FS_SCL_HCNT,
						ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_FS_SCL_LCNT,
						ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate the SCL timing parameters for fast mode if they are not
	 * set. They are also needed in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_FS_SCL_HCNT,
					ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_FS_SCL_LCNT,
					ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check whether high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			u32 t_high, t_low;

			/*
			 * The legal values stated in the databook for bus
			 * capacitance are only 100pF and 400pF.
			 * If dev->bus_capacitance_pF is greater than or equal
			 * to 400, t_high and t_low are assumed to be
			 * appropriate values for 400pF, otherwise 100pF.
			 */
			if (dev->bus_capacitance_pF >= 400) {
				/* assume bus capacitance is 400pF */
				t_high = dev->clk_freq_optimized ? 160 : 120;
				t_low = 320;
			} else {
				/* assume bus capacitance is 100pF */
				t_high = 60;
				t_low = dev->clk_freq_optimized ? 120 : 160;
			}

			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_HS_SCL_HCNT,
						ic_clk,
						t_high,
						sda_falling_time,
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_HS_SCL_LCNT,
						ic_clk,
						t_low,
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}

/**
 * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master. It is called during
 * the I2C init function, and at run time in case of a timeout.
 *
 * Return: 0 on success, or negative errno otherwise.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is a ten-bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of the IC_TAR register.
		 * We always set it, as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from the registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	__i2c_dw_write_intr_mask(dev, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}

/*
 * This function waits for the controller to be idle before disabling I2C.
 * When the controller is not in the IDLE state, the MST_ACTIVITY bit
 * (IC_STATUS[5]) is set.
 *
 * Values:
 * 0x1 (ACTIVE): Controller not idle
 * 0x0 (IDLE): Controller is idle
 *
 * The function is called after completing the current transfer.
 *
 * Returns:
 * False when the controller is in the IDLE state.
 * True when the controller is in the ACTIVE state.
 */
static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
{
	u32 status;

	regmap_read(dev->map, DW_IC_STATUS, &status);
	if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
		return false;

	return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
					!(status & DW_IC_STATUS_MASTER_ACTIVITY),
					1100, 20000) != 0;
}

static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
				       1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

/*
 * Initiate and continue a master read/write transaction with a polling-based
 * transfer routine; afterwards, write the messages into the Tx buffer.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
	 * card, it is mandatory to set the right value in a specific register
	 * (offset 0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);

	/* Initiate the messages' read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the I2C read/write transaction of buffer length,
		 * and poll for the bus busy status. For the last message
		 * transfer, update the command with the stop bit enabled.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to a hardware bug, the same command needs to be written twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers, so we always check it
					 * when reading/writing the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

/*
 * Initiate (and continue) a low level master read/write transaction.
 * This function is only called from i2c_dw_isr() and pumps i2c_msg
 * messages into the Tx buffer. Even if the size of the i2c_msg data is
 * longer than the size of the Tx buffer, it handles everything.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If the target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/*
			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set the restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
			    (dev->msg_write_idx > 0))
				need_restart = true;
		}

		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set, we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers, so we always set it
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid a
		 * flood of bogus interrupts.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If the i2c_msg index search is completed, we don't need the
	 * TX_EMPTY interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	__i2c_dw_write_intr_mask(dev, intr_mask);
}

static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;
	unsigned int intr_mask;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * The buffer length has been received; re-enable the TX_EMPTY
	 * interrupt to resume the SMBus transaction.
	 */
	__i2c_dw_read_intr_mask(dev, &intr_mask);
	intr_mask |= DW_IC_INTR_TX_EMPTY;
	__i2c_dw_write_intr_mask(dev, intr_mask);

	return len;
}

static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure the length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
				 * detected from the registers, the controller can be
				 * disabled if the STOP bit is set. But it is only set
				 * after receiving the block data response length in the
				 * I2C_FUNC_SMBUS_BLOCK_DATA case. So when the block data
				 * response length is invalid, another byte with the STOP
				 * bit set needs to be read to complete the transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of the interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * is equivalent to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	if (!(dev->flags & ACCESS_POLLING)) {
		regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
	} else {
		regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
		stat &= dev->sw_mask;
	}

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
{
	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		__i2c_dw_read_intr_mask(dev, &stat);
		__i2c_dw_write_intr_mask(dev, 0);
		__i2c_dw_write_intr_mask(dev, stat);
	}
}

/*
 * Interrupt service routine. This gets called whenever an I2C master interrupt
 * occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt from the driver's point of view. State
		 * variables are either unset or stale, so acknowledge and
		 * disable interrupts to suppress further interrupts if the
		 * interrupt really came from this HW (e.g. firmware has left
		 * the HW active).
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		return IRQ_HANDLED;
	}

	i2c_dw_process_transfer(dev, stat);

	return IRQ_HANDLED;
}

static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
{
	unsigned long timeout = dev->adapter.timeout;
	unsigned int stat;
	int ret;

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
	} else {
		timeout += jiffies;
		do {
			ret = try_wait_for_completion(&dev->cmd_complete);
			if (ret)
				break;

			stat = i2c_dw_read_clear_intrbits(dev);
			if (stat)
				i2c_dw_process_transfer(dev, stat);
			else
				/* Try to save some power */
				usleep_range(3, 25);
		} while (time_before(jiffies, timeout));
	}

	return ret ? 0 : -ETIMEDOUT;
}

/*
 * Prepare the controller for a transaction and call i2c_dw_xfer_msg().
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	default:
		break;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	ret = i2c_dw_wait_transfer(dev);
	if (ret) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init_master() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		goto done;
	}

	/*
	 * This happens rarely (~1:500) and is hard to reproduce. A debug
	 * trace showed that IC_STATUS had a value of 0x23 when STOP_DET
	 * occurred; if IC_ENABLE.ENABLE is cleared immediately, that can
	 * result in IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check
	 * whether the controller is still ACTIVE before disabling I2C.
	 */
	if (i2c_dw_is_controller_active(dev))
		dev_err(dev->dev, "controller active\n");

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. It needs some more investigation whether
	 * the additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
	if (IS_ERR(rinfo->pinctrl)) {
		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
			return PTR_ERR(rinfo->pinctrl);

		rinfo->pinctrl = NULL;
		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
	} else if (!rinfo->pinctrl) {
		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
	}

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with GPIO recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms the BIOS advertises the bus clear feature and
	 * enables the SCL/SDA stuck-low handling. The SMU FW does the bus
	 * recovery process. The driver should not ignore this BIOS
	 * advertisement of the bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND)
		irq_flags = IRQF_NO_SUSPEND;
	else
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	__i2c_dw_write_intr_mask(dev, 0);
	i2c_dw_release_lock(dev);

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
				       irq_flags, dev_name(dev->dev), dev);
		if (ret) {
			dev_err(dev->dev, "failure requesting irq %i: %d\n",
				dev->irq, ret);
			return ret;
		}
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment the PM usage count during adapter registration in order
	 * to avoid a possible spurious runtime suspend when the adapter
	 * device is registered to the device core, and an immediate resume
	 * in case the bus has registered I2C slaves that do I2C transfers
	 * in their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("I2C_DW_COMMON");