// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "i2c-designware-core.h"

#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. The only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available and
		 * calculate SCL timing parameters for Fast Mode Plus if not.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * also needed in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check whether high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
	    DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						160,	/* tHIGH = 160 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						320,	/* tLOW = 320 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}

/**
 * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master.
 * This function is called during I2C initialization and in case of a timeout
 * at run time.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is a 10-bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of the IC_TAR register.
		 * We always set it since I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from the registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
}

static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
				       1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

/*
 * Initiate and continue master read/write transactions with a polling-based
 * transfer routine, writing the messages into the Tx buffer.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
	 * card, it is mandatory to set the right value in a specific register
	 * (offset:0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with the stop bit enabled.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to hardware bug, need to write the same command twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers so we always check it
					 * when reading/writing the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

static int i2c_dw_poll_tx_empty(struct dw_i2c_dev *dev)
{
	u32 val;

	return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
					val & DW_IC_INTR_TX_EMPTY,
					100, 1000);
}

static int i2c_dw_poll_rx_full(struct dw_i2c_dev *dev)
{
	u32 val;

	return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
					val & DW_IC_INTR_RX_FULL,
					100, 1000);
}

static int txgbe_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
				   int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_idx, buf_len, data_idx, ret;
	unsigned int val, stop = 0;
	u8 *buf;

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	regmap_write(dev->map, DW_IC_INTR_MASK, 0);

	for (msg_idx = 0; msg_idx < num_msgs; msg_idx++) {
		buf = msgs[msg_idx].buf;
		buf_len = msgs[msg_idx].len;

		for (data_idx = 0; data_idx < buf_len; data_idx++) {
			if (msg_idx == num_msgs - 1 && data_idx == buf_len - 1)
				stop |= BIT(9);

			if (msgs[msg_idx].flags & I2C_M_RD) {
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | stop);

				ret = i2c_dw_poll_rx_full(dev);
				if (ret)
					return ret;

				regmap_read(dev->map, DW_IC_DATA_CMD, &val);
				buf[data_idx] = val;
			} else {
				ret = i2c_dw_poll_tx_empty(dev);
				if (ret)
					return ret;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     buf[data_idx] | stop);
			}
		}
	}

	return num_msgs;
}

/*
 * Initiate (and continue) low level master read/write transaction.
 * This function is only called from i2c_dw_isr, and pumps i2c_msg
 * messages into the Tx buffer. Even if the size of the i2c_msg data is
 * longer than the size of the Tx buffer, it handles everything.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If the target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set the restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
			    (dev->msg_write_idx > 0))
				need_restart = true;
		}

		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
		 * the transaction here.
		 */
		if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If i2c_msg index search is completed, we don't need TX_EMPTY
	 * interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask);
}

static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	return len;
}

static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			/* Ensure length byte is a valid value */
			if (flags & I2C_M_RECV_LEN &&
			    (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

/*
 * Prepare controller for a transaction and call i2c_dw_xfer_msg.
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	/*
	 * Initiate the I2C message transfer when polling mode is enabled.
	 * This is a polling-based transfer mechanism which does not support
	 * the interrupt-based functionality of the existing DesignWare
	 * driver.
	 */
	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	case MODEL_WANGXUN_SP:
		ret = txgbe_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	default:
		break;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init_master() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		ret = -ETIMEDOUT;
		goto done;
	}

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	regmap_read(dev->map, DW_IC_INTR_STAT, &stat);

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts, triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

/*
 * Interrupt service routine. This gets called whenever an I2C master
 * interrupt occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt from the driver's point of view. State
		 * variables are either unset or stale, so acknowledge and
		 * disable interrupts to suppress further ones in case the
		 * interrupt really came from this HW (e.g. firmware has left
		 * the HW active).
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		return IRQ_HANDLED;
	}

	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	    (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		regmap_read(dev->map, DW_IC_INTR_MASK, &stat);
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		regmap_write(dev->map, DW_IC_INTR_MASK, stat);
	}

	return IRQ_HANDLED;
}

void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

static int i2c_dw_poll_adap_quirk(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	int ret;

	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}

static bool i2c_dw_is_model_poll(struct dw_i2c_dev *dev)
{
	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
	case MODEL_WANGXUN_SP:
		return true;
	default:
		return false;
	}
}

int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;
	dev->disable = i2c_dw_disable;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms the BIOS advertises the bus clear feature and
	 * enables the SCL/SDA stuck-low detection; the SMU firmware performs
	 * the bus recovery process. The driver should not ignore this BIOS
	 * advertisement of the bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if (i2c_dw_is_model_poll(dev))
		return i2c_dw_poll_adap_quirk(dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
		irq_flags = IRQF_NO_SUSPEND;
	} else {
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
	}

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	regmap_write(dev->map, DW_IC_INTR_MASK, 0);
	i2c_dw_release_lock(dev);

	ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
			       dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "failure requesting irq %i: %d\n",
			dev->irq, ret);
		return ret;
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment the PM usage count during adapter registration in order
	 * to avoid a possible spurious runtime suspend when the adapter
	 * device is registered to the device core and an immediate resume in
	 * case the bus has registered I2C slaves that do I2C transfers in
	 * their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");