// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2009-2013, 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2014, Sony Mobile Communications AB.
 *
 */

#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/scatterlist.h>

/* QUP Registers */
#define QUP_CONFIG		0x000
#define QUP_STATE		0x004
#define QUP_IO_MODE		0x008
#define QUP_SW_RESET		0x00c
#define QUP_OPERATIONAL		0x018
#define QUP_ERROR_FLAGS		0x01c
#define QUP_ERROR_FLAGS_EN	0x020
#define QUP_OPERATIONAL_MASK	0x028
#define QUP_HW_VERSION		0x030
#define QUP_MX_OUTPUT_CNT	0x100
#define QUP_OUT_FIFO_BASE	0x110
#define QUP_MX_WRITE_CNT	0x150
#define QUP_MX_INPUT_CNT	0x200
#define QUP_MX_READ_CNT		0x208
#define QUP_IN_FIFO_BASE	0x218
#define QUP_I2C_CLK_CTL		0x400
#define QUP_I2C_STATUS		0x404
#define QUP_I2C_MASTER_GEN	0x408

/* QUP States and reset values */
#define QUP_RESET_STATE		0
#define QUP_RUN_STATE		1
#define QUP_PAUSE_STATE		3
#define QUP_STATE_MASK		3

#define QUP_STATE_VALID		BIT(2)
#define QUP_I2C_MAST_GEN	BIT(4)
#define QUP_I2C_FLUSH		BIT(6)

#define QUP_OPERATIONAL_RESET	0x000ff0
#define QUP_I2C_STATUS_RESET	0xfffffc

/* QUP OPERATIONAL FLAGS */
#define QUP_I2C_NACK_FLAG	BIT(3)
#define QUP_OUT_NOT_EMPTY	BIT(4)
#define QUP_IN_NOT_EMPTY	BIT(5)
#define QUP_OUT_FULL		BIT(6)
#define QUP_OUT_SVC_FLAG	BIT(8)
#define QUP_IN_SVC_FLAG		BIT(9)
#define QUP_MX_OUTPUT_DONE	BIT(10)
#define QUP_MX_INPUT_DONE	BIT(11)
#define OUT_BLOCK_WRITE_REQ	BIT(12)
#define IN_BLOCK_READ_REQ	BIT(13)

/* I2C mini core related values */
#define QUP_NO_INPUT		BIT(7)
#define QUP_CLOCK_AUTO_GATE	BIT(13)
#define I2C_MINI_CORE		(2 << 8)
#define I2C_N_VAL		15
#define I2C_N_VAL_V2		7

/* Most significant word offset in FIFO port */
#define QUP_MSW_SHIFT		(I2C_N_VAL + 1)

/* Packing/Unpacking words in FIFOs, and IO modes */
#define QUP_OUTPUT_BLK_MODE	(1 << 10)
#define QUP_OUTPUT_BAM_MODE	(3 << 10)
#define QUP_INPUT_BLK_MODE	(1 << 12)
#define QUP_INPUT_BAM_MODE	(3 << 12)
#define QUP_BAM_MODE		(QUP_OUTPUT_BAM_MODE | QUP_INPUT_BAM_MODE)
#define QUP_UNPACK_EN		BIT(14)
#define QUP_PACK_EN		BIT(15)

#define QUP_REPACK_EN		(QUP_UNPACK_EN | QUP_PACK_EN)
#define QUP_V2_TAGS_EN		1

#define QUP_OUTPUT_BLOCK_SIZE(x)	(((x) >> 0) & 0x03)
#define QUP_OUTPUT_FIFO_SIZE(x)		(((x) >> 2) & 0x07)
#define QUP_INPUT_BLOCK_SIZE(x)		(((x) >> 5) & 0x03)
#define QUP_INPUT_FIFO_SIZE(x)		(((x) >> 7) & 0x07)

/* QUP tags */
#define QUP_TAG_START		(1 << 8)
#define QUP_TAG_DATA		(2 << 8)
#define QUP_TAG_STOP		(3 << 8)
#define QUP_TAG_REC		(4 << 8)
#define QUP_BAM_INPUT_EOT	0x93
#define QUP_BAM_FLUSH_STOP	0x96

/* QUP v2 tags */
#define QUP_TAG_V2_START		0x81
#define QUP_TAG_V2_DATAWR		0x82
#define QUP_TAG_V2_DATAWR_STOP		0x83
#define QUP_TAG_V2_DATARD		0x85
#define QUP_TAG_V2_DATARD_NACK		0x86
#define QUP_TAG_V2_DATARD_STOP		0x87

/* Status, Error flags */
#define I2C_STATUS_WR_BUFFER_FULL	BIT(0)
#define I2C_STATUS_BUS_ACTIVE		BIT(8)
#define I2C_STATUS_ERROR_MASK		0x38000fc
#define QUP_STATUS_ERROR_FLAGS		0x7c

#define QUP_READ_LIMIT			256
#define SET_BIT				0x1
#define RESET_BIT			0x0
#define ONE_BYTE			0x1
#define QUP_I2C_MX_CONFIG_DURING_RUN	BIT(31)

/* Maximum transfer length for single DMA descriptor */
#define MX_TX_RX_LEN			SZ_64K
#define MX_BLOCKS			(MX_TX_RX_LEN / QUP_READ_LIMIT)
/* Maximum transfer length for all DMA descriptors */
#define MX_DMA_TX_RX_LEN		(2 * MX_TX_RX_LEN)
#define MX_DMA_BLOCKS			(MX_DMA_TX_RX_LEN / QUP_READ_LIMIT)

/*
 * Minimum transfer timeout for i2c transfers in seconds. It will be added on
 * the top of maximum transfer time calculated from i2c bus speed to compensate
 * the overheads.
 */
#define TOUT_MIN			2

/* Default values. Use these if FW query fails */
#define DEFAULT_CLK_FREQ	I2C_MAX_STANDARD_MODE_FREQ
#define DEFAULT_SRC_CLK		20000000

/*
 * Max tags length (start, stop and maximum 2 bytes address) for each QUP
 * data transfer
 */
#define QUP_MAX_TAGS_LEN	4
/* Max data length for each DATARD tags */
#define RECV_MAX_DATA_LEN	254
/* TAG length for DATA READ in RX FIFO */
#define READ_RX_TAGS_LEN	2

/* Bus width in bytes used to convert SCL frequency to a bandwidth vote */
#define QUP_BUS_WIDTH		8

/* Optional module parameter to override the firmware-provided SCL frequency */
static unsigned int scl_freq;
module_param_named(scl_freq, scl_freq, uint, 0444);
MODULE_PARM_DESC(scl_freq, "SCL frequency override");

/*
 * count: no of blocks
 * pos: current block number
 * tx_tag_len: tx tag length for current block
 * rx_tag_len: rx tag length for current block
 * data_len: remaining data length for current message
 * cur_blk_len: data length for current block
 * total_tx_len: total tx length including tag bytes for current QUP transfer
 * total_rx_len: total rx length including tag bytes for current QUP transfer
 * tx_fifo_data_pos: current byte number in TX FIFO word
 * tx_fifo_free: number of free bytes in current QUP block write.
 * rx_fifo_data_pos: current byte number in RX FIFO word
 * fifo_available: number of available bytes in RX FIFO for current
 *		   QUP block read
 * tx_fifo_data: QUP TX FIFO write works on word basis (4 bytes). New byte write
 *		 to TX FIFO will be appended in this data and will be written to
 *		 TX FIFO when all the 4 bytes are available.
 * rx_fifo_data: QUP RX FIFO read works on word basis (4 bytes). This will
 *		 contain the 4 bytes of RX data.
 * cur_data: pointer to tell cur data position for current message
 * cur_tx_tags: pointer to tell cur position in tags
 * tx_tags_sent: all tx tag bytes have been written in FIFO word
 * send_last_word: for tx FIFO, last word send is pending in current block
 * rx_bytes_read: if all the bytes have been read from rx FIFO.
 * rx_tags_fetched: all the rx tag bytes have been fetched from rx fifo word
 * is_tx_blk_mode: whether tx uses block or FIFO mode in case of non BAM xfer.
 * is_rx_blk_mode: whether rx uses block or FIFO mode in case of non BAM xfer.
 * tags: contains tx tag bytes for current QUP transfer
 */
struct qup_i2c_block {
	int		count;
	int		pos;
	int		tx_tag_len;
	int		rx_tag_len;
	int		data_len;
	int		cur_blk_len;
	int		total_tx_len;
	int		total_rx_len;
	int		tx_fifo_data_pos;
	int		tx_fifo_free;
	int		rx_fifo_data_pos;
	int		fifo_available;
	u32		tx_fifo_data;
	u32		rx_fifo_data;
	u8		*cur_data;
	u8		*cur_tx_tags;
	bool		tx_tags_sent;
	bool		send_last_word;
	bool		rx_tags_fetched;
	bool		rx_bytes_read;
	bool		is_tx_blk_mode;
	bool		is_rx_blk_mode;
	u8		tags[6];
};

/* A tag byte sequence plus its DMA address, used for BAM transfers */
struct qup_i2c_tag {
	u8 *start;
	dma_addr_t addr;
};

/* Per-direction BAM (DMA) channel state: channel, SG list and its fill count */
struct qup_i2c_bam {
	struct	qup_i2c_tag tag;
	struct	dma_chan *dma;
	struct	scatterlist *sg;
	unsigned int sg_cnt;
};

struct qup_i2c_dev {
	struct device		*dev;
	void __iomem		*base;
	int			irq;
	struct clk		*clk;
	struct clk		*pclk;
	struct icc_path		*icc_path;
	struct i2c_adapter	adap;

	int			clk_ctl;
	int			out_fifo_sz;
	int			in_fifo_sz;
	int			out_blk_sz;
	int			in_blk_sz;

	int			blk_xfer_limit;
	unsigned long		one_byte_t;
	unsigned long		xfer_timeout;
	struct qup_i2c_block	blk;

	struct i2c_msg		*msg;
	/* Current position in user message buffer
 */
	int			pos;
	/* I2C protocol errors */
	u32			bus_err;
	/* QUP core errors */
	u32			qup_err;

	/* To check if this is the last msg */
	bool			is_last;
	bool			is_smbus_read;

	/* To configure when bus is in run state */
	u32			config_run;

	/* bandwidth votes */
	u32			src_clk_freq;
	u32			cur_bw_clk_freq;

	/* dma parameters */
	bool			is_dma;
	/* To check if the current transfer is using DMA */
	bool			use_dma;
	unsigned int		max_xfer_sg_len;
	unsigned int		tag_buf_pos;
	/* The threshold length above which block mode will be used */
	unsigned int		blk_mode_threshold;
	struct			dma_pool *dpool;
	struct			qup_i2c_tag start_tag;
	struct			qup_i2c_bam brx;
	struct			qup_i2c_bam btx;

	struct completion	xfer;
	/* function to write data in tx fifo */
	void (*write_tx_fifo)(struct qup_i2c_dev *qup);
	/* function to read data from rx fifo */
	void (*read_rx_fifo)(struct qup_i2c_dev *qup);
	/* function to write tags in tx fifo for i2c read transfer */
	void (*write_rx_tags)(struct qup_i2c_dev *qup);
};

/*
 * Top-level QUP interrupt handler: latches and clears error flags, services
 * TX/RX FIFO requests, and completes the transfer when it is done or failed.
 */
static irqreturn_t qup_i2c_interrupt(int irq, void *dev)
{
	struct qup_i2c_dev *qup = dev;
	struct qup_i2c_block *blk = &qup->blk;
	u32 bus_err;
	u32 qup_err;
	u32 opflags;

	bus_err = readl(qup->base + QUP_I2C_STATUS);
	qup_err = readl(qup->base + QUP_ERROR_FLAGS);
	opflags = readl(qup->base + QUP_OPERATIONAL);

	if (!qup->msg) {
		/* Clear Error interrupt */
		writel(QUP_RESET_STATE, qup->base + QUP_STATE);
		return IRQ_HANDLED;
	}

	bus_err &= I2C_STATUS_ERROR_MASK;
	qup_err &= QUP_STATUS_ERROR_FLAGS;

	/* Clear the error bits in QUP_ERROR_FLAGS */
	if (qup_err)
		writel(qup_err, qup->base + QUP_ERROR_FLAGS);

	/* Clear the error bits in QUP_I2C_STATUS */
	if (bus_err)
		writel(bus_err, qup->base + QUP_I2C_STATUS);

	/*
	 * Check for BAM mode and returns if already error has come for current
	 * transfer. In Error case, sometimes, QUP generates more than one
	 * interrupt.
	 */
	if (qup->use_dma && (qup->qup_err || qup->bus_err))
		return IRQ_HANDLED;

	/* Reset the QUP State in case of error */
	if (qup_err || bus_err) {
		/*
		 * Don't reset the QUP state in case of BAM mode. The BAM
		 * flush operation needs to be scheduled in transfer function
		 * which will clear the remaining schedule descriptors in BAM
		 * HW FIFO and generates the BAM interrupt.
		 */
		if (!qup->use_dma)
			writel(QUP_RESET_STATE, qup->base + QUP_STATE);
		goto done;
	}

	if (opflags & QUP_OUT_SVC_FLAG) {
		writel(QUP_OUT_SVC_FLAG, qup->base + QUP_OPERATIONAL);

		if (opflags & OUT_BLOCK_WRITE_REQ) {
			blk->tx_fifo_free += qup->out_blk_sz;
			if (qup->msg->flags & I2C_M_RD)
				qup->write_rx_tags(qup);
			else
				qup->write_tx_fifo(qup);
		}
	}

	if (opflags & QUP_IN_SVC_FLAG) {
		writel(QUP_IN_SVC_FLAG, qup->base + QUP_OPERATIONAL);

		if (!blk->is_rx_blk_mode) {
			blk->fifo_available += qup->in_fifo_sz;
			qup->read_rx_fifo(qup);
		} else if (opflags & IN_BLOCK_READ_REQ) {
			blk->fifo_available += qup->in_blk_sz;
			qup->read_rx_fifo(qup);
		}
	}

	if (qup->msg->flags & I2C_M_RD) {
		if (!blk->rx_bytes_read)
			return IRQ_HANDLED;
	} else {
		/*
		 * Ideally, QUP_MAX_OUTPUT_DONE_FLAG should be checked
		 * for FIFO mode also. But, QUP_MAX_OUTPUT_DONE_FLAG lags
		 * behind QUP_OUTPUT_SERVICE_FLAG sometimes. The only reason
		 * of interrupt for write message in FIFO mode is
		 * QUP_MAX_OUTPUT_DONE_FLAG condition.
		 */
		if (blk->is_tx_blk_mode && !(opflags & QUP_MX_OUTPUT_DONE))
			return IRQ_HANDLED;
	}

done:
	qup->qup_err = qup_err;
	qup->bus_err = bus_err;
	complete(&qup->xfer);
	return IRQ_HANDLED;
}

/*
 * Poll QUP_STATE until (state & req_mask) == req_state and the state is
 * valid. Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int qup_i2c_poll_state_mask(struct qup_i2c_dev *qup,
				   u32 req_state, u32 req_mask)
{
	int retries = 1;
	u32 state;

	/*
	 * State transition takes 3 AHB clocks cycles + 3 I2C master clock
	 * cycles. So retry once after a 1uS delay.
	 */
	do {
		state = readl(qup->base + QUP_STATE);

		if (state & QUP_STATE_VALID &&
		    (state & req_mask) == req_state)
			return 0;

		udelay(1);
	} while (retries--);

	return -ETIMEDOUT;
}

/* Wait for the QUP core to reach exactly the requested run state */
static int qup_i2c_poll_state(struct qup_i2c_dev *qup, u32 req_state)
{
	return qup_i2c_poll_state_mask(qup, req_state, QUP_STATE_MASK);
}

/* Request a flush of the QUP output path (used for BAM error recovery) */
static void qup_i2c_flush(struct qup_i2c_dev *qup)
{
	u32 val = readl(qup->base + QUP_STATE);

	val |= QUP_I2C_FLUSH;
	writel(val, qup->base + QUP_STATE);
}

/* Wait only for the state-valid bit, regardless of the actual state */
static int qup_i2c_poll_state_valid(struct qup_i2c_dev *qup)
{
	return qup_i2c_poll_state_mask(qup, 0, 0);
}

/* Wait until the I2C mini core reports itself as master generator */
static int qup_i2c_poll_state_i2c_master(struct qup_i2c_dev *qup)
{
	return qup_i2c_poll_state_mask(qup, QUP_I2C_MAST_GEN, QUP_I2C_MAST_GEN);
}

/*
 * Move the QUP state machine to @state. Returns -EIO if the current state
 * never becomes valid or the new state is not reached.
 */
static int qup_i2c_change_state(struct qup_i2c_dev *qup, u32 state)
{
	if (qup_i2c_poll_state_valid(qup) != 0)
		return -EIO;

	writel(state, qup->base + QUP_STATE);

	if (qup_i2c_poll_state(qup, state) != 0)
		return -EIO;
	return 0;
}

/* Check if I2C bus returns to IDLE state */
static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
{
	unsigned long timeout;
	u32 status;
	int ret = 0;

	timeout = jiffies + len * 4;
	for (;;) {
		status = readl(qup->base + QUP_I2C_STATUS);
		if (!(status & I2C_STATUS_BUS_ACTIVE))
			break;

		if (time_after(jiffies, timeout)) {
			ret = -ETIMEDOUT;
			break;
		}

		usleep_range(len, len * 2);
	}

	return ret;
}

/*
 * Update the interconnect peak-bandwidth vote to match @clk_freq.
 * No-op when the current vote already matches.
 */
static int qup_i2c_vote_bw(struct qup_i2c_dev *qup, u32 clk_freq)
{
	u32 needed_peak_bw;
	int ret;

	if (qup->cur_bw_clk_freq == clk_freq)
		return 0;

	needed_peak_bw = Bps_to_icc(clk_freq * QUP_BUS_WIDTH);
	ret = icc_set_bw(qup->icc_path, 0, needed_peak_bw);
	if (ret)
		return ret;

	qup->cur_bw_clk_freq = clk_freq;
	return 0;
}

/*
 * Fill the TX FIFO with v1 tag+data words for a write message. Two
 * tagged bytes are packed per 32-bit FIFO word.
 */
static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;
	struct i2c_msg *msg = qup->msg;
	u32 addr = i2c_8bit_addr_from_msg(msg);
	u32 qup_tag;
	int idx;
	u32 val;

	if (qup->pos == 0) {
		val = QUP_TAG_START | addr;
		idx = 1;
		blk->tx_fifo_free--;
	} else {
		val = 0;
		idx = 0;
	}

	while (blk->tx_fifo_free && qup->pos < msg->len) {
		if (qup->pos == msg->len - 1)
			qup_tag = QUP_TAG_STOP;
		else
			qup_tag = QUP_TAG_DATA;

		if (idx & 1)
			val |= (qup_tag | msg->buf[qup->pos]) << QUP_MSW_SHIFT;
		else
			val = qup_tag | msg->buf[qup->pos];

		/* Write out the pair and the last odd value */
		if (idx & 1 || qup->pos == msg->len - 1)
			writel(val, qup->base + QUP_OUT_FIFO_BASE);

		qup->pos++;
		idx++;
		blk->tx_fifo_free--;
	}
}

/* Initialize per-message block bookkeeping (count of blk_xfer_limit blocks) */
static void qup_i2c_set_blk_data(struct qup_i2c_dev *qup,
				 struct i2c_msg *msg)
{
	qup->blk.pos = 0;
	qup->blk.data_len = msg->len;
	qup->blk.count = DIV_ROUND_UP(msg->len, qup->blk_xfer_limit);
}

/* Data length of the current block, capped at the per-block transfer limit */
static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
{
	int data_len;

	if (qup->blk.data_len > qup->blk_xfer_limit)
		data_len = qup->blk_xfer_limit;
	else
		data_len = qup->blk.data_len;

	return data_len;
}

/* True for an SMBus block-read style message (read with I2C_M_RECV_LEN) */
static bool qup_i2c_check_msg_len(struct i2c_msg *msg)
{
	return ((msg->flags & I2C_M_RD) && (msg->flags & I2C_M_RECV_LEN));
}

/*
 * Build the v2 tag sequence for an SMBus block read. The first phase reads
 * the one-byte length; the second phase reads the remaining data with a STOP.
 * Returns the number of tag bytes written to @tags.
 */
static int qup_i2c_set_tags_smb(u16 addr, u8 *tags, struct qup_i2c_dev *qup,
				struct i2c_msg *msg)
{
	int len = 0;

	if (qup->is_smbus_read) {
		tags[len++] = QUP_TAG_V2_DATARD_STOP;
		tags[len++] = qup_i2c_get_data_len(qup);
	} else {
		tags[len++] = QUP_TAG_V2_START;
		tags[len++] = addr & 0xff;

		if (msg->flags & I2C_M_TEN)
			tags[len++] = addr >> 8;

		tags[len++] = QUP_TAG_V2_DATARD;
		/* Read 1 byte indicating the length of the SMBus message */
		tags[len++] = 1;
	}
	return len;
}

/*
 * Build the v2 tag sequence (START/addr, DATARD/DATAWR variants, length)
 * for the current block of @msg. Returns the number of tag bytes.
 */
static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
			    struct i2c_msg *msg)
{
	u16 addr = i2c_8bit_addr_from_msg(msg);
	int len = 0;
	int data_len;

	int last = (qup->blk.pos == (qup->blk.count - 1)) && (qup->is_last);

	/* Handle tags for SMBus block read */
	if (qup_i2c_check_msg_len(msg))
		return qup_i2c_set_tags_smb(addr, tags, qup, msg);

	if (qup->blk.pos == 0) {
		tags[len++] = QUP_TAG_V2_START;
		tags[len++] = addr & 0xff;

		if (msg->flags & I2C_M_TEN)
			tags[len++] = addr >> 8;
	}

	/* Send _STOP commands for the last block */
	if (last) {
		if (msg->flags & I2C_M_RD)
			tags[len++] = QUP_TAG_V2_DATARD_STOP;
		else
			tags[len++] = QUP_TAG_V2_DATAWR_STOP;
	} else {
		if (msg->flags & I2C_M_RD)
			tags[len++] = qup->blk.pos == (qup->blk.count - 1) ?
				      QUP_TAG_V2_DATARD_NACK :
				      QUP_TAG_V2_DATARD;
		else
			tags[len++] = QUP_TAG_V2_DATAWR;
	}

	data_len = qup_i2c_get_data_len(qup);

	/* 0 implies 256 bytes */
	if (data_len == QUP_READ_LIMIT)
		tags[len++] = 0;
	else
		tags[len++] = data_len;

	return len;
}

/* DMA completion callback: wake the thread waiting on qup->xfer */
static void qup_i2c_bam_cb(void *data)
{
	struct qup_i2c_dev *qup = data;

	complete(&qup->xfer);
}

/* Fill one SG entry with @buf/@buflen and DMA-map it in direction @dir */
static int qup_sg_set_buf(struct scatterlist *sg, void *buf,
			  unsigned int buflen, struct qup_i2c_dev *qup,
			  int dir)
{
	int ret;

	sg_set_buf(sg, buf, buflen);
	ret = dma_map_sg(qup->dev, sg, 1, dir);
	if (!ret)
		return -EINVAL;

	return 0;
}

/* Release both DMA channels (if held) and clear the pointers */
static void qup_i2c_rel_dma(struct qup_i2c_dev *qup)
{
	if (qup->btx.dma)
		dma_release_channel(qup->btx.dma);
	if (qup->brx.dma)
		dma_release_channel(qup->brx.dma);
	qup->btx.dma = NULL;
	qup->brx.dma = NULL;
}

/*
 * Acquire the "tx" and "rx" DMA channels if not already held. On failure
 * of the rx channel, the tx channel is released again.
 */
static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
{
	int err;

	if (!qup->btx.dma) {
		qup->btx.dma = dma_request_chan(qup->dev, "tx");
		if (IS_ERR(qup->btx.dma)) {
			err = PTR_ERR(qup->btx.dma);
			qup->btx.dma = NULL;
			dev_err(qup->dev, "\n tx channel not available");
			return err;
		}
	}

	if (!qup->brx.dma) {
		qup->brx.dma = dma_request_chan(qup->dev, "rx");
		if (IS_ERR(qup->brx.dma)) {
			dev_err(qup->dev, "\n rx channel not available");
			err = PTR_ERR(qup->brx.dma);
			qup->brx.dma = NULL;
			/* free the first channel, too */
			qup_i2c_rel_dma(qup);
			return err;
		}
	}
	return 0;
}

/*
 * Build the scatter-gather lists (tags + data per block) for a BAM
 * transfer of @msg. Read messages get an extra 2-byte scratch SG entry
 * per block to absorb the incoming tag bytes.
 */
static int qup_i2c_bam_make_desc(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
	int ret = 0, limit = QUP_READ_LIMIT;
	u32 len = 0, blocks, rem;
	u32 i = 0, tlen, tx_len = 0;
	u8 *tags;

	qup->blk_xfer_limit = QUP_READ_LIMIT;
	qup_i2c_set_blk_data(qup, msg);

	blocks = qup->blk.count;
	rem = msg->len - (blocks - 1) * limit;

	if (msg->flags & I2C_M_RD) {
		while (qup->blk.pos < blocks) {
			tlen = (i == (blocks - 1)) ? rem : limit;
			tags = &qup->start_tag.start[qup->tag_buf_pos + len];
			len += qup_i2c_set_tags(tags, qup, msg);
			qup->blk.data_len -= tlen;

			/* scratch buf to read the start and len tags */
			ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
					     &qup->brx.tag.start[0],
					     2, qup, DMA_FROM_DEVICE);

			if (ret)
				return ret;

			ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
					     &msg->buf[limit * i],
					     tlen, qup,
					     DMA_FROM_DEVICE);
			if (ret)
				return ret;

			i++;
			qup->blk.pos = i;
		}
		ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
				     &qup->start_tag.start[qup->tag_buf_pos],
				     len, qup, DMA_TO_DEVICE);
		if (ret)
			return ret;

		qup->tag_buf_pos += len;
	} else {
		while (qup->blk.pos < blocks) {
			tlen = (i == (blocks - 1)) ? rem : limit;
			tags = &qup->start_tag.start[qup->tag_buf_pos + tx_len];
			len = qup_i2c_set_tags(tags, qup, msg);
			qup->blk.data_len -= tlen;

			ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
					     tags, len,
					     qup, DMA_TO_DEVICE);
			if (ret)
				return ret;

			tx_len += len;
			ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
					     &msg->buf[limit * i],
					     tlen, qup, DMA_TO_DEVICE);
			if (ret)
				return ret;
			i++;
			qup->blk.pos = i;
		}

		qup->tag_buf_pos += tx_len;
	}

	return 0;
}

/*
 * Append the EOT/FLUSH terminator tags, submit the prepared SG lists to
 * the dmaengine, and wait for completion. On error or timeout, schedules
 * a QUP flush and waits for the remaining BAM interrupts before
 * translating the failure into -ENXIO (NACK) or -EIO.
 */
static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
{
	struct dma_async_tx_descriptor *txd, *rxd = NULL;
	int ret = 0;
	dma_cookie_t cookie_rx, cookie_tx;
	u32 len = 0;
	u32 tx_cnt = qup->btx.sg_cnt, rx_cnt = qup->brx.sg_cnt;

	/* schedule the EOT and FLUSH I2C tags */
	len = 1;
	if (rx_cnt) {
		qup->btx.tag.start[0] = QUP_BAM_INPUT_EOT;
		len++;

		/* scratch buf to read the BAM EOT FLUSH tags */
		ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++],
				     &qup->brx.tag.start[0],
				     1, qup, DMA_FROM_DEVICE);
		if (ret)
			return ret;
	}

	qup->btx.tag.start[len - 1] = QUP_BAM_FLUSH_STOP;
	ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], &qup->btx.tag.start[0],
			     len, qup, DMA_TO_DEVICE);
	if (ret)
		return ret;

	txd = dmaengine_prep_slave_sg(qup->btx.dma, qup->btx.sg, tx_cnt,
				      DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
	if (!txd) {
		dev_err(qup->dev, "failed to get tx desc\n");
		ret = -EINVAL;
		goto desc_err;
	}

	/* with no rx, completion is signalled from the tx side */
	if (!rx_cnt) {
		txd->callback = qup_i2c_bam_cb;
		txd->callback_param = qup;
	}

	cookie_tx = dmaengine_submit(txd);
	if (dma_submit_error(cookie_tx)) {
		ret = -EINVAL;
		goto desc_err;
	}

	dma_async_issue_pending(qup->btx.dma);

	if (rx_cnt) {
		rxd = dmaengine_prep_slave_sg(qup->brx.dma, qup->brx.sg,
					      rx_cnt, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
		if (!rxd) {
			dev_err(qup->dev, "failed to get rx desc\n");
			ret = -EINVAL;

			/* abort TX descriptors */
			dmaengine_terminate_sync(qup->btx.dma);
			goto desc_err;
		}

		rxd->callback = qup_i2c_bam_cb;
		rxd->callback_param = qup;
		cookie_rx = dmaengine_submit(rxd);
		if (dma_submit_error(cookie_rx)) {
			ret = -EINVAL;
			goto desc_err;
		}

		dma_async_issue_pending(qup->brx.dma);
	}

	if (!wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout))
		ret = -ETIMEDOUT;

	if (ret || qup->bus_err || qup->qup_err) {
		reinit_completion(&qup->xfer);

		ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
		if (ret) {
			dev_err(qup->dev, "change to run state timed out");
			goto desc_err;
		}

		qup_i2c_flush(qup);

		/* wait for remaining interrupts to occur */
		if (!wait_for_completion_timeout(&qup->xfer, HZ))
			dev_err(qup->dev, "flush timed out\n");

		ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO;
	}

desc_err:
	dma_unmap_sg(qup->dev, qup->btx.sg, tx_cnt, DMA_TO_DEVICE);

	if (rx_cnt)
		dma_unmap_sg(qup->dev, qup->brx.sg, rx_cnt,
			     DMA_FROM_DEVICE);

	return ret;
}

/* Reset SG fill counters and tag-buffer position for a fresh BAM batch */
static void qup_i2c_bam_clear_tag_buffers(struct qup_i2c_dev *qup)
{
	qup->btx.sg_cnt = 0;
	qup->brx.sg_cnt = 0;
	qup->tag_buf_pos = 0;
}

/*
 * Execute @num i2c messages via BAM (DMA): votes bandwidth, configures the
 * QUP for BAM mode, batches message descriptors and schedules them.
 */
static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
			    int num)
{
	struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
	int ret = 0;
	int idx = 0;

	ret = qup_i2c_vote_bw(qup, qup->src_clk_freq);
	if (ret)
		return ret;

	enable_irq(qup->irq);
	ret = qup_i2c_req_dma(qup);

	if (ret)
		goto out;

	writel(0, qup->base + QUP_MX_INPUT_CNT);
	writel(0, qup->base + QUP_MX_OUTPUT_CNT);

	/* set BAM mode */
	writel(QUP_REPACK_EN | QUP_BAM_MODE, qup->base + QUP_IO_MODE);

	/* mask fifo irqs */
	writel((0x3 << 8), qup->base + QUP_OPERATIONAL_MASK);

	/* set RUN STATE */
	ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
	if (ret)
		goto out;

	writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
	qup_i2c_bam_clear_tag_buffers(qup);

	for (idx = 0; idx < num; idx++) {
		qup->msg = msg + idx;
		qup->is_last = idx == (num - 1);

		ret = qup_i2c_bam_make_desc(qup, qup->msg);
		if (ret)
			break;

		/*
		 * Make DMA descriptor and schedule the BAM transfer if it's
		 * already crossed the maximum length. Since the memory for all
		 * tags buffers have been taken for 2 maximum possible
		 * transfers length so it will never cross the buffer actual
		 * length.
		 */
		if (qup->btx.sg_cnt > qup->max_xfer_sg_len ||
		    qup->brx.sg_cnt > qup->max_xfer_sg_len ||
		    qup->is_last) {
			ret = qup_i2c_bam_schedule_desc(qup);
			if (ret)
				break;

			qup_i2c_bam_clear_tag_buffers(qup);
		}
	}

out:
	disable_irq(qup->irq);

	qup->msg = NULL;
	return ret;
}

/*
 * Block until the interrupt handler completes the transfer or the timeout
 * expires. On timeout the QUP is software-reset. Maps bus/core errors to
 * -ENXIO (NACK) or -EIO.
 */
static int qup_i2c_wait_for_complete(struct qup_i2c_dev *qup,
				     struct i2c_msg *msg)
{
	unsigned long left;
	int ret = 0;

	left = wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout);
	if (!left) {
		writel(1, qup->base + QUP_SW_RESET);
		ret = -ETIMEDOUT;
	}

	if (qup->bus_err || qup->qup_err)
		ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO;

	return ret;
}

/*
 * Drain the RX FIFO for a v1 read: each 32-bit FIFO word carries two
 * received data bytes (low byte and the byte above QUP_MSW_SHIFT).
 */
static void qup_i2c_read_rx_fifo_v1(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;
	struct i2c_msg *msg = qup->msg;
	u32 val = 0;
	int idx = 0;

	while (blk->fifo_available && qup->pos < msg->len) {
		if ((idx & 1) == 0) {
			/* Reading 2 words at time */
			val = readl(qup->base + QUP_IN_FIFO_BASE);
			msg->buf[qup->pos++] = val & 0xFF;
		} else {
			msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT;
		}
		idx++;
		blk->fifo_available--;
	}

	if (qup->pos == msg->len)
		blk->rx_bytes_read = true;
}

/* Write the v1 START/addr + REC/len tag word that kicks off a read */
static void qup_i2c_write_rx_tags_v1(struct qup_i2c_dev *qup)
{
	struct i2c_msg *msg = qup->msg;
	u32 addr, len, val;

	addr = i2c_8bit_addr_from_msg(msg);

	/* 0 is used to specify a length 256 (QUP_READ_LIMIT) */
	len = (msg->len == QUP_READ_LIMIT) ? 0 : msg->len;

	val = ((QUP_TAG_REC | len) << QUP_MSW_SHIFT) | QUP_TAG_START | addr;
	writel(val, qup->base + QUP_OUT_FIFO_BASE);
}

/*
 * Program QUP_CONFIG / QUP_IO_MODE and the transfer counters for a v1
 * transfer, choosing FIFO vs block mode from the total lengths.
 */
static void qup_i2c_conf_v1(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;
	u32 qup_config = I2C_MINI_CORE | I2C_N_VAL;
	u32 io_mode = QUP_REPACK_EN;

	blk->is_tx_blk_mode = blk->total_tx_len > qup->out_fifo_sz;
	blk->is_rx_blk_mode = blk->total_rx_len > qup->in_fifo_sz;

	if (blk->is_tx_blk_mode) {
		io_mode |= QUP_OUTPUT_BLK_MODE;
		writel(0, qup->base + QUP_MX_WRITE_CNT);
		writel(blk->total_tx_len, qup->base + QUP_MX_OUTPUT_CNT);
	} else {
		writel(0, qup->base + QUP_MX_OUTPUT_CNT);
		writel(blk->total_tx_len, qup->base + QUP_MX_WRITE_CNT);
	}

	if (blk->total_rx_len) {
		if (blk->is_rx_blk_mode) {
			io_mode |= QUP_INPUT_BLK_MODE;
			writel(0, qup->base + QUP_MX_READ_CNT);
			writel(blk->total_rx_len, qup->base + QUP_MX_INPUT_CNT);
		} else {
			writel(0, qup->base + QUP_MX_INPUT_CNT);
			writel(blk->total_rx_len, qup->base + QUP_MX_READ_CNT);
		}
	} else {
		qup_config |= QUP_NO_INPUT;
	}

	writel(qup_config, qup->base + QUP_CONFIG);
	writel(io_mode, qup->base + QUP_IO_MODE);
}

/* Reset the per-transfer FIFO bookkeeping for a v1 transfer */
static void qup_i2c_clear_blk_v1(struct qup_i2c_block *blk)
{
	blk->tx_fifo_free = 0;
	blk->fifo_available = 0;
	blk->rx_bytes_read = false;
}

/*
 * Run one complete v1 sub-transfer: configure the QUP, preload the TX FIFO
 * in FIFO mode, start the state machine and wait for completion, then
 * verify the bus has gone idle.
 */
static int qup_i2c_conf_xfer_v1(struct qup_i2c_dev *qup, bool is_rx)
{
	struct qup_i2c_block *blk = &qup->blk;
	int ret;

	qup_i2c_clear_blk_v1(blk);
	qup_i2c_conf_v1(qup);
	ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
	if (ret)
		return ret;

	writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);

	ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
	if (ret)
		return ret;

	reinit_completion(&qup->xfer);
	enable_irq(qup->irq);
	if (!blk->is_tx_blk_mode) {
		blk->tx_fifo_free = qup->out_fifo_sz;

		if (is_rx)
			qup_i2c_write_rx_tags_v1(qup);
		else
			qup_i2c_write_tx_fifo_v1(qup);
	}

	ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
	if (ret)
		goto err;

	ret = qup_i2c_wait_for_complete(qup, qup->msg);
	if (ret)
		goto err;

	ret = qup_i2c_bus_active(qup, ONE_BYTE);

err:
	disable_irq(qup->irq);
	return ret;
}

/* v1 write: total tx is the message plus one address byte, no rx */
static int qup_i2c_write_one(struct qup_i2c_dev *qup)
{
	struct i2c_msg *msg = qup->msg;
	struct qup_i2c_block *blk = &qup->blk;

	qup->pos = 0;
	blk->total_tx_len = msg->len + 1;
	blk->total_rx_len = 0;

	return qup_i2c_conf_xfer_v1(qup, false);
}

/* v1 read: tx is the 2-byte tag word, rx is the full message length */
static int qup_i2c_read_one(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;

	qup->pos = 0;
	blk->total_tx_len = 2;
	blk->total_rx_len = qup->msg->len;

	return qup_i2c_conf_xfer_v1(qup, true);
}

/*
 * i2c_algorithm master_xfer for QUP v1: resets the core, then runs each
 * message as an individual read or write sub-transfer. SMBus block reads
 * (I2C_M_RECV_LEN) are not supported on v1 and return -EINVAL.
 */
static int qup_i2c_xfer(struct i2c_adapter *adap,
			struct i2c_msg msgs[],
			int num)
{
	struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
	int ret, idx;

	ret = pm_runtime_get_sync(qup->dev);
	if (ret < 0)
		goto out;

	qup->bus_err = 0;
	qup->qup_err = 0;

	writel(1, qup->base + QUP_SW_RESET);
	ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
	if (ret)
		goto out;

	/* Configure QUP as I2C mini core */
	writel(I2C_MINI_CORE | I2C_N_VAL, qup->base + QUP_CONFIG);

	for (idx = 0; idx < num; idx++) {
		if (qup_i2c_poll_state_i2c_master(qup)) {
			ret = -EIO;
			goto out;
		}

		if (qup_i2c_check_msg_len(&msgs[idx])) {
			ret = -EINVAL;
			goto out;
		}

		qup->msg = &msgs[idx];
		if (msgs[idx].flags & I2C_M_RD)
			ret = qup_i2c_read_one(qup);
		else
			ret = qup_i2c_write_one(qup);

		if (ret)
			break;

		ret = qup_i2c_change_state(qup, QUP_RESET_STATE);
		if (ret)
			break;
	}

	if (ret == 0)
		ret = num;
out:

	pm_runtime_mark_last_busy(qup->dev);
	pm_runtime_put_autosuspend(qup->dev);

	return ret;
}

/*
 * Configure registers related with reconfiguration during run and call it
 * before each i2c sub transfer.
 */
static void qup_i2c_conf_count_v2(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;
	u32 qup_config = I2C_MINI_CORE | I2C_N_VAL_V2;

	if (blk->is_tx_blk_mode)
		writel(qup->config_run | blk->total_tx_len,
		       qup->base + QUP_MX_OUTPUT_CNT);
	else
		writel(qup->config_run | blk->total_tx_len,
		       qup->base + QUP_MX_WRITE_CNT);

	if (blk->total_rx_len) {
		if (blk->is_rx_blk_mode)
			writel(qup->config_run | blk->total_rx_len,
			       qup->base + QUP_MX_INPUT_CNT);
		else
			writel(qup->config_run | blk->total_rx_len,
			       qup->base + QUP_MX_READ_CNT);
	} else {
		qup_config |= QUP_NO_INPUT;
	}

	writel(qup_config, qup->base + QUP_CONFIG);
}

/*
 * Configure registers related with transfer mode (FIFO/Block)
 * before starting of i2c transfer. It will be called only once in
 * QUP RESET state.
 */
static void qup_i2c_conf_mode_v2(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;
	u32 io_mode = QUP_REPACK_EN;

	if (blk->is_tx_blk_mode) {
		io_mode |= QUP_OUTPUT_BLK_MODE;
		writel(0, qup->base + QUP_MX_WRITE_CNT);
	} else {
		writel(0, qup->base + QUP_MX_OUTPUT_CNT);
	}

	if (blk->is_rx_blk_mode) {
		io_mode |= QUP_INPUT_BLK_MODE;
		writel(0, qup->base + QUP_MX_READ_CNT);
	} else {
		writel(0, qup->base + QUP_MX_INPUT_CNT);
	}

	writel(io_mode, qup->base + QUP_IO_MODE);
}

/* Clear required variables before starting of any QUP v2 sub transfer.
 */
static void qup_i2c_clear_blk_v2(struct qup_i2c_block *blk)
{
	/* TX-side per-sub-transfer state */
	blk->send_last_word = false;
	blk->tx_tags_sent = false;
	blk->tx_fifo_data = 0;
	blk->tx_fifo_data_pos = 0;
	blk->tx_fifo_free = 0;

	/* RX-side per-sub-transfer state */
	blk->rx_tags_fetched = false;
	blk->rx_bytes_read = false;
	blk->rx_fifo_data = 0;
	blk->rx_fifo_data_pos = 0;
	blk->fifo_available = 0;
}

/* Receive data from RX FIFO for read message in QUP v2 i2c transfer. */
static void qup_i2c_recv_data(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;
	int j;

	/*
	 * j is the byte position (0..3) inside the current 32-bit FIFO word;
	 * it persists across calls via rx_fifo_data_pos, so a word partially
	 * consumed in one call is continued in the next.
	 */
	for (j = blk->rx_fifo_data_pos;
	     blk->cur_blk_len && blk->fifo_available;
	     blk->cur_blk_len--, blk->fifo_available--) {
		/* Fetch a fresh FIFO word every fourth byte */
		if (j == 0)
			blk->rx_fifo_data = readl(qup->base + QUP_IN_FIFO_BASE);

		*(blk->cur_data++) = blk->rx_fifo_data;
		blk->rx_fifo_data >>= 8;

		if (j == 3)
			j = 0;
		else
			j++;
	}

	blk->rx_fifo_data_pos = j;
}

/* Receive tags for read message in QUP v2 i2c transfer. */
static void qup_i2c_recv_tags(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;

	/* Read one FIFO word and discard the leading rx_tag_len tag bytes */
	blk->rx_fifo_data = readl(qup->base + QUP_IN_FIFO_BASE);
	blk->rx_fifo_data >>= blk->rx_tag_len * 8;
	blk->rx_fifo_data_pos = blk->rx_tag_len;
	blk->fifo_available -= blk->rx_tag_len;
}

/*
 * Read the data and tags from RX FIFO. Since in read case, the tags will be
 * preceded by received data bytes so
 * 1. Check if rx_tags_fetched is false i.e. the start of QUP block so receive
 *    all tag bytes and discard that.
 * 2. Read the data from RX FIFO. When all the data bytes have been read then
 *    set rx_bytes_read to true.
 */
static void qup_i2c_read_rx_fifo_v2(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;

	/* Tags arrive first in each QUP block; fetch and discard them once */
	if (!blk->rx_tags_fetched) {
		qup_i2c_recv_tags(qup);
		blk->rx_tags_fetched = true;
	}

	qup_i2c_recv_data(qup);
	if (!blk->cur_blk_len)
		blk->rx_bytes_read = true;
}

/*
 * Write bytes in TX FIFO for write message in QUP v2 i2c transfer. QUP TX FIFO
 * write works on word basis (4 bytes). Append new data byte write for TX FIFO
 * in tx_fifo_data and write to TX FIFO when all the 4 bytes are present.
 */
static void
qup_i2c_write_blk_data(struct qup_i2c_dev *qup, u8 **data, unsigned int *len)
{
	struct qup_i2c_block *blk = &qup->blk;
	unsigned int j;

	/*
	 * j is the byte position (0..3) inside the word being assembled; it
	 * persists across calls via tx_fifo_data_pos, and a partially filled
	 * word remains pending in tx_fifo_data when the loop exits early.
	 */
	for (j = blk->tx_fifo_data_pos; *len && blk->tx_fifo_free;
	     (*len)--, blk->tx_fifo_free--) {
		blk->tx_fifo_data |= *(*data)++ << (j * 8);
		if (j == 3) {
			writel(blk->tx_fifo_data,
			       qup->base + QUP_OUT_FIFO_BASE);
			blk->tx_fifo_data = 0x0;
			j = 0;
		} else {
			j++;
		}
	}

	blk->tx_fifo_data_pos = j;
}

/* Transfer tags for read message in QUP v2 i2c transfer. */
static void qup_i2c_write_rx_tags_v2(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;

	qup_i2c_write_blk_data(qup, &blk->cur_tx_tags, &blk->tx_tag_len);
	/* Flush any partially assembled word so all tag bytes reach the FIFO */
	if (blk->tx_fifo_data_pos)
		writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE);
}

/*
 * Write the data and tags in TX FIFO. Since in write case, both tags and data
 * need to be written and QUP write tags can have maximum 256 data length, so
 *
 * 1. Check if tx_tags_sent is false i.e. the start of QUP block so write the
 *    tags to TX FIFO and set tx_tags_sent to true.
 * 2. Check if send_last_word is true. It will be set when last few data bytes
 *    (less than 4 bytes) are remaining to be written in FIFO because of no FIFO
 *    space. All these data bytes are available in tx_fifo_data so write them
 *    to the FIFO.
 * 3. Write the data to TX FIFO and check for cur_blk_len. If it is non zero
 *    then more data is pending otherwise following 3 cases can be possible
 *    a. if tx_fifo_data_pos is zero i.e. all the data bytes in this block
 *       have been written in TX FIFO so nothing else is required.
 *    b. tx_fifo_free is non zero i.e tx FIFO is free so copy the remaining data
 *       from tx_fifo_data to tx FIFO. Since, qup_i2c_write_blk_data do write
 *       in 4 bytes and FIFO space is in multiple of 4 bytes so tx_fifo_free
 *       will be always greater than or equal to 4 bytes.
 *    c. tx_fifo_free is zero. In this case, last few bytes (less than 4
 *       bytes) are copied to tx_fifo_data but couldn't be sent because of
 *       FIFO full so make send_last_word true.
 */
static void qup_i2c_write_tx_fifo_v2(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;

	if (!blk->tx_tags_sent) {
		qup_i2c_write_blk_data(qup, &blk->cur_tx_tags,
				       &blk->tx_tag_len);
		blk->tx_tags_sent = true;
	}

	if (blk->send_last_word)
		goto send_last_word;

	qup_i2c_write_blk_data(qup, &blk->cur_data, &blk->cur_blk_len);
	if (!blk->cur_blk_len) {
		if (!blk->tx_fifo_data_pos)
			return;

		if (blk->tx_fifo_free)
			goto send_last_word;

		blk->send_last_word = true;
	}

	return;

send_last_word:
	writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE);
}

/*
 * Main transfer function which read or write i2c data.
 * The QUP v2 supports reconfiguration during run in which multiple i2c sub
 * transfers can be scheduled.
 */
static int
qup_i2c_conf_xfer_v2(struct qup_i2c_dev *qup, bool is_rx, bool is_first,
		     bool change_pause_state)
{
	struct qup_i2c_block *blk = &qup->blk;
	struct i2c_msg *msg = qup->msg;
	int ret;

	/*
	 * Check if it's an SMBus Block read for which the top level read will
	 * be done in 2 QUP reads. One with message length 1 while the other
	 * one is with the actual length.
	 */
	if (qup_i2c_check_msg_len(msg)) {
		if (qup->is_smbus_read) {
			/*
			 * If the message length is already read in
			 * the first byte of the buffer, account for
			 * that by setting the offset
			 */
			blk->cur_data += 1;
			is_first = false;
		} else {
			change_pause_state = false;
		}
	}

	/* Sub transfers after the first reconfigure the QUP during RUN */
	qup->config_run = is_first ? 0 : QUP_I2C_MX_CONFIG_DURING_RUN;

	qup_i2c_clear_blk_v2(blk);
	qup_i2c_conf_count_v2(qup);

	/* If it is first sub transfer, then configure i2c bus clocks */
	if (is_first) {
		ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
		if (ret)
			return ret;

		writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);

		ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
		if (ret)
			return ret;
	}

	reinit_completion(&qup->xfer);
	enable_irq(qup->irq);
	/*
	 * In FIFO mode, the TX FIFO can be written directly, while in block
	 * mode it will be written after getting the OUT_BLOCK_WRITE_REQ
	 * interrupt
	 */
	if (!blk->is_tx_blk_mode) {
		blk->tx_fifo_free = qup->out_fifo_sz;

		if (is_rx)
			qup_i2c_write_rx_tags_v2(qup);
		else
			qup_i2c_write_tx_fifo_v2(qup);
	}

	ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
	if (ret)
		goto err;

	ret = qup_i2c_wait_for_complete(qup, msg);
	if (ret)
		goto err;

	/* Move to pause state for all the transfers, except last one */
	if (change_pause_state) {
		ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
		if (ret)
			goto err;
	}

err:
	disable_irq(qup->irq);
	return ret;
}

/*
 * Transfer one read/write message in i2c transfer. It splits the message into
 * multiple of blk_xfer_limit data length blocks and schedule each
 * QUP block individually.
 */
static int qup_i2c_xfer_v2_msg(struct qup_i2c_dev *qup, int msg_id, bool is_rx)
{
	int ret = 0;
	unsigned int data_len, i;
	struct i2c_msg *msg = qup->msg;
	struct qup_i2c_block *blk = &qup->blk;
	u8 *msg_buf = msg->buf;

	qup->blk_xfer_limit = is_rx ? RECV_MAX_DATA_LEN : QUP_READ_LIMIT;
	qup_i2c_set_blk_data(qup, msg);

	for (i = 0; i < blk->count; i++) {
		data_len = qup_i2c_get_data_len(qup);
		blk->pos = i;
		blk->cur_tx_tags = blk->tags;
		blk->cur_blk_len = data_len;
		blk->tx_tag_len =
			qup_i2c_set_tags(blk->cur_tx_tags, qup, qup->msg);

		blk->cur_data = msg_buf;

		if (is_rx) {
			blk->total_tx_len = blk->tx_tag_len;
			blk->rx_tag_len = 2;
			blk->total_rx_len = blk->rx_tag_len + data_len;
		} else {
			blk->total_tx_len = blk->tx_tag_len + data_len;
			blk->total_rx_len = 0;
		}

		/*
		 * First sub transfer of the first message configures the bus;
		 * all but the very last sub transfer park in PAUSE state.
		 */
		ret = qup_i2c_conf_xfer_v2(qup, is_rx, !msg_id && !i,
					   !qup->is_last || i < blk->count - 1);
		if (ret)
			return ret;

		/* Handle SMBus block read length */
		if (qup_i2c_check_msg_len(msg) && msg->len == 1 &&
		    !qup->is_smbus_read) {
			if (msg->buf[0] > I2C_SMBUS_BLOCK_MAX)
				return -EPROTO;

			/* Re-run the read with the length just received */
			msg->len = msg->buf[0];
			qup->is_smbus_read = true;
			ret = qup_i2c_xfer_v2_msg(qup, msg_id, true);
			qup->is_smbus_read = false;
			if (ret)
				return ret;

			msg->len += 1;
		}

		msg_buf += data_len;
		blk->data_len -= qup->blk_xfer_limit;
	}

	return ret;
}

/*
 * QUP v2 supports 3 modes
 * Programmed IO using FIFO mode : Less than FIFO size
 * Programmed IO using Block mode :
 * Greater than FIFO size
 * DMA using BAM : Appropriate for any transaction size but the address should
 *                 be DMA applicable
 *
 * This function determines the mode which will be used for this transfer. An
 * i2c transfer contains multiple messages. Following are the rules to
 * determine the mode used.
 * 1. Determine complete length, maximum tx and rx length for complete transfer.
 * 2. If complete transfer length is greater than fifo size then use the DMA
 *    mode.
 * 3. In FIFO or block mode, tx and rx can operate in different mode so check
 *    for maximum tx and rx length to determine mode.
 */
static int
qup_i2c_determine_mode_v2(struct qup_i2c_dev *qup,
			  struct i2c_msg msgs[], int num)
{
	int idx;
	bool no_dma = false;
	unsigned int max_tx_len = 0, max_rx_len = 0, total_len = 0;

	/* All i2c_msgs should be transferred using either dma or cpu */
	for (idx = 0; idx < num; idx++) {
		if (msgs[idx].flags & I2C_M_RD)
			max_rx_len = max_t(unsigned int, max_rx_len,
					   msgs[idx].len);
		else
			max_tx_len = max_t(unsigned int, max_tx_len,
					   msgs[idx].len);

		/* vmalloc'ed buffers cannot be DMA-mapped directly */
		if (is_vmalloc_addr(msgs[idx].buf))
			no_dma = true;

		total_len += msgs[idx].len;
	}

	if (!no_dma && qup->is_dma &&
	    (total_len > qup->out_fifo_sz || total_len > qup->in_fifo_sz)) {
		qup->use_dma = true;
	} else {
		/* Leave headroom for the tag bytes that share the FIFO */
		qup->blk.is_tx_blk_mode = max_tx_len > qup->out_fifo_sz -
			QUP_MAX_TAGS_LEN;
		qup->blk.is_rx_blk_mode = max_rx_len > qup->in_fifo_sz -
			READ_RX_TAGS_LEN;
	}

	return 0;
}

/*
 * i2c_algorithm .xfer handler for QUP v2: picks FIFO/block/DMA mode, resets
 * and configures the core, then runs the messages, returning the number of
 * messages transferred on success.
 */
static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
			   struct i2c_msg msgs[],
			   int num)
{
	struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
	int ret, idx = 0;

	qup->bus_err = 0;
	qup->qup_err = 0;

	ret = pm_runtime_get_sync(qup->dev);
	if (ret < 0)
		goto out;

	ret = qup_i2c_determine_mode_v2(qup, msgs, num);
	if (ret)
		goto out;

	writel(1, qup->base + QUP_SW_RESET);
	ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
	if (ret)
		goto out;

	/* Configure QUP as I2C mini core */
	writel(I2C_MINI_CORE | I2C_N_VAL_V2, qup->base + QUP_CONFIG);
	writel(QUP_V2_TAGS_EN, qup->base + QUP_I2C_MASTER_GEN);

	if (qup_i2c_poll_state_i2c_master(qup)) {
		ret = -EIO;
		goto out;
	}

	if (qup->use_dma) {
		reinit_completion(&qup->xfer);
		ret = qup_i2c_bam_xfer(adap, &msgs[0], num);
		qup->use_dma = false;
	} else {
		qup_i2c_conf_mode_v2(qup);

		for (idx = 0; idx < num; idx++) {
			qup->msg = &msgs[idx];
			qup->is_last = idx == (num - 1);

			ret = qup_i2c_xfer_v2_msg(qup, idx,
						  !!(msgs[idx].flags & I2C_M_RD));
			if (ret)
				break;
		}
		qup->msg = NULL;
	}

	if (!ret)
		ret = qup_i2c_bus_active(qup, ONE_BYTE);

	if (!ret)
		qup_i2c_change_state(qup, QUP_RESET_STATE);

	if (ret == 0)
		ret = num;
out:
	pm_runtime_mark_last_busy(qup->dev);
	pm_runtime_put_autosuspend(qup->dev);

	return ret;
}

/* Advertised functionality; zero-length SMBUS_QUICK is not supported */
static u32 qup_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL_ALL & ~I2C_FUNC_SMBUS_QUICK);
}

static const struct i2c_algorithm qup_i2c_algo = {
	.xfer = qup_i2c_xfer,
	.functionality = qup_i2c_func,
};

static const struct i2c_algorithm qup_i2c_algo_v2 = {
	.xfer = qup_i2c_xfer_v2,
	.functionality = qup_i2c_func,
};

/*
 * The QUP block will issue a NACK and STOP on the bus when reaching
 * the end of the read, the length of the read is specified as one byte
 * which limits the possible read to 256 (QUP_READ_LIMIT) bytes.
1652 */ 1653 static const struct i2c_adapter_quirks qup_i2c_quirks = { 1654 .flags = I2C_AQ_NO_ZERO_LEN, 1655 .max_read_len = QUP_READ_LIMIT, 1656 }; 1657 1658 static const struct i2c_adapter_quirks qup_i2c_quirks_v2 = { 1659 .flags = I2C_AQ_NO_ZERO_LEN, 1660 }; 1661 1662 static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup) 1663 { 1664 clk_prepare_enable(qup->clk); 1665 clk_prepare_enable(qup->pclk); 1666 } 1667 1668 static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup) 1669 { 1670 u32 config; 1671 1672 qup_i2c_change_state(qup, QUP_RESET_STATE); 1673 clk_disable_unprepare(qup->clk); 1674 config = readl(qup->base + QUP_CONFIG); 1675 config |= QUP_CLOCK_AUTO_GATE; 1676 writel(config, qup->base + QUP_CONFIG); 1677 qup_i2c_vote_bw(qup, 0); 1678 clk_disable_unprepare(qup->pclk); 1679 } 1680 1681 static const struct acpi_device_id qup_i2c_acpi_match[] = { 1682 { "QCOM8010"}, 1683 { } 1684 }; 1685 MODULE_DEVICE_TABLE(acpi, qup_i2c_acpi_match); 1686 1687 static int qup_i2c_probe(struct platform_device *pdev) 1688 { 1689 static const int blk_sizes[] = {4, 16, 32}; 1690 struct qup_i2c_dev *qup; 1691 unsigned long one_bit_t; 1692 u32 io_mode, hw_ver, size; 1693 int ret, fs_div, hs_div; 1694 u32 src_clk_freq = DEFAULT_SRC_CLK; 1695 u32 clk_freq = DEFAULT_CLK_FREQ; 1696 int blocks; 1697 bool is_qup_v1; 1698 1699 qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL); 1700 if (!qup) 1701 return -ENOMEM; 1702 1703 qup->dev = &pdev->dev; 1704 init_completion(&qup->xfer); 1705 platform_set_drvdata(pdev, qup); 1706 1707 if (scl_freq) { 1708 dev_notice(qup->dev, "Using override frequency of %u\n", scl_freq); 1709 clk_freq = scl_freq; 1710 } else { 1711 ret = device_property_read_u32(qup->dev, "clock-frequency", &clk_freq); 1712 if (ret) { 1713 dev_notice(qup->dev, "using default clock-frequency %d", 1714 DEFAULT_CLK_FREQ); 1715 } 1716 } 1717 1718 if (device_is_compatible(&pdev->dev, "qcom,i2c-qup-v1.1.1")) { 1719 qup->adap.algo = &qup_i2c_algo; 1720 qup->adap.quirks = 
&qup_i2c_quirks; 1721 is_qup_v1 = true; 1722 } else { 1723 qup->adap.algo = &qup_i2c_algo_v2; 1724 qup->adap.quirks = &qup_i2c_quirks_v2; 1725 is_qup_v1 = false; 1726 if (acpi_match_device(qup_i2c_acpi_match, qup->dev)) 1727 goto nodma; 1728 else 1729 ret = qup_i2c_req_dma(qup); 1730 1731 if (ret == -EPROBE_DEFER) 1732 goto fail_dma; 1733 else if (ret != 0) 1734 goto nodma; 1735 1736 qup->max_xfer_sg_len = (MX_BLOCKS << 1); 1737 blocks = (MX_DMA_BLOCKS << 1) + 1; 1738 qup->btx.sg = devm_kcalloc(&pdev->dev, 1739 blocks, sizeof(*qup->btx.sg), 1740 GFP_KERNEL); 1741 if (!qup->btx.sg) { 1742 ret = -ENOMEM; 1743 goto fail_dma; 1744 } 1745 sg_init_table(qup->btx.sg, blocks); 1746 1747 qup->brx.sg = devm_kcalloc(&pdev->dev, 1748 blocks, sizeof(*qup->brx.sg), 1749 GFP_KERNEL); 1750 if (!qup->brx.sg) { 1751 ret = -ENOMEM; 1752 goto fail_dma; 1753 } 1754 sg_init_table(qup->brx.sg, blocks); 1755 1756 /* 2 tag bytes for each block + 5 for start, stop tags */ 1757 size = blocks * 2 + 5; 1758 1759 qup->start_tag.start = devm_kzalloc(&pdev->dev, 1760 size, GFP_KERNEL); 1761 if (!qup->start_tag.start) { 1762 ret = -ENOMEM; 1763 goto fail_dma; 1764 } 1765 1766 qup->brx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL); 1767 if (!qup->brx.tag.start) { 1768 ret = -ENOMEM; 1769 goto fail_dma; 1770 } 1771 1772 qup->btx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL); 1773 if (!qup->btx.tag.start) { 1774 ret = -ENOMEM; 1775 goto fail_dma; 1776 } 1777 qup->is_dma = true; 1778 1779 qup->icc_path = devm_of_icc_get(&pdev->dev, NULL); 1780 if (IS_ERR(qup->icc_path)) 1781 return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path), 1782 "failed to get interconnect path\n"); 1783 } 1784 1785 nodma: 1786 /* We support frequencies up to FAST Mode Plus (1MHz) */ 1787 if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) { 1788 dev_err(qup->dev, "clock frequency not supported %d\n", 1789 clk_freq); 1790 ret = -EINVAL; 1791 goto fail_dma; 1792 } 1793 1794 qup->base = 
devm_platform_ioremap_resource(pdev, 0); 1795 if (IS_ERR(qup->base)) { 1796 ret = PTR_ERR(qup->base); 1797 goto fail_dma; 1798 } 1799 1800 qup->irq = platform_get_irq(pdev, 0); 1801 if (qup->irq < 0) { 1802 ret = qup->irq; 1803 goto fail_dma; 1804 } 1805 1806 if (has_acpi_companion(qup->dev)) { 1807 ret = device_property_read_u32(qup->dev, 1808 "src-clock-hz", &src_clk_freq); 1809 if (ret) { 1810 dev_notice(qup->dev, "using default src-clock-hz %d", 1811 DEFAULT_SRC_CLK); 1812 } 1813 ACPI_COMPANION_SET(&qup->adap.dev, ACPI_COMPANION(qup->dev)); 1814 } else { 1815 qup->clk = devm_clk_get(qup->dev, "core"); 1816 if (IS_ERR(qup->clk)) { 1817 dev_err(qup->dev, "Could not get core clock\n"); 1818 ret = PTR_ERR(qup->clk); 1819 goto fail_dma; 1820 } 1821 1822 qup->pclk = devm_clk_get(qup->dev, "iface"); 1823 if (IS_ERR(qup->pclk)) { 1824 dev_err(qup->dev, "Could not get iface clock\n"); 1825 ret = PTR_ERR(qup->pclk); 1826 goto fail_dma; 1827 } 1828 qup_i2c_enable_clocks(qup); 1829 src_clk_freq = clk_get_rate(qup->clk); 1830 } 1831 qup->src_clk_freq = src_clk_freq; 1832 1833 /* 1834 * Bootloaders might leave a pending interrupt on certain QUP's, 1835 * so we reset the core before registering for interrupts. 1836 */ 1837 writel(1, qup->base + QUP_SW_RESET); 1838 ret = qup_i2c_poll_state_valid(qup); 1839 if (ret) 1840 goto fail; 1841 1842 ret = devm_request_irq(qup->dev, qup->irq, qup_i2c_interrupt, 1843 IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, 1844 "i2c_qup", qup); 1845 if (ret) { 1846 dev_err(qup->dev, "Request %d IRQ failed\n", qup->irq); 1847 goto fail; 1848 } 1849 1850 hw_ver = readl(qup->base + QUP_HW_VERSION); 1851 dev_dbg(qup->dev, "Revision %x\n", hw_ver); 1852 1853 io_mode = readl(qup->base + QUP_IO_MODE); 1854 1855 /* 1856 * The block/fifo size w.r.t. 
'actual data' is 1/2 due to 'tag' 1857 * associated with each byte written/received 1858 */ 1859 size = QUP_OUTPUT_BLOCK_SIZE(io_mode); 1860 if (size >= ARRAY_SIZE(blk_sizes)) { 1861 ret = -EIO; 1862 goto fail; 1863 } 1864 qup->out_blk_sz = blk_sizes[size]; 1865 1866 size = QUP_INPUT_BLOCK_SIZE(io_mode); 1867 if (size >= ARRAY_SIZE(blk_sizes)) { 1868 ret = -EIO; 1869 goto fail; 1870 } 1871 qup->in_blk_sz = blk_sizes[size]; 1872 1873 if (is_qup_v1) { 1874 /* 1875 * in QUP v1, QUP_CONFIG uses N as 15 i.e 16 bits constitutes a 1876 * single transfer but the block size is in bytes so divide the 1877 * in_blk_sz and out_blk_sz by 2 1878 */ 1879 qup->in_blk_sz /= 2; 1880 qup->out_blk_sz /= 2; 1881 qup->write_tx_fifo = qup_i2c_write_tx_fifo_v1; 1882 qup->read_rx_fifo = qup_i2c_read_rx_fifo_v1; 1883 qup->write_rx_tags = qup_i2c_write_rx_tags_v1; 1884 } else { 1885 qup->write_tx_fifo = qup_i2c_write_tx_fifo_v2; 1886 qup->read_rx_fifo = qup_i2c_read_rx_fifo_v2; 1887 qup->write_rx_tags = qup_i2c_write_rx_tags_v2; 1888 } 1889 1890 size = QUP_OUTPUT_FIFO_SIZE(io_mode); 1891 qup->out_fifo_sz = qup->out_blk_sz * (2 << size); 1892 1893 size = QUP_INPUT_FIFO_SIZE(io_mode); 1894 qup->in_fifo_sz = qup->in_blk_sz * (2 << size); 1895 1896 hs_div = 3; 1897 if (clk_freq <= I2C_MAX_STANDARD_MODE_FREQ) { 1898 fs_div = ((src_clk_freq / clk_freq) / 2) - 3; 1899 qup->clk_ctl = (hs_div << 8) | (fs_div & 0xff); 1900 } else { 1901 /* 33%/66% duty cycle */ 1902 fs_div = ((src_clk_freq / clk_freq) - 6) * 2 / 3; 1903 qup->clk_ctl = ((fs_div / 2) << 16) | (hs_div << 8) | (fs_div & 0xff); 1904 } 1905 1906 /* 1907 * Time it takes for a byte to be clocked out on the bus. 1908 * Each byte takes 9 clock cycles (8 bits + 1 ack). 
1909 */ 1910 one_bit_t = (USEC_PER_SEC / clk_freq) + 1; 1911 qup->one_byte_t = one_bit_t * 9; 1912 qup->xfer_timeout = TOUT_MIN * HZ + 1913 usecs_to_jiffies(MX_DMA_TX_RX_LEN * qup->one_byte_t); 1914 1915 dev_dbg(qup->dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n", 1916 qup->in_blk_sz, qup->in_fifo_sz, 1917 qup->out_blk_sz, qup->out_fifo_sz); 1918 1919 i2c_set_adapdata(&qup->adap, qup); 1920 qup->adap.dev.parent = qup->dev; 1921 qup->adap.dev.of_node = pdev->dev.of_node; 1922 qup->is_last = true; 1923 1924 strscpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name)); 1925 1926 pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC); 1927 pm_runtime_use_autosuspend(qup->dev); 1928 pm_runtime_set_active(qup->dev); 1929 pm_runtime_enable(qup->dev); 1930 1931 ret = i2c_add_adapter(&qup->adap); 1932 if (ret) 1933 goto fail_runtime; 1934 1935 return 0; 1936 1937 fail_runtime: 1938 pm_runtime_disable(qup->dev); 1939 pm_runtime_set_suspended(qup->dev); 1940 fail: 1941 qup_i2c_disable_clocks(qup); 1942 fail_dma: 1943 if (qup->btx.dma) 1944 dma_release_channel(qup->btx.dma); 1945 if (qup->brx.dma) 1946 dma_release_channel(qup->brx.dma); 1947 return ret; 1948 } 1949 1950 static void qup_i2c_remove(struct platform_device *pdev) 1951 { 1952 struct qup_i2c_dev *qup = platform_get_drvdata(pdev); 1953 1954 if (qup->is_dma) { 1955 dma_release_channel(qup->btx.dma); 1956 dma_release_channel(qup->brx.dma); 1957 } 1958 1959 disable_irq(qup->irq); 1960 qup_i2c_disable_clocks(qup); 1961 i2c_del_adapter(&qup->adap); 1962 pm_runtime_disable(qup->dev); 1963 pm_runtime_set_suspended(qup->dev); 1964 } 1965 1966 static int qup_i2c_pm_suspend_runtime(struct device *device) 1967 { 1968 struct qup_i2c_dev *qup = dev_get_drvdata(device); 1969 1970 dev_dbg(device, "pm_runtime: suspending...\n"); 1971 qup_i2c_disable_clocks(qup); 1972 return 0; 1973 } 1974 1975 static int qup_i2c_pm_resume_runtime(struct device *device) 1976 { 1977 struct qup_i2c_dev *qup = dev_get_drvdata(device); 1978 
1979 dev_dbg(device, "pm_runtime: resuming...\n"); 1980 qup_i2c_enable_clocks(qup); 1981 return 0; 1982 } 1983 1984 static int qup_i2c_suspend(struct device *device) 1985 { 1986 if (!pm_runtime_suspended(device)) 1987 return qup_i2c_pm_suspend_runtime(device); 1988 return 0; 1989 } 1990 1991 static int qup_i2c_resume(struct device *device) 1992 { 1993 qup_i2c_pm_resume_runtime(device); 1994 pm_runtime_mark_last_busy(device); 1995 pm_request_autosuspend(device); 1996 return 0; 1997 } 1998 1999 static const struct dev_pm_ops qup_i2c_qup_pm_ops = { 2000 SYSTEM_SLEEP_PM_OPS(qup_i2c_suspend, qup_i2c_resume) 2001 RUNTIME_PM_OPS(qup_i2c_pm_suspend_runtime, 2002 qup_i2c_pm_resume_runtime, NULL) 2003 }; 2004 2005 static const struct of_device_id qup_i2c_dt_match[] = { 2006 { .compatible = "qcom,i2c-qup-v1.1.1" }, 2007 { .compatible = "qcom,i2c-qup-v2.1.1" }, 2008 { .compatible = "qcom,i2c-qup-v2.2.1" }, 2009 {} 2010 }; 2011 MODULE_DEVICE_TABLE(of, qup_i2c_dt_match); 2012 2013 static struct platform_driver qup_i2c_driver = { 2014 .probe = qup_i2c_probe, 2015 .remove = qup_i2c_remove, 2016 .driver = { 2017 .name = "i2c_qup", 2018 .pm = pm_ptr(&qup_i2c_qup_pm_ops), 2019 .of_match_table = qup_i2c_dt_match, 2020 .acpi_match_table = ACPI_PTR(qup_i2c_acpi_match), 2021 }, 2022 }; 2023 2024 module_platform_driver(qup_i2c_driver); 2025 2026 MODULE_DESCRIPTION("Qualcomm QUP based I2C controller"); 2027 MODULE_LICENSE("GPL v2"); 2028 MODULE_ALIAS("platform:i2c_qup"); 2029