1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * i2c-xiic.c 4 * Copyright (c) 2002-2007 Xilinx Inc. 5 * Copyright (c) 2009-2010 Intel Corporation 6 * 7 * This code was implemented by Mocean Laboratories AB when porting linux 8 * to the automotive development board Russellville. The copyright holder 9 * as seen in the header is Intel corporation. 10 * Mocean Laboratories forked off the GNU/Linux platform work into a 11 * separate company called Pelagicore AB, which committed the code to the 12 * kernel. 13 */ 14 15 /* Supports: 16 * Xilinx IIC 17 */ 18 #include <linux/kernel.h> 19 #include <linux/module.h> 20 #include <linux/errno.h> 21 #include <linux/err.h> 22 #include <linux/delay.h> 23 #include <linux/platform_device.h> 24 #include <linux/i2c.h> 25 #include <linux/interrupt.h> 26 #include <linux/completion.h> 27 #include <linux/platform_data/i2c-xiic.h> 28 #include <linux/io.h> 29 #include <linux/slab.h> 30 #include <linux/of.h> 31 #include <linux/clk.h> 32 #include <linux/pm_runtime.h> 33 #include <linux/iopoll.h> 34 #include <linux/spinlock.h> 35 36 #define DRIVER_NAME "xiic-i2c" 37 #define DYNAMIC_MODE_READ_BROKEN_BIT BIT(0) 38 #define SMBUS_BLOCK_READ_MIN_LEN 3 39 40 enum xilinx_i2c_state { 41 STATE_DONE, 42 STATE_ERROR, 43 STATE_START 44 }; 45 46 enum xiic_endian { 47 LITTLE, 48 BIG 49 }; 50 51 enum i2c_scl_freq { 52 REG_VALUES_100KHZ = 0, 53 REG_VALUES_400KHZ = 1, 54 REG_VALUES_1MHZ = 2 55 }; 56 57 /** 58 * struct xiic_i2c - Internal representation of the XIIC I2C bus 59 * @dev: Pointer to device structure 60 * @base: Memory base of the HW registers 61 * @completion: Completion for callers 62 * @adap: Kernel adapter representation 63 * @tx_msg: Messages from above to be sent 64 * @lock: Mutual exclusion 65 * @tx_pos: Current pos in TX message 66 * @nmsgs: Number of messages in tx_msg 67 * @rx_msg: Current RX message 68 * @rx_pos: Position within current RX message 69 * @endianness: big/little-endian byte order 70 * @clk: Pointer to AXI4-lite input clock 71 * @state: See STATE_ 72 * @singlemaster: Indicates bus is single master 73 * @dynamic: Mode of controller 74 * @prev_msg_tx: Previous message is Tx 75 * @quirks: To hold platform specific bug info 76 * @smbus_block_read: Flag to handle block read 77 * @input_clk: Input clock to I2C controller 78 * @i2c_clk: I2C SCL frequency 79 * @atomic: Mode of transfer 80 * @atomic_lock: Lock for atomic transfer mode 81 * @atomic_xfer_state: See STATE_ 82 */ 83 struct xiic_i2c { 84 struct device *dev; 85 void __iomem *base; 86 struct completion completion; 87 struct i2c_adapter adap; 88 struct i2c_msg *tx_msg; 89 struct mutex lock; 90 unsigned int tx_pos; 91 unsigned int nmsgs; 92 struct i2c_msg *rx_msg; 93 int rx_pos; 94 enum xiic_endian endianness; 95 struct clk *clk; 96 enum xilinx_i2c_state state; 97 bool singlemaster; 98 bool dynamic; 99 bool prev_msg_tx; 100 u32 quirks; 101 bool smbus_block_read; 102 unsigned long input_clk; 103 unsigned int i2c_clk; 104 bool atomic; 105 spinlock_t atomic_lock; /* Lock for atomic transfer mode */ 106 enum xilinx_i2c_state atomic_xfer_state; 107 }; 108 109 struct xiic_version_data { 110 u32 quirks; 111 }; 112 113 /** 114 * struct timing_regs - AXI I2C timing registers that depend on I2C spec 115 * @tsusta: setup time for a repeated START condition 116 * @tsusto: setup time for a STOP condition 117 * @thdsta: hold time for a repeated START condition 118 * @tsudat: setup time for data 119 * @tbuf: bus free time between STOP and START 120 */ 121 struct timing_regs { 122 unsigned int tsusta; 123 unsigned int tsusto; 124 unsigned int thdsta; 125 unsigned int tsudat; 126 unsigned int tbuf; 127 }; 128 129 /* Reg values in ns derived from I2C spec and AXI I2C PG for different frequencies */ 130 static const struct timing_regs timing_reg_values[] = { 131 { 5700, 5000, 4300, 550, 5000 }, /* Reg values for 100KHz */ 132 { 900, 900, 900, 400, 1600 }, /* Reg values for 400KHz */ 133 { 380, 380, 380, 170, 620 }, /* Reg values for 1MHz */ 134 }; 135 136 #define XIIC_MSB_OFFSET 0 137 #define XIIC_REG_OFFSET (0x100 + XIIC_MSB_OFFSET) 138 139 /* 140 * Register offsets in bytes from RegisterBase. Three is added to the 141 * base offset to access LSB (IBM style) of the word 142 */ 143 #define XIIC_CR_REG_OFFSET (0x00 + XIIC_REG_OFFSET) /* Control Register */ 144 #define XIIC_SR_REG_OFFSET (0x04 + XIIC_REG_OFFSET) /* Status Register */ 145 #define XIIC_DTR_REG_OFFSET (0x08 + XIIC_REG_OFFSET) /* Data Tx Register */ 146 #define XIIC_DRR_REG_OFFSET (0x0C + XIIC_REG_OFFSET) /* Data Rx Register */ 147 #define XIIC_ADR_REG_OFFSET (0x10 + XIIC_REG_OFFSET) /* Address Register */ 148 #define XIIC_TFO_REG_OFFSET (0x14 + XIIC_REG_OFFSET) /* Tx FIFO Occupancy */ 149 #define XIIC_RFO_REG_OFFSET (0x18 + XIIC_REG_OFFSET) /* Rx FIFO Occupancy */ 150 #define XIIC_TBA_REG_OFFSET (0x1C + XIIC_REG_OFFSET) /* 10 Bit Address reg */ 151 #define XIIC_RFD_REG_OFFSET (0x20 + XIIC_REG_OFFSET) /* Rx FIFO Depth reg */ 152 #define XIIC_GPO_REG_OFFSET (0x24 + XIIC_REG_OFFSET) /* Output Register */ 153 154 /* 155 * Timing register offsets from RegisterBase. These are used only for 156 * setting i2c clock frequency for the line. 157 */ 158 #define XIIC_TSUSTA_REG_OFFSET (0x28 + XIIC_REG_OFFSET) /* TSUSTA Register */ 159 #define XIIC_TSUSTO_REG_OFFSET (0x2C + XIIC_REG_OFFSET) /* TSUSTO Register */ 160 #define XIIC_THDSTA_REG_OFFSET (0x30 + XIIC_REG_OFFSET) /* THDSTA Register */ 161 #define XIIC_TSUDAT_REG_OFFSET (0x34 + XIIC_REG_OFFSET) /* TSUDAT Register */ 162 #define XIIC_TBUF_REG_OFFSET (0x38 + XIIC_REG_OFFSET) /* TBUF Register */ 163 #define XIIC_THIGH_REG_OFFSET (0x3C + XIIC_REG_OFFSET) /* THIGH Register */ 164 #define XIIC_TLOW_REG_OFFSET (0x40 + XIIC_REG_OFFSET) /* TLOW Register */ 165 #define XIIC_THDDAT_REG_OFFSET (0x44 + XIIC_REG_OFFSET) /* THDDAT Register */ 166 167 /* Control Register masks */ 168 #define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */ 169 #define XIIC_CR_TX_FIFO_RESET_MASK 0x02 /* Transmit FIFO reset=1 */ 170 #define XIIC_CR_MSMS_MASK 0x04 /* Master starts Txing=1 */ 171 #define XIIC_CR_DIR_IS_TX_MASK 0x08 /* Dir of tx. Txing=1 */ 172 #define XIIC_CR_NO_ACK_MASK 0x10 /* Tx Ack. NO ack = 1 */ 173 #define XIIC_CR_REPEATED_START_MASK 0x20 /* Repeated start = 1 */ 174 #define XIIC_CR_GENERAL_CALL_MASK 0x40 /* Gen Call enabled = 1 */ 175 176 /* Status Register masks */ 177 #define XIIC_SR_GEN_CALL_MASK 0x01 /* 1=a mstr issued a GC */ 178 #define XIIC_SR_ADDR_AS_SLAVE_MASK 0x02 /* 1=when addr as slave */ 179 #define XIIC_SR_BUS_BUSY_MASK 0x04 /* 1 = bus is busy */ 180 #define XIIC_SR_MSTR_RDING_SLAVE_MASK 0x08 /* 1=Dir: mstr <-- slave */ 181 #define XIIC_SR_TX_FIFO_FULL_MASK 0x10 /* 1 = Tx FIFO full */ 182 #define XIIC_SR_RX_FIFO_FULL_MASK 0x20 /* 1 = Rx FIFO full */ 183 #define XIIC_SR_RX_FIFO_EMPTY_MASK 0x40 /* 1 = Rx FIFO empty */ 184 #define XIIC_SR_TX_FIFO_EMPTY_MASK 0x80 /* 1 = Tx FIFO empty */ 185 186 /* Interrupt Status Register masks Interrupt occurs when... */ 187 #define XIIC_INTR_ARB_LOST_MASK 0x01 /* 1 = arbitration lost */ 188 #define XIIC_INTR_TX_ERROR_MASK 0x02 /* 1=Tx error/msg complete */ 189 #define XIIC_INTR_TX_EMPTY_MASK 0x04 /* 1 = Tx FIFO/reg empty */ 190 #define XIIC_INTR_RX_FULL_MASK 0x08 /* 1=Rx FIFO/reg=OCY level */ 191 #define XIIC_INTR_BNB_MASK 0x10 /* 1 = Bus not busy */ 192 #define XIIC_INTR_AAS_MASK 0x20 /* 1 = when addr as slave */ 193 #define XIIC_INTR_NAAS_MASK 0x40 /* 1 = not addr as slave */ 194 #define XIIC_INTR_TX_HALF_MASK 0x80 /* 1 = TX FIFO half empty */ 195 196 /* The following constants specify the depth of the FIFOs */ 197 #define IIC_RX_FIFO_DEPTH 16 /* Rx fifo capacity */ 198 #define IIC_TX_FIFO_DEPTH 16 /* Tx fifo capacity */ 199 200 /* The following constants specify groups of interrupts that are typically 201 * enabled or disables at the same time 202 */ 203 #define XIIC_TX_INTERRUPTS \ 204 (XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK) 205 206 #define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS) 207 208 /* 209 * Tx Fifo upper bit masks. 210 */ 211 #define XIIC_TX_DYN_START_MASK 0x0100 /* 1 = Set dynamic start */ 212 #define XIIC_TX_DYN_STOP_MASK 0x0200 /* 1 = Set dynamic stop */ 213 214 /* Dynamic mode constants */ 215 #define MAX_READ_LENGTH_DYNAMIC 255 /* Max length for dynamic read */ 216 217 /* 218 * The following constants define the register offsets for the Interrupt 219 * registers. There are some holes in the memory map for reserved addresses 220 * to allow other registers to be added and still match the memory map of the 221 * interrupt controller registers 222 */ 223 #define XIIC_DGIER_OFFSET 0x1C /* Device Global Interrupt Enable Register */ 224 #define XIIC_IISR_OFFSET 0x20 /* Interrupt Status Register */ 225 #define XIIC_IIER_OFFSET 0x28 /* Interrupt Enable Register */ 226 #define XIIC_RESETR_OFFSET 0x40 /* Reset Register */ 227 228 #define XIIC_RESET_MASK 0xAUL 229 230 #define XIIC_PM_TIMEOUT 1000 /* ms */ 231 /* timeout waiting for the controller to respond */ 232 #define XIIC_I2C_TIMEOUT (msecs_to_jiffies(1000)) 233 /* timeout waiting for the controller finish transfers */ 234 #define XIIC_XFER_TIMEOUT (msecs_to_jiffies(10000)) 235 /* timeout waiting for the controller finish transfers in micro seconds */ 236 #define XIIC_XFER_TIMEOUT_US 10000000 237 238 /* 239 * The following constant is used for the device global interrupt enable 240 * register, to enable all interrupts for the device, this is the only bit 241 * in the register 242 */ 243 #define XIIC_GINTR_ENABLE_MASK 0x80000000UL 244 245 #define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos) 246 #define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos) 247 248 static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num); 249 static void __xiic_start_xfer(struct xiic_i2c *i2c); 250 251 static int xiic_i2c_runtime_suspend(struct device *dev) 252 { 253 struct xiic_i2c *i2c = dev_get_drvdata(dev); 254 255 clk_disable(i2c->clk); 256 257 return 0; 258 } 259 260 static int xiic_i2c_runtime_resume(struct device *dev) 261 { 262 struct xiic_i2c *i2c = dev_get_drvdata(dev); 263 int ret; 264 265 ret = clk_enable(i2c->clk); 266 if (ret) { 267 dev_err(dev, "Cannot enable clock.\n"); 268 return ret; 269 } 270 271 return 0; 272 } 273 274 /* 275 * For the register read and write functions, a little-endian and big-endian 276 * version are necessary. Endianness is detected during the probe function. 277 * Only the least significant byte [doublet] of the register are ever 278 * accessed. This requires an offset of 3 [2] from the base address for 279 * big-endian systems. 280 */ 281 282 static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value) 283 { 284 if (i2c->endianness == LITTLE) 285 iowrite8(value, i2c->base + reg); 286 else 287 iowrite8(value, i2c->base + reg + 3); 288 } 289 290 static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg) 291 { 292 u8 ret; 293 294 if (i2c->endianness == LITTLE) 295 ret = ioread8(i2c->base + reg); 296 else 297 ret = ioread8(i2c->base + reg + 3); 298 return ret; 299 } 300 301 static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value) 302 { 303 if (i2c->endianness == LITTLE) 304 iowrite16(value, i2c->base + reg); 305 else 306 iowrite16be(value, i2c->base + reg + 2); 307 } 308 309 static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value) 310 { 311 if (i2c->endianness == LITTLE) 312 iowrite32(value, i2c->base + reg); 313 else 314 iowrite32be(value, i2c->base + reg); 315 } 316 317 static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg) 318 { 319 u32 ret; 320 321 if (i2c->endianness == LITTLE) 322 ret = ioread32(i2c->base + reg); 323 else 324 ret = ioread32be(i2c->base + reg); 325 return ret; 326 } 327 328 static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask) 329 { 330 u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET); 331 332 xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask); 333 } 334 335 static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask) 336 { 337 u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET); 338 339 xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask); 340 } 341 342 static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask) 343 { 344 u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET); 345 346 xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask); 347 } 348 349 static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask) 350 { 351 xiic_irq_clr(i2c, mask); 352 xiic_irq_en(i2c, mask); 353 } 354 355 static int xiic_clear_rx_fifo(struct xiic_i2c *i2c) 356 { 357 u8 sr; 358 unsigned long timeout; 359 360 timeout = jiffies + XIIC_I2C_TIMEOUT; 361 for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET); 362 !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK); 363 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) { 364 xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET); 365 if (time_after(jiffies, timeout)) { 366 dev_err(i2c->dev, "Failed to clear rx fifo\n"); 367 return -ETIMEDOUT; 368 } 369 } 370 371 return 0; 372 } 373 374 static int xiic_wait_tx_empty(struct xiic_i2c *i2c) 375 { 376 u8 isr; 377 unsigned long timeout; 378 379 timeout = jiffies + XIIC_I2C_TIMEOUT; 380 for (isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET); 381 !(isr & XIIC_INTR_TX_EMPTY_MASK); 382 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET)) { 383 if (time_after(jiffies, timeout)) { 384 dev_err(i2c->dev, "Timeout waiting at Tx empty\n"); 385 return -ETIMEDOUT; 386 } 387 } 388 389 return 0; 390 } 391 392 /** 393 * xiic_setclk - Sets the configured clock rate 394 * @i2c: Pointer to the xiic device structure 395 * 396 * The timing register values are calculated according to the input clock 397 * frequency and configured scl frequency. For details, please refer the 398 * AXI I2C PG and NXP I2C Spec. 399 * Supported frequencies are 100KHz, 400KHz and 1MHz. 400 * 401 * Return: 0 on success (Supported frequency selected or not configurable in SW) 402 * -EINVAL on failure (scl frequency not supported or THIGH is 0) 403 */ 404 static int xiic_setclk(struct xiic_i2c *i2c) 405 { 406 unsigned int clk_in_mhz; 407 unsigned int index = 0; 408 u32 reg_val; 409 410 if (!i2c->atomic) 411 dev_dbg(i2c->adap.dev.parent, 412 "%s entry, i2c->input_clk: %ld, i2c->i2c_clk: %d\n", 413 __func__, i2c->input_clk, i2c->i2c_clk); 414 415 /* If not specified in DT, do not configure in SW. Rely only on Vivado design */ 416 if (!i2c->i2c_clk || !i2c->input_clk) 417 return 0; 418 419 clk_in_mhz = DIV_ROUND_UP(i2c->input_clk, 1000000); 420 421 switch (i2c->i2c_clk) { 422 case I2C_MAX_FAST_MODE_PLUS_FREQ: 423 index = REG_VALUES_1MHZ; 424 break; 425 case I2C_MAX_FAST_MODE_FREQ: 426 index = REG_VALUES_400KHZ; 427 break; 428 case I2C_MAX_STANDARD_MODE_FREQ: 429 index = REG_VALUES_100KHZ; 430 break; 431 default: 432 dev_warn(i2c->adap.dev.parent, "Unsupported scl frequency\n"); 433 return -EINVAL; 434 } 435 436 /* 437 * Value to be stored in a register is the number of clock cycles required 438 * for the time duration. So the time is divided by the input clock time 439 * period to get the number of clock cycles required. Refer Xilinx AXI I2C 440 * PG document and I2C specification for further details. 441 */ 442 443 /* THIGH - Depends on SCL clock frequency(i2c_clk) as below */ 444 reg_val = (DIV_ROUND_UP(i2c->input_clk, 2 * i2c->i2c_clk)) - 7; 445 if (reg_val == 0) 446 return -EINVAL; 447 448 xiic_setreg32(i2c, XIIC_THIGH_REG_OFFSET, reg_val - 1); 449 450 /* TLOW - Value same as THIGH */ 451 xiic_setreg32(i2c, XIIC_TLOW_REG_OFFSET, reg_val - 1); 452 453 /* TSUSTA */ 454 reg_val = (timing_reg_values[index].tsusta * clk_in_mhz) / 1000; 455 xiic_setreg32(i2c, XIIC_TSUSTA_REG_OFFSET, reg_val - 1); 456 457 /* TSUSTO */ 458 reg_val = (timing_reg_values[index].tsusto * clk_in_mhz) / 1000; 459 xiic_setreg32(i2c, XIIC_TSUSTO_REG_OFFSET, reg_val - 1); 460 461 /* THDSTA */ 462 reg_val = (timing_reg_values[index].thdsta * clk_in_mhz) / 1000; 463 xiic_setreg32(i2c, XIIC_THDSTA_REG_OFFSET, reg_val - 1); 464 465 /* TSUDAT */ 466 reg_val = (timing_reg_values[index].tsudat * clk_in_mhz) / 1000; 467 xiic_setreg32(i2c, XIIC_TSUDAT_REG_OFFSET, reg_val - 1); 468 469 /* TBUF */ 470 reg_val = (timing_reg_values[index].tbuf * clk_in_mhz) / 1000; 471 xiic_setreg32(i2c, XIIC_TBUF_REG_OFFSET, reg_val - 1); 472 473 /* THDDAT */ 474 xiic_setreg32(i2c, XIIC_THDDAT_REG_OFFSET, 1); 475 476 return 0; 477 } 478 479 static int xiic_reinit(struct xiic_i2c *i2c) 480 { 481 int ret; 482 483 xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK); 484 485 ret = xiic_setclk(i2c); 486 if (ret) 487 return ret; 488 489 /* Set receive Fifo depth to maximum (zero based). */ 490 xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1); 491 492 /* Reset Tx Fifo. */ 493 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK); 494 495 /* Enable IIC Device, remove Tx Fifo reset & disable general call. */ 496 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK); 497 498 /* make sure RX fifo is empty */ 499 ret = xiic_clear_rx_fifo(i2c); 500 if (ret) 501 return ret; 502 503 /* Enable interrupts */ 504 if (!i2c->atomic) 505 xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK); 506 507 xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK); 508 509 return 0; 510 } 511 512 static void xiic_deinit(struct xiic_i2c *i2c) 513 { 514 u8 cr; 515 516 xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK); 517 518 /* Disable IIC Device. */ 519 cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); 520 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK); 521 } 522 523 static void xiic_smbus_block_read_setup(struct xiic_i2c *i2c) 524 { 525 u8 rxmsg_len, rfd_set = 0; 526 527 /* 528 * Clear the I2C_M_RECV_LEN flag to avoid setting 529 * message length again 530 */ 531 i2c->rx_msg->flags &= ~I2C_M_RECV_LEN; 532 533 /* Set smbus_block_read flag to identify in isr */ 534 i2c->smbus_block_read = true; 535 536 /* Read byte from rx fifo and set message length */ 537 rxmsg_len = xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET); 538 539 i2c->rx_msg->buf[i2c->rx_pos++] = rxmsg_len; 540 541 /* Check if received length is valid */ 542 if (rxmsg_len <= I2C_SMBUS_BLOCK_MAX) { 543 /* Set Receive fifo depth */ 544 if (rxmsg_len > IIC_RX_FIFO_DEPTH) { 545 /* 546 * When Rx msg len greater than or equal to Rx fifo capacity 547 * Receive fifo depth should set to Rx fifo capacity minus 1 548 */ 549 rfd_set = IIC_RX_FIFO_DEPTH - 1; 550 i2c->rx_msg->len = rxmsg_len + 1; 551 } else if ((rxmsg_len == 1) || 552 (rxmsg_len == 0)) { 553 /* 554 * Minimum of 3 bytes required to exit cleanly. 1 byte 555 * already received, Second byte is being received. Have 556 * to set NACK in read_rx before receiving the last byte 557 */ 558 rfd_set = 0; 559 i2c->rx_msg->len = SMBUS_BLOCK_READ_MIN_LEN; 560 } else { 561 /* 562 * When Rx msg len less than Rx fifo capacity 563 * Receive fifo depth should set to Rx msg len minus 2 564 */ 565 rfd_set = rxmsg_len - 2; 566 i2c->rx_msg->len = rxmsg_len + 1; 567 } 568 xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rfd_set); 569 570 return; 571 } 572 573 /* Invalid message length, trigger STATE_ERROR with tx_msg_len in ISR */ 574 i2c->tx_msg->len = 3; 575 i2c->smbus_block_read = false; 576 dev_err(i2c->adap.dev.parent, "smbus_block_read Invalid msg length\n"); 577 } 578 579 static void xiic_read_rx(struct xiic_i2c *i2c) 580 { 581 u8 bytes_in_fifo, cr = 0, bytes_to_read = 0; 582 u32 bytes_rem = 0; 583 int i; 584 585 bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1; 586 587 if (!i2c->atomic) 588 dev_dbg(i2c->adap.dev.parent, 589 "%s entry, bytes in fifo: %d, rem: %d, SR: 0x%x, CR: 0x%x\n", 590 __func__, bytes_in_fifo, xiic_rx_space(i2c), 591 xiic_getreg8(i2c, XIIC_SR_REG_OFFSET), 592 xiic_getreg8(i2c, XIIC_CR_REG_OFFSET)); 593 594 if (bytes_in_fifo > xiic_rx_space(i2c)) 595 bytes_in_fifo = xiic_rx_space(i2c); 596 597 bytes_to_read = bytes_in_fifo; 598 599 if (!i2c->dynamic) { 600 bytes_rem = xiic_rx_space(i2c) - bytes_in_fifo; 601 602 /* Set msg length if smbus_block_read */ 603 if (i2c->rx_msg->flags & I2C_M_RECV_LEN) { 604 xiic_smbus_block_read_setup(i2c); 605 return; 606 } 607 608 if (bytes_rem > IIC_RX_FIFO_DEPTH) { 609 bytes_to_read = bytes_in_fifo; 610 } else if (bytes_rem > 1) { 611 bytes_to_read = bytes_rem - 1; 612 } else if (bytes_rem == 1) { 613 bytes_to_read = 1; 614 /* Set NACK in CR to indicate slave transmitter */ 615 cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); 616 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr | 617 XIIC_CR_NO_ACK_MASK); 618 } else if (bytes_rem == 0) { 619 bytes_to_read = bytes_in_fifo; 620 621 /* Generate stop on the bus if it is last message */ 622 if (i2c->nmsgs == 1) { 623 cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); 624 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & 625 ~XIIC_CR_MSMS_MASK); 626 } 627 628 /* Make TXACK=0, clean up for next transaction */ 629 cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); 630 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & 631 ~XIIC_CR_NO_ACK_MASK); 632 } 633 } 634 635 /* Read the fifo */ 636 for (i = 0; i < bytes_to_read; i++) { 637 i2c->rx_msg->buf[i2c->rx_pos++] = 638 xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET); 639 } 640 641 if (i2c->dynamic) { 642 u8 bytes; 643 644 /* Receive remaining bytes if less than fifo depth */ 645 bytes = min_t(u8, xiic_rx_space(i2c), IIC_RX_FIFO_DEPTH); 646 bytes--; 647 xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, bytes); 648 } 649 } 650 651 static bool xiic_error_check(struct xiic_i2c *i2c) 652 { 653 bool status = false; 654 u32 pend, isr, ier; 655 656 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET); 657 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET); 658 pend = isr & ier; 659 660 if ((pend & XIIC_INTR_ARB_LOST_MASK) || 661 ((pend & XIIC_INTR_TX_ERROR_MASK) && 662 !(pend & XIIC_INTR_RX_FULL_MASK))) { 663 xiic_reinit(i2c); 664 status = true; 665 if (i2c->tx_msg || i2c->rx_msg) 666 i2c->atomic_xfer_state = STATE_ERROR; 667 } 668 return status; 669 } 670 671 static int xiic_tx_fifo_space(struct xiic_i2c *i2c) 672 { 673 /* return the actual space left in the FIFO */ 674 return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, XIIC_TFO_REG_OFFSET) - 1; 675 } 676 677 static void xiic_fill_tx_fifo(struct xiic_i2c *i2c) 678 { 679 u8 fifo_space = xiic_tx_fifo_space(i2c); 680 int len = xiic_tx_space(i2c); 681 682 len = (len > fifo_space) ? fifo_space : len; 683 684 if (!i2c->atomic) 685 dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n", 686 __func__, len, fifo_space); 687 688 while (len--) { 689 u16 data = i2c->tx_msg->buf[i2c->tx_pos++]; 690 691 if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) { 692 /* last message in transfer -> STOP */ 693 if (i2c->dynamic) { 694 data |= XIIC_TX_DYN_STOP_MASK; 695 } else { 696 u8 cr; 697 int status; 698 699 /* Wait till FIFO is empty so STOP is sent last */ 700 status = xiic_wait_tx_empty(i2c); 701 if (status) 702 return; 703 704 /* Write to CR to stop */ 705 cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); 706 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & 707 ~XIIC_CR_MSMS_MASK); 708 } 709 if (!i2c->atomic) 710 dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__); 711 } 712 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); 713 714 if (i2c->atomic && xiic_error_check(i2c)) 715 return; 716 } 717 } 718 719 static void xiic_wakeup(struct xiic_i2c *i2c, enum xilinx_i2c_state code) 720 { 721 i2c->tx_msg = NULL; 722 i2c->rx_msg = NULL; 723 i2c->nmsgs = 0; 724 i2c->state = code; 725 complete(&i2c->completion); 726 } 727 728 static irqreturn_t xiic_process(int irq, void *dev_id) 729 { 730 struct xiic_i2c *i2c = dev_id; 731 u32 pend, isr, ier; 732 u32 clr = 0; 733 int xfer_more = 0; 734 int wakeup_req = 0; 735 enum xilinx_i2c_state wakeup_code = STATE_DONE; 736 int ret; 737 738 /* Get the interrupt Status from the IPIF. There is no clearing of 739 * interrupts in the IPIF. Interrupts must be cleared at the source. 740 * To find which interrupts are pending; AND interrupts pending with 741 * interrupts masked. 742 */ 743 mutex_lock(&i2c->lock); 744 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET); 745 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET); 746 pend = isr & ier; 747 748 dev_dbg(i2c->adap.dev.parent, "%s: IER: 0x%x, ISR: 0x%x, pend: 0x%x\n", 749 __func__, ier, isr, pend); 750 dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n", 751 __func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET), 752 i2c->tx_msg, i2c->nmsgs); 753 dev_dbg(i2c->adap.dev.parent, "%s, ISR: 0x%x, CR: 0x%x\n", 754 __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET), 755 xiic_getreg8(i2c, XIIC_CR_REG_OFFSET)); 756 757 /* Service requesting interrupt */ 758 if ((pend & XIIC_INTR_ARB_LOST_MASK) || 759 ((pend & XIIC_INTR_TX_ERROR_MASK) && 760 !(pend & XIIC_INTR_RX_FULL_MASK))) { 761 /* bus arbritration lost, or... 762 * Transmit error _OR_ RX completed 763 * if this happens when RX_FULL is not set 764 * this is probably a TX error 765 */ 766 767 dev_dbg(i2c->adap.dev.parent, "%s error\n", __func__); 768 769 /* dynamic mode seem to suffer from problems if we just flushes 770 * fifos and the next message is a TX with len 0 (only addr) 771 * reset the IP instead of just flush fifos 772 */ 773 ret = xiic_reinit(i2c); 774 if (ret < 0) 775 dev_dbg(i2c->adap.dev.parent, "reinit failed\n"); 776 777 if (i2c->rx_msg) { 778 wakeup_req = 1; 779 wakeup_code = STATE_ERROR; 780 } 781 if (i2c->tx_msg) { 782 wakeup_req = 1; 783 wakeup_code = STATE_ERROR; 784 } 785 /* don't try to handle other events */ 786 goto out; 787 } 788 if (pend & XIIC_INTR_RX_FULL_MASK) { 789 /* Receive register/FIFO is full */ 790 791 clr |= XIIC_INTR_RX_FULL_MASK; 792 if (!i2c->rx_msg) { 793 dev_dbg(i2c->adap.dev.parent, 794 "%s unexpected RX IRQ\n", __func__); 795 xiic_clear_rx_fifo(i2c); 796 goto out; 797 } 798 799 xiic_read_rx(i2c); 800 if (xiic_rx_space(i2c) == 0) { 801 /* this is the last part of the message */ 802 i2c->rx_msg = NULL; 803 804 /* also clear TX error if there (RX complete) */ 805 clr |= (isr & XIIC_INTR_TX_ERROR_MASK); 806 807 dev_dbg(i2c->adap.dev.parent, 808 "%s end of message, nmsgs: %d\n", 809 __func__, i2c->nmsgs); 810 811 /* send next message if this wasn't the last, 812 * otherwise the transfer will be finialise when 813 * receiving the bus not busy interrupt 814 */ 815 if (i2c->nmsgs > 1) { 816 i2c->nmsgs--; 817 i2c->tx_msg++; 818 dev_dbg(i2c->adap.dev.parent, 819 "%s will start next...\n", __func__); 820 xfer_more = 1; 821 } 822 } 823 } 824 if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) { 825 /* Transmit register/FIFO is empty or ½ empty */ 826 827 clr |= (pend & 828 (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)); 829 830 if (!i2c->tx_msg) { 831 dev_dbg(i2c->adap.dev.parent, 832 "%s unexpected TX IRQ\n", __func__); 833 goto out; 834 } 835 836 if (xiic_tx_space(i2c)) { 837 xiic_fill_tx_fifo(i2c); 838 } else { 839 /* current message fully written */ 840 dev_dbg(i2c->adap.dev.parent, 841 "%s end of message sent, nmsgs: %d\n", 842 __func__, i2c->nmsgs); 843 /* Don't move onto the next message until the TX FIFO empties, 844 * to ensure that a NAK is not missed. 845 */ 846 if (i2c->nmsgs > 1 && (pend & XIIC_INTR_TX_EMPTY_MASK)) { 847 i2c->nmsgs--; 848 i2c->tx_msg++; 849 xfer_more = 1; 850 } else { 851 xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK); 852 853 dev_dbg(i2c->adap.dev.parent, 854 "%s Got TX IRQ but no more to do...\n", 855 __func__); 856 } 857 } 858 } 859 860 if (pend & XIIC_INTR_BNB_MASK) { 861 /* IIC bus has transitioned to not busy */ 862 clr |= XIIC_INTR_BNB_MASK; 863 864 /* The bus is not busy, disable BusNotBusy interrupt */ 865 xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK); 866 867 if (i2c->tx_msg && i2c->smbus_block_read) { 868 i2c->smbus_block_read = false; 869 /* Set requested message len=1 to indicate STATE_DONE */ 870 i2c->tx_msg->len = 1; 871 } 872 873 if (!i2c->tx_msg) 874 goto out; 875 876 wakeup_req = 1; 877 878 if (i2c->nmsgs == 1 && !i2c->rx_msg && 879 xiic_tx_space(i2c) == 0) 880 wakeup_code = STATE_DONE; 881 else 882 wakeup_code = STATE_ERROR; 883 } 884 885 out: 886 dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr); 887 888 xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr); 889 if (xfer_more) 890 __xiic_start_xfer(i2c); 891 if (wakeup_req) 892 xiic_wakeup(i2c, wakeup_code); 893 894 WARN_ON(xfer_more && wakeup_req); 895 896 mutex_unlock(&i2c->lock); 897 return IRQ_HANDLED; 898 } 899 900 static int xiic_bus_busy(struct xiic_i2c *i2c) 901 { 902 u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET); 903 904 return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0; 905 } 906 907 static int xiic_wait_not_busy(struct xiic_i2c *i2c) 908 { 909 int tries = 3; 910 int err; 911 912 /* for instance if previous transfer was terminated due to TX error 913 * it might be that the bus is on it's way to become available 914 * give it at most 3 ms to wake 915 */ 916 err = xiic_bus_busy(i2c); 917 while (err && tries--) { 918 if (i2c->atomic) 919 udelay(1000); 920 else 921 usleep_range(1000, 1100); 922 err = xiic_bus_busy(i2c); 923 } 924 925 return err; 926 } 927 928 static void xiic_recv_atomic(struct xiic_i2c *i2c) 929 { 930 while (xiic_rx_space(i2c)) { 931 if (xiic_getreg32(i2c, XIIC_IISR_OFFSET) & XIIC_INTR_RX_FULL_MASK) { 932 xiic_read_rx(i2c); 933 934 /* Clear Rx full and Tx error interrupts. */ 935 xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | 936 XIIC_INTR_TX_ERROR_MASK); 937 } 938 if (xiic_error_check(i2c)) 939 return; 940 } 941 942 i2c->rx_msg = NULL; 943 xiic_irq_clr_en(i2c, XIIC_INTR_TX_ERROR_MASK); 944 945 /* send next message if this wasn't the last. */ 946 if (i2c->nmsgs > 1) { 947 i2c->nmsgs--; 948 i2c->tx_msg++; 949 __xiic_start_xfer(i2c); 950 } 951 } 952 953 static void xiic_start_recv(struct xiic_i2c *i2c) 954 { 955 u16 rx_watermark; 956 u8 cr = 0, rfd_set = 0; 957 struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; 958 959 if (!i2c->atomic) 960 dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n", 961 __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET), 962 xiic_getreg8(i2c, XIIC_CR_REG_OFFSET)); 963 964 /* Disable Tx interrupts */ 965 xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK | XIIC_INTR_TX_EMPTY_MASK); 966 967 if (i2c->dynamic) { 968 u8 bytes; 969 u16 val; 970 971 /* Clear and enable Rx full interrupt. */ 972 xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | 973 XIIC_INTR_TX_ERROR_MASK); 974 975 /* 976 * We want to get all but last byte, because the TX_ERROR IRQ 977 * is used to indicate error ACK on the address, and 978 * negative ack on the last received byte, so to not mix 979 * them receive all but last. 980 * In the case where there is only one byte to receive 981 * we can check if ERROR and RX full is set at the same time 982 */ 983 rx_watermark = msg->len; 984 bytes = min_t(u8, rx_watermark, IIC_RX_FIFO_DEPTH); 985 986 if (rx_watermark > 0) 987 bytes--; 988 xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, bytes); 989 990 /* write the address */ 991 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, 992 i2c_8bit_addr_from_msg(msg) | 993 XIIC_TX_DYN_START_MASK); 994 995 /* If last message, include dynamic stop bit with length */ 996 val = (i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0; 997 val |= msg->len; 998 999 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, val); 1000 1001 xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); 1002 } else { 1003 /* 1004 * If previous message is Tx, make sure that Tx FIFO is empty 1005 * before starting a new transfer as the repeated start in 1006 * standard mode can corrupt the transaction if there are 1007 * still bytes to be transmitted in FIFO 1008 */ 1009 if (i2c->prev_msg_tx) { 1010 int status; 1011 1012 status = xiic_wait_tx_empty(i2c); 1013 if (status) 1014 return; 1015 } 1016 1017 cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); 1018 1019 /* Set Receive fifo depth */ 1020 rx_watermark = msg->len; 1021 if (rx_watermark > IIC_RX_FIFO_DEPTH) { 1022 rfd_set = IIC_RX_FIFO_DEPTH - 1; 1023 } else if (rx_watermark == 1) { 1024 rfd_set = rx_watermark - 1; 1025 1026 /* Set No_ACK, except for smbus_block_read */ 1027 if (!(i2c->rx_msg->flags & I2C_M_RECV_LEN)) { 1028 /* Handle single byte transfer separately */ 1029 cr |= XIIC_CR_NO_ACK_MASK; 1030 } 1031 } else if (rx_watermark == 0) { 1032 rfd_set = rx_watermark; 1033 } else { 1034 rfd_set = rx_watermark - 2; 1035 } 1036 /* Check if RSTA should be set */ 1037 if (cr & XIIC_CR_MSMS_MASK) { 1038 /* Already a master, RSTA should be set */ 1039 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr | 1040 XIIC_CR_REPEATED_START_MASK) & 1041 ~(XIIC_CR_DIR_IS_TX_MASK)); 1042 } 1043 1044 xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rfd_set); 1045 1046 /* Clear and enable Rx full and transmit complete interrupts */ 1047 xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | 1048 XIIC_INTR_TX_ERROR_MASK); 1049 1050 /* Write the address */ 1051 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, 1052 i2c_8bit_addr_from_msg(msg)); 1053 1054 /* Write to Control Register,to start transaction in Rx mode */ 1055 if ((cr & XIIC_CR_MSMS_MASK) == 0) { 1056 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr | 1057 XIIC_CR_MSMS_MASK) 1058 & ~(XIIC_CR_DIR_IS_TX_MASK)); 1059 } 1060 if (!i2c->atomic) 1061 dev_dbg(i2c->adap.dev.parent, "%s end, ISR: 0x%x, CR: 0x%x\n", 1062 __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET), 1063 xiic_getreg8(i2c, XIIC_CR_REG_OFFSET)); 1064 } 1065 1066 if (i2c->nmsgs == 1) 1067 /* very last, enable bus not busy as well */ 1068 xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); 1069 1070 /* the message is tx:ed */ 1071 i2c->tx_pos = msg->len; 1072 1073 i2c->prev_msg_tx = false; 1074 1075 /* Enable interrupts */ 1076 if (!i2c->atomic) 1077 xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK); 1078 else 1079 xiic_recv_atomic(i2c); 1080 } 1081 1082 static void xiic_send_rem_atomic(struct xiic_i2c *i2c) 1083 { 1084 while (xiic_tx_space(i2c)) { 1085 if (xiic_tx_fifo_space(i2c)) { 1086 u16 data; 1087 1088 data = i2c->tx_msg->buf[i2c->tx_pos]; 1089 i2c->tx_pos++; 1090 if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) { 1091 /* last message in transfer -> STOP */ 1092 if (i2c->dynamic) { 1093 data |= XIIC_TX_DYN_STOP_MASK; 1094 } else { 1095 u8 cr; 1096 int status; 1097 1098 /* Wait till FIFO is empty so STOP is sent last */ 1099 status = xiic_wait_tx_empty(i2c); 1100 if (status) 1101 return; 1102 1103 /* Write to CR to stop */ 1104 cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); 1105 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & 1106 ~XIIC_CR_MSMS_MASK); 1107 } 1108 } 1109 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); 1110 } 1111 if (xiic_error_check(i2c)) 1112 return; 1113 } 1114 1115 if (i2c->nmsgs > 1) { 1116 i2c->nmsgs--; 1117 i2c->tx_msg++; 1118 __xiic_start_xfer(i2c); 1119 } else { 1120 xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK); 1121 } 1122 } 1123 1124 static void xiic_start_send(struct xiic_i2c *i2c) 1125 { 1126 u8 cr = 0; 1127 u16 data; 1128 struct i2c_msg *msg = i2c->tx_msg; 1129 1130 if (!i2c->atomic) { 1131 dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d", 1132 __func__, msg, msg->len); 1133 dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n", 1134 __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET), 1135 xiic_getreg8(i2c, XIIC_CR_REG_OFFSET)); 1136 } 1137 1138 if (i2c->dynamic) { 1139 /* write the address */ 1140 data = i2c_8bit_addr_from_msg(msg) | 1141 XIIC_TX_DYN_START_MASK; 1142 1143 if (i2c->nmsgs == 1 && msg->len == 0) 1144 /* no data and last message -> add STOP */ 1145 data |= XIIC_TX_DYN_STOP_MASK; 1146 1147 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); 1148 1149 /* Clear any pending Tx empty, Tx Error and then enable them */ 1150 xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | 1151 XIIC_INTR_TX_ERROR_MASK | 1152 XIIC_INTR_BNB_MASK | 1153 ((i2c->nmsgs > 1 || xiic_tx_space(i2c)) ? 1154 XIIC_INTR_TX_HALF_MASK : 0)); 1155 1156 xiic_fill_tx_fifo(i2c); 1157 } else { 1158 /* 1159 * If previous message is Tx, make sure that Tx FIFO is empty 1160 * before starting a new transfer as the repeated start in 1161 * standard mode can corrupt the transaction if there are 1162 * still bytes to be transmitted in FIFO 1163 */ 1164 if (i2c->prev_msg_tx) { 1165 int status; 1166 1167 status = xiic_wait_tx_empty(i2c); 1168 if (status) 1169 return; 1170 } 1171 /* Check if RSTA should be set */ 1172 cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); 1173 if (cr & XIIC_CR_MSMS_MASK) { 1174 /* Already a master, RSTA should be set */ 1175 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr | 1176 XIIC_CR_REPEATED_START_MASK | 1177 XIIC_CR_DIR_IS_TX_MASK) & 1178 ~(XIIC_CR_NO_ACK_MASK)); 1179 } 1180 1181 /* Write address to FIFO */ 1182 data = i2c_8bit_addr_from_msg(msg); 1183 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); 1184 1185 /* Fill fifo */ 1186 xiic_fill_tx_fifo(i2c); 1187 1188 if ((cr & XIIC_CR_MSMS_MASK) == 0) { 1189 /* Start Tx by writing to CR */ 1190 cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); 1191 xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr | 1192 XIIC_CR_MSMS_MASK | 1193 XIIC_CR_DIR_IS_TX_MASK); 1194 } 1195 1196 /* Clear any pending Tx empty, Tx Error and then enable them */ 1197 xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | 1198 XIIC_INTR_TX_ERROR_MASK | 1199 XIIC_INTR_BNB_MASK); 1200 } 1201 1202 i2c->prev_msg_tx = true; 1203 1204 if (i2c->atomic && !i2c->atomic_xfer_state) 1205 xiic_send_rem_atomic(i2c); 1206 } 1207 1208 static void __xiic_start_xfer(struct xiic_i2c *i2c) 1209 { 1210 int fifo_space = xiic_tx_fifo_space(i2c); 1211 1212 if (!i2c->atomic) 1213 dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n", 1214 __func__, i2c->tx_msg, fifo_space); 1215 1216 if (!i2c->tx_msg) 1217 return; 1218 1219 if (i2c->atomic && xiic_error_check(i2c)) 1220 return; 1221 1222 i2c->rx_pos = 0; 1223 i2c->tx_pos = 0; 1224 i2c->state = STATE_START; 1225 if (i2c->tx_msg->flags & I2C_M_RD) { 1226 /* we dont date putting several reads in the FIFO */ 1227 xiic_start_recv(i2c); 1228 } else { 1229 xiic_start_send(i2c); 1230 } 1231 } 1232 1233 static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num) 1234 { 1235 bool broken_read, max_read_len, smbus_blk_read; 1236 int ret, count; 1237 1238 if (i2c->atomic) 1239 spin_lock(&i2c->atomic_lock); 1240 else 1241 mutex_lock(&i2c->lock); 1242 1243 if (i2c->tx_msg || i2c->rx_msg) { 1244 dev_err(i2c->adap.dev.parent, 1245 "cannot start a transfer while busy\n"); 1246 ret = -EBUSY; 1247 goto out; 1248 } 1249 1250 i2c->atomic_xfer_state = STATE_DONE; 1251 1252 /* In single master mode bus can only be busy, when in use by this 1253 * driver. If the register indicates bus being busy for some reason we 1254 * should ignore it, since bus will never be released and i2c will be 1255 * stuck forever. 1256 */ 1257 if (!i2c->singlemaster) { 1258 ret = xiic_wait_not_busy(i2c); 1259 if (ret) { 1260 /* If the bus is stuck in a busy state, such as due to spurious low 1261 * pulses on the bus causing a false start condition to be detected, 1262 * then try to recover by re-initializing the controller and check 1263 * again if the bus is still busy. 1264 */ 1265 dev_warn(i2c->adap.dev.parent, "I2C bus busy timeout, reinitializing\n"); 1266 ret = xiic_reinit(i2c); 1267 if (ret) 1268 goto out; 1269 ret = xiic_wait_not_busy(i2c); 1270 if (ret) 1271 goto out; 1272 } 1273 } 1274 1275 i2c->tx_msg = msgs; 1276 i2c->rx_msg = NULL; 1277 i2c->nmsgs = num; 1278 1279 if (!i2c->atomic) 1280 init_completion(&i2c->completion); 1281 1282 /* Decide standard mode or Dynamic mode */ 1283 i2c->dynamic = true; 1284 1285 /* Initialize prev message type */ 1286 i2c->prev_msg_tx = false; 1287 1288 /* 1289 * Scan through nmsgs, use dynamic mode when none of the below three 1290 * conditions occur. We need standard mode even if one condition holds 1291 * true in the entire array of messages in a single transfer. 1292 * If read transaction as dynamic mode is broken for delayed reads 1293 * in xlnx,axi-iic-2.0 / xlnx,xps-iic-2.00.a IP versions. 1294 * If read length is > 255 bytes. 1295 * If smbus_block_read transaction. 1296 */ 1297 for (count = 0; count < i2c->nmsgs; count++) { 1298 broken_read = (i2c->quirks & DYNAMIC_MODE_READ_BROKEN_BIT) && 1299 (i2c->tx_msg[count].flags & I2C_M_RD); 1300 max_read_len = (i2c->tx_msg[count].flags & I2C_M_RD) && 1301 (i2c->tx_msg[count].len > MAX_READ_LENGTH_DYNAMIC); 1302 smbus_blk_read = (i2c->tx_msg[count].flags & I2C_M_RECV_LEN); 1303 1304 if (broken_read || max_read_len || smbus_blk_read) { 1305 i2c->dynamic = false; 1306 break; 1307 } 1308 } 1309 1310 ret = xiic_reinit(i2c); 1311 if (!ret) 1312 __xiic_start_xfer(i2c); 1313 1314 out: 1315 if (i2c->atomic) 1316 spin_unlock(&i2c->atomic_lock); 1317 else 1318 mutex_unlock(&i2c->lock); 1319 1320 return ret; 1321 } 1322 1323 static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 1324 { 1325 struct xiic_i2c *i2c = i2c_get_adapdata(adap); 1326 int err; 1327 1328 dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__, 1329 xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)); 1330 1331 err = pm_runtime_resume_and_get(i2c->dev); 1332 if (err < 0) 1333 return err; 1334 1335 err = xiic_start_xfer(i2c, msgs, num); 1336 if (err < 0) 1337 goto out; 1338 1339 err = wait_for_completion_timeout(&i2c->completion, XIIC_XFER_TIMEOUT); 1340 mutex_lock(&i2c->lock); 1341 if (err == 0) { /* Timeout */ 1342 i2c->tx_msg = NULL; 1343 i2c->rx_msg = NULL; 1344 i2c->nmsgs = 0; 1345 err = -ETIMEDOUT; 1346 } else { 1347 err = (i2c->state == STATE_DONE) ? num : -EIO; 1348 } 1349 mutex_unlock(&i2c->lock); 1350 1351 out: 1352 pm_runtime_mark_last_busy(i2c->dev); 1353 pm_runtime_put_autosuspend(i2c->dev); 1354 return err; 1355 } 1356 1357 static int xiic_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 1358 { 1359 struct xiic_i2c *i2c = i2c_get_adapdata(adap); 1360 u32 status_reg; 1361 int err; 1362 1363 err = xiic_i2c_runtime_resume(i2c->dev); 1364 if (err) 1365 return err; 1366 1367 i2c->atomic = true; 1368 err = xiic_start_xfer(i2c, msgs, num); 1369 if (err < 0) 1370 return err; 1371 1372 err = readl_poll_timeout_atomic(i2c->base + XIIC_SR_REG_OFFSET, 1373 status_reg, !(status_reg & XIIC_SR_BUS_BUSY_MASK), 1374 1, XIIC_XFER_TIMEOUT_US); 1375 1376 if (err) /* Timeout */ 1377 err = -ETIMEDOUT; 1378 1379 spin_lock(&i2c->atomic_lock); 1380 if (err || i2c->state) { 1381 i2c->tx_msg = NULL; 1382 i2c->rx_msg = NULL; 1383 i2c->nmsgs = 0; 1384 } 1385 1386 err = (i2c->atomic_xfer_state == STATE_DONE) ? num : -EIO; 1387 spin_unlock(&i2c->atomic_lock); 1388 1389 i2c->atomic = false; 1390 xiic_i2c_runtime_suspend(i2c->dev); 1391 1392 return err; 1393 } 1394 1395 static u32 xiic_func(struct i2c_adapter *adap) 1396 { 1397 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA; 1398 } 1399 1400 static const struct i2c_algorithm xiic_algorithm = { 1401 .master_xfer = xiic_xfer, 1402 .master_xfer_atomic = xiic_xfer_atomic, 1403 .functionality = xiic_func, 1404 }; 1405 1406 static const struct i2c_adapter xiic_adapter = { 1407 .owner = THIS_MODULE, 1408 .class = I2C_CLASS_DEPRECATED, 1409 .algo = &xiic_algorithm, 1410 }; 1411 1412 #if defined(CONFIG_OF) 1413 static const struct xiic_version_data xiic_2_00 = { 1414 .quirks = DYNAMIC_MODE_READ_BROKEN_BIT, 1415 }; 1416 1417 static const struct of_device_id xiic_of_match[] = { 1418 { .compatible = "xlnx,xps-iic-2.00.a", .data = &xiic_2_00 }, 1419 { .compatible = "xlnx,axi-iic-2.1", }, 1420 {}, 1421 }; 1422 MODULE_DEVICE_TABLE(of, xiic_of_match); 1423 #endif 1424 1425 static int xiic_i2c_probe(struct platform_device *pdev) 1426 { 1427 struct xiic_i2c *i2c; 1428 struct xiic_i2c_platform_data *pdata; 1429 const struct of_device_id *match; 1430 struct resource *res; 1431 int ret, irq; 1432 u8 i; 1433 u32 sr; 1434 1435 i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); 1436 if (!i2c) 1437 return -ENOMEM; 1438 1439 match = of_match_node(xiic_of_match, pdev->dev.of_node); 1440 if (match && match->data) { 1441 const struct xiic_version_data *data = match->data; 1442 1443 i2c->quirks = data->quirks; 1444 } 1445 1446 i2c->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 1447 if (IS_ERR(i2c->base)) 1448 return PTR_ERR(i2c->base); 1449 1450 irq = platform_get_irq(pdev, 0); 1451 if (irq < 0) 1452 return irq; 1453 1454 pdata = dev_get_platdata(&pdev->dev); 1455 1456 /* hook up driver to tree */ 1457 platform_set_drvdata(pdev, i2c); 1458 i2c->adap = xiic_adapter; 1459 i2c_set_adapdata(&i2c->adap, i2c); 1460 i2c->adap.dev.parent = &pdev->dev; 1461 i2c->adap.dev.of_node = pdev->dev.of_node; 1462 snprintf(i2c->adap.name, sizeof(i2c->adap.name), 1463 DRIVER_NAME " %s", pdev->name); 1464 1465 mutex_init(&i2c->lock); 1466 spin_lock_init(&i2c->atomic_lock); 1467 1468 i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL); 1469 if (IS_ERR(i2c->clk)) 1470 return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk), 1471 "failed to enable input clock.\n"); 1472 1473 i2c->dev = &pdev->dev; 1474 pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT); 1475 pm_runtime_use_autosuspend(i2c->dev); 1476 pm_runtime_set_active(i2c->dev); 1477 pm_runtime_enable(i2c->dev); 1478 1479 /* SCL frequency configuration */ 1480 i2c->input_clk = clk_get_rate(i2c->clk); 1481 ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", 1482 &i2c->i2c_clk); 1483 /* If clock-frequency not specified in DT, do not configure in SW */ 1484 if (ret || i2c->i2c_clk > I2C_MAX_FAST_MODE_PLUS_FREQ) 1485 i2c->i2c_clk = 0; 1486 1487 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 1488 xiic_process, IRQF_ONESHOT, 1489 pdev->name, i2c); 1490 1491 if (ret < 0) { 1492 dev_err(&pdev->dev, "Cannot claim IRQ\n"); 1493 goto err_pm_disable; 1494 } 1495 1496 i2c->singlemaster = 1497 of_property_read_bool(pdev->dev.of_node, "single-master"); 1498 1499 /* 1500 * Detect endianness 1501 * Try to reset the TX FIFO. Then check the EMPTY flag. If it is not 1502 * set, assume that the endianness was wrong and swap. 1503 */ 1504 i2c->endianness = LITTLE; 1505 xiic_setreg32(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK); 1506 /* Reset is cleared in xiic_reinit */ 1507 sr = xiic_getreg32(i2c, XIIC_SR_REG_OFFSET); 1508 if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK)) 1509 i2c->endianness = BIG; 1510 1511 ret = xiic_reinit(i2c); 1512 if (ret < 0) { 1513 dev_err(&pdev->dev, "Cannot xiic_reinit\n"); 1514 goto err_pm_disable; 1515 } 1516 1517 /* add i2c adapter to i2c tree */ 1518 ret = i2c_add_adapter(&i2c->adap); 1519 if (ret) { 1520 xiic_deinit(i2c); 1521 goto err_pm_disable; 1522 } 1523 1524 if (pdata) { 1525 /* add in known devices to the bus */ 1526 for (i = 0; i < pdata->num_devices; i++) 1527 i2c_new_client_device(&i2c->adap, pdata->devices + i); 1528 } 1529 1530 dev_dbg(&pdev->dev, "mmio %08lx irq %d scl clock frequency %d\n", 1531 (unsigned long)res->start, irq, i2c->i2c_clk); 1532 1533 return 0; 1534 1535 err_pm_disable: 1536 pm_runtime_disable(&pdev->dev); 1537 pm_runtime_set_suspended(&pdev->dev); 1538 1539 return ret; 1540 } 1541 1542 static void xiic_i2c_remove(struct platform_device *pdev) 1543 { 1544 struct xiic_i2c *i2c = platform_get_drvdata(pdev); 1545 int ret; 1546 1547 /* remove adapter & data */ 1548 i2c_del_adapter(&i2c->adap); 1549 1550 ret = pm_runtime_get_sync(i2c->dev); 1551 1552 if (ret < 0) 1553 dev_warn(&pdev->dev, "Failed to activate device for removal (%pe)\n", 1554 ERR_PTR(ret)); 1555 else 1556 xiic_deinit(i2c); 1557 1558 pm_runtime_put_sync(i2c->dev); 1559 pm_runtime_disable(&pdev->dev); 1560 pm_runtime_set_suspended(&pdev->dev); 1561 pm_runtime_dont_use_autosuspend(&pdev->dev); 1562 } 1563 1564 static const struct dev_pm_ops xiic_dev_pm_ops = { 1565 SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend, 1566 xiic_i2c_runtime_resume, NULL) 1567 }; 1568 1569 static struct platform_driver xiic_i2c_driver = { 1570 .probe = xiic_i2c_probe, 1571 .remove = xiic_i2c_remove, 1572 .driver = { 1573 .name = DRIVER_NAME, 1574 .of_match_table = of_match_ptr(xiic_of_match), 1575 .pm = &xiic_dev_pm_ops, 1576 }, 1577 }; 1578 1579 module_platform_driver(xiic_i2c_driver); 1580 1581 MODULE_ALIAS("platform:" DRIVER_NAME); 1582 MODULE_AUTHOR("info@mocean-labs.com"); 1583 MODULE_DESCRIPTION("Xilinx I2C bus driver"); 1584 MODULE_LICENSE("GPL v2"); 1585