// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 *
 * Author: Vitor Soares <vitor.soares@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dw-i3c-master.h"

#define DEVICE_CTRL			0x0
#define DEV_CTRL_ENABLE			BIT(31)
#define DEV_CTRL_RESUME			BIT(30)
#define DEV_CTRL_HOT_JOIN_NACK		BIT(8)
#define DEV_CTRL_I2C_SLAVE_PRESENT	BIT(7)

#define DEVICE_ADDR			0x4
#define DEV_ADDR_DYNAMIC_ADDR_VALID	BIT(31)
#define DEV_ADDR_DYNAMIC(x)		(((x) << 16) & GENMASK(22, 16))

#define HW_CAPABILITY			0x8
#define COMMAND_QUEUE_PORT		0xc
#define COMMAND_PORT_TOC		BIT(30)
#define COMMAND_PORT_READ_TRANSFER	BIT(28)
#define COMMAND_PORT_SDAP		BIT(27)
#define COMMAND_PORT_ROC		BIT(26)
#define COMMAND_PORT_SPEED(x)		(((x) << 21) & GENMASK(23, 21))
#define COMMAND_PORT_DEV_INDEX(x)	(((x) << 16) & GENMASK(20, 16))
#define COMMAND_PORT_CP			BIT(15)
#define COMMAND_PORT_CMD(x)		(((x) << 7) & GENMASK(14, 7))
#define COMMAND_PORT_TID(x)		(((x) << 3) & GENMASK(6, 3))

#define COMMAND_PORT_ARG_DATA_LEN(x)	(((x) << 16) & GENMASK(31, 16))
#define COMMAND_PORT_ARG_DATA_LEN_MAX	65536
#define COMMAND_PORT_TRANSFER_ARG	0x01

#define COMMAND_PORT_SDA_DATA_BYTE_3(x)	(((x) << 24) & GENMASK(31, 24))
#define COMMAND_PORT_SDA_DATA_BYTE_2(x)	(((x) << 16) & GENMASK(23, 16))
#define COMMAND_PORT_SDA_DATA_BYTE_1(x)	(((x) << 8) & GENMASK(15, 8))
#define COMMAND_PORT_SDA_BYTE_STRB_3	BIT(5)
#define COMMAND_PORT_SDA_BYTE_STRB_2	BIT(4)
#define COMMAND_PORT_SDA_BYTE_STRB_1	BIT(3)
#define COMMAND_PORT_SHORT_DATA_ARG	0x02

#define COMMAND_PORT_DEV_COUNT(x)	(((x) << 21) & GENMASK(25, 21))
#define COMMAND_PORT_ADDR_ASSGN_CMD	0x03

#define RESPONSE_QUEUE_PORT		0x10
#define RESPONSE_PORT_ERR_STATUS(x)	(((x) & GENMASK(31, 28)) >> 28)
#define RESPONSE_NO_ERROR		0
#define RESPONSE_ERROR_CRC		1
#define RESPONSE_ERROR_PARITY		2
#define RESPONSE_ERROR_FRAME		3
#define RESPONSE_ERROR_IBA_NACK		4
#define RESPONSE_ERROR_ADDRESS_NACK	5
#define RESPONSE_ERROR_OVER_UNDER_FLOW	6
#define RESPONSE_ERROR_TRANSF_ABORT	8
#define RESPONSE_ERROR_I2C_W_NACK_ERR	9
#define RESPONSE_PORT_TID(x)		(((x) & GENMASK(27, 24)) >> 24)
#define RESPONSE_PORT_DATA_LEN(x)	((x) & GENMASK(15, 0))

#define RX_TX_DATA_PORT			0x14
#define IBI_QUEUE_STATUS		0x18
#define IBI_QUEUE_STATUS_IBI_ID(x)	(((x) & GENMASK(15, 8)) >> 8)
#define IBI_QUEUE_STATUS_DATA_LEN(x)	((x) & GENMASK(7, 0))
#define IBI_QUEUE_IBI_ADDR(x)		(IBI_QUEUE_STATUS_IBI_ID(x) >> 1)
#define IBI_QUEUE_IBI_RNW(x)		(IBI_QUEUE_STATUS_IBI_ID(x) & BIT(0))
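/*
 * The IBI ID in the queue status word is the originating address shifted
 * left by one with the R/W bit in bit 0; hot-join requests show up with
 * I3C_HOT_JOIN_ADDR and the R/W bit cleared.
 */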
#define IBI_TYPE_MR(x)		\
	((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
#define IBI_TYPE_HJ(x)		\
	((IBI_QUEUE_IBI_ADDR(x) == I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
#define IBI_TYPE_SIRQ(x)	\
	((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && IBI_QUEUE_IBI_RNW(x))

#define QUEUE_THLD_CTRL			0x1c
#define QUEUE_THLD_CTRL_IBI_STAT_MASK	GENMASK(31, 24)
#define QUEUE_THLD_CTRL_IBI_STAT(x)	(((x) - 1) << 24)
#define QUEUE_THLD_CTRL_IBI_DATA_MASK	GENMASK(20, 16)
#define QUEUE_THLD_CTRL_IBI_DATA(x)	((x) << 16)
#define QUEUE_THLD_CTRL_RESP_BUF_MASK	GENMASK(15, 8)
#define QUEUE_THLD_CTRL_RESP_BUF(x)	(((x) - 1) << 8)

#define DATA_BUFFER_THLD_CTRL		0x20
#define DATA_BUFFER_THLD_CTRL_RX_BUF	GENMASK(11, 8)

#define IBI_QUEUE_CTRL			0x24
#define IBI_MR_REQ_REJECT		0x2C
#define IBI_SIR_REQ_REJECT		0x30
#define IBI_REQ_REJECT_ALL		GENMASK(31, 0)

#define RESET_CTRL			0x34
#define RESET_CTRL_IBI_QUEUE		BIT(5)
#define RESET_CTRL_RX_FIFO		BIT(4)
#define RESET_CTRL_TX_FIFO		BIT(3)
#define RESET_CTRL_RESP_QUEUE		BIT(2)
#define RESET_CTRL_CMD_QUEUE		BIT(1)
#define RESET_CTRL_SOFT			BIT(0)

#define SLV_EVENT_CTRL			0x38
#define INTR_STATUS			0x3c
#define INTR_STATUS_EN			0x40
#define INTR_SIGNAL_EN			0x44
#define INTR_FORCE			0x48
#define INTR_BUSOWNER_UPDATE_STAT	BIT(13)
#define INTR_IBI_UPDATED_STAT		BIT(12)
#define INTR_READ_REQ_RECV_STAT		BIT(11)
#define INTR_DEFSLV_STAT		BIT(10)
#define INTR_TRANSFER_ERR_STAT		BIT(9)
#define INTR_DYN_ADDR_ASSGN_STAT	BIT(8)
#define INTR_CCC_UPDATED_STAT		BIT(6)
#define INTR_TRANSFER_ABORT_STAT	BIT(5)
#define INTR_RESP_READY_STAT		BIT(4)
#define INTR_CMD_QUEUE_READY_STAT	BIT(3)
#define INTR_IBI_THLD_STAT		BIT(2)
#define INTR_RX_THLD_STAT		BIT(1)
#define INTR_TX_THLD_STAT		BIT(0)
#define INTR_ALL			(INTR_BUSOWNER_UPDATE_STAT |	\
					INTR_IBI_UPDATED_STAT |		\
					INTR_READ_REQ_RECV_STAT |	\
					INTR_DEFSLV_STAT |		\
					INTR_TRANSFER_ERR_STAT |	\
					INTR_DYN_ADDR_ASSGN_STAT |	\
					INTR_CCC_UPDATED_STAT |		\
					INTR_TRANSFER_ABORT_STAT |	\
					INTR_RESP_READY_STAT |		\
					INTR_CMD_QUEUE_READY_STAT |	\
					INTR_IBI_THLD_STAT |		\
					INTR_TX_THLD_STAT |		\
					INTR_RX_THLD_STAT)

#define INTR_MASTER_MASK		(INTR_TRANSFER_ERR_STAT |	\
					 INTR_RESP_READY_STAT)

#define QUEUE_STATUS_LEVEL		0x4c
#define QUEUE_STATUS_IBI_STATUS_CNT(x)	(((x) & GENMASK(28, 24)) >> 24)
#define QUEUE_STATUS_IBI_BUF_BLR(x)	(((x) & GENMASK(23, 16)) >> 16)
#define QUEUE_STATUS_LEVEL_RESP(x)	(((x) & GENMASK(15, 8)) >> 8)
#define QUEUE_STATUS_LEVEL_CMD(x)	((x) & GENMASK(7, 0))

#define DATA_BUFFER_STATUS_LEVEL	0x50
#define DATA_BUFFER_STATUS_LEVEL_TX(x)	((x) & GENMASK(7, 0))

#define PRESENT_STATE			0x54
#define CCC_DEVICE_STATUS		0x58
#define DEVICE_ADDR_TABLE_POINTER	0x5c
#define DEVICE_ADDR_TABLE_DEPTH(x)	(((x) & GENMASK(31, 16)) >> 16)
#define DEVICE_ADDR_TABLE_ADDR(x)	((x) & GENMASK(7, 0))

#define DEV_CHAR_TABLE_POINTER		0x60
#define VENDOR_SPECIFIC_REG_POINTER	0x6c
#define SLV_PID_VALUE			0x74
#define SLV_CHAR_CTRL			0x78
#define SLV_MAX_LEN			0x7c
#define MAX_READ_TURNAROUND		0x80
#define MAX_DATA_SPEED			0x84
#define SLV_DEBUG_STATUS		0x88
#define SLV_INTR_REQ			0x8c
#define DEVICE_CTRL_EXTENDED		0xb0
#define SCL_I3C_OD_TIMING		0xb4
#define SCL_I3C_PP_TIMING		0xb8
#define SCL_I3C_TIMING_HCNT(x)		(((x) << 16) & GENMASK(23, 16))
#define SCL_I3C_TIMING_LCNT(x)		((x) & GENMASK(7, 0))
#define SCL_I3C_TIMING_CNT_MIN		5

#define SCL_I2C_FM_TIMING		0xbc
#define SCL_I2C_FM_TIMING_HCNT(x)	(((x) << 16) & GENMASK(31, 16))
#define SCL_I2C_FM_TIMING_LCNT(x)	((x) & GENMASK(15, 0))

#define SCL_I2C_FMP_TIMING		0xc0
#define SCL_I2C_FMP_TIMING_HCNT(x)	(((x) << 16) & GENMASK(23, 16))
#define SCL_I2C_FMP_TIMING_LCNT(x)	((x) & GENMASK(15, 0))
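/*
 * SCL_EXT_LCNT_TIMING packs one low-count value per byte for the reduced
 * SDR data rates: SDR1 (8 MHz) in byte 0 up to SDR4 (2 MHz) in byte 3.
 */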

#define SCL_EXT_LCNT_TIMING		0xc8
#define SCL_EXT_LCNT_4(x)		(((x) << 24) & GENMASK(31, 24))
#define SCL_EXT_LCNT_3(x)		(((x) << 16) & GENMASK(23, 16))
#define SCL_EXT_LCNT_2(x)		(((x) << 8) & GENMASK(15, 8))
#define SCL_EXT_LCNT_1(x)		((x) & GENMASK(7, 0))

#define SCL_EXT_TERMN_LCNT_TIMING	0xcc
#define BUS_FREE_TIMING			0xd4
#define BUS_I3C_MST_FREE(x)		((x) & GENMASK(15, 0))

#define BUS_IDLE_TIMING			0xd8
#define I3C_VER_ID			0xe0
#define I3C_VER_TYPE			0xe4
#define EXTENDED_CAPABILITY		0xe8
#define SLAVE_CONFIG			0xec

#define DEV_ADDR_TABLE_IBI_MDB		BIT(12)
#define DEV_ADDR_TABLE_SIR_REJECT	BIT(13)
#define DEV_ADDR_TABLE_LEGACY_I2C_DEV	BIT(31)
#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x)	(((x) << 16) & GENMASK(23, 16))
#define DEV_ADDR_TABLE_STATIC_ADDR(x)	((x) & GENMASK(6, 0))
#define DEV_ADDR_TABLE_LOC(start, idx)	((start) + ((idx) << 2))

#define I3C_BUS_SDR1_SCL_RATE		8000000
#define I3C_BUS_SDR2_SCL_RATE		6000000
#define I3C_BUS_SDR3_SCL_RATE		4000000
#define I3C_BUS_SDR4_SCL_RATE		2000000
#define I3C_BUS_I2C_FM_TLOW_MIN_NS	1300
#define I3C_BUS_I2C_FMP_TLOW_MIN_NS	500
#define I3C_BUS_THIGH_MAX_NS		41

#define XFER_TIMEOUT			(msecs_to_jiffies(1000))
#define RPM_AUTOSUSPEND_TIMEOUT		1000 /* ms */

struct dw_i3c_cmd {
	u32 cmd_lo;
	u32 cmd_hi;
	u16 tx_len;
	const void *tx_buf;
	u16 rx_len;
	void *rx_buf;
	u8 error;
};

struct dw_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int ncmds;
	struct dw_i3c_cmd cmds[] __counted_by(ncmds);
};

struct dw_i3c_i2c_dev_data {
	u8 index;
	struct i3c_generic_ibi_pool *ibi_pool;
};

/*
 * Returns 1 when the 7-bit value @p has an even number of set bits, so that
 * "addr | (even_parity(addr) << 7)" carries the odd parity bit expected in
 * device address table entries.
 */
static u8 even_parity(u8 p)
{
	p ^= p >> 4;
	p &= 0xf;

	return (0x9669 >> p) & 1;
}

static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					   const struct i3c_ccc_cmd *cmd)
{
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		return false;
	}
}

static inline struct dw_i3c_master *
to_dw_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct dw_i3c_master, base);
}

static void dw_i3c_master_disable(struct dw_i3c_master *master)
{
	writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
	       master->regs + DEVICE_CTRL);
}

static void dw_i3c_master_enable(struct dw_i3c_master *master)
{
	u32 dev_ctrl;

	dev_ctrl = readl(master->regs + DEVICE_CTRL);
	/* For now don't support Hot-Join */
	dev_ctrl |= DEV_CTRL_HOT_JOIN_NACK;
	if (master->i2c_slv_prsnt)
		dev_ctrl |= DEV_CTRL_I2C_SLAVE_PRESENT;
	writel(dev_ctrl | DEV_CTRL_ENABLE,
	       master->regs + DEVICE_CTRL);
}
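/*
 * Look up the DAT slot that holds @addr (dynamic for I3C devices, static for
 * legacy I2C devices); returns the slot index or -EINVAL if not found.
 */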
static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
{
	int pos;

	for (pos = 0; pos < master->maxdevs; pos++) {
		if (addr == master->devs[pos].addr)
			return pos;
	}

	return -EINVAL;
}

static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
{
	if (!(master->free_pos & GENMASK(master->maxdevs - 1, 0)))
		return -ENOSPC;

	return ffs(master->free_pos) - 1;
}

static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
				     const u8 *bytes, int nbytes)
{
	writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = 0;

		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
		writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
	}
}

static void dw_i3c_master_read_fifo(struct dw_i3c_master *master,
				    int reg, u8 *bytes, int nbytes)
{
	readsl(master->regs + reg, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp;

		readsl(master->regs + reg, &tmp, 1);
		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
	}
}

static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
				       u8 *bytes, int nbytes)
{
	return dw_i3c_master_read_fifo(master, RX_TX_DATA_PORT, bytes, nbytes);
}

static void dw_i3c_master_read_ibi_fifo(struct dw_i3c_master *master,
					u8 *bytes, int nbytes)
{
	return dw_i3c_master_read_fifo(master, IBI_QUEUE_STATUS, bytes, nbytes);
}

static struct dw_i3c_xfer *
dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
{
	struct dw_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void dw_i3c_master_free_xfer(struct dw_i3c_xfer *xfer)
{
	kfree(xfer);
}

static void dw_i3c_master_start_xfer_locked(struct dw_i3c_master *master)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;
	u32 thld_ctrl;

	if (!xfer)
		return;

	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
	}

	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
	thld_ctrl |= QUEUE_THLD_CTRL_RESP_BUF(xfer->ncmds);
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd_hi, master->regs + COMMAND_QUEUE_PORT);
		writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
	}
}

static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		dw_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
					      struct dw_i3c_xfer *xfer)
{
	if (master->xferqueue.cur == xfer) {
		u32 status;

		master->xferqueue.cur = NULL;

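		/*
		 * Flush the data FIFOs and the command/response queues so no
		 * stale entries from the aborted transfer are left behind.
		 */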
		writel(RESET_CTRL_RX_FIFO | RESET_CTRL_TX_FIFO |
		       RESET_CTRL_RESP_QUEUE | RESET_CTRL_CMD_QUEUE,
		       master->regs + RESET_CTRL);

		readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
					  !status, 10, 1000000);
	} else {
		list_del_init(&xfer->node);
	}
}

static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	dw_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 nresp;

	if (!xfer)
		return;

	nresp = readl(master->regs + QUEUE_STATUS_LEVEL);
	nresp = QUEUE_STATUS_LEVEL_RESP(nresp);

	for (i = 0; i < nresp; i++) {
		struct dw_i3c_cmd *cmd;
		u32 resp;

		resp = readl(master->regs + RESPONSE_QUEUE_PORT);

		cmd = &xfer->cmds[RESPONSE_PORT_TID(resp)];
		cmd->rx_len = RESPONSE_PORT_DATA_LEN(resp);
		cmd->error = RESPONSE_PORT_ERR_STATUS(resp);
		if (cmd->rx_len && !cmd->error)
			dw_i3c_master_read_rx_fifo(master, cmd->rx_buf,
						   cmd->rx_len);
	}

	for (i = 0; i < nresp; i++) {
		switch (xfer->cmds[i].error) {
		case RESPONSE_NO_ERROR:
			break;
		case RESPONSE_ERROR_PARITY:
		case RESPONSE_ERROR_IBA_NACK:
		case RESPONSE_ERROR_TRANSF_ABORT:
		case RESPONSE_ERROR_CRC:
		case RESPONSE_ERROR_FRAME:
			ret = -EIO;
			break;
		case RESPONSE_ERROR_OVER_UNDER_FLOW:
			ret = -ENOSPC;
			break;
		case RESPONSE_ERROR_I2C_W_NACK_ERR:
		case RESPONSE_ERROR_ADDRESS_NACK:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	if (ret < 0) {
		dw_i3c_master_dequeue_xfer_locked(master, xfer);
		writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
		       master->regs + DEVICE_CTRL);
	}

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct dw_i3c_xfer,
					node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	dw_i3c_master_start_xfer_locked(master);
}

static void dw_i3c_master_set_intr_regs(struct dw_i3c_master *master)
{
	u32 thld_ctrl;

	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~(QUEUE_THLD_CTRL_RESP_BUF_MASK |
		       QUEUE_THLD_CTRL_IBI_STAT_MASK |
		       QUEUE_THLD_CTRL_IBI_DATA_MASK);
	thld_ctrl |= QUEUE_THLD_CTRL_IBI_STAT(1) |
		     QUEUE_THLD_CTRL_IBI_DATA(31);
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
	thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
	writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);

	writel(INTR_ALL, master->regs + INTR_STATUS);
	writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
	writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);

	master->sir_rej_mask = IBI_REQ_REJECT_ALL;
	writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);

	writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
}

static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u32 scl_timing;
	u8 hcnt, lcnt;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	core_period = DIV_ROUND_UP(1000000000, core_rate);

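	/*
	 * The push-pull high count is derived from the maximum SCL high time
	 * allowed on an I3C bus (41 ns); the low count then fills out the
	 * rest of the requested I3C SCL period.
	 */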
	hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
	if (hcnt < SCL_I3C_TIMING_CNT_MIN)
		hcnt = SCL_I3C_TIMING_CNT_MIN;

	lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) - hcnt;
	if (lcnt < SCL_I3C_TIMING_CNT_MIN)
		lcnt = SCL_I3C_TIMING_CNT_MIN;

	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
	master->i3c_pp_timing = scl_timing;

	/*
	 * In pure i3c mode, MST_FREE represents tCAS. In shared mode, this
	 * will be set up by dw_i2c_clk_cfg as tLOW.
	 */
	if (master->base.bus.mode == I3C_BUS_MODE_PURE) {
		writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
		master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);
	}

	lcnt = max_t(u8,
		     DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);
	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
	master->i3c_od_timing = scl_timing;

	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
	scl_timing = SCL_EXT_LCNT_1(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_2(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR3_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_3(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_4(lcnt);
	writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
	master->ext_lcnt_timing = scl_timing;

	return 0;
}

static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u16 hcnt, lcnt;
	u32 scl_timing;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	core_period = DIV_ROUND_UP(1000000000, core_rate);

	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
	scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
		     SCL_I2C_FMP_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
	master->i2c_fmp_timing = scl_timing;

	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
	scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
		     SCL_I2C_FM_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
	master->i2c_fm_timing = scl_timing;

	writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
	master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);

	writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
	       master->regs + DEVICE_CTRL);
	master->i2c_slv_prsnt = true;

	return 0;
}

static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = { };
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	ret = master->platform_ops->init(master);
	if (ret)
		goto rpm_out;

	switch (bus->mode) {
	case I3C_BUS_MODE_MIXED_FAST:
	case I3C_BUS_MODE_MIXED_LIMITED:
		ret = dw_i2c_clk_cfg(master);
		if (ret)
			goto rpm_out;
		fallthrough;
	case I3C_BUS_MODE_PURE:
		ret = dw_i3c_clk_cfg(master);
		if (ret)
			goto rpm_out;
		break;
	default:
		ret = -EINVAL;
		goto rpm_out;
	}

	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		goto rpm_out;

	writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
	       master->regs + DEVICE_ADDR);
	master->dev_addr = ret;
	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		goto rpm_out;

	dw_i3c_master_set_intr_regs(master);
	dw_i3c_master_enable(master);

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}

static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	dw_i3c_master_disable(master);
}

static int dw_i3c_ccc_set(struct dw_i3c_master *master,
			  struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	int ret, pos = 0;

	if (ccc->id & I3C_CCC_DIRECT) {
		pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
		if (pos < 0)
			return pos;
	}

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	cmd = xfer->cmds;
	cmd->tx_buf = ccc->dests[0].payload.data;
	cmd->tx_len = ccc->dests[0].payload.len;

	cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
		      COMMAND_PORT_TRANSFER_ARG;

	cmd->cmd_lo = COMMAND_PORT_CP |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(ccc->id) |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
		ccc->err = I3C_ERROR_M2;

	dw_i3c_master_free_xfer(xfer);

	return ret;
}

static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	int ret, pos;

	pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
	if (pos < 0)
		return pos;

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	cmd = xfer->cmds;
	cmd->rx_buf = ccc->dests[0].payload.data;
	cmd->rx_len = ccc->dests[0].payload.len;

	cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
		      COMMAND_PORT_TRANSFER_ARG;

	cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
		      COMMAND_PORT_CP |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(ccc->id) |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
		ccc->err = I3C_ERROR_M2;
	dw_i3c_master_free_xfer(xfer);

	return ret;
}

static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
				      struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int ret = 0;

	if (ccc->id == I3C_CCC_ENTDAA)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

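	/* CCCs with the RnW flag set read data back from the target. */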
	if (ccc->rnw)
		ret = dw_i3c_ccc_get(master, ccc);
	else
		ret = dw_i3c_ccc_set(master, ccc);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}

static int dw_i3c_master_daa(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	u32 olddevs, newdevs;
	u8 p, last_addr = 0;
	int ret, pos;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	olddevs = ~(master->free_pos);

	/* Prepare DAT before launching DAA. */
	for (pos = 0; pos < master->maxdevs; pos++) {
		if (olddevs & BIT(pos))
			continue;

		ret = i3c_master_get_free_addr(m, last_addr + 1);
		if (ret < 0) {
			ret = -ENOSPC;
			goto rpm_out;
		}

		master->devs[pos].addr = ret;
		p = even_parity(ret);
		last_addr = ret;
		ret |= (p << 7);

		writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
		       master->regs +
		       DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));

		ret = 0;
	}

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer) {
		ret = -ENOMEM;
		goto rpm_out;
	}

	pos = dw_i3c_master_get_free_pos(master);
	if (pos < 0) {
		dw_i3c_master_free_xfer(xfer);
		ret = pos;
		goto rpm_out;
	}
	cmd = &xfer->cmds[0];
	cmd->cmd_hi = 0x1;
	cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(I3C_CCC_ENTDAA) |
		      COMMAND_PORT_ADDR_ASSGN_CMD |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0);
	newdevs &= ~olddevs;

	for (pos = 0; pos < master->maxdevs; pos++) {
		if (newdevs & BIT(pos))
			i3c_master_add_i3c_dev_locked(m, master->devs[pos].addr);
	}

	dw_i3c_master_free_xfer(xfer);

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}

static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				    struct i3c_priv_xfer *i3c_xfers,
				    int i3c_nxfers)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct dw_i3c_xfer *xfer;
	int i, ret = 0;

	if (!i3c_nxfers)
		return 0;

	if (i3c_nxfers > master->caps.cmdfifodepth)
		return -ENOTSUPP;

	for (i = 0; i < i3c_nxfers; i++) {
		if (i3c_xfers[i].rnw)
			nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
		else
			ntxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
	}

	if (ntxwords > master->caps.datafifodepth ||
	    nrxwords > master->caps.datafifodepth)
		return -ENOTSUPP;

	xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
	if (!xfer)
		return -ENOMEM;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		dw_i3c_master_free_xfer(xfer);
		return ret;
	}

	for (i = 0; i < i3c_nxfers; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

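		/*
		 * cmd_hi carries the transfer argument (payload length);
		 * cmd_lo carries the transfer command: direction, SDR speed,
		 * DAT index and the TID used to match the response.
		 */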
		cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) |
			COMMAND_PORT_TRANSFER_ARG;

		if (i3c_xfers[i].rnw) {
			cmd->rx_buf = i3c_xfers[i].data.in;
			cmd->rx_len = i3c_xfers[i].len;
			cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
				      COMMAND_PORT_SPEED(dev->info.max_read_ds);

		} else {
			cmd->tx_buf = i3c_xfers[i].data.out;
			cmd->tx_len = i3c_xfers[i].len;
			cmd->cmd_lo =
				COMMAND_PORT_SPEED(dev->info.max_write_ds);
		}

		cmd->cmd_lo |= COMMAND_PORT_TID(i) |
			       COMMAND_PORT_DEV_INDEX(data->index) |
			       COMMAND_PORT_ROC;

		if (i == (i3c_nxfers - 1))
			cmd->cmd_lo |= COMMAND_PORT_TOC;
	}

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	for (i = 0; i < i3c_nxfers; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		if (i3c_xfers[i].rnw)
			i3c_xfers[i].len = cmd->rx_len;
	}

	ret = xfer->ret;
	dw_i3c_master_free_xfer(xfer);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}

static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					  u8 old_dyn_addr)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int pos;

	pos = dw_i3c_master_get_free_pos(master);

	if (data->index > pos && pos > 0) {
		writel(0,
		       master->regs +
		       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

		master->devs[data->index].addr = 0;
		master->free_pos |= BIT(data->index);

		data->index = pos;
		master->devs[pos].addr = dev->info.dyn_addr;
		master->free_pos &= ~BIT(pos);
	}

	writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	master->devs[data->index].addr = dev->info.dyn_addr;

	return 0;
}

static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct dw_i3c_i2c_dev_data *data;
	int pos;

	pos = dw_i3c_master_get_free_pos(master);
	if (pos < 0)
		return pos;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->index = pos;
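	/*
	 * Devices attached before DAA only have a static address; track that
	 * until a dynamic address has been assigned.
	 */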
	master->devs[pos].addr = dev->info.dyn_addr ? : dev->info.static_addr;
	master->free_pos &= ~BIT(pos);
	i3c_dev_set_master_data(dev, data);

	writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr),
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	return 0;
}

static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	writel(0,
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	i3c_dev_set_master_data(dev, NULL);
	master->devs[data->index].addr = 0;
	master->free_pos |= BIT(data->index);
	kfree(data);
}

static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				   const struct i2c_msg *i2c_xfers,
				   int i2c_nxfers)
{
	struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct dw_i3c_xfer *xfer;
	int i, ret = 0;

	if (!i2c_nxfers)
		return 0;

	if (i2c_nxfers > master->caps.cmdfifodepth)
		return -ENOTSUPP;

	for (i = 0; i < i2c_nxfers; i++) {
		if (i2c_xfers[i].flags & I2C_M_RD)
			nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
		else
			ntxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
	}

	if (ntxwords > master->caps.datafifodepth ||
	    nrxwords > master->caps.datafifodepth)
		return -ENOTSUPP;

	xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
	if (!xfer)
		return -ENOMEM;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		dw_i3c_master_free_xfer(xfer);
		return ret;
	}

	for (i = 0; i < i2c_nxfers; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i2c_xfers[i].len) |
			COMMAND_PORT_TRANSFER_ARG;

		cmd->cmd_lo = COMMAND_PORT_TID(i) |
			      COMMAND_PORT_DEV_INDEX(data->index) |
			      COMMAND_PORT_ROC;

		if (i2c_xfers[i].flags & I2C_M_RD) {
			cmd->cmd_lo |= COMMAND_PORT_READ_TRANSFER;
			cmd->rx_buf = i2c_xfers[i].buf;
			cmd->rx_len = i2c_xfers[i].len;
		} else {
			cmd->tx_buf = i2c_xfers[i].buf;
			cmd->tx_len = i2c_xfers[i].len;
		}

		if (i == (i2c_nxfers - 1))
			cmd->cmd_lo |= COMMAND_PORT_TOC;
	}

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	dw_i3c_master_free_xfer(xfer);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}

static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct dw_i3c_i2c_dev_data *data;
	int pos;

	pos = dw_i3c_master_get_free_pos(master);
	if (pos < 0)
		return pos;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->index = pos;
	master->devs[pos].addr = dev->addr;
	master->devs[pos].is_i2c_addr = true;
	master->free_pos &= ~BIT(pos);
	i2c_dev_set_master_data(dev, data);

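	/* Program the DAT entry with the legacy-I2C flag and static address. */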
	writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
	       DEV_ADDR_TABLE_STATIC_ADDR(dev->addr),
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	return 0;
}

static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	writel(0,
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	i2c_dev_set_master_data(dev, NULL);
	master->devs[data->index].addr = 0;
	master->free_pos |= BIT(data->index);
	kfree(data);
}

static int dw_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				     const struct i3c_ibi_setup *req)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	unsigned long flags;

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->devs_lock, flags);
	master->devs[data->index].ibi_dev = dev;
	spin_unlock_irqrestore(&master->devs_lock, flags);

	return 0;
}

static void dw_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	unsigned long flags;

	spin_lock_irqsave(&master->devs_lock, flags);
	master->devs[data->index].ibi_dev = NULL;
	spin_unlock_irqrestore(&master->devs_lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;
}

static void dw_i3c_master_enable_sir_signal(struct dw_i3c_master *master, bool enable)
{
	u32 reg;

	reg = readl(master->regs + INTR_STATUS_EN);
	reg &= ~INTR_IBI_THLD_STAT;
	if (enable)
		reg |= INTR_IBI_THLD_STAT;
	writel(reg, master->regs + INTR_STATUS_EN);

	reg = readl(master->regs + INTR_SIGNAL_EN);
	reg &= ~INTR_IBI_THLD_STAT;
	if (enable)
		reg |= INTR_IBI_THLD_STAT;
	writel(reg, master->regs + INTR_SIGNAL_EN);
}

static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
					  struct i3c_dev_desc *dev,
					  u8 idx, bool enable)
{
	unsigned long flags;
	u32 dat_entry, reg;
	bool global;

	dat_entry = DEV_ADDR_TABLE_LOC(master->datstartaddr, idx);

	spin_lock_irqsave(&master->devs_lock, flags);
	reg = readl(master->regs + dat_entry);
	if (enable) {
		reg &= ~DEV_ADDR_TABLE_SIR_REJECT;
		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
			reg |= DEV_ADDR_TABLE_IBI_MDB;
	} else {
		reg |= DEV_ADDR_TABLE_SIR_REJECT;
	}
	master->platform_ops->set_dat_ibi(master, dev, enable, &reg);
	writel(reg, master->regs + dat_entry);

	if (enable) {
		global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL);
		master->sir_rej_mask &= ~BIT(idx);
	} else {
		bool hj_rejected = !!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_HOT_JOIN_NACK);

		master->sir_rej_mask |= BIT(idx);
		global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL) && hj_rejected;
	}
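	/*
	 * Update the per-device SIR reject mask; the IBI threshold interrupt
	 * is only toggled when the controller switches between accepting and
	 * rejecting all in-band interrupts.
	 */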
	writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);

	if (global)
		dw_i3c_master_enable_sir_signal(master, enable);

	spin_unlock_irqrestore(&master->devs_lock, flags);
}

static int dw_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	dw_i3c_master_enable_sir_signal(master, true);
	writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_HOT_JOIN_NACK,
	       master->regs + DEVICE_CTRL);

	return 0;
}

static int dw_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
	       master->regs + DEVICE_CTRL);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return 0;
}

static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int rc;

	rc = pm_runtime_resume_and_get(master->dev);
	if (rc < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, rc);
		return rc;
	}

	dw_i3c_master_set_sir_enabled(master, dev, data->index, true);

	rc = i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);

	if (rc) {
		dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
		pm_runtime_mark_last_busy(master->dev);
		pm_runtime_put_autosuspend(master->dev);
	}

	return rc;
}

static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int rc;

	rc = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
	if (rc)
		return rc;

	dw_i3c_master_set_sir_enabled(master, dev, data->index, false);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return 0;
}

static void dw_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					   struct i3c_ibi_slot *slot)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}

static void dw_i3c_master_drain_ibi_queue(struct dw_i3c_master *master,
					  int len)
{
	int i;

	for (i = 0; i < DIV_ROUND_UP(len, 4); i++)
		readl(master->regs + IBI_QUEUE_STATUS);
}

static void dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master,
					 u32 status)
{
	struct dw_i3c_i2c_dev_data *data;
	struct i3c_ibi_slot *slot;
	struct i3c_dev_desc *dev;
	unsigned long flags;
	u8 addr, len;
	int idx;

	addr = IBI_QUEUE_IBI_ADDR(status);
	len = IBI_QUEUE_STATUS_DATA_LEN(status);

	/*
	 * We may be tempted to check the error status in bit 30; however, due
	 * to the PEC errata workaround on some platform implementations (see
	 * ast2600_i3c_set_dat_ibi()), those will almost always have a PEC
	 * error on IBI payload data, as well as losing the last byte of
	 * payload.
	 *
	 * If we implement error status checking on that bit, we may need
	 * a new platform op to validate it.
	 */

	spin_lock_irqsave(&master->devs_lock, flags);
	idx = dw_i3c_master_get_addr_pos(master, addr);
	if (idx < 0) {
		dev_dbg_ratelimited(&master->base.dev,
				    "IBI from unknown addr 0x%x\n", addr);
		goto err_drain;
	}

	dev = master->devs[idx].ibi_dev;
	if (!dev || !dev->ibi) {
		dev_dbg_ratelimited(&master->base.dev,
				    "IBI from non-requested dev idx %d\n", idx);
		goto err_drain;
	}

	data = i3c_dev_get_master_data(dev);
	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot) {
		dev_dbg_ratelimited(&master->base.dev,
				    "No IBI slots available\n");
		goto err_drain;
	}

	if (dev->ibi->max_payload_len < len) {
		dev_dbg_ratelimited(&master->base.dev,
				    "IBI payload len %d greater than max %d\n",
				    len, dev->ibi->max_payload_len);
		goto err_drain;
	}

	if (len) {
		dw_i3c_master_read_ibi_fifo(master, slot->data, len);
		slot->len = len;
	}
	i3c_master_queue_ibi(dev, slot);

	spin_unlock_irqrestore(&master->devs_lock, flags);

	return;

err_drain:
	dw_i3c_master_drain_ibi_queue(master, len);

	spin_unlock_irqrestore(&master->devs_lock, flags);
}

/* "ibis": referring to In-Band Interrupts, and not
 * https://en.wikipedia.org/wiki/Australian_white_ibis. The latter should
 * not be handled.
 */
static void dw_i3c_master_irq_handle_ibis(struct dw_i3c_master *master)
{
	unsigned int i, len, n_ibis;
	u32 reg;

	reg = readl(master->regs + QUEUE_STATUS_LEVEL);
	n_ibis = QUEUE_STATUS_IBI_STATUS_CNT(reg);
	if (!n_ibis)
		return;

	for (i = 0; i < n_ibis; i++) {
		reg = readl(master->regs + IBI_QUEUE_STATUS);

		if (IBI_TYPE_SIRQ(reg)) {
			dw_i3c_master_handle_ibi_sir(master, reg);
		} else if (IBI_TYPE_HJ(reg)) {
			queue_work(master->base.wq, &master->hj_work);
		} else {
			len = IBI_QUEUE_STATUS_DATA_LEN(reg);
			dev_info(&master->base.dev,
				 "unsupported IBI type 0x%lx len %d\n",
				 IBI_QUEUE_STATUS_IBI_ID(reg), len);
			dw_i3c_master_drain_ibi_queue(master, len);
		}
	}
}

static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
{
	struct dw_i3c_master *master = dev_id;
	u32 status;

	status = readl(master->regs + INTR_STATUS);

	if (!(status & readl(master->regs + INTR_STATUS_EN))) {
		writel(INTR_ALL, master->regs + INTR_STATUS);
		return IRQ_NONE;
	}

	spin_lock(&master->xferqueue.lock);
	dw_i3c_master_end_xfer_locked(master, status);
	if (status & INTR_TRANSFER_ERR_STAT)
		writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
	spin_unlock(&master->xferqueue.lock);

	if (status & INTR_IBI_THLD_STAT)
		dw_i3c_master_irq_handle_ibis(master);

	return IRQ_HANDLED;
}

static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
	.bus_init = dw_i3c_master_bus_init,
	.bus_cleanup = dw_i3c_master_bus_cleanup,
	.attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
	.reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
	.detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
	.do_daa = dw_i3c_master_daa,
	.supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
	.priv_xfers = dw_i3c_master_priv_xfers,
	.attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
	.i2c_xfers = dw_i3c_master_i2c_xfers,
	.request_ibi = dw_i3c_master_request_ibi,
	.free_ibi = dw_i3c_master_free_ibi,
	.enable_ibi = dw_i3c_master_enable_ibi,
	.disable_ibi = dw_i3c_master_disable_ibi,
	.recycle_ibi_slot = dw_i3c_master_recycle_ibi_slot,
	.enable_hotjoin = dw_i3c_master_enable_hotjoin,
	.disable_hotjoin = dw_i3c_master_disable_hotjoin,
};

/* default platform ops implementations */
static int dw_i3c_platform_init_nop(struct dw_i3c_master *i3c)
{
	return 0;
}

static void dw_i3c_platform_set_dat_ibi_nop(struct dw_i3c_master *i3c,
					    struct i3c_dev_desc *dev,
					    bool enable, u32 *dat)
{
}

static const struct dw_i3c_platform_ops dw_i3c_platform_ops_default = {
	.init = dw_i3c_platform_init_nop,
	.set_dat_ibi = dw_i3c_platform_set_dat_ibi_nop,
};

static void dw_i3c_hj_work(struct work_struct *work)
{
	struct dw_i3c_master *master =
		container_of(work, typeof(*master), hj_work);

	i3c_master_do_daa(&master->base);
}

int dw_i3c_common_probe(struct dw_i3c_master *master,
			struct platform_device *pdev)
{
	int ret, irq;

	if (!master->platform_ops)
		master->platform_ops = &dw_i3c_platform_ops_default;

	master->dev = &pdev->dev;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->core_clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(master->core_clk))
		return PTR_ERR(master->core_clk);

	master->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								     "core_rst");
	if (IS_ERR(master->core_rst))
		return PTR_ERR(master->core_rst);

	reset_control_deassert(master->core_rst);

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	writel(INTR_ALL, master->regs + INTR_STATUS);
	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq,
			       dw_i3c_master_irq_handler, 0,
			       dev_name(&pdev->dev), master);
	if (ret)
		goto err_assert_rst;

	platform_set_drvdata(pdev, master);

	pm_runtime_set_autosuspend_delay(&pdev->dev, RPM_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* Information regarding the FIFOs/QUEUEs depth */
	ret = readl(master->regs + QUEUE_STATUS_LEVEL);
	master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);

	ret = readl(master->regs + DATA_BUFFER_STATUS_LEVEL);
	master->caps.datafifodepth = DATA_BUFFER_STATUS_LEVEL_TX(ret);

	ret = readl(master->regs + DEVICE_ADDR_TABLE_POINTER);
	master->datstartaddr = ret;
	master->maxdevs = ret >> 16;
	master->free_pos = GENMASK(master->maxdevs - 1, 0);

	INIT_WORK(&master->hj_work, dw_i3c_hj_work);
	ret = i3c_master_register(&master->base, &pdev->dev,
				  &dw_mipi_i3c_ops, false);
	if (ret)
		goto err_disable_pm;

	return 0;

err_disable_pm:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

err_assert_rst:
	reset_control_assert(master->core_rst);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_i3c_common_probe);

void dw_i3c_common_remove(struct dw_i3c_master *master)
{
	i3c_master_unregister(&master->base);

	pm_runtime_disable(master->dev);
	pm_runtime_set_suspended(master->dev);
	pm_runtime_dont_use_autosuspend(master->dev);
}
EXPORT_SYMBOL_GPL(dw_i3c_common_remove);

/* base platform implementation */

static int dw_i3c_probe(struct platform_device *pdev)
{
	struct dw_i3c_master *master;

	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	return dw_i3c_common_probe(master, pdev);
}

static void dw_i3c_remove(struct platform_device *pdev)
{
	struct dw_i3c_master *master = platform_get_drvdata(pdev);

	dw_i3c_common_remove(master);
}

static void dw_i3c_master_restore_addrs(struct dw_i3c_master *master)
{
	u32 pos, reg_val;

	writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(master->dev_addr),
	       master->regs + DEVICE_ADDR);

	for (pos = 0; pos < master->maxdevs; pos++) {
		if (master->free_pos & BIT(pos))
			continue;

		if (master->devs[pos].is_i2c_addr)
			reg_val = DEV_ADDR_TABLE_LEGACY_I2C_DEV |
				  DEV_ADDR_TABLE_STATIC_ADDR(master->devs[pos].addr);
		else
			reg_val = DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr);

		writel(reg_val, master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
	}
}

static void dw_i3c_master_restore_timing_regs(struct dw_i3c_master *master)
{
	writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING);
	writel(master->bus_free_timing, master->regs + BUS_FREE_TIMING);
	writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING);
	writel(master->ext_lcnt_timing, master->regs + SCL_EXT_LCNT_TIMING);

	if (master->i2c_slv_prsnt) {
		writel(master->i2c_fmp_timing, master->regs + SCL_I2C_FMP_TIMING);
		writel(master->i2c_fm_timing, master->regs + SCL_I2C_FM_TIMING);
	}
}

static int dw_i3c_master_enable_clks(struct dw_i3c_master *master)
{
	int ret = 0;

	ret = clk_prepare_enable(master->core_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(master->pclk);
	if (ret) {
		clk_disable_unprepare(master->core_clk);
		return ret;
	}

	return 0;
}

static inline void dw_i3c_master_disable_clks(struct dw_i3c_master *master)
{
	clk_disable_unprepare(master->pclk);
	clk_disable_unprepare(master->core_clk);
}

static int __maybe_unused dw_i3c_master_runtime_suspend(struct device *dev)
{
	struct dw_i3c_master *master = dev_get_drvdata(dev);

	dw_i3c_master_disable(master);

	reset_control_assert(master->core_rst);
	dw_i3c_master_disable_clks(master);
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}

static int __maybe_unused dw_i3c_master_runtime_resume(struct device *dev)
{
	struct dw_i3c_master *master = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);
	dw_i3c_master_enable_clks(master);
	reset_control_deassert(master->core_rst);

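	/*
	 * Register state may have been lost while the clocks were off and the
	 * core reset asserted; re-program interrupts, timings and the device
	 * address table before re-enabling the controller.
	 */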
	dw_i3c_master_set_intr_regs(master);
	dw_i3c_master_restore_timing_regs(master);
	dw_i3c_master_restore_addrs(master);

	dw_i3c_master_enable(master);
	return 0;
}

static const struct dev_pm_ops dw_i3c_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(dw_i3c_master_runtime_suspend, dw_i3c_master_runtime_resume, NULL)
};

static const struct of_device_id dw_i3c_master_of_match[] = {
	{ .compatible = "snps,dw-i3c-master-1.00a", },
	{},
};
MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);

static struct platform_driver dw_i3c_driver = {
	.probe = dw_i3c_probe,
	.remove_new = dw_i3c_remove,
	.driver = {
		.name = "dw-i3c-master",
		.of_match_table = dw_i3c_master_of_match,
		.pm = &dw_i3c_pm_ops,
	},
};
module_platform_driver(dw_i3c_driver);

MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
MODULE_DESCRIPTION("DesignWare MIPI I3C driver");
MODULE_LICENSE("GPL v2");