// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 *
 * Author: Vitor Soares <vitor.soares@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dw-i3c-master.h"

/* Global controller control register and its bits. */
#define DEVICE_CTRL			0x0
#define DEV_CTRL_ENABLE			BIT(31)
#define DEV_CTRL_RESUME			BIT(30)
#define DEV_CTRL_HOT_JOIN_NACK		BIT(8)
#define DEV_CTRL_I2C_SLAVE_PRESENT	BIT(7)

/* Master's own dynamic address. */
#define DEVICE_ADDR			0x4
#define DEV_ADDR_DYNAMIC_ADDR_VALID	BIT(31)
#define DEV_ADDR_DYNAMIC(x)		(((x) << 16) & GENMASK(22, 16))

#define HW_CAPABILITY			0x8

/* Command queue port: cmd_lo word layout. */
#define COMMAND_QUEUE_PORT		0xc
#define COMMAND_PORT_TOC		BIT(30)
#define COMMAND_PORT_READ_TRANSFER	BIT(28)
#define COMMAND_PORT_SDAP		BIT(27)
#define COMMAND_PORT_ROC		BIT(26)
#define COMMAND_PORT_SPEED(x)		(((x) << 21) & GENMASK(23, 21))
#define COMMAND_PORT_DEV_INDEX(x)	(((x) << 16) & GENMASK(20, 16))
#define COMMAND_PORT_CP			BIT(15)
#define COMMAND_PORT_CMD(x)		(((x) << 7) & GENMASK(14, 7))
#define COMMAND_PORT_TID(x)		(((x) << 3) & GENMASK(6, 3))

/* cmd_hi word layout for a regular transfer argument. */
#define COMMAND_PORT_ARG_DATA_LEN(x)	(((x) << 16) & GENMASK(31, 16))
#define COMMAND_PORT_ARG_DATA_LEN_MAX	65536
#define COMMAND_PORT_TRANSFER_ARG	0x01

/* cmd_hi word layout for a short (inline) data argument. */
#define COMMAND_PORT_SDA_DATA_BYTE_3(x)	(((x) << 24) & GENMASK(31, 24))
#define COMMAND_PORT_SDA_DATA_BYTE_2(x)	(((x) << 16) & GENMASK(23, 16))
#define COMMAND_PORT_SDA_DATA_BYTE_1(x)	(((x) << 8) & GENMASK(15, 8))
#define COMMAND_PORT_SDA_BYTE_STRB_3	BIT(5)
#define COMMAND_PORT_SDA_BYTE_STRB_2	BIT(4)
#define COMMAND_PORT_SDA_BYTE_STRB_1	BIT(3)
#define COMMAND_PORT_SHORT_DATA_ARG	0x02

/* cmd_lo layout for an address-assignment (ENTDAA) command. */
#define COMMAND_PORT_DEV_COUNT(x)	(((x) << 21) & GENMASK(25, 21))
#define COMMAND_PORT_ADDR_ASSGN_CMD	0x03

/* Response queue port: per-command completion status word. */
#define RESPONSE_QUEUE_PORT		0x10
#define RESPONSE_PORT_ERR_STATUS(x)	(((x) & GENMASK(31, 28)) >> 28)
#define RESPONSE_NO_ERROR		0
#define RESPONSE_ERROR_CRC		1
#define RESPONSE_ERROR_PARITY		2
#define RESPONSE_ERROR_FRAME		3
#define RESPONSE_ERROR_IBA_NACK		4
#define RESPONSE_ERROR_ADDRESS_NACK	5
#define RESPONSE_ERROR_OVER_UNDER_FLOW	6
#define RESPONSE_ERROR_TRANSF_ABORT	8
#define RESPONSE_ERROR_I2C_W_NACK_ERR	9
#define RESPONSE_PORT_TID(x)		(((x) & GENMASK(27, 24)) >> 24)
#define RESPONSE_PORT_DATA_LEN(x)	((x) & GENMASK(15, 0))

#define RX_TX_DATA_PORT			0x14

/* IBI status queue: classify incoming In-Band Interrupts. */
#define IBI_QUEUE_STATUS		0x18
#define IBI_QUEUE_STATUS_IBI_ID(x)	(((x) & GENMASK(15, 8)) >> 8)
#define IBI_QUEUE_STATUS_DATA_LEN(x)	((x) & GENMASK(7, 0))
#define IBI_QUEUE_IBI_ADDR(x)		(IBI_QUEUE_STATUS_IBI_ID(x) >> 1)
#define IBI_QUEUE_IBI_RNW(x)		(IBI_QUEUE_STATUS_IBI_ID(x) & BIT(0))
#define IBI_TYPE_MR(x)                                                         \
	((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
#define IBI_TYPE_HJ(x)                                                         \
	((IBI_QUEUE_IBI_ADDR(x) == I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
#define IBI_TYPE_SIRQ(x)                                                        \
	((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && IBI_QUEUE_IBI_RNW(x))

/* Queue threshold control: note STAT/RESP fields are "value - 1" encoded. */
#define QUEUE_THLD_CTRL			0x1c
#define QUEUE_THLD_CTRL_IBI_STAT_MASK	GENMASK(31, 24)
#define QUEUE_THLD_CTRL_IBI_STAT(x)	(((x) - 1) << 24)
#define QUEUE_THLD_CTRL_IBI_DATA_MASK	GENMASK(20, 16)
#define QUEUE_THLD_CTRL_IBI_DATA(x)	((x) << 16)
#define QUEUE_THLD_CTRL_RESP_BUF_MASK	GENMASK(15, 8)
#define QUEUE_THLD_CTRL_RESP_BUF(x)	(((x) - 1) << 8)

#define DATA_BUFFER_THLD_CTRL		0x20
#define DATA_BUFFER_THLD_CTRL_RX_BUF	GENMASK(11, 8)

#define IBI_QUEUE_CTRL			0x24
#define IBI_MR_REQ_REJECT		0x2C
#define IBI_SIR_REQ_REJECT		0x30
#define IBI_REQ_REJECT_ALL		GENMASK(31, 0)

/* Per-queue/FIFO software reset bits. */
#define RESET_CTRL			0x34
#define RESET_CTRL_IBI_QUEUE		BIT(5)
#define RESET_CTRL_RX_FIFO		BIT(4)
#define RESET_CTRL_TX_FIFO		BIT(3)
#define RESET_CTRL_RESP_QUEUE		BIT(2)
#define RESET_CTRL_CMD_QUEUE		BIT(1)
#define RESET_CTRL_SOFT			BIT(0)

#define SLV_EVENT_CTRL			0x38

/* Interrupt status/enable registers share the same bit layout. */
#define INTR_STATUS			0x3c
#define INTR_STATUS_EN			0x40
#define INTR_SIGNAL_EN			0x44
#define INTR_FORCE			0x48
#define INTR_BUSOWNER_UPDATE_STAT	BIT(13)
#define INTR_IBI_UPDATED_STAT		BIT(12)
#define INTR_READ_REQ_RECV_STAT		BIT(11)
#define INTR_DEFSLV_STAT		BIT(10)
#define INTR_TRANSFER_ERR_STAT		BIT(9)
#define INTR_DYN_ADDR_ASSGN_STAT	BIT(8)
#define INTR_CCC_UPDATED_STAT		BIT(6)
#define INTR_TRANSFER_ABORT_STAT	BIT(5)
#define INTR_RESP_READY_STAT		BIT(4)
#define INTR_CMD_QUEUE_READY_STAT	BIT(3)
#define INTR_IBI_THLD_STAT		BIT(2)
#define INTR_RX_THLD_STAT		BIT(1)
#define INTR_TX_THLD_STAT		BIT(0)
#define INTR_ALL			(INTR_BUSOWNER_UPDATE_STAT |	\
					INTR_IBI_UPDATED_STAT |		\
					INTR_READ_REQ_RECV_STAT |	\
					INTR_DEFSLV_STAT |		\
					INTR_TRANSFER_ERR_STAT |	\
					INTR_DYN_ADDR_ASSGN_STAT |	\
					INTR_CCC_UPDATED_STAT |		\
					INTR_TRANSFER_ABORT_STAT |	\
					INTR_RESP_READY_STAT |		\
					INTR_CMD_QUEUE_READY_STAT |	\
					INTR_IBI_THLD_STAT |		\
					INTR_TX_THLD_STAT |		\
					INTR_RX_THLD_STAT)

/* Interrupts actually used while operating as bus master. */
#define INTR_MASTER_MASK		(INTR_TRANSFER_ERR_STAT |	\
					 INTR_RESP_READY_STAT)

#define QUEUE_STATUS_LEVEL		0x4c
#define QUEUE_STATUS_IBI_STATUS_CNT(x)	(((x) & GENMASK(28, 24)) >> 24)
#define QUEUE_STATUS_IBI_BUF_BLR(x)	(((x) & GENMASK(23, 16)) >> 16)
#define QUEUE_STATUS_LEVEL_RESP(x)	(((x) & GENMASK(15, 8)) >> 8)
#define QUEUE_STATUS_LEVEL_CMD(x)	((x) & GENMASK(7, 0))

#define DATA_BUFFER_STATUS_LEVEL	0x50
#define DATA_BUFFER_STATUS_LEVEL_TX(x)	((x) & GENMASK(7, 0))

#define PRESENT_STATE			0x54
#define CCC_DEVICE_STATUS		0x58

/* Device Address Table (DAT) location and entry layout. */
#define DEVICE_ADDR_TABLE_POINTER	0x5c
#define DEVICE_ADDR_TABLE_DEPTH(x)	(((x) & GENMASK(31, 16)) >> 16)
#define DEVICE_ADDR_TABLE_ADDR(x)	((x) & GENMASK(7, 0))

#define DEV_CHAR_TABLE_POINTER		0x60
#define VENDOR_SPECIFIC_REG_POINTER	0x6c
#define SLV_PID_VALUE			0x74
#define SLV_CHAR_CTRL			0x78
#define SLV_MAX_LEN			0x7c
#define MAX_READ_TURNAROUND		0x80
#define MAX_DATA_SPEED			0x84
#define SLV_DEBUG_STATUS		0x88
#define SLV_INTR_REQ			0x8c
#define DEVICE_CTRL_EXTENDED		0xb0

/* SCL timing registers for I3C open-drain and push-pull phases. */
#define SCL_I3C_OD_TIMING		0xb4
#define SCL_I3C_PP_TIMING		0xb8
#define SCL_I3C_TIMING_HCNT(x)		(((x) << 16) & GENMASK(23, 16))
#define SCL_I3C_TIMING_LCNT(x)		((x) & GENMASK(7, 0))
#define SCL_I3C_TIMING_CNT_MIN		5

#define SCL_I2C_FM_TIMING		0xbc
#define SCL_I2C_FM_TIMING_HCNT(x)	(((x) << 16) & GENMASK(31, 16))
#define SCL_I2C_FM_TIMING_LCNT(x)	((x) & GENMASK(15, 0))

#define SCL_I2C_FMP_TIMING		0xc0
#define SCL_I2C_FMP_TIMING_HCNT(x)	(((x) << 16) & GENMASK(23, 16))
#define SCL_I2C_FMP_TIMING_LCNT(x)	((x) & GENMASK(15, 0))

/* Extended SCL low counts for SDR1..SDR4 data rates. */
#define SCL_EXT_LCNT_TIMING		0xc8
#define SCL_EXT_LCNT_4(x)		(((x) << 24) & GENMASK(31, 24))
#define SCL_EXT_LCNT_3(x)		(((x) << 16) & GENMASK(23, 16))
#define SCL_EXT_LCNT_2(x)		(((x) << 8) & GENMASK(15, 8))
#define SCL_EXT_LCNT_1(x)		((x) & GENMASK(7, 0))

#define SCL_EXT_TERMN_LCNT_TIMING	0xcc
#define BUS_FREE_TIMING			0xd4
#define BUS_I3C_MST_FREE(x)		((x) & GENMASK(15, 0))

#define BUS_IDLE_TIMING			0xd8
#define I3C_VER_ID			0xe0
#define I3C_VER_TYPE			0xe4
#define EXTENDED_CAPABILITY		0xe8
#define SLAVE_CONFIG			0xec

/* DAT entry bits; each entry is one 32-bit word (see DEV_ADDR_TABLE_LOC). */
#define DEV_ADDR_TABLE_IBI_MDB		BIT(12)
#define DEV_ADDR_TABLE_SIR_REJECT	BIT(13)
#define DEV_ADDR_TABLE_LEGACY_I2C_DEV	BIT(31)
#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x)	(((x) << 16) & GENMASK(23, 16))
#define DEV_ADDR_TABLE_STATIC_ADDR(x)	((x) & GENMASK(6, 0))
#define DEV_ADDR_TABLE_LOC(start, idx)	((start) + ((idx) << 2))

#define I3C_BUS_SDR1_SCL_RATE		8000000
#define I3C_BUS_SDR2_SCL_RATE		6000000
#define I3C_BUS_SDR3_SCL_RATE		4000000
#define I3C_BUS_SDR4_SCL_RATE		2000000
#define I3C_BUS_I2C_FM_TLOW_MIN_NS	1300
#define I3C_BUS_I2C_FMP_TLOW_MIN_NS	500
#define I3C_BUS_THIGH_MAX_NS		41

#define XFER_TIMEOUT (msecs_to_jiffies(1000))
#define RPM_AUTOSUSPEND_TIMEOUT		1000 /* ms */

/* Timing values to configure 12.5MHz frequency */
#define AMD_I3C_OD_TIMING		0x4C007C
#define AMD_I3C_PP_TIMING		0x8001A

/* List of quirks */
#define AMD_I3C_OD_PP_TIMING		BIT(1)

/*
 * One entry of the hardware command queue: the two command words pushed
 * to COMMAND_QUEUE_PORT plus the associated TX/RX buffers and the error
 * status read back from the response queue.
 */
struct dw_i3c_cmd {
	u32 cmd_lo;
	u32 cmd_hi;
	u16 tx_len;
	const void *tx_buf;
	u16 rx_len;
	void *rx_buf;
	u8 error;
};

/* A queued transfer: a batch of commands completed as one unit. */
struct dw_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;			/* -ETIMEDOUT until the IRQ path fills it in */
	unsigned int ncmds;
	struct dw_i3c_cmd cmds[] __counted_by(ncmds);
};

/* Per-device private data: position in the DAT and the IBI slot pool. */
struct dw_i3c_i2c_dev_data {
	u8 index;
	struct i3c_generic_ibi_pool *ibi_pool;
};

/* Report which CCC commands this controller can emit (single-dest only). */
static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					   const struct i3c_ccc_cmd *cmd)
{
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		return false;
	}
}

static inline struct dw_i3c_master *
to_dw_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct dw_i3c_master, base);
}

/* Clear the global enable bit; in-flight state is left untouched. */
static void dw_i3c_master_disable(struct dw_i3c_master *master)
{
	writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
	       master->regs + DEVICE_CTRL);
}

static void dw_i3c_master_enable(struct dw_i3c_master *master)
{
	u32 dev_ctrl;

	dev_ctrl = readl(master->regs + DEVICE_CTRL);
	/* For now don't support Hot-Join */
	dev_ctrl |= DEV_CTRL_HOT_JOIN_NACK;
	if (master->i2c_slv_prsnt)
		dev_ctrl |= DEV_CTRL_I2C_SLAVE_PRESENT;
	writel(dev_ctrl | DEV_CTRL_ENABLE,
	       master->regs + DEVICE_CTRL);
}

/* Find the DAT slot holding @addr, or -EINVAL if it is not attached. */
static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
{
	int pos;

	for (pos = 0; pos < master->maxdevs; pos++) {
		if (addr == master->devs[pos].addr)
			return pos;
	}

	return -EINVAL;
}

/* Lowest free DAT slot index, or -ENOSPC when the table is full. */
static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
{
	if (!(master->free_pos & GENMASK(master->maxdevs - 1, 0)))
		return -ENOSPC;

	return ffs(master->free_pos) - 1;
}

/* Push @nbytes to the TX FIFO; the tail (< 4 bytes) goes zero-padded. */
static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
				     const u8 *bytes, int nbytes)
{
	writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = 0;

		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
		writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
	}
}

/* Pull @nbytes from FIFO register @reg, word at a time with a ragged tail. */
static void dw_i3c_master_read_fifo(struct dw_i3c_master *master,
				    int reg, u8 *bytes, int nbytes)
{
	readsl(master->regs + reg, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp;

		readsl(master->regs + reg, &tmp, 1);
		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
	}
}

static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
				       u8 *bytes, int nbytes)
{
	return dw_i3c_master_read_fifo(master, RX_TX_DATA_PORT, bytes, nbytes);
}

static void dw_i3c_master_read_ibi_fifo(struct dw_i3c_master *master,
					u8 *bytes, int nbytes)
{
	return dw_i3c_master_read_fifo(master, IBI_QUEUE_STATUS, bytes, nbytes);
}

/* Allocate an xfer for @ncmds commands; ret defaults to -ETIMEDOUT. */
static struct dw_i3c_xfer *
dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
{
	struct dw_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void dw_i3c_master_free_xfer(struct dw_i3c_xfer *xfer)
{
	kfree(xfer);
}

/*
 * Kick the current transfer: preload all TX data, set the response
 * threshold so one IRQ covers the whole batch, then push the command
 * words.  Caller must hold xferqueue.lock.
 */
static void dw_i3c_master_start_xfer_locked(struct dw_i3c_master *master)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;
	u32 thld_ctrl;

	if (!xfer)
		return;

	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
	}

	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
	thld_ctrl |= QUEUE_THLD_CTRL_RESP_BUF(xfer->ncmds);
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd_hi, master->regs + COMMAND_QUEUE_PORT);
		writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
	}
}

/* Queue @xfer; start it immediately if the controller is idle. */
static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		dw_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

/*
 * Remove @xfer from the queue.  If it is the in-flight transfer, flush
 * the command/response queues and data FIFOs and wait for the reset
 * bits to self-clear.  Caller must hold xferqueue.lock.
 */
static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
					      struct dw_i3c_xfer *xfer)
{
	if (master->xferqueue.cur == xfer) {
		u32 status;

		master->xferqueue.cur = NULL;

		writel(RESET_CTRL_RX_FIFO | RESET_CTRL_TX_FIFO |
		       RESET_CTRL_RESP_QUEUE | RESET_CTRL_CMD_QUEUE,
		       master->regs + RESET_CTRL);

		readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
					  !status, 10, 1000000);
	} else {
		list_del_init(&xfer->node);
	}
}

static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	dw_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

/*
 * IRQ-path completion: drain the response queue into the current xfer,
 * translate hardware error codes to errnos, complete the waiter, resume
 * the controller after an error, and start the next queued transfer.
 * Caller must hold xferqueue.lock.
 */
static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 nresp;

	if (!xfer)
		return;

	nresp = readl(master->regs + QUEUE_STATUS_LEVEL);
	nresp = QUEUE_STATUS_LEVEL_RESP(nresp);

	for (i = 0; i < nresp; i++) {
		struct dw_i3c_cmd *cmd;
		u32 resp;

		resp = readl(master->regs + RESPONSE_QUEUE_PORT);

		cmd = &xfer->cmds[RESPONSE_PORT_TID(resp)];
		cmd->rx_len = RESPONSE_PORT_DATA_LEN(resp);
		cmd->error = RESPONSE_PORT_ERR_STATUS(resp);
		if (cmd->rx_len && !cmd->error)
			dw_i3c_master_read_rx_fifo(master, cmd->rx_buf,
						   cmd->rx_len);
	}

	for (i = 0; i < nresp; i++) {
		switch (xfer->cmds[i].error) {
		case RESPONSE_NO_ERROR:
			break;
		case RESPONSE_ERROR_PARITY:
		case RESPONSE_ERROR_IBA_NACK:
		case RESPONSE_ERROR_TRANSF_ABORT:
		case RESPONSE_ERROR_CRC:
		case RESPONSE_ERROR_FRAME:
			ret = -EIO;
			break;
		case RESPONSE_ERROR_OVER_UNDER_FLOW:
			ret = -ENOSPC;
			break;
		case RESPONSE_ERROR_I2C_W_NACK_ERR:
		case RESPONSE_ERROR_ADDRESS_NACK:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	if (ret < 0) {
		dw_i3c_master_dequeue_xfer_locked(master, xfer);
		/* RESUME is required to restart the halted controller. */
		writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
		       master->regs + DEVICE_CTRL);
	}

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct dw_i3c_xfer,
					node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	dw_i3c_master_start_xfer_locked(master);
}

/* Program thresholds, IRQ masks, and reject all MR/SIR requests by default. */
static void dw_i3c_master_set_intr_regs(struct dw_i3c_master *master)
{
	u32 thld_ctrl;

	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~(QUEUE_THLD_CTRL_RESP_BUF_MASK |
		       QUEUE_THLD_CTRL_IBI_STAT_MASK |
		       QUEUE_THLD_CTRL_IBI_DATA_MASK);
	thld_ctrl |= QUEUE_THLD_CTRL_IBI_STAT(1) |
		QUEUE_THLD_CTRL_IBI_DATA(31);
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
	thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
	writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);

	writel(INTR_ALL, master->regs + INTR_STATUS);
	writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
	writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);

	master->sir_rej_mask = IBI_REQ_REJECT_ALL;
	writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);

	writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
}

/* Derive I3C OD/PP and SDR1-4 SCL timings from the core clock rate. */
static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u32 scl_timing;
	u8 hcnt, lcnt;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	/* core_period in ns, rounded up. */
	core_period = DIV_ROUND_UP(1000000000, core_rate);

	hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
	if (hcnt < SCL_I3C_TIMING_CNT_MIN)
		hcnt = SCL_I3C_TIMING_CNT_MIN;

	lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) - hcnt;
	if (lcnt < SCL_I3C_TIMING_CNT_MIN)
		lcnt = SCL_I3C_TIMING_CNT_MIN;

	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
	master->i3c_pp_timing = scl_timing;

	/*
	 * In pure i3c mode, MST_FREE represents tCAS. In shared mode, this
	 * will be set up by dw_i2c_clk_cfg as tLOW.
	 */
	if (master->base.bus.mode == I3C_BUS_MODE_PURE) {
		writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
		master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);
	}

	lcnt = max_t(u8,
		     DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);
	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
	master->i3c_od_timing = scl_timing;

	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
	scl_timing = SCL_EXT_LCNT_1(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_2(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR3_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_3(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_4(lcnt);
	writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
	master->ext_lcnt_timing = scl_timing;

	return 0;
}

/* Derive I2C FM/FM+ timings and flag that legacy I2C devices share the bus. */
static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u16 hcnt, lcnt;
	u32 scl_timing;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	core_period = DIV_ROUND_UP(1000000000, core_rate);

	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
	scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
		     SCL_I2C_FMP_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
	master->i2c_fmp_timing = scl_timing;

	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
	scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
		     SCL_I2C_FM_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
	master->i2c_fm_timing = scl_timing;

	writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
	master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);

	writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
	       master->regs + DEVICE_CTRL);
	master->i2c_slv_prsnt = true;

	return 0;
}

/*
 * Bus initialization: platform init, clock configuration per bus mode,
 * claim a dynamic address for the master itself, then arm interrupts
 * and enable the controller.
 */
static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = { };
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	ret = master->platform_ops->init(master);
	if (ret)
		goto rpm_out;

	switch (bus->mode) {
	case I3C_BUS_MODE_MIXED_FAST:
	case I3C_BUS_MODE_MIXED_LIMITED:
		ret = dw_i2c_clk_cfg(master);
		if (ret)
			goto rpm_out;
		fallthrough;
	case I3C_BUS_MODE_PURE:
		ret = dw_i3c_clk_cfg(master);
		if (ret)
			goto rpm_out;
		break;
	default:
		ret = -EINVAL;
		goto rpm_out;
	}

	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		goto rpm_out;

	writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
	       master->regs + DEVICE_ADDR);
	master->dev_addr = ret;
	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		goto rpm_out;

	dw_i3c_master_set_intr_regs(master);
	dw_i3c_master_enable(master);

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}

static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	dw_i3c_master_disable(master);
}

/* Issue a write-direction (SET-type) CCC command as a single-cmd xfer. */
static int dw_i3c_ccc_set(struct dw_i3c_master *master,
			  struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	int ret, pos = 0;

	if (ccc->id & I3C_CCC_DIRECT) {
		pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
		if (pos < 0)
			return pos;
	}

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	cmd = xfer->cmds;
	cmd->tx_buf = ccc->dests[0].payload.data;
	cmd->tx_len = ccc->dests[0].payload.len;

	cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
		      COMMAND_PORT_TRANSFER_ARG;

	cmd->cmd_lo = COMMAND_PORT_CP |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(ccc->id) |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
		ccc->err = I3C_ERROR_M2;

	dw_i3c_master_free_xfer(xfer);

	return ret;
}

/* Issue a read-direction (GET-type) CCC command as a single-cmd xfer. */
static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	int ret, pos;

	pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
	if (pos < 0)
		return pos;

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	cmd = xfer->cmds;
	cmd->rx_buf = ccc->dests[0].payload.data;
	cmd->rx_len = ccc->dests[0].payload.len;

	cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
		      COMMAND_PORT_TRANSFER_ARG;

	cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
		      COMMAND_PORT_CP |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(ccc->id) |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
		ccc->err = I3C_ERROR_M2;
	dw_i3c_master_free_xfer(xfer);

	return ret;
}

/* Cache the fixed AMD 12.5MHz OD/PP timing values (see quirk below). */
static void amd_configure_od_pp_quirk(struct dw_i3c_master *master)
{
	master->i3c_od_timing = AMD_I3C_OD_TIMING;
	master->i3c_pp_timing = AMD_I3C_PP_TIMING;
}

/* Dispatch a CCC command; ENTDAA goes through the DAA path instead. */
static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
				      struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int ret = 0;

	if (ccc->id == I3C_CCC_ENTDAA)
		return -EINVAL;

	/* AMD platform specific OD and PP timings */
	if (master->quirks & AMD_I3C_OD_PP_TIMING) {
		amd_configure_od_pp_quirk(master);
		writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING);
		writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING);
	}

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	if (ccc->rnw)
		ret = dw_i3c_ccc_get(master, ccc);
	else
		ret = dw_i3c_ccc_set(master, ccc);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}

/*
 * Dynamic Address Assignment: pre-load the DAT with free addresses,
 * fire ENTDAA, then register every device the hardware assigned.
 */
static int dw_i3c_master_daa(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	u32 olddevs, newdevs;
	u8 last_addr = 0;
	int ret,
pos; 845 846 ret = pm_runtime_resume_and_get(master->dev); 847 if (ret < 0) { 848 dev_err(master->dev, 849 "<%s> cannot resume i3c bus master, err: %d\n", 850 __func__, ret); 851 return ret; 852 } 853 854 olddevs = ~(master->free_pos); 855 856 /* Prepare DAT before launching DAA. */ 857 for (pos = 0; pos < master->maxdevs; pos++) { 858 if (olddevs & BIT(pos)) 859 continue; 860 861 ret = i3c_master_get_free_addr(m, last_addr + 1); 862 if (ret < 0) { 863 ret = -ENOSPC; 864 goto rpm_out; 865 } 866 867 master->devs[pos].addr = ret; 868 last_addr = ret; 869 870 ret |= parity8(ret) ? 0 : BIT(7); 871 872 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret), 873 master->regs + 874 DEV_ADDR_TABLE_LOC(master->datstartaddr, pos)); 875 876 ret = 0; 877 } 878 879 xfer = dw_i3c_master_alloc_xfer(master, 1); 880 if (!xfer) { 881 ret = -ENOMEM; 882 goto rpm_out; 883 } 884 885 pos = dw_i3c_master_get_free_pos(master); 886 if (pos < 0) { 887 dw_i3c_master_free_xfer(xfer); 888 ret = pos; 889 goto rpm_out; 890 } 891 cmd = &xfer->cmds[0]; 892 cmd->cmd_hi = 0x1; 893 cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) | 894 COMMAND_PORT_DEV_INDEX(pos) | 895 COMMAND_PORT_CMD(I3C_CCC_ENTDAA) | 896 COMMAND_PORT_ADDR_ASSGN_CMD | 897 COMMAND_PORT_TOC | 898 COMMAND_PORT_ROC; 899 900 dw_i3c_master_enqueue_xfer(master, xfer); 901 if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) 902 dw_i3c_master_dequeue_xfer(master, xfer); 903 904 newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0); 905 newdevs &= ~olddevs; 906 907 for (pos = 0; pos < master->maxdevs; pos++) { 908 if (newdevs & BIT(pos)) 909 i3c_master_add_i3c_dev_locked(m, master->devs[pos].addr); 910 } 911 912 dw_i3c_master_free_xfer(xfer); 913 914 rpm_out: 915 pm_runtime_mark_last_busy(master->dev); 916 pm_runtime_put_autosuspend(master->dev); 917 return ret; 918 } 919 920 static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev, 921 struct i3c_priv_xfer *i3c_xfers, 922 int i3c_nxfers) 923 { 924 struct dw_i3c_i2c_dev_data *data 
= i3c_dev_get_master_data(dev); 925 struct i3c_master_controller *m = i3c_dev_get_master(dev); 926 struct dw_i3c_master *master = to_dw_i3c_master(m); 927 unsigned int nrxwords = 0, ntxwords = 0; 928 struct dw_i3c_xfer *xfer; 929 int i, ret = 0; 930 931 if (!i3c_nxfers) 932 return 0; 933 934 if (i3c_nxfers > master->caps.cmdfifodepth) 935 return -ENOTSUPP; 936 937 for (i = 0; i < i3c_nxfers; i++) { 938 if (i3c_xfers[i].rnw) 939 nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4); 940 else 941 ntxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4); 942 } 943 944 if (ntxwords > master->caps.datafifodepth || 945 nrxwords > master->caps.datafifodepth) 946 return -ENOTSUPP; 947 948 xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers); 949 if (!xfer) 950 return -ENOMEM; 951 952 ret = pm_runtime_resume_and_get(master->dev); 953 if (ret < 0) { 954 dev_err(master->dev, 955 "<%s> cannot resume i3c bus master, err: %d\n", 956 __func__, ret); 957 return ret; 958 } 959 960 for (i = 0; i < i3c_nxfers; i++) { 961 struct dw_i3c_cmd *cmd = &xfer->cmds[i]; 962 963 cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) | 964 COMMAND_PORT_TRANSFER_ARG; 965 966 if (i3c_xfers[i].rnw) { 967 cmd->rx_buf = i3c_xfers[i].data.in; 968 cmd->rx_len = i3c_xfers[i].len; 969 cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER | 970 COMMAND_PORT_SPEED(dev->info.max_read_ds); 971 972 } else { 973 cmd->tx_buf = i3c_xfers[i].data.out; 974 cmd->tx_len = i3c_xfers[i].len; 975 cmd->cmd_lo = 976 COMMAND_PORT_SPEED(dev->info.max_write_ds); 977 } 978 979 cmd->cmd_lo |= COMMAND_PORT_TID(i) | 980 COMMAND_PORT_DEV_INDEX(data->index) | 981 COMMAND_PORT_ROC; 982 983 if (i == (i3c_nxfers - 1)) 984 cmd->cmd_lo |= COMMAND_PORT_TOC; 985 } 986 987 dw_i3c_master_enqueue_xfer(master, xfer); 988 if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) 989 dw_i3c_master_dequeue_xfer(master, xfer); 990 991 for (i = 0; i < i3c_nxfers; i++) { 992 struct dw_i3c_cmd *cmd = &xfer->cmds[i]; 993 994 if (i3c_xfers[i].rnw) 995 i3c_xfers[i].len = 
cmd->rx_len; 996 } 997 998 ret = xfer->ret; 999 dw_i3c_master_free_xfer(xfer); 1000 1001 pm_runtime_mark_last_busy(master->dev); 1002 pm_runtime_put_autosuspend(master->dev); 1003 return ret; 1004 } 1005 1006 static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, 1007 u8 old_dyn_addr) 1008 { 1009 struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1010 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1011 struct dw_i3c_master *master = to_dw_i3c_master(m); 1012 int pos; 1013 1014 pos = dw_i3c_master_get_free_pos(master); 1015 1016 if (data->index > pos && pos > 0) { 1017 writel(0, 1018 master->regs + 1019 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1020 1021 master->devs[data->index].addr = 0; 1022 master->free_pos |= BIT(data->index); 1023 1024 data->index = pos; 1025 master->devs[pos].addr = dev->info.dyn_addr; 1026 master->free_pos &= ~BIT(pos); 1027 } 1028 1029 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), 1030 master->regs + 1031 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1032 1033 master->devs[data->index].addr = dev->info.dyn_addr; 1034 1035 return 0; 1036 } 1037 1038 static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev) 1039 { 1040 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1041 struct dw_i3c_master *master = to_dw_i3c_master(m); 1042 struct dw_i3c_i2c_dev_data *data; 1043 int pos; 1044 1045 pos = dw_i3c_master_get_free_pos(master); 1046 if (pos < 0) 1047 return pos; 1048 1049 data = kzalloc(sizeof(*data), GFP_KERNEL); 1050 if (!data) 1051 return -ENOMEM; 1052 1053 data->index = pos; 1054 master->devs[pos].addr = dev->info.dyn_addr ? 
: dev->info.static_addr; 1055 master->free_pos &= ~BIT(pos); 1056 i3c_dev_set_master_data(dev, data); 1057 1058 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr), 1059 master->regs + 1060 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1061 1062 return 0; 1063 } 1064 1065 static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev) 1066 { 1067 struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1068 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1069 struct dw_i3c_master *master = to_dw_i3c_master(m); 1070 1071 writel(0, 1072 master->regs + 1073 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1074 1075 i3c_dev_set_master_data(dev, NULL); 1076 master->devs[data->index].addr = 0; 1077 master->free_pos |= BIT(data->index); 1078 kfree(data); 1079 } 1080 1081 static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev, 1082 const struct i2c_msg *i2c_xfers, 1083 int i2c_nxfers) 1084 { 1085 struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev); 1086 struct i3c_master_controller *m = i2c_dev_get_master(dev); 1087 struct dw_i3c_master *master = to_dw_i3c_master(m); 1088 unsigned int nrxwords = 0, ntxwords = 0; 1089 struct dw_i3c_xfer *xfer; 1090 int i, ret = 0; 1091 1092 if (!i2c_nxfers) 1093 return 0; 1094 1095 if (i2c_nxfers > master->caps.cmdfifodepth) 1096 return -ENOTSUPP; 1097 1098 for (i = 0; i < i2c_nxfers; i++) { 1099 if (i2c_xfers[i].flags & I2C_M_RD) 1100 nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4); 1101 else 1102 ntxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4); 1103 } 1104 1105 if (ntxwords > master->caps.datafifodepth || 1106 nrxwords > master->caps.datafifodepth) 1107 return -ENOTSUPP; 1108 1109 xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers); 1110 if (!xfer) 1111 return -ENOMEM; 1112 1113 ret = pm_runtime_resume_and_get(master->dev); 1114 if (ret < 0) { 1115 dev_err(master->dev, 1116 "<%s> cannot resume i3c bus master, err: %d\n", 1117 __func__, ret); 1118 return ret; 1119 } 1120 1121 for 
(i = 0; i < i2c_nxfers; i++) { 1122 struct dw_i3c_cmd *cmd = &xfer->cmds[i]; 1123 1124 cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i2c_xfers[i].len) | 1125 COMMAND_PORT_TRANSFER_ARG; 1126 1127 cmd->cmd_lo = COMMAND_PORT_TID(i) | 1128 COMMAND_PORT_DEV_INDEX(data->index) | 1129 COMMAND_PORT_ROC; 1130 1131 if (i2c_xfers[i].flags & I2C_M_RD) { 1132 cmd->cmd_lo |= COMMAND_PORT_READ_TRANSFER; 1133 cmd->rx_buf = i2c_xfers[i].buf; 1134 cmd->rx_len = i2c_xfers[i].len; 1135 } else { 1136 cmd->tx_buf = i2c_xfers[i].buf; 1137 cmd->tx_len = i2c_xfers[i].len; 1138 } 1139 1140 if (i == (i2c_nxfers - 1)) 1141 cmd->cmd_lo |= COMMAND_PORT_TOC; 1142 } 1143 1144 dw_i3c_master_enqueue_xfer(master, xfer); 1145 if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) 1146 dw_i3c_master_dequeue_xfer(master, xfer); 1147 1148 ret = xfer->ret; 1149 dw_i3c_master_free_xfer(xfer); 1150 1151 pm_runtime_mark_last_busy(master->dev); 1152 pm_runtime_put_autosuspend(master->dev); 1153 return ret; 1154 } 1155 1156 static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev) 1157 { 1158 struct i3c_master_controller *m = i2c_dev_get_master(dev); 1159 struct dw_i3c_master *master = to_dw_i3c_master(m); 1160 struct dw_i3c_i2c_dev_data *data; 1161 int pos; 1162 1163 pos = dw_i3c_master_get_free_pos(master); 1164 if (pos < 0) 1165 return pos; 1166 1167 data = kzalloc(sizeof(*data), GFP_KERNEL); 1168 if (!data) 1169 return -ENOMEM; 1170 1171 data->index = pos; 1172 master->devs[pos].addr = dev->addr; 1173 master->devs[pos].is_i2c_addr = true; 1174 master->free_pos &= ~BIT(pos); 1175 i2c_dev_set_master_data(dev, data); 1176 1177 writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV | 1178 DEV_ADDR_TABLE_STATIC_ADDR(dev->addr), 1179 master->regs + 1180 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1181 1182 return 0; 1183 } 1184 1185 static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev) 1186 { 1187 struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev); 1188 struct i3c_master_controller 
*m = i2c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	writel(0,
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	i2c_dev_set_master_data(dev, NULL);
	master->devs[data->index].addr = 0;
	master->free_pos |= BIT(data->index);
	kfree(data);
}

/*
 * Allocate an IBI slot pool for @dev and publish the device in the
 * per-slot ibi_dev table (under devs_lock) so the IRQ handler can route
 * incoming IBIs to it.
 */
static int dw_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				     const struct i3c_ibi_setup *req)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	unsigned long flags;

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->devs_lock, flags);
	master->devs[data->index].ibi_dev = dev;
	spin_unlock_irqrestore(&master->devs_lock, flags);

	return 0;
}

/*
 * Undo dw_i3c_master_request_ibi(): unpublish the device from the IBI
 * routing table before freeing the slot pool, so the IRQ handler cannot
 * hand out slots from a pool being destroyed.
 */
static void dw_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	unsigned long flags;

	spin_lock_irqsave(&master->devs_lock, flags);
	master->devs[data->index].ibi_dev = NULL;
	spin_unlock_irqrestore(&master->devs_lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;
}

/*
 * Gate the IBI-threshold interrupt in both the status-enable and
 * signal-enable registers (the two must agree for the IRQ line to fire).
 */
static void dw_i3c_master_enable_sir_signal(struct dw_i3c_master *master, bool enable)
{
	u32 reg;

	reg = readl(master->regs + INTR_STATUS_EN);
	reg &= ~INTR_IBI_THLD_STAT;
	if (enable)
		reg |= INTR_IBI_THLD_STAT;
	writel(reg, master->regs + INTR_STATUS_EN);

	reg = readl(master->regs + INTR_SIGNAL_EN);
	reg &= ~INTR_IBI_THLD_STAT;
	if (enable)
		reg |= INTR_IBI_THLD_STAT;
	writel(reg, master->regs + INTR_SIGNAL_EN);
}

/*
 * Enable or disable slave interrupt requests (SIR) for the device in DAT
 * slot @idx.  Updates the per-device DAT reject/MDB bits, the global
 * SIR-reject mask, and — when the last SIR source is enabled/disabled —
 * the IBI threshold interrupt itself.  All register updates are ordered
 * under devs_lock to stay consistent with the IRQ handler.
 */
static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
					  struct i3c_dev_desc *dev,
					  u8 idx, bool enable)
{
	unsigned long flags;
	u32 dat_entry, reg;
	bool global;

	dat_entry = DEV_ADDR_TABLE_LOC(master->datstartaddr, idx);

	spin_lock_irqsave(&master->devs_lock, flags);
	reg = readl(master->regs + dat_entry);
	if (enable) {
		reg &= ~DEV_ADDR_TABLE_SIR_REJECT;
		/* Devices with IBI payload send a mandatory data byte. */
		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
			reg |= DEV_ADDR_TABLE_IBI_MDB;
	} else {
		reg |= DEV_ADDR_TABLE_SIR_REJECT;
	}
	/* Give the platform a chance to adjust the DAT entry (errata). */
	master->platform_ops->set_dat_ibi(master, dev, enable, &reg);
	writel(reg, master->regs + dat_entry);

	if (enable) {
		/* First enabled SIR source: turn the IBI interrupt on. */
		global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL);
		master->sir_rej_mask &= ~BIT(idx);
	} else {
		bool hj_rejected = !!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_HOT_JOIN_NACK);

		master->sir_rej_mask |= BIT(idx);
		/*
		 * Only drop the IBI interrupt when every SIR source is
		 * rejected AND hot-join is NACKed, so HJ IBIs keep working.
		 */
		global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL) && hj_rejected;
	}
	writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);

	if (global)
		dw_i3c_master_enable_sir_signal(master, enable);

	spin_unlock_irqrestore(&master->devs_lock, flags);
}

/*
 * Enable hot-join: keep the controller resumed (the runtime PM reference
 * taken here is dropped in disable_hotjoin), enable the IBI interrupt and
 * clear the hot-join NACK bit so HJ requests are ACKed.
 */
static int dw_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	dw_i3c_master_enable_sir_signal(master, true);
	writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_HOT_JOIN_NACK,
	       master->regs + DEVICE_CTRL);

	return 0;
}

/*
 * Disable hot-join: NACK further HJ requests and release the runtime PM
 * reference taken in enable_hotjoin.
 */
static int dw_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
	       master->regs + DEVICE_CTRL);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return 0;
}

/*
 * Enable IBIs from @dev: program the controller first, then send ENEC to
 * the device.  On ENEC failure, roll back the controller state and drop
 * the runtime PM reference (which is otherwise held until disable_ibi).
 */
static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int rc;

	rc = pm_runtime_resume_and_get(master->dev);
	if (rc < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, rc);
		return rc;
	}

	dw_i3c_master_set_sir_enabled(master, dev, data->index, true);

	rc = i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);

	if (rc) {
		dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
		pm_runtime_mark_last_busy(master->dev);
		pm_runtime_put_autosuspend(master->dev);
	}

	return rc;
}

/*
 * Disable IBIs from @dev: send DISEC first (device stops raising SIRs),
 * then stop accepting them in the controller and release the runtime PM
 * reference taken in enable_ibi.
 */
static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int rc;

	rc = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
	if (rc)
		return rc;

	dw_i3c_master_set_sir_enabled(master, dev, data->index, false);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return 0;
}

/* Return a consumed IBI slot to the device's pool. */
static void dw_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					   struct i3c_ibi_slot *slot)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}

static void
dw_i3c_master_drain_ibi_queue(struct dw_i3c_master *master,
			      int len)
{
	int i;

	/* The IBI queue is read in 32-bit words; discard the payload. */
	for (i = 0; i < DIV_ROUND_UP(len, 4); i++)
		readl(master->regs + IBI_QUEUE_STATUS);
}

/*
 * Handle one slave interrupt request (SIR) IBI, routing its payload to
 * the requesting device's slot pool.  On any routing failure the payload
 * is drained from the hardware queue so the queue stays in sync.
 * Runs in IRQ context; devs_lock protects the addr->device routing table.
 */
static void dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master,
					 u32 status)
{
	struct dw_i3c_i2c_dev_data *data;
	struct i3c_ibi_slot *slot;
	struct i3c_dev_desc *dev;
	unsigned long flags;
	u8 addr, len;
	int idx;

	addr = IBI_QUEUE_IBI_ADDR(status);
	len = IBI_QUEUE_STATUS_DATA_LEN(status);

	/*
	 * We be tempted to check the error status in bit 30; however, due
	 * to the PEC errata workaround on some platform implementations (see
	 * ast2600_i3c_set_dat_ibi()), those will almost always have a PEC
	 * error on IBI payload data, as well as losing the last byte of
	 * payload.
	 *
	 * If we implement error status checking on that bit, we may need
	 * a new platform op to validate it.
	 */

	spin_lock_irqsave(&master->devs_lock, flags);
	idx = dw_i3c_master_get_addr_pos(master, addr);
	if (idx < 0) {
		dev_dbg_ratelimited(&master->base.dev,
			 "IBI from unknown addr 0x%x\n", addr);
		goto err_drain;
	}

	dev = master->devs[idx].ibi_dev;
	if (!dev || !dev->ibi) {
		dev_dbg_ratelimited(&master->base.dev,
			 "IBI from non-requested dev idx %d\n", idx);
		goto err_drain;
	}

	data = i3c_dev_get_master_data(dev);
	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot) {
		dev_dbg_ratelimited(&master->base.dev,
				    "No IBI slots available\n");
		goto err_drain;
	}

	if (dev->ibi->max_payload_len < len) {
		dev_dbg_ratelimited(&master->base.dev,
				    "IBI payload len %d greater than max %d\n",
				    len, dev->ibi->max_payload_len);
		goto err_drain;
	}

	if (len) {
		dw_i3c_master_read_ibi_fifo(master, slot->data, len);
		slot->len = len;
	}
	i3c_master_queue_ibi(dev, slot);

	spin_unlock_irqrestore(&master->devs_lock, flags);

	return;

err_drain:
	/* Keep the hardware IBI queue consistent even when we drop the IBI. */
	dw_i3c_master_drain_ibi_queue(master, len);

	spin_unlock_irqrestore(&master->devs_lock, flags);
}

/* "ibis": referring to In-Band Interrupts, and not
 * https://en.wikipedia.org/wiki/Australian_white_ibis. The latter should
 * not be handled.
 */
static void dw_i3c_master_irq_handle_ibis(struct dw_i3c_master *master)
{
	unsigned int i, len, n_ibis;
	u32 reg;

	reg = readl(master->regs + QUEUE_STATUS_LEVEL);
	n_ibis = QUEUE_STATUS_IBI_STATUS_CNT(reg);
	if (!n_ibis)
		return;

	/* Dispatch each pending IBI by type: SIR, hot-join, or unsupported. */
	for (i = 0; i < n_ibis; i++) {
		reg = readl(master->regs + IBI_QUEUE_STATUS);

		if (IBI_TYPE_SIRQ(reg)) {
			dw_i3c_master_handle_ibi_sir(master, reg);
		} else if (IBI_TYPE_HJ(reg)) {
			/* Hot-join triggers DAA from process context. */
			queue_work(master->base.wq, &master->hj_work);
		} else {
			len = IBI_QUEUE_STATUS_DATA_LEN(reg);
			dev_info(&master->base.dev,
				 "unsupported IBI type 0x%lx len %d\n",
				 IBI_QUEUE_STATUS_IBI_ID(reg), len);
			dw_i3c_master_drain_ibi_queue(master, len);
		}
	}
}

/*
 * Top-level interrupt handler: completes in-flight transfers, clears
 * transfer-error status, and services pending IBIs.
 */
static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
{
	struct dw_i3c_master *master = dev_id;
	u32 status;

	status = readl(master->regs + INTR_STATUS);

	/* Spurious interrupt (nothing we enabled is pending): ack and bail. */
	if (!(status & readl(master->regs + INTR_STATUS_EN))) {
		writel(INTR_ALL, master->regs + INTR_STATUS);
		return IRQ_NONE;
	}

	spin_lock(&master->xferqueue.lock);
	dw_i3c_master_end_xfer_locked(master, status);
	if (status & INTR_TRANSFER_ERR_STAT)
		writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
	spin_unlock(&master->xferqueue.lock);

	if (status & INTR_IBI_THLD_STAT)
		dw_i3c_master_irq_handle_ibis(master);

	return IRQ_HANDLED;
}

static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
.bus_init = dw_i3c_master_bus_init, 1514 .bus_cleanup = dw_i3c_master_bus_cleanup, 1515 .attach_i3c_dev = dw_i3c_master_attach_i3c_dev, 1516 .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev, 1517 .detach_i3c_dev = dw_i3c_master_detach_i3c_dev, 1518 .do_daa = dw_i3c_master_daa, 1519 .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd, 1520 .send_ccc_cmd = dw_i3c_master_send_ccc_cmd, 1521 .priv_xfers = dw_i3c_master_priv_xfers, 1522 .attach_i2c_dev = dw_i3c_master_attach_i2c_dev, 1523 .detach_i2c_dev = dw_i3c_master_detach_i2c_dev, 1524 .i2c_xfers = dw_i3c_master_i2c_xfers, 1525 .request_ibi = dw_i3c_master_request_ibi, 1526 .free_ibi = dw_i3c_master_free_ibi, 1527 .enable_ibi = dw_i3c_master_enable_ibi, 1528 .disable_ibi = dw_i3c_master_disable_ibi, 1529 .recycle_ibi_slot = dw_i3c_master_recycle_ibi_slot, 1530 .enable_hotjoin = dw_i3c_master_enable_hotjoin, 1531 .disable_hotjoin = dw_i3c_master_disable_hotjoin, 1532 }; 1533 1534 /* default platform ops implementations */ 1535 static int dw_i3c_platform_init_nop(struct dw_i3c_master *i3c) 1536 { 1537 return 0; 1538 } 1539 1540 static void dw_i3c_platform_set_dat_ibi_nop(struct dw_i3c_master *i3c, 1541 struct i3c_dev_desc *dev, 1542 bool enable, u32 *dat) 1543 { 1544 } 1545 1546 static const struct dw_i3c_platform_ops dw_i3c_platform_ops_default = { 1547 .init = dw_i3c_platform_init_nop, 1548 .set_dat_ibi = dw_i3c_platform_set_dat_ibi_nop, 1549 }; 1550 1551 static void dw_i3c_hj_work(struct work_struct *work) 1552 { 1553 struct dw_i3c_master *master = 1554 container_of(work, typeof(*master), hj_work); 1555 1556 i3c_master_do_daa(&master->base); 1557 } 1558 1559 int dw_i3c_common_probe(struct dw_i3c_master *master, 1560 struct platform_device *pdev) 1561 { 1562 int ret, irq; 1563 1564 if (!master->platform_ops) 1565 master->platform_ops = &dw_i3c_platform_ops_default; 1566 1567 master->dev = &pdev->dev; 1568 1569 master->regs = devm_platform_ioremap_resource(pdev, 0); 1570 if (IS_ERR(master->regs)) 1571 return 
PTR_ERR(master->regs); 1572 1573 master->core_clk = devm_clk_get_enabled(&pdev->dev, NULL); 1574 if (IS_ERR(master->core_clk)) 1575 return PTR_ERR(master->core_clk); 1576 1577 master->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk"); 1578 if (IS_ERR(master->pclk)) 1579 return PTR_ERR(master->pclk); 1580 1581 master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev, 1582 "core_rst"); 1583 if (IS_ERR(master->core_rst)) 1584 return PTR_ERR(master->core_rst); 1585 1586 reset_control_deassert(master->core_rst); 1587 1588 spin_lock_init(&master->xferqueue.lock); 1589 INIT_LIST_HEAD(&master->xferqueue.list); 1590 1591 writel(INTR_ALL, master->regs + INTR_STATUS); 1592 irq = platform_get_irq(pdev, 0); 1593 ret = devm_request_irq(&pdev->dev, irq, 1594 dw_i3c_master_irq_handler, 0, 1595 dev_name(&pdev->dev), master); 1596 if (ret) 1597 goto err_assert_rst; 1598 1599 platform_set_drvdata(pdev, master); 1600 1601 pm_runtime_set_autosuspend_delay(&pdev->dev, RPM_AUTOSUSPEND_TIMEOUT); 1602 pm_runtime_use_autosuspend(&pdev->dev); 1603 pm_runtime_set_active(&pdev->dev); 1604 pm_runtime_enable(&pdev->dev); 1605 1606 /* Information regarding the FIFOs/QUEUEs depth */ 1607 ret = readl(master->regs + QUEUE_STATUS_LEVEL); 1608 master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret); 1609 1610 ret = readl(master->regs + DATA_BUFFER_STATUS_LEVEL); 1611 master->caps.datafifodepth = DATA_BUFFER_STATUS_LEVEL_TX(ret); 1612 1613 ret = readl(master->regs + DEVICE_ADDR_TABLE_POINTER); 1614 master->datstartaddr = ret; 1615 master->maxdevs = ret >> 16; 1616 master->free_pos = GENMASK(master->maxdevs - 1, 0); 1617 1618 master->quirks = (unsigned long)device_get_match_data(&pdev->dev); 1619 1620 INIT_WORK(&master->hj_work, dw_i3c_hj_work); 1621 ret = i3c_master_register(&master->base, &pdev->dev, 1622 &dw_mipi_i3c_ops, false); 1623 if (ret) 1624 goto err_disable_pm; 1625 1626 return 0; 1627 1628 err_disable_pm: 1629 pm_runtime_disable(&pdev->dev); 1630 
pm_runtime_set_suspended(&pdev->dev); 1631 pm_runtime_dont_use_autosuspend(&pdev->dev); 1632 1633 err_assert_rst: 1634 reset_control_assert(master->core_rst); 1635 1636 return ret; 1637 } 1638 EXPORT_SYMBOL_GPL(dw_i3c_common_probe); 1639 1640 void dw_i3c_common_remove(struct dw_i3c_master *master) 1641 { 1642 cancel_work_sync(&master->hj_work); 1643 i3c_master_unregister(&master->base); 1644 1645 pm_runtime_disable(master->dev); 1646 pm_runtime_set_suspended(master->dev); 1647 pm_runtime_dont_use_autosuspend(master->dev); 1648 } 1649 EXPORT_SYMBOL_GPL(dw_i3c_common_remove); 1650 1651 /* base platform implementation */ 1652 1653 static int dw_i3c_probe(struct platform_device *pdev) 1654 { 1655 struct dw_i3c_master *master; 1656 1657 master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL); 1658 if (!master) 1659 return -ENOMEM; 1660 1661 return dw_i3c_common_probe(master, pdev); 1662 } 1663 1664 static void dw_i3c_remove(struct platform_device *pdev) 1665 { 1666 struct dw_i3c_master *master = platform_get_drvdata(pdev); 1667 1668 dw_i3c_common_remove(master); 1669 } 1670 1671 static void dw_i3c_master_restore_addrs(struct dw_i3c_master *master) 1672 { 1673 u32 pos, reg_val; 1674 1675 writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(master->dev_addr), 1676 master->regs + DEVICE_ADDR); 1677 1678 for (pos = 0; pos < master->maxdevs; pos++) { 1679 if (master->free_pos & BIT(pos)) 1680 continue; 1681 1682 if (master->devs[pos].is_i2c_addr) 1683 reg_val = DEV_ADDR_TABLE_LEGACY_I2C_DEV | 1684 DEV_ADDR_TABLE_STATIC_ADDR(master->devs[pos].addr); 1685 else 1686 reg_val = DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr); 1687 1688 writel(reg_val, master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, pos)); 1689 } 1690 } 1691 1692 static void dw_i3c_master_restore_timing_regs(struct dw_i3c_master *master) 1693 { 1694 /* AMD platform specific OD and PP timings */ 1695 if (master->quirks & AMD_I3C_OD_PP_TIMING) 1696 amd_configure_od_pp_quirk(master); 1697 1698 
writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING); 1699 writel(master->bus_free_timing, master->regs + BUS_FREE_TIMING); 1700 writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING); 1701 writel(master->ext_lcnt_timing, master->regs + SCL_EXT_LCNT_TIMING); 1702 1703 if (master->i2c_slv_prsnt) { 1704 writel(master->i2c_fmp_timing, master->regs + SCL_I2C_FMP_TIMING); 1705 writel(master->i2c_fm_timing, master->regs + SCL_I2C_FM_TIMING); 1706 } 1707 } 1708 1709 static int dw_i3c_master_enable_clks(struct dw_i3c_master *master) 1710 { 1711 int ret = 0; 1712 1713 ret = clk_prepare_enable(master->core_clk); 1714 if (ret) 1715 return ret; 1716 1717 ret = clk_prepare_enable(master->pclk); 1718 if (ret) { 1719 clk_disable_unprepare(master->core_clk); 1720 return ret; 1721 } 1722 1723 return 0; 1724 } 1725 1726 static inline void dw_i3c_master_disable_clks(struct dw_i3c_master *master) 1727 { 1728 clk_disable_unprepare(master->pclk); 1729 clk_disable_unprepare(master->core_clk); 1730 } 1731 1732 static int __maybe_unused dw_i3c_master_runtime_suspend(struct device *dev) 1733 { 1734 struct dw_i3c_master *master = dev_get_drvdata(dev); 1735 1736 dw_i3c_master_disable(master); 1737 1738 reset_control_assert(master->core_rst); 1739 dw_i3c_master_disable_clks(master); 1740 pinctrl_pm_select_sleep_state(dev); 1741 return 0; 1742 } 1743 1744 static int __maybe_unused dw_i3c_master_runtime_resume(struct device *dev) 1745 { 1746 struct dw_i3c_master *master = dev_get_drvdata(dev); 1747 1748 pinctrl_pm_select_default_state(dev); 1749 dw_i3c_master_enable_clks(master); 1750 reset_control_deassert(master->core_rst); 1751 1752 dw_i3c_master_set_intr_regs(master); 1753 dw_i3c_master_restore_timing_regs(master); 1754 dw_i3c_master_restore_addrs(master); 1755 1756 dw_i3c_master_enable(master); 1757 return 0; 1758 } 1759 1760 static const struct dev_pm_ops dw_i3c_pm_ops = { 1761 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 1762 
SET_RUNTIME_PM_OPS(dw_i3c_master_runtime_suspend, dw_i3c_master_runtime_resume, NULL) 1763 }; 1764 1765 static const struct of_device_id dw_i3c_master_of_match[] = { 1766 { .compatible = "snps,dw-i3c-master-1.00a", }, 1767 {}, 1768 }; 1769 MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match); 1770 1771 static const struct acpi_device_id amd_i3c_device_match[] = { 1772 { "AMDI0015", AMD_I3C_OD_PP_TIMING }, 1773 { } 1774 }; 1775 MODULE_DEVICE_TABLE(acpi, amd_i3c_device_match); 1776 1777 static struct platform_driver dw_i3c_driver = { 1778 .probe = dw_i3c_probe, 1779 .remove = dw_i3c_remove, 1780 .driver = { 1781 .name = "dw-i3c-master", 1782 .of_match_table = dw_i3c_master_of_match, 1783 .acpi_match_table = amd_i3c_device_match, 1784 .pm = &dw_i3c_pm_ops, 1785 }, 1786 }; 1787 module_platform_driver(dw_i3c_driver); 1788 1789 MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>"); 1790 MODULE_DESCRIPTION("DesignWare MIPI I3C driver"); 1791 MODULE_LICENSE("GPL v2"); 1792