1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. 4 * 5 * Author: Vitor Soares <vitor.soares@synopsys.com> 6 */ 7 8 #include <linux/bitops.h> 9 #include <linux/clk.h> 10 #include <linux/completion.h> 11 #include <linux/err.h> 12 #include <linux/errno.h> 13 #include <linux/i3c/master.h> 14 #include <linux/interrupt.h> 15 #include <linux/ioport.h> 16 #include <linux/iopoll.h> 17 #include <linux/list.h> 18 #include <linux/module.h> 19 #include <linux/of.h> 20 #include <linux/pinctrl/consumer.h> 21 #include <linux/platform_device.h> 22 #include <linux/pm_runtime.h> 23 #include <linux/reset.h> 24 #include <linux/slab.h> 25 26 #include "../internals.h" 27 #include "dw-i3c-master.h" 28 29 #define DEVICE_CTRL 0x0 30 #define DEV_CTRL_ENABLE BIT(31) 31 #define DEV_CTRL_RESUME BIT(30) 32 #define DEV_CTRL_HOT_JOIN_NACK BIT(8) 33 #define DEV_CTRL_I2C_SLAVE_PRESENT BIT(7) 34 35 #define DEVICE_ADDR 0x4 36 #define DEV_ADDR_DYNAMIC_ADDR_VALID BIT(31) 37 #define DEV_ADDR_DYNAMIC(x) (((x) << 16) & GENMASK(22, 16)) 38 39 #define HW_CAPABILITY 0x8 40 #define COMMAND_QUEUE_PORT 0xc 41 #define COMMAND_PORT_TOC BIT(30) 42 #define COMMAND_PORT_READ_TRANSFER BIT(28) 43 #define COMMAND_PORT_SDAP BIT(27) 44 #define COMMAND_PORT_ROC BIT(26) 45 #define COMMAND_PORT_SPEED(x) (((x) << 21) & GENMASK(23, 21)) 46 #define COMMAND_PORT_DEV_INDEX(x) (((x) << 16) & GENMASK(20, 16)) 47 #define COMMAND_PORT_CP BIT(15) 48 #define COMMAND_PORT_CMD(x) (((x) << 7) & GENMASK(14, 7)) 49 #define COMMAND_PORT_TID(x) (((x) << 3) & GENMASK(6, 3)) 50 51 #define COMMAND_PORT_ARG_DATA_LEN(x) (((x) << 16) & GENMASK(31, 16)) 52 #define COMMAND_PORT_ARG_DATA_LEN_MAX 65536 53 #define COMMAND_PORT_TRANSFER_ARG 0x01 54 55 #define COMMAND_PORT_SDA_DATA_BYTE_3(x) (((x) << 24) & GENMASK(31, 24)) 56 #define COMMAND_PORT_SDA_DATA_BYTE_2(x) (((x) << 16) & GENMASK(23, 16)) 57 #define COMMAND_PORT_SDA_DATA_BYTE_1(x) (((x) << 8) & GENMASK(15, 8)) 58 #define 
COMMAND_PORT_SDA_BYTE_STRB_3 BIT(5) 59 #define COMMAND_PORT_SDA_BYTE_STRB_2 BIT(4) 60 #define COMMAND_PORT_SDA_BYTE_STRB_1 BIT(3) 61 #define COMMAND_PORT_SHORT_DATA_ARG 0x02 62 63 #define COMMAND_PORT_DEV_COUNT(x) (((x) << 21) & GENMASK(25, 21)) 64 #define COMMAND_PORT_ADDR_ASSGN_CMD 0x03 65 66 #define RESPONSE_QUEUE_PORT 0x10 67 #define RESPONSE_PORT_ERR_STATUS(x) (((x) & GENMASK(31, 28)) >> 28) 68 #define RESPONSE_NO_ERROR 0 69 #define RESPONSE_ERROR_CRC 1 70 #define RESPONSE_ERROR_PARITY 2 71 #define RESPONSE_ERROR_FRAME 3 72 #define RESPONSE_ERROR_IBA_NACK 4 73 #define RESPONSE_ERROR_ADDRESS_NACK 5 74 #define RESPONSE_ERROR_OVER_UNDER_FLOW 6 75 #define RESPONSE_ERROR_TRANSF_ABORT 8 76 #define RESPONSE_ERROR_I2C_W_NACK_ERR 9 77 #define RESPONSE_PORT_TID(x) (((x) & GENMASK(27, 24)) >> 24) 78 #define RESPONSE_PORT_DATA_LEN(x) ((x) & GENMASK(15, 0)) 79 80 #define RX_TX_DATA_PORT 0x14 81 #define IBI_QUEUE_STATUS 0x18 82 #define IBI_QUEUE_STATUS_IBI_ID(x) (((x) & GENMASK(15, 8)) >> 8) 83 #define IBI_QUEUE_STATUS_DATA_LEN(x) ((x) & GENMASK(7, 0)) 84 #define IBI_QUEUE_IBI_ADDR(x) (IBI_QUEUE_STATUS_IBI_ID(x) >> 1) 85 #define IBI_QUEUE_IBI_RNW(x) (IBI_QUEUE_STATUS_IBI_ID(x) & BIT(0)) 86 #define IBI_TYPE_MR(x) \ 87 ((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x)) 88 #define IBI_TYPE_HJ(x) \ 89 ((IBI_QUEUE_IBI_ADDR(x) == I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x)) 90 #define IBI_TYPE_SIRQ(x) \ 91 ((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && IBI_QUEUE_IBI_RNW(x)) 92 93 #define QUEUE_THLD_CTRL 0x1c 94 #define QUEUE_THLD_CTRL_IBI_STAT_MASK GENMASK(31, 24) 95 #define QUEUE_THLD_CTRL_IBI_STAT(x) (((x) - 1) << 24) 96 #define QUEUE_THLD_CTRL_IBI_DATA_MASK GENMASK(20, 16) 97 #define QUEUE_THLD_CTRL_IBI_DATA(x) ((x) << 16) 98 #define QUEUE_THLD_CTRL_RESP_BUF_MASK GENMASK(15, 8) 99 #define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8) 100 101 #define DATA_BUFFER_THLD_CTRL 0x20 102 #define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(11, 8) 103 104 #define 
IBI_QUEUE_CTRL 0x24 105 #define IBI_MR_REQ_REJECT 0x2C 106 #define IBI_SIR_REQ_REJECT 0x30 107 #define IBI_REQ_REJECT_ALL GENMASK(31, 0) 108 109 #define RESET_CTRL 0x34 110 #define RESET_CTRL_IBI_QUEUE BIT(5) 111 #define RESET_CTRL_RX_FIFO BIT(4) 112 #define RESET_CTRL_TX_FIFO BIT(3) 113 #define RESET_CTRL_RESP_QUEUE BIT(2) 114 #define RESET_CTRL_CMD_QUEUE BIT(1) 115 #define RESET_CTRL_SOFT BIT(0) 116 117 #define SLV_EVENT_CTRL 0x38 118 #define INTR_STATUS 0x3c 119 #define INTR_STATUS_EN 0x40 120 #define INTR_SIGNAL_EN 0x44 121 #define INTR_FORCE 0x48 122 #define INTR_BUSOWNER_UPDATE_STAT BIT(13) 123 #define INTR_IBI_UPDATED_STAT BIT(12) 124 #define INTR_READ_REQ_RECV_STAT BIT(11) 125 #define INTR_DEFSLV_STAT BIT(10) 126 #define INTR_TRANSFER_ERR_STAT BIT(9) 127 #define INTR_DYN_ADDR_ASSGN_STAT BIT(8) 128 #define INTR_CCC_UPDATED_STAT BIT(6) 129 #define INTR_TRANSFER_ABORT_STAT BIT(5) 130 #define INTR_RESP_READY_STAT BIT(4) 131 #define INTR_CMD_QUEUE_READY_STAT BIT(3) 132 #define INTR_IBI_THLD_STAT BIT(2) 133 #define INTR_RX_THLD_STAT BIT(1) 134 #define INTR_TX_THLD_STAT BIT(0) 135 #define INTR_ALL (INTR_BUSOWNER_UPDATE_STAT | \ 136 INTR_IBI_UPDATED_STAT | \ 137 INTR_READ_REQ_RECV_STAT | \ 138 INTR_DEFSLV_STAT | \ 139 INTR_TRANSFER_ERR_STAT | \ 140 INTR_DYN_ADDR_ASSGN_STAT | \ 141 INTR_CCC_UPDATED_STAT | \ 142 INTR_TRANSFER_ABORT_STAT | \ 143 INTR_RESP_READY_STAT | \ 144 INTR_CMD_QUEUE_READY_STAT | \ 145 INTR_IBI_THLD_STAT | \ 146 INTR_TX_THLD_STAT | \ 147 INTR_RX_THLD_STAT) 148 149 #define INTR_MASTER_MASK (INTR_TRANSFER_ERR_STAT | \ 150 INTR_RESP_READY_STAT) 151 152 #define QUEUE_STATUS_LEVEL 0x4c 153 #define QUEUE_STATUS_IBI_STATUS_CNT(x) (((x) & GENMASK(28, 24)) >> 24) 154 #define QUEUE_STATUS_IBI_BUF_BLR(x) (((x) & GENMASK(23, 16)) >> 16) 155 #define QUEUE_STATUS_LEVEL_RESP(x) (((x) & GENMASK(15, 8)) >> 8) 156 #define QUEUE_STATUS_LEVEL_CMD(x) ((x) & GENMASK(7, 0)) 157 158 #define DATA_BUFFER_STATUS_LEVEL 0x50 159 #define DATA_BUFFER_STATUS_LEVEL_TX(x) ((x) & 
GENMASK(7, 0)) 160 161 #define PRESENT_STATE 0x54 162 #define CCC_DEVICE_STATUS 0x58 163 #define DEVICE_ADDR_TABLE_POINTER 0x5c 164 #define DEVICE_ADDR_TABLE_DEPTH(x) (((x) & GENMASK(31, 16)) >> 16) 165 #define DEVICE_ADDR_TABLE_ADDR(x) ((x) & GENMASK(7, 0)) 166 167 #define DEV_CHAR_TABLE_POINTER 0x60 168 #define VENDOR_SPECIFIC_REG_POINTER 0x6c 169 #define SLV_PID_VALUE 0x74 170 #define SLV_CHAR_CTRL 0x78 171 #define SLV_MAX_LEN 0x7c 172 #define MAX_READ_TURNAROUND 0x80 173 #define MAX_DATA_SPEED 0x84 174 #define SLV_DEBUG_STATUS 0x88 175 #define SLV_INTR_REQ 0x8c 176 #define DEVICE_CTRL_EXTENDED 0xb0 177 #define SCL_I3C_OD_TIMING 0xb4 178 #define SCL_I3C_PP_TIMING 0xb8 179 #define SCL_I3C_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16)) 180 #define SCL_I3C_TIMING_LCNT(x) ((x) & GENMASK(7, 0)) 181 #define SCL_I3C_TIMING_CNT_MIN 5 182 183 #define SCL_I2C_FM_TIMING 0xbc 184 #define SCL_I2C_FM_TIMING_HCNT(x) (((x) << 16) & GENMASK(31, 16)) 185 #define SCL_I2C_FM_TIMING_LCNT(x) ((x) & GENMASK(15, 0)) 186 187 #define SCL_I2C_FMP_TIMING 0xc0 188 #define SCL_I2C_FMP_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16)) 189 #define SCL_I2C_FMP_TIMING_LCNT(x) ((x) & GENMASK(15, 0)) 190 191 #define SCL_EXT_LCNT_TIMING 0xc8 192 #define SCL_EXT_LCNT_4(x) (((x) << 24) & GENMASK(31, 24)) 193 #define SCL_EXT_LCNT_3(x) (((x) << 16) & GENMASK(23, 16)) 194 #define SCL_EXT_LCNT_2(x) (((x) << 8) & GENMASK(15, 8)) 195 #define SCL_EXT_LCNT_1(x) ((x) & GENMASK(7, 0)) 196 197 #define SCL_EXT_TERMN_LCNT_TIMING 0xcc 198 #define BUS_FREE_TIMING 0xd4 199 #define BUS_I3C_MST_FREE(x) ((x) & GENMASK(15, 0)) 200 201 #define BUS_IDLE_TIMING 0xd8 202 #define I3C_VER_ID 0xe0 203 #define I3C_VER_TYPE 0xe4 204 #define EXTENDED_CAPABILITY 0xe8 205 #define SLAVE_CONFIG 0xec 206 207 #define DEV_ADDR_TABLE_IBI_MDB BIT(12) 208 #define DEV_ADDR_TABLE_SIR_REJECT BIT(13) 209 #define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31) 210 #define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16)) 211 #define 
DEV_ADDR_TABLE_STATIC_ADDR(x)	((x) & GENMASK(6, 0))
#define DEV_ADDR_TABLE_LOC(start, idx)	((start) + ((idx) << 2))

/* SDR fallback data rates programmed into SCL_EXT_LCNT_TIMING */
#define I3C_BUS_SDR1_SCL_RATE		8000000
#define I3C_BUS_SDR2_SCL_RATE		6000000
#define I3C_BUS_SDR3_SCL_RATE		4000000
#define I3C_BUS_SDR4_SCL_RATE		2000000
#define I3C_BUS_I2C_FM_TLOW_MIN_NS	1300
#define I3C_BUS_I2C_FMP_TLOW_MIN_NS	500
#define I3C_BUS_THIGH_MAX_NS		41

#define XFER_TIMEOUT (msecs_to_jiffies(1000))
#define RPM_AUTOSUSPEND_TIMEOUT		1000 /* ms */

/* Timing values to configure 12.5MHz frequency */
#define AMD_I3C_OD_TIMING		0x4C007C
#define AMD_I3C_PP_TIMING		0x8001A

/* List of quirks */
#define AMD_I3C_OD_PP_TIMING		BIT(1)
#define DW_I3C_DISABLE_RUNTIME_PM_QUIRK	BIT(2)

/*
 * One command of a transfer: the two descriptor words pushed to
 * COMMAND_QUEUE_PORT plus the TX/RX payload bookkeeping for it.
 */
struct dw_i3c_cmd {
	u32 cmd_lo;
	u32 cmd_hi;
	u16 tx_len;
	const void *tx_buf;
	u16 rx_len;
	void *rx_buf;
	u8 error;	/* RESPONSE_ERROR_* code pulled from the response word */
};

/* A queued transfer: a batch of commands completed/aborted as a unit. */
struct dw_i3c_xfer {
	struct list_head node;	/* link in master->xferqueue.list */
	struct completion comp;
	int ret;		/* stays -ETIMEDOUT until responses are seen */
	unsigned int ncmds;
	struct dw_i3c_cmd cmds[] __counted_by(ncmds);
};

/* Per attached i3c/i2c device: its slot in the DAT plus its IBI pool. */
struct dw_i3c_i2c_dev_data {
	u8 index;
	struct i3c_generic_ibi_pool *ibi_pool;
};

struct dw_i3c_drvdata {
	u32 flags;
};

/*
 * Advertise only the single-destination CCCs the command engine can
 * encode; the core will not hand us anything we reject here.
 */
static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					   const struct i3c_ccc_cmd *cmd)
{
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		return false;
	}
}

static inline struct dw_i3c_master *
to_dw_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct dw_i3c_master, base);
}

/* Clear DEVICE_CTRL.ENABLE to stop the controller. */
static void dw_i3c_master_disable(struct dw_i3c_master *master)
{
	writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
	       master->regs + DEVICE_CTRL);
}

/* Enable the controller, NACKing Hot-Join and flagging i2c devices. */
static void dw_i3c_master_enable(struct dw_i3c_master *master)
{
	u32 dev_ctrl;

	dev_ctrl = readl(master->regs + DEVICE_CTRL);
	/* For now don't support Hot-Join */
	dev_ctrl |= DEV_CTRL_HOT_JOIN_NACK;
	if (master->i2c_slv_prsnt)
		dev_ctrl |= DEV_CTRL_I2C_SLAVE_PRESENT;
	writel(dev_ctrl | DEV_CTRL_ENABLE,
	       master->regs + DEVICE_CTRL);
}

/* Find the DAT slot holding @addr, or -EINVAL if it is not attached. */
static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
{
	int pos;

	for (pos = 0; pos < master->maxdevs; pos++) {
		if (addr == master->devs[pos].addr)
			return pos;
	}

	return -EINVAL;
}

/* Lowest free DAT slot index, or -ENOSPC when the table is full. */
static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
{
	if (!(master->free_pos & GENMASK(master->maxdevs - 1, 0)))
		return -ENOSPC;

	return ffs(master->free_pos) - 1;
}

static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
				     const u8 *bytes, int nbytes)
{
	i3c_writel_fifo(master->regs + RX_TX_DATA_PORT, bytes, nbytes);
}

static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
				       u8 *bytes, int nbytes)
{
	i3c_readl_fifo(master->regs + RX_TX_DATA_PORT, bytes, nbytes);
}

static void dw_i3c_master_read_ibi_fifo(struct dw_i3c_master *master,
					u8 *bytes, int nbytes)
{
	i3c_readl_fifo(master->regs + IBI_QUEUE_STATUS, bytes, nbytes);
}

/*
 * Allocate an xfer with room for @ncmds commands. ret is pre-set to
 * -ETIMEDOUT so an xfer dequeued on timeout reports the right error.
 */
static struct dw_i3c_xfer *
dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
{
	struct dw_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void dw_i3c_master_free_xfer(struct dw_i3c_xfer *xfer)
{
	kfree(xfer);
}

/*
 * Kick the current xfer: preload all TX payloads, raise the response
 * threshold to the number of commands so one IRQ covers the batch, then
 * push the descriptor word pairs (cmd_hi first) to the command queue.
 * Caller holds xferqueue.lock.
 */
static void dw_i3c_master_start_xfer_locked(struct dw_i3c_master *master)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;
	u32 thld_ctrl;

	if (!xfer)
		return;

	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
	}

	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
	thld_ctrl |= QUEUE_THLD_CTRL_RESP_BUF(xfer->ncmds);
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd_hi, master->regs + COMMAND_QUEUE_PORT);
		writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
	}
}

/* Queue @xfer; start it immediately if the controller is idle. */
static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		dw_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

/*
 * Remove @xfer from the queue. If it is the in-flight xfer, flush the
 * hardware queues/FIFOs and poll RESET_CTRL until the reset self-clears.
 * Caller holds xferqueue.lock.
 */
static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
					      struct dw_i3c_xfer *xfer)
{
	if (master->xferqueue.cur == xfer) {
		u32 status;

		master->xferqueue.cur = NULL;

		writel(RESET_CTRL_RX_FIFO |
		       RESET_CTRL_TX_FIFO |
		       RESET_CTRL_RESP_QUEUE | RESET_CTRL_CMD_QUEUE,
		       master->regs + RESET_CTRL);

		readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
					  !status, 10, 1000000);
	} else {
		list_del_init(&xfer->node);
	}
}

static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	dw_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

/*
 * IRQ-path completion: drain the response queue, map each response to
 * its command via the TID field, translate the worst error to an errno,
 * complete the waiter and start the next queued xfer (if any).
 * On error the controller halts and needs DEV_CTRL_RESUME to continue.
 * Caller holds xferqueue.lock.
 */
static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 nresp;

	if (!xfer)
		return;

	nresp = readl(master->regs + QUEUE_STATUS_LEVEL);
	nresp = QUEUE_STATUS_LEVEL_RESP(nresp);

	for (i = 0; i < nresp; i++) {
		struct dw_i3c_cmd *cmd;
		u32 resp;

		resp = readl(master->regs + RESPONSE_QUEUE_PORT);

		cmd = &xfer->cmds[RESPONSE_PORT_TID(resp)];
		cmd->rx_len = RESPONSE_PORT_DATA_LEN(resp);
		cmd->error = RESPONSE_PORT_ERR_STATUS(resp);
		if (cmd->rx_len && !cmd->error)
			dw_i3c_master_read_rx_fifo(master, cmd->rx_buf,
						   cmd->rx_len);
	}

	for (i = 0; i < nresp; i++) {
		switch (xfer->cmds[i].error) {
		case RESPONSE_NO_ERROR:
			break;
		case RESPONSE_ERROR_PARITY:
		case RESPONSE_ERROR_IBA_NACK:
		case RESPONSE_ERROR_TRANSF_ABORT:
		case RESPONSE_ERROR_CRC:
		case RESPONSE_ERROR_FRAME:
			ret = -EIO;
			break;
		case RESPONSE_ERROR_OVER_UNDER_FLOW:
			ret = -ENOSPC;
			break;
		case RESPONSE_ERROR_I2C_W_NACK_ERR:
		case RESPONSE_ERROR_ADDRESS_NACK:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	if (ret < 0) {
		/* Flush the halted queues and resume the controller. */
		dw_i3c_master_dequeue_xfer_locked(master, xfer);
		writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
		       master->regs + DEVICE_CTRL);
	}

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct dw_i3c_xfer,
					node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	dw_i3c_master_start_xfer_locked(master);
}

/*
 * Program thresholds and interrupt masks, and reject all IBI/MR
 * requests by default (SIR acceptance is opted into per device later).
 */
static void dw_i3c_master_set_intr_regs(struct dw_i3c_master *master)
{
	u32 thld_ctrl;

	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~(QUEUE_THLD_CTRL_RESP_BUF_MASK |
		       QUEUE_THLD_CTRL_IBI_STAT_MASK |
		       QUEUE_THLD_CTRL_IBI_DATA_MASK);
	thld_ctrl |= QUEUE_THLD_CTRL_IBI_STAT(1) |
		     QUEUE_THLD_CTRL_IBI_DATA(31);
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
	thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
	writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);

	writel(INTR_ALL, master->regs + INTR_STATUS);
	writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
	writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);

	master->sir_rej_mask = IBI_REQ_REJECT_ALL;
	writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);

	writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
}

/*
 * Derive the I3C push-pull/open-drain SCL timings and the per-SDR-rate
 * low counts from the core clock rate; cache each programmed value so
 * it can be restored after runtime suspend.
 */
static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u32 scl_timing;
	u8 hcnt, lcnt;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	/* core clock period in ns, rounded up */
	core_period = DIV_ROUND_UP(1000000000, core_rate);

	hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
	if (hcnt < SCL_I3C_TIMING_CNT_MIN)
		hcnt = SCL_I3C_TIMING_CNT_MIN;

	lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) - hcnt;
	if (lcnt < SCL_I3C_TIMING_CNT_MIN)
		lcnt = SCL_I3C_TIMING_CNT_MIN;

	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
	master->i3c_pp_timing = scl_timing;

	/*
	 * In pure i3c mode, MST_FREE represents tCAS. In shared mode, this
	 * will be set up by dw_i2c_clk_cfg as tLOW.
	 */
	if (master->base.bus.mode == I3C_BUS_MODE_PURE) {
		writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
		master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);
	}

	lcnt = max_t(u8,
		     DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);
	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
	master->i3c_od_timing = scl_timing;

	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
	scl_timing = SCL_EXT_LCNT_1(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_2(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR3_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_3(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_4(lcnt);
	writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
	master->ext_lcnt_timing = scl_timing;

	return 0;
}

/*
 * Derive FM/FM+ i2c timings for mixed-mode buses and mark that legacy
 * i2c devices are present on the bus.
 */
static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u16 hcnt, lcnt;
	u32 scl_timing;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	core_period = DIV_ROUND_UP(1000000000, core_rate);

	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE) - lcnt;
	scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
		     SCL_I2C_FMP_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
	master->i2c_fmp_timing = scl_timing;

	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_MAX_RATE) - lcnt;
	scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
		     SCL_I2C_FM_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
	master->i2c_fm_timing = scl_timing;

	/* In shared mode MST_FREE is the i2c FM tLOW (see dw_i3c_clk_cfg). */
	writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
	master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);

	writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
	       master->regs + DEVICE_CTRL);
	master->i2c_slv_prsnt = true;

	return 0;
}

/*
 * Bus-init callback: resume the controller, run platform init, program
 * clocking for the bus mode, claim a dynamic address for the master
 * itself, then arm interrupts and enable the controller.
 */
static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = { };
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	ret = master->platform_ops->init(master);
	if (ret)
		goto rpm_out;

	switch (bus->mode) {
	case I3C_BUS_MODE_MIXED_FAST:
	case I3C_BUS_MODE_MIXED_LIMITED:
		ret = dw_i2c_clk_cfg(master);
		if (ret)
			goto rpm_out;
		fallthrough;
	case I3C_BUS_MODE_PURE:
		ret = dw_i3c_clk_cfg(master);
		if (ret)
			goto rpm_out;
		break;
	default:
		ret = -EINVAL;
		goto rpm_out;
	}

	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		goto rpm_out;

	writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
	       master->regs + DEVICE_ADDR);
	master->dev_addr = ret;
	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		goto rpm_out;

	dw_i3c_master_set_intr_regs(master);
	dw_i3c_master_enable(master);

rpm_out:
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}

static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	dw_i3c_master_disable(master);
}

/* Issue a write-direction (SET-class) CCC as a single-command xfer. */
static int dw_i3c_ccc_set(struct
dw_i3c_master *master, 702 struct i3c_ccc_cmd *ccc) 703 { 704 struct dw_i3c_xfer *xfer; 705 struct dw_i3c_cmd *cmd; 706 int ret, pos = 0; 707 708 if (ccc->id & I3C_CCC_DIRECT) { 709 pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr); 710 if (pos < 0) 711 return pos; 712 } 713 714 xfer = dw_i3c_master_alloc_xfer(master, 1); 715 if (!xfer) 716 return -ENOMEM; 717 718 cmd = xfer->cmds; 719 cmd->tx_buf = ccc->dests[0].payload.data; 720 cmd->tx_len = ccc->dests[0].payload.len; 721 722 cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) | 723 COMMAND_PORT_TRANSFER_ARG; 724 725 cmd->cmd_lo = COMMAND_PORT_CP | 726 COMMAND_PORT_DEV_INDEX(pos) | 727 COMMAND_PORT_CMD(ccc->id) | 728 COMMAND_PORT_TOC | 729 COMMAND_PORT_ROC; 730 731 dw_i3c_master_enqueue_xfer(master, xfer); 732 if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) 733 dw_i3c_master_dequeue_xfer(master, xfer); 734 735 ret = xfer->ret; 736 if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK) 737 ccc->err = I3C_ERROR_M2; 738 739 dw_i3c_master_free_xfer(xfer); 740 741 return ret; 742 } 743 744 static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc) 745 { 746 struct dw_i3c_xfer *xfer; 747 struct dw_i3c_cmd *cmd; 748 int ret, pos; 749 750 pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr); 751 if (pos < 0) 752 return pos; 753 754 xfer = dw_i3c_master_alloc_xfer(master, 1); 755 if (!xfer) 756 return -ENOMEM; 757 758 cmd = xfer->cmds; 759 cmd->rx_buf = ccc->dests[0].payload.data; 760 cmd->rx_len = ccc->dests[0].payload.len; 761 762 cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) | 763 COMMAND_PORT_TRANSFER_ARG; 764 765 cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER | 766 COMMAND_PORT_CP | 767 COMMAND_PORT_DEV_INDEX(pos) | 768 COMMAND_PORT_CMD(ccc->id) | 769 COMMAND_PORT_TOC | 770 COMMAND_PORT_ROC; 771 772 dw_i3c_master_enqueue_xfer(master, xfer); 773 if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) 774 
dw_i3c_master_dequeue_xfer(master, xfer); 775 776 ret = xfer->ret; 777 if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK) 778 ccc->err = I3C_ERROR_M2; 779 dw_i3c_master_free_xfer(xfer); 780 781 return ret; 782 } 783 784 static void amd_configure_od_pp_quirk(struct dw_i3c_master *master) 785 { 786 master->i3c_od_timing = AMD_I3C_OD_TIMING; 787 master->i3c_pp_timing = AMD_I3C_PP_TIMING; 788 } 789 790 static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m, 791 struct i3c_ccc_cmd *ccc) 792 { 793 struct dw_i3c_master *master = to_dw_i3c_master(m); 794 int ret = 0; 795 796 if (ccc->id == I3C_CCC_ENTDAA) 797 return -EINVAL; 798 799 /* AMD platform specific OD and PP timings */ 800 if (master->quirks & AMD_I3C_OD_PP_TIMING) { 801 amd_configure_od_pp_quirk(master); 802 writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING); 803 writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING); 804 } 805 806 ret = pm_runtime_resume_and_get(master->dev); 807 if (ret < 0) { 808 dev_err(master->dev, 809 "<%s> cannot resume i3c bus master, err: %d\n", 810 __func__, ret); 811 return ret; 812 } 813 814 if (ccc->rnw) 815 ret = dw_i3c_ccc_get(master, ccc); 816 else 817 ret = dw_i3c_ccc_set(master, ccc); 818 819 pm_runtime_put_autosuspend(master->dev); 820 return ret; 821 } 822 823 static int dw_i3c_master_daa(struct i3c_master_controller *m) 824 { 825 struct dw_i3c_master *master = to_dw_i3c_master(m); 826 struct dw_i3c_xfer *xfer; 827 struct dw_i3c_cmd *cmd; 828 u32 olddevs, newdevs; 829 u8 last_addr = 0; 830 int ret, pos; 831 832 ret = pm_runtime_resume_and_get(master->dev); 833 if (ret < 0) { 834 dev_err(master->dev, 835 "<%s> cannot resume i3c bus master, err: %d\n", 836 __func__, ret); 837 return ret; 838 } 839 840 olddevs = ~(master->free_pos); 841 842 /* Prepare DAT before launching DAA. 
*/ 843 for (pos = 0; pos < master->maxdevs; pos++) { 844 if (olddevs & BIT(pos)) 845 continue; 846 847 ret = i3c_master_get_free_addr(m, last_addr + 1); 848 if (ret < 0) { 849 ret = -ENOSPC; 850 goto rpm_out; 851 } 852 853 master->devs[pos].addr = ret; 854 last_addr = ret; 855 856 ret |= parity8(ret) ? 0 : BIT(7); 857 858 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret), 859 master->regs + 860 DEV_ADDR_TABLE_LOC(master->datstartaddr, pos)); 861 862 ret = 0; 863 } 864 865 xfer = dw_i3c_master_alloc_xfer(master, 1); 866 if (!xfer) { 867 ret = -ENOMEM; 868 goto rpm_out; 869 } 870 871 pos = dw_i3c_master_get_free_pos(master); 872 if (pos < 0) { 873 dw_i3c_master_free_xfer(xfer); 874 ret = pos; 875 goto rpm_out; 876 } 877 cmd = &xfer->cmds[0]; 878 cmd->cmd_hi = 0x1; 879 cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) | 880 COMMAND_PORT_DEV_INDEX(pos) | 881 COMMAND_PORT_CMD(I3C_CCC_ENTDAA) | 882 COMMAND_PORT_ADDR_ASSGN_CMD | 883 COMMAND_PORT_TOC | 884 COMMAND_PORT_ROC; 885 886 dw_i3c_master_enqueue_xfer(master, xfer); 887 if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) 888 dw_i3c_master_dequeue_xfer(master, xfer); 889 890 newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0); 891 newdevs &= ~olddevs; 892 893 for (pos = 0; pos < master->maxdevs; pos++) { 894 if (newdevs & BIT(pos)) 895 i3c_master_add_i3c_dev_locked(m, master->devs[pos].addr); 896 } 897 898 dw_i3c_master_free_xfer(xfer); 899 900 rpm_out: 901 pm_runtime_put_autosuspend(master->dev); 902 return ret; 903 } 904 905 static int dw_i3c_master_i3c_xfers(struct i3c_dev_desc *dev, 906 struct i3c_xfer *i3c_xfers, 907 int i3c_nxfers, enum i3c_xfer_mode mode) 908 { 909 struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 910 struct i3c_master_controller *m = i3c_dev_get_master(dev); 911 struct dw_i3c_master *master = to_dw_i3c_master(m); 912 unsigned int nrxwords = 0, ntxwords = 0; 913 struct dw_i3c_xfer *xfer; 914 int i, ret = 0; 915 916 if (!i3c_nxfers) 917 return 0; 918 919 if 
(i3c_nxfers > master->caps.cmdfifodepth) 920 return -EOPNOTSUPP; 921 922 for (i = 0; i < i3c_nxfers; i++) { 923 if (i3c_xfers[i].rnw) 924 nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4); 925 else 926 ntxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4); 927 } 928 929 if (ntxwords > master->caps.datafifodepth || 930 nrxwords > master->caps.datafifodepth) 931 return -EOPNOTSUPP; 932 933 xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers); 934 if (!xfer) 935 return -ENOMEM; 936 937 ret = pm_runtime_resume_and_get(master->dev); 938 if (ret < 0) { 939 dev_err(master->dev, 940 "<%s> cannot resume i3c bus master, err: %d\n", 941 __func__, ret); 942 return ret; 943 } 944 945 for (i = 0; i < i3c_nxfers; i++) { 946 struct dw_i3c_cmd *cmd = &xfer->cmds[i]; 947 948 cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) | 949 COMMAND_PORT_TRANSFER_ARG; 950 951 if (i3c_xfers[i].rnw) { 952 cmd->rx_buf = i3c_xfers[i].data.in; 953 cmd->rx_len = i3c_xfers[i].len; 954 cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER | 955 COMMAND_PORT_SPEED(dev->info.max_read_ds); 956 957 } else { 958 cmd->tx_buf = i3c_xfers[i].data.out; 959 cmd->tx_len = i3c_xfers[i].len; 960 cmd->cmd_lo = 961 COMMAND_PORT_SPEED(dev->info.max_write_ds); 962 } 963 964 cmd->cmd_lo |= COMMAND_PORT_TID(i) | 965 COMMAND_PORT_DEV_INDEX(data->index) | 966 COMMAND_PORT_ROC; 967 968 if (i == (i3c_nxfers - 1)) 969 cmd->cmd_lo |= COMMAND_PORT_TOC; 970 } 971 972 dw_i3c_master_enqueue_xfer(master, xfer); 973 if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) 974 dw_i3c_master_dequeue_xfer(master, xfer); 975 976 for (i = 0; i < i3c_nxfers; i++) { 977 struct dw_i3c_cmd *cmd = &xfer->cmds[i]; 978 979 if (i3c_xfers[i].rnw) 980 i3c_xfers[i].len = cmd->rx_len; 981 } 982 983 ret = xfer->ret; 984 dw_i3c_master_free_xfer(xfer); 985 986 pm_runtime_put_autosuspend(master->dev); 987 return ret; 988 } 989 990 static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, 991 u8 old_dyn_addr) 992 { 993 struct dw_i3c_i2c_dev_data *data = 
i3c_dev_get_master_data(dev); 994 struct i3c_master_controller *m = i3c_dev_get_master(dev); 995 struct dw_i3c_master *master = to_dw_i3c_master(m); 996 int pos; 997 998 pos = dw_i3c_master_get_free_pos(master); 999 1000 if (data->index > pos && pos > 0) { 1001 writel(0, 1002 master->regs + 1003 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1004 1005 master->devs[data->index].addr = 0; 1006 master->free_pos |= BIT(data->index); 1007 1008 data->index = pos; 1009 master->devs[pos].addr = dev->info.dyn_addr; 1010 master->free_pos &= ~BIT(pos); 1011 } 1012 1013 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), 1014 master->regs + 1015 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1016 1017 master->devs[data->index].addr = dev->info.dyn_addr; 1018 1019 return 0; 1020 } 1021 1022 static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev) 1023 { 1024 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1025 struct dw_i3c_master *master = to_dw_i3c_master(m); 1026 struct dw_i3c_i2c_dev_data *data; 1027 int pos; 1028 1029 pos = dw_i3c_master_get_free_pos(master); 1030 if (pos < 0) 1031 return pos; 1032 1033 data = kzalloc(sizeof(*data), GFP_KERNEL); 1034 if (!data) 1035 return -ENOMEM; 1036 1037 data->index = pos; 1038 master->devs[pos].addr = dev->info.dyn_addr ? 
: dev->info.static_addr; 1039 master->free_pos &= ~BIT(pos); 1040 i3c_dev_set_master_data(dev, data); 1041 1042 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr), 1043 master->regs + 1044 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1045 1046 return 0; 1047 } 1048 1049 static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev) 1050 { 1051 struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1052 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1053 struct dw_i3c_master *master = to_dw_i3c_master(m); 1054 1055 writel(0, 1056 master->regs + 1057 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1058 1059 i3c_dev_set_master_data(dev, NULL); 1060 master->devs[data->index].addr = 0; 1061 master->free_pos |= BIT(data->index); 1062 kfree(data); 1063 } 1064 1065 static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev, 1066 struct i2c_msg *i2c_xfers, 1067 int i2c_nxfers) 1068 { 1069 struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev); 1070 struct i3c_master_controller *m = i2c_dev_get_master(dev); 1071 struct dw_i3c_master *master = to_dw_i3c_master(m); 1072 unsigned int nrxwords = 0, ntxwords = 0; 1073 struct dw_i3c_xfer *xfer; 1074 int i, ret = 0; 1075 1076 if (!i2c_nxfers) 1077 return 0; 1078 1079 if (i2c_nxfers > master->caps.cmdfifodepth) 1080 return -EOPNOTSUPP; 1081 1082 for (i = 0; i < i2c_nxfers; i++) { 1083 if (i2c_xfers[i].flags & I2C_M_RD) 1084 nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4); 1085 else 1086 ntxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4); 1087 } 1088 1089 if (ntxwords > master->caps.datafifodepth || 1090 nrxwords > master->caps.datafifodepth) 1091 return -EOPNOTSUPP; 1092 1093 xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers); 1094 if (!xfer) 1095 return -ENOMEM; 1096 1097 ret = pm_runtime_resume_and_get(master->dev); 1098 if (ret < 0) { 1099 dev_err(master->dev, 1100 "<%s> cannot resume i3c bus master, err: %d\n", 1101 __func__, ret); 1102 return ret; 1103 } 1104 1105 for (i 
= 0; i < i2c_nxfers; i++) { 1106 struct dw_i3c_cmd *cmd = &xfer->cmds[i]; 1107 1108 cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i2c_xfers[i].len) | 1109 COMMAND_PORT_TRANSFER_ARG; 1110 1111 cmd->cmd_lo = COMMAND_PORT_TID(i) | 1112 COMMAND_PORT_DEV_INDEX(data->index) | 1113 COMMAND_PORT_ROC; 1114 1115 if (i2c_xfers[i].flags & I2C_M_RD) { 1116 cmd->cmd_lo |= COMMAND_PORT_READ_TRANSFER; 1117 cmd->rx_buf = i2c_xfers[i].buf; 1118 cmd->rx_len = i2c_xfers[i].len; 1119 } else { 1120 cmd->tx_buf = i2c_xfers[i].buf; 1121 cmd->tx_len = i2c_xfers[i].len; 1122 } 1123 1124 if (i == (i2c_nxfers - 1)) 1125 cmd->cmd_lo |= COMMAND_PORT_TOC; 1126 } 1127 1128 dw_i3c_master_enqueue_xfer(master, xfer); 1129 if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout)) 1130 dw_i3c_master_dequeue_xfer(master, xfer); 1131 1132 ret = xfer->ret; 1133 dw_i3c_master_free_xfer(xfer); 1134 1135 pm_runtime_put_autosuspend(master->dev); 1136 return ret; 1137 } 1138 1139 static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev) 1140 { 1141 struct i3c_master_controller *m = i2c_dev_get_master(dev); 1142 struct dw_i3c_master *master = to_dw_i3c_master(m); 1143 struct dw_i3c_i2c_dev_data *data; 1144 int pos; 1145 1146 pos = dw_i3c_master_get_free_pos(master); 1147 if (pos < 0) 1148 return pos; 1149 1150 data = kzalloc(sizeof(*data), GFP_KERNEL); 1151 if (!data) 1152 return -ENOMEM; 1153 1154 data->index = pos; 1155 master->devs[pos].addr = dev->addr; 1156 master->devs[pos].is_i2c_addr = true; 1157 master->free_pos &= ~BIT(pos); 1158 i2c_dev_set_master_data(dev, data); 1159 1160 writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV | 1161 DEV_ADDR_TABLE_STATIC_ADDR(dev->addr), 1162 master->regs + 1163 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1164 1165 return 0; 1166 } 1167 1168 static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev) 1169 { 1170 struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev); 1171 struct i3c_master_controller *m = i2c_dev_get_master(dev); 1172 struct 
dw_i3c_master *master = to_dw_i3c_master(m); 1173 1174 writel(0, 1175 master->regs + 1176 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1177 1178 i2c_dev_set_master_data(dev, NULL); 1179 master->devs[data->index].addr = 0; 1180 master->free_pos |= BIT(data->index); 1181 kfree(data); 1182 } 1183 1184 static int dw_i3c_master_request_ibi(struct i3c_dev_desc *dev, 1185 const struct i3c_ibi_setup *req) 1186 { 1187 struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1188 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1189 struct dw_i3c_master *master = to_dw_i3c_master(m); 1190 unsigned long flags; 1191 1192 data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req); 1193 if (IS_ERR(data->ibi_pool)) 1194 return PTR_ERR(data->ibi_pool); 1195 1196 spin_lock_irqsave(&master->devs_lock, flags); 1197 master->devs[data->index].ibi_dev = dev; 1198 spin_unlock_irqrestore(&master->devs_lock, flags); 1199 1200 return 0; 1201 } 1202 1203 static void dw_i3c_master_free_ibi(struct i3c_dev_desc *dev) 1204 { 1205 struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1206 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1207 struct dw_i3c_master *master = to_dw_i3c_master(m); 1208 unsigned long flags; 1209 1210 spin_lock_irqsave(&master->devs_lock, flags); 1211 master->devs[data->index].ibi_dev = NULL; 1212 spin_unlock_irqrestore(&master->devs_lock, flags); 1213 1214 i3c_generic_ibi_free_pool(data->ibi_pool); 1215 data->ibi_pool = NULL; 1216 } 1217 1218 static void dw_i3c_master_enable_sir_signal(struct dw_i3c_master *master, bool enable) 1219 { 1220 u32 reg; 1221 1222 reg = readl(master->regs + INTR_STATUS_EN); 1223 reg &= ~INTR_IBI_THLD_STAT; 1224 if (enable) 1225 reg |= INTR_IBI_THLD_STAT; 1226 writel(reg, master->regs + INTR_STATUS_EN); 1227 1228 reg = readl(master->regs + INTR_SIGNAL_EN); 1229 reg &= ~INTR_IBI_THLD_STAT; 1230 if (enable) 1231 reg |= INTR_IBI_THLD_STAT; 1232 writel(reg, master->regs + INTR_SIGNAL_EN); 1233 } 1234 
1235 static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master, 1236 struct i3c_dev_desc *dev, 1237 u8 idx, bool enable) 1238 { 1239 unsigned long flags; 1240 u32 dat_entry, reg; 1241 bool global; 1242 1243 dat_entry = DEV_ADDR_TABLE_LOC(master->datstartaddr, idx); 1244 1245 spin_lock_irqsave(&master->devs_lock, flags); 1246 reg = readl(master->regs + dat_entry); 1247 if (enable) { 1248 reg &= ~DEV_ADDR_TABLE_SIR_REJECT; 1249 if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) 1250 reg |= DEV_ADDR_TABLE_IBI_MDB; 1251 } else { 1252 reg |= DEV_ADDR_TABLE_SIR_REJECT; 1253 } 1254 master->platform_ops->set_dat_ibi(master, dev, enable, ®); 1255 writel(reg, master->regs + dat_entry); 1256 1257 if (enable) { 1258 global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL); 1259 master->sir_rej_mask &= ~BIT(idx); 1260 } else { 1261 bool hj_rejected = !!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_HOT_JOIN_NACK); 1262 1263 master->sir_rej_mask |= BIT(idx); 1264 global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL) && hj_rejected; 1265 } 1266 writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT); 1267 1268 if (global) 1269 dw_i3c_master_enable_sir_signal(master, enable); 1270 1271 1272 spin_unlock_irqrestore(&master->devs_lock, flags); 1273 } 1274 1275 static int dw_i3c_master_enable_hotjoin(struct i3c_master_controller *m) 1276 { 1277 struct dw_i3c_master *master = to_dw_i3c_master(m); 1278 int ret; 1279 1280 ret = pm_runtime_resume_and_get(master->dev); 1281 if (ret < 0) { 1282 dev_err(master->dev, 1283 "<%s> cannot resume i3c bus master, err: %d\n", 1284 __func__, ret); 1285 return ret; 1286 } 1287 1288 dw_i3c_master_enable_sir_signal(master, true); 1289 writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_HOT_JOIN_NACK, 1290 master->regs + DEVICE_CTRL); 1291 1292 return 0; 1293 } 1294 1295 static int dw_i3c_master_disable_hotjoin(struct i3c_master_controller *m) 1296 { 1297 struct dw_i3c_master *master = to_dw_i3c_master(m); 1298 1299 writel(readl(master->regs + 
DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK, 1300 master->regs + DEVICE_CTRL); 1301 1302 pm_runtime_put_autosuspend(master->dev); 1303 return 0; 1304 } 1305 1306 static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev) 1307 { 1308 struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1309 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1310 struct dw_i3c_master *master = to_dw_i3c_master(m); 1311 int rc; 1312 1313 rc = pm_runtime_resume_and_get(master->dev); 1314 if (rc < 0) { 1315 dev_err(master->dev, 1316 "<%s> cannot resume i3c bus master, err: %d\n", 1317 __func__, rc); 1318 return rc; 1319 } 1320 1321 dw_i3c_master_set_sir_enabled(master, dev, data->index, true); 1322 1323 rc = i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); 1324 1325 if (rc) { 1326 dw_i3c_master_set_sir_enabled(master, dev, data->index, false); 1327 pm_runtime_put_autosuspend(master->dev); 1328 } 1329 1330 return rc; 1331 } 1332 1333 static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev) 1334 { 1335 struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1336 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1337 struct dw_i3c_master *master = to_dw_i3c_master(m); 1338 int rc; 1339 1340 rc = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); 1341 if (rc) 1342 return rc; 1343 1344 dw_i3c_master_set_sir_enabled(master, dev, data->index, false); 1345 1346 pm_runtime_put_autosuspend(master->dev); 1347 return 0; 1348 } 1349 1350 static void dw_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev, 1351 struct i3c_ibi_slot *slot) 1352 { 1353 struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); 1354 1355 i3c_generic_ibi_recycle_slot(data->ibi_pool, slot); 1356 } 1357 1358 static void dw_i3c_master_drain_ibi_queue(struct dw_i3c_master *master, 1359 int len) 1360 { 1361 int i; 1362 1363 for (i = 0; i < DIV_ROUND_UP(len, 4); i++) 1364 readl(master->regs + IBI_QUEUE_STATUS); 1365 } 1366 1367 static void 
dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master, 1368 u32 status) 1369 { 1370 struct dw_i3c_i2c_dev_data *data; 1371 struct i3c_ibi_slot *slot; 1372 struct i3c_dev_desc *dev; 1373 unsigned long flags; 1374 u8 addr, len; 1375 int idx; 1376 1377 addr = IBI_QUEUE_IBI_ADDR(status); 1378 len = IBI_QUEUE_STATUS_DATA_LEN(status); 1379 1380 /* 1381 * We be tempted to check the error status in bit 30; however, due 1382 * to the PEC errata workaround on some platform implementations (see 1383 * ast2600_i3c_set_dat_ibi()), those will almost always have a PEC 1384 * error on IBI payload data, as well as losing the last byte of 1385 * payload. 1386 * 1387 * If we implement error status checking on that bit, we may need 1388 * a new platform op to validate it. 1389 */ 1390 1391 spin_lock_irqsave(&master->devs_lock, flags); 1392 idx = dw_i3c_master_get_addr_pos(master, addr); 1393 if (idx < 0) { 1394 dev_dbg_ratelimited(&master->base.dev, 1395 "IBI from unknown addr 0x%x\n", addr); 1396 goto err_drain; 1397 } 1398 1399 dev = master->devs[idx].ibi_dev; 1400 if (!dev || !dev->ibi) { 1401 dev_dbg_ratelimited(&master->base.dev, 1402 "IBI from non-requested dev idx %d\n", idx); 1403 goto err_drain; 1404 } 1405 1406 data = i3c_dev_get_master_data(dev); 1407 slot = i3c_generic_ibi_get_free_slot(data->ibi_pool); 1408 if (!slot) { 1409 dev_dbg_ratelimited(&master->base.dev, 1410 "No IBI slots available\n"); 1411 goto err_drain; 1412 } 1413 1414 if (dev->ibi->max_payload_len < len) { 1415 dev_dbg_ratelimited(&master->base.dev, 1416 "IBI payload len %d greater than max %d\n", 1417 len, dev->ibi->max_payload_len); 1418 goto err_drain; 1419 } 1420 1421 if (len) { 1422 dw_i3c_master_read_ibi_fifo(master, slot->data, len); 1423 slot->len = len; 1424 } 1425 i3c_master_queue_ibi(dev, slot); 1426 1427 spin_unlock_irqrestore(&master->devs_lock, flags); 1428 1429 return; 1430 1431 err_drain: 1432 dw_i3c_master_drain_ibi_queue(master, len); 1433 1434 
spin_unlock_irqrestore(&master->devs_lock, flags); 1435 } 1436 1437 /* "ibis": referring to In-Band Interrupts, and not 1438 * https://en.wikipedia.org/wiki/Australian_white_ibis. The latter should 1439 * not be handled. 1440 */ 1441 static void dw_i3c_master_irq_handle_ibis(struct dw_i3c_master *master) 1442 { 1443 unsigned int i, len, n_ibis; 1444 u32 reg; 1445 1446 reg = readl(master->regs + QUEUE_STATUS_LEVEL); 1447 n_ibis = QUEUE_STATUS_IBI_STATUS_CNT(reg); 1448 if (!n_ibis) 1449 return; 1450 1451 for (i = 0; i < n_ibis; i++) { 1452 reg = readl(master->regs + IBI_QUEUE_STATUS); 1453 1454 if (IBI_TYPE_SIRQ(reg)) { 1455 dw_i3c_master_handle_ibi_sir(master, reg); 1456 } else if (IBI_TYPE_HJ(reg)) { 1457 queue_work(master->base.wq, &master->hj_work); 1458 } else { 1459 len = IBI_QUEUE_STATUS_DATA_LEN(reg); 1460 dev_info(&master->base.dev, 1461 "unsupported IBI type 0x%lx len %d\n", 1462 IBI_QUEUE_STATUS_IBI_ID(reg), len); 1463 dw_i3c_master_drain_ibi_queue(master, len); 1464 } 1465 } 1466 } 1467 1468 static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id) 1469 { 1470 struct dw_i3c_master *master = dev_id; 1471 u32 status; 1472 1473 status = readl(master->regs + INTR_STATUS); 1474 1475 if (!(status & readl(master->regs + INTR_STATUS_EN))) { 1476 writel(INTR_ALL, master->regs + INTR_STATUS); 1477 return IRQ_NONE; 1478 } 1479 1480 spin_lock(&master->xferqueue.lock); 1481 dw_i3c_master_end_xfer_locked(master, status); 1482 if (status & INTR_TRANSFER_ERR_STAT) 1483 writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS); 1484 spin_unlock(&master->xferqueue.lock); 1485 1486 if (status & INTR_IBI_THLD_STAT) 1487 dw_i3c_master_irq_handle_ibis(master); 1488 1489 return IRQ_HANDLED; 1490 } 1491 1492 static const struct i3c_master_controller_ops dw_mipi_i3c_ops = { 1493 .bus_init = dw_i3c_master_bus_init, 1494 .bus_cleanup = dw_i3c_master_bus_cleanup, 1495 .attach_i3c_dev = dw_i3c_master_attach_i3c_dev, 1496 .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev, 
1497 .detach_i3c_dev = dw_i3c_master_detach_i3c_dev, 1498 .do_daa = dw_i3c_master_daa, 1499 .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd, 1500 .send_ccc_cmd = dw_i3c_master_send_ccc_cmd, 1501 .i3c_xfers = dw_i3c_master_i3c_xfers, 1502 .attach_i2c_dev = dw_i3c_master_attach_i2c_dev, 1503 .detach_i2c_dev = dw_i3c_master_detach_i2c_dev, 1504 .i2c_xfers = dw_i3c_master_i2c_xfers, 1505 .request_ibi = dw_i3c_master_request_ibi, 1506 .free_ibi = dw_i3c_master_free_ibi, 1507 .enable_ibi = dw_i3c_master_enable_ibi, 1508 .disable_ibi = dw_i3c_master_disable_ibi, 1509 .recycle_ibi_slot = dw_i3c_master_recycle_ibi_slot, 1510 .enable_hotjoin = dw_i3c_master_enable_hotjoin, 1511 .disable_hotjoin = dw_i3c_master_disable_hotjoin, 1512 }; 1513 1514 /* default platform ops implementations */ 1515 static int dw_i3c_platform_init_nop(struct dw_i3c_master *i3c) 1516 { 1517 return 0; 1518 } 1519 1520 static void dw_i3c_platform_set_dat_ibi_nop(struct dw_i3c_master *i3c, 1521 struct i3c_dev_desc *dev, 1522 bool enable, u32 *dat) 1523 { 1524 } 1525 1526 static const struct dw_i3c_platform_ops dw_i3c_platform_ops_default = { 1527 .init = dw_i3c_platform_init_nop, 1528 .set_dat_ibi = dw_i3c_platform_set_dat_ibi_nop, 1529 }; 1530 1531 static void dw_i3c_hj_work(struct work_struct *work) 1532 { 1533 struct dw_i3c_master *master = 1534 container_of(work, typeof(*master), hj_work); 1535 1536 i3c_master_do_daa(&master->base); 1537 } 1538 1539 int dw_i3c_common_probe(struct dw_i3c_master *master, 1540 struct platform_device *pdev) 1541 { 1542 int ret, irq; 1543 const struct dw_i3c_drvdata *drvdata; 1544 unsigned long quirks = 0; 1545 1546 if (!master->platform_ops) 1547 master->platform_ops = &dw_i3c_platform_ops_default; 1548 1549 master->dev = &pdev->dev; 1550 1551 master->regs = devm_platform_ioremap_resource(pdev, 0); 1552 if (IS_ERR(master->regs)) 1553 return PTR_ERR(master->regs); 1554 1555 master->core_clk = devm_clk_get_enabled(&pdev->dev, NULL); 1556 if (IS_ERR(master->core_clk)) 
1557 return PTR_ERR(master->core_clk); 1558 1559 master->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk"); 1560 if (IS_ERR(master->pclk)) 1561 return PTR_ERR(master->pclk); 1562 1563 master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev, 1564 "core_rst"); 1565 if (IS_ERR(master->core_rst)) 1566 return PTR_ERR(master->core_rst); 1567 1568 reset_control_deassert(master->core_rst); 1569 1570 spin_lock_init(&master->xferqueue.lock); 1571 INIT_LIST_HEAD(&master->xferqueue.list); 1572 1573 writel(INTR_ALL, master->regs + INTR_STATUS); 1574 irq = platform_get_irq(pdev, 0); 1575 ret = devm_request_irq(&pdev->dev, irq, 1576 dw_i3c_master_irq_handler, 0, 1577 dev_name(&pdev->dev), master); 1578 if (ret) 1579 goto err_assert_rst; 1580 1581 platform_set_drvdata(pdev, master); 1582 1583 pm_runtime_set_autosuspend_delay(&pdev->dev, RPM_AUTOSUSPEND_TIMEOUT); 1584 pm_runtime_use_autosuspend(&pdev->dev); 1585 pm_runtime_set_active(&pdev->dev); 1586 pm_runtime_enable(&pdev->dev); 1587 1588 /* Information regarding the FIFOs/QUEUEs depth */ 1589 ret = readl(master->regs + QUEUE_STATUS_LEVEL); 1590 master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret); 1591 1592 ret = readl(master->regs + DATA_BUFFER_STATUS_LEVEL); 1593 master->caps.datafifodepth = DATA_BUFFER_STATUS_LEVEL_TX(ret); 1594 1595 ret = readl(master->regs + DEVICE_ADDR_TABLE_POINTER); 1596 master->datstartaddr = ret; 1597 master->maxdevs = ret >> 16; 1598 master->free_pos = GENMASK(master->maxdevs - 1, 0); 1599 1600 if (has_acpi_companion(&pdev->dev)) { 1601 quirks = (unsigned long)device_get_match_data(&pdev->dev); 1602 } else if (pdev->dev.of_node) { 1603 drvdata = device_get_match_data(&pdev->dev); 1604 if (drvdata) 1605 quirks = drvdata->flags; 1606 } 1607 master->quirks = quirks; 1608 1609 /* Keep controller enabled by preventing runtime suspend */ 1610 if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK) 1611 pm_runtime_get_noresume(&pdev->dev); 1612 1613 INIT_WORK(&master->hj_work, 
dw_i3c_hj_work); 1614 ret = i3c_master_register(&master->base, &pdev->dev, 1615 &dw_mipi_i3c_ops, false); 1616 if (ret) 1617 goto err_disable_pm; 1618 1619 return 0; 1620 1621 err_disable_pm: 1622 pm_runtime_disable(&pdev->dev); 1623 pm_runtime_set_suspended(&pdev->dev); 1624 pm_runtime_dont_use_autosuspend(&pdev->dev); 1625 1626 err_assert_rst: 1627 reset_control_assert(master->core_rst); 1628 1629 return ret; 1630 } 1631 EXPORT_SYMBOL_GPL(dw_i3c_common_probe); 1632 1633 void dw_i3c_common_remove(struct dw_i3c_master *master) 1634 { 1635 cancel_work_sync(&master->hj_work); 1636 i3c_master_unregister(&master->base); 1637 1638 /* Balance pm_runtime_get_noresume() from probe() */ 1639 if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK) 1640 pm_runtime_put_noidle(master->dev); 1641 1642 pm_runtime_disable(master->dev); 1643 pm_runtime_set_suspended(master->dev); 1644 pm_runtime_dont_use_autosuspend(master->dev); 1645 } 1646 EXPORT_SYMBOL_GPL(dw_i3c_common_remove); 1647 1648 /* base platform implementation */ 1649 1650 static int dw_i3c_probe(struct platform_device *pdev) 1651 { 1652 struct dw_i3c_master *master; 1653 1654 master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL); 1655 if (!master) 1656 return -ENOMEM; 1657 1658 return dw_i3c_common_probe(master, pdev); 1659 } 1660 1661 static void dw_i3c_remove(struct platform_device *pdev) 1662 { 1663 struct dw_i3c_master *master = platform_get_drvdata(pdev); 1664 1665 dw_i3c_common_remove(master); 1666 } 1667 1668 static void dw_i3c_master_restore_addrs(struct dw_i3c_master *master) 1669 { 1670 u32 pos, reg_val; 1671 1672 writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(master->dev_addr), 1673 master->regs + DEVICE_ADDR); 1674 1675 for (pos = 0; pos < master->maxdevs; pos++) { 1676 if (master->free_pos & BIT(pos)) 1677 continue; 1678 1679 if (master->devs[pos].is_i2c_addr) 1680 reg_val = DEV_ADDR_TABLE_LEGACY_I2C_DEV | 1681 DEV_ADDR_TABLE_STATIC_ADDR(master->devs[pos].addr); 1682 else 1683 reg_val = 
DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr); 1684 1685 writel(reg_val, master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, pos)); 1686 } 1687 } 1688 1689 static void dw_i3c_master_restore_timing_regs(struct dw_i3c_master *master) 1690 { 1691 /* AMD platform specific OD and PP timings */ 1692 if (master->quirks & AMD_I3C_OD_PP_TIMING) 1693 amd_configure_od_pp_quirk(master); 1694 1695 writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING); 1696 writel(master->bus_free_timing, master->regs + BUS_FREE_TIMING); 1697 writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING); 1698 writel(master->ext_lcnt_timing, master->regs + SCL_EXT_LCNT_TIMING); 1699 1700 if (master->i2c_slv_prsnt) { 1701 writel(master->i2c_fmp_timing, master->regs + SCL_I2C_FMP_TIMING); 1702 writel(master->i2c_fm_timing, master->regs + SCL_I2C_FM_TIMING); 1703 } 1704 } 1705 1706 static int dw_i3c_master_enable_clks(struct dw_i3c_master *master) 1707 { 1708 int ret = 0; 1709 1710 ret = clk_prepare_enable(master->core_clk); 1711 if (ret) 1712 return ret; 1713 1714 ret = clk_prepare_enable(master->pclk); 1715 if (ret) { 1716 clk_disable_unprepare(master->core_clk); 1717 return ret; 1718 } 1719 1720 return 0; 1721 } 1722 1723 static inline void dw_i3c_master_disable_clks(struct dw_i3c_master *master) 1724 { 1725 clk_disable_unprepare(master->pclk); 1726 clk_disable_unprepare(master->core_clk); 1727 } 1728 1729 static int __maybe_unused dw_i3c_master_runtime_suspend(struct device *dev) 1730 { 1731 struct dw_i3c_master *master = dev_get_drvdata(dev); 1732 1733 dw_i3c_master_disable(master); 1734 1735 reset_control_assert(master->core_rst); 1736 dw_i3c_master_disable_clks(master); 1737 pinctrl_pm_select_sleep_state(dev); 1738 return 0; 1739 } 1740 1741 static int __maybe_unused dw_i3c_master_runtime_resume(struct device *dev) 1742 { 1743 struct dw_i3c_master *master = dev_get_drvdata(dev); 1744 1745 pinctrl_pm_select_default_state(dev); 1746 dw_i3c_master_enable_clks(master); 1747 
reset_control_deassert(master->core_rst); 1748 1749 dw_i3c_master_set_intr_regs(master); 1750 dw_i3c_master_restore_timing_regs(master); 1751 dw_i3c_master_restore_addrs(master); 1752 1753 dw_i3c_master_enable(master); 1754 return 0; 1755 } 1756 1757 static const struct dev_pm_ops dw_i3c_pm_ops = { 1758 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 1759 SET_RUNTIME_PM_OPS(dw_i3c_master_runtime_suspend, dw_i3c_master_runtime_resume, NULL) 1760 }; 1761 1762 static void dw_i3c_shutdown(struct platform_device *pdev) 1763 { 1764 struct dw_i3c_master *master = platform_get_drvdata(pdev); 1765 int ret; 1766 1767 ret = pm_runtime_resume_and_get(master->dev); 1768 if (ret < 0) { 1769 dev_err(master->dev, 1770 "<%s> cannot resume i3c bus master, err: %d\n", 1771 __func__, ret); 1772 return; 1773 } 1774 1775 cancel_work_sync(&master->hj_work); 1776 1777 /* Disable interrupts */ 1778 writel((u32)~INTR_ALL, master->regs + INTR_STATUS_EN); 1779 writel((u32)~INTR_ALL, master->regs + INTR_SIGNAL_EN); 1780 1781 pm_runtime_put_autosuspend(master->dev); 1782 } 1783 1784 static const struct dw_i3c_drvdata altr_agilex5_drvdata = { 1785 .flags = DW_I3C_DISABLE_RUNTIME_PM_QUIRK, 1786 }; 1787 1788 static const struct of_device_id dw_i3c_master_of_match[] = { 1789 { .compatible = "snps,dw-i3c-master-1.00a", }, 1790 { .compatible = "altr,agilex5-dw-i3c-master", 1791 .data = &altr_agilex5_drvdata, 1792 }, 1793 {}, 1794 }; 1795 MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match); 1796 1797 static const struct acpi_device_id amd_i3c_device_match[] = { 1798 { "AMDI0015", AMD_I3C_OD_PP_TIMING }, 1799 { } 1800 }; 1801 MODULE_DEVICE_TABLE(acpi, amd_i3c_device_match); 1802 1803 static struct platform_driver dw_i3c_driver = { 1804 .probe = dw_i3c_probe, 1805 .remove = dw_i3c_remove, 1806 .shutdown = dw_i3c_shutdown, 1807 .driver = { 1808 .name = "dw-i3c-master", 1809 .of_match_table = dw_i3c_master_of_match, 1810 .acpi_match_table = amd_i3c_device_match, 1811 .pm = 
&dw_i3c_pm_ops, 1812 }, 1813 }; 1814 module_platform_driver(dw_i3c_driver); 1815 1816 MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>"); 1817 MODULE_DESCRIPTION("DesignWare MIPI I3C driver"); 1818 MODULE_LICENSE("GPL v2"); 1819