// SPDX-License-Identifier: GPL-2.0
/*
 * Silvaco dual-role I3C master driver
 *
 * Copyright (C) 2020 Silvaco
 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Master Mode Registers */
#define SVC_I3C_MCONFIG 0x000
#define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
#define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
#define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
#define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
#define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
#define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
#define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
#define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
#define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
#define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))

#define SVC_I3C_MCTRL 0x084
#define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
#define SVC_I3C_MCTRL_REQUEST_NONE 0
#define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
#define SVC_I3C_MCTRL_REQUEST_STOP 2
#define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
#define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
#define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
#define SVC_I3C_MCTRL_TYPE_I3C 0
#define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
#define SVC_I3C_MCTRL_IBIRESP_AUTO 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
#define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
#define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
#define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
#define SVC_I3C_MCTRL_DIR_WRITE 0
#define SVC_I3C_MCTRL_DIR_READ 1
#define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
#define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
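
/*
 * Illustration only, the address and length below are made up: a private-read
 * START request combines the MCTRL fields above into a single register write,
 * as done by svc_i3c_master_xfer() further down, e.g.:
 *
 *   writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | SVC_I3C_MCTRL_TYPE_I3C |
 *          SVC_I3C_MCTRL_IBIRESP_NACK |
 *          SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_READ) |
 *          SVC_I3C_MCTRL_ADDR(0x09) | SVC_I3C_MCTRL_RDTERM(16),
 *          master->regs + SVC_I3C_MCTRL);
 *
 * where 0x09 would be the target's dynamic address and RDTERM terminates the
 * read after 16 bytes.
 */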

#define SVC_I3C_MSTATUS 0x088
#define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
#define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
#define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
#define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
#define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
#define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
#define SVC_I3C_MSTATUS_IBITYPE_IBI 1
#define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
#define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
#define SVC_I3C_MINT_SLVSTART BIT(8)
#define SVC_I3C_MINT_MCTRLDONE BIT(9)
#define SVC_I3C_MINT_COMPLETE BIT(10)
#define SVC_I3C_MINT_RXPEND BIT(11)
#define SVC_I3C_MINT_TXNOTFULL BIT(12)
#define SVC_I3C_MINT_IBIWON BIT(13)
#define SVC_I3C_MINT_ERRWARN BIT(15)
#define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
#define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
#define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
#define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
#define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
#define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
#define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
#define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))

#define SVC_I3C_IBIRULES 0x08C
#define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
						     ((addr) & 0x3F) << ((slot) * 6))
#define SVC_I3C_IBIRULES_ADDRS 5
#define SVC_I3C_IBIRULES_MSB0 BIT(30)
#define SVC_I3C_IBIRULES_NOBYTE BIT(31)
#define SVC_I3C_IBIRULES_MANDBYTE 0
#define SVC_I3C_MINTSET 0x090
#define SVC_I3C_MINTCLR 0x094
#define SVC_I3C_MINTMASKED 0x098
#define SVC_I3C_MERRWARN 0x09C
#define SVC_I3C_MERRWARN_NACK BIT(2)
#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
#define SVC_I3C_MDMACTRL 0x0A0
#define SVC_I3C_MDATACTRL 0x0AC
#define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
#define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
#define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
#define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
#define SVC_I3C_MDATACTRL_TXFULL BIT(30)
#define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)

#define SVC_I3C_MWDATAB 0x0B0
#define SVC_I3C_MWDATAB_END BIT(8)

#define SVC_I3C_MWDATABE 0x0B4
#define SVC_I3C_MWDATAH 0x0B8
#define SVC_I3C_MWDATAHE 0x0BC
#define SVC_I3C_MRDATAB 0x0C0
#define SVC_I3C_MRDATAH 0x0C8
#define SVC_I3C_MWMSG_SDR 0x0D0
#define SVC_I3C_MRMSG_SDR 0x0D4
#define SVC_I3C_MWMSG_DDR 0x0D8
#define SVC_I3C_MRMSG_DDR 0x0DC

#define SVC_I3C_MDYNADDR 0x0E4
#define SVC_MDYNADDR_VALID BIT(0)
#define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))

#define SVC_I3C_MAX_DEVS 32
#define SVC_I3C_PM_TIMEOUT_MS 1000

/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16

#define SVC_I3C_EVENT_IBI BIT(0)
#define SVC_I3C_EVENT_HOTJOIN BIT(1)

struct svc_i3c_cmd {
	u8 addr;
	bool rnw;
	u8 *in;
	const void *out;
	unsigned int len;
	unsigned int actual_len;
	struct i3c_priv_xfer *xfer;
	bool continued;
};

struct svc_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int type;
	unsigned int ncmds;
	struct svc_i3c_cmd cmds[] __counted_by(ncmds);
};

struct svc_i3c_regs_save {
	u32 mconfig;
	u32 mdynaddr;
};

/**
 * struct svc_i3c_master - Silvaco I3C Master structure
 * @base: I3C master controller
 * @dev: Corresponding device
 * @regs: Memory mapping
 * @saved_regs: Volatile values for PM operations
 * @free_slots: Bit array of available slots
 * @addrs: Array containing the dynamic addresses of each attached device
 * @descs: Array of descriptors, one per attached device
 * @hj_work: Hot-join work
 * @ibi_work: IBI work
 * @irq: Main interrupt
 * @pclk: System clock
 * @fclk: Fast clock (bus)
 * @sclk: Slow clock (other events)
 * @xferqueue: Transfer queue structure
 * @xferqueue.list: List member
 * @xferqueue.cur: Current ongoing transfer
 * @xferqueue.lock: Queue lock
 * @ibi: IBI structure
 * @ibi.num_slots: Number of slots available in @ibi.slots
 * @ibi.slots: Available IBI slots
 * @ibi.tbq_slot: To be queued IBI slot
 * @ibi.lock: IBI lock
 * @lock: Transfer lock, protects against races between the IBI work thread
 *	  and callbacks from the master
 * @enabled_events: Bit mask of enabled events (IBI, HotJoin).
 */
struct svc_i3c_master {
	struct i3c_master_controller base;
	struct device *dev;
	void __iomem *regs;
	struct svc_i3c_regs_save saved_regs;
	u32 free_slots;
	u8 addrs[SVC_I3C_MAX_DEVS];
	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
	struct work_struct hj_work;
	struct work_struct ibi_work;
	int irq;
	struct clk *pclk;
	struct clk *fclk;
	struct clk *sclk;
	struct {
		struct list_head list;
		struct svc_i3c_xfer *cur;
		/* Prevent races between transfers */
		spinlock_t lock;
	} xferqueue;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		struct i3c_ibi_slot *tbq_slot;
		/* Prevent races within IBI handlers */
		spinlock_t lock;
	} ibi;
	struct mutex lock;
	int enabled_events;
};

/**
 * struct svc_i3c_i2c_dev_data - Device specific data
 * @index: Index in the master tables corresponding to this device
 * @ibi: IBI slot index in the master structure
 * @ibi_pool: IBI pool associated to this device
 */
struct svc_i3c_i2c_dev_data {
	u8 index;
	int ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};

static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
{
	return !!(master->enabled_events & mask);
}

static bool svc_i3c_master_error(struct svc_i3c_master *master)
{
	u32 mstatus, merrwarn;

	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);

		/* Ignore timeout error */
		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
				mstatus, merrwarn);
			return false;
		}

		dev_err(master->dev,
			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
			mstatus, merrwarn);

		return true;
	}

	return false;
}

static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
{
	writel(mask, master->regs + SVC_I3C_MINTSET);
}

static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
{
	u32 mask = readl(master->regs + SVC_I3C_MINTSET);

	writel(mask, master->regs + SVC_I3C_MINTCLR);
}

static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
{
	/* Clear pending warnings */
	writel(readl(master->regs + SVC_I3C_MERRWARN),
	       master->regs + SVC_I3C_MERRWARN);
}

static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
{
	/* Flush FIFOs */
	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
	       master->regs + SVC_I3C_MDATACTRL);
}

static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
{
	u32 reg;

	/* Set RX and TX trigger levels, flush FIFOs */
	reg = SVC_I3C_MDATACTRL_FLUSHTB |
	      SVC_I3C_MDATACTRL_FLUSHRB |
	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
	writel(reg, master->regs + SVC_I3C_MDATACTRL);
}

static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_reset_fifo_trigger(master);
	svc_i3c_master_disable_interrupts(master);
}

static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct svc_i3c_master, base);
}

static void svc_i3c_master_hj_work(struct work_struct *work)
{
	struct svc_i3c_master *master;

	master = container_of(work, struct svc_i3c_master, hj_work);
	i3c_master_do_daa(&master->base);
}

static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
			     unsigned int ibiaddr)
{
	int i;

	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
		if (master->addrs[i] == ibiaddr)
			break;

	if (i == SVC_I3C_MAX_DEVS)
		return NULL;

	return master->descs[i];
}

static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);

	/*
	 * This delay is necessary after the emission of a stop, otherwise eg.
	 * repeating IBIs do not get detected. There is a note in the manual
	 * about it, stating that the stop condition might not be settled
	 * correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}

static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
				     struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_ibi_slot *slot;
	unsigned int count;
	u32 mdatactrl;
	int ret, val;
	u8 *buf;

	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		return -ENOSPC;

	slot->len = 0;
	buf = slot->data;

	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
		return ret;
	}

	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
	       slot->len < SVC_I3C_FIFO_SIZE) {
		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
		readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
		slot->len += count;
		buf += count;
	}

	master->ibi.tbq_slot = slot;

	return 0;
}

static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
				   bool mandatory_byte)
{
	unsigned int ibi_ack_nack;

	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
	if (mandatory_byte)
		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
	else
		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;

	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
}

static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
	       SVC_I3C_MCTRL_IBIRESP_NACK,
	       master->regs + SVC_I3C_MCTRL);
}

static void svc_i3c_master_ibi_work(struct work_struct *work)
{
	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
	struct svc_i3c_i2c_dev_data *data;
	unsigned int ibitype, ibiaddr;
	struct i3c_dev_desc *dev;
	u32 status, val;
	int ret;

	mutex_lock(&master->lock);
	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
	       SVC_I3C_MCTRL_IBIRESP_AUTO,
	       master->regs + SVC_I3C_MCTRL);

	/* Wait for IBIWON, should take approximately 100us */
	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for IBIWON\n");
		svc_i3c_master_emit_stop(master);
		goto reenable_ibis;
	}

	/* Clear the interrupt status */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	status = readl(master->regs + SVC_I3C_MSTATUS);
	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);

	/* Handle the critical responses to IBI's */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
			svc_i3c_master_nack_ibi(master);
		else
			svc_i3c_master_handle_ibi(master, dev);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			svc_i3c_master_ack_ibi(master, false);
		else
			svc_i3c_master_nack_ibi(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_nack_ibi(master);
		break;
	default:
		break;
	}

	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timed out. In this case we just drop everything, emit a stop and wait
	 * for the slave to interrupt again.
	 */
	if (svc_i3c_master_error(master)) {
		if (master->ibi.tbq_slot) {
			data = i3c_dev_get_master_data(dev);
			i3c_generic_ibi_recycle_slot(data->ibi_pool,
						     master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}

		svc_i3c_master_emit_stop(master);

		goto reenable_ibis;
	}

	/* Handle the non critical tasks */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		if (dev) {
			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}
		svc_i3c_master_emit_stop(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		svc_i3c_master_emit_stop(master);
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			queue_work(master->base.wq, &master->hj_work);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
	default:
		break;
	}

reenable_ibis:
	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
	mutex_unlock(&master->lock);
}

static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
{
	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
	u32 active = readl(master->regs + SVC_I3C_MSTATUS);

	if (!SVC_I3C_MSTATUS_SLVSTART(active))
		return IRQ_NONE;

	/* Clear the interrupt status */
	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);

	svc_i3c_master_disable_interrupts(master);

	/* Handle the interrupt in a non atomic context */
	queue_work(master->base.wq, &master->ibi_work);

	return IRQ_HANDLED;
}

static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = {};
	unsigned long fclk_rate, fclk_period_ns;
	unsigned int high_period_ns, od_low_period_ns;
	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	/* Timings derivation */
	fclk_rate = clk_get_rate(master->fclk);
	if (!fclk_rate) {
		ret = -EINVAL;
		goto rpm_out;
	}

	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);

	/*
	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
	 * Simplest configuration is using a 50% duty-cycle of 40ns.
	 */
	ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
	pplow = 0;

	/*
	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
	 * duty-cycle tuned so that high levels are filtered out by
	 * the 50ns filter (target being 40ns).
	 */
	odhpp = 1;
	high_period_ns = (ppbaud + 1) * fclk_period_ns;
	odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
	od_low_period_ns = (odbaud + 1) * high_period_ns;
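
	/*
	 * Worked example, purely illustrative (the fclk rate is assumed, not
	 * taken from any particular SoC): with fclk = 100MHz we get
	 * fclk_period_ns = 10, so
	 *   ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3  -> 40ns high, 80ns period
	 *                                             (12.5MHz push-pull)
	 *   odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4
	 *   od_low_period_ns = (4 + 1) * 40 = 200  -> 240ns period
	 *                                             (~4.17MHz open-drain)
	 * and, in the I2C Fm+ case below,
	 *   i2cbaud = DIV_ROUND_UP(1000, 200) - 2 = 3.
	 */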

	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		i2cbaud = 0;
		odstop = 0;
		break;
	case I3C_BUS_MODE_MIXED_FAST:
	case I3C_BUS_MODE_MIXED_LIMITED:
		/*
		 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
		 * between the high and low period does not really matter.
		 */
		i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
		odstop = 1;
		break;
	case I3C_BUS_MODE_MIXED_SLOW:
		/*
		 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
		 * constraints as the FM+ mode.
		 */
		i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
		odstop = 1;
		break;
	default:
		goto rpm_out;
	}

	reg = SVC_I3C_MCONFIG_MASTER_EN |
	      SVC_I3C_MCONFIG_DISTO(0) |
	      SVC_I3C_MCONFIG_HKEEP(0) |
	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
	      SVC_I3C_MCONFIG_PPLOW(pplow) |
	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
	      SVC_I3C_MCONFIG_SKEW(0) |
	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
	writel(reg, master->regs + SVC_I3C_MCONFIG);

	/* Master core's registration */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		goto rpm_out;

	info.dyn_addr = ret;

	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
	       master->regs + SVC_I3C_MDYNADDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		goto rpm_out;

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	svc_i3c_master_disable_interrupts(master);

	/* Disable master */
	writel(0, master->regs + SVC_I3C_MCONFIG);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
}

static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
{
	unsigned int slot;

	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
		return -ENOSPC;

	slot = ffs(master->free_slots) - 1;

	master->free_slots &= ~BIT(slot);

	return slot;
}

static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
					unsigned int slot)
{
	master->free_slots |= BIT(slot);
}

static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data;
	int slot;

	slot = svc_i3c_master_reserve_slot(master);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		svc_i3c_master_release_slot(master, slot);
		return -ENOMEM;
	}

	data->ibi = -1;
	data->index = slot;
	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
						   dev->info.static_addr;
	master->descs[slot] = dev;

	i3c_dev_set_master_data(dev, data);

	return 0;
}

static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					   u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
							  dev->info.static_addr;

	return 0;
}

static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->addrs[data->index] = 0;
	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}

static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data;
	int slot;

	slot = svc_i3c_master_reserve_slot(master);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		svc_i3c_master_release_slot(master, slot);
		return -ENOMEM;
	}

	data->index = slot;
	master->addrs[slot] = dev->addr;

	i2c_dev_set_master_data(dev, data);

	return 0;
}

static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}

static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
				unsigned int len)
{
	int ret, i;
	u32 reg;

	for (i = 0; i < len; i++) {
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg),
						0, 1000);
		if (ret)
			return ret;

		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
	}

	return 0;
}

static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
					u8 *addrs, unsigned int *count)
{
	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
	unsigned int dev_nb = 0, last_addr = 0;
	u32 reg;
	int ret, i;

	while (true) {
		/* Enter/proceed with DAA */
		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
		       SVC_I3C_MCTRL_TYPE_I3C |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * Either one slave will send its ID, or the assignment process
		 * is done.
		 */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg) |
						SVC_I3C_MSTATUS_MCTRLDONE(reg),
						1, 1000);
		if (ret)
			return ret;

		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
			u8 data[6];

			/*
			 * We only care about the 48-bit provisioned ID, and
			 * only need it to be sure a device does not NACK an
			 * address twice. Otherwise, we would just need to
			 * flush the RX FIFO.
			 */
			ret = svc_i3c_master_readb(master, data, 6);
			if (ret)
				return ret;
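
			/*
			 * Illustration (made-up bytes): if the target shifted
			 * out data[] = { 0x04, 0xa2, 0x00, 0x00, 0x5a, 0x01 },
			 * the loop below reassembles the 48-bit Provisioned ID
			 * as prov_id = 0x04a200005a01, data[0] being the most
			 * significant byte.
			 */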
			for (i = 0; i < 6; i++)
				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));

			/* We do not care about the BCR and DCR yet */
			ret = svc_i3c_master_readb(master, data, 2);
			if (ret)
				return ret;
		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked their dynamic
				 * address, this is the natural end of the DAA
				 * procedure.
				 */
				break;
			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
				/* No I3C devices attached */
				if (dev_nb == 0)
					break;

				/*
				 * A slave device nacked the address, this is
				 * allowed only once, DAA will be stopped and
				 * then resumed. The same device is supposed to
				 * answer again immediately and shall ack the
				 * address this time.
				 */
				if (prov_id[dev_nb] == nacking_prov_id)
					return -EIO;

				dev_nb--;
				nacking_prov_id = prov_id[dev_nb];
				svc_i3c_master_emit_stop(master);

				continue;
			} else {
				return -EIO;
			}
		}

		/* Wait for the slave to be ready to receive its address */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
						SVC_I3C_MSTATUS_BETWEEN(reg),
						0, 1000);
		if (ret)
			return ret;

		/* Give the slave device a suitable dynamic address */
		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
		if (ret < 0)
			return ret;

		addrs[dev_nb] = ret;
		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
			dev_nb, addrs[dev_nb]);

		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
		last_addr = addrs[dev_nb++];
	}

	*count = dev_nb;

	return 0;
}

static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
{
	struct i3c_dev_desc *dev;
	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
		nobyte_addr_ko = 0;
	bool list_mbyte = false, list_nobyte = false;

	/* Create the IBIRULES register for both cases */
	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
			continue;

		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
							   dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				mbyte_addr_ko++;
			else
				mbyte_addr_ok++;
		} else {
			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
							    dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				nobyte_addr_ko++;
			else
				nobyte_addr_ok++;
		}
	}

	/* Check whether each device list can be handled by the hardware */
	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_mbyte = true;

	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_nobyte = true;

	/* No list can be properly handled, return an error */
	if (!list_mbyte && !list_nobyte)
		return -ERANGE;

	/* Pick the first list that can be handled by hardware, randomly */
	if (list_mbyte)
		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
	else
		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);

	return 0;
}
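
/*
 * Example with assumed (not real) dynamic addresses: if three payload-carrying
 * targets were assigned 0x09, 0x0a and 0x0b, the loop above builds
 *
 *   reg_mbyte = SVC_I3C_IBIRULES_ADDR(0, 0x09) |
 *               SVC_I3C_IBIRULES_ADDR(1, 0x0a) |
 *               SVC_I3C_IBIRULES_ADDR(2, 0x0b)
 *             = 0x09 | (0x0a << 6) | (0x0b << 12)
 *
 * i.e. one 6-bit field per IBIRULES slot. This is why at most
 * SVC_I3C_IBIRULES_ADDRS addresses fit in each list and why the MSb checks
 * above disqualify a list whose addresses cannot be encoded this way.
 */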

static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	u8 addrs[SVC_I3C_MAX_DEVS];
	unsigned long flags;
	unsigned int dev_nb;
	int ret, i;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
	if (ret) {
		svc_i3c_master_emit_stop(master);
		svc_i3c_master_clear_merrwarn(master);
		goto rpm_out;
	}

	/* Register all devices that participated in DAA with the core */
	for (i = 0; i < dev_nb; i++) {
		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
		if (ret)
			goto rpm_out;
	}

	/* Configure IBI auto-rules */
	ret = svc_i3c_update_ibirules(master);
	if (ret)
		dev_err(master->dev, "Cannot handle such a list of devices");

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

static int svc_i3c_master_read(struct svc_i3c_master *master,
			       u8 *in, unsigned int len)
{
	int offset = 0, i;
	u32 mdctrl, mstatus;
	bool completed = false;
	unsigned int count;
	unsigned long start = jiffies;

	while (!completed) {
		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
			completed = true;

		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
			dev_dbg(master->dev, "I3C read timeout\n");
			return -ETIMEDOUT;
		}

		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
		if (offset + count > len) {
			dev_err(master->dev, "I3C receive length too long!\n");
			return -EINVAL;
		}
		for (i = 0; i < count; i++)
			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);

		offset += count;
	}

	return offset;
}

static int svc_i3c_master_write(struct svc_i3c_master *master,
				const u8 *out, unsigned int len)
{
	int offset = 0, ret;
	u32 mdctrl;

	while (offset < len) {
		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
					 mdctrl,
					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
					 0, 1000);
		if (ret)
			return ret;

		/*
		 * The last byte to be sent over the bus must either have the
		 * "end" bit set or be written in MWDATABE.
		 */
		if (likely(offset < (len - 1)))
			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
		else
			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
	}

	return 0;
}

static int svc_i3c_master_xfer(struct svc_i3c_master *master,
			       bool rnw, unsigned int xfer_type, u8 addr,
			       u8 *in, const u8 *out, unsigned int xfer_len,
			       unsigned int *actual_len, bool continued)
{
	u32 reg;
	int ret;

	/* clean SVC_I3C_MINT_IBIWON w1c bits */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
	       xfer_type |
	       SVC_I3C_MCTRL_IBIRESP_NACK |
	       SVC_I3C_MCTRL_DIR(rnw) |
	       SVC_I3C_MCTRL_ADDR(addr) |
	       SVC_I3C_MCTRL_RDTERM(*actual_len),
	       master->regs + SVC_I3C_MCTRL);

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
		ret = -ENXIO;
		*actual_len = 0;
		goto emit_stop;
	}

	/*
	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
	 * with I3C Target Address.
	 *
	 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
	 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
	 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
	 * a Hot-Join Request has been made.
	 *
	 * If the IBIWON check is missed, wrong data will be returned. When IBIWON is set, return
	 * a failure and let the IBI event handler above take over.
	 */
	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
		ret = -ENXIO;
		*actual_len = 0;
		goto emit_stop;
	}

	if (rnw)
		ret = svc_i3c_master_read(master, in, xfer_len);
	else
		ret = svc_i3c_master_write(master, out, xfer_len);
	if (ret < 0)
		goto emit_stop;

	if (rnw)
		*actual_len = ret;

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);

	if (!continued) {
		svc_i3c_master_emit_stop(master);

		/* Wait idle if stop is sent. */
		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
	}

	return 0;

emit_stop:
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_clear_merrwarn(master);

	return ret;
}

static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
{
	struct svc_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
{
	kfree(xfer);
}

static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
					       struct svc_i3c_xfer *xfer)
{
	if (master->xferqueue.cur == xfer)
		master->xferqueue.cur = NULL;
	else
		list_del_init(&xfer->node);
}

static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	svc_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
{
	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
	int ret, i;

	if (!xfer)
		return;

	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_flush_fifo(master);

	for (i = 0; i < xfer->ncmds; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
					  cmd->addr, cmd->in, cmd->out,
					  cmd->len, &cmd->actual_len,
					  cmd->continued);
		/* cmd->xfer is NULL if I2C or CCC transfer */
		if (cmd->xfer)
			cmd->xfer->actual_len = cmd->actual_len;

		if (ret)
			break;
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	if (ret < 0)
		svc_i3c_master_dequeue_xfer_locked(master, xfer);

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct svc_i3c_xfer,
					node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	svc_i3c_master_start_xfer_locked(master);
}

static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		svc_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
}

static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
				const struct i3c_ccc_cmd *cmd)
{
	/* No software support for CCC commands targeting more than one slave */
	return (cmd->ndests == 1);
}

static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	u8 *buf;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	buf = kmalloc(xfer_len, GFP_KERNEL);
	if (!buf) {
		svc_i3c_master_free_xfer(xfer);
		return -ENOMEM;
	}

	buf[0] = ccc->id;
	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	cmd = &xfer->cmds[0];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = NULL;
	cmd->out = buf;
	cmd->len = xfer_len;
	cmd->actual_len = 0;
	cmd->continued = false;

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	kfree(buf);
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	unsigned int xfer_len = ccc->dests[0].payload.len;
	unsigned int actual_len = ccc->rnw ? xfer_len : 0;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 2);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	/* Broadcasted message */
	cmd = &xfer->cmds[0];
	cmd->addr = I3C_BROADCAST_ADDR;
	cmd->rnw = 0;
	cmd->in = NULL;
	cmd->out = &ccc->id;
	cmd->len = 1;
	cmd->actual_len = 0;
	cmd->continued = true;

	/* Directed message */
	cmd = &xfer->cmds[1];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
	cmd->len = xfer_len;
	cmd->actual_len = actual_len;
	cmd->continued = false;

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	if (cmd->actual_len != xfer_len)
		ccc->dests[0].payload.len = cmd->actual_len;

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
				       struct i3c_ccc_cmd *cmd)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	bool broadcast = cmd->id < 0x80;
	int ret;

	if (broadcast)
		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
	else
		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);

	if (ret)
		cmd->err = I3C_ERROR_M2;

	return ret;
}

static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				     struct i3c_priv_xfer *xfers,
				     int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct svc_i3c_xfer *xfer;
	int ret, i;

	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	for (i = 0; i < nxfers; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->xfer = &xfers[i];
		cmd->addr = master->addrs[data->index];
		cmd->rnw = xfers[i].rnw;
		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
		cmd->len = xfers[i].len;
		cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
		cmd->continued = (i + 1) < nxfers;
	}

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				    const struct i2c_msg *xfers,
				    int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct svc_i3c_xfer *xfer;
	int ret, i;

	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;

	for (i = 0; i < nxfers; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->addr = master->addrs[data->index];
		cmd->rnw = xfers[i].flags & I2C_M_RD;
		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
		cmd->len = xfers[i].len;
		cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
		cmd->continued = (i + 1 < nxfers);
	}

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				      const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
		dev_err(master->dev, "IBI max payload %d should be < %d\n",
			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
		return -ERANGE;
	}

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}

static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}

static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	master->enabled_events |= SVC_I3C_EVENT_IBI;
	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	master->enabled_events &= ~SVC_I3C_EVENT_IBI;
	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n",
			__func__);
		return ret;
	}

	master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;

	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return 0;
}

static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;

	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return 0;
}

static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					    struct i3c_ibi_slot *slot)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}

static const struct i3c_master_controller_ops svc_i3c_master_ops = {
	.bus_init = svc_i3c_master_bus_init,
	.bus_cleanup = svc_i3c_master_bus_cleanup,
	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
	.do_daa = svc_i3c_master_do_daa,
	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
	.priv_xfers = svc_i3c_master_priv_xfers,
	.i2c_xfers = svc_i3c_master_i2c_xfers,
	.request_ibi = svc_i3c_master_request_ibi,
	.free_ibi = svc_i3c_master_free_ibi,
	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
	.enable_ibi = svc_i3c_master_enable_ibi,
	.disable_ibi = svc_i3c_master_disable_ibi,
	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
};

static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
{
	int ret = 0;

	ret = clk_prepare_enable(master->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(master->fclk);
	if (ret) {
		clk_disable_unprepare(master->pclk);
		return ret;
	}

	ret = clk_prepare_enable(master->sclk);
	if (ret) {
		clk_disable_unprepare(master->pclk);
		clk_disable_unprepare(master->fclk);
		return ret;
	}

	return 0;
}

static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
{
	clk_disable_unprepare(master->pclk);
	clk_disable_unprepare(master->fclk);
	clk_disable_unprepare(master->sclk);
}

static int svc_i3c_master_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct svc_i3c_master *master;
	int ret;

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->fclk = devm_clk_get(dev, "fast_clk");
	if (IS_ERR(master->fclk))
		return PTR_ERR(master->fclk);

	master->sclk = devm_clk_get(dev, "slow_clk");
	if (IS_ERR(master->sclk))
		return PTR_ERR(master->sclk);

	master->irq = platform_get_irq(pdev, 0);
	if (master->irq < 0)
		return master->irq;

	master->dev = dev;

	ret = svc_i3c_master_prepare_clks(master);
	if (ret)
		return ret;

	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
	mutex_init(&master->lock);

	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
	if (ret)
		goto err_disable_clks;

	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	platform_set_drvdata(pdev, master);

	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	svc_i3c_master_reset(master);

	/* Register the master */
	ret = i3c_master_register(&master->base, &pdev->dev,
				  &svc_i3c_master_ops, false);
	if (ret)
		goto rpm_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

rpm_disable:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

err_disable_clks:
	svc_i3c_master_unprepare_clks(master);

	return ret;
}

static void svc_i3c_master_remove(struct platform_device *pdev)
{
	struct svc_i3c_master *master = platform_get_drvdata(pdev);

	i3c_master_unregister(&master->base);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static void svc_i3c_save_regs(struct svc_i3c_master *master)
{
	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
}

static void svc_i3c_restore_regs(struct svc_i3c_master *master)
{
	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
	    master->saved_regs.mdynaddr) {
		writel(master->saved_regs.mconfig,
		       master->regs + SVC_I3C_MCONFIG);
		writel(master->saved_regs.mdynaddr,
		       master->regs + SVC_I3C_MDYNADDR);
	}
}

static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	svc_i3c_save_regs(master);
	svc_i3c_master_unprepare_clks(master);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);
	svc_i3c_master_prepare_clks(master);

	svc_i3c_restore_regs(master);

	return 0;
}

static const struct dev_pm_ops svc_i3c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
			   svc_i3c_runtime_resume, NULL)
};

static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
	{ .compatible = "silvaco,i3c-master-v1"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);

static struct platform_driver svc_i3c_master = {
	.probe = svc_i3c_master_probe,
	.remove_new = svc_i3c_master_remove,
	.driver = {
		.name = "silvaco-i3c-master",
		.of_match_table = svc_i3c_master_of_match_tbl,
		.pm = &svc_i3c_pm_ops,
	},
};
module_platform_driver(svc_i3c_master);

MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");