1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Silvaco dual-role I3C master driver
4 *
5 * Copyright (C) 2020 Silvaco
6 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG 0x000
26 #define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define SVC_I3C_MCONFIG_SKEW_MASK GENMASK(27, 25)
36 #define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
37
38 #define SVC_I3C_MCTRL 0x084
39 #define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
40 #define SVC_I3C_MCTRL_REQUEST_NONE 0
41 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
42 #define SVC_I3C_MCTRL_REQUEST_STOP 2
43 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
44 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
45 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
46 #define SVC_I3C_MCTRL_TYPE_I3C 0
47 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
48 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0
49 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
50 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
51 #define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
52 #define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
53 #define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
54 #define SVC_I3C_MCTRL_DIR_WRITE 0
55 #define SVC_I3C_MCTRL_DIR_READ 1
56 #define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
57 #define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
58
59 #define SVC_I3C_MSTATUS 0x088
60 #define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
61 #define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
62 #define SVC_I3C_MSTATUS_STATE_SLVREQ(x) (SVC_I3C_MSTATUS_STATE(x) == 1)
63 #define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
64 #define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
65 #define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
66 #define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
67 #define SVC_I3C_MSTATUS_IBITYPE_IBI 1
68 #define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
69 #define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
70 #define SVC_I3C_MINT_SLVSTART BIT(8)
71 #define SVC_I3C_MINT_MCTRLDONE BIT(9)
72 #define SVC_I3C_MINT_COMPLETE BIT(10)
73 #define SVC_I3C_MINT_RXPEND BIT(11)
74 #define SVC_I3C_MINT_TXNOTFULL BIT(12)
75 #define SVC_I3C_MINT_IBIWON BIT(13)
76 #define SVC_I3C_MINT_ERRWARN BIT(15)
77 #define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
78 #define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
79 #define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
80 #define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
81 #define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
82 #define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
83 #define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
84 #define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
85
86 #define SVC_I3C_IBIRULES 0x08C
87 #define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
88 ((addr) & 0x3F) << ((slot) * 6))
89 #define SVC_I3C_IBIRULES_ADDRS 5
90 #define SVC_I3C_IBIRULES_MSB0 BIT(30)
91 #define SVC_I3C_IBIRULES_NOBYTE BIT(31)
92 #define SVC_I3C_IBIRULES_MANDBYTE 0
93 #define SVC_I3C_MINTSET 0x090
94 #define SVC_I3C_MINTCLR 0x094
95 #define SVC_I3C_MINTMASKED 0x098
96 #define SVC_I3C_MERRWARN 0x09C
97 #define SVC_I3C_MERRWARN_NACK BIT(2)
98 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
99 #define SVC_I3C_MDMACTRL 0x0A0
100 #define SVC_I3C_MDATACTRL 0x0AC
101 #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
102 #define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
103 #define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
104 #define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
105 #define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
106 #define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
107 #define SVC_I3C_MDATACTRL_TXFULL BIT(30)
108 #define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
109
110 #define SVC_I3C_MWDATAB 0x0B0
111 #define SVC_I3C_MWDATAB_END BIT(8)
112
113 #define SVC_I3C_MWDATABE 0x0B4
114 #define SVC_I3C_MWDATAH 0x0B8
115 #define SVC_I3C_MWDATAHE 0x0BC
116 #define SVC_I3C_MRDATAB 0x0C0
117 #define SVC_I3C_MRDATAH 0x0C8
118 #define SVC_I3C_MWDATAB1 0x0CC
119 #define SVC_I3C_MWMSG_SDR 0x0D0
120 #define SVC_I3C_MRMSG_SDR 0x0D4
121 #define SVC_I3C_MWMSG_DDR 0x0D8
122 #define SVC_I3C_MRMSG_DDR 0x0DC
123
124 #define SVC_I3C_MDYNADDR 0x0E4
125 #define SVC_MDYNADDR_VALID BIT(0)
126 #define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
127
128 #define SVC_I3C_MAX_DEVS 32
129 #define SVC_I3C_PM_TIMEOUT_MS 1000
130
131 /* This parameter depends on the implementation and may be tuned */
132 #define SVC_I3C_FIFO_SIZE 16
133 #define SVC_I3C_PPBAUD_MAX 15
134 #define SVC_I3C_QUICK_I2C_CLK 4170000
135
136 #define SVC_I3C_EVENT_IBI GENMASK(7, 0)
137 #define SVC_I3C_EVENT_HOTJOIN BIT(31)
138
139 /*
140 * SVC_I3C_QUIRK_FIFO_EMPTY:
141 * I3C HW stalls the write transfer if the transmit FIFO becomes empty,
142 * when new data is written to FIFO, I3C HW resumes the transfer but
143 * the first transmitted data bit may have the wrong value.
144 * Workaround:
145 * Fill the FIFO in advance to prevent FIFO from becoming empty.
146 */
147 #define SVC_I3C_QUIRK_FIFO_EMPTY BIT(0)
148 /*
 * SVC_I3C_QUIRK_FALSE_SLVSTART:
150 * I3C HW may generate an invalid SlvStart event when emitting a STOP.
151 * If it is a true SlvStart, the MSTATUS state is SLVREQ.
152 */
153 #define SVC_I3C_QUIRK_FALSE_SLVSTART BIT(1)
154 /*
155 * SVC_I3C_QUIRK_DAA_CORRUPT:
156 * When MCONFIG.SKEW=0 and MCONFIG.ODHPP=0, the ENTDAA transaction gets
157 * corrupted and results in a no repeated-start condition at the end of
158 * address assignment.
159 * Workaround:
160 * Set MCONFIG.SKEW to 1 before initiating the DAA process. After the DAA
161 * process is completed, return MCONFIG.SKEW to its previous value.
162 */
163 #define SVC_I3C_QUIRK_DAA_CORRUPT BIT(2)
164
/**
 * struct svc_i3c_cmd - Driver-internal descriptor for one bus command
 * @addr: Target address on the bus
 * @rnw: True for a read command (RnW = 1), false for a write
 * @in: Receive buffer (read commands)
 * @out: Transmit buffer (write commands)
 * @len: Requested transfer length in bytes
 * @actual_len: Number of bytes actually transferred
 * @xfer: Originating private transfer, if any
 * @continued: True when another command follows without an intermediate STOP
 */
struct svc_i3c_cmd {
	u8 addr;
	bool rnw;
	u8 *in;
	const void *out;
	unsigned int len;
	unsigned int actual_len;
	struct i3c_priv_xfer *xfer;
	bool continued;
};
175
/**
 * struct svc_i3c_xfer - One queued transfer, made of one or more commands
 * @node: Member of the controller transfer queue
 * @comp: Completion signalled when the transfer is done
 * @ret: Transfer status (0 or a negative errno)
 * @type: Bus type bits for MCTRL (I3C or I2C message)
 * @ncmds: Number of entries in @cmds
 * @cmds: The commands composing this transfer
 */
struct svc_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int type;
	unsigned int ncmds;
	struct svc_i3c_cmd cmds[] __counted_by(ncmds);
};
184
/**
 * struct svc_i3c_regs_save - Registers preserved across a PM suspend cycle
 * @mconfig: Saved SVC_I3C_MCONFIG value
 * @mdynaddr: Saved SVC_I3C_MDYNADDR value
 */
struct svc_i3c_regs_save {
	u32 mconfig;
	u32 mdynaddr;
};
189
/**
 * struct svc_i3c_drvdata - Per-compatible driver data
 * @quirks: Bitmask of SVC_I3C_QUIRK_* flags for this controller revision
 */
struct svc_i3c_drvdata {
	u32 quirks;
};
193
/**
 * struct svc_i3c_master - Silvaco I3C Master structure
 * @base: I3C master controller
 * @dev: Corresponding device
 * @regs: Memory mapping
 * @saved_regs: Volatile values for PM operations
 * @free_slots: Bit array of available slots (a set bit means the slot is free)
 * @addrs: Array containing the dynamic addresses of each attached device
 * @descs: Array of descriptors, one per attached device
 * @hj_work: Hot-join work
 * @irq: Main interrupt
 * @num_clks: I3C clock number
 * @fclk: Fast clock (bus)
 * @clks: I3C clock array
 * @xferqueue: Transfer queue structure
 * @xferqueue.list: List member
 * @xferqueue.cur: Current ongoing transfer
 * @xferqueue.lock: Queue lock
 * @ibi: IBI structure
 * @ibi.num_slots: Number of slots available in @ibi.slots
 * @ibi.slots: Available IBI slots
 * @ibi.tbq_slot: To be queued IBI slot
 * @ibi.lock: IBI lock
 * @lock: Transfer lock, protect between IBI work thread and callbacks from master
 * @drvdata: Driver data (quirk flags for this IP revision)
 * @enabled_events: Bit masks for enable events (IBI, HotJoin).
 * @mctrl_config: Configuration value in SVC_I3C_MCONFIG for setting speed back.
 */
struct svc_i3c_master {
	struct i3c_master_controller base;
	struct device *dev;
	void __iomem *regs;
	struct svc_i3c_regs_save saved_regs;
	u32 free_slots;
	u8 addrs[SVC_I3C_MAX_DEVS];
	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
	struct work_struct hj_work;
	int irq;
	int num_clks;
	struct clk *fclk;
	struct clk_bulk_data *clks;
	struct {
		struct list_head list;
		struct svc_i3c_xfer *cur;
		/* Prevent races between transfers */
		spinlock_t lock;
	} xferqueue;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		struct i3c_ibi_slot *tbq_slot;
		/* Prevent races within IBI handlers */
		spinlock_t lock;
	} ibi;
	struct mutex lock;
	const struct svc_i3c_drvdata *drvdata;
	u32 enabled_events;
	u32 mctrl_config;
};
253
/**
 * struct svc_i3c_i2c_dev_data - Device specific data
 * @index: Index in the master tables corresponding to this device
 * @ibi: IBI slot index in the master structure (-1 when no IBI is allocated)
 * @ibi_pool: IBI pool associated to this device
 */
struct svc_i3c_i2c_dev_data {
	u8 index;
	int ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};
265
svc_has_quirk(struct svc_i3c_master * master,u32 quirk)266 static inline bool svc_has_quirk(struct svc_i3c_master *master, u32 quirk)
267 {
268 return (master->drvdata->quirks & quirk);
269 }
270
svc_has_daa_corrupt(struct svc_i3c_master * master)271 static inline bool svc_has_daa_corrupt(struct svc_i3c_master *master)
272 {
273 return ((master->drvdata->quirks & SVC_I3C_QUIRK_DAA_CORRUPT) &&
274 !(master->mctrl_config &
275 (SVC_I3C_MCONFIG_SKEW_MASK | SVC_I3C_MCONFIG_ODHPP(1))));
276 }
277
is_events_enabled(struct svc_i3c_master * master,u32 mask)278 static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
279 {
280 return !!(master->enabled_events & mask);
281 }
282
svc_i3c_master_error(struct svc_i3c_master * master)283 static bool svc_i3c_master_error(struct svc_i3c_master *master)
284 {
285 u32 mstatus, merrwarn;
286
287 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
288 if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
289 merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
290 writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
291
292 /* Ignore timeout error */
293 if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
294 dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
295 mstatus, merrwarn);
296 return false;
297 }
298
299 dev_err(master->dev,
300 "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
301 mstatus, merrwarn);
302
303 return true;
304 }
305
306 return false;
307 }
308
/* Unmask the interrupts in @mask (MINTSET is write-1-to-set). */
static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
{
	writel(mask, master->regs + SVC_I3C_MINTSET);
}
313
svc_i3c_master_disable_interrupts(struct svc_i3c_master * master)314 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
315 {
316 u32 mask = readl(master->regs + SVC_I3C_MINTSET);
317
318 writel(mask, master->regs + SVC_I3C_MINTCLR);
319 }
320
svc_i3c_master_clear_merrwarn(struct svc_i3c_master * master)321 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
322 {
323 /* Clear pending warnings */
324 writel(readl(master->regs + SVC_I3C_MERRWARN),
325 master->regs + SVC_I3C_MERRWARN);
326 }
327
svc_i3c_master_flush_fifo(struct svc_i3c_master * master)328 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
329 {
330 /* Flush FIFOs */
331 writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
332 master->regs + SVC_I3C_MDATACTRL);
333 }
334
svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master * master)335 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
336 {
337 u32 reg;
338
339 /* Set RX and TX tigger levels, flush FIFOs */
340 reg = SVC_I3C_MDATACTRL_FLUSHTB |
341 SVC_I3C_MDATACTRL_FLUSHRB |
342 SVC_I3C_MDATACTRL_UNLOCK_TRIG |
343 SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
344 SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
345 writel(reg, master->regs + SVC_I3C_MDATACTRL);
346 }
347
/*
 * Bring the controller back to a clean state: no pending errors, empty
 * FIFOs with default trigger levels, and all interrupts masked.
 */
static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_reset_fifo_trigger(master);
	svc_i3c_master_disable_interrupts(master);
}
354
/* Convert the generic controller handle back to the driver structure. */
static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct svc_i3c_master, base);
}
360
svc_i3c_master_hj_work(struct work_struct * work)361 static void svc_i3c_master_hj_work(struct work_struct *work)
362 {
363 struct svc_i3c_master *master;
364
365 master = container_of(work, struct svc_i3c_master, hj_work);
366 i3c_master_do_daa(&master->base);
367 }
368
369 static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master * master,unsigned int ibiaddr)370 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
371 unsigned int ibiaddr)
372 {
373 int i;
374
375 for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
376 if (master->addrs[i] == ibiaddr)
377 break;
378
379 if (i == SVC_I3C_MAX_DEVS)
380 return NULL;
381
382 return master->descs[i];
383 }
384
/* Request a STOP condition on the bus and let it settle. */
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);

	/*
	 * This delay is necessary after the emission of a stop, otherwise eg.
	 * repeating IBIs do not get detected. There is a note in the manual
	 * about it, stating that the stop condition might not be settled
	 * correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}
397
svc_i3c_master_handle_ibi(struct svc_i3c_master * master,struct i3c_dev_desc * dev)398 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
399 struct i3c_dev_desc *dev)
400 {
401 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
402 struct i3c_ibi_slot *slot;
403 unsigned int count;
404 u32 mdatactrl;
405 int ret, val;
406 u8 *buf;
407
408 slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
409 if (!slot)
410 return -ENOSPC;
411
412 slot->len = 0;
413 buf = slot->data;
414
415 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
416 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
417 if (ret) {
418 dev_err(master->dev, "Timeout when polling for COMPLETE\n");
419 return ret;
420 }
421
422 while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
423 slot->len < SVC_I3C_FIFO_SIZE) {
424 mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
425 count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
426 readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
427 slot->len += count;
428 buf += count;
429 }
430
431 master->ibi.tbq_slot = slot;
432
433 return 0;
434 }
435
/*
 * ACK the in-progress IBI, optionally letting the target transmit its
 * mandatory data byte, then wait for the request to complete.
 */
static int svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
				  bool mandatory_byte)
{
	u32 resp = mandatory_byte ? SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE :
				    SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
	u32 reg;

	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK | resp,
	       master->regs + SVC_I3C_MCTRL);

	return readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
}
454
svc_i3c_master_nack_ibi(struct svc_i3c_master * master)455 static int svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
456 {
457 int ret;
458 u32 reg;
459
460 writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
461 SVC_I3C_MCTRL_IBIRESP_NACK,
462 master->regs + SVC_I3C_MCTRL);
463
464 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
465 SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
466 return ret;
467 }
468
/*
 * Called when the controller won bus arbitration against an incoming IBI
 * while a transfer was being started: acknowledge the IBIWON flag and
 * NACK the request types the hardware cannot NACK on its own.
 */
static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 mstatus)
{
	u32 ibitype = SVC_I3C_MSTATUS_IBITYPE(mstatus);

	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Hardware can't auto emit NACK for hot join and master request */
	if (ibitype == SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN ||
	    ibitype == SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST)
		return svc_i3c_master_nack_ibi(master);

	return 0;
}
487
/*
 * Service a target-initiated request (IBI, Hot-Join or mastership
 * request) signalled by SLVSTART. Runs in hard-IRQ context with the
 * transfer-queue lock held for the whole transaction.
 */
static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
{
	struct svc_i3c_i2c_dev_data *data;
	unsigned int ibitype, ibiaddr;
	struct i3c_dev_desc *dev;
	u32 status, val;
	int ret;

	/*
	 * According to I3C spec ver 1.1, 09-Jun-2021, section 5.1.2.5:
	 *
	 * The I3C Controller shall hold SCL low while the Bus is in ACK/NACK Phase of I3C/I2C
	 * transfer. But maximum stall time is 100us. The IRQs have to be disabled to prevent
	 * schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if
	 * any irq or schedule happen during transaction.
	 */
	guard(spinlock)(&master->xferqueue.lock);

	/*
	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
	 * ibitype will be 0 since it was last updated only after the 8th SCL
	 * cycle, leading to missed client IBI handlers.
	 *
	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
	 * at svc_i3c_master_priv_xfers().
	 *
	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
	 */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
	       SVC_I3C_MCTRL_IBIRESP_AUTO,
	       master->regs + SVC_I3C_MCTRL);

	/* Wait for IBIWON, should take approximately 100us */
	ret = readl_relaxed_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, val,
						SVC_I3C_MSTATUS_IBIWON(val), 0, 100);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for IBIWON\n");
		svc_i3c_master_emit_stop(master);
		return;
	}

	/* The address and request type are only valid once IBIWON is set */
	status = readl(master->regs + SVC_I3C_MSTATUS);
	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);

	/* Handle the critical responses to IBI's */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		/* Payload IBIs are only accepted from known, enabled devices */
		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
			svc_i3c_master_nack_ibi(master);
		else
			svc_i3c_master_handle_ibi(master, dev);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			svc_i3c_master_ack_ibi(master, false);
		else
			svc_i3c_master_nack_ibi(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		/* Mastership hand-off is not supported: always NACK */
		svc_i3c_master_nack_ibi(master);
		break;
	default:
		break;
	}

	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timedout. In this case we just drop everything, emit a stop and wait
	 * for the slave to interrupt again.
	 */
	if (svc_i3c_master_error(master)) {
		if (master->ibi.tbq_slot) {
			/*
			 * NOTE(review): @dev is only assigned in the IBI case
			 * above; tbq_slot can only have been set by
			 * svc_i3c_master_handle_ibi() in that same case, so
			 * @dev is valid here — re-check if new ibitypes get
			 * handled.
			 */
			data = i3c_dev_get_master_data(dev);
			i3c_generic_ibi_recycle_slot(data->ibi_pool,
						     master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}

		svc_i3c_master_emit_stop(master);

		return;
	}

	/* Handle the non critical tasks */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		svc_i3c_master_emit_stop(master);
		if (dev) {
			/* Hand the filled slot over to the I3C core */
			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		svc_i3c_master_emit_stop(master);
		/* Re-run DAA from the workqueue to enumerate the new device */
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			queue_work(master->base.wq, &master->hj_work);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_emit_stop(master);
		break;
	default:
		break;
	}
}
598
svc_i3c_master_irq_handler(int irq,void * dev_id)599 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
600 {
601 struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
602 u32 active = readl(master->regs + SVC_I3C_MSTATUS);
603
604 if (!SVC_I3C_MSTATUS_SLVSTART(active))
605 return IRQ_NONE;
606
607 /* Clear the interrupt status */
608 writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
609
610 /* Ignore the false event */
611 if (svc_has_quirk(master, SVC_I3C_QUIRK_FALSE_SLVSTART) &&
612 !SVC_I3C_MSTATUS_STATE_SLVREQ(active))
613 return IRQ_HANDLED;
614
615 /*
616 * The SDA line remains low until the request is processed.
617 * Receive the request in the interrupt context to respond promptly
618 * and restore the bus to idle state.
619 */
620 svc_i3c_master_ibi_isr(master);
621
622 return IRQ_HANDLED;
623 }
624
/*
 * Switch the open-drain SCL timing: slow (I2C-compatible, 50% duty
 * cycle) so every I2C/I3C device sees the first broadcast, or back to
 * the normal timing computed at bus init.
 *
 * Returns 0 on success or a negative errno.
 */
static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
				    enum i3c_open_drain_speed speed)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(&master->base);
	u32 ppbaud, odbaud, odhpp, mconfig;
	unsigned long fclk_rate;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	switch (speed) {
	case I3C_OPEN_DRAIN_SLOW_SPEED:
		fclk_rate = clk_get_rate(master->fclk);
		if (!fclk_rate) {
			ret = -EINVAL;
			goto rpm_out;
		}
		/*
		 * Set 50% duty-cycle I2C speed to I3C OPEN-DRAIN mode, so the first
		 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
		 * I3C device working as a I2C device will turn off its 50ns Spike
		 * Filter to change to I3C mode.
		 */
		mconfig = master->mctrl_config;
		ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
		odhpp = 0;
		odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
		/* Replace only the ODBAUD (23:16) and ODHPP (24) fields */
		mconfig &= ~GENMASK(24, 16);
		mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
		writel(mconfig, master->regs + SVC_I3C_MCONFIG);
		break;
	case I3C_OPEN_DRAIN_NORMAL_SPEED:
		/* Restore the timings computed at bus init time */
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
		break;
	}

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
672
svc_i3c_master_bus_init(struct i3c_master_controller * m)673 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
674 {
675 struct svc_i3c_master *master = to_svc_i3c_master(m);
676 struct i3c_bus *bus = i3c_master_get_bus(m);
677 struct i3c_device_info info = {};
678 unsigned long fclk_rate, fclk_period_ns;
679 unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
680 unsigned int high_period_ns, od_low_period_ns;
681 u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
682 int ret;
683
684 ret = pm_runtime_resume_and_get(master->dev);
685 if (ret < 0) {
686 dev_err(master->dev,
687 "<%s> cannot resume i3c bus master, err: %d\n",
688 __func__, ret);
689 return ret;
690 }
691
692 /* Timings derivation */
693 fclk_rate = clk_get_rate(master->fclk);
694 if (!fclk_rate) {
695 ret = -EINVAL;
696 goto rpm_out;
697 }
698
699 fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
700 i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
701 i2c_scl_rate = bus->scl_rate.i2c;
702 i3c_scl_rate = bus->scl_rate.i3c;
703
704 /*
705 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
706 * Simplest configuration is using a 50% duty-cycle of 40ns.
707 */
708 ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
709 pplow = 0;
710
711 /*
712 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
713 * duty-cycle tuned so that high levels are filetered out by
714 * the 50ns filter (target being 40ns).
715 */
716 odhpp = 1;
717 high_period_ns = (ppbaud + 1) * fclk_period_ns;
718 odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
719 od_low_period_ns = (odbaud + 1) * high_period_ns;
720
721 switch (bus->mode) {
722 case I3C_BUS_MODE_PURE:
723 i2cbaud = 0;
724 odstop = 0;
725 break;
726 case I3C_BUS_MODE_MIXED_FAST:
727 /*
728 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
729 * between the high and low period does not really matter.
730 */
731 i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
732 odstop = 1;
733 break;
734 case I3C_BUS_MODE_MIXED_LIMITED:
735 case I3C_BUS_MODE_MIXED_SLOW:
736 /* I3C PP + I3C OP + I2C OP both use i2c clk rate */
737 if (ppbaud > SVC_I3C_PPBAUD_MAX) {
738 ppbaud = SVC_I3C_PPBAUD_MAX;
739 pplow = DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
740 }
741
742 high_period_ns = (ppbaud + 1) * fclk_period_ns;
743 odhpp = 0;
744 odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;
745
746 od_low_period_ns = (odbaud + 1) * high_period_ns;
747 i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
748 odstop = 1;
749 break;
750 default:
751 goto rpm_out;
752 }
753
754 reg = SVC_I3C_MCONFIG_MASTER_EN |
755 SVC_I3C_MCONFIG_DISTO(0) |
756 SVC_I3C_MCONFIG_HKEEP(0) |
757 SVC_I3C_MCONFIG_ODSTOP(odstop) |
758 SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
759 SVC_I3C_MCONFIG_PPLOW(pplow) |
760 SVC_I3C_MCONFIG_ODBAUD(odbaud) |
761 SVC_I3C_MCONFIG_ODHPP(odhpp) |
762 SVC_I3C_MCONFIG_SKEW(0) |
763 SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
764 writel(reg, master->regs + SVC_I3C_MCONFIG);
765
766 master->mctrl_config = reg;
767 /* Master core's registration */
768 ret = i3c_master_get_free_addr(m, 0);
769 if (ret < 0)
770 goto rpm_out;
771
772 info.dyn_addr = ret;
773
774 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
775 master->regs + SVC_I3C_MDYNADDR);
776
777 ret = i3c_master_set_info(&master->base, &info);
778 if (ret)
779 goto rpm_out;
780
781 rpm_out:
782 pm_runtime_mark_last_busy(master->dev);
783 pm_runtime_put_autosuspend(master->dev);
784
785 return ret;
786 }
787
/*
 * Disable the controller: mask all interrupts and clear MCONFIG (which
 * drops MASTER_EN), leaving the IP inactive on the bus.
 */
static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	svc_i3c_master_disable_interrupts(master);

	/* Disable master */
	writel(0, master->regs + SVC_I3C_MCONFIG);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
}
807
svc_i3c_master_reserve_slot(struct svc_i3c_master * master)808 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
809 {
810 unsigned int slot;
811
812 if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
813 return -ENOSPC;
814
815 slot = ffs(master->free_slots) - 1;
816
817 master->free_slots &= ~BIT(slot);
818
819 return slot;
820 }
821
/* Mark @slot as free again in the allocation bitmap. */
static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
					unsigned int slot)
{
	master->free_slots |= BIT(slot);
}
827
svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc * dev)828 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
829 {
830 struct i3c_master_controller *m = i3c_dev_get_master(dev);
831 struct svc_i3c_master *master = to_svc_i3c_master(m);
832 struct svc_i3c_i2c_dev_data *data;
833 int slot;
834
835 slot = svc_i3c_master_reserve_slot(master);
836 if (slot < 0)
837 return slot;
838
839 data = kzalloc(sizeof(*data), GFP_KERNEL);
840 if (!data) {
841 svc_i3c_master_release_slot(master, slot);
842 return -ENOMEM;
843 }
844
845 data->ibi = -1;
846 data->index = slot;
847 master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
848 dev->info.static_addr;
849 master->descs[slot] = dev;
850
851 i3c_dev_set_master_data(dev, data);
852
853 return 0;
854 }
855
/* Refresh the cached address after the device got a new dynamic address. */
static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					   u8 old_dyn_addr)
{
	struct svc_i3c_master *master = to_svc_i3c_master(i3c_dev_get_master(dev));
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	u8 addr = dev->info.dyn_addr ? dev->info.dyn_addr :
				       dev->info.static_addr;

	master->addrs[data->index] = addr;

	return 0;
}
868
svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc * dev)869 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
870 {
871 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
872 struct i3c_master_controller *m = i3c_dev_get_master(dev);
873 struct svc_i3c_master *master = to_svc_i3c_master(m);
874
875 master->addrs[data->index] = 0;
876 svc_i3c_master_release_slot(master, data->index);
877
878 kfree(data);
879 }
880
svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc * dev)881 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
882 {
883 struct i3c_master_controller *m = i2c_dev_get_master(dev);
884 struct svc_i3c_master *master = to_svc_i3c_master(m);
885 struct svc_i3c_i2c_dev_data *data;
886 int slot;
887
888 slot = svc_i3c_master_reserve_slot(master);
889 if (slot < 0)
890 return slot;
891
892 data = kzalloc(sizeof(*data), GFP_KERNEL);
893 if (!data) {
894 svc_i3c_master_release_slot(master, slot);
895 return -ENOMEM;
896 }
897
898 data->index = slot;
899 master->addrs[slot] = dev->addr;
900
901 i2c_dev_set_master_data(dev, data);
902
903 return 0;
904 }
905
/* Free the per-device data and return the slot to the free pool. */
static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}
916
/*
 * Read @len bytes from the RX FIFO into @dst, one byte at a time,
 * waiting for RXPEND before each read.
 *
 * Returns 0 on success or the poll-timeout errno.
 */
static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
				unsigned int len)
{
	unsigned int i;
	u32 status;
	int ret;

	for (i = 0; i < len; i++) {
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						status,
						SVC_I3C_MSTATUS_RXPEND(status),
						0, 1000);
		if (ret)
			return ret;

		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
	}

	return 0;
}
936
/*
 * Run the Dynamic Address Assignment (ENTDAA) procedure.
 *
 * Must be called with master->xferqueue.lock held (hence the atomic
 * pollers). @addrs is filled with the assigned dynamic addresses and
 * *@count with the number of devices that accepted one.
 *
 * Returns 0 on success — including the "no device answered" case, where
 * *count is 0 — or a negative errno on failure. On the natural COMPLETE
 * end the hardware emits STOP itself; every other exit path falls
 * through to the manual STOP at the bottom.
 */
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
					u8 *addrs, unsigned int *count)
{
	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
	unsigned int dev_nb = 0, last_addr = 0, dyn_addr = 0;
	u32 reg;
	int ret, i;

	svc_i3c_master_flush_fifo(master);

	while (true) {
		/* clean SVC_I3C_MINT_IBIWON w1c bits */
		writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

		/* SVC_I3C_MCTRL_REQUEST_PROC_DAA has two modes, ENTER DAA or PROCESS DAA.
		 *
		 * ENTER DAA:
		 *   1 will issue START, 7E, ENTDAA, and then emits 7E/R to process first target.
		 *   2 Stops just before the new Dynamic Address (DA) is to be emitted.
		 *
		 * PROCESS DAA:
		 *   1 The DA is written using MWDATAB or ADDR bits 6:0.
		 *   2 ProcessDAA is requested again to write the new address, and then starts the
		 *     next (START, 7E, ENTDAA) unless marked to STOP; an MSTATUS indicating NACK
		 *     means DA was not accepted (e.g. parity error). If PROCESSDAA is NACKed on the
		 *     7E/R, which means no more Slaves need a DA, then a COMPLETE will be signaled
		 *     (along with DONE), and a STOP issued automatically.
		 */
		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
		       SVC_I3C_MCTRL_TYPE_I3C |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * Either one slave will send its ID, or the assignment process
		 * is done.
		 */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg) |
						SVC_I3C_MSTATUS_MCTRLDONE(reg),
						1, 1000);
		if (ret)
			break;

		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
			u8 data[6];

			/*
			 * One slave sends its ID to request an address assignment;
			 * prefilling the dynamic address can reduce SCL clock stalls
			 * and also fix the SVC_I3C_QUIRK_FIFO_EMPTY quirk.
			 *
			 * Ideally, prefilling before the processDAA command is better.
			 * However, it requires an additional check to write the dyn_addr
			 * at the right time because the driver needs to write the processDAA
			 * command twice for one assignment.
			 * Prefilling here is safe and efficient because the FIFO starts
			 * filling within a few hundred nanoseconds, which is significantly
			 * faster compared to the 64 SCL clock cycles.
			 */
			ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
			if (ret < 0)
				break;

			dyn_addr = ret;
			writel(dyn_addr, master->regs + SVC_I3C_MWDATAB);

			/*
			 * We only care about the 48-bit provisioned ID yet to
			 * be sure a device does not nack an address twice.
			 * Otherwise, we would just need to flush the RX FIFO.
			 */
			ret = svc_i3c_master_readb(master, data, 6);
			if (ret)
				break;

			/* Big-endian assembly: byte 0 is PID bits 47:40 */
			for (i = 0; i < 6; i++)
				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));

			/* We do not care about the BCR and DCR yet */
			ret = svc_i3c_master_readb(master, data, 2);
			if (ret)
				break;
		} else if (SVC_I3C_MSTATUS_IBIWON(reg)) {
			/* An IBI won arbitration: service it and retry DAA */
			ret = svc_i3c_master_handle_ibi_won(master, reg);
			if (ret)
				break;
			continue;
		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked their dynamic
				 * address, this is the natural end of the DAA
				 * procedure.
				 *
				 * Hardware will auto emit STOP in this case.
				 */
				*count = dev_nb;
				return 0;

			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
				/* No I3C devices attached */
				if (dev_nb == 0) {
					/*
					 * Hardware can't treat the first NACK for ENTDAA as a
					 * normal COMPLETE, so a STOP must be emitted manually.
					 */
					ret = 0;
					*count = 0;
					break;
				}

				/*
				 * A slave device nacked the address, this is
				 * allowed only once, DAA will be stopped and
				 * then resumed. The same device is supposed to
				 * answer again immediately and shall ack the
				 * address this time.
				 */
				if (prov_id[dev_nb] == nacking_prov_id) {
					/* Same device nacked twice: give up */
					ret = -EIO;
					break;
				}

				dev_nb--;
				nacking_prov_id = prov_id[dev_nb];
				svc_i3c_master_emit_stop(master);

				continue;
			} else {
				break;
			}
		}

		/* Wait for the slave to be ready to receive its address */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
						SVC_I3C_MSTATUS_BETWEEN(reg),
						0, 1000);
		if (ret)
			break;

		addrs[dev_nb] = dyn_addr;
		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
			dev_nb, addrs[dev_nb]);
		last_addr = addrs[dev_nb++];
	}

	/* Need to manually issue STOP except for the COMPLETE condition */
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_flush_fifo(master);

	return ret;
}
1096
/*
 * Program the hardware IBI rules register (IBIRULES).
 *
 * The controller can auto-ACK IBIs only for a limited set of addresses,
 * all with MSb clear, and all devices in the set must agree on whether
 * their IBIs carry a mandatory byte. Build both candidate sets (with and
 * without payload byte) and program whichever one the hardware can
 * represent.
 *
 * Returns 0 on success, -ERANGE if neither set fits the hardware limits.
 */
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
{
	struct i3c_dev_desc *dev;
	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
		nobyte_addr_ko = 0;
	bool list_mbyte = false, list_nobyte = false;

	/* Create the IBIRULES register for both cases */
	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
		/* Only devices that can raise IBIs are relevant */
		if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
			continue;

		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
							   dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				mbyte_addr_ko++;
			else
				mbyte_addr_ok++;
		} else {
			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
							    dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				nobyte_addr_ko++;
			else
				nobyte_addr_ok++;
		}
	}

	/* A list is usable only if no device was rejected and it fits */
	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_mbyte = true;

	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_nobyte = true;

	/* No list can be properly handled, return an error */
	if (!list_mbyte && !list_nobyte)
		return -ERANGE;

	/* Pick the first list that can be handled by hardware, randomly */
	if (list_mbyte)
		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
	else
		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);

	return 0;
}
1150
/*
 * i3c_master_controller_ops.do_daa hook: run DAA, register the devices
 * that answered and refresh the hardware IBI rules.
 *
 * The actual bus procedure runs under the xferqueue spinlock so it cannot
 * interleave with regular transfers.
 */
static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	u8 addrs[SVC_I3C_MAX_DEVS];
	unsigned long flags;
	unsigned int dev_nb;
	int ret, i;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	spin_lock_irqsave(&master->xferqueue.lock, flags);

	/*
	 * Affected silicon needs a non-zero SKEW during DAA to avoid
	 * corrupting the assigned addresses; restore the original MCONFIG
	 * right after.
	 */
	if (svc_has_daa_corrupt(master))
		writel(master->mctrl_config | SVC_I3C_MCONFIG_SKEW(1),
		       master->regs + SVC_I3C_MCONFIG);

	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);

	if (svc_has_daa_corrupt(master))
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);

	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	svc_i3c_master_clear_merrwarn(master);
	if (ret)
		goto rpm_out;

	/*
	 * Register all devices who participated to the core
	 *
	 * If two devices (A and B) are detected in DAA and address 0xa is assigned to
	 * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
	 * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
	 * registered on the bus. The I3C stack might still consider 0xb a free
	 * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
	 * causing both devices A and B to use the same address 0xb, violating the I3C
	 * specification.
	 *
	 * The return value for i3c_master_add_i3c_dev_locked() should not be checked
	 * because subsequent steps will scan the entire I3C bus, independent of
	 * whether i3c_master_add_i3c_dev_locked() returns success.
	 *
	 * If device A registration fails, there is still a chance to register device
	 * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
	 * retrieving device information.
	 */
	for (i = 0; i < dev_nb; i++)
		i3c_master_add_i3c_dev_locked(m, addrs[i]);

	/* Configure IBI auto-rules */
	ret = svc_i3c_update_ibirules(master);
	if (ret)
		dev_err(master->dev, "Cannot handle such a list of devices");

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
1215
/*
 * Drain the RX FIFO into @in until the controller reports COMPLETE.
 *
 * Busy-polls MSTATUS/MDATACTRL with a 1 second overall deadline. The RX
 * count is re-read on every iteration so bytes arriving while the loop
 * spins are picked up.
 *
 * Returns the number of bytes read on success, -ETIMEDOUT if COMPLETE
 * never shows up, or -EINVAL if the device sends more than @len bytes.
 */
static int svc_i3c_master_read(struct svc_i3c_master *master,
			       u8 *in, unsigned int len)
{
	int offset = 0, i;
	u32 mdctrl, mstatus;
	bool completed = false;
	unsigned int count;
	unsigned long start = jiffies;

	while (!completed) {
		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
			completed = true;

		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
			dev_dbg(master->dev, "I3C read timeout\n");
			return -ETIMEDOUT;
		}

		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
		if (offset + count > len) {
			dev_err(master->dev, "I3C receive length too long!\n");
			return -EINVAL;
		}
		for (i = 0; i < count; i++)
			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);

		offset += count;
	}

	return offset;
}
1249
svc_i3c_master_write(struct svc_i3c_master * master,const u8 * out,unsigned int len)1250 static int svc_i3c_master_write(struct svc_i3c_master *master,
1251 const u8 *out, unsigned int len)
1252 {
1253 int offset = 0, ret;
1254 u32 mdctrl;
1255
1256 while (offset < len) {
1257 ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1258 mdctrl,
1259 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1260 0, 1000);
1261 if (ret)
1262 return ret;
1263
1264 /*
1265 * The last byte to be sent over the bus must either have the
1266 * "end" bit set or be written in MWDATABE.
1267 */
1268 if (likely(offset < (len - 1)))
1269 writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1270 else
1271 writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1272 }
1273
1274 return 0;
1275 }
1276
/*
 * Perform one addressed message on the bus: (repeated) START + address,
 * then the data phase, then optionally STOP.
 *
 * @rnw: true for a read, false for a write
 * @xfer_type: SVC_I3C_MCTRL_TYPE_I3C or _I2C
 * @actual_len: in: read termination count (RDTERM); out: bytes read
 * @continued: if true, leave the bus without STOP (more messages follow)
 * @repeat_start: true when this message follows a repeated START, in
 *   which case the address header is not arbitrated and a NACK is final
 *   (retry budget 1 instead of 2)
 *
 * Returns 0 on success or a negative errno; on error the bus is cleaned
 * up (STOP, MERRWARN cleared, FIFOs flushed).
 */
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
			       bool rnw, unsigned int xfer_type, u8 addr,
			       u8 *in, const u8 *out, unsigned int xfer_len,
			       unsigned int *actual_len, bool continued, bool repeat_start)
{
	int retry = repeat_start ? 1 : 2;
	u32 reg;
	int ret;

	/* clean SVC_I3C_MINT_IBIWON w1c bits */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	while (retry--) {
		writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
		       xfer_type |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(rnw) |
		       SVC_I3C_MCTRL_ADDR(addr) |
		       SVC_I3C_MCTRL_RDTERM(*actual_len),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * The entire transaction can consist of multiple write transfers.
		 * Prefilling before EmitStartAddr causes the data to be emitted
		 * immediately, becoming part of the previous transfer.
		 * The only way to work around this hardware issue is to let the
		 * FIFO start filling as soon as possible after EmitStartAddr.
		 */
		if (svc_has_quirk(master, SVC_I3C_QUIRK_FIFO_EMPTY) && !rnw && xfer_len) {
			u32 end = xfer_len > SVC_I3C_FIFO_SIZE ? 0 : SVC_I3C_MWDATAB_END;
			u32 len = min_t(u32, xfer_len, SVC_I3C_FIFO_SIZE);

			/* All but the last prefilled byte go through MWDATAB1 */
			writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
			/* Mark END bit if this is the last byte */
			writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
			xfer_len -= len;
			out += len;
		}

		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
		if (ret)
			goto emit_stop;

		/*
		 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a
		 * Frame with I3C Target Address.
		 *
		 * The I3C Controller normally should start a Frame, the Address may be arbitrated,
		 * and so the Controller shall monitor to see whether an In-Band Interrupt request,
		 * a Controller Role Request (i.e., Secondary Controller requests to become the
		 * Active Controller), or a Hot-Join Request has been made.
		 *
		 * If missed IBIWON check, the wrong data will be return. When IBIWON happen, issue
		 * repeat start. Address arbitrate only happen at START, never happen at REPEAT
		 * start.
		 */
		if (SVC_I3C_MSTATUS_IBIWON(reg)) {
			ret = svc_i3c_master_handle_ibi_won(master, reg);
			if (ret)
				goto emit_stop;
			continue;
		}

		if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
			/*
			 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
			 * If the Controller chooses to start an I3C Message with an I3C Dynamic
			 * Address, then special provisions shall be made because that same I3C
			 * Target may be initiating an IBI or a Controller Role Request. So, one of
			 * three things may happen: (skip 1, 2)
			 *
			 * 3. The Addresses match and the RnW bits also match, and so neither
			 * Controller nor Target will ACK since both are expecting the other side to
			 * provide ACK. As a result, each side might think it had "won" arbitration,
			 * but neither side would continue, as each would subsequently see that the
			 * other did not provide ACK.
			 * ...
			 * For either value of RnW: Due to the NACK, the Controller shall defer the
			 * Private Write or Private Read, and should typically transmit the Target
			 * Address again after a Repeated START (i.e., the next one or any one prior
			 * to a STOP in the Frame). Since the Address Header following a Repeated
			 * START is not arbitrated, the Controller will always win (see Section
			 * 5.1.2.2.4).
			 */
			if (retry && addr != 0x7e) {
				/* Clear the NACK and retry with a repeated START */
				writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
			} else {
				ret = -ENXIO;
				*actual_len = 0;
				goto emit_stop;
			}
		} else {
			break;
		}
	}

	if (rnw)
		ret = svc_i3c_master_read(master, in, xfer_len);
	else
		ret = svc_i3c_master_write(master, out, xfer_len);
	if (ret < 0)
		goto emit_stop;

	/* svc_i3c_master_read() returns the number of bytes received */
	if (rnw)
		*actual_len = ret;

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);

	if (!continued) {
		svc_i3c_master_emit_stop(master);

		/* Wait idle if stop is sent. */
		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
	}

	return 0;

emit_stop:
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_flush_fifo(master);

	return ret;
}
1409
1410 static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master * master,unsigned int ncmds)1411 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1412 {
1413 struct svc_i3c_xfer *xfer;
1414
1415 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1416 if (!xfer)
1417 return NULL;
1418
1419 INIT_LIST_HEAD(&xfer->node);
1420 xfer->ncmds = ncmds;
1421 xfer->ret = -ETIMEDOUT;
1422
1423 return xfer;
1424 }
1425
/* Release a descriptor obtained from svc_i3c_master_alloc_xfer(). */
static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
{
	kfree(xfer);
}
1430
svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1431 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1432 struct svc_i3c_xfer *xfer)
1433 {
1434 if (master->xferqueue.cur == xfer)
1435 master->xferqueue.cur = NULL;
1436 else
1437 list_del_init(&xfer->node);
1438 }
1439
/* Lock-taking wrapper around svc_i3c_master_dequeue_xfer_locked(). */
static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	svc_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
1449
svc_i3c_master_start_xfer_locked(struct svc_i3c_master * master)1450 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1451 {
1452 struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1453 int ret, i;
1454
1455 if (!xfer)
1456 return;
1457
1458 svc_i3c_master_clear_merrwarn(master);
1459 svc_i3c_master_flush_fifo(master);
1460
1461 for (i = 0; i < xfer->ncmds; i++) {
1462 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1463
1464 ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1465 cmd->addr, cmd->in, cmd->out,
1466 cmd->len, &cmd->actual_len,
1467 cmd->continued, i > 0);
1468 /* cmd->xfer is NULL if I2C or CCC transfer */
1469 if (cmd->xfer)
1470 cmd->xfer->actual_len = cmd->actual_len;
1471
1472 if (ret)
1473 break;
1474 }
1475
1476 xfer->ret = ret;
1477 complete(&xfer->comp);
1478
1479 if (ret < 0)
1480 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1481
1482 xfer = list_first_entry_or_null(&master->xferqueue.list,
1483 struct svc_i3c_xfer,
1484 node);
1485 if (xfer)
1486 list_del_init(&xfer->node);
1487
1488 master->xferqueue.cur = xfer;
1489 svc_i3c_master_start_xfer_locked(master);
1490 }
1491
/*
 * Queue @xfer for execution. If the bus is idle the transfer runs
 * synchronously, under the xferqueue spinlock, before this returns;
 * otherwise it is appended to the list and executed when the current
 * transfer chain finishes. Completion is signalled via xfer->comp.
 */
static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;
	int ret;

	/* Keep the controller powered for the duration of the transfer */
	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		svc_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
}
1517
1518 static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller * master,const struct i3c_ccc_cmd * cmd)1519 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1520 const struct i3c_ccc_cmd *cmd)
1521 {
1522 /* No software support for CCC commands targeting more than one slave */
1523 return (cmd->ndests == 1);
1524 }
1525
svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master * master,struct i3c_ccc_cmd * ccc)1526 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1527 struct i3c_ccc_cmd *ccc)
1528 {
1529 unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1530 struct svc_i3c_xfer *xfer;
1531 struct svc_i3c_cmd *cmd;
1532 u8 *buf;
1533 int ret;
1534
1535 xfer = svc_i3c_master_alloc_xfer(master, 1);
1536 if (!xfer)
1537 return -ENOMEM;
1538
1539 buf = kmalloc(xfer_len, GFP_KERNEL);
1540 if (!buf) {
1541 svc_i3c_master_free_xfer(xfer);
1542 return -ENOMEM;
1543 }
1544
1545 buf[0] = ccc->id;
1546 memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1547
1548 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1549
1550 cmd = &xfer->cmds[0];
1551 cmd->addr = ccc->dests[0].addr;
1552 cmd->rnw = ccc->rnw;
1553 cmd->in = NULL;
1554 cmd->out = buf;
1555 cmd->len = xfer_len;
1556 cmd->actual_len = 0;
1557 cmd->continued = false;
1558
1559 mutex_lock(&master->lock);
1560 svc_i3c_master_enqueue_xfer(master, xfer);
1561 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1562 svc_i3c_master_dequeue_xfer(master, xfer);
1563 mutex_unlock(&master->lock);
1564
1565 ret = xfer->ret;
1566 kfree(buf);
1567 svc_i3c_master_free_xfer(xfer);
1568
1569 return ret;
1570 }
1571
/*
 * Send a directed CCC command as a two-message frame: a broadcast write
 * of the CCC id (kept open with a repeated START), then the directed
 * read/write to the target. On a short read, the destination payload
 * length is updated to the number of bytes actually received.
 */
static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	unsigned int xfer_len = ccc->dests[0].payload.len;
	/* For reads, actual_len seeds the RDTERM count in svc_i3c_master_xfer() */
	unsigned int actual_len = ccc->rnw ? xfer_len : 0;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 2);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	/* Broadcasted message */
	cmd = &xfer->cmds[0];
	cmd->addr = I3C_BROADCAST_ADDR;
	cmd->rnw = 0;
	cmd->in = NULL;
	cmd->out = &ccc->id;
	cmd->len = 1;
	cmd->actual_len = 0;
	cmd->continued = true;

	/* Directed message */
	cmd = &xfer->cmds[1];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
	cmd->len = xfer_len;
	cmd->actual_len = actual_len;
	cmd->continued = false;

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	/* Report a short read back to the caller */
	if (cmd->actual_len != xfer_len)
		ccc->dests[0].payload.len = cmd->actual_len;

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}
1621
svc_i3c_master_send_ccc_cmd(struct i3c_master_controller * m,struct i3c_ccc_cmd * cmd)1622 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1623 struct i3c_ccc_cmd *cmd)
1624 {
1625 struct svc_i3c_master *master = to_svc_i3c_master(m);
1626 bool broadcast = cmd->id < 0x80;
1627 int ret;
1628
1629 if (broadcast)
1630 ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1631 else
1632 ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1633
1634 if (ret)
1635 cmd->err = I3C_ERROR_M2;
1636
1637 return ret;
1638 }
1639
/*
 * i3c_master_controller_ops.priv_xfers hook: run @nxfers private I3C
 * messages as one frame (repeated START between messages, STOP after the
 * last). For reads, actual_len is pre-loaded with the requested length
 * so svc_i3c_master_xfer() can program RDTERM; cmd->xfer lets the
 * executor report the real read length back into the caller's descriptor.
 */
static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				     struct i3c_priv_xfer *xfers,
				     int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct svc_i3c_xfer *xfer;
	int ret, i;

	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	for (i = 0; i < nxfers; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->xfer = &xfers[i];
		cmd->addr = master->addrs[data->index];
		cmd->rnw = xfers[i].rnw;
		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
		cmd->len = xfers[i].len;
		cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
		/* All but the last message end on a repeated START */
		cmd->continued = (i + 1) < nxfers;
	}

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}
1680
svc_i3c_master_i2c_xfers(struct i2c_dev_desc * dev,struct i2c_msg * xfers,int nxfers)1681 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1682 struct i2c_msg *xfers,
1683 int nxfers)
1684 {
1685 struct i3c_master_controller *m = i2c_dev_get_master(dev);
1686 struct svc_i3c_master *master = to_svc_i3c_master(m);
1687 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1688 struct svc_i3c_xfer *xfer;
1689 int ret, i;
1690
1691 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1692 if (!xfer)
1693 return -ENOMEM;
1694
1695 xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1696
1697 for (i = 0; i < nxfers; i++) {
1698 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1699
1700 cmd->addr = master->addrs[data->index];
1701 cmd->rnw = xfers[i].flags & I2C_M_RD;
1702 cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1703 cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1704 cmd->len = xfers[i].len;
1705 cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1706 cmd->continued = (i + 1 < nxfers);
1707 }
1708
1709 mutex_lock(&master->lock);
1710 svc_i3c_master_enqueue_xfer(master, xfer);
1711 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1712 svc_i3c_master_dequeue_xfer(master, xfer);
1713 mutex_unlock(&master->lock);
1714
1715 ret = xfer->ret;
1716 svc_i3c_master_free_xfer(xfer);
1717
1718 return ret;
1719 }
1720
/*
 * i3c_master_controller_ops.request_ibi hook: allocate an IBI slot and a
 * generic slot pool for @dev.
 *
 * The payload limit is the hardware FIFO size since IBI data is drained
 * from the FIFO. Returns 0 on success, -ERANGE for oversized payloads,
 * -ENOSPC when all IBI slots are taken, or a pool allocation error.
 */
static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				      const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
		dev_err(master->dev, "IBI max payload %d should be < %d\n",
			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
		return -ERANGE;
	}

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	/* Claim the first free IBI slot, if any */
	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	/* No slot found: roll back the pool allocation */
	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}
1758
svc_i3c_master_free_ibi(struct i3c_dev_desc * dev)1759 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1760 {
1761 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1762 struct svc_i3c_master *master = to_svc_i3c_master(m);
1763 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1764 unsigned long flags;
1765
1766 spin_lock_irqsave(&master->ibi.lock, flags);
1767 master->ibi.slots[data->ibi] = NULL;
1768 data->ibi = -1;
1769 spin_unlock_irqrestore(&master->ibi.lock, flags);
1770
1771 i3c_generic_ibi_free_pool(data->ibi_pool);
1772 }
1773
svc_i3c_master_enable_ibi(struct i3c_dev_desc * dev)1774 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1775 {
1776 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1777 struct svc_i3c_master *master = to_svc_i3c_master(m);
1778 int ret;
1779
1780 ret = pm_runtime_resume_and_get(master->dev);
1781 if (ret < 0) {
1782 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1783 return ret;
1784 }
1785
1786 master->enabled_events++;
1787 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1788
1789 return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1790 }
1791
svc_i3c_master_disable_ibi(struct i3c_dev_desc * dev)1792 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1793 {
1794 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1795 struct svc_i3c_master *master = to_svc_i3c_master(m);
1796 int ret;
1797
1798 master->enabled_events--;
1799 if (!master->enabled_events)
1800 svc_i3c_master_disable_interrupts(master);
1801
1802 ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1803
1804 pm_runtime_mark_last_busy(master->dev);
1805 pm_runtime_put_autosuspend(master->dev);
1806
1807 return ret;
1808 }
1809
svc_i3c_master_enable_hotjoin(struct i3c_master_controller * m)1810 static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
1811 {
1812 struct svc_i3c_master *master = to_svc_i3c_master(m);
1813 int ret;
1814
1815 ret = pm_runtime_resume_and_get(master->dev);
1816 if (ret < 0) {
1817 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1818 return ret;
1819 }
1820
1821 master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;
1822
1823 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1824
1825 return 0;
1826 }
1827
svc_i3c_master_disable_hotjoin(struct i3c_master_controller * m)1828 static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
1829 {
1830 struct svc_i3c_master *master = to_svc_i3c_master(m);
1831
1832 master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
1833
1834 if (!master->enabled_events)
1835 svc_i3c_master_disable_interrupts(master);
1836
1837 pm_runtime_mark_last_busy(master->dev);
1838 pm_runtime_put_autosuspend(master->dev);
1839
1840 return 0;
1841 }
1842
/* Return a consumed IBI slot to the device's generic slot pool. */
static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					    struct i3c_ibi_slot *slot)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}
1850
/* Controller operations registered with the I3C core */
static const struct i3c_master_controller_ops svc_i3c_master_ops = {
	.bus_init = svc_i3c_master_bus_init,
	.bus_cleanup = svc_i3c_master_bus_cleanup,
	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
	.do_daa = svc_i3c_master_do_daa,
	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
	.priv_xfers = svc_i3c_master_priv_xfers,
	.i2c_xfers = svc_i3c_master_i2c_xfers,
	.request_ibi = svc_i3c_master_request_ibi,
	.free_ibi = svc_i3c_master_free_ibi,
	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
	.enable_ibi = svc_i3c_master_enable_ibi,
	.disable_ibi = svc_i3c_master_disable_ibi,
	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
	.set_speed = svc_i3c_master_set_speed,
};
1873
/*
 * Platform probe: map registers, acquire clocks and IRQ, set up the
 * transfer queue and IBI bookkeeping, enable runtime PM and register the
 * controller with the I3C core. Error paths unwind PM then clocks.
 */
static int svc_i3c_master_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct svc_i3c_master *master;
	int ret, i;

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->drvdata = of_device_get_match_data(dev);
	if (!master->drvdata)
		return -EINVAL;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->num_clks = devm_clk_bulk_get_all(dev, &master->clks);
	if (master->num_clks < 0)
		return dev_err_probe(dev, -EINVAL, "can't get I3C clocks\n");

	/* Locate the "fast_clk" entry: it drives the I3C timing dividers */
	for (i = 0; i < master->num_clks; i++) {
		if (!strcmp(master->clks[i].id, "fast_clk"))
			break;
	}

	if (i == master->num_clks)
		return dev_err_probe(dev, -EINVAL,
				     "can't get I3C peripheral clock\n");

	master->fclk = master->clks[i].clk;
	if (IS_ERR(master->fclk))
		return PTR_ERR(master->fclk);

	master->irq = platform_get_irq(pdev, 0);
	if (master->irq < 0)
		return master->irq;

	master->dev = dev;
	ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
	if (ret)
		return dev_err_probe(dev, ret, "can't enable I3C clocks\n");

	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
	mutex_init(&master->lock);

	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
	if (ret)
		goto err_disable_clks;

	/* All device slots start out free */
	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	platform_set_drvdata(pdev, master);

	/* Device is already powered up: tell runtime PM and hold it active */
	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	svc_i3c_master_reset(master);

	/* Register the master */
	ret = i3c_master_register(&master->base, &pdev->dev,
				  &svc_i3c_master_ops, false);
	if (ret)
		goto rpm_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

rpm_disable:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

err_disable_clks:
	clk_bulk_disable_unprepare(master->num_clks, master->clks);

	return ret;
}
1973
svc_i3c_master_remove(struct platform_device * pdev)1974 static void svc_i3c_master_remove(struct platform_device *pdev)
1975 {
1976 struct svc_i3c_master *master = platform_get_drvdata(pdev);
1977
1978 cancel_work_sync(&master->hj_work);
1979 i3c_master_unregister(&master->base);
1980
1981 pm_runtime_dont_use_autosuspend(&pdev->dev);
1982 pm_runtime_disable(&pdev->dev);
1983 }
1984
svc_i3c_save_regs(struct svc_i3c_master * master)1985 static void svc_i3c_save_regs(struct svc_i3c_master *master)
1986 {
1987 master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
1988 master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
1989 }
1990
svc_i3c_restore_regs(struct svc_i3c_master * master)1991 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
1992 {
1993 if (readl(master->regs + SVC_I3C_MDYNADDR) !=
1994 master->saved_regs.mdynaddr) {
1995 writel(master->saved_regs.mconfig,
1996 master->regs + SVC_I3C_MCONFIG);
1997 writel(master->saved_regs.mdynaddr,
1998 master->regs + SVC_I3C_MDYNADDR);
1999 }
2000 }
2001
/*
 * Runtime suspend: save the registers restored on resume, gate the clocks
 * and move the pins to their sleep pinctrl state. Order matters: registers
 * must be read while the clocks are still running.
 */
static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	svc_i3c_save_regs(master);
	clk_bulk_disable_unprepare(master->num_clks, master->clks);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
2012
svc_i3c_runtime_resume(struct device * dev)2013 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
2014 {
2015 struct svc_i3c_master *master = dev_get_drvdata(dev);
2016 int ret;
2017
2018 pinctrl_pm_select_default_state(dev);
2019 ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
2020 if (ret)
2021 return ret;
2022
2023 svc_i3c_restore_regs(master);
2024
2025 return 0;
2026 }
2027
/*
 * System sleep is handled in the noirq phase by forcing a runtime
 * suspend/resume, so both paths share the runtime PM callbacks above.
 */
static const struct dev_pm_ops svc_i3c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
			   svc_i3c_runtime_resume, NULL)
};
2034
/* Nuvoton NPCM845 integration of the IP needs several quirk workarounds. */
static const struct svc_i3c_drvdata npcm845_drvdata = {
	.quirks = SVC_I3C_QUIRK_FIFO_EMPTY |
		  SVC_I3C_QUIRK_FALSE_SLVSTART |
		  SVC_I3C_QUIRK_DAA_CORRUPT,
};

/* Plain Silvaco IP: no quirks needed. */
static const struct svc_i3c_drvdata svc_default_drvdata = {};
2042
/* DT match table; .data selects the per-SoC quirk set used in probe. */
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
	{ .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
	{ .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
2049
/* Platform driver glue and module metadata. */
static struct platform_driver svc_i3c_master = {
	.probe = svc_i3c_master_probe,
	.remove = svc_i3c_master_remove,
	.driver = {
		.name = "silvaco-i3c-master",
		.of_match_table = svc_i3c_master_of_match_tbl,
		.pm = &svc_i3c_pm_ops,
	},
};
module_platform_driver(svc_i3c_master);

MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");
2065