// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Cadence Design Systems Inc.
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "../internals.h"

#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034

#define CONF_STATUS0 0x4
#define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
#define CONF_STATUS0_ECC_CHK BIT(28)
#define CONF_STATUS0_INTEG_CHK BIT(27)
#define CONF_STATUS0_CSR_DAP_CHK BIT(26)
#define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
#define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
#define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
#define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
#define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 6))
#define CONF_STATUS0_SUPPORTS_DDR BIT(5)
#define CONF_STATUS0_SEC_MASTER BIT(4)
#define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))

#define CONF_STATUS1 0x8
#define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
#define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
#define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
#define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
#define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
#define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
#define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))

#define REV_ID 0xc
#define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
#define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
#define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
#define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))

#define CTRL 0x10
#define CTRL_DEV_EN BIT(31)
#define CTRL_HALT_EN BIT(30)
#define CTRL_MCS BIT(29)
#define CTRL_MCS_EN BIT(28)
#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
#define CTRL_HJ_DISEC BIT(8)
#define CTRL_MST_ACK BIT(7)
#define CTRL_HJ_ACK BIT(6)
#define CTRL_HJ_INIT BIT(5)
#define CTRL_MST_INIT BIT(4)
#define CTRL_AHDR_OPT BIT(3)
#define CTRL_PURE_BUS_MODE 0
#define CTRL_MIXED_FAST_BUS_MODE 2
#define CTRL_MIXED_SLOW_BUS_MODE 3
#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
#define THD_DELAY_MAX 3

#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
#define PRESCL_CTRL0_I3C(x) (x)
#define PRESCL_CTRL0_I3C_MAX GENMASK(9, 0)
#define PRESCL_CTRL0_I2C_MAX GENMASK(15, 0)

#define PRESCL_CTRL1 0x18
#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
#define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
#define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
#define PRESCL_CTRL1_OD_LOW(x) (x)

#define MST_IER 0x20
#define MST_IDR 0x24
#define MST_IMR 0x28
#define MST_ICR 0x2c
#define MST_ISR 0x30
#define MST_INT_HALTED BIT(18)
#define MST_INT_MR_DONE BIT(17)
#define MST_INT_IMM_COMP BIT(16)
#define MST_INT_TX_THR BIT(15)
#define MST_INT_TX_OVF BIT(14)
#define MST_INT_IBID_THR BIT(12)
#define MST_INT_IBID_UNF BIT(11)
#define MST_INT_IBIR_THR BIT(10)
#define MST_INT_IBIR_UNF BIT(9)
#define MST_INT_IBIR_OVF BIT(8)
#define MST_INT_RX_THR BIT(7)
#define MST_INT_RX_UNF BIT(6)
#define MST_INT_CMDD_EMP BIT(5)
#define MST_INT_CMDD_THR BIT(4)
#define MST_INT_CMDD_OVF BIT(3)
#define MST_INT_CMDR_THR BIT(2)
#define MST_INT_CMDR_UNF BIT(1)
#define MST_INT_CMDR_OVF BIT(0)

#define MST_STATUS0 0x34
#define MST_STATUS0_IDLE BIT(18)
#define MST_STATUS0_HALTED BIT(17)
#define MST_STATUS0_MASTER_MODE BIT(16)
#define MST_STATUS0_TX_FULL BIT(13)
#define MST_STATUS0_IBID_FULL BIT(12)
#define MST_STATUS0_IBIR_FULL BIT(11)
#define MST_STATUS0_RX_FULL BIT(10)
#define MST_STATUS0_CMDD_FULL BIT(9)
#define MST_STATUS0_CMDR_FULL BIT(8)
#define MST_STATUS0_TX_EMP BIT(5)
#define MST_STATUS0_IBID_EMP BIT(4)
#define MST_STATUS0_IBIR_EMP BIT(3)
#define MST_STATUS0_RX_EMP BIT(2)
#define MST_STATUS0_CMDD_EMP BIT(1)
#define MST_STATUS0_CMDR_EMP BIT(0)

#define CMDR 0x38
#define CMDR_NO_ERROR 0
#define CMDR_DDR_PREAMBLE_ERROR 1
#define CMDR_DDR_PARITY_ERROR 2
#define CMDR_DDR_RX_FIFO_OVF 3
#define CMDR_DDR_TX_FIFO_UNF 4
#define CMDR_M0_ERROR 5
#define CMDR_M1_ERROR 6
#define CMDR_M2_ERROR 7
#define CMDR_MST_ABORT 8
#define CMDR_NACK_RESP 9
#define CMDR_INVALID_DA 10
#define CMDR_DDR_DROPPED 11
#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
#define CMDR_CMDID_HJACK_DISEC 0xfe
#define CMDR_CMDID_HJACK_ENTDAA 0xff
#define CMDR_CMDID(x) ((x) & GENMASK(7, 0))

#define IBIR 0x3c
#define IBIR_ACKED BIT(12)
#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
#define IBIR_ERROR BIT(7)
#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
#define IBIR_TYPE_IBI 0
#define IBIR_TYPE_HJ 1
#define IBIR_TYPE_MR 2
#define IBIR_TYPE(x) ((x) & GENMASK(1, 0))

#define SLV_IER 0x40
#define SLV_IDR 0x44
#define SLV_IMR 0x48
#define SLV_ICR 0x4c
#define SLV_ISR 0x50
#define SLV_INT_TM BIT(20)
#define SLV_INT_ERROR BIT(19)
#define SLV_INT_EVENT_UP BIT(18)
#define SLV_INT_HJ_DONE BIT(17)
#define SLV_INT_MR_DONE BIT(16)
#define SLV_INT_DA_UPD BIT(15)
#define SLV_INT_SDR_FAIL BIT(14)
#define SLV_INT_DDR_FAIL BIT(13)
#define SLV_INT_M_RD_ABORT BIT(12)
#define SLV_INT_DDR_RX_THR BIT(11)
#define SLV_INT_DDR_TX_THR BIT(10)
#define SLV_INT_SDR_RX_THR BIT(9)
#define SLV_INT_SDR_TX_THR BIT(8)
#define SLV_INT_DDR_RX_UNF BIT(7)
#define SLV_INT_DDR_TX_OVF BIT(6)
#define SLV_INT_SDR_RX_UNF BIT(5)
#define SLV_INT_SDR_TX_OVF BIT(4)
#define SLV_INT_DDR_RD_COMP BIT(3)
#define SLV_INT_DDR_WR_COMP BIT(2)
#define SLV_INT_SDR_RD_COMP BIT(1)
#define SLV_INT_SDR_WR_COMP BIT(0)

#define SLV_STATUS0 0x54
#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))

#define SLV_STATUS1 0x58
#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
#define SLV_STATUS1_VEN_TM BIT(19)
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
#define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
#define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
#define SLV_STATUS1_SDR_RX_FULL BIT(3)
#define SLV_STATUS1_SDR_TX_FULL BIT(2)
#define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
#define SLV_STATUS1_SDR_TX_EMPTY BIT(0)

#define CMD0_FIFO 0x60
#define CMD0_FIFO_IS_DDR BIT(31)
#define CMD0_FIFO_IS_CCC BIT(30)
#define CMD0_FIFO_BCH BIT(29)
#define XMIT_BURST_STATIC_SUBADDR 0
#define XMIT_SINGLE_INC_SUBADDR 1
#define XMIT_SINGLE_STATIC_SUBADDR 2
#define XMIT_BURST_WITHOUT_SUBADDR 3
#define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
#define CMD0_FIFO_SBCA BIT(26)
#define CMD0_FIFO_RSBC BIT(25)
#define CMD0_FIFO_IS_10B BIT(24)
#define CMD0_FIFO_PL_LEN(l) ((l) << 12)
#define CMD0_FIFO_PL_LEN_MAX 4095
#define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
#define CMD0_FIFO_RNW BIT(0)

#define CMD1_FIFO 0x64
#define CMD1_FIFO_CMDID(id) ((id) << 24)
#define CMD1_FIFO_CSRADDR(a) (a)
#define CMD1_FIFO_CCC(id) (id)

#define TX_FIFO 0x68

#define IMD_CMD0 0x70
#define IMD_CMD0_PL_LEN(l) ((l) << 12)
#define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
#define IMD_CMD0_RNW BIT(0)

#define IMD_CMD1 0x74
#define IMD_CMD1_CCC(id) (id)

#define IMD_DATA 0x78
#define RX_FIFO 0x80
#define IBI_DATA_FIFO 0x84
#define SLV_DDR_TX_FIFO 0x88
#define SLV_DDR_RX_FIFO 0x8c

#define CMD_IBI_THR_CTRL 0x90
#define IBIR_THR(t) ((t) << 24)
#define CMDR_THR(t) ((t) << 16)
#define IBI_THR(t) ((t) << 8)
#define CMD_THR(t) (t)

#define TX_RX_THR_CTRL 0x94
#define RX_THR(t) ((t) << 16)
#define TX_THR(t) (t)

#define SLV_DDR_TX_RX_THR_CTRL 0x98
#define SLV_DDR_RX_THR(t) ((t) << 16)
#define SLV_DDR_TX_THR(t) (t)

#define FLUSH_CTRL 0x9c
#define FLUSH_IBI_RESP BIT(23)
#define FLUSH_CMD_RESP BIT(22)
#define FLUSH_SLV_DDR_RX_FIFO BIT(22)
#define FLUSH_SLV_DDR_TX_FIFO BIT(21)
#define FLUSH_IMM_FIFO BIT(20)
#define FLUSH_IBI_FIFO BIT(19)
#define FLUSH_RX_FIFO BIT(18)
#define FLUSH_TX_FIFO BIT(17)
#define FLUSH_CMD_FIFO BIT(16)

#define TTO_PRESCL_CTRL0 0xb0
#define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL0_DIVA(x) (x)

#define TTO_PRESCL_CTRL1 0xb4
#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL1_DIVA(x) (x)

#define DEVS_CTRL 0xb8
#define DEVS_CTRL_DEV_CLR_SHIFT 16
#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
#define MAX_DEVS 16

#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
#define DEV_ID_RR0_HDR_CAP BIT(10)
#define DEV_ID_RR0_IS_I3C BIT(9)
#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
				    (((a) & GENMASK(9, 7)) << 6))
#define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
				    (((x) >> 6) & GENMASK(9, 7)))

#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
#define DEV_ID_RR1_PID_MSB(pid) (pid)

#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
#define DEV_ID_RR2_DCR(dcr) (dcr)
#define DEV_ID_RR2_LVR(lvr) (lvr)

#define SIR_MAP(x) (0x180 + ((x) * 4))
#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
#define DEV_ROLE_SLAVE 0
#define DEV_ROLE_MASTER 1
#define SIR_MAP_DEV_ROLE(role) ((role) << 14)
#define SIR_MAP_DEV_SLOW BIT(13)
#define SIR_MAP_DEV_PL(l) ((l) << 8)
#define SIR_MAP_PL_MAX GENMASK(4, 0)
#define SIR_MAP_DEV_DA(a) ((a) << 1)
#define SIR_MAP_DEV_ACK BIT(0)

#define GPIR_WORD(x) (0x200 + ((x) * 4))
#define GPI_REG(val, id) \
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define GPOR_WORD(x) (0x220 + ((x) * 4))
#define GPO_REG(val, id) \
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define ASF_INT_STATUS 0x300
#define ASF_INT_RAW_STATUS 0x304
#define ASF_INT_MASK 0x308
#define ASF_INT_TEST 0x30c
#define ASF_INT_FATAL_SELECT 0x310
#define ASF_INTEGRITY_ERR BIT(6)
#define ASF_PROTOCOL_ERR BIT(5)
#define ASF_TRANS_TIMEOUT_ERR BIT(4)
#define ASF_CSR_ERR BIT(3)
#define ASF_DAP_ERR BIT(2)
#define ASF_SRAM_UNCORR_ERR BIT(1)
#define ASF_SRAM_CORR_ERR BIT(0)

#define ASF_SRAM_CORR_FAULT_STATUS 0x320
#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))

#define ASF_SRAM_FAULT_STATS 0x328
#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))

#define ASF_TRANS_TOUT_CTRL 0x330
#define ASF_TRANS_TOUT_EN BIT(31)
#define ASF_TRANS_TOUT_VAL(x) (x)

#define ASF_TRANS_TOUT_FAULT_MASK 0x334
#define ASF_TRANS_TOUT_FAULT_STATUS 0x338
#define ASF_TRANS_TOUT_FAULT_APB BIT(3)
#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)

#define ASF_PROTO_FAULT_MASK 0x340
#define ASF_PROTO_FAULT_STATUS 0x344
#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
#define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
#define ASF_PROTO_FAULT_M(x) BIT(x)

struct cdns_i3c_master_caps {
	u32 cmdfifodepth;
	u32 cmdrfifodepth;
	u32 txfifodepth;
	u32 rxfifodepth;
	u32 ibirfifodepth;
};

struct cdns_i3c_cmd {
	u32 cmd0;
	u32 cmd1;
	u32 tx_len;
	const void *tx_buf;
	u32 rx_len;
	void *rx_buf;
	u32 error;
};

struct cdns_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int ncmds;
	struct cdns_i3c_cmd cmds[] __counted_by(ncmds);
};

struct cdns_i3c_data {
	u8 thd_delay_ns;
};

struct cdns_i3c_master {
	struct work_struct hj_work;
	struct i3c_master_controller base;
	u32 free_rr_slots;
	unsigned int maxdevs;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		spinlock_t lock;
	} ibi;
	struct {
		struct list_head list;
		struct cdns_i3c_xfer *cur;
		spinlock_t lock;
	} xferqueue;
	void __iomem *regs;
	struct clk *sysclk;
	struct cdns_i3c_master_caps caps;
	unsigned long i3c_scl_lim;
	const struct cdns_i3c_data *devdata;
};

static inline struct cdns_i3c_master *
to_cdns_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct cdns_i3c_master, base);
}

static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
					  const u8 *bytes, int nbytes)
{
	i3c_writel_fifo(master->regs + TX_FIFO, bytes, nbytes);
}

static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
					    u8 *bytes, int nbytes)
{
	i3c_readl_fifo(master->regs + RX_FIFO, bytes, nbytes);
}

static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					     const struct i3c_ccc_cmd *cmd)
{
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_DEFSLVS:
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETACCMST:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		break;
	}

	return false;
}

static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
{
	u32 status;

	writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);

	return readl_poll_timeout(master->regs + MST_STATUS0, status,
				  status & MST_STATUS0_IDLE, 10, 1000000);
}

static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
{
	writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
}

static struct cdns_i3c_xfer *
cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
{
	struct cdns_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
{
	kfree(xfer);
}

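/*
 * Descriptive note (added for clarity, derived from the code below): the
 * controller processes one cdns_i3c_xfer at a time. xferqueue.cur is the
 * transfer whose commands are currently pushed to the CMD/TX FIFOs, and
 * xferqueue.list holds the transfers queued behind it; both are protected
 * by xferqueue.lock. Completion is signalled from the interrupt handler
 * once the command FIFO drains (MST_INT_CMDD_EMP).
 */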
static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;

	if (!xfer)
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
					      cmd->tx_len);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
		       master->regs + CMD1_FIFO);
		writel(cmd->cmd0, master->regs + CMD0_FIFO);
	}

	writel(readl(master->regs + CTRL) | CTRL_MCS,
	       master->regs + CTRL);
	writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
}

static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
					    u32 isr)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 status0;

	if (!xfer)
		return;

	if (!(isr & MST_INT_CMDD_EMP))
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_CMDR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		struct cdns_i3c_cmd *cmd;
		u32 cmdr, rx_len, id;

		cmdr = readl(master->regs + CMDR);
		id = CMDR_CMDID(cmdr);
		if (id == CMDR_CMDID_HJACK_DISEC ||
		    id == CMDR_CMDID_HJACK_ENTDAA ||
		    WARN_ON(id >= xfer->ncmds))
			continue;

		cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
		rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
		cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
		cmd->error = CMDR_ERROR(cmdr);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		switch (xfer->cmds[i].error) {
		case CMDR_NO_ERROR:
			break;

		case CMDR_DDR_PREAMBLE_ERROR:
		case CMDR_DDR_PARITY_ERROR:
		case CMDR_M0_ERROR:
		case CMDR_M1_ERROR:
		case CMDR_M2_ERROR:
		case CMDR_MST_ABORT:
		case CMDR_NACK_RESP:
		case CMDR_DDR_DROPPED:
			ret = -EIO;
			break;

		case CMDR_DDR_RX_FIFO_OVF:
		case CMDR_DDR_TX_FIFO_UNF:
			ret = -ENOSPC;
			break;

		case CMDR_INVALID_DA:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct cdns_i3c_xfer, node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	cdns_i3c_master_start_xfer_locked(master);
}

static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
				       struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		cdns_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
					 struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur == xfer) {
		u32 status;

		writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
		       master->regs + CTRL);
		readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
					  status & MST_STATUS0_IDLE, 10,
					  1000000);
		master->xferqueue.cur = NULL;
		writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
		       FLUSH_CMD_RESP,
		       master->regs + FLUSH_CTRL);
		writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
		writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
		       master->regs + CTRL);
	} else {
		list_del_init(&xfer->node);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
{
	switch (cmd->error) {
	case CMDR_M0_ERROR:
		return I3C_ERROR_M0;

	case CMDR_M1_ERROR:
		return I3C_ERROR_M1;

	case CMDR_M2_ERROR:
	case CMDR_NACK_RESP:
		return I3C_ERROR_M2;

	default:
		break;
	}

	return I3C_ERROR_UNKNOWN;
}

static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
					struct i3c_ccc_cmd *cmd)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_xfer *xfer;
	struct cdns_i3c_cmd *ccmd;
	int ret;

	xfer = cdns_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	ccmd = xfer->cmds;
	ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
	ccmd->cmd0 = CMD0_FIFO_IS_CCC |
		     CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);

	if (cmd->id & I3C_CCC_DIRECT)
		ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);

	if (cmd->rnw) {
		ccmd->cmd0 |= CMD0_FIFO_RNW;
		ccmd->rx_buf = cmd->dests[0].payload.data;
		ccmd->rx_len = cmd->dests[0].payload.len;
	} else {
		ccmd->tx_buf = cmd->dests[0].payload.data;
		ccmd->tx_len = cmd->dests[0].payload.len;
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				      struct i3c_priv_xfer *xfers,
				      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	int txslots = 0, rxslots = 0, i, ret;
	struct cdns_i3c_xfer *cdns_xfer;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -EOPNOTSUPP;
	}

	if (!nxfers)
		return 0;

	if (nxfers > master->caps.cmdfifodepth ||
	    nxfers > master->caps.cmdrfifodepth)
		return -EOPNOTSUPP;

	/*
	 * First make sure that all transactions (block of transfers separated
	 * by a STOP marker) fit in the FIFOs.
	 */
	for (i = 0; i < nxfers; i++) {
		if (xfers[i].rnw)
			rxslots += DIV_ROUND_UP(xfers[i].len, 4);
		else
			txslots += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (rxslots > master->caps.rxfifodepth ||
	    txslots > master->caps.txfifodepth)
		return -EOPNOTSUPP;

	cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!cdns_xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
		u32 pl_len = xfers[i].len;

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].rnw) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].data.in;
			ccmd->rx_len = xfers[i].len;
			pl_len++;
		} else {
			ccmd->tx_buf = xfers[i].data.out;
			ccmd->tx_len = xfers[i].len;
		}

		ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);

		if (i < nxfers - 1)
			ccmd->cmd0 |= CMD0_FIFO_RSBC;

		if (!i)
			ccmd->cmd0 |= CMD0_FIFO_BCH;
	}

	cdns_i3c_master_queue_xfer(master, cdns_xfer);
	if (!wait_for_completion_timeout(&cdns_xfer->comp,
					 msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, cdns_xfer);

	ret = cdns_xfer->ret;

	for (i = 0; i < nxfers; i++)
		xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);

	cdns_i3c_master_free_xfer(cdns_xfer);

	return ret;
}

static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				     struct i2c_msg *xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct cdns_i3c_xfer *xfer;
	int i, ret = 0;

	if (nxfers > master->caps.cmdfifodepth)
		return -EOPNOTSUPP;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -EOPNOTSUPP;

		if (xfers[i].flags & I2C_M_RD)
			nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
		else
			ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (ntxwords > master->caps.txfifodepth ||
	    nrxwords > master->caps.rxfifodepth)
		return -EOPNOTSUPP;

	xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
			CMD0_FIFO_PL_LEN(xfers[i].len) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].flags & I2C_M_TEN)
			ccmd->cmd0 |= CMD0_FIFO_IS_10B;

		if (xfers[i].flags & I2C_M_RD) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].buf;
			ccmd->rx_len = xfers[i].len;
		} else {
			ccmd->tx_buf = xfers[i].buf;
			ccmd->tx_len = xfers[i].len;
		}
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

struct cdns_i3c_i2c_dev_data {
	u16 id;
	s16 ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};

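/*
 * Descriptive note (added for clarity, derived from the helper below):
 * retaining register 0 (DEV_ID_RR0) stores the device address with the
 * 7-bit address in bits [7:1], the extra bits of an extended (10-bit)
 * address in bits [15:13], and an odd-parity bit over addr[6:0] in bit 0.
 */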
static u32 prepare_rr0_dev_address(u32 addr)
{
	u32 ret = (addr << 1) & 0xff;

	/* RR0[7:1] = addr[6:0] */
	ret |= (addr & GENMASK(6, 0)) << 1;

	/* RR0[15:13] = addr[9:7] */
	ret |= (addr & GENMASK(9, 7)) << 6;

	/* RR0[0] = ~XOR(addr[6:0]) */
	ret |= parity8(addr & 0x7f) ? 0 : BIT(0);

	return ret;
}

static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	u32 rr;

	rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
				     dev->info.dyn_addr :
				     dev->info.static_addr);
	writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
}

static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
					u8 dyn_addr)
{
	unsigned long activedevs;
	u32 rr;
	int i;

	if (!dyn_addr) {
		if (!master->free_rr_slots)
			return -ENOSPC;

		return ffs(master->free_rr_slots) - 1;
	}

	activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	activedevs &= ~BIT(0);

	for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
		rr = readl(master->regs + DEV_ID_RR0(i));
		if (!(rr & DEV_ID_RR0_IS_I3C) ||
		    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
			continue;

		return i;
	}

	return -EINVAL;
}

static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					    u8 old_dyn_addr)
{
	cdns_i3c_master_upd_i3c_addr(dev);

	return 0;
}

static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
	if (slot < 0) {
		kfree(data);
		return slot;
	}

	data->ibi = -1;
	data->id = slot;
	i3c_dev_set_master_data(dev, data);
	master->free_rr_slots &= ~BIT(slot);

	if (!dev->info.dyn_addr) {
		cdns_i3c_master_upd_i3c_addr(dev);
		writel(readl(master->regs + DEVS_CTRL) |
		       DEVS_CTRL_DEV_ACTIVE(data->id),
		       master->regs + DEVS_CTRL);
	}

	return 0;
}

static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);

	i3c_dev_set_master_data(dev, NULL);
	master->free_rr_slots |= BIT(data->id);
	kfree(data);
}

static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	slot = cdns_i3c_master_get_rr_slot(master, 0);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->id = slot;
	master->free_rr_slots &= ~BIT(slot);
	i2c_dev_set_master_data(dev, data);

	writel(prepare_rr0_dev_address(dev->addr),
	       master->regs + DEV_ID_RR0(data->id));
	writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_ACTIVE(data->id),
	       master->regs + DEVS_CTRL);

	return 0;
}

static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);
	master->free_rr_slots |= BIT(data->id);

	i2c_dev_set_master_data(dev, NULL);
	kfree(data);
}

static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);

	cdns_i3c_master_disable(master);
}

static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
					   unsigned int slot,
					   struct i3c_device_info *info)
{
	u32 rr;

	memset(info, 0, sizeof(*info));
	rr = readl(master->regs + DEV_ID_RR0(slot));
	info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
	rr = readl(master->regs + DEV_ID_RR2(slot));
	info->dcr = rr;
	info->bcr = rr >> 8;
	info->pid = rr >> 16;
	info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
}

static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
{
	struct i3c_master_controller *m = &master->base;
	unsigned long i3c_lim_period, pres_step, ncycles;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	unsigned long new_i3c_scl_lim = 0;
	struct i3c_dev_desc *dev;
	u32 prescl1, ctrl;

	i3c_bus_for_each_i3cdev(bus, dev) {
		unsigned long max_fscl;

		max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
			       I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
		switch (max_fscl) {
		case I3C_SDR1_FSCL_8MHZ:
			max_fscl = 8000000;
			break;
		case I3C_SDR2_FSCL_6MHZ:
			max_fscl = 6000000;
			break;
		case I3C_SDR3_FSCL_4MHZ:
			max_fscl = 4000000;
			break;
		case I3C_SDR4_FSCL_2MHZ:
			max_fscl = 2000000;
			break;
		case I3C_SDR0_FSCL_MAX:
		default:
			max_fscl = 0;
			break;
		}

		if (max_fscl &&
		    (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
			new_i3c_scl_lim = max_fscl;
	}

	/* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
	if (new_i3c_scl_lim == master->i3c_scl_lim)
		return;
	master->i3c_scl_lim = new_i3c_scl_lim;
	if (!new_i3c_scl_lim)
		return;
	pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);

	/* Configure PP_LOW to meet I3C slave limitations. */
	prescl1 = readl(master->regs + PRESCL_CTRL1) &
		  ~PRESCL_CTRL1_PP_LOW_MASK;
	ctrl = readl(master->regs + CTRL);

	i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
	ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
	if (ncycles < 4)
		ncycles = 0;
	else
		ncycles -= 4;

	prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);

	/* Disable I3C master before updating PRESCL_CTRL1. */
	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_disable(master);

	writel(prescl1, master->regs + PRESCL_CTRL1);

	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_enable(master);
}

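/*
 * Descriptive note (added for clarity, derived from the code below): the DAA
 * flow pre-programs every free retaining-register slot with a candidate
 * dynamic address, lets the controller run ENTDAA, registers the devices
 * that actually took an address, and finally clears the slots that remained
 * unused.
 */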
static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long olddevs, newdevs;
	int ret, slot;
	u8 addrs[MAX_DEVS] = { };
	u8 last_addr = 0;

	olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	olddevs |= BIT(0);

	/* Prepare RR slots before launching DAA. */
	for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
		ret = i3c_master_get_free_addr(m, last_addr + 1);
		if (ret < 0)
			return -ENOSPC;

		last_addr = ret;
		addrs[slot] = last_addr;
		writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
		       master->regs + DEV_ID_RR0(slot));
		writel(0, master->regs + DEV_ID_RR1(slot));
		writel(0, master->regs + DEV_ID_RR2(slot));
	}

	ret = i3c_master_entdaa_locked(&master->base);
	if (ret && ret != I3C_ERROR_M2)
		return ret;

	newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	newdevs &= ~olddevs;

	/*
	 * Register all devices that got a dynamic address during DAA. We
	 * already have the addresses assigned to them in the addrs array.
	 */
	for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
		i3c_master_add_i3c_dev_locked(m, addrs[slot]);

	/*
	 * Clear slots that ended up not being used. Can be caused by I3C
	 * device creation failure or when the I3C device was already known
	 * by the system but with a different address (in this case the device
	 * already has a slot and does not need a new one).
	 */
	writel(readl(master->regs + DEVS_CTRL) |
	       master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
	       master->regs + DEVS_CTRL);

	i3c_master_defslvs_locked(&master->base);

	cdns_i3c_master_upd_i3c_scl_lim(master);

	/* Unmask Hot-Join and Mastership request interrupts. */
	i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
			       I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);

	return 0;
}

static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
{
	unsigned long sysclk_rate = clk_get_rate(master->sysclk);
	u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
				    (NSEC_PER_SEC / sysclk_rate));

	/* Every value greater than 3 is not valid. */
	if (thd_delay > THD_DELAY_MAX)
		thd_delay = THD_DELAY_MAX;

	/* CTLR_THD_DEL value is encoded. */
	return (THD_DELAY_MAX - thd_delay);
}

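/*
 * Descriptive note (added for clarity, derived from the code below): the I3C
 * SCL rate is derived from sysclk with a 4x prescaler step
 * (scl_rate.i3c = sysclk / ((pres + 1) * 4)) and the I2C SCL rate with a 5x
 * step (scl_rate.i2c = sysclk / ((pres + 1) * 5)); the selected prescaler
 * values go to PRESCL_CTRL0 and the effective rates are written back into
 * the bus object.
 */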
static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long pres_step, sysclk_rate, max_i2cfreq;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	u32 ctrl, prescl0, prescl1, pres, low;
	struct i3c_device_info info = { };
	int ret, ncycles;

	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		ctrl = CTRL_PURE_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_FAST:
		ctrl = CTRL_MIXED_FAST_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_SLOW:
		ctrl = CTRL_MIXED_SLOW_BUS_MODE;
		break;

	default:
		return -EINVAL;
	}

	sysclk_rate = clk_get_rate(master->sysclk);
	if (!sysclk_rate)
		return -EINVAL;

	pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
	if (pres > PRESCL_CTRL0_I3C_MAX)
		return -ERANGE;

	bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);

	prescl0 = PRESCL_CTRL0_I3C(pres);

	low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
	prescl1 = PRESCL_CTRL1_OD_LOW(low);

	max_i2cfreq = bus->scl_rate.i2c;

	pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
	if (pres > PRESCL_CTRL0_I2C_MAX)
		return -ERANGE;

	bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);

	prescl0 |= PRESCL_CTRL0_I2C(pres);
	writel(prescl0, master->regs + PRESCL_CTRL0);

	/* Calculate OD and PP low. */
	pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
	ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
	if (ncycles < 0)
		ncycles = 0;
	prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
	writel(prescl1, master->regs + PRESCL_CTRL1);

	/* Get an address for the master. */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;

	writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
	       master->regs + DEV_ID_RR0(0));

	cdns_i3c_master_dev_rr_to_info(master, 0, &info);
	if (info.bcr & I3C_BCR_HDR_CAP)
		info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		return ret;

	/*
	 * Enable Hot-Join, and, when a Hot-Join request happens, disable all
	 * events coming from this device.
	 *
	 * We will issue ENTDAA afterwards from the threaded IRQ handler.
	 */
	ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;

	/*
	 * Configure data hold delay based on device-specific data.
	 *
	 * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on
	 * master output. This setting allows to meet this timing on master's
	 * SoC outputs, regardless of PCB balancing.
	 */
	ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
	writel(ctrl, master->regs + CTRL);

	cdns_i3c_master_enable(master);

	return 0;
}

static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
				       u32 ibir)
{
	struct cdns_i3c_i2c_dev_data *data;
	bool data_consumed = false;
	struct i3c_ibi_slot *slot;
	u32 id = IBIR_SLVID(ibir);
	struct i3c_dev_desc *dev;
	size_t nbytes;
	u8 *buf;

	/*
	 * FIXME: maybe we should report the FIFO OVF errors to the upper
	 * layer.
	 */
	if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
		goto out;

	dev = master->ibi.slots[id];
	spin_lock(&master->ibi.lock);

	data = i3c_dev_get_master_data(dev);
	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		goto out_unlock;

	buf = slot->data;

	nbytes = IBIR_XFER_BYTES(ibir);
	i3c_readl_fifo(master->regs + IBI_DATA_FIFO, buf, nbytes);

	slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
			  dev->ibi->max_payload_len);
	i3c_master_queue_ibi(dev, slot);
	data_consumed = true;

out_unlock:
	spin_unlock(&master->ibi.lock);

out:
	/* Consume data from the FIFO if it's not been done already. */
	if (!data_consumed) {
		int i;

		for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
			readl(master->regs + IBI_DATA_FIFO);
	}
}

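/*
 * Descriptive note (added for clarity, derived from the code below): the IBI
 * response FIFO is drained here and each entry is dispatched by type: slave
 * IBIs go to the generic IBI machinery, Hot-Join requests are deferred to
 * hj_work (which triggers a new DAA), and Mastership requests are left
 * unhandled.
 */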
static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
{
	u32 status0;

	writel(MST_INT_IBIR_THR, master->regs + MST_ICR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_IBIR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		u32 ibir = readl(master->regs + IBIR);

		switch (IBIR_TYPE(ibir)) {
		case IBIR_TYPE_IBI:
			cdns_i3c_master_handle_ibi(master, ibir);
			break;

		case IBIR_TYPE_HJ:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			queue_work(master->base.wq, &master->hj_work);
			break;

		case IBIR_TYPE_MR:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			break;

		default:
			break;
		}
	}
}

static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
{
	struct cdns_i3c_master *master = data;
	u32 status;

	status = readl(master->regs + MST_ISR);
	if (!(status & readl(master->regs + MST_IMR)))
		return IRQ_NONE;

	spin_lock(&master->xferqueue.lock);
	cdns_i3c_master_end_xfer_locked(master, status);
	spin_unlock(&master->xferqueue.lock);

	if (status & MST_INT_IBIR_THR)
		cnds_i3c_master_demux_ibis(master);

	return IRQ_HANDLED;
}

static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sirmap;
	int ret;

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
				      I3C_CCC_EVENT_SIR);
	if (ret)
		return ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sirmap |= SIR_MAP_DEV_CONF(data->ibi,
				   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	return ret;
}

static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sircfg, sirmap;
	int ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
		 SIR_MAP_DEV_DA(dev->info.dyn_addr) |
		 SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
		 SIR_MAP_DEV_ACK;

	if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
		sircfg |= SIR_MAP_DEV_SLOW;

	sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
				     I3C_CCC_EVENT_SIR);
	if (ret) {
		spin_lock_irqsave(&master->ibi.lock, flags);
		sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
		sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
		sirmap |= SIR_MAP_DEV_CONF(data->ibi,
					   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
		writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
		spin_unlock_irqrestore(&master->ibi.lock, flags);
	}

	return ret;
}

static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}

static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}

static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					     struct i3c_ibi_slot *slot)
{
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}

static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
	.bus_init = cdns_i3c_master_bus_init,
	.bus_cleanup = cdns_i3c_master_bus_cleanup,
	.do_daa = cdns_i3c_master_do_daa,
	.attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
	.reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
	.detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
	.attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
	.supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
	.priv_xfers = cdns_i3c_master_priv_xfers,
	.i2c_xfers = cdns_i3c_master_i2c_xfers,
	.enable_ibi = cdns_i3c_master_enable_ibi,
	.disable_ibi = cdns_i3c_master_disable_ibi,
	.request_ibi = cdns_i3c_master_request_ibi,
	.free_ibi = cdns_i3c_master_free_ibi,
	.recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
};

static void cdns_i3c_master_hj(struct work_struct *work)
{
	struct cdns_i3c_master *master = container_of(work,
						      struct cdns_i3c_master,
						      hj_work);

	i3c_master_do_daa(&master->base);
}

static struct cdns_i3c_data cdns_i3c_devdata = {
	.thd_delay_ns = 10,
};

static const struct of_device_id cdns_i3c_master_of_ids[] = {
	{ .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids);
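
/*
 * Illustrative only: a minimal devicetree node that would bind to this
 * driver could look like the sketch below. The register, interrupt and
 * clock phandle values are made-up placeholders; only the compatible string
 * and the "pclk"/"sysclk" clock names are taken from this driver, and the
 * #address-cells/#size-cells values follow the generic I3C bus binding.
 *
 *	i3c@fd080000 {
 *		compatible = "cdns,i3c-master";
 *		reg = <0xfd080000 0x1000>;
 *		interrupts = <0 89 4>;
 *		clocks = <&i3c_pclk>, <&i3c_sysclk>;
 *		clock-names = "pclk", "sysclk";
 *		#address-cells = <3>;
 *		#size-cells = <0>;
 *	};
 */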

static int cdns_i3c_master_probe(struct platform_device *pdev)
{
	struct cdns_i3c_master *master;
	struct clk *pclk;
	int ret, irq;
	u32 val;

	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->devdata = of_device_get_match_data(&pdev->dev);
	if (!master->devdata)
		return -EINVAL;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	master->sysclk = devm_clk_get_enabled(&pdev->dev, "sysclk");
	if (IS_ERR(master->sysclk))
		return PTR_ERR(master->sysclk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER)
		return -EINVAL;

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
	writel(0xffffffff, master->regs + MST_IDR);
	writel(0xffffffff, master->regs + SLV_IDR);
	ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
			       dev_name(&pdev->dev), master);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, master);

	val = readl(master->regs + CONF_STATUS0);

	/* Device ID0 is reserved to describe this master. */
	master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
	master->free_rr_slots = GENMASK(master->maxdevs, 1);
	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);

	val = readl(master->regs + CONF_STATUS1);
	master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
	master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
	master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots)
		return -ENOMEM;

	writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
	writel(MST_INT_IBIR_THR, master->regs + MST_IER);
	writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);

	return i3c_master_register(&master->base, &pdev->dev,
				   &cdns_i3c_master_ops, false);
}

static void cdns_i3c_master_remove(struct platform_device *pdev)
{
	struct cdns_i3c_master *master = platform_get_drvdata(pdev);

	cancel_work_sync(&master->hj_work);
	i3c_master_unregister(&master->base);
}

static struct platform_driver cdns_i3c_master = {
	.probe = cdns_i3c_master_probe,
	.remove = cdns_i3c_master_remove,
	.driver = {
		.name = "cdns-i3c-master",
		.of_match_table = cdns_i3c_master_of_ids,
	},
};
module_platform_driver(cdns_i3c_master);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence I3C master driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cdns-i3c-master");