// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
 * v1.x of the spec and v2.0 will likely be split out.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
#include <linux/pci.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"

/*
 * Software Parameter Values (somewhat arbitrary for now).
 * Some of them could be determined at run time eventually.
 */

#define XFER_RINGS                      1       /* max: 8 */
#define XFER_RING_ENTRIES               16      /* max: 255 */

#define IBI_RINGS                       1       /* max: 8 */
#define IBI_STATUS_RING_ENTRIES         32      /* max: 255 */
#define IBI_CHUNK_CACHELINES            1       /* max: 256 bytes equivalent */
#define IBI_CHUNK_POOL_SIZE             128     /* max: 1023 */
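
/*
 * Rough sizing example (assuming a typical 64-byte cache line, so
 * dma_get_cache_alignment() == 64): each IBI data chunk is
 * IBI_CHUNK_CACHELINES * 64 = 64 bytes and the per-ring IBI data buffer
 * is IBI_CHUNK_POOL_SIZE * 64 = 8 KiB. The actual chunk size is computed
 * at init time from dma_get_cache_alignment(), rounded up to a power of
 * two of at least 4 bytes; initialization fails if the result exceeds
 * 256 bytes.
 */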

/*
 * Ring Header Preamble
 */

#define rhs_reg_read(r)         readl(hci->RHS_regs + (RHS_##r))
#define rhs_reg_write(r, v)     writel(v, hci->RHS_regs + (RHS_##r))

#define RHS_CONTROL                     0x00
#define PREAMBLE_SIZE                   GENMASK(31, 24) /* Preamble Section Size */
#define HEADER_SIZE                     GENMASK(23, 16) /* Ring Header Size */
#define MAX_HEADER_COUNT_CAP            GENMASK(7, 4)   /* HC Max Header Count */
#define MAX_HEADER_COUNT                GENMASK(3, 0)   /* Driver Max Header Count */

#define RHS_RHn_OFFSET(n)               (0x04 + (n)*4)

/*
 * Ring Header (Per-Ring Bundle)
 */

#define rh_reg_read(r)          readl(rh->regs + (RH_##r))
#define rh_reg_write(r, v)      writel(v, rh->regs + (RH_##r))

#define RH_CR_SETUP                     0x00    /* Command/Response Ring */
#define CR_XFER_STRUCT_SIZE             GENMASK(31, 24)
#define CR_RESP_STRUCT_SIZE             GENMASK(23, 16)
#define CR_RING_SIZE                    GENMASK(8, 0)

#define RH_IBI_SETUP                    0x04
#define IBI_STATUS_STRUCT_SIZE          GENMASK(31, 24)
#define IBI_STATUS_RING_SIZE            GENMASK(23, 16)
#define IBI_DATA_CHUNK_SIZE             GENMASK(12, 10)
#define IBI_DATA_CHUNK_COUNT            GENMASK(9, 0)

#define RH_CHUNK_CONTROL                0x08

#define RH_INTR_STATUS                  0x10
#define RH_INTR_STATUS_ENABLE           0x14
#define RH_INTR_SIGNAL_ENABLE           0x18
#define RH_INTR_FORCE                   0x1c
#define INTR_IBI_READY                  BIT(12)
#define INTR_TRANSFER_COMPLETION        BIT(11)
#define INTR_RING_OP                    BIT(10)
#define INTR_TRANSFER_ERR               BIT(9)
#define INTR_IBI_RING_FULL              BIT(6)
#define INTR_TRANSFER_ABORT             BIT(5)

#define RH_RING_STATUS                  0x20
#define RING_STATUS_LOCKED              BIT(3)
#define RING_STATUS_ABORTED             BIT(2)
#define RING_STATUS_RUNNING             BIT(1)
#define RING_STATUS_ENABLED             BIT(0)

#define RH_RING_CONTROL                 0x24
#define RING_CTRL_ABORT                 BIT(2)
#define RING_CTRL_RUN_STOP              BIT(1)
#define RING_CTRL_ENABLE                BIT(0)

#define RH_RING_OPERATION1              0x28
#define RING_OP1_IBI_DEQ_PTR            GENMASK(23, 16)
#define RING_OP1_CR_SW_DEQ_PTR          GENMASK(15, 8)
#define RING_OP1_CR_ENQ_PTR             GENMASK(7, 0)

#define RH_RING_OPERATION2              0x2c
#define RING_OP2_IBI_ENQ_PTR            GENMASK(23, 16)
#define RING_OP2_CR_DEQ_PTR             GENMASK(7, 0)
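
/*
 * Pointer ownership as used by this driver: the RING_OPERATION1 fields are
 * updated by software (CR_ENQ_PTR when commands are enqueued, CR_SW_DEQ_PTR
 * once responses have been consumed, IBI_DEQ_PTR once IBI status entries
 * have been consumed), while the RING_OPERATION2 fields are advanced by the
 * controller and only read by software.
 */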

#define RH_CMD_RING_BASE_LO             0x30
#define RH_CMD_RING_BASE_HI             0x34
#define RH_RESP_RING_BASE_LO            0x38
#define RH_RESP_RING_BASE_HI            0x3c
#define RH_IBI_STATUS_RING_BASE_LO      0x40
#define RH_IBI_STATUS_RING_BASE_HI      0x44
#define RH_IBI_DATA_RING_BASE_LO        0x48
#define RH_IBI_DATA_RING_BASE_HI        0x4c

#define RH_CMD_RING_SG                  0x50    /* Ring Scatter Gather Support */
#define RH_RESP_RING_SG                 0x54
#define RH_IBI_STATUS_RING_SG           0x58
#define RH_IBI_DATA_RING_SG             0x5c
#define RING_SG_BLP                     BIT(31) /* Buffer Vs. List Pointer */
#define RING_SG_LIST_SIZE               GENMASK(15, 0)

/*
 * Data Buffer Descriptor (in memory)
 */

#define DATA_BUF_BLP                    BIT(31) /* Buffer Vs. List Pointer */
#define DATA_BUF_IOC                    BIT(30) /* Interrupt on Completion */
#define DATA_BUF_BLOCK_SIZE             GENMASK(15, 0)
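
/*
 * As written by hci_dma_queue_xfer() below, each in-ring data buffer
 * descriptor is three 32-bit words: word 0 holds DATA_BUF_BLOCK_SIZE
 * (plus DATA_BUF_IOC on the last transfer of a batch), words 1 and 2
 * hold the lower and upper halves of the 64-bit DMA address.
 */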

struct hci_rh_data {
        void __iomem *regs;
        void *xfer, *resp, *ibi_status, *ibi_data;
        dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
        unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
        unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
        unsigned int done_ptr, ibi_chunk_ptr, xfer_space;
        struct hci_xfer **src_xfers;
        struct completion op_done;
};

struct hci_rings_data {
        struct device *sysdev;
        unsigned int total;
        struct hci_rh_data headers[] __counted_by(total);
};

struct hci_dma_dev_ibi_data {
        struct i3c_generic_ibi_pool *pool;
        unsigned int max_len;
};

static void hci_dma_cleanup(struct i3c_hci *hci)
{
        struct hci_rings_data *rings = hci->io_data;
        struct hci_rh_data *rh;
        unsigned int i;

        if (!rings)
                return;

        for (i = 0; i < rings->total; i++) {
                rh = &rings->headers[i];

                rh_reg_write(INTR_SIGNAL_ENABLE, 0);
                rh_reg_write(RING_CONTROL, 0);
        }

        i3c_hci_sync_irq_inactive(hci);

        for (i = 0; i < rings->total; i++) {
                rh = &rings->headers[i];

                rh_reg_write(CR_SETUP, 0);
                rh_reg_write(IBI_SETUP, 0);
        }

        rhs_reg_write(CONTROL, 0);
}

static void hci_dma_free(void *data)
{
        struct i3c_hci *hci = data;
        struct hci_rings_data *rings = hci->io_data;
        struct hci_rh_data *rh;

        if (!rings)
                return;

        for (int i = 0; i < rings->total; i++) {
                rh = &rings->headers[i];

                if (rh->xfer)
                        dma_free_coherent(rings->sysdev,
                                          rh->xfer_struct_sz * rh->xfer_entries,
                                          rh->xfer, rh->xfer_dma);
                if (rh->resp)
                        dma_free_coherent(rings->sysdev,
                                          rh->resp_struct_sz * rh->xfer_entries,
                                          rh->resp, rh->resp_dma);
                kfree(rh->src_xfers);
                if (rh->ibi_status)
                        dma_free_coherent(rings->sysdev,
                                          rh->ibi_status_sz * rh->ibi_status_entries,
                                          rh->ibi_status, rh->ibi_status_dma);
                if (rh->ibi_data_dma)
                        dma_unmap_single(rings->sysdev, rh->ibi_data_dma,
                                         rh->ibi_chunk_sz * rh->ibi_chunks_total,
                                         DMA_FROM_DEVICE);
                kfree(rh->ibi_data);
        }

        kfree(rings);
        hci->io_data = NULL;
}

static void hci_dma_init_rh(struct i3c_hci *hci, struct hci_rh_data *rh, int i)
{
        u32 regval;

        rh_reg_write(CMD_RING_BASE_LO, lower_32_bits(rh->xfer_dma));
        rh_reg_write(CMD_RING_BASE_HI, upper_32_bits(rh->xfer_dma));
        rh_reg_write(RESP_RING_BASE_LO, lower_32_bits(rh->resp_dma));
        rh_reg_write(RESP_RING_BASE_HI, upper_32_bits(rh->resp_dma));

        regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
        rh_reg_write(CR_SETUP, regval);

        rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
        rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
                                         INTR_TRANSFER_COMPLETION |
                                         INTR_RING_OP |
                                         INTR_TRANSFER_ERR |
                                         INTR_IBI_RING_FULL |
                                         INTR_TRANSFER_ABORT);

        if (i >= IBI_RINGS)
                goto ring_ready;

        rh_reg_write(IBI_STATUS_RING_BASE_LO, lower_32_bits(rh->ibi_status_dma));
        rh_reg_write(IBI_STATUS_RING_BASE_HI, upper_32_bits(rh->ibi_status_dma));
        rh_reg_write(IBI_DATA_RING_BASE_LO, lower_32_bits(rh->ibi_data_dma));
        rh_reg_write(IBI_DATA_RING_BASE_HI, upper_32_bits(rh->ibi_data_dma));

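        /*
         * IBI_DATA_CHUNK_SIZE encodes the chunk size as 2^n DWORDs, i.e.
         * 2^(n+2) bytes, so a 64-byte chunk is programmed as n = 4.
         * ibi_chunk_sz was already constrained to this range in
         * hci_dma_init().
         */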
        regval = FIELD_PREP(IBI_STATUS_RING_SIZE, rh->ibi_status_entries) |
                 FIELD_PREP(IBI_DATA_CHUNK_SIZE, ilog2(rh->ibi_chunk_sz) - 2) |
                 FIELD_PREP(IBI_DATA_CHUNK_COUNT, rh->ibi_chunks_total);
        rh_reg_write(IBI_SETUP, regval);

        regval = rh_reg_read(INTR_SIGNAL_ENABLE);
        regval |= INTR_IBI_READY;
        rh_reg_write(INTR_SIGNAL_ENABLE, regval);

ring_ready:
        /*
         * The MIPI I3C HCI specification does not document reset values for
         * RING_OPERATION1 fields and some controllers (e.g. Intel controllers)
         * do not reset the values, so ensure the ring pointers are set to zero
         * here.
         */
        rh_reg_write(RING_OPERATION1, 0);

        rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
        rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP);

        rh->done_ptr = 0;
        rh->ibi_chunk_ptr = 0;
        rh->xfer_space = rh->xfer_entries;
}

static void hci_dma_init_rings(struct i3c_hci *hci)
{
        struct hci_rings_data *rings = hci->io_data;
        u32 regval;

        regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
        rhs_reg_write(CONTROL, regval);

        for (int i = 0; i < rings->total; i++)
                hci_dma_init_rh(hci, &rings->headers[i], i);
}

static void hci_dma_suspend(struct i3c_hci *hci)
{
        struct hci_rings_data *rings = hci->io_data;
        int n = rings ? rings->total : 0;

        for (int i = 0; i < n; i++) {
                struct hci_rh_data *rh = &rings->headers[i];

                rh_reg_write(INTR_SIGNAL_ENABLE, 0);
                rh_reg_write(RING_CONTROL, 0);
        }

        i3c_hci_sync_irq_inactive(hci);
}

static void hci_dma_resume(struct i3c_hci *hci)
{
        struct hci_rings_data *rings = hci->io_data;

        if (rings)
                hci_dma_init_rings(hci);
}

static int hci_dma_init(struct i3c_hci *hci)
{
        struct hci_rings_data *rings;
        struct hci_rh_data *rh;
        struct device *sysdev;
        u32 regval;
        unsigned int i, nr_rings, xfers_sz, resps_sz;
        unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
        int ret;

        /*
         * Use the physical device that actually performs the DMA and has
         * the IOMMU set up for it (when an IOMMU is enabled) with the
         * DMA API. This is the "mipi-i3c-hci" platform device for OF/ACPI
         * enumeration, or its PCI parent for PCI enumeration.
         */
        sysdev = hci->master.dev.parent;
        if (sysdev->parent && dev_is_pci(sysdev->parent))
                sysdev = sysdev->parent;

        regval = rhs_reg_read(CONTROL);
        nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
        dev_dbg(&hci->master.dev, "%d DMA rings available\n", nr_rings);
        if (unlikely(nr_rings > 8)) {
                dev_err(&hci->master.dev, "number of rings should be <= 8\n");
                nr_rings = 8;
        }
        if (nr_rings > XFER_RINGS)
                nr_rings = XFER_RINGS;
        rings = kzalloc_flex(*rings, headers, nr_rings);
        if (!rings)
                return -ENOMEM;
        hci->io_data = rings;
        rings->total = nr_rings;
        rings->sysdev = sysdev;

        for (i = 0; i < rings->total; i++) {
                u32 offset = rhs_reg_read(RHn_OFFSET(i));

                dev_dbg(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
                ret = -EINVAL;
                if (!offset)
                        goto err_out;
                rh = &rings->headers[i];
                rh->regs = hci->base_regs + offset;
                init_completion(&rh->op_done);

                rh->xfer_entries = XFER_RING_ENTRIES;

                regval = rh_reg_read(CR_SETUP);
                rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
                rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
                dev_dbg(&hci->master.dev,
                        "xfer_struct_sz = %d, resp_struct_sz = %d",
                        rh->xfer_struct_sz, rh->resp_struct_sz);
                xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
                resps_sz = rh->resp_struct_sz * rh->xfer_entries;

                rh->xfer = dma_alloc_coherent(rings->sysdev, xfers_sz,
                                              &rh->xfer_dma, GFP_KERNEL);
                rh->resp = dma_alloc_coherent(rings->sysdev, resps_sz,
                                              &rh->resp_dma, GFP_KERNEL);
                rh->src_xfers =
                        kmalloc_objs(*rh->src_xfers, rh->xfer_entries);
                ret = -ENOMEM;
                if (!rh->xfer || !rh->resp || !rh->src_xfers)
                        goto err_out;

                /* IBIs */

                if (i >= IBI_RINGS)
                        continue;

                regval = rh_reg_read(IBI_SETUP);
                rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
                rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
                rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;

                rh->ibi_chunk_sz = dma_get_cache_alignment();
                rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
                /*
                 * Round IBI data chunk size to number of bytes supported by
                 * the HW. Chunk size can be 2^n number of DWORDs which is the
                 * same as 2^(n+2) bytes, where n is 0..6.
                 */
                rh->ibi_chunk_sz = umax(4, rh->ibi_chunk_sz);
                rh->ibi_chunk_sz = roundup_pow_of_two(rh->ibi_chunk_sz);
                if (rh->ibi_chunk_sz > 256) {
                        ret = -EINVAL;
                        goto err_out;
                }

                ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
                ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;

                rh->ibi_status =
                        dma_alloc_coherent(rings->sysdev, ibi_status_ring_sz,
                                           &rh->ibi_status_dma, GFP_KERNEL);
                rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
                ret = -ENOMEM;
                if (!rh->ibi_status || !rh->ibi_data)
                        goto err_out;
                rh->ibi_data_dma =
                        dma_map_single(rings->sysdev, rh->ibi_data,
                                       ibi_data_ring_sz, DMA_FROM_DEVICE);
                if (dma_mapping_error(rings->sysdev, rh->ibi_data_dma)) {
                        rh->ibi_data_dma = 0;
                        ret = -ENOMEM;
                        goto err_out;
                }
        }

        ret = devm_add_action(hci->master.dev.parent, hci_dma_free, hci);
        if (ret)
                goto err_out;

        hci_dma_init_rings(hci);

        return 0;

err_out:
        hci_dma_free(hci);
        return ret;
}

static void hci_dma_unmap_xfer(struct i3c_hci *hci,
                               struct hci_xfer *xfer_list, unsigned int n)
{
        struct hci_xfer *xfer;
        unsigned int i;

        for (i = 0; i < n; i++) {
                xfer = xfer_list + i;
                if (!xfer->data)
                        continue;
                i3c_master_dma_unmap_single(xfer->dma);
        }
}

static struct i3c_dma *hci_dma_map_xfer(struct device *dev, struct hci_xfer *xfer)
{
        enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
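        /*
         * A bounce buffer is requested for IOMMU-mapped devices when a read
         * length is not a multiple of 4, presumably because the controller
         * writes data in full DWORDs and the trailing bytes could otherwise
         * land past the end of the mapped buffer and fault in the IOMMU.
         */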
        bool need_bounce = device_iommu_mapped(dev) && xfer->rnw && (xfer->data_len & 3);

        return i3c_master_dma_map_single(dev, xfer->data, xfer->data_len, need_bounce, dir);
}

static int hci_dma_map_xfer_list(struct i3c_hci *hci, struct device *dev,
                                 struct hci_xfer *xfer_list, int n)
{
        for (int i = 0; i < n; i++) {
                struct hci_xfer *xfer = xfer_list + i;

                if (!xfer->data)
                        continue;

                xfer->dma = hci_dma_map_xfer(dev, xfer);
                if (!xfer->dma) {
                        hci_dma_unmap_xfer(hci, xfer_list, i);
                        return -ENOMEM;
                }
        }

        return 0;
}

static int hci_dma_queue_xfer(struct i3c_hci *hci,
                              struct hci_xfer *xfer_list, int n)
{
        struct hci_rings_data *rings = hci->io_data;
        struct hci_rh_data *rh;
        unsigned int i, ring, enqueue_ptr;
        u32 op1_val;
        int ret;

        ret = hci_dma_map_xfer_list(hci, rings->sysdev, xfer_list, n);
        if (ret)
                return ret;

        /* For now we only use ring 0 */
        ring = 0;
        rh = &rings->headers[ring];

        spin_lock_irq(&hci->lock);

        if (n > rh->xfer_space) {
                spin_unlock_irq(&hci->lock);
                hci_dma_unmap_xfer(hci, xfer_list, n);
                return -EBUSY;
        }

        op1_val = rh_reg_read(RING_OPERATION1);
        enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
        for (i = 0; i < n; i++) {
                struct hci_xfer *xfer = xfer_list + i;
                u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;

                /* store cmd descriptor */
                *ring_data++ = xfer->cmd_desc[0];
                *ring_data++ = xfer->cmd_desc[1];
                if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
                        *ring_data++ = xfer->cmd_desc[2];
                        *ring_data++ = xfer->cmd_desc[3];
                }

                /* first word of Data Buffer Descriptor Structure */
                if (!xfer->data)
                        xfer->data_len = 0;
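                /*
                 * DATA_BUF_IOC (Interrupt on Completion) is set only on the
                 * last descriptor of the batch so a single completion
                 * interrupt is raised once the whole list has been processed.
                 */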
                *ring_data++ =
                        FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
                        ((i == n - 1) ? DATA_BUF_IOC : 0);

                /* 2nd and 3rd words of Data Buffer Descriptor Structure */
                if (xfer->data) {
                        *ring_data++ = lower_32_bits(xfer->dma->addr);
                        *ring_data++ = upper_32_bits(xfer->dma->addr);
                } else {
                        *ring_data++ = 0;
                        *ring_data++ = 0;
                }

                /* remember corresponding xfer struct */
                rh->src_xfers[enqueue_ptr] = xfer;
                /* remember corresponding ring/entry for this xfer structure */
                xfer->ring_number = ring;
                xfer->ring_entry = enqueue_ptr;

                enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;
        }

        rh->xfer_space -= n;

        op1_val &= ~RING_OP1_CR_ENQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);
        spin_unlock_irq(&hci->lock);

        return 0;
}

static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
                                 struct hci_xfer *xfer_list, int n)
{
        struct hci_rings_data *rings = hci->io_data;
        struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
        unsigned int i;
        bool did_unqueue = false;
        u32 ring_status;

        guard(mutex)(&hci->control_mutex);

        ring_status = rh_reg_read(RING_STATUS);
        if (ring_status & RING_STATUS_RUNNING) {
                /* stop the ring */
                reinit_completion(&rh->op_done);
                rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_ABORT);
                wait_for_completion_timeout(&rh->op_done, HZ);
                ring_status = rh_reg_read(RING_STATUS);
                if (ring_status & RING_STATUS_RUNNING) {
                        /*
                         * We're in deep trouble if this condition is ever met.
                         * Hardware might still be writing to memory, etc.
                         */
                        dev_crit(&hci->master.dev, "unable to abort the ring\n");
                        WARN_ON(1);
                }
        }

        spin_lock_irq(&hci->lock);

        for (i = 0; i < n; i++) {
                struct hci_xfer *xfer = xfer_list + i;
                int idx = xfer->ring_entry;

                /*
                 * At the time the abort happened, the xfer might have
                 * completed already. If not then replace corresponding
                 * descriptor entries with a no-op.
                 */
                if (idx >= 0) {
                        u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;

                        /* store no-op cmd descriptor */
                        *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7) |
                                       FIELD_PREP(CMD_0_TID, xfer->cmd_tid);
                        *ring_data++ = 0;
                        if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
                                *ring_data++ = 0;
                                *ring_data++ = 0;
                        }

                        /* disassociate this xfer struct */
                        rh->src_xfers[idx] = NULL;

                        /* and unmap it */
                        hci_dma_unmap_xfer(hci, xfer, 1);

                        did_unqueue = true;
                }
        }

        /* restart the ring */
        mipi_i3c_hci_resume(hci);
        rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
        rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP);

        spin_unlock_irq(&hci->lock);

        return did_unqueue;
}

static int hci_dma_handle_error(struct i3c_hci *hci, struct hci_xfer *xfer_list, int n)
{
        return hci_dma_dequeue_xfer(hci, xfer_list, n) ? -EIO : 0;
}

static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
        u32 op1_val, op2_val, resp, *ring_resp;
        unsigned int tid, done_ptr = rh->done_ptr;
        unsigned int done_cnt = 0;
        struct hci_xfer *xfer;

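        /*
         * Walk the response ring from our software done pointer up to the
         * controller's CR_DEQ_PTR, completing each corresponding xfer, then
         * report the consumed entries back via CR_SW_DEQ_PTR.
         */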
        for (;;) {
                op2_val = rh_reg_read(RING_OPERATION2);
                if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
                        break;

                ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
                resp = *ring_resp;
                tid = RESP_TID(resp);
                dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);

                xfer = rh->src_xfers[done_ptr];
                if (!xfer) {
                        dev_dbg(&hci->master.dev, "orphaned ring entry");
                } else {
                        hci_dma_unmap_xfer(hci, xfer, 1);
                        rh->src_xfers[done_ptr] = NULL;
                        xfer->ring_entry = -1;
                        xfer->response = resp;
                        if (tid != xfer->cmd_tid) {
                                dev_err(&hci->master.dev,
                                        "response tid=%d when expecting %d\n",
                                        tid, xfer->cmd_tid);
                                /* TODO: do something about it? */
                        }
                        if (xfer->completion)
                                complete(xfer->completion);
                }

                done_ptr = (done_ptr + 1) % rh->xfer_entries;
                rh->done_ptr = done_ptr;
                done_cnt += 1;
        }

        rh->xfer_space += done_cnt;
        op1_val = rh_reg_read(RING_OPERATION1);
        op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);
}

static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
                               const struct i3c_ibi_setup *req)
{
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
        struct i3c_generic_ibi_pool *pool;
        struct hci_dma_dev_ibi_data *dev_ibi;

        dev_ibi = kmalloc_obj(*dev_ibi);
        if (!dev_ibi)
                return -ENOMEM;
        pool = i3c_generic_ibi_alloc_pool(dev, req);
        if (IS_ERR(pool)) {
                kfree(dev_ibi);
                return PTR_ERR(pool);
        }
        dev_ibi->pool = pool;
        dev_ibi->max_len = req->max_payload_len;
        dev_data->ibi_data = dev_ibi;
        return 0;
}

static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
        struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

        dev_data->ibi_data = NULL;
        i3c_generic_ibi_free_pool(dev_ibi->pool);
        kfree(dev_ibi);
}

static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
                                     struct i3c_dev_desc *dev,
                                     struct i3c_ibi_slot *slot)
{
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
        struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

        i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}

static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
        struct hci_rings_data *rings = hci->io_data;
        struct i3c_dev_desc *dev;
        struct i3c_hci_dev_data *dev_data;
        struct hci_dma_dev_ibi_data *dev_ibi;
        struct i3c_ibi_slot *slot;
        u32 op1_val, op2_val, ibi_status_error;
        unsigned int ptr, enq_ptr, deq_ptr;
        unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
        int ibi_addr, last_ptr;
        void *ring_ibi_data;
        dma_addr_t ring_ibi_data_dma;

        op1_val = rh_reg_read(RING_OPERATION1);
        deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);

        op2_val = rh_reg_read(RING_OPERATION2);
        enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);

        ibi_status_error = 0;
        ibi_addr = -1;
        ibi_chunks = 0;
        ibi_size = 0;
        last_ptr = -1;

        /* let's find all we can about this IBI */
        for (ptr = deq_ptr; ptr != enq_ptr;
             ptr = (ptr + 1) % rh->ibi_status_entries) {
                u32 ibi_status, *ring_ibi_status;
                unsigned int chunks;

                ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
                ibi_status = *ring_ibi_status;
                dev_dbg(&hci->master.dev, "status = %#x", ibi_status);

                if (ibi_status_error) {
                        /* we no longer care */
                } else if (ibi_status & IBI_ERROR) {
                        ibi_status_error = ibi_status;
                } else if (ibi_addr == -1) {
                        ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
                } else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
                        /* the address changed unexpectedly */
                        ibi_status_error = ibi_status;
                }

                chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
                ibi_chunks += chunks;
                if (!(ibi_status & IBI_LAST_STATUS)) {
                        ibi_size += chunks * rh->ibi_chunk_sz;
                } else {
                        ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
                        last_ptr = ptr;
                        break;
                }
        }

        /* validate what we've got */

        if (last_ptr == -1) {
                /* this IBI sequence is not yet complete */
                dev_dbg(&hci->master.dev,
                        "no LAST_STATUS available (e=%d d=%d)",
                        enq_ptr, deq_ptr);
                return;
        }
        deq_ptr = last_ptr + 1;
        deq_ptr %= rh->ibi_status_entries;

        if (ibi_status_error) {
                dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
                goto done;
        }

        /* determine who this is for */
        dev = i3c_hci_addr_to_dev(hci, ibi_addr);
        if (!dev) {
                dev_err(&hci->master.dev,
                        "IBI for unknown device %#x\n", ibi_addr);
                goto done;
        }

        dev_data = i3c_dev_get_master_data(dev);
        dev_ibi = dev_data->ibi_data;
        if (ibi_size > dev_ibi->max_len) {
                dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
                        ibi_size, dev_ibi->max_len);
                goto done;
        }

        /*
         * This ring model is not suitable for zero-copy processing of IBIs.
         * We have the data chunk ring wrap-around to deal with, meaning
         * that the payload might span multiple chunks beginning at the
         * end of the ring and wrap to the start of the ring. Furthermore
         * there is no guarantee that those chunks will be released in order
         * and in a timely manner by the upper driver. So let's just copy
         * them to a discrete buffer. In practice they're supposed to be
         * small anyway.
         */
        slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
        if (!slot) {
                dev_err(&hci->master.dev, "no free slot for IBI\n");
                goto done;
        }

        /* copy first part of the payload */
        ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
        ring_ibi_data = rh->ibi_data + ibi_data_offset;
        ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
        first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
                        * rh->ibi_chunk_sz;
        if (first_part > ibi_size)
                first_part = ibi_size;
        dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
                                first_part, DMA_FROM_DEVICE);
        memcpy(slot->data, ring_ibi_data, first_part);

        /* copy second part if any */
        if (ibi_size > first_part) {
                /* we wrap back to the start and copy remaining data */
                ring_ibi_data = rh->ibi_data;
                ring_ibi_data_dma = rh->ibi_data_dma;
                dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
                                        ibi_size - first_part, DMA_FROM_DEVICE);
                memcpy(slot->data + first_part, ring_ibi_data,
                       ibi_size - first_part);
        }

        /* submit it */
        slot->dev = dev;
        slot->len = ibi_size;
        i3c_master_queue_ibi(dev, slot);

done:
        op1_val = rh_reg_read(RING_OPERATION1);
        op1_val &= ~RING_OP1_IBI_DEQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);

        /* update the chunk pointer */
        rh->ibi_chunk_ptr += ibi_chunks;
        rh->ibi_chunk_ptr %= rh->ibi_chunks_total;

        /* and tell the hardware about freed chunks */
        rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
}

static bool hci_dma_irq_handler(struct i3c_hci *hci)
{
        struct hci_rings_data *rings = hci->io_data;
        unsigned int i;
        bool handled = false;

        for (i = 0; i < rings->total; i++) {
                struct hci_rh_data *rh;
                u32 status;

                rh = &rings->headers[i];
                status = rh_reg_read(INTR_STATUS);
                dev_dbg(&hci->master.dev, "Ring %d: RH_INTR_STATUS %#x",
                        i, status);
                if (!status)
                        continue;
                rh_reg_write(INTR_STATUS, status);

                if (status & INTR_IBI_READY)
                        hci_dma_process_ibi(hci, rh);
                if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
                        hci_dma_xfer_done(hci, rh);
                if (status & INTR_RING_OP)
                        complete(&rh->op_done);
                if (status & INTR_TRANSFER_ABORT)
                        dev_dbg(&hci->master.dev, "Ring %d: Transfer Aborted\n", i);
                if (status & INTR_IBI_RING_FULL)
                        dev_err_ratelimited(&hci->master.dev,
                                            "Ring %d: IBI Ring Full Condition\n", i);

                handled = true;
        }

        return handled;
}

const struct hci_io_ops mipi_i3c_hci_dma = {
        .init                   = hci_dma_init,
        .cleanup                = hci_dma_cleanup,
        .queue_xfer             = hci_dma_queue_xfer,
        .dequeue_xfer           = hci_dma_dequeue_xfer,
        .handle_error           = hci_dma_handle_error,
        .irq_handler            = hci_dma_irq_handler,
        .request_ibi            = hci_dma_request_ibi,
        .free_ibi               = hci_dma_free_ibi,
        .recycle_ibi_slot       = hci_dma_recycle_ibi_slot,
        .suspend                = hci_dma_suspend,
        .resume                 = hci_dma_resume,
};