// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
 * v1.x of the spec and v2.0 will likely be split out.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
#include <linux/pci.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"

/*
 * Software Parameter Values (somewhat arbitrary for now).
 * Some of them could be determined at run time eventually.
 */

#define XFER_RINGS			1	/* max: 8 */
#define XFER_RING_ENTRIES		16	/* max: 255 */

#define IBI_RINGS			1	/* max: 8 */
#define IBI_STATUS_RING_ENTRIES		32	/* max: 255 */
#define IBI_CHUNK_CACHELINES		1	/* max: 256 bytes equivalent */
#define IBI_CHUNK_POOL_SIZE		128	/* max: 1023 */
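
/*
 * With these defaults and a typical 64-byte DMA cache alignment, each
 * enabled ring gets a 16-entry command/response ring plus, for ring 0,
 * a 32-entry IBI status ring and a 128-chunk (8 KiB) IBI data pool.
 */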

/*
 * Ring Header Preamble
 */

#define rhs_reg_read(r)		readl(hci->RHS_regs + (RHS_##r))
#define rhs_reg_write(r, v)	writel(v, hci->RHS_regs + (RHS_##r))

#define RHS_CONTROL			0x00
#define PREAMBLE_SIZE			GENMASK(31, 24)	/* Preamble Section Size */
#define HEADER_SIZE			GENMASK(23, 16)	/* Ring Header Size */
#define MAX_HEADER_COUNT_CAP		GENMASK(7, 4)	/* HC Max Header Count */
#define MAX_HEADER_COUNT		GENMASK(3, 0)	/* Driver Max Header Count */

#define RHS_RHn_OFFSET(n)		(0x04 + (n)*4)

/*
 * Ring Header (Per-Ring Bundle)
 */

#define rh_reg_read(r)		readl(rh->regs + (RH_##r))
#define rh_reg_write(r, v)	writel(v, rh->regs + (RH_##r))

#define RH_CR_SETUP			0x00	/* Command/Response Ring */
#define CR_XFER_STRUCT_SIZE		GENMASK(31, 24)
#define CR_RESP_STRUCT_SIZE		GENMASK(23, 16)
#define CR_RING_SIZE			GENMASK(8, 0)

#define RH_IBI_SETUP			0x04
#define IBI_STATUS_STRUCT_SIZE		GENMASK(31, 24)
#define IBI_STATUS_RING_SIZE		GENMASK(23, 16)
#define IBI_DATA_CHUNK_SIZE		GENMASK(12, 10)
#define IBI_DATA_CHUNK_COUNT		GENMASK(9, 0)

#define RH_CHUNK_CONTROL		0x08

#define RH_INTR_STATUS			0x10
#define RH_INTR_STATUS_ENABLE		0x14
#define RH_INTR_SIGNAL_ENABLE		0x18
#define RH_INTR_FORCE			0x1c
#define INTR_IBI_READY			BIT(12)
#define INTR_TRANSFER_COMPLETION	BIT(11)
#define INTR_RING_OP			BIT(10)
#define INTR_TRANSFER_ERR		BIT(9)
#define INTR_IBI_RING_FULL		BIT(6)
#define INTR_TRANSFER_ABORT		BIT(5)

#define RH_RING_STATUS			0x20
#define RING_STATUS_LOCKED		BIT(3)
#define RING_STATUS_ABORTED		BIT(2)
#define RING_STATUS_RUNNING		BIT(1)
#define RING_STATUS_ENABLED		BIT(0)

#define RH_RING_CONTROL			0x24
#define RING_CTRL_ABORT			BIT(2)
#define RING_CTRL_RUN_STOP		BIT(1)
#define RING_CTRL_ENABLE		BIT(0)

#define RH_RING_OPERATION1		0x28
#define RING_OP1_IBI_DEQ_PTR		GENMASK(23, 16)
#define RING_OP1_CR_SW_DEQ_PTR		GENMASK(15, 8)
#define RING_OP1_CR_ENQ_PTR		GENMASK(7, 0)

#define RH_RING_OPERATION2		0x2c
#define RING_OP2_IBI_ENQ_PTR		GENMASK(23, 16)
#define RING_OP2_CR_DEQ_PTR		GENMASK(7, 0)

#define RH_CMD_RING_BASE_LO		0x30
#define RH_CMD_RING_BASE_HI		0x34
#define RH_RESP_RING_BASE_LO		0x38
#define RH_RESP_RING_BASE_HI		0x3c
#define RH_IBI_STATUS_RING_BASE_LO	0x40
#define RH_IBI_STATUS_RING_BASE_HI	0x44
#define RH_IBI_DATA_RING_BASE_LO	0x48
#define RH_IBI_DATA_RING_BASE_HI	0x4c

#define RH_CMD_RING_SG			0x50	/* Ring Scatter Gather Support */
#define RH_RESP_RING_SG			0x54
#define RH_IBI_STATUS_RING_SG		0x58
#define RH_IBI_DATA_RING_SG		0x5c
#define RING_SG_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define RING_SG_LIST_SIZE		GENMASK(15, 0)

/*
 * Data Buffer Descriptor (in memory)
 */

#define DATA_BUF_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define DATA_BUF_IOC			BIT(30)	/* Interrupt on Completion */
#define DATA_BUF_BLOCK_SIZE		GENMASK(15, 0)

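/*
 * Per-ring software state: mapped ring header registers, the DMA rings
 * themselves (command/response plus optional IBI status/data), the software
 * done/chunk pointers, and the xfer structs matching each ring entry.
 */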
struct hci_rh_data {
	void __iomem *regs;
	void *xfer, *resp, *ibi_status, *ibi_data;
	dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
	unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
	unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
	unsigned int done_ptr, ibi_chunk_ptr;
	struct hci_xfer **src_xfers;
	spinlock_t lock;
	struct completion op_done;
};

struct hci_rings_data {
	struct device *sysdev;
	unsigned int total;
	struct hci_rh_data headers[] __counted_by(total);
};

struct hci_dma_dev_ibi_data {
	struct i3c_generic_ibi_pool *pool;
	unsigned int max_len;
};

static void hci_dma_cleanup(struct i3c_hci *hci)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i;

	if (!rings)
		return;

	for (i = 0; i < rings->total; i++) {
		rh = &rings->headers[i];

		rh_reg_write(INTR_SIGNAL_ENABLE, 0);
		rh_reg_write(RING_CONTROL, 0);
	}

	i3c_hci_sync_irq_inactive(hci);

	for (i = 0; i < rings->total; i++) {
		rh = &rings->headers[i];

		rh_reg_write(CR_SETUP, 0);
		rh_reg_write(IBI_SETUP, 0);
	}

	rhs_reg_write(CONTROL, 0);
}

static void hci_dma_free(void *data)
{
	struct i3c_hci *hci = data;
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;

	if (!rings)
		return;

	for (int i = 0; i < rings->total; i++) {
		rh = &rings->headers[i];

		if (rh->xfer)
			dma_free_coherent(rings->sysdev,
					  rh->xfer_struct_sz * rh->xfer_entries,
					  rh->xfer, rh->xfer_dma);
		if (rh->resp)
			dma_free_coherent(rings->sysdev,
					  rh->resp_struct_sz * rh->xfer_entries,
					  rh->resp, rh->resp_dma);
		kfree(rh->src_xfers);
		if (rh->ibi_status)
			dma_free_coherent(rings->sysdev,
					  rh->ibi_status_sz * rh->ibi_status_entries,
					  rh->ibi_status, rh->ibi_status_dma);
		if (rh->ibi_data_dma)
			dma_unmap_single(rings->sysdev, rh->ibi_data_dma,
					 rh->ibi_chunk_sz * rh->ibi_chunks_total,
					 DMA_FROM_DEVICE);
		kfree(rh->ibi_data);
	}

	kfree(rings);
	hci->io_data = NULL;
}

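/*
 * Program one ring header: command/response ring base addresses and size,
 * interrupt enables, and (for rings below IBI_RINGS) the IBI status and
 * data ring setup, then enable and start the ring.
 */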
static void hci_dma_init_rh(struct i3c_hci *hci, struct hci_rh_data *rh, int i)
{
	u32 regval;

	rh_reg_write(CMD_RING_BASE_LO, lower_32_bits(rh->xfer_dma));
	rh_reg_write(CMD_RING_BASE_HI, upper_32_bits(rh->xfer_dma));
	rh_reg_write(RESP_RING_BASE_LO, lower_32_bits(rh->resp_dma));
	rh_reg_write(RESP_RING_BASE_HI, upper_32_bits(rh->resp_dma));

	regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
	rh_reg_write(CR_SETUP, regval);

	rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
	rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
					 INTR_TRANSFER_COMPLETION |
					 INTR_RING_OP |
					 INTR_TRANSFER_ERR |
					 INTR_IBI_RING_FULL |
					 INTR_TRANSFER_ABORT);

	if (i >= IBI_RINGS)
		goto ring_ready;

	rh_reg_write(IBI_STATUS_RING_BASE_LO, lower_32_bits(rh->ibi_status_dma));
	rh_reg_write(IBI_STATUS_RING_BASE_HI, upper_32_bits(rh->ibi_status_dma));
	rh_reg_write(IBI_DATA_RING_BASE_LO, lower_32_bits(rh->ibi_data_dma));
	rh_reg_write(IBI_DATA_RING_BASE_HI, upper_32_bits(rh->ibi_data_dma));

	regval = FIELD_PREP(IBI_STATUS_RING_SIZE, rh->ibi_status_entries) |
		 FIELD_PREP(IBI_DATA_CHUNK_SIZE, ilog2(rh->ibi_chunk_sz) - 2) |
		 FIELD_PREP(IBI_DATA_CHUNK_COUNT, rh->ibi_chunks_total);
	rh_reg_write(IBI_SETUP, regval);

	regval = rh_reg_read(INTR_SIGNAL_ENABLE);
	regval |= INTR_IBI_READY;
	rh_reg_write(INTR_SIGNAL_ENABLE, regval);

ring_ready:
	/*
	 * The MIPI I3C HCI specification does not document reset values for
	 * RING_OPERATION1 fields and some controllers (e.g. Intel controllers)
	 * do not reset the values, so ensure the ring pointers are set to zero
	 * here.
	 */
	rh_reg_write(RING_OPERATION1, 0);

	rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
	rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP);

	rh->done_ptr = 0;
	rh->ibi_chunk_ptr = 0;
}

static void hci_dma_init_rings(struct i3c_hci *hci)
{
	struct hci_rings_data *rings = hci->io_data;
	u32 regval;

	regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
	rhs_reg_write(CONTROL, regval);

	for (int i = 0; i < rings->total; i++)
		hci_dma_init_rh(hci, &rings->headers[i], i);
}

static void hci_dma_suspend(struct i3c_hci *hci)
{
	struct hci_rings_data *rings = hci->io_data;
	int n = rings ? rings->total : 0;

	for (int i = 0; i < n; i++) {
		struct hci_rh_data *rh = &rings->headers[i];

		rh_reg_write(INTR_SIGNAL_ENABLE, 0);
		rh_reg_write(RING_CONTROL, 0);
	}

	i3c_hci_sync_irq_inactive(hci);
}

static void hci_dma_resume(struct i3c_hci *hci)
{
	struct hci_rings_data *rings = hci->io_data;

	if (rings)
		hci_dma_init_rings(hci);
}

static int hci_dma_init(struct i3c_hci *hci)
{
	struct hci_rings_data *rings;
	struct hci_rh_data *rh;
	struct device *sysdev;
	u32 regval;
	unsigned int i, nr_rings, xfers_sz, resps_sz;
	unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
	int ret;

	/*
	 * Set pointer to a physical device that does DMA and has IOMMU setup
	 * done for it in case of enabled IOMMU and use it with the DMA API.
	 * Here such device is either
	 * "mipi-i3c-hci" platform device (OF/ACPI enumeration) parent or
	 * grandparent (PCI enumeration).
	 */
	sysdev = hci->master.dev.parent;
	if (sysdev->parent && dev_is_pci(sysdev->parent))
		sysdev = sysdev->parent;

	regval = rhs_reg_read(CONTROL);
	nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
	dev_dbg(&hci->master.dev, "%d DMA rings available\n", nr_rings);
	if (unlikely(nr_rings > 8)) {
		dev_err(&hci->master.dev, "number of rings should be <= 8\n");
		nr_rings = 8;
	}
	if (nr_rings > XFER_RINGS)
		nr_rings = XFER_RINGS;
	rings = kzalloc_flex(*rings, headers, nr_rings);
	if (!rings)
		return -ENOMEM;
	hci->io_data = rings;
	rings->total = nr_rings;
	rings->sysdev = sysdev;

	for (i = 0; i < rings->total; i++) {
		u32 offset = rhs_reg_read(RHn_OFFSET(i));

		dev_dbg(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
		ret = -EINVAL;
		if (!offset)
			goto err_out;
		rh = &rings->headers[i];
		rh->regs = hci->base_regs + offset;
		spin_lock_init(&rh->lock);
		init_completion(&rh->op_done);

		rh->xfer_entries = XFER_RING_ENTRIES;

		regval = rh_reg_read(CR_SETUP);
		rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
		rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
		dev_dbg(&hci->master.dev,
			"xfer_struct_sz = %d, resp_struct_sz = %d",
			rh->xfer_struct_sz, rh->resp_struct_sz);
		xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
		resps_sz = rh->resp_struct_sz * rh->xfer_entries;

		rh->xfer = dma_alloc_coherent(rings->sysdev, xfers_sz,
					      &rh->xfer_dma, GFP_KERNEL);
		rh->resp = dma_alloc_coherent(rings->sysdev, resps_sz,
					      &rh->resp_dma, GFP_KERNEL);
		rh->src_xfers =
			kmalloc_objs(*rh->src_xfers, rh->xfer_entries);
		ret = -ENOMEM;
		if (!rh->xfer || !rh->resp || !rh->src_xfers)
			goto err_out;

		/* IBIs */

		if (i >= IBI_RINGS)
			continue;

		regval = rh_reg_read(IBI_SETUP);
		rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
		rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
		rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;

		rh->ibi_chunk_sz = dma_get_cache_alignment();
		rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
		/*
		 * Round IBI data chunk size to number of bytes supported by
		 * the HW. Chunk size can be 2^n number of DWORDs which is the
		 * same as 2^(n+2) bytes, where n is 0..6.
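		 * For example, a 64-byte chunk is 16 DWORDs, i.e. n = 4,
		 * which is what hci_dma_init_rh() programs into
		 * IBI_DATA_CHUNK_SIZE as ilog2(64) - 2.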
		 */
		rh->ibi_chunk_sz = umax(4, rh->ibi_chunk_sz);
		rh->ibi_chunk_sz = roundup_pow_of_two(rh->ibi_chunk_sz);
		if (rh->ibi_chunk_sz > 256) {
			ret = -EINVAL;
			goto err_out;
		}

		ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
		ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;

		rh->ibi_status =
			dma_alloc_coherent(rings->sysdev, ibi_status_ring_sz,
					   &rh->ibi_status_dma, GFP_KERNEL);
		rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
		ret = -ENOMEM;
		if (!rh->ibi_status || !rh->ibi_data)
			goto err_out;
		rh->ibi_data_dma =
			dma_map_single(rings->sysdev, rh->ibi_data,
				       ibi_data_ring_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(rings->sysdev, rh->ibi_data_dma)) {
			rh->ibi_data_dma = 0;
			ret = -ENOMEM;
			goto err_out;
		}
	}

	ret = devm_add_action(hci->master.dev.parent, hci_dma_free, hci);
	if (ret)
		goto err_out;

	hci_dma_init_rings(hci);

	return 0;

err_out:
	hci_dma_free(hci);
	return ret;
}

static void hci_dma_unmap_xfer(struct i3c_hci *hci,
			       struct hci_xfer *xfer_list, unsigned int n)
{
	struct hci_xfer *xfer;
	unsigned int i;

	for (i = 0; i < n; i++) {
		xfer = xfer_list + i;
		if (!xfer->data)
			continue;
		i3c_master_dma_unmap_single(xfer->dma);
	}
}

static int hci_dma_queue_xfer(struct i3c_hci *hci,
			      struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i, ring, enqueue_ptr;
	u32 op1_val, op2_val;

	/* For now we only use ring 0 */
	ring = 0;
	rh = &rings->headers[ring];

	op1_val = rh_reg_read(RING_OPERATION1);
	enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
		enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
							  DMA_TO_DEVICE;
		bool need_bounce;

		/* store cmd descriptor */
		*ring_data++ = xfer->cmd_desc[0];
		*ring_data++ = xfer->cmd_desc[1];
		if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
			*ring_data++ = xfer->cmd_desc[2];
			*ring_data++ = xfer->cmd_desc[3];
		}

		/* first word of Data Buffer Descriptor Structure */
		if (!xfer->data)
			xfer->data_len = 0;
		*ring_data++ =
			FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
			((i == n - 1) ? DATA_BUF_IOC : 0);

		/* 2nd and 3rd words of Data Buffer Descriptor Structure */
		if (xfer->data) {
			need_bounce = device_iommu_mapped(rings->sysdev) &&
				      xfer->rnw &&
				      xfer->data_len != ALIGN(xfer->data_len, 4);
			xfer->dma = i3c_master_dma_map_single(rings->sysdev,
							      xfer->data,
							      xfer->data_len,
							      need_bounce,
							      dir);
			if (!xfer->dma) {
				hci_dma_unmap_xfer(hci, xfer_list, i);
				return -ENOMEM;
			}
			*ring_data++ = lower_32_bits(xfer->dma->addr);
			*ring_data++ = upper_32_bits(xfer->dma->addr);
		} else {
			*ring_data++ = 0;
			*ring_data++ = 0;
		}

		/* remember corresponding xfer struct */
		rh->src_xfers[enqueue_ptr] = xfer;
		/* remember corresponding ring/entry for this xfer structure */
		xfer->ring_number = ring;
		xfer->ring_entry = enqueue_ptr;

		enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;

		/*
		 * We may update the hardware view of the enqueue pointer
		 * only if we didn't reach its dequeue pointer.
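		 * One entry is deliberately left unused so that
		 * enqueue == dequeue always means an empty ring rather
		 * than a full one.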
		 */
		op2_val = rh_reg_read(RING_OPERATION2);
		if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
			/* the ring is full */
			hci_dma_unmap_xfer(hci, xfer_list, i + 1);
			return -EBUSY;
		}
	}

	/* take care to update the hardware enqueue pointer atomically */
	spin_lock_irq(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_ENQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock_irq(&rh->lock);

	return 0;
}

static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
				 struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
	unsigned int i;
	bool did_unqueue = false;

	/* stop the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
	if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
		/*
		 * We're deep in trouble if this condition is ever met.
		 * Hardware might still be writing to memory, etc.
		 */
		dev_crit(&hci->master.dev, "unable to abort the ring\n");
		WARN_ON(1);
	}

	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		int idx = xfer->ring_entry;

		/*
		 * At the time the abort happened, the xfer might have
		 * completed already. If not then replace corresponding
		 * descriptor entries with a no-op.
		 */
		if (idx >= 0) {
			u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;

			/* store no-op cmd descriptor */
			*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
			*ring_data++ = 0;
			if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
				*ring_data++ = 0;
				*ring_data++ = 0;
			}

			/* disassociate this xfer struct */
			rh->src_xfers[idx] = NULL;

			/* and unmap it */
			hci_dma_unmap_xfer(hci, xfer, 1);

			did_unqueue = true;
		}
	}

	/* restart the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);

	return did_unqueue;
}

static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	u32 op1_val, op2_val, resp, *ring_resp;
	unsigned int tid, done_ptr = rh->done_ptr;
	struct hci_xfer *xfer;

	for (;;) {
		op2_val = rh_reg_read(RING_OPERATION2);
		if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
			break;

		ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
		resp = *ring_resp;
		tid = RESP_TID(resp);
		dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);

		xfer = rh->src_xfers[done_ptr];
		if (!xfer) {
			dev_dbg(&hci->master.dev, "orphaned ring entry");
		} else {
			hci_dma_unmap_xfer(hci, xfer, 1);
			xfer->ring_entry = -1;
			xfer->response = resp;
			if (tid != xfer->cmd_tid) {
				dev_err(&hci->master.dev,
					"response tid=%d when expecting %d\n",
					tid, xfer->cmd_tid);
				/* TODO: do something about it? */
			}
			if (xfer->completion)
				complete(xfer->completion);
		}

		done_ptr = (done_ptr + 1) % rh->xfer_entries;
		rh->done_ptr = done_ptr;
	}

	/* take care to update the software dequeue pointer atomically */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);
}

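/*
 * IBI handling: payloads are copied out of the shared DMA chunk ring into
 * slots taken from a per-device generic IBI pool sized by the request below.
 */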
static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct i3c_generic_ibi_pool *pool;
	struct hci_dma_dev_ibi_data *dev_ibi;

	dev_ibi = kmalloc_obj(*dev_ibi);
	if (!dev_ibi)
		return -ENOMEM;
	pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(pool)) {
		kfree(dev_ibi);
		return PTR_ERR(pool);
	}
	dev_ibi->pool = pool;
	dev_ibi->max_len = req->max_payload_len;
	dev_data->ibi_data = dev_ibi;
	return 0;
}

static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	dev_data->ibi_data = NULL;
	i3c_generic_ibi_free_pool(dev_ibi->pool);
	kfree(dev_ibi);
}

static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}

static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	struct hci_rings_data *rings = hci->io_data;
	struct i3c_dev_desc *dev;
	struct i3c_hci_dev_data *dev_data;
	struct hci_dma_dev_ibi_data *dev_ibi;
	struct i3c_ibi_slot *slot;
	u32 op1_val, op2_val, ibi_status_error;
	unsigned int ptr, enq_ptr, deq_ptr;
	unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
	int ibi_addr, last_ptr;
	void *ring_ibi_data;
	dma_addr_t ring_ibi_data_dma;

	op1_val = rh_reg_read(RING_OPERATION1);
	deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);

	op2_val = rh_reg_read(RING_OPERATION2);
	enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);

	ibi_status_error = 0;
	ibi_addr = -1;
	ibi_chunks = 0;
	ibi_size = 0;
	last_ptr = -1;

	/* let's find all we can about this IBI */
	for (ptr = deq_ptr; ptr != enq_ptr;
	     ptr = (ptr + 1) % rh->ibi_status_entries) {
		u32 ibi_status, *ring_ibi_status;
		unsigned int chunks;

		ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
		ibi_status = *ring_ibi_status;
		dev_dbg(&hci->master.dev, "status = %#x", ibi_status);

		if (ibi_status_error) {
			/* we no longer care */
		} else if (ibi_status & IBI_ERROR) {
			ibi_status_error = ibi_status;
		} else if (ibi_addr == -1) {
			ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
		} else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
			/* the address changed unexpectedly */
			ibi_status_error = ibi_status;
		}

		chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
		ibi_chunks += chunks;
		if (!(ibi_status & IBI_LAST_STATUS)) {
			ibi_size += chunks * rh->ibi_chunk_sz;
		} else {
			ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
			last_ptr = ptr;
			break;
		}
	}

	/* validate what we've got */

	if (last_ptr == -1) {
		/* this IBI sequence is not yet complete */
		dev_dbg(&hci->master.dev,
			"no LAST_STATUS available (e=%d d=%d)",
			enq_ptr, deq_ptr);
		return;
	}
	deq_ptr = last_ptr + 1;
	deq_ptr %= rh->ibi_status_entries;

	if (ibi_status_error) {
		dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
		goto done;
	}

	/* determine who this is for */
	dev = i3c_hci_addr_to_dev(hci, ibi_addr);
	if (!dev) {
		dev_err(&hci->master.dev,
			"IBI for unknown device %#x\n", ibi_addr);
		goto done;
	}

	dev_data = i3c_dev_get_master_data(dev);
	dev_ibi = dev_data->ibi_data;
	if (ibi_size > dev_ibi->max_len) {
		dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
			ibi_size, dev_ibi->max_len);
		goto done;
	}

	/*
	 * This ring model is not suitable for zero-copy processing of IBIs.
	 * We have the data chunk ring wrap-around to deal with, meaning
	 * that the payload might span multiple chunks beginning at the
	 * end of the ring and wrap to the start of the ring. Furthermore
	 * there is no guarantee that those chunks will be released in order
	 * and in a timely manner by the upper driver. So let's just copy
	 * them to a discrete buffer. In practice they're supposed to be
	 * small anyway.
	 */
	slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
	if (!slot) {
		dev_err(&hci->master.dev, "no free slot for IBI\n");
		goto done;
	}

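	/*
	 * Example: with 128 chunks of 64 bytes and ibi_chunk_ptr == 126,
	 * a 200-byte payload is copied as 128 bytes from the tail of the
	 * data ring followed by 72 bytes from its head.
	 */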
	/* copy first part of the payload */
	ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
	ring_ibi_data = rh->ibi_data + ibi_data_offset;
	ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
	first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
			* rh->ibi_chunk_sz;
	if (first_part > ibi_size)
		first_part = ibi_size;
	dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
				first_part, DMA_FROM_DEVICE);
	memcpy(slot->data, ring_ibi_data, first_part);

	/* copy second part if any */
	if (ibi_size > first_part) {
		/* we wrap back to the start and copy remaining data */
		ring_ibi_data = rh->ibi_data;
		ring_ibi_data_dma = rh->ibi_data_dma;
		dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
					ibi_size - first_part, DMA_FROM_DEVICE);
		memcpy(slot->data + first_part, ring_ibi_data,
		       ibi_size - first_part);
	}

	/* submit it */
	slot->dev = dev;
	slot->len = ibi_size;
	i3c_master_queue_ibi(dev, slot);

done:
	/* take care to update the ibi dequeue pointer atomically */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_IBI_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);

	/* update the chunk pointer */
	rh->ibi_chunk_ptr += ibi_chunks;
	rh->ibi_chunk_ptr %= rh->ibi_chunks_total;

	/* and tell the hardware about freed chunks */
	rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
}

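/*
 * Per-ring interrupt dispatch: IBI data ready, transfer completion/error,
 * ring operation acknowledgment, transfer abort and IBI ring full events.
 */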
%#x", 837 i, status); 838 if (!status) 839 continue; 840 rh_reg_write(INTR_STATUS, status); 841 842 if (status & INTR_IBI_READY) 843 hci_dma_process_ibi(hci, rh); 844 if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR)) 845 hci_dma_xfer_done(hci, rh); 846 if (status & INTR_RING_OP) 847 complete(&rh->op_done); 848 849 if (status & INTR_TRANSFER_ABORT) { 850 u32 ring_status; 851 852 dev_notice_ratelimited(&hci->master.dev, 853 "Ring %d: Transfer Aborted\n", i); 854 mipi_i3c_hci_resume(hci); 855 ring_status = rh_reg_read(RING_STATUS); 856 if (!(ring_status & RING_STATUS_RUNNING) && 857 status & INTR_TRANSFER_COMPLETION && 858 status & INTR_TRANSFER_ERR) { 859 /* 860 * Ring stop followed by run is an Intel 861 * specific required quirk after resuming the 862 * halted controller. Do it only when the ring 863 * is not in running state after a transfer 864 * error. 865 */ 866 rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE); 867 rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | 868 RING_CTRL_RUN_STOP); 869 } 870 } 871 if (status & INTR_IBI_RING_FULL) 872 dev_err_ratelimited(&hci->master.dev, 873 "Ring %d: IBI Ring Full Condition\n", i); 874 875 handled = true; 876 } 877 878 return handled; 879 } 880 881 const struct hci_io_ops mipi_i3c_hci_dma = { 882 .init = hci_dma_init, 883 .cleanup = hci_dma_cleanup, 884 .queue_xfer = hci_dma_queue_xfer, 885 .dequeue_xfer = hci_dma_dequeue_xfer, 886 .irq_handler = hci_dma_irq_handler, 887 .request_ibi = hci_dma_request_ibi, 888 .free_ibi = hci_dma_free_ibi, 889 .recycle_ibi_slot = hci_dma_recycle_ibi_slot, 890 .suspend = hci_dma_suspend, 891 .resume = hci_dma_resume, 892 }; 893