Lines matching "riscv-iommu"

1 // SPDX-License-Identifier: GPL-2.0-only
3 * IOMMU API for RISC-V IOMMU implementations.
5 * Copyright © 2022-2024 Rivos Inc.
6 * Copyright © 2023 FORTH-ICS/CARV
13 #define pr_fmt(fmt) "riscv-iommu: " fmt
20 #include <linux/iommu.h>
25 #include "../iommu-pages.h"
26 #include "iommu-bits.h"
27 #include "iommu.h"
39 /* RISC-V IOMMU PPN <> PHYS address conversions, PHYS <=> PPN[53:10] */
40 #define phys_to_ppn(pa) (((pa) >> 2) & (((1ULL << 44) - 1) << 10))
41 #define ppn_to_phys(pn) (((pn) << 2) & (((1ULL << 44) - 1) << 12))
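A quick illustration (not part of the driver) of how these two macros round-trip a page-aligned physical address into and out of the register PPN[53:10] field; the address value is hypothetical:

/* PFN 0x80200 (physical address 0x80200000) lands in bits [53:10]. */
static_assert(phys_to_ppn(0x80200000ULL) == (0x80200ULL << 10));
static_assert(ppn_to_phys(0x80200ULL << 10) == 0x80200000ULL);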
44 iommu_get_iommu_dev(dev, struct riscv_iommu_device, iommu)
46 /* IOMMU PSCID allocation namespace. */
48 #define RISCV_IOMMU_MAX_PSCID (BIT(20) - 1)
50 /* Device resource-managed allocations */
59 iommu_free_pages(devres->addr);
67 return devres->addr == target->addr;
70 static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu,
76 addr = iommu_alloc_pages_node_sz(dev_to_node(iommu->dev),
89 devres->addr = addr;
91 devres_add(iommu->dev, devres);
96 static void riscv_iommu_free_pages(struct riscv_iommu_device *iommu, void *addr)
100 devres_release(iommu->dev, riscv_iommu_devres_pages_release,
111 _q->qid = RISCV_IOMMU_INTR_ ## name; \
112 _q->qbr = RISCV_IOMMU_REG_ ## name ## B; \
113 _q->qcr = RISCV_IOMMU_REG_ ## name ## CSR; \
114 _q->mask = _q->mask ?: (RISCV_IOMMU_DEF_ ## name ## _COUNT) - 1;\
118 #define Q_HEAD(q) ((q)->qbr + (RISCV_IOMMU_REG_CQH - RISCV_IOMMU_REG_CQB))
119 #define Q_TAIL(q) ((q)->qbr + (RISCV_IOMMU_REG_CQT - RISCV_IOMMU_REG_CQB))
120 #define Q_ITEM(q, index) ((q)->mask & (index))
121 #define Q_IPSR(q) BIT((q)->qid)
124 * Discover queue ring buffer hardware configuration, allocate in-memory
128 * @queue - data structure, configured with RISCV_IOMMU_QUEUE_INIT()
129 * @entry_size - queue single element size in bytes.
131 static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu,
142 riscv_iommu_writeq(iommu, queue->qbr, RISCV_IOMMU_QUEUE_LOG2SZ_FIELD);
143 qb = riscv_iommu_readq(iommu, queue->qbr);
150 logsz = ilog2(queue->mask);
162 queue->phys = pfn_to_phys(FIELD_GET(RISCV_IOMMU_PPN_FIELD, qb));
163 queue->base = devm_ioremap(iommu->dev, queue->phys, queue_size);
168 queue->base = riscv_iommu_get_pages(
169 iommu, max(queue_size, SZ_4K));
170 queue->phys = __pa(queue->base);
171 } while (!queue->base && logsz-- > 0);
174 if (!queue->base)
175 return -ENOMEM;
177 qb = phys_to_ppn(queue->phys) |
181 riscv_iommu_writeq(iommu, queue->qbr, qb);
182 rb = riscv_iommu_readq(iommu, queue->qbr);
184 dev_err(iommu->dev, "queue #%u allocation failed\n", queue->qid);
185 return -ENODEV;
189 queue->mask = (2U << logsz) - 1;
191 dev_dbg(iommu->dev, "queue #%u allocated 2^%u entries",
192 queue->qid, logsz + 1);
202 if (riscv_iommu_readl(queue->iommu, RISCV_IOMMU_REG_IPSR) & Q_IPSR(queue))
208 static int riscv_iommu_queue_vec(struct riscv_iommu_device *iommu, int n)
211 return (iommu->icvec >> (n * 4)) & RISCV_IOMMU_ICVEC_CIV;
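A minimal sketch of what riscv_iommu_queue_vec() extracts, assuming ICVEC packs one 4-bit vector index per interrupt source (CIV, FIV, PMIV, PIV, low nibble first) as defined in iommu-bits.h; the helper name is made up for illustration:

/* With a hypothetical icvec of 0x3210, queue #1 (the fault queue) uses vector 1. */
static inline unsigned int example_icvec_vector(u64 icvec, unsigned int qid)
{
	return (icvec >> (qid * 4)) & RISCV_IOMMU_ICVEC_CIV;	/* low-nibble mask */
}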
217 * @queue - data structure, already allocated with riscv_iommu_queue_alloc()
218 * @irq_handler - threaded interrupt handler.
220 static int riscv_iommu_queue_enable(struct riscv_iommu_device *iommu,
224 const unsigned int irq = iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)];
228 if (queue->iommu)
229 return -EBUSY;
233 return -ENODEV;
235 queue->iommu = iommu;
238 dev_name(iommu->dev), queue);
240 queue->iommu = NULL;
245 if (queue->qid == RISCV_IOMMU_INTR_CQ)
246 riscv_iommu_writel(queue->iommu, Q_TAIL(queue), 0);
248 riscv_iommu_writel(queue->iommu, Q_HEAD(queue), 0);
256 riscv_iommu_writel(iommu, queue->qcr,
261 riscv_iommu_readl_timeout(iommu, queue->qcr,
269 riscv_iommu_writel(iommu, queue->qcr, 0);
271 queue->iommu = NULL;
272 dev_err(iommu->dev, "queue #%u failed to start\n", queue->qid);
273 return -EBUSY;
277 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue));
288 struct riscv_iommu_device *iommu = queue->iommu;
291 if (!iommu)
294 free_irq(iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)], queue);
295 riscv_iommu_writel(iommu, queue->qcr, 0);
296 riscv_iommu_readl_timeout(iommu, queue->qcr,
301 dev_err(iommu->dev, "fail to disable hardware queue #%u, csr 0x%x\n",
302 queue->qid, csr);
304 queue->iommu = NULL;
314 unsigned int head = atomic_read(&queue->head);
315 unsigned int tail = atomic_read(&queue->tail);
317 int available = (int)(tail - head);
325 if (riscv_iommu_readl_timeout(queue->iommu, Q_TAIL(queue),
326 tail, (tail & ~queue->mask) == 0,
328 dev_err_once(queue->iommu->dev,
337 return (int)(atomic_add_return((tail - last) & queue->mask, &queue->tail) - head);
345 const unsigned int head = atomic_add_return(count, &queue->head);
347 riscv_iommu_writel(queue->iommu, Q_HEAD(queue), Q_ITEM(queue, head));
353 const unsigned int cons = atomic_read(&queue->head);
357 if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head,
358 !(head & ~queue->mask),
362 return cons + ((head - last) & queue->mask);
370 unsigned int cons = atomic_read(&queue->head);
373 if ((int)(cons - index) > 0)
378 (int)(cons - index) > 0, 0, timeout_us);
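The head/tail counters above are free-running and only masked when indexing, so ring occupancy is the unsigned difference cast to int; a standalone sketch of that idiom (hypothetical helper, not driver code):

/* e.g. head = 0xfffffffe, tail = 0x00000001 still yields 3 used slots. */
static inline int example_ring_used(unsigned int head, unsigned int tail)
{
	return (int)(tail - head);
}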
383 * Error handling for IOMMU hardware not responding in reasonable time
399 prod = atomic_inc_return(&queue->prod) - 1;
400 head = atomic_read(&queue->head);
403 if ((prod - head) > queue->mask) {
404 if (readx_poll_timeout(atomic_read, &queue->head,
405 head, (prod - head) < queue->mask,
408 } else if ((prod - head) == queue->mask) {
411 if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head,
412 !(head & ~queue->mask) && head != last,
415 atomic_add((head - last) & queue->mask, &queue->head);
419 memcpy(queue->base + Q_ITEM(queue, prod) * entry_size, entry, entry_size);
422 if (readx_poll_timeout(atomic_read, &queue->tail, tail, prod == tail,
432 riscv_iommu_writel(queue->iommu, Q_TAIL(queue), Q_ITEM(queue, prod + 1));
439 atomic_inc(&queue->tail);
448 dev_err_once(queue->iommu->dev, "Hardware error: command enqueue failed\n");
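A comment-only outline of the lock-free enqueue excerpted above (paraphrase, not a drop-in replacement):

/*
 * 1. Reserve a slot: prod = atomic_inc_return(&queue->prod) - 1.
 * 2. Wait until (prod - head) fits in the ring, refreshing the cached
 *    head from the Q_HEAD register when the ring looks exhausted.
 * 3. Copy the entry into base[Q_ITEM(queue, prod)].
 * 4. Wait for queue->tail to reach prod, write Q_TAIL to publish the
 *    entry to hardware, then atomic_inc(&queue->tail) so producers
 *    hand slots to hardware strictly in reservation order.
 */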
454 * IOMMU Command queue chapter 3.1
464 ctrl = riscv_iommu_readl(queue->iommu, queue->qcr);
467 riscv_iommu_writel(queue->iommu, queue->qcr, ctrl);
468 dev_warn(queue->iommu->dev,
470 queue->qid,
480 riscv_iommu_writel(queue->iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue));
485 /* Send command to the IOMMU command queue */
486 static void riscv_iommu_cmd_send(struct riscv_iommu_device *iommu,
489 riscv_iommu_queue_send(&iommu->cmdq, cmd, sizeof(*cmd));
493 static void riscv_iommu_cmd_sync(struct riscv_iommu_device *iommu,
500 prod = riscv_iommu_queue_send(&iommu->cmdq, &cmd, sizeof(cmd));
505 if (riscv_iommu_queue_wait(&iommu->cmdq, prod, timeout_us))
506 dev_err_once(iommu->dev,
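A hypothetical usage sketch built from helpers visible in this listing (plus riscv_iommu_cmd_inval_vma(), assumed to come from iommu-bits.h): invalidate one PSCID's translations, then wait for the IOFENCE.C completion via riscv_iommu_cmd_sync():

static void example_flush_pscid(struct riscv_iommu_device *iommu, unsigned int pscid)
{
	struct riscv_iommu_command cmd;

	riscv_iommu_cmd_inval_vma(&cmd);		/* IOTINVAL.VMA */
	riscv_iommu_cmd_inval_set_pscid(&cmd, pscid);
	riscv_iommu_cmd_send(iommu, &cmd);
	riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
}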
511 * IOMMU Fault/Event queue chapter 3.2
514 static void riscv_iommu_fault(struct riscv_iommu_device *iommu,
517 unsigned int err = FIELD_GET(RISCV_IOMMU_FQ_HDR_CAUSE, event->hdr);
518 unsigned int devid = FIELD_GET(RISCV_IOMMU_FQ_HDR_DID, event->hdr);
522 dev_warn_ratelimited(iommu->dev,
524 err, devid, event->iotval, event->iotval2);
531 struct riscv_iommu_device *iommu = queue->iommu;
536 events = (struct riscv_iommu_fq_record *)queue->base;
539 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue));
544 riscv_iommu_fault(iommu, &events[Q_ITEM(queue, idx)]);
549 ctrl = riscv_iommu_readl(iommu, queue->qcr);
551 riscv_iommu_writel(iommu, queue->qcr, ctrl);
552 dev_warn(iommu->dev,
554 queue->qid,
563 static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iommu,
566 const bool base_format = !(iommu->caps & RISCV_IOMMU_CAPABILITIES_MSI_FLAT);
574 if (iommu->ddt_mode < RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL ||
575 iommu->ddt_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_3LVL)
580 * DDI[0]: bits 0 - 6 (1st level) (7 bits)
581 * DDI[1]: bits 7 - 15 (2nd level) (9 bits)
582 * DDI[2]: bits 16 - 23 (3rd level) (8 bits)
585 * DDI[0]: bits 0 - 5 (1st level) (6 bits)
586 * DDI[1]: bits 6 - 14 (2nd level) (9 bits)
587 * DDI[2]: bits 15 - 23 (3rd level) (9 bits)
600 depth = iommu->ddt_mode - RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL;
604 /* Get to the level of the non-leaf node that holds the device context */
605 for (ddtp = iommu->ddt_root; depth-- > 0;) {
608 * Each non-leaf node is 64bits wide and on each level
624 ptr = riscv_iommu_get_pages(iommu, SZ_4K);
636 /* Race setting DDT detected, re-read and retry. */
637 riscv_iommu_free_pages(iommu, ptr);
644 * is 8 * 64bits, hence the (3 - base_format) below.
646 ddtp += (devid & ((64 << base_format) - 1)) << (3 - base_format);
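A small sketch of the device-ID split described in the comments above; the helper is hypothetical and shows only the extended (MSI) device-context format, where the three DDT levels index 6, 9 and 9 bits of the 24-bit device ID:

static inline void example_ddi_split(unsigned int devid, unsigned int ddi[3])
{
	ddi[0] = devid & 0x3f;			/* bits 0 - 5  */
	ddi[1] = (devid >> 6) & 0x1ff;		/* bits 6 - 14 */
	ddi[2] = (devid >> 15) & 0x1ff;		/* bits 15 - 23 */
}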
652 * This is best effort IOMMU translation shutdown flow.
653 * Disable IOMMU without waiting for hardware response.
655 void riscv_iommu_disable(struct riscv_iommu_device *iommu)
657 riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP,
660 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQCSR, 0);
661 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FQCSR, 0);
662 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_PQCSR, 0);
665 #define riscv_iommu_read_ddtp(iommu) ({ \
667 riscv_iommu_readq_timeout((iommu), RISCV_IOMMU_REG_DDTP, ddtp, \
672 static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu)
677 ddtp = riscv_iommu_read_ddtp(iommu);
679 return -EBUSY;
689 riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP,
691 ddtp = riscv_iommu_read_ddtp(iommu);
693 return -EBUSY;
695 iommu->ddt_phys = ppn_to_phys(ddtp);
696 if (iommu->ddt_phys)
697 iommu->ddt_root = devm_ioremap(iommu->dev,
698 iommu->ddt_phys, PAGE_SIZE);
699 if (iommu->ddt_root)
700 memset(iommu->ddt_root, 0, PAGE_SIZE);
703 if (!iommu->ddt_root) {
704 iommu->ddt_root = riscv_iommu_get_pages(iommu, SZ_4K);
705 iommu->ddt_phys = __pa(iommu->ddt_root);
708 if (!iommu->ddt_root)
709 return -ENOMEM;
717 * Accepted iommu->ddt_mode is updated on success.
719 static int riscv_iommu_iodir_set_mode(struct riscv_iommu_device *iommu,
722 struct device *dev = iommu->dev;
727 ddtp = riscv_iommu_read_ddtp(iommu);
729 return -EBUSY;
737 return -EINVAL;
742 rq_ddtp |= phys_to_ppn(iommu->ddt_phys);
744 riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, rq_ddtp);
745 ddtp = riscv_iommu_read_ddtp(iommu);
749 return -EBUSY;
752 /* Verify IOMMU hardware accepts new DDTP config. */
762 return -EINVAL;
766 * Mode field is WARL, an IOMMU may support a subset of
775 rq_mode--;
780 * We tried all supported modes and IOMMU hardware failed to
786 return -EINVAL;
789 iommu->ddt_mode = mode;
795 riscv_iommu_cmd_send(iommu, &cmd);
799 riscv_iommu_cmd_send(iommu, &cmd);
802 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
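A comment-only outline of the WARL mode negotiation excerpted above (paraphrase of the visible logic, not driver code):

/*
 * Request the highest directory mode first.  If the DDTP read-back
 * reports a different mode, step the request down one level (rq_mode--)
 * and try again; fail with -EINVAL once no requested mode is accepted.
 * On success, record ddt_mode, flush the directory and translation
 * caches (the two command sends above) and issue an IOFENCE.C sync.
 */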
807 /* This struct contains protection domain specific IOMMU driver data. */
822 /* Private IOMMU data for managed devices, dev_iommu_priv_* */
834 * Blocking and identity domains are not tracked here, as the IOMMU hardware
838 * The device pointer and IOMMU data remain stable in the bond struct after
839 * _probe_device() where it's attached to the managed IOMMU, up to the
852 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
858 return -ENOMEM;
859 bond->dev = dev;
863 * managed IOMMU device.
866 spin_lock(&domain->lock);
867 list_for_each(bonds, &domain->bonds)
868 if (dev_to_iommu(list_entry(bonds, struct riscv_iommu_bond, list)->dev) == iommu)
870 list_add_rcu(&bond->list, bonds);
871 spin_unlock(&domain->lock);
882 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
890 spin_lock(&domain->lock);
891 list_for_each_entry(bond, &domain->bonds, list) {
894 else if (bond->dev == dev)
896 else if (dev_to_iommu(bond->dev) == iommu)
900 list_del_rcu(&found->list);
901 spin_unlock(&domain->lock);
905 * If this was the last bond between this domain and the IOMMU
910 riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid);
911 riscv_iommu_cmd_send(iommu, &cmd);
913 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
920 * the hardware, when RISC-V IOMMU architecture specification update for
929 struct riscv_iommu_device *iommu, *prev;
931 unsigned long len = end - start + 1;
935 * For each IOMMU linked with this protection domain (via bonds->dev),
939 * bond creation - riscv_iommu_bond_link(), and device directory
940 * update - riscv_iommu_iodir_update().
943 * -------------------------- --------------------------
952 * be configured with already valid page table content. If an IOMMU is
961 list_for_each_entry_rcu(bond, &domain->bonds, list) {
962 iommu = dev_to_iommu(bond->dev);
966 * to the IOMMU for the same PSCID, and with domain->bonds list
967 * arranged based on the device's IOMMU, it's sufficient to check
970 if (iommu == prev)
974 riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid);
978 riscv_iommu_cmd_send(iommu, &cmd);
981 riscv_iommu_cmd_send(iommu, &cmd);
983 prev = iommu;
987 list_for_each_entry_rcu(bond, &domain->bonds, list) {
988 iommu = dev_to_iommu(bond->dev);
989 if (iommu == prev)
992 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
993 prev = iommu;
1007 * cached by the IOMMU hardware.
1012 static void riscv_iommu_iodir_update(struct riscv_iommu_device *iommu,
1022 for (i = 0; i < fwspec->num_ids; i++) {
1023 dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]);
1024 tc = READ_ONCE(dc->tc);
1028 WRITE_ONCE(dc->tc, tc & ~RISCV_IOMMU_DC_TC_V);
1032 riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]);
1033 riscv_iommu_cmd_send(iommu, &cmd);
1038 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
1044 for (i = 0; i < fwspec->num_ids; i++) {
1045 dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]);
1046 tc = READ_ONCE(dc->tc);
1049 WRITE_ONCE(dc->fsc, fsc);
1050 WRITE_ONCE(dc->ta, ta & RISCV_IOMMU_PC_TA_PSCID);
1053 WRITE_ONCE(dc->tc, tc);
1057 riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]);
1058 riscv_iommu_cmd_send(iommu, &cmd);
1061 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
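The excerpt above follows a two-phase device-context update; a comment-only paraphrase of the visible steps:

/*
 * Phase 1: clear DC.tc.V, then invalidate the directory-walk cache for
 *          each device ID (IODIR command + IOFENCE.C sync) so no
 *          translation can keep using the stale fsc/ta fields.
 * Phase 2: write the new fsc and ta, set DC.tc last, and issue a
 *          second invalidate + sync to pick up the new context.
 */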
1080 riscv_iommu_iotlb_inval(domain, gather->start, gather->end);
1083 #define PT_SHIFT (PAGE_SHIFT - ilog2(sizeof(pte_t)))
1119 unsigned long *ptr = domain->pgd_root;
1121 int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2;
1127 ptr += ((iova >> shift) & (PTRS_PER_PTE - 1));
1129 * Note: returned entry might be a non-leaf if there was
1145 * Non-leaf entry is missing, allocate and try to add to the
1149 addr = iommu_alloc_pages_node_sz(domain->numa_node, gfp,
1161 } while (level-- > 0);
1169 unsigned long *ptr = domain->pgd_root;
1171 int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2;
1176 ptr += ((iova >> shift) & (PTRS_PER_PTE - 1));
1185 } while (level-- > 0);
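A hypothetical helper mirroring the (iova >> shift) & (PTRS_PER_PTE - 1) indexing step used by both walkers above, assuming the usual Sv39/48/57 geometry of PT_SHIFT (9) index bits per level on top of a 4 KiB page:

static inline unsigned long example_pt_index(unsigned long iova, int level)
{
	unsigned int shift = PAGE_SHIFT + level * PT_SHIFT;

	return (iova >> shift) & (PTRS_PER_PTE - 1);
}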
1204 else if (domain->amo_enabled)
1212 rc = -ENOMEM;
1226 --pgcount;
1234 * invalidate all levels of page table (i.e. leaf and non-leaf)
1235 * is an invalidate-all-PSCID IOTINVAL.VMA with AV=0.
1237 * capability.NL (non-leaf) IOTINVAL command.
1263 if (iova & (pte_size - 1))
1270 iommu_iotlb_gather_add_page(&domain->domain, gather, iova,
1291 return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1));
1297 const unsigned long pfn = virt_to_pfn(domain->pgd_root);
1299 WARN_ON(!list_empty(&domain->bonds));
1301 if ((int)domain->pscid > 0)
1302 ida_free(&riscv_iommu_pscids, domain->pscid);
1308 static bool riscv_iommu_pt_supported(struct riscv_iommu_device *iommu, int pgd_mode)
1312 return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39;
1315 return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48;
1318 return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57;
1328 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
1332 if (!riscv_iommu_pt_supported(iommu, domain->pgd_mode))
1333 return -ENODEV;
1335 fsc = FIELD_PREP(RISCV_IOMMU_PC_FSC_MODE, domain->pgd_mode) |
1336 FIELD_PREP(RISCV_IOMMU_PC_FSC_PPN, virt_to_pfn(domain->pgd_root));
1337 ta = FIELD_PREP(RISCV_IOMMU_PC_TA_PSCID, domain->pscid) |
1341 return -ENOMEM;
1343 riscv_iommu_iodir_update(iommu, dev, fsc, ta);
1344 riscv_iommu_bond_unlink(info->domain, dev);
1345 info->domain = domain;
1363 struct riscv_iommu_device *iommu;
1368 iommu = dev_to_iommu(dev);
1369 if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57) {
1372 } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48) {
1375 } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39) {
1380 return ERR_PTR(-ENODEV);
1385 return ERR_PTR(-ENOMEM);
1387 INIT_LIST_HEAD_RCU(&domain->bonds);
1388 spin_lock_init(&domain->lock);
1389 domain->numa_node = dev_to_node(iommu->dev);
1390 domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD);
1391 domain->pgd_mode = pgd_mode;
1392 domain->pgd_root = iommu_alloc_pages_node_sz(domain->numa_node,
1394 if (!domain->pgd_root) {
1396 return ERR_PTR(-ENOMEM);
1399 domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1,
1401 if (domain->pscid < 0) {
1402 iommu_free_pages(domain->pgd_root);
1404 return ERR_PTR(-ENOMEM);
1408 * Note: RISC-V Privilege spec mandates that virtual addresses
1409 * need to be sign-extended, so if (VA_BITS - 1) is set, all
1415 * limit the available virtual addresses to VA_BITS - 1.
1417 va_mask = DMA_BIT_MASK(va_bits - 1);
1419 domain->domain.geometry.aperture_start = 0;
1420 domain->domain.geometry.aperture_end = va_mask;
1421 domain->domain.geometry.force_aperture = true;
1422 domain->domain.pgsize_bitmap = va_mask & (SZ_4K | SZ_2M | SZ_1G | SZ_512G);
1424 domain->domain.ops = &riscv_iommu_paging_domain_ops;
1426 return &domain->domain;
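A concrete (hypothetical) instance of the sign-extension note above: with Sv39, va_bits is 39, so the aperture is capped at DMA_BIT_MASK(38) and only the lower, non-sign-extended half of the virtual address space is offered to the DMA layer:

static_assert(DMA_BIT_MASK(39 - 1) == 0x3fffffffffULL);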
1433 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
1437 riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, 0);
1438 riscv_iommu_bond_unlink(info->domain, dev);
1439 info->domain = NULL;
1455 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
1458 riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, RISCV_IOMMU_PC_TA_V);
1459 riscv_iommu_bond_unlink(info->domain, dev);
1460 info->domain = NULL;
1481 return iommu_fwspec_add_ids(dev, args->args, 1);
1487 struct riscv_iommu_device *iommu;
1493 if (!fwspec || !fwspec->iommu_fwnode->dev || !fwspec->num_ids)
1494 return ERR_PTR(-ENODEV);
1496 iommu = dev_get_drvdata(fwspec->iommu_fwnode->dev);
1497 if (!iommu)
1498 return ERR_PTR(-ENODEV);
1501 * IOMMU hardware operating in fail-over BARE mode will provide
1504 if (iommu->ddt_mode <= RISCV_IOMMU_DDTP_IOMMU_MODE_BARE)
1505 return ERR_PTR(-ENODEV);
1509 return ERR_PTR(-ENOMEM);
1511 * Allocate and pre-configure device context entries in
1515 if (iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD)
1517 for (i = 0; i < fwspec->num_ids; i++) {
1518 dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]);
1521 return ERR_PTR(-ENODEV);
1523 if (READ_ONCE(dc->tc) & RISCV_IOMMU_DC_TC_V)
1524 dev_warn(dev, "already attached to IOMMU device directory\n");
1525 WRITE_ONCE(dc->tc, tc);
1530 return &iommu->iommu;
1551 static int riscv_iommu_init_check(struct riscv_iommu_device *iommu)
1556 * Make sure the IOMMU is switched off or in pass-through mode during
1560 ddtp = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_DDTP);
1562 return -EBUSY;
1567 return -EBUSY;
1568 riscv_iommu_disable(iommu);
1571 /* Configure accesses to in-memory data structures for CPU-native byte order. */
1573 !!(iommu->fctl & RISCV_IOMMU_FCTL_BE)) {
1574 if (!(iommu->caps & RISCV_IOMMU_CAPABILITIES_END))
1575 return -EINVAL;
1576 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL,
1577 iommu->fctl ^ RISCV_IOMMU_FCTL_BE);
1578 iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL);
1580 !!(iommu->fctl & RISCV_IOMMU_FCTL_BE))
1581 return -EINVAL;
1588 if (!iommu->irqs_count)
1589 return -EINVAL;
1591 iommu->icvec = FIELD_PREP(RISCV_IOMMU_ICVEC_FIV, 1 % iommu->irqs_count) |
1592 FIELD_PREP(RISCV_IOMMU_ICVEC_PIV, 2 % iommu->irqs_count) |
1593 FIELD_PREP(RISCV_IOMMU_ICVEC_PMIV, 3 % iommu->irqs_count);
1594 riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_ICVEC, iommu->icvec);
1595 iommu->icvec = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_ICVEC);
1596 if (max(max(FIELD_GET(RISCV_IOMMU_ICVEC_CIV, iommu->icvec),
1597 FIELD_GET(RISCV_IOMMU_ICVEC_FIV, iommu->icvec)),
1598 max(FIELD_GET(RISCV_IOMMU_ICVEC_PIV, iommu->icvec),
1599 FIELD_GET(RISCV_IOMMU_ICVEC_PMIV, iommu->icvec))) >= iommu->irqs_count)
1600 return -EINVAL;
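A worked (hypothetical) example of the vector spreading above: with iommu->irqs_count == 2 the requested layout is CQ->0, FQ->1, PQ->0, PM->1, and the ICVEC read-back check rejects any vector index the platform did not actually provide:

/* 1 % 2 == 1 (FIV), 2 % 2 == 0 (PIV), 3 % 2 == 1 (PMIV); CIV is left at 0. */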
1605 void riscv_iommu_remove(struct riscv_iommu_device *iommu)
1607 iommu_device_unregister(&iommu->iommu);
1608 iommu_device_sysfs_remove(&iommu->iommu);
1609 riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF);
1610 riscv_iommu_queue_disable(&iommu->cmdq);
1611 riscv_iommu_queue_disable(&iommu->fltq);
1614 int riscv_iommu_init(struct riscv_iommu_device *iommu)
1618 RISCV_IOMMU_QUEUE_INIT(&iommu->cmdq, CQ);
1619 RISCV_IOMMU_QUEUE_INIT(&iommu->fltq, FQ);
1621 rc = riscv_iommu_init_check(iommu);
1623 return dev_err_probe(iommu->dev, rc, "unexpected device state\n");
1625 rc = riscv_iommu_iodir_alloc(iommu);
1629 rc = riscv_iommu_queue_alloc(iommu, &iommu->cmdq,
1634 rc = riscv_iommu_queue_alloc(iommu, &iommu->fltq,
1639 rc = riscv_iommu_queue_enable(iommu, &iommu->cmdq, riscv_iommu_cmdq_process);
1643 rc = riscv_iommu_queue_enable(iommu, &iommu->fltq, riscv_iommu_fltq_process);
1647 rc = riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_MAX);
1651 rc = iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "riscv-iommu@%s",
1652 dev_name(iommu->dev));
1654 dev_err_probe(iommu->dev, rc, "cannot register sysfs interface\n");
1659 rc = rimt_iommu_register(iommu->dev);
1661 dev_err_probe(iommu->dev, rc, "cannot register iommu with RIMT\n");
1666 rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev);
1668 dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n");
1675 iommu_device_sysfs_remove(&iommu->iommu);
1677 riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF);
1679 riscv_iommu_queue_disable(&iommu->fltq);
1680 riscv_iommu_queue_disable(&iommu->cmdq);