// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include <dt-bindings/memory/mtk-memory-port.h>

#define REG_MMU_PT_BASE_ADDR			0x000

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL_GEN2			0x02c
#define REG_MMU_INV_SEL_GEN1			0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_MISC_CTRL			0x048
#define F_MMU_IN_ORDER_WR_EN_MASK		(BIT(1) | BIT(17))
#define F_MMU_STANDARD_AXI_MODE_MASK		(BIT(3) | BIT(19))

#define REG_MMU_DCM_DIS				0x050
#define F_MMU_DCM				BIT(8)

#define REG_MMU_WR_LEN_CTRL			0x054
#define F_MMU_WR_THROT_DIS_MASK			(BIT(5) | BIT(21))

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
						/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134
#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)

#define REG_MMU0_FAULT_VA			0x13c
#define F_MMU_INVAL_VA_31_12_MASK		GENMASK(31, 12)
#define F_MMU_INVAL_VA_34_32_MASK		GENMASK(11, 9)
#define F_MMU_INVAL_PA_34_32_MASK		GENMASK(8, 6)
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU0_INVLD_PA			0x140
#define REG_MMU1_FAULT_VA			0x144
#define REG_MMU1_INVLD_PA			0x148
#define REG_MMU0_INT_ID				0x150
#define REG_MMU1_INT_ID				0x154
#define F_MMU_INT_ID_COMM_ID(a)			(((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a)		(((a) >> 7) & 0x3)
#define F_MMU_INT_ID_COMM_ID_EXT(a)		(((a) >> 10) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID_EXT(a)		(((a) >> 7) & 0x7)
/* Macro for 5 bits length port ID field (default) */
#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)
/* Macro for 6 bits length port ID field */
#define F_MMU_INT_ID_LARB_ID_WID_6(a)		(((a) >> 8) & 0x7)
#define F_MMU_INT_ID_PORT_ID_WID_6(a)		(((a) >> 2) & 0x3f)

#define MTK_PROTECT_PA_ALIGN			256
#define MTK_IOMMU_BANK_SZ			0x1000

#define PERICFG_IOMMU_1				0x714

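/*
 * The flags below describe per-SoC capabilities. They are stored in
 * mtk_iommu_plat_data->flags and queried with MTK_IOMMU_HAS_FLAG() /
 * MTK_IOMMU_IS_TYPE() further down.
 */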
#define HAS_4GB_MODE			BIT(0)
/* HW will use the EMI clock if there isn't the "bclk". */
#define HAS_BCLK			BIT(1)
#define HAS_VLD_PA_RNG			BIT(2)
#define RESET_AXI			BIT(3)
#define OUT_ORDER_WR_EN			BIT(4)
#define HAS_SUB_COMM_2BITS		BIT(5)
#define HAS_SUB_COMM_3BITS		BIT(6)
#define WR_THROT_EN			BIT(7)
#define HAS_LEGACY_IVRP_PADDR		BIT(8)
#define IOVA_34_EN			BIT(9)
#define SHARE_PGTABLE			BIT(10) /* 2 HW share pgtable */
#define DCM_DISABLE			BIT(11)
#define STD_AXI_MODE			BIT(12) /* For non MM iommu */
/* 2 bits: iommu type */
#define MTK_IOMMU_TYPE_MM		(0x0 << 13)
#define MTK_IOMMU_TYPE_INFRA		(0x1 << 13)
#define MTK_IOMMU_TYPE_MASK		(0x3 << 13)
/* PM and clock always on. e.g. infra iommu */
#define PM_CLK_AO			BIT(15)
#define IFA_IOMMU_PCIE_SUPPORT		BIT(16)
#define PGTABLE_PA_35_EN		BIT(17)
#define TF_PORT_TO_ADDR_MT8173		BIT(18)
#define INT_ID_PORT_WIDTH_6		BIT(19)

#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask)	\
				((((pdata)->flags) & (mask)) == (_x))

#define MTK_IOMMU_HAS_FLAG(pdata, _x)	MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, _x)
#define MTK_IOMMU_IS_TYPE(pdata, _x)	MTK_IOMMU_HAS_FLAG_MASK(pdata, _x,\
							MTK_IOMMU_TYPE_MASK)

#define MTK_INVALID_LARBID		MTK_LARB_NR_MAX

#define MTK_LARB_COM_MAX	8
#define MTK_LARB_SUBCOM_MAX	8

#define MTK_IOMMU_GROUP_MAX	8
#define MTK_IOMMU_BANK_MAX	5

enum mtk_iommu_plat {
	M4U_MT2712,
	M4U_MT6779,
	M4U_MT6795,
	M4U_MT8167,
	M4U_MT8173,
	M4U_MT8183,
	M4U_MT8186,
	M4U_MT8192,
	M4U_MT8195,
	M4U_MT8365,
};

struct mtk_iommu_iova_region {
	dma_addr_t		iova_base;
	unsigned long long	size;
};

struct mtk_iommu_suspend_reg {
	u32			misc_ctrl;
	u32			dcm_dis;
	u32			ctrl_reg;
	u32			vld_pa_rng;
	u32			wr_len_ctrl;

	u32			int_control[MTK_IOMMU_BANK_MAX];
	u32			int_main_control[MTK_IOMMU_BANK_MAX];
	u32			ivrp_paddr[MTK_IOMMU_BANK_MAX];
};

struct mtk_iommu_plat_data {
	enum mtk_iommu_plat	m4u_plat;
	u32			flags;
	u32			inv_sel_reg;

	char			*pericfg_comp_str;
	struct list_head	*hw_list;
	unsigned int		iova_region_nr;
	const struct mtk_iommu_iova_region	*iova_region;

	u8			banks_num;
	bool			banks_enable[MTK_IOMMU_BANK_MAX];
	unsigned int		banks_portmsk[MTK_IOMMU_BANK_MAX];
	unsigned char		larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
};

struct mtk_iommu_bank_data {
	void __iomem		*base;
	int			irq;
	u8			id;
	struct device		*parent_dev;
	struct mtk_iommu_data	*parent_data;
	spinlock_t		tlb_lock; /* lock for tlb range flush */
	struct mtk_iommu_domain	*m4u_dom; /* Each bank has a domain */
};

struct mtk_iommu_data {
	struct device		*dev;
	struct clk		*bclk;
	phys_addr_t		protect_base; /* protect memory base */
	struct mtk_iommu_suspend_reg	reg;
	struct iommu_group	*m4u_group[MTK_IOMMU_GROUP_MAX];
	bool			enable_4GB;

	struct iommu_device	iommu;
	const struct mtk_iommu_plat_data *plat_data;
	struct device		*smicomm_dev;

	struct mtk_iommu_bank_data	*bank;

	struct dma_iommu_mapping	*mapping; /* For mtk_iommu_v1.c */
	struct regmap		*pericfg;

	struct mutex		mutex; /* Protect m4u_group/m4u_dom above */

	/*
	 * In the sharing pgtable case, list data->list to the global list like m4ulist.
	 * In the non-sharing pgtable case, list data->list to its own hw_list_head.
	 */
	struct list_head	*hw_list;
	struct list_head	hw_list_head;
	struct list_head	list;
	struct mtk_smi_larb_iommu	larb_imu[MTK_LARB_NR_MAX];
};

struct mtk_iommu_domain {
	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct mtk_iommu_bank_data	*bank;
	struct iommu_domain		domain;

	struct mutex			mutex; /* Protect "data" in this structure */
};

static int mtk_iommu_bind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->larb_imu);
}

static void mtk_iommu_unbind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);

	component_unbind_all(dev, &data->larb_imu);
}

static const struct iommu_ops mtk_iommu_ops;

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid);

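/*
 * Pack an (up to 35-bit) address for the REG_MMU_INVLD_START_A/END_A
 * registers: bits [31:12] stay in place and any address bits above bit 31
 * are folded into the low bits of the register value.
 */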
#define MTK_IOMMU_TLB_ADDR(iova) ({					\
	dma_addr_t _addr = iova;					\
	((lower_32_bits(_addr) & GENMASK(31, 12)) | upper_32_bits(_addr));\
})

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0      1G       2G     3G       4G     5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 *  =============================
 *
 *                                 4G      5G     6G      7G      8G
 *                                 |---E---|---B---|---C---|---D---|
 *                                 +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by M4U; for regions 'B'/'C'/'D', bit 32
 * of the CPU physical address always needs to be set, and for region 'E',
 * the CPU physical address is kept as is.
 * Additionally, the iommu consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL

static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data, head)  list_for_each_entry(data, head, list)

static const struct mtk_iommu_iova_region single_domain[] = {
	{.iova_base = 0,		.size = SZ_4G},
};

static const struct mtk_iommu_iova_region mt8192_multi_dom[] = {
	{ .iova_base = 0x0,		.size = SZ_4G},		/* 0 ~ 4G */
	#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
	{ .iova_base = SZ_4G,		.size = SZ_4G},		/* 4G ~ 8G */
	{ .iova_base = SZ_4G * 2,	.size = SZ_4G},		/* 8G ~ 12G */
	{ .iova_base = SZ_4G * 3,	.size = SZ_4G},		/* 12G ~ 16G */

	{ .iova_base = 0x240000000ULL,	.size = 0x4000000},	/* CCU0 */
	{ .iova_base = 0x244000000ULL,	.size = 0x4000000},	/* CCU1 */
	#endif
};

/* If 2 M4U share a domain (use the same hwlist), put the corresponding info in the first data. */
static struct mtk_iommu_data *mtk_iommu_get_frst_data(struct list_head *hwlist)
{
	return list_first_entry(hwlist, struct mtk_iommu_data, list);
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

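/*
 * Invalidate the whole TLB of one IOMMU HW. Used by flush_iotlb_all, the
 * fault ISR, and as the fallback when a range invalidation times out.
 */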
static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	/* TLB flush-all always goes through bank0. */
	struct mtk_iommu_bank_data *bank = &data->bank[0];
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->tlb_lock, flags);
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, base + data->plat_data->inv_sel_reg);
	writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
	spin_unlock_irqrestore(&bank->tlb_lock, flags);
}

static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
					   struct mtk_iommu_bank_data *bank)
{
	struct list_head *head = bank->parent_data->hw_list;
	struct mtk_iommu_bank_data *curbank;
	struct mtk_iommu_data *data;
	bool check_pm_status;
	unsigned long flags;
	void __iomem *base;
	int ret;
	u32 tmp;

	for_each_m4u(data, head) {
		/*
		 * To avoid resuming the iommu device frequently when it is not
		 * active, don't always call pm_runtime_get here; the tlb flush
		 * then relies on the tlb-flush-all done in runtime resume.
		 *
		 * There are 2 special cases:
		 *
		 * Case1: The iommu dev doesn't have a power domain but has bclk.
		 * This case should also avoid the tlb flush while the dev is not
		 * active, to mute the tlb timeout log, e.g. on mt8173.
		 *
		 * Case2: The power/clock of the infra iommu is always on, and it
		 * doesn't have a device link with the master devices. This case
		 * should skip the PM status check.
		 */
		check_pm_status = !MTK_IOMMU_HAS_FLAG(data->plat_data, PM_CLK_AO);

		if (check_pm_status) {
			if (pm_runtime_get_if_in_use(data->dev) <= 0)
				continue;
		}

		curbank = &data->bank[bank->id];
		base = curbank->base;

		spin_lock_irqsave(&curbank->tlb_lock, flags);
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       base + data->plat_data->inv_sel_reg);

		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A);
		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
			       base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE);

		/* tlb sync */
		ret = readl_poll_timeout_atomic(base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 1000);

		/* Clear the CPE status */
		writel_relaxed(0, base + REG_MMU_CPE_DONE);
		spin_unlock_irqrestore(&curbank->tlb_lock, flags);

		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(data);
		}

		if (check_pm_status)
			pm_runtime_put(data->dev);
	}
}

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_bank_data *bank = dev_id;
	struct mtk_iommu_data *data = bank->parent_data;
	struct mtk_iommu_domain *dom = bank->m4u_dom;
	unsigned int fault_larb = MTK_INVALID_LARBID, fault_port = 0, sub_comm = 0;
	u32 int_state, regval, va34_32, pa34_32;
	const struct mtk_iommu_plat_data *plat_data = data->plat_data;
	void __iomem *base = bank->base;
	u64 fault_iova, fault_pa;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(base + REG_MMU1_INVLD_PA);
	}
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	if (MTK_IOMMU_HAS_FLAG(plat_data, IOVA_34_EN)) {
		va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
		fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
		fault_iova |= (u64)va34_32 << 32;
	}
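	/*
	 * Bits [34:32] of the faulting PA are carried in the FAULT_VA
	 * register (F_MMU_INVAL_PA_34_32_MASK); merge them into fault_pa.
	 */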
	pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
	fault_pa |= (u64)pa34_32 << 32;

	if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) {
		if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) {
			fault_larb = F_MMU_INT_ID_COMM_ID(regval);
			sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
			fault_port = F_MMU_INT_ID_PORT_ID(regval);
		} else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) {
			fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval);
			sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval);
			fault_port = F_MMU_INT_ID_PORT_ID(regval);
		} else if (MTK_IOMMU_HAS_FLAG(plat_data, INT_ID_PORT_WIDTH_6)) {
			fault_port = F_MMU_INT_ID_PORT_ID_WID_6(regval);
			fault_larb = F_MMU_INT_ID_LARB_ID_WID_6(regval);
		} else {
			fault_port = F_MMU_INT_ID_PORT_ID(regval);
			fault_larb = F_MMU_INT_ID_LARB_ID(regval);
		}
		fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
	}

	if (report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			bank->parent_dev,
			"fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n",
			int_state, fault_iova, fault_pa, regval, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static unsigned int mtk_iommu_get_bank_id(struct device *dev,
					  const struct mtk_iommu_plat_data *plat_data)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	unsigned int i, portmsk = 0, bankid = 0;

	if (plat_data->banks_num == 1)
		return bankid;

	for (i = 0; i < fwspec->num_ids; i++)
		portmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i]));

	for (i = 0; i < plat_data->banks_num && i < MTK_IOMMU_BANK_MAX; i++) {
		if (!plat_data->banks_enable[i])
			continue;

		if (portmsk & plat_data->banks_portmsk[i]) {
			bankid = i;
			break;
		}
	}
	return bankid; /* default is 0 */
}

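/*
 * Pick the iova region for a master from its dma-ranges (dev->dma_range_map):
 * an exact match wins, otherwise the last region that fully contains the
 * range is used.
 */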
static int mtk_iommu_get_iova_region_id(struct device *dev,
					const struct mtk_iommu_plat_data *plat_data)
{
	const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;
	const struct bus_dma_region *dma_rgn = dev->dma_range_map;
	int i, candidate = -1;
	dma_addr_t dma_end;

	if (!dma_rgn || plat_data->iova_region_nr == 1)
		return 0;

	dma_end = dma_rgn->dma_start + dma_rgn->size - 1;
	for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) {
		/* Best fit. */
		if (dma_rgn->dma_start == rgn->iova_base &&
		    dma_end == rgn->iova_base + rgn->size - 1)
			return i;
		/* ok if it is inside this region. */
		if (dma_rgn->dma_start >= rgn->iova_base &&
		    dma_end < rgn->iova_base + rgn->size)
			candidate = i;
	}

	if (candidate >= 0)
		return candidate;
	dev_err(dev, "Can NOT find the iommu domain id(%pad 0x%llx).\n",
		&dma_rgn->dma_start, dma_rgn->size);
	return -EINVAL;
}

static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
			    bool enable, unsigned int regionid)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	const struct mtk_iommu_iova_region *region;
	u32 peri_mmuen, peri_mmuen_msk;
	int i, ret = 0;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);

		if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
			larb_mmu = &data->larb_imu[larbid];

			region = data->plat_data->iova_region + regionid;
			larb_mmu->bank[portid] = upper_32_bits(region->iova_base);

			dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n",
				enable ? "enable" : "disable", dev_name(larb_mmu->dev),
				portid, regionid, larb_mmu->bank[portid]);

			if (enable)
				larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
			else
				larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
		} else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
			peri_mmuen_msk = BIT(portid);
			/* PCI dev has only one output id, enable the next writing bit for PCIe */
			if (dev_is_pci(dev))
				peri_mmuen_msk |= BIT(portid + 1);

			peri_mmuen = enable ? peri_mmuen_msk : 0;
			ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1,
						 peri_mmuen_msk, peri_mmuen);
			if (ret)
				dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n",
					enable ? "enable" : "disable",
					dev_name(data->dev), peri_mmuen_msk, ret);
		}
	}
	return ret;
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
				     struct mtk_iommu_data *data,
				     unsigned int region_id)
{
	const struct mtk_iommu_iova_region *region;
	struct mtk_iommu_domain	*m4u_dom;

	/* Always use bank0 in sharing pgtable case */
	m4u_dom = data->bank[0].m4u_dom;
	if (m4u_dom) {
		dom->iop = m4u_dom->iop;
		dom->cfg = m4u_dom->cfg;
		dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap;
		goto update_iova_region;
	}

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
		.iommu_dev = data->dev,
	};

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN))
		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT;

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
		dom->cfg.oas = data->enable_4GB ? 33 : 32;
	else
		dom->cfg.oas = 35;

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;

update_iova_region:
	/* Update the iova region for this domain */
	region = data->plat_data->iova_region + region_id;
	dom->domain.geometry.aperture_start = region->iova_base;
	dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
	dom->domain.geometry.force_aperture = true;
	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;
	mutex_init(&dom->mutex);

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata;
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct list_head *hw_list = data->hw_list;
	struct device *m4udev = data->dev;
	struct mtk_iommu_bank_data *bank;
	unsigned int bankid;
	int ret, region_id;

	region_id = mtk_iommu_get_iova_region_id(dev, data->plat_data);
	if (region_id < 0)
		return region_id;

	bankid = mtk_iommu_get_bank_id(dev, data->plat_data);
	mutex_lock(&dom->mutex);
	if (!dom->bank) {
		/* Data is in the frstdata in sharing pgtable case. */
		frstdata = mtk_iommu_get_frst_data(hw_list);

		ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
		if (ret) {
			mutex_unlock(&dom->mutex);
			return -ENODEV;
		}
		dom->bank = &data->bank[bankid];
	}
	mutex_unlock(&dom->mutex);

	mutex_lock(&data->mutex);
	bank = &data->bank[bankid];
	if (!bank->m4u_dom) { /* Initialize the M4U HW for this bank */
		ret = pm_runtime_resume_and_get(m4udev);
		if (ret < 0) {
			dev_err(m4udev, "pm get fail(%d) in attach.\n", ret);
			goto err_unlock;
		}

		ret = mtk_iommu_hw_init(data, bankid);
		if (ret) {
			pm_runtime_put(m4udev);
			goto err_unlock;
		}
		bank->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr, bank->base + REG_MMU_PT_BASE_ADDR);

		pm_runtime_put(m4udev);
	}
	mutex_unlock(&data->mutex);

	return mtk_iommu_config(data, dev, true, region_id);

err_unlock:
	mutex_unlock(&data->mutex);
	return ret;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);

	mtk_iommu_config(data, dev, false, 0);
}

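/*
 * map/unmap only update the pagetable; the TLB maintenance is deferred to
 * .iotlb_sync/.iotlb_sync_map/.flush_iotlb_all, which end up in
 * mtk_iommu_tlb_flush_range_sync() or mtk_iommu_tlb_flush_all().
 */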
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
	if (dom->bank->parent_data->enable_4GB)
		paddr |= BIT_ULL(32);

	/* Synchronize with the tlb_lock */
	return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	iommu_iotlb_gather_add_range(gather, iova, size);
	return dom->iop->unmap(dom->iop, iova, size, gather);
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	mtk_iommu_tlb_flush_all(dom->bank->parent_data);
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	size_t length = gather->end - gather->start + 1;

	mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
}

static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	phys_addr_t pa;

	pa = dom->iop->iova_to_phys(dom->iop, iova);
	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
	    dom->bank->parent_data->enable_4GB &&
	    pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);

	return pa;
}

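/*
 * For MM iommu masters, a device link to the smi-larb (supplier) is added
 * below so the larb is runtime-resumed whenever the master is.
 */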
static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;
	struct device_link *link;
	struct device *larbdev;
	unsigned int larbid, larbidx, i;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return ERR_PTR(-ENODEV); /* Not an iommu client device */

	data = dev_iommu_priv_get(dev);

	if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
		return &data->iommu;

	/*
	 * Link the consumer device with the smi-larb device (supplier).
	 * Each larb is an independent HW.
	 * All the ports of one device should be in the same larb.
	 */
	larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
	if (larbid >= MTK_LARB_NR_MAX)
		return ERR_PTR(-EINVAL);

	for (i = 1; i < fwspec->num_ids; i++) {
		larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
		if (larbid != larbidx) {
			dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
				larbid, larbidx);
			return ERR_PTR(-EINVAL);
		}
	}
	larbdev = data->larb_imu[larbid].dev;
	if (!larbdev)
		return ERR_PTR(-EINVAL);

	link = device_link_add(dev, larbdev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	if (!link)
		dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
	return &data->iommu;
}

static void mtk_iommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;
	struct device *larbdev;
	unsigned int larbid;

	data = dev_iommu_priv_get(dev);
	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
		larbdev = data->larb_imu[larbid].dev;
		device_link_remove(dev, larbdev);
	}
}

static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data)
{
	unsigned int bankid;

	/*
	 * If the bank function is enabled, each bank is an iommu group/domain.
	 * Otherwise, each iova region is an iommu group/domain.
	 */
	bankid = mtk_iommu_get_bank_id(dev, plat_data);
	if (bankid)
		return bankid;

	return mtk_iommu_get_iova_region_id(dev, plat_data);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *c_data = dev_iommu_priv_get(dev), *data;
	struct list_head *hw_list = c_data->hw_list;
	struct iommu_group *group;
	int groupid;

	data = mtk_iommu_get_frst_data(hw_list);
	if (!data)
		return ERR_PTR(-ENODEV);

	groupid = mtk_iommu_get_group_id(dev, data->plat_data);
	if (groupid < 0)
		return ERR_PTR(groupid);

	mutex_lock(&data->mutex);
	group = data->m4u_group[groupid];
	if (!group) {
		group = iommu_group_alloc();
		if (!IS_ERR(group))
			data->m4u_group[groupid] = group;
	} else {
		iommu_group_ref_get(group);
	}
	mutex_unlock(&data->mutex);
	return group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static void mtk_iommu_get_resv_regions(struct device *dev,
				       struct list_head *head)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
	unsigned int regionid = mtk_iommu_get_iova_region_id(dev, data->plat_data), i;
	const struct mtk_iommu_iova_region *resv, *curdom;
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_READ;

	if ((int)regionid < 0)
		return;
	curdom = data->plat_data->iova_region + regionid;
	for (i = 0; i < data->plat_data->iova_region_nr; i++) {
		resv = data->plat_data->iova_region + i;

		/* Only reserve when the region is inside the current domain */
		if (resv->iova_base <= curdom->iova_base ||
		    resv->iova_base + resv->size >= curdom->iova_base + curdom->size)
			continue;

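		/*
		 * Regions sitting inside the current domain (e.g. the CCU
		 * windows in mt8192_multi_dom) are carved out of the usable
		 * IOVA space.
		 */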
		region = iommu_alloc_resv_region(resv->iova_base, resv->size,
						 prot, IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		if (!region)
			return;

		list_add_tail(&region->list, head);
	}
}

static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.probe_device	= mtk_iommu_probe_device,
	.release_device	= mtk_iommu_release_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.get_resv_regions = mtk_iommu_get_resv_regions,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	.owner		= THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= mtk_iommu_attach_device,
		.detach_dev	= mtk_iommu_detach_device,
		.map		= mtk_iommu_map,
		.unmap		= mtk_iommu_unmap,
		.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
		.iotlb_sync	= mtk_iommu_iotlb_sync,
		.iotlb_sync_map	= mtk_iommu_sync_map,
		.iova_to_phys	= mtk_iommu_iova_to_phys,
		.free		= mtk_iommu_domain_free,
	}
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid)
{
	const struct mtk_iommu_bank_data *bankx = &data->bank[bankid];
	const struct mtk_iommu_bank_data *bank0 = &data->bank[0];
	u32 regval;

	/*
	 * Global control settings are in bank0. These global registers may be
	 * re-initialised here since we are not sure whether there are any
	 * bank0 consumers.
	 */
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, TF_PORT_TO_ADDR_MT8173)) {
		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
	} else {
		regval = readl_relaxed(bank0->base + REG_MMU_CTRL_REG);
		regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
	}
	writel_relaxed(regval, bank0->base + REG_MMU_CTRL_REG);

	if (data->enable_4GB &&
	    MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
		/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bits [32:30].
		 */
		regval = F_MMU_VLD_PA_RNG(7, 4);
		writel_relaxed(regval, bank0->base + REG_MMU_VLD_PA_RNG);
	}
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, DCM_DISABLE))
		writel_relaxed(F_MMU_DCM, bank0->base + REG_MMU_DCM_DIS);
	else
		writel_relaxed(0, bank0->base + REG_MMU_DCM_DIS);

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
		/* write command throttling mode */
		regval = readl_relaxed(bank0->base + REG_MMU_WR_LEN_CTRL);
		regval &= ~F_MMU_WR_THROT_DIS_MASK;
		writel_relaxed(regval, bank0->base + REG_MMU_WR_LEN_CTRL);
	}

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
		/* The register is called STANDARD_AXI_MODE in this case */
		regval = 0;
	} else {
		regval = readl_relaxed(bank0->base + REG_MMU_MISC_CTRL);
		if (!MTK_IOMMU_HAS_FLAG(data->plat_data, STD_AXI_MODE))
			regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
		if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
			regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
	}
	writel_relaxed(regval, bank0->base + REG_MMU_MISC_CTRL);

	/* Independent settings for each bank */
	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, bankx->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, bankx->base + REG_MMU_INT_MAIN_CONTROL);

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, bankx->base + REG_MMU_IVRP_PADDR);

	if (devm_request_irq(bankx->parent_dev, bankx->irq, mtk_iommu_isr, 0,
			     dev_name(bankx->parent_dev), (void *)bankx)) {
		writel_relaxed(0, bankx->base + REG_MMU_PT_BASE_ADDR);
		dev_err(bankx->parent_dev, "Failed @ IRQ-%d Request\n", bankx->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

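/*
 * Parse the "mediatek,larbs" phandles: record each available larb in
 * larb_imu[], add it to the component match, and resolve the smi-common
 * (possibly behind an smi-sub-common) that all larbs must share, then add a
 * PM device link between the smi-common and this IOMMU.
 */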
static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **match,
				  struct mtk_iommu_data *data)
{
	struct device_node *larbnode, *frst_avail_smicomm_node = NULL;
	struct platform_device *plarbdev, *pcommdev;
	struct device_link *link;
	int i, larb_nr, ret;

	larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;
	if (larb_nr == 0 || larb_nr > MTK_LARB_NR_MAX)
		return -EINVAL;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *smicomm_node, *smi_subcomm_node;
		u32 id;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode) {
			ret = -EINVAL;
			goto err_larbdev_put;
		}

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret)/* The id is consecutive if this property is absent */
			id = i;
		if (id >= MTK_LARB_NR_MAX) {
			of_node_put(larbnode);
			ret = -EINVAL;
			goto err_larbdev_put;
		}

		plarbdev = of_find_device_by_node(larbnode);
		of_node_put(larbnode);
		if (!plarbdev) {
			ret = -ENODEV;
			goto err_larbdev_put;
		}
		if (data->larb_imu[id].dev) {
			platform_device_put(plarbdev);
			ret = -EEXIST;
			goto err_larbdev_put;
		}
		data->larb_imu[id].dev = &plarbdev->dev;

		if (!plarbdev->dev.driver) {
			ret = -EPROBE_DEFER;
			goto err_larbdev_put;
		}

		/* Get smi-(sub)-common dev from the last larb. */
		smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
		if (!smi_subcomm_node) {
			ret = -EINVAL;
			goto err_larbdev_put;
		}

		/*
		 * There may be two levels of smi-common: the node is an
		 * smi-sub-common if it has another mediatek,smi property,
		 * otherwise it is the smi-common.
		 */
		smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0);
		if (smicomm_node)
			of_node_put(smi_subcomm_node);
		else
			smicomm_node = smi_subcomm_node;

		/*
		 * All the larbs that connect to one IOMMU must connect with the same
		 * smi-common.
		 */
		if (!frst_avail_smicomm_node) {
			frst_avail_smicomm_node = smicomm_node;
		} else if (frst_avail_smicomm_node != smicomm_node) {
			dev_err(dev, "mediatek,smi property is not right @larb%d.", id);
			of_node_put(smicomm_node);
			ret = -EINVAL;
			goto err_larbdev_put;
		} else {
			of_node_put(smicomm_node);
		}

		component_match_add(dev, match, component_compare_dev, &plarbdev->dev);
		platform_device_put(plarbdev);
	}

	if (!frst_avail_smicomm_node)
		return -EINVAL;

	pcommdev = of_find_device_by_node(frst_avail_smicomm_node);
	of_node_put(frst_avail_smicomm_node);
	if (!pcommdev)
		return -ENODEV;
	data->smicomm_dev = &pcommdev->dev;

	link = device_link_add(data->smicomm_dev, dev,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	platform_device_put(pcommdev);
	if (!link) {
		dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
		return -EINVAL;
	}
	return 0;

err_larbdev_put:
	/* The id mapping may not be linear, so loop over the whole array */
	for (i = MTK_LARB_NR_MAX - 1; i >= 0; i--) {
		if (!data->larb_imu[i].dev)
			continue;
		put_device(data->larb_imu[i].dev);
	}
	return ret;
}

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t ioaddr;
	struct component_match *match = NULL;
	struct regmap *infracfg;
	void *protect;
	int ret, banks_num, i = 0;
	u32 val;
	char *p;
	struct mtk_iommu_bank_data *bank;
	void __iomem *base;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

11950df4fabeSYong Wu /* Protect memory. HW will access here when a translation fault occurs. */ 11960df4fabeSYong Wu protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL); 11970df4fabeSYong Wu if (!protect) 11980df4fabeSYong Wu return -ENOMEM; 11990df4fabeSYong Wu data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN); 12000df4fabeSYong Wu 1201c2c59456SMiles Chen if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) { 12027d748ffdSAngeloGioacchino Del Regno infracfg = syscon_regmap_lookup_by_phandle(dev->of_node, "mediatek,infracfg"); 12037d748ffdSAngeloGioacchino Del Regno if (IS_ERR(infracfg)) { 12047d748ffdSAngeloGioacchino Del Regno /* 12057d748ffdSAngeloGioacchino Del Regno * Legacy devicetrees will not specify a phandle to 12067d748ffdSAngeloGioacchino Del Regno * mediatek,infracfg: in that case, we use the older 12077d748ffdSAngeloGioacchino Del Regno * way to retrieve a syscon to infra. 12087d748ffdSAngeloGioacchino Del Regno * 12097d748ffdSAngeloGioacchino Del Regno * This is for backwards compatibility purposes only, hence 12107d748ffdSAngeloGioacchino Del Regno * no more compatibles shall be added to this. 12117d748ffdSAngeloGioacchino Del Regno */ 1212c2c59456SMiles Chen switch (data->plat_data->m4u_plat) { 1213c2c59456SMiles Chen case M4U_MT2712: 1214c2c59456SMiles Chen p = "mediatek,mt2712-infracfg"; 1215c2c59456SMiles Chen break; 1216c2c59456SMiles Chen case M4U_MT8173: 1217c2c59456SMiles Chen p = "mediatek,mt8173-infracfg"; 1218c2c59456SMiles Chen break; 1219c2c59456SMiles Chen default: 1220c2c59456SMiles Chen p = NULL; 1221c2c59456SMiles Chen } 1222c2c59456SMiles Chen 1223c2c59456SMiles Chen infracfg = syscon_regmap_lookup_by_compatible(p); 1224c2c59456SMiles Chen if (IS_ERR(infracfg)) 1225c2c59456SMiles Chen return PTR_ERR(infracfg); 12267d748ffdSAngeloGioacchino Del Regno } 1227c2c59456SMiles Chen 1228c2c59456SMiles Chen ret = regmap_read(infracfg, REG_INFRA_MISC, &val); 1229c2c59456SMiles Chen if (ret) 1230c2c59456SMiles Chen return ret; 1231c2c59456SMiles Chen data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN); 1232c2c59456SMiles Chen } 123301e23c93SYong Wu 123442d57fc5SYong Wu banks_num = data->plat_data->banks_num; 12350df4fabeSYong Wu res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 123673b6924cSYang Yingliang if (!res) 123773b6924cSYang Yingliang return -EINVAL; 123842d57fc5SYong Wu if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) { 123942d57fc5SYong Wu dev_err(dev, "banknr %d. 
res %pR is not enough.\n", banks_num, res); 124042d57fc5SYong Wu return -EINVAL; 124142d57fc5SYong Wu } 124299ca0228SYong Wu base = devm_ioremap_resource(dev, res); 124399ca0228SYong Wu if (IS_ERR(base)) 124499ca0228SYong Wu return PTR_ERR(base); 1245b16c0170SJoerg Roedel ioaddr = res->start; 12460df4fabeSYong Wu 124799ca0228SYong Wu data->bank = devm_kmalloc(dev, banks_num * sizeof(*data->bank), GFP_KERNEL); 124899ca0228SYong Wu if (!data->bank) 124999ca0228SYong Wu return -ENOMEM; 125099ca0228SYong Wu 125142d57fc5SYong Wu do { 125242d57fc5SYong Wu if (!data->plat_data->banks_enable[i]) 125342d57fc5SYong Wu continue; 125442d57fc5SYong Wu bank = &data->bank[i]; 125542d57fc5SYong Wu bank->id = i; 125642d57fc5SYong Wu bank->base = base + i * MTK_IOMMU_BANK_SZ; 125799ca0228SYong Wu bank->m4u_dom = NULL; 125842d57fc5SYong Wu 125942d57fc5SYong Wu bank->irq = platform_get_irq(pdev, i); 126099ca0228SYong Wu if (bank->irq < 0) 126199ca0228SYong Wu return bank->irq; 126299ca0228SYong Wu bank->parent_dev = dev; 126399ca0228SYong Wu bank->parent_data = data; 126499ca0228SYong Wu spin_lock_init(&bank->tlb_lock); 126542d57fc5SYong Wu } while (++i < banks_num); 12660df4fabeSYong Wu 12676b717796SChao Hao if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) { 12680df4fabeSYong Wu data->bclk = devm_clk_get(dev, "bclk"); 12690df4fabeSYong Wu if (IS_ERR(data->bclk)) 12700df4fabeSYong Wu return PTR_ERR(data->bclk); 12712aa4c259SYong Wu } 12720df4fabeSYong Wu 1273c0b57581SYong Wu pm_runtime_enable(dev); 1274c0b57581SYong Wu 1275d2e9a110SYong Wu if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { 1276d2e9a110SYong Wu ret = mtk_iommu_mm_dts_parse(dev, &match, data); 1277d2e9a110SYong Wu if (ret) { 12783168010dSNícolas F. R. A. Prado dev_err_probe(dev, ret, "mm dts parse fail\n"); 1279c0b57581SYong Wu goto out_runtime_disable; 1280baf94e6eSYong Wu } 128121fd9be4SAngeloGioacchino Del Regno } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { 128221fd9be4SAngeloGioacchino Del Regno p = data->plat_data->pericfg_comp_str; 128321fd9be4SAngeloGioacchino Del Regno data->pericfg = syscon_regmap_lookup_by_compatible(p); 128421fd9be4SAngeloGioacchino Del Regno if (IS_ERR(data->pericfg)) { 128521fd9be4SAngeloGioacchino Del Regno ret = PTR_ERR(data->pericfg); 1286f9b8c9b2SYong Wu goto out_runtime_disable; 1287f9b8c9b2SYong Wu } 1288d2e9a110SYong Wu } 1289baf94e6eSYong Wu 12900df4fabeSYong Wu platform_set_drvdata(pdev, data); 12910e5a3f2eSYong Wu mutex_init(&data->mutex); 12920df4fabeSYong Wu 1293b16c0170SJoerg Roedel ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, 1294b16c0170SJoerg Roedel "mtk-iommu.%pa", &ioaddr); 1295b16c0170SJoerg Roedel if (ret) 1296baf94e6eSYong Wu goto out_link_remove; 1297b16c0170SJoerg Roedel 12982d471b20SRobin Murphy ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); 1299b16c0170SJoerg Roedel if (ret) 1300986d9ec5SYong Wu goto out_sysfs_remove; 1301b16c0170SJoerg Roedel 13029e3a2a64SYong Wu if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) { 13039e3a2a64SYong Wu list_add_tail(&data->list, data->plat_data->hw_list); 13049e3a2a64SYong Wu data->hw_list = data->plat_data->hw_list; 13059e3a2a64SYong Wu } else { 13069e3a2a64SYong Wu INIT_LIST_HEAD(&data->hw_list_head); 13079e3a2a64SYong Wu list_add_tail(&data->list, &data->hw_list_head); 13089e3a2a64SYong Wu data->hw_list = &data->hw_list_head; 13099e3a2a64SYong Wu } 13107c3a2ec0SYong Wu 1311d2e9a110SYong Wu if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { 1312986d9ec5SYong Wu ret = 
component_master_add_with_match(dev, &mtk_iommu_com_ops, match); 1313986d9ec5SYong Wu if (ret) 1314e7629070SYong Wu goto out_list_del; 1315e7629070SYong Wu } 1316986d9ec5SYong Wu return ret; 1317986d9ec5SYong Wu 1318986d9ec5SYong Wu out_list_del: 1319986d9ec5SYong Wu list_del(&data->list); 1320986d9ec5SYong Wu iommu_device_unregister(&data->iommu); 1321986d9ec5SYong Wu out_sysfs_remove: 1322986d9ec5SYong Wu iommu_device_sysfs_remove(&data->iommu); 1323baf94e6eSYong Wu out_link_remove: 1324d2e9a110SYong Wu if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) 1325baf94e6eSYong Wu device_link_remove(data->smicomm_dev, dev); 1326c0b57581SYong Wu out_runtime_disable: 1327c0b57581SYong Wu pm_runtime_disable(dev); 1328986d9ec5SYong Wu return ret; 13290df4fabeSYong Wu } 13300df4fabeSYong Wu 13310df4fabeSYong Wu static int mtk_iommu_remove(struct platform_device *pdev) 13320df4fabeSYong Wu { 13330df4fabeSYong Wu struct mtk_iommu_data *data = platform_get_drvdata(pdev); 133442d57fc5SYong Wu struct mtk_iommu_bank_data *bank; 133542d57fc5SYong Wu int i; 13360df4fabeSYong Wu 1337b16c0170SJoerg Roedel iommu_device_sysfs_remove(&data->iommu); 1338b16c0170SJoerg Roedel iommu_device_unregister(&data->iommu); 1339b16c0170SJoerg Roedel 1340ee55f75eSYong Wu list_del(&data->list); 13410df4fabeSYong Wu 1342d2e9a110SYong Wu if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { 1343baf94e6eSYong Wu device_link_remove(data->smicomm_dev, &pdev->dev); 1344d2e9a110SYong Wu component_master_del(&pdev->dev, &mtk_iommu_com_ops); 1345d2e9a110SYong Wu } 1346c0b57581SYong Wu pm_runtime_disable(&pdev->dev); 134742d57fc5SYong Wu for (i = 0; i < data->plat_data->banks_num; i++) { 134842d57fc5SYong Wu bank = &data->bank[i]; 134942d57fc5SYong Wu if (!bank->m4u_dom) 135042d57fc5SYong Wu continue; 135199ca0228SYong Wu devm_free_irq(&pdev->dev, bank->irq, bank); 135242d57fc5SYong Wu } 13530df4fabeSYong Wu return 0; 13540df4fabeSYong Wu } 13550df4fabeSYong Wu 135634665c79SYong Wu static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev) 13570df4fabeSYong Wu { 13580df4fabeSYong Wu struct mtk_iommu_data *data = dev_get_drvdata(dev); 13590df4fabeSYong Wu struct mtk_iommu_suspend_reg *reg = &data->reg; 1360d7127de1SYong Wu void __iomem *base; 1361d7127de1SYong Wu int i = 0; 13620df4fabeSYong Wu 1363d7127de1SYong Wu base = data->bank[i].base; 136435c1b48dSChao Hao reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL); 136575eed350SChao Hao reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL); 13660df4fabeSYong Wu reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS); 13670df4fabeSYong Wu reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); 1368b9475b34SYong Wu reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG); 1369d7127de1SYong Wu do { 1370d7127de1SYong Wu if (!data->plat_data->banks_enable[i]) 1371d7127de1SYong Wu continue; 1372d7127de1SYong Wu base = data->bank[i].base; 1373d7127de1SYong Wu reg->int_control[i] = readl_relaxed(base + REG_MMU_INT_CONTROL0); 1374d7127de1SYong Wu reg->int_main_control[i] = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); 1375d7127de1SYong Wu reg->ivrp_paddr[i] = readl_relaxed(base + REG_MMU_IVRP_PADDR); 1376d7127de1SYong Wu } while (++i < data->plat_data->banks_num); 13776254b64fSYong Wu clk_disable_unprepare(data->bclk); 13780df4fabeSYong Wu return 0; 13790df4fabeSYong Wu } 13800df4fabeSYong Wu 138134665c79SYong Wu static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev) 13820df4fabeSYong Wu { 13830df4fabeSYong Wu struct mtk_iommu_data *data = 
dev_get_drvdata(dev); 13840df4fabeSYong Wu struct mtk_iommu_suspend_reg *reg = &data->reg; 1385d7127de1SYong Wu struct mtk_iommu_domain *m4u_dom; 1386d7127de1SYong Wu void __iomem *base; 1387d7127de1SYong Wu int ret, i = 0; 13880df4fabeSYong Wu 13896254b64fSYong Wu ret = clk_prepare_enable(data->bclk); 13906254b64fSYong Wu if (ret) { 13916254b64fSYong Wu dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret); 13926254b64fSYong Wu return ret; 13936254b64fSYong Wu } 1394b34ea31fSDafna Hirschfeld 1395b34ea31fSDafna Hirschfeld /* 1396b34ea31fSDafna Hirschfeld * Upon first resume, only enable the clk and return, since the values of the 1397b34ea31fSDafna Hirschfeld * registers are not yet set. 1398b34ea31fSDafna Hirschfeld */ 1399d7127de1SYong Wu if (!reg->wr_len_ctrl) 1400b34ea31fSDafna Hirschfeld return 0; 1401b34ea31fSDafna Hirschfeld 1402d7127de1SYong Wu base = data->bank[i].base; 140335c1b48dSChao Hao writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL); 140475eed350SChao Hao writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL); 14050df4fabeSYong Wu writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS); 14060df4fabeSYong Wu writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); 1407b9475b34SYong Wu writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG); 1408d7127de1SYong Wu do { 1409d7127de1SYong Wu m4u_dom = data->bank[i].m4u_dom; 1410d7127de1SYong Wu if (!data->plat_data->banks_enable[i] || !m4u_dom) 1411d7127de1SYong Wu continue; 1412d7127de1SYong Wu base = data->bank[i].base; 1413d7127de1SYong Wu writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0); 1414d7127de1SYong Wu writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL); 1415d7127de1SYong Wu writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR); 1416301c3ca1SYunfei Wang writel(m4u_dom->cfg.arm_v7s_cfg.ttbr, base + REG_MMU_PT_BASE_ADDR); 1417d7127de1SYong Wu } while (++i < data->plat_data->banks_num); 14184f23f6d4SYong Wu 14194f23f6d4SYong Wu /* 14204f23f6d4SYong Wu * Users may allocate a dma buffer before they call pm_runtime_get, 14214f23f6d4SYong Wu * in which case it will lack the necessary tlb flush. 14224f23f6d4SYong Wu * Thus, make sure to update the tlb after each PM resume. 
14234f23f6d4SYong Wu */ 14244f23f6d4SYong Wu mtk_iommu_tlb_flush_all(data); 14250df4fabeSYong Wu return 0; 14260df4fabeSYong Wu } 14270df4fabeSYong Wu 1428e6dec923SYong Wu static const struct dev_pm_ops mtk_iommu_pm_ops = { 142934665c79SYong Wu SET_RUNTIME_PM_OPS(mtk_iommu_runtime_suspend, mtk_iommu_runtime_resume, NULL) 143034665c79SYong Wu SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 143134665c79SYong Wu pm_runtime_force_resume) 14320df4fabeSYong Wu }; 14330df4fabeSYong Wu 1434cecdce9dSYong Wu static const struct mtk_iommu_plat_data mt2712_data = { 1435cecdce9dSYong Wu .m4u_plat = M4U_MT2712, 1436d2e9a110SYong Wu .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG | SHARE_PGTABLE | 1437d2e9a110SYong Wu MTK_IOMMU_TYPE_MM, 14389e3a2a64SYong Wu .hw_list = &m4ulist, 1439b053bc71SChao Hao .inv_sel_reg = REG_MMU_INV_SEL_GEN1, 1440585e58f4SYong Wu .iova_region = single_domain, 144199ca0228SYong Wu .banks_num = 1, 144299ca0228SYong Wu .banks_enable = {true}, 1443585e58f4SYong Wu .iova_region_nr = ARRAY_SIZE(single_domain), 144437276e00SChao Hao .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}}, 1445cecdce9dSYong Wu }; 1446cecdce9dSYong Wu 1447068c86e9SChao Hao static const struct mtk_iommu_plat_data mt6779_data = { 1448068c86e9SChao Hao .m4u_plat = M4U_MT6779, 1449d2e9a110SYong Wu .flags = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN | 1450301c3ca1SYunfei Wang MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN, 1451068c86e9SChao Hao .inv_sel_reg = REG_MMU_INV_SEL_GEN2, 145299ca0228SYong Wu .banks_num = 1, 145399ca0228SYong Wu .banks_enable = {true}, 1454585e58f4SYong Wu .iova_region = single_domain, 1455585e58f4SYong Wu .iova_region_nr = ARRAY_SIZE(single_domain), 1456068c86e9SChao Hao .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}}, 1457cecdce9dSYong Wu }; 1458cecdce9dSYong Wu 1459717ec15eSAngeloGioacchino Del Regno static const struct mtk_iommu_plat_data mt6795_data = { 1460717ec15eSAngeloGioacchino Del Regno .m4u_plat = M4U_MT6795, 1461717ec15eSAngeloGioacchino Del Regno .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI | 1462717ec15eSAngeloGioacchino Del Regno HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM | 1463717ec15eSAngeloGioacchino Del Regno TF_PORT_TO_ADDR_MT8173, 1464717ec15eSAngeloGioacchino Del Regno .inv_sel_reg = REG_MMU_INV_SEL_GEN1, 1465717ec15eSAngeloGioacchino Del Regno .banks_num = 1, 1466717ec15eSAngeloGioacchino Del Regno .banks_enable = {true}, 1467717ec15eSAngeloGioacchino Del Regno .iova_region = single_domain, 1468717ec15eSAngeloGioacchino Del Regno .iova_region_nr = ARRAY_SIZE(single_domain), 1469717ec15eSAngeloGioacchino Del Regno .larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */ 1470717ec15eSAngeloGioacchino Del Regno }; 1471717ec15eSAngeloGioacchino Del Regno 14723c213562SFabien Parent static const struct mtk_iommu_plat_data mt8167_data = { 14733c213562SFabien Parent .m4u_plat = M4U_MT8167, 1474d2e9a110SYong Wu .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM, 14753c213562SFabien Parent .inv_sel_reg = REG_MMU_INV_SEL_GEN1, 147699ca0228SYong Wu .banks_num = 1, 147799ca0228SYong Wu .banks_enable = {true}, 1478585e58f4SYong Wu .iova_region = single_domain, 1479585e58f4SYong Wu .iova_region_nr = ARRAY_SIZE(single_domain), 14803c213562SFabien Parent .larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. 
*/ 14813c213562SFabien Parent }; 14823c213562SFabien Parent 1483cecdce9dSYong Wu static const struct mtk_iommu_plat_data mt8173_data = { 1484cecdce9dSYong Wu .m4u_plat = M4U_MT8173, 1485d1b5ef00SFabien Parent .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI | 148686580ec9SAngeloGioacchino Del Regno HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM | 148786580ec9SAngeloGioacchino Del Regno TF_PORT_TO_ADDR_MT8173, 1488b053bc71SChao Hao .inv_sel_reg = REG_MMU_INV_SEL_GEN1, 148999ca0228SYong Wu .banks_num = 1, 149099ca0228SYong Wu .banks_enable = {true}, 1491585e58f4SYong Wu .iova_region = single_domain, 1492585e58f4SYong Wu .iova_region_nr = ARRAY_SIZE(single_domain), 149337276e00SChao Hao .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */ 1494cecdce9dSYong Wu }; 1495cecdce9dSYong Wu 1496907ba6a1SYong Wu static const struct mtk_iommu_plat_data mt8183_data = { 1497907ba6a1SYong Wu .m4u_plat = M4U_MT8183, 1498d2e9a110SYong Wu .flags = RESET_AXI | MTK_IOMMU_TYPE_MM, 1499b053bc71SChao Hao .inv_sel_reg = REG_MMU_INV_SEL_GEN1, 150099ca0228SYong Wu .banks_num = 1, 150199ca0228SYong Wu .banks_enable = {true}, 1502585e58f4SYong Wu .iova_region = single_domain, 1503585e58f4SYong Wu .iova_region_nr = ARRAY_SIZE(single_domain), 150437276e00SChao Hao .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}}, 1505907ba6a1SYong Wu }; 1506907ba6a1SYong Wu 1507e8d7ccaaSYong Wu static const struct mtk_iommu_plat_data mt8186_data_mm = { 1508e8d7ccaaSYong Wu .m4u_plat = M4U_MT8186, 1509e8d7ccaaSYong Wu .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | 1510e8d7ccaaSYong Wu WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM, 1511e8d7ccaaSYong Wu .larbid_remap = {{0}, {1, MTK_INVALID_LARBID, 8}, {4}, {7}, {2}, {9, 11, 19, 20}, 1512e8d7ccaaSYong Wu {MTK_INVALID_LARBID, 14, 16}, 1513e8d7ccaaSYong Wu {MTK_INVALID_LARBID, 13, MTK_INVALID_LARBID, 17}}, 1514e8d7ccaaSYong Wu .inv_sel_reg = REG_MMU_INV_SEL_GEN2, 1515e8d7ccaaSYong Wu .banks_num = 1, 1516e8d7ccaaSYong Wu .banks_enable = {true}, 1517e8d7ccaaSYong Wu .iova_region = mt8192_multi_dom, 1518e8d7ccaaSYong Wu .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), 1519e8d7ccaaSYong Wu }; 1520e8d7ccaaSYong Wu 15219e3489e0SYong Wu static const struct mtk_iommu_plat_data mt8192_data = { 15229e3489e0SYong Wu .m4u_plat = M4U_MT8192, 15239ec30c09SYong Wu .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | 1524d2e9a110SYong Wu WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM, 15259e3489e0SYong Wu .inv_sel_reg = REG_MMU_INV_SEL_GEN2, 152699ca0228SYong Wu .banks_num = 1, 152799ca0228SYong Wu .banks_enable = {true}, 15289e3489e0SYong Wu .iova_region = mt8192_multi_dom, 15299e3489e0SYong Wu .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), 15309e3489e0SYong Wu .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20}, 15319e3489e0SYong Wu {0, 14, 16}, {0, 13, 18, 17}}, 15329e3489e0SYong Wu }; 15339e3489e0SYong Wu 1534ef68a193SYong Wu static const struct mtk_iommu_plat_data mt8195_data_infra = { 1535ef68a193SYong Wu .m4u_plat = M4U_MT8195, 1536ef68a193SYong Wu .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO | 1537ef68a193SYong Wu MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT, 1538ef68a193SYong Wu .pericfg_comp_str = "mediatek,mt8195-pericfg_ao", 1539ef68a193SYong Wu .inv_sel_reg = REG_MMU_INV_SEL_GEN2, 15407597e3c5SYong Wu .banks_num = 5, 15417597e3c5SYong Wu .banks_enable = {true, false, false, false, true}, 15427597e3c5SYong Wu .banks_portmsk = {[0] = GENMASK(19, 16), /* PCIe */ 15437597e3c5SYong Wu [4] = GENMASK(31, 20), /* USB */ 15447597e3c5SYong Wu }, 
1545ef68a193SYong Wu .iova_region = single_domain, 1546ef68a193SYong Wu .iova_region_nr = ARRAY_SIZE(single_domain), 1547ef68a193SYong Wu }; 1548ef68a193SYong Wu 1549ef68a193SYong Wu static const struct mtk_iommu_plat_data mt8195_data_vdo = { 1550ef68a193SYong Wu .m4u_plat = M4U_MT8195, 1551ef68a193SYong Wu .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | 1552ef68a193SYong Wu WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM, 1553ef68a193SYong Wu .hw_list = &m4ulist, 1554ef68a193SYong Wu .inv_sel_reg = REG_MMU_INV_SEL_GEN2, 155599ca0228SYong Wu .banks_num = 1, 155699ca0228SYong Wu .banks_enable = {true}, 1557ef68a193SYong Wu .iova_region = mt8192_multi_dom, 1558ef68a193SYong Wu .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), 1559ef68a193SYong Wu .larbid_remap = {{2, 0}, {21}, {24}, {7}, {19}, {9, 10, 11}, 1560ef68a193SYong Wu {13, 17, 15/* 17b */, 25}, {5}}, 1561ef68a193SYong Wu }; 1562ef68a193SYong Wu 1563ef68a193SYong Wu static const struct mtk_iommu_plat_data mt8195_data_vpp = { 1564ef68a193SYong Wu .m4u_plat = M4U_MT8195, 1565ef68a193SYong Wu .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | 1566ef68a193SYong Wu WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM, 1567ef68a193SYong Wu .hw_list = &m4ulist, 1568ef68a193SYong Wu .inv_sel_reg = REG_MMU_INV_SEL_GEN2, 156999ca0228SYong Wu .banks_num = 1, 157099ca0228SYong Wu .banks_enable = {true}, 1571ef68a193SYong Wu .iova_region = mt8192_multi_dom, 1572ef68a193SYong Wu .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), 1573ef68a193SYong Wu .larbid_remap = {{1}, {3}, 1574ef68a193SYong Wu {22, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 23}, 1575ef68a193SYong Wu {8}, {20}, {12}, 1576ef68a193SYong Wu /* 16: 16a; 29: 16b; 30: CCUtop0; 31: CCUtop1 */ 1577ef68a193SYong Wu {14, 16, 29, 26, 30, 31, 18}, 1578ef68a193SYong Wu {4, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 6}}, 1579ef68a193SYong Wu }; 1580ef68a193SYong Wu 15813cd0e4a3SFabien Parent static const struct mtk_iommu_plat_data mt8365_data = { 15823cd0e4a3SFabien Parent .m4u_plat = M4U_MT8365, 15833cd0e4a3SFabien Parent .flags = RESET_AXI | INT_ID_PORT_WIDTH_6, 15843cd0e4a3SFabien Parent .inv_sel_reg = REG_MMU_INV_SEL_GEN1, 15853cd0e4a3SFabien Parent .banks_num = 1, 15863cd0e4a3SFabien Parent .banks_enable = {true}, 15873cd0e4a3SFabien Parent .iova_region = single_domain, 15883cd0e4a3SFabien Parent .iova_region_nr = ARRAY_SIZE(single_domain), 15893cd0e4a3SFabien Parent .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. 
*/ 15903cd0e4a3SFabien Parent }; 15913cd0e4a3SFabien Parent 15920df4fabeSYong Wu static const struct of_device_id mtk_iommu_of_ids[] = { 1593cecdce9dSYong Wu { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, 1594068c86e9SChao Hao { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data}, 1595717ec15eSAngeloGioacchino Del Regno { .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data}, 15963c213562SFabien Parent { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data}, 1597cecdce9dSYong Wu { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, 1598907ba6a1SYong Wu { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, 1599e8d7ccaaSYong Wu { .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */ 16009e3489e0SYong Wu { .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data}, 1601ef68a193SYong Wu { .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra}, 1602ef68a193SYong Wu { .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo}, 1603ef68a193SYong Wu { .compatible = "mediatek,mt8195-iommu-vpp", .data = &mt8195_data_vpp}, 16043cd0e4a3SFabien Parent { .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data}, 16050df4fabeSYong Wu {} 16060df4fabeSYong Wu }; 16070df4fabeSYong Wu 16080df4fabeSYong Wu static struct platform_driver mtk_iommu_driver = { 16090df4fabeSYong Wu .probe = mtk_iommu_probe, 16100df4fabeSYong Wu .remove = mtk_iommu_remove, 16110df4fabeSYong Wu .driver = { 16120df4fabeSYong Wu .name = "mtk-iommu", 1613f53dd978SKrzysztof Kozlowski .of_match_table = mtk_iommu_of_ids, 16140df4fabeSYong Wu .pm = &mtk_iommu_pm_ops, 16150df4fabeSYong Wu } 16160df4fabeSYong Wu }; 161718d8c74eSYong Wu module_platform_driver(mtk_iommu_driver); 16180df4fabeSYong Wu 161918d8c74eSYong Wu MODULE_DESCRIPTION("IOMMU API for MediaTek M4U implementations"); 162018d8c74eSYong Wu MODULE_LICENSE("GPL v2"); 1621
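/*
 * Illustrative sketch only, an assumption rather than part of this driver:
 * in a board devicetree, a consumer device attaches to one of the IOMMU
 * instances matched above through the standard "iommus" property, using
 * the larb/port encoding from dt-bindings/memory/mtk-memory-port.h. The
 * node labels and the larb/port numbers below are hypothetical.
 *
 *   #include <dt-bindings/memory/mtk-memory-port.h>
 *
 *   display: display-consumer {
 *           iommus = <&iommu_mm MTK_M4U_ID(0, 0)>;
 *   };
 */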