/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

/*
 * Internal interface of the AMD IOMMU driver: interrupt/log handlers,
 * protection-domain, SVA/PASID, IOPF, GCR3 and PPR helpers, plus small
 * inline utilities shared between the driver's translation units.
 */

#ifndef AMD_IOMMU_H
#define AMD_IOMMU_H

#include <linux/iommu.h>

#include "amd_iommu_types.h"

/* Top-level IRQ handler and per-log threaded interrupt handlers */
irqreturn_t amd_iommu_int_thread(int irq, void *data);
irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
irqreturn_t amd_iommu_int_handler(int irq, void *data);

void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);

/*
 * Generic log-restart helper plus per-log wrappers (event/GA/PPR),
 * used to recover a hardware log ring after an overflow condition.
 */
void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
			   u8 cntrl_intr, u8 cntrl_log,
			   u32 status_run_mask, u32 status_overflow_mask);
void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
void iommu_feature_enable(struct amd_iommu *iommu, u8 bit);
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
				  gfp_t gfp, size_t size);

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
#else
/* No-op stub when debugfs support for the driver is compiled out */
static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
#endif

/* Needed for interrupt remapping */
int amd_iommu_prepare(void);
int amd_iommu_enable(void);
void amd_iommu_disable(void);
int amd_iommu_reenable(int mode);
int amd_iommu_enable_faulting(unsigned int cpu);
extern int amd_iommu_guest_ir;
extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level;

/* Protection domain ops */
struct protection_domain *protection_domain_alloc(unsigned int type);
void protection_domain_free(struct protection_domain *domain);
struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
						struct mm_struct *mm);
void amd_iommu_domain_free(struct iommu_domain *dom);
int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
			    struct device *dev, ioasid_t pasid);
void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
				struct iommu_domain *domain);

/* SVA/PASID */
bool amd_iommu_pasid_supported(void);

/* IOPF (I/O page fault) queue setup and per-device registration */
int amd_iommu_iopf_init(struct amd_iommu *iommu);
void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
			     struct iommu_page_response *resp);
int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
			      struct iommu_dev_data *dev_data);
void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
				  struct iommu_dev_data *dev_data);

/* GCR3 setup */
int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
		       ioasid_t pasid, unsigned long gcr3);
int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);

/* PPR (peripheral page request) log management and fault completion */
int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu);
void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu);
void amd_iommu_enable_ppr_log(struct amd_iommu *iommu);
void amd_iommu_poll_ppr_log(struct amd_iommu *iommu);
int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set);
void amd_iommu_domain_flush_complete(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
				  u64 address, size_t size);
void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
				     ioasid_t pasid, u64 address, size_t size);
void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
				   ioasid_t pasid);

#ifdef CONFIG_IRQ_REMAP
int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
#else
/* Succeed silently when interrupt remapping is compiled out */
static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
	return 0;
}
#endif

/* Identify the ATI RD890 northbridge IOMMU by PCI vendor/device ID. */
static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}

/* Test a feature bit in the cached Extended Feature Register value. */
static inline bool check_feature(u64 mask)
{
	return (amd_iommu_efr & mask);
}

/* Test a feature bit in the cached Extended Feature Register 2 value. */
static inline bool check_feature2(u64 mask)
{
	return (amd_iommu_efr2 & mask);
}

/*
 * Extract the FEATURE_GATS field from the EFR; presumably reports the
 * supported guest address translation (page-table) levels — confirm
 * against the AMD IOMMU specification.
 */
static inline int check_feature_gpt_level(void)
{
	return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
}

/* PPR with guest translation requires GT, PPR and EPHSUP all present. */
static inline bool amd_iommu_gt_ppr_supported(void)
{
	return (check_feature(FEATURE_GT) &&
		check_feature(FEATURE_PPR) &&
		check_feature(FEATURE_EPHSUP));
}

/* virt->phys conversion with the SME memory-encryption mask applied */
static inline u64 iommu_virt_to_phys(void *vaddr)
{
	return (u64)__sme_set(virt_to_phys(vaddr));
}

/* phys->virt conversion with the SME memory-encryption mask stripped */
static inline void *iommu_phys_to_virt(unsigned long paddr)
{
	return phys_to_virt(__sme_clr(paddr));
}

/*
 * Unpack a combined root value into the domain's io_pgtable state:
 * page-aligned root table pointer in the upper bits, paging mode in
 * the low bits.
 */
static inline
void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
{
	domain->iop.root = (u64 *)(root & PAGE_MASK);
	domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
}

/* Reset the domain's page-table root and mode to zero. */
static inline
void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
{
	amd_iommu_domain_set_pt_root(domain, 0);
}

/* Compose the PCI segment + bus/device/function identifier (SBDF). */
static inline int get_pci_sbdf_id(struct pci_dev *pdev)
{
	int seg = pci_domain_nr(pdev->bus);
	u16 devid = pci_dev_id(pdev);

	return PCI_SEG_DEVID_TO_SBDF(seg, devid);
}

/*
 * This must be called after device probe completes. During probe
 * use rlookup_amd_iommu() to get the iommu.
 */
static inline struct amd_iommu *get_amd_iommu_from_dev(struct device *dev)
{
	return iommu_get_iommu_dev(dev, struct amd_iommu, iommu);
}

/* This must be called after device probe completes. */
static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_data *dev_data)
{
	return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu);
}

/* Convert a generic iommu_domain to the driver's protection_domain. */
static inline struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
	return container_of(dom, struct protection_domain, domain);
}

bool translation_pre_enabled(struct amd_iommu *iommu);
bool amd_iommu_is_attach_deferred(struct device *dev);
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);

#ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void);
#else
/* No IVRS quirks to apply without DMI-based platform identification */
static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif

void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
				  u64 *root, int mode);
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);

#endif