#ifndef __POWERNV_PCI_H
#define __POWERNV_PCI_H

struct pci_dn;

enum pnv_phb_type {
	PNV_PHB_P5IOC2	= 0,
	PNV_PHB_IODA1	= 1,
	PNV_PHB_IODA2	= 2,
	PNV_PHB_NPU	= 3,
};

/* Precise PHB model for error management */
enum pnv_phb_model {
	PNV_PHB_MODEL_UNKNOWN,
	PNV_PHB_MODEL_P5IOC2,
	PNV_PHB_MODEL_P7IOC,
	PNV_PHB_MODEL_PHB3,
	PNV_PHB_MODEL_NPU,
};

#define PNV_PCI_DIAG_BUF_SIZE	8192
#define PNV_IODA_PE_DEV		(1 << 0)	/* PE has single PCI device	*/
#define PNV_IODA_PE_BUS		(1 << 1)	/* PE has primary PCI bus	*/
#define PNV_IODA_PE_BUS_ALL	(1 << 2)	/* PE has subordinate buses	*/
#define PNV_IODA_PE_MASTER	(1 << 3)	/* Master PE in compound case	*/
#define PNV_IODA_PE_SLAVE	(1 << 4)	/* Slave PE in compound case	*/
#define PNV_IODA_PE_VF		(1 << 5)	/* PE for one VF		*/
#define PNV_IODA_PE_PEER	(1 << 6)	/* PE has peers			*/

/* Data associated with a PE, including IOMMU tracking etc. */
struct pnv_phb;
struct pnv_ioda_pe {
	unsigned long		flags;
	struct pnv_phb		*phb;

#define PNV_IODA_MAX_PEER_PES	8
	struct pnv_ioda_pe	*peers[PNV_IODA_MAX_PEER_PES];

	/* A PE can be associated with a single device or an
	 * entire bus (& children). In the former case, pdev
	 * is populated, in the latter case, pbus is.
	 */
#ifdef CONFIG_PCI_IOV
	struct pci_dev		*parent_dev;
#endif
	struct pci_dev		*pdev;
	struct pci_bus		*pbus;

	/* Effective RID (device RID for a device PE and base bus
	 * RID with devfn 0 for a bus PE)
	 */
	unsigned int		rid;

	/* PE number */
	unsigned int		pe_number;

	/* "Weight" assigned to the PE for the sake of DMA resource
	 * allocations
	 */
	unsigned int		dma_weight;

	/* "Base" iommu table, i.e. 4K TCEs, 32-bit DMA */
	int			tce32_seg;
	int			tce32_segcount;
	struct iommu_table_group table_group;

	/* 64-bit TCE bypass region */
	bool			tce_bypass_enabled;
	uint64_t		tce_bypass_base;

	/* MSIs. MVE index is identical for 32 and 64 bit MSI
	 * and -1 if not supported. (It's actually identical to the
	 * PE number)
	 */
	int			mve_number;

	/* PEs in compound case */
	struct pnv_ioda_pe	*master;
	struct list_head	slaves;

	/* Link in list of PE#s */
	struct list_head	dma_link;
	struct list_head	list;
};

#define PNV_PHB_FLAG_EEH	(1 << 0)

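/*
 * Per-PHB state, allocated when the PHB is discovered in the device
 * tree.  The anonymous union below carries the backend-specific data
 * for the p5ioc2 and IODA PHB types.
 */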
struct pnv_phb {
	struct pci_controller	*hose;
	enum pnv_phb_type	type;
	enum pnv_phb_model	model;
	u64			hub_id;
	u64			opal_id;
	int			flags;
	void __iomem		*regs;
	int			initialized;
	spinlock_t		lock;

#ifdef CONFIG_DEBUG_FS
	int			has_dbgfs;
	struct dentry		*dbgfs;
#endif

#ifdef CONFIG_PCI_MSI
	unsigned int		msi_base;
	unsigned int		msi32_support;
	struct msi_bitmap	msi_bmp;
#endif
	int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
			 unsigned int hwirq, unsigned int virq,
			 unsigned int is_64, struct msi_msg *msg);
	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
	void (*fixup_phb)(struct pci_controller *hose);
	u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
	int (*init_m64)(struct pnv_phb *phb);
	void (*reserve_m64_pe)(struct pci_bus *bus,
			       unsigned long *pe_bitmap, bool all);
	int (*pick_m64_pe)(struct pci_bus *bus, bool all);
	int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
	void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
	int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);

	union {
		struct {
			struct iommu_table iommu_table;
			struct iommu_table_group table_group;
		} p5ioc2;

		struct {
			/* Global bridge info */
			unsigned int		total_pe;
			unsigned int		reserved_pe;

			/* 32-bit MMIO window */
			unsigned int		m32_size;
			unsigned int		m32_segsize;
			unsigned int		m32_pci_base;

			/* 64-bit MMIO window */
			unsigned int		m64_bar_idx;
			unsigned long		m64_size;
			unsigned long		m64_segsize;
			unsigned long		m64_base;
			unsigned long		m64_bar_alloc;

			/* IO ports */
			unsigned int		io_size;
			unsigned int		io_segsize;
			unsigned int		io_pci_base;

			/* PE allocation bitmap */
			unsigned long		*pe_alloc;
			/* PE allocation mutex */
			struct mutex		pe_alloc_mutex;

			/* M32 & IO segment maps */
			unsigned int		*m32_segmap;
			unsigned int		*io_segmap;
			struct pnv_ioda_pe	*pe_array;

			/* IRQ chip */
			int			irq_chip_init;
			struct irq_chip		irq_chip;

			/* Sorted list of used PEs based
			 * on the sequence of creation
			 */
			struct list_head	pe_list;
			struct mutex		pe_list_mutex;

			/* Reverse map of PEs, will have to extend if
			 * we are to support more than 256 PEs, indexed
			 * by { bus, devfn }
			 */
			unsigned char		pe_rmap[0x10000];

			/* 32-bit TCE tables allocation */
			unsigned long		tce32_count;

			/* Total "weight" for the sake of DMA resources
			 * allocation
			 */
			unsigned int		dma_weight;
			unsigned int		dma_pe_count;

			/* Sorted list of used PEs, sorted at
			 * boot for resource allocation purposes
			 */
			struct list_head	pe_dma_list;

			/* TCE cache invalidate registers (physical and
			 * remapped)
			 */
			phys_addr_t		tce_inval_reg_phys;
			__be64 __iomem		*tce_inval_reg;
		} ioda;
	};

	/* PHB and hub status structure */
	union {
		unsigned char			blob[PNV_PCI_DIAG_BUF_SIZE];
		struct OpalIoP7IOCPhbErrorData	p7ioc;
		struct OpalIoPhb3ErrorData	phb3;
		struct OpalIoP7IOCErrorData	hub_diag;
	} diag;

};

extern struct pci_ops pnv_pci_ops;
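
/* Helpers for building, freeing, exchanging and reading TCE (DMA
 * translation) entries in an iommu_table.
 */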
extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs);
extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction);
extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);

void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff);
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val);
int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val);
extern struct iommu_table *pnv_pci_table_alloc(int nid);

extern long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group);
extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group);
extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
				      void *tce_mem, u64 tce_size,
				      u64 dma_offset, unsigned page_shift);
extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu_phb(struct device_node *np);
extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
					__be64 *startp, __be64 *endp, bool rm);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);

extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);

/* Nvlink functions */
extern void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe);
extern void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe,
				   struct iommu_table *tbl,
				   unsigned long index,
				   unsigned long npages,
				   bool rm);
extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe);
extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe);
extern int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enabled);
extern int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask);

#endif /* __POWERNV_PCI_H */