/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION  "5.00"

#define IOAT_DMA_DCA_ANY_CPU		~0

int system_has_dca_enabled(struct pci_dev *pdev);

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
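/*
 * Illustrative note (not part of the original header): descriptor source
 * counts are stored with a fixed bias, so a three-source XOR is written to
 * the hardware as src_cnt_to_hw(3) == 1 and decoded back with
 * src_cnt_to_sw(1) == 3. The extended 16-source descriptor format uses a
 * bias of nine (src16_cnt_to_hw()/src16_cnt_to_sw()) instead.
 */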
/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 * @chancnt: number of channels on the device
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;
	int chancnt;

	/* shadow version for CB3.3 chan reset errata workaround */
	u64 msixtba0;
	u64 msixdata0;
	u32 msixpba;
};

#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)

struct ioat_descs {
	void *virt;
	dma_addr_t hw;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
	struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
	int desc_chunks;
	int intr_coalesce;
	int prev_intr_coalesce;
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
	struct ioat_sed_ent *sed;
};

extern int ioat_pending_level;
extern const struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
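/*
 * Worked example (illustrative, not part of the original header): with
 * alloc_order == 4 the ring holds 16 entries.  If head == 7, issued == 5
 * and tail == 3, then ioat_ring_active() == CIRC_CNT(7, 3, 16) == 4
 * descriptors are outstanding, ioat_ring_pending() == CIRC_CNT(7, 5, 16)
 * == 2 of those have been prepped but not yet made visible to the
 * hardware, and ioat_ring_space() == 16 - 4 == 12 slots remain free.
 */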
/* convert a transfer length to the number of descriptors it needs,
 * rounding up at xfercap granularity
 */
static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}

static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

/* program the 64-bit descriptor chain address as two 32-bit writes */
static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);
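/*
 * Usage sketch (illustrative, not part of the original header): the prep
 * routines above follow the standard dmaengine flow, e.g. for a memcpy:
 *
 *	tx = ioat_dma_prep_memcpy_lock(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * dmaengine_submit() and dma_async_issue_pending() are the generic
 * <linux/dmaengine.h> entry points; in-kernel users normally reach this
 * driver through them rather than by calling the prep functions directly.
 */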
/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(struct tasklet_struct *t);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, const struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */