/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION  "5.00"

#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;

	/* shadow version for CB3.3 chan reset errata workaround */
	u64 msixtba0;
	u64 msixdata0;
	u32 msixpba;
};

#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)

struct ioat_descs {
	void *virt;
	dma_addr_t hw;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

	/* ioat v2 / v3 channel attributes
	 * @xfercap_log: log2 of channel max transfer length (for fast division)
	 * @head: allocated index
	 * @issued: hardware notification point
	 * @tail: cleanup index
	 * @dmacount: identical to 'head' except for occasionally resetting to zero
	 * @alloc_order: log2 of the number of allocated descriptors
	 * @produce: number of descriptors to produce at submit time
	 * @ring: software ring buffer implementation of hardware ring
	 * @prep_lock: serializes descriptor preparation (producers)
	 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
	struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
	int desc_chunks;
	int intr_coalesce;
	int prev_intr_coalesce;
};

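/*
 * Ring accounting example (illustrative sketch, not part of the driver):
 * with alloc_order == 4 the ring holds 16 descriptors.  If head == 10,
 * issued == 8 and tail == 4, the helpers further down in this header give
 * ioat_ring_active() == CIRC_CNT(10, 4, 16) == 6 descriptors still in
 * flight with the engine, ioat_ring_pending() == CIRC_CNT(10, 8, 16) == 2
 * descriptors prepped but not yet pushed to hardware, and
 * ioat_ring_space() == 16 - 6 == 10 slots free for new submissions.
 */
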
struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
	ssize_t (*store)(struct dma_chan *, const char *, size_t);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the dma descriptor that is the parent
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}

static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}

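/*
 * Worked example (illustrative sketch): assuming the channel reports
 * xfercap_log == 20 (1 MiB maximum per descriptor), a 2.5 MiB transfer
 * needs ioat_xferlen_to_descs() == 3 descriptors: 2 from the shift
 * (2621440 >> 20 == 2) plus 1 for the 512 KiB remainder.
 */
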
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);

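/*
 * Usage sketch (illustrative only, not part of this driver): how a generic
 * dmaengine client ends up in ioat_dma_prep_memcpy_lock() above.  The
 * function and completion names are hypothetical, error unwinding is
 * trimmed, and the snippet assumes the caller already mapped dst/src with
 * the DMA API.
 */
#if 0	/* example, never compiled */
static void ioat_example_callback(void *arg)
{
	complete(arg);
}

static int ioat_example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* dispatches to the channel's device_prep_dma_memcpy() callback */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = ioat_example_callback;
	tx->callback_param = &done;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* kick the hardware; see ioat_issue_pending() below */
	dma_async_issue_pending(chan);
	wait_for_completion(&done);

	return dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_COMPLETE ?
		0 : -EIO;
}
#endif
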
/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(struct tasklet_struct *t);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */