--- dma.h (c0f28ce66ecfd9fa0ae662a2c7f3e68e537e77f4)
+++ dma.h (599d49de7f69cb5a23e913db24e168ba2f09bd05)
 /*
  * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *

--- 23 unchanged lines hidden ---

 #define IOAT_DMA_DCA_ANY_CPU		~0

 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
 #define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
 #define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

 #define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

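chan_num() recovers a channel's index from MMIO pointer arithmetic alone: each channel's register block sits 0x80 bytes apart in the device's register BAR, hence the division by 0x80. A minimal user-space sketch of the same arithmetic; fake_dev and fake_chan are illustrative stand-ins, not the driver's types:

#include <stdio.h>

struct fake_dev  { unsigned char *reg_base; };   /* device-wide register base */
struct fake_chan { unsigned char *reg_base; struct fake_dev *dev; };

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->dev->reg_base) / 0x80)

int main(void)
{
	static unsigned char bar[4 * 0x80];          /* pretend register BAR */
	struct fake_dev dev = { .reg_base = bar };
	struct fake_chan ch = { .reg_base = bar + 2 * 0x80, .dev = &dev };

	printf("channel index: %d\n", chan_num(&ch)); /* prints 2 */
	return 0;
}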
+/* ioat hardware assumes at least two sources for raid operations */
+#define src_cnt_to_sw(x) ((x) + 2)
+#define src_cnt_to_hw(x) ((x) - 2)
+#define ndest_to_sw(x) ((x) + 1)
+#define ndest_to_hw(x) ((x) - 1)
+#define src16_cnt_to_sw(x) ((x) + 9)
+#define src16_cnt_to_hw(x) ((x) - 9)
+
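The added conversion macros translate between the software view (total operand count) and the hardware descriptor encoding, which stores only the operands beyond the implied minimum. A standalone round-trip check; the counts are illustrative:

#include <assert.h>

#define src_cnt_to_sw(x) ((x) + 2)   /* hw field -> total sources */
#define src_cnt_to_hw(x) ((x) - 2)   /* total sources -> hw field */

int main(void)
{
	int sw = 5;                      /* a 5-source XOR/PQ operation */
	int hw = src_cnt_to_hw(sw);      /* encoded in the descriptor as 3 */

	assert(hw == 3);
	assert(src_cnt_to_sw(hw) == sw); /* conversion round-trips losslessly */
	return 0;
}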
 /*
  * workaround for IOAT ver.3.0 null descriptor issue
  * (channel returns error when size is 0)
  */
 #define NULL_DESC_BUFFER_SIZE 1

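In practice the workaround means a no-op ("null") descriptor is programmed with a 1-byte transfer size rather than 0, which IOAT v3.0 rejects with a channel error. A hedged sketch of the idea; fake_hw_desc and its fields are illustrative, not the driver's hardware layout:

#include <stdint.h>

#define NULL_DESC_BUFFER_SIZE 1

struct fake_hw_desc { uint32_t size; uint64_t src, dst; };

static void init_null_desc(struct fake_hw_desc *hw)
{
	/* a do-nothing descriptor still claims a 1-byte transfer,
	 * because a size of 0 makes IOAT v3.0 raise a channel error */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src = hw->dst = 0;
}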
 enum ioat_irq_mode {
	IOAT_NOIRQ = 0,

--- 137 unchanged lines hidden ---

	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
 #ifdef DEBUG
	int id;
 #endif
	struct ioat_sed_ent *sed;
 };

+extern const struct sysfs_ops ioat_sysfs_ops;
+extern struct ioat_sysfs_entry ioat_version_attr;
+extern struct ioat_sysfs_entry ioat_cap_attr;
+extern int ioat_pending_level;
+extern int ioat_ring_alloc_order;
+extern struct kobj_type ioat_ktype;
+extern struct kmem_cache *ioat_cache;
+extern int ioat_ring_max_alloc_order;
+extern struct kmem_cache *ioat_sed_cache;
+
 static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
 {
	return container_of(c, struct ioatdma_chan, dma_chan);
 }

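to_ioat_chan() is the standard container_of() downcast: the generic struct dma_chan handed to dmaengine callbacks is embedded inside the driver-private struct ioatdma_chan, so the outer structure is recovered from the member pointer. A self-contained sketch of the pattern with trimmed struct bodies:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_chan { int cookie; };          /* generic handle, trimmed */
struct ioatdma_chan {                     /* driver-private wrapper, trimmed */
	int chan_idx;
	struct dma_chan dma_chan;         /* embedded generic member */
};

static struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

int main(void)
{
	struct ioatdma_chan ioat = { .chan_idx = 3 };
	struct dma_chan *c = &ioat.dma_chan;  /* what a callback receives */

	printf("%d\n", to_ioat_chan(c)->chan_idx);  /* prints 3 */
	return 0;
}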
-
-
 /* wrapper around hardware descriptor format + additional software fields */
-
 #ifdef DEBUG
 #define set_desc_id(desc, i) ((desc)->id = (i))
 #define desc_id(desc) ((desc)->id)
 #else
 #define set_desc_id(desc, i)
 #define desc_id(desc) (0)
 #endif

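The set_desc_id()/desc_id() pair gives each ring descriptor a traceable identity in DEBUG builds and compiles to nothing (or a constant 0) otherwise, so the bookkeeping is free in production. A standalone illustration with a trimmed descriptor struct:

#include <stdio.h>

#define DEBUG            /* comment out to get the no-op expansion */

struct fake_desc {
#ifdef DEBUG
	int id;
#endif
	int payload;
};

#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)  /* expands to nothing */
#define desc_id(desc) (0)
#endif

int main(void)
{
	struct fake_desc d = { .payload = 42 };

	set_desc_id(&d, 7);
	printf("desc %d carries %d\n", desc_id(&d), d.payload);
	return 0;
}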

--- 166 unchanged lines hidden ---

 ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
 {
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
 }

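ioat_set_chainaddr() programs the 64-bit descriptor-chain address as two 32-bit register writes, low word then high word, because the CHAINADDR register is split across two MMIO offsets. The split itself is plain integer arithmetic; a standalone check with printf standing in for writel():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x00000001deadbee0ULL;       /* example chain address */
	uint32_t lo = addr & 0x00000000FFFFFFFFULL;  /* -> CHAINADDR_OFFSET_LOW */
	uint32_t hi = addr >> 32;                    /* -> CHAINADDR_OFFSET_HIGH */

	printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", lo, hi);
	printf("round-trip ok: %d\n",
	       (((uint64_t)hi << 32) | lo) == addr);  /* prints 1 */
	return 0;
}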
-irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
-irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
-struct ioat_ring_ent **
-ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
-void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
-void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
-int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
+/* IOAT Prep functions */
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			  dma_addr_t dma_src, size_t len, unsigned long flags);
 struct dma_async_tx_descriptor *
 ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
 struct dma_async_tx_descriptor *
 ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags);
 struct dma_async_tx_descriptor *
 ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags);

--- 7 unchanged lines hidden ---

	       enum sum_check_flags *pqres, unsigned long flags);
 struct dma_async_tx_descriptor *
 ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags);
 struct dma_async_tx_descriptor *
 ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags);
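These prep routines back the driver's dmaengine device ops (ioat_dma_prep_memcpy_lock(), for instance, is installed as device_prep_dma_memcpy at init time), so clients reach them through the generic API rather than calling them directly. A hedged kernel-context sketch of that client-side flow; demo_memcpy() is illustrative and error handling is trimmed:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Illustrative helper: issue one DMA memcpy on an already-acquired channel. */
static int demo_memcpy(struct dma_chan *chan,
		       dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* dispatches to ioat_dma_prep_memcpy_lock() on an IOAT channel */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);   /* place the descriptor on the ring */
	dma_async_issue_pending(chan);   /* flush pending work to the hardware */

	return dma_submit_error(cookie) ? -EIO : 0;
}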
+
+/* IOAT Operation functions */
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
+struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
+int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
 enum dma_status
 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate);
 void ioat_cleanup_event(unsigned long data);
 void ioat_timer_event(unsigned long data);
-bool is_bwd_ioat(struct pci_dev *pdev);
-int ioat_probe(struct ioatdma_device *ioat_dma);
-int ioat_register(struct ioatdma_device *ioat_dma);
-int ioat_dma_self_test(struct ioatdma_device *ioat_dma);
-void ioat_dma_remove(struct ioatdma_device *ioat_dma);
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-void ioat_init_channel(struct ioatdma_device *ioat_dma,
-		       struct ioatdma_chan *ioat_chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
				    struct dma_tx_state *txstate);
 bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
			    dma_addr_t *phys_complete);
-void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
-void ioat_kobject_del(struct ioatdma_device *ioat_dma);
-int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
-void ioat_stop(struct ioatdma_chan *ioat_chan);
-int ioat_dma_probe(struct ioatdma_device *ioat_dma, int dca);
-int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca);
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
-int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
-struct dma_async_tx_descriptor *
-ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
-			  dma_addr_t dma_src, size_t len, unsigned long flags);
 void ioat_issue_pending(struct dma_chan *chan);
-int ioat_alloc_chan_resources(struct dma_chan *c);
-void ioat_free_chan_resources(struct dma_chan *c);
-void __ioat_restart_chan(struct ioatdma_chan *ioat_chan);
 bool reshape_ring(struct ioatdma_chan *ioat, int order);
 void __ioat_issue_pending(struct ioatdma_chan *ioat_chan);
 void ioat_timer_event(unsigned long data);
 int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo);
 int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo);
+void __ioat_restart_chan(struct ioatdma_chan *ioat_chan);
-extern const struct sysfs_ops ioat_sysfs_ops;
-extern struct ioat_sysfs_entry ioat_version_attr;
-extern struct ioat_sysfs_entry ioat_cap_attr;
-extern int ioat_pending_level;
-extern int ioat_ring_alloc_order;
-extern struct kobj_type ioat_ktype;
-extern struct kmem_cache *ioat_cache;
-extern int ioat_ring_max_alloc_order;
-extern struct kmem_cache *ioat_sed_cache;
-
+/* IOAT Init functions */
+bool is_bwd_ioat(struct pci_dev *pdev);
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
+void ioat_kobject_del(struct ioatdma_device *ioat_dma);
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
+void ioat_stop(struct ioatdma_chan *ioat_chan);
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 #endif /* IOATDMA_H */