/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Qualcomm Technologies HIDMA data structures
 *
 * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 */

#ifndef QCOM_HIDMA_H
#define QCOM_HIDMA_H

#include <linux/kfifo.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>

#define HIDMA_TRE_SIZE			32 /* each TRE is 32 bytes */
#define HIDMA_TRE_CFG_IDX		0
#define HIDMA_TRE_LEN_IDX		1
#define HIDMA_TRE_SRC_LOW_IDX		2
#define HIDMA_TRE_SRC_HI_IDX		3
#define HIDMA_TRE_DEST_LOW_IDX		4
#define HIDMA_TRE_DEST_HI_IDX		5

enum tre_type {
	HIDMA_TRE_MEMCPY = 3,
	HIDMA_TRE_MEMSET = 4,
};

struct hidma_tre {
	atomic_t allocated;		/* if this channel is allocated	    */
	bool queued;			/* flag whether this is pending	    */
	u16 status;			/* status			    */
	u32 idx;			/* index of the tre		    */
	u32 dma_sig;			/* signature of the tre		    */
	const char *dev_name;		/* name of the device		    */
	void (*callback)(void *data);	/* requester callback		    */
	void *data;			/* Data associated with this channel*/
	struct hidma_lldev *lldev;	/* lldma device pointer		    */
	u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy  */
	u32 tre_index;			/* the offset where this was written*/
	u32 int_flags;			/* interrupt flags		    */
	u8 err_info;			/* error record in this transfer    */
	u8 err_code;			/* completion code		    */
};

struct hidma_lldev {
	bool msi_support;		/* flag indicating MSI support	    */
	bool initialized;		/* initialized flag		    */
	u8 trch_state;			/* trch_state of the device	    */
	u8 evch_state;			/* evch_state of the device	    */
	u8 chidx;			/* channel index in the core	    */
	u32 nr_tres;			/* max number of configs	    */
	spinlock_t lock;		/* reentrancy			    */
	struct hidma_tre *trepool;	/* trepool of user configs	    */
	struct device *dev;		/* device			    */
	void __iomem *trca;		/* Transfer Channel address	    */
	void __iomem *evca;		/* Event Channel address	    */
	struct hidma_tre
		**pending_tre_list;	/* Pointers to pending TREs	    */
	atomic_t pending_tre_count;	/* Number of TREs pending	    */

	void *tre_ring;			/* TRE ring			    */
	dma_addr_t tre_dma;		/* TRE ring to be shared with HW    */
	u32 tre_ring_size;		/* Byte size of the ring	    */
	u32 tre_processed_off;		/* last processed TRE		    */

	void *evre_ring;		/* EVRE ring			    */
	dma_addr_t evre_dma;		/* EVRE ring to be shared with HW   */
	u32 evre_ring_size;		/* Byte size of the ring	    */
	u32 evre_processed_off;		/* last processed EVRE		    */

	u32 tre_write_offset;		/* TRE write location		    */
	struct tasklet_struct task;	/* task delivering notifications    */
	DECLARE_KFIFO_PTR(handoff_fifo,
		struct hidma_tre *);	/* pending TREs FIFO		    */
};

struct hidma_desc {
	struct dma_async_tx_descriptor	desc;
	/* link list node for this channel*/
	struct list_head		node;
	u32				tre_ch;
};

struct hidma_chan {
	bool				paused;
	bool				allocated;
	char				dbg_name[16];
	u32				dma_sig;
	dma_cookie_t			last_success;

	/*
	 * active descriptor on this channel
	 * It is used by the DMA complete notification to
	 * locate the descriptor that initiated the transfer.
	 */
	struct dentry			*debugfs;
	struct dentry			*stats;
	struct hidma_dev		*dmadev;
	struct hidma_desc		*running;

	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct hidma_dev {
	int				irq;
	int				chidx;
	u32				nr_descriptors;
	int				msi_virqbase;

	struct hidma_lldev		*lldev;
	void				__iomem *dev_trca;
	struct resource			*trca_resource;
	void				__iomem *dev_evca;
	struct resource			*evca_resource;

	/* used to protect the pending channel list*/
	spinlock_t			lock;
	struct dma_device		ddev;

	struct dentry			*debugfs;
	struct dentry			*stats;

	/* sysfs entry for the channel id */
	struct device_attribute		*chid_attrs;

	/* Task delivering issue_pending */
	struct tasklet_struct		task;
};

int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
		     const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch);

void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
			       u8 err_code);
int hidma_debug_init(struct hidma_dev *dmadev);
void hidma_debug_uninit(struct hidma_dev *dmadev);
#endif
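
/*
 * Usage sketch (illustrative only): one plausible way the hidma_ll_*
 * low-level API declared above can be driven for a single memcpy
 * transfer. The lldev pointer is assumed to come from hidma_ll_init();
 * my_callback, my_data, dev_id, src, dest and len are placeholder names
 * not defined by this header, the flags value 0 is a placeholder, and a
 * zero return from hidma_ll_request() is assumed to indicate success.
 *
 *	u32 tre_ch;
 *
 *	if (!hidma_ll_request(lldev, dev_id, "sketch-chan", my_callback,
 *			      my_data, &tre_ch)) {
 *		hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len,
 *					     0, HIDMA_TRE_MEMCPY);
 *		hidma_ll_queue_request(lldev, tre_ch);
 *		hidma_ll_start(lldev);
 *	}
 */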