/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QAIC_H_
#define _QAIC_H_

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>

#define QAIC_DBC_BASE		SZ_128K
#define QAIC_DBC_SIZE		SZ_4K

#define QAIC_NO_PARTITION	-1

#define QAIC_DBC_OFF(i)		((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)

#define to_qaic_bo(obj)		container_of(obj, struct qaic_bo, base)
#define to_qaic_drm_device(dev)	container_of(dev, struct qaic_drm_device, drm)
#define to_drm(qddev)		(&(qddev)->drm)
#define to_accel_kdev(qddev)	(to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
#define to_qaic_device(dev)	(to_qaic_drm_device((dev))->qdev)
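
/*
 * Illustrative sketch only (the helper below is hypothetical, not part of
 * this driver): each DBC exposes its registers in a QAIC_DBC_SIZE block of
 * the DBC BAR, starting at QAIC_DBC_BASE, so QAIC_DBC_OFF(i) yields the
 * register offset for channel i.
 */
static inline void __iomem *qaic_dbc_example_regs(void __iomem *bar_dbc,
						  unsigned int i)
{
	/* bar_dbc is assumed to be the mapped DBC BAR (see struct qaic_device) */
	return bar_dbc + QAIC_DBC_OFF(i);
}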

enum aic_families {
	FAMILY_AIC100,
	FAMILY_AIC200,
	FAMILY_MAX,
};

enum __packed dev_states {
	/* Device is offline or will be very soon */
	QAIC_OFFLINE,
	/* Device is booting, not clear if it's in a usable state */
	QAIC_BOOT,
	/* Device is fully operational */
	QAIC_ONLINE,
};

extern bool datapath_polling;

struct qaic_user {
	/* Uniquely identifies this user for the device */
	int handle;
	struct kref ref_count;
	/* Char device opened by this user */
	struct qaic_drm_device *qddev;
	/* Node in list of users that opened this drm device */
	struct list_head node;
	/* SRCU used to synchronize this user during cleanup */
	struct srcu_struct qddev_lock;
	atomic_t chunk_id;
};

struct dma_bridge_chan {
	/* Pointer to the device struct maintained by the driver */
	struct qaic_device *qdev;
	/* ID of this DMA bridge channel (DBC) */
	unsigned int id;
	/* Synchronizes access to xfer_list */
	spinlock_t xfer_lock;
	/* Base address of request queue */
	void *req_q_base;
	/* Base address of response queue */
	void *rsp_q_base;
	/*
	 * Base bus address of request queue. Response queue bus address can be
	 * calculated by adding request queue size to this variable
	 */
	dma_addr_t dma_addr;
	/* Total size of request and response queue in bytes */
	u32 total_size;
	/* Capacity of request/response queue */
	u32 nelem;
	/* The user that opened this DBC */
	struct qaic_user *usr;
	/*
	 * Request ID of the next memory handle that goes in the request queue.
	 * One memory handle can enqueue more than one request element; all
	 * requests that belong to the same memory handle share the same
	 * request ID
	 */
	u16 next_req_id;
	/* true: DBC is in use; false: DBC not in use */
	bool in_use;
	/*
	 * Base address of device registers. Used to read/write request and
	 * response queue's head and tail pointer of this DBC.
	 */
	void __iomem *dbc_base;
	/* Synchronizes access to request queue's head and tail pointer */
	struct mutex req_lock;
	/* Head of list where each node is a memory handle queued in the request queue */
	struct list_head xfer_list;
	/* Synchronizes DBC readers during cleanup */
	struct srcu_struct ch_lock;
	/*
	 * When this DBC is released, any thread waiting on this wait queue is
	 * woken up
	 */
	wait_queue_head_t dbc_release;
	/* Head of list where each node is a BO associated with this DBC */
	struct list_head bo_lists;
	/* The IRQ line for this DBC. Used for polling */
	unsigned int irq;
	/* Polling work item to simulate interrupts */
	struct work_struct poll_work;
};
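
/*
 * Sketch of the assumed SRCU read-side pattern for ch_lock (hypothetical
 * helper, not driver code): readers enter an SRCU section before touching
 * DBC state so that release_dbc() can synchronize against them during
 * cleanup.
 */
static inline bool qaic_dbc_example_reader(struct dma_bridge_chan *dbc)
{
	bool in_use;
	int rcu_id;

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	/* DBC state may be safely read inside the SRCU section */
	in_use = dbc->in_use;
	srcu_read_unlock(&dbc->ch_lock, rcu_id);

	return in_use;
}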

struct qaic_device {
	/* Pointer to base PCI device struct of our physical device */
	struct pci_dev *pdev;
	/* Request ID of the request that will be queued next in the MHI control device */
	u32 next_seq_num;
	/* Base address of the MHI bar */
	void __iomem *bar_mhi;
	/* Base address of the DBCs bar */
	void __iomem *bar_dbc;
	/* Controller structure for MHI devices */
	struct mhi_controller *mhi_cntrl;
	/* MHI control channel device */
	struct mhi_device *cntl_ch;
	/* List of requests queued in MHI control device */
	struct list_head cntl_xfer_list;
	/* Synchronizes MHI control device transactions and its xfer list */
	struct mutex cntl_mutex;
	/* Array of DBC structs of this device */
	struct dma_bridge_chan *dbc;
	/* Work queue for tasks related to MHI control device */
	struct workqueue_struct *cntl_wq;
	/* Synchronizes all the users of device during cleanup */
	struct srcu_struct dev_lock;
	/* Track the state of the device during resets */
	enum dev_states dev_state;
	/* true: single MSI is used to operate device */
	bool single_msi;
	/*
	 * true: A tx MHI transaction has failed and an rx buffer is still
	 * queued in the control device. Such a buffer is considered a lost
	 * rx buffer
	 * false: No rx buffer is lost in the control device
	 */
	bool cntl_lost_buf;
	/* Maximum number of DBCs supported by this device */
	u32 num_dbc;
	/* Reference to the drm_device for this device when it is created */
	struct qaic_drm_device *qddev;
	/* Generate the CRC of a control message */
	u32 (*gen_crc)(void *msg);
	/* Validate the CRC of a control message */
	bool (*valid_crc)(void *msg);
	/* MHI "QAIC_TIMESYNC" channel device */
	struct mhi_device *qts_ch;
	/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
	struct workqueue_struct *qts_wq;
	/* Head of list of pages allocated by MHI bootlog device */
	struct list_head bootlog;
	/* MHI bootlog channel device */
	struct mhi_device *bootlog_ch;
	/* Work queue for tasks related to MHI bootlog device */
	struct workqueue_struct *bootlog_wq;
	/* Synchronizes access of pages in MHI bootlog device */
	struct mutex bootlog_mutex;
	/* MHI RAS channel device */
	struct mhi_device *ras_ch;
	/* Correctable error count */
	unsigned int ce_count;
	/* Uncorrectable error count */
	unsigned int ue_count;
	/* Uncorrectable non-fatal error count */
	unsigned int ue_nf_count;
};

struct qaic_drm_device {
	/* The drm device struct of this drm device */
	struct drm_device drm;
	/* Pointer to the root device struct driven by this driver */
	struct qaic_device *qdev;
	/*
	 * The physical device can be partitioned into a number of logical
	 * devices, and each logical device is given a partition ID. This
	 * member stores that ID. QAIC_NO_PARTITION is a sentinel used to
	 * mark that this drm device is the actual physical device
	 */
	s32 partition_id;
	/* Head of list of users who have opened this drm device */
	struct list_head users;
	/* Synchronizes access to users list */
	struct mutex users_mutex;
};

struct qaic_bo {
	struct drm_gem_object base;
	/* Scatter/gather table for allocated/imported BO */
	struct sg_table *sgt;
	/* Head of list of slices of this BO */
	struct list_head slices;
	/* Total nents, for all slices of this BO */
	int total_slice_nents;
	/*
	 * Direction of transfer. It can assume only two values,
	 * DMA_TO_DEVICE and DMA_FROM_DEVICE.
	 */
	int dir;
	/* The pointer of the DBC which operates on this BO */
	struct dma_bridge_chan *dbc;
	/* Number of slices that belong to this buffer */
	u32 nr_slice;
	/* Number of slices that have been transferred by DMA engine */
	u32 nr_slice_xfer_done;
	/*
	 * If true then user has attached slicing information to this BO by
	 * calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
	 */
	bool sliced;
	/* Request ID of this BO if it is queued for execution */
	u16 req_id;
	/* Wait on this for completion of DMA transfer of this BO */
	struct completion xfer_done;
	/*
	 * Node in linked list where head is dbc->xfer_list.
	 * This linked list contains BOs that are queued for DMA transfer.
	 */
	struct list_head xfer_list;
	/*
	 * Node in linked list where head is dbc->bo_lists.
	 * This linked list contains BOs that are associated with the DBC it
	 * is linked to.
	 */
	struct list_head bo_list;
	struct {
		/*
		 * Latest timestamp (ns) at which kernel received a request to
		 * execute this BO
		 */
		u64 req_received_ts;
		/*
		 * Latest timestamp (ns) at which kernel enqueued requests of
		 * this BO for execution in DMA queue
		 */
		u64 req_submit_ts;
		/*
		 * Latest timestamp (ns) at which kernel received a completion
		 * interrupt for requests of this BO
		 */
		u64 req_processed_ts;
		/*
		 * Number of elements already enqueued in DMA queue before
		 * enqueuing requests of this BO
		 */
		u32 queue_level_before;
	} perf_stats;
	/* Synchronizes BO operations */
	struct mutex lock;
};
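
/*
 * Sketch (hypothetical helper, assumed usage): once a BO's requests have
 * been queued to a DBC, a caller can sleep on xfer_done until the DMA
 * completion interrupt fires; qaic_wait_bo_ioctl() below implements the
 * real, timeout-aware, user-facing version of this wait.
 */
static inline int qaic_bo_example_wait(struct qaic_bo *bo)
{
	/* Returns 0 once the transfer completes, -ERESTARTSYS on a signal */
	return wait_for_completion_interruptible(&bo->xfer_done);
}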

struct bo_slice {
	/* Mapped pages */
	struct sg_table *sgt;
	/* Number of requests required to queue in DMA queue */
	int nents;
	/* See enum dma_data_direction */
	int dir;
	/* Actual requests that will be copied in DMA queue */
	struct dbc_req *reqs;
	struct kref ref_count;
	/* true: No DMA transfer required */
	bool no_xfer;
	/* Pointer to the parent BO handle */
	struct qaic_bo *bo;
	/* Node in list of slices maintained by parent BO */
	struct list_head slice;
	/* Size of this slice in bytes */
	u64 size;
	/* Offset of this slice in buffer */
	u64 offset;
};

int get_dbc_req_elem_size(void);
int get_dbc_rsp_elem_size(void);
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

int qaic_control_open(struct qaic_device *qdev);
void qaic_control_close(struct qaic_device *qdev);
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);

irqreturn_t dbc_irq_threaded_fn(int irq, void *data);
irqreturn_t dbc_irq_handler(int irq, void *data);
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail);

void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void irq_polling_work(struct work_struct *work);

#endif /* _QAIC_H_ */