/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QAIC_H_
#define _QAIC_H_

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/sizes.h> /* SZ_4K / SZ_128K */
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>

#define QAIC_DBC_BASE		SZ_128K
#define QAIC_DBC_SIZE		SZ_4K

#define QAIC_NO_PARTITION	-1

#define QAIC_DBC_OFF(i)		((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
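
/*
 * Worked example (illustrative): each DBC's register window is a 4K page
 * starting 128K into the BAR, so e.g.
 * QAIC_DBC_OFF(3) == 3 * SZ_4K + SZ_128K == 0x3000 + 0x20000 == 0x23000.
 */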

#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
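
/*
 * Usage sketch (illustrative only, not part of the driver): a GEM callback
 * that receives a struct drm_gem_object can recover the enclosing qaic_bo
 * with the container_of helper above:
 *
 *	static void example_bo_op(struct drm_gem_object *obj)
 *	{
 *		struct qaic_bo *bo = to_qaic_bo(obj);
 *		...
 *	}
 */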

enum __packed dev_states {
	/* Device is offline or will be very soon */
	QAIC_OFFLINE,
	/* Device is booting, not clear if it's in a usable state */
	QAIC_BOOT,
	/* Device is fully operational */
	QAIC_ONLINE,
};
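
/*
 * Note (summary, not enforced by the enum): over a boot or reset cycle the
 * state typically progresses QAIC_OFFLINE -> QAIC_BOOT -> QAIC_ONLINE, and
 * returns to QAIC_OFFLINE when the device goes away.
 */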

extern bool datapath_polling;

struct qaic_user {
	/* Uniquely identifies this user for the device */
	int			handle;
	struct kref		ref_count;
	/* Char device opened by this user */
	struct qaic_drm_device	*qddev;
	/* Node in list of users that opened this drm device */
	struct list_head	node;
	/* SRCU used to synchronize this user during cleanup */
	struct srcu_struct	qddev_lock;
	atomic_t		chunk_id;
};
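
/*
 * Lifetime sketch (illustrative; the release callback name below is
 * hypothetical): ref_count follows the usual kref pattern, e.g.
 *
 *	kref_get(&usr->ref_count);
 *	...
 *	kref_put(&usr->ref_count, free_usr_cb);  (frees the qaic_user)
 */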

struct dma_bridge_chan {
	/* Pointer to device struct maintained by driver */
	struct qaic_device	*qdev;
	/* ID of this DMA bridge channel (DBC) */
	unsigned int		id;
	/* Synchronizes access to xfer_list */
	spinlock_t		xfer_lock;
	/* Base address of request queue */
	void			*req_q_base;
	/* Base address of response queue */
	void			*rsp_q_base;
	/*
	 * Base bus address of the request queue. The response queue's bus
	 * address can be calculated by adding the request queue's size to
	 * this variable
	 */
	dma_addr_t		dma_addr;
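	/*
	 * Illustration of the layout described above (assuming both queues
	 * hold nelem entries): the response queue would begin at
	 * dma_addr + nelem * get_dbc_req_elem_size().
	 */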
	/* Total size of the request and response queues in bytes */
	u32			total_size;
	/* Capacity of request/response queue */
	u32			nelem;
	/* The user that opened this DBC */
	struct qaic_user	*usr;
	/*
	 * Request ID of the next memory handle that goes in the request
	 * queue. One memory handle can enqueue more than one request element;
	 * all requests that belong to the same memory handle share the same
	 * request ID
	 */
	u16			next_req_id;
	/* true: DBC is in use; false: DBC not in use */
	bool			in_use;
	/*
	 * Base address of device registers. Used to read/write the head and
	 * tail pointers of this DBC's request and response queues.
	 */
	void __iomem		*dbc_base;
	/* Head of list where each node is a memory handle queued in the request queue */
	struct list_head	xfer_list;
	/* Synchronizes DBC readers during cleanup */
	struct srcu_struct	ch_lock;
	/*
	 * When this DBC is released, any thread waiting on this wait queue is
	 * woken up
	 */
	wait_queue_head_t	dbc_release;
	/* Head of list where each node is a BO associated with this DBC */
	struct list_head	bo_lists;
	/* The IRQ line for this DBC. Used for polling */
	unsigned int		irq;
	/* Polling work item to simulate interrupts */
	struct work_struct	poll_work;
};

struct qaic_device {
	/* Pointer to base PCI device struct of our physical device */
	struct pci_dev		*pdev;
	/* Request ID of the request that will be queued next on the MHI control device */
	u32			next_seq_num;
	/* Base address of BAR 0 */
	void __iomem		*bar_0;
	/* Base address of BAR 2 */
	void __iomem		*bar_2;
	/* Controller structure for MHI devices */
	struct mhi_controller	*mhi_cntrl;
	/* MHI control channel device */
	struct mhi_device	*cntl_ch;
	/* List of requests queued on the MHI control device */
	struct list_head	cntl_xfer_list;
	/* Synchronizes MHI control device transactions and its xfer list */
	struct mutex		cntl_mutex;
	/* Array of DBC structs for this device */
	struct dma_bridge_chan	*dbc;
	/* Work queue for tasks related to the MHI control device */
	struct workqueue_struct	*cntl_wq;
	/* Synchronizes all the users of the device during cleanup */
	struct srcu_struct	dev_lock;
	/* Tracks the state of the device during resets */
	enum dev_states		dev_state;
	/* true: a single MSI is used to operate the device */
	bool			single_msi;
	/*
	 * true: a TX MHI transaction has failed and an RX buffer is still
	 * queued in the control device. Such a buffer is considered a lost
	 * RX buffer
	 * false: no RX buffer is lost in the control device
	 */
	bool			cntl_lost_buf;
	/* Maximum number of DBCs supported by this device */
	u32			num_dbc;
	/* Reference to the drm_device for this device when it is created */
	struct qaic_drm_device	*qddev;
	/* Generates the CRC of a control message */
	u32 (*gen_crc)(void *msg);
	/* Validates the CRC of a control message */
	bool (*valid_crc)(void *msg);
	/* MHI "QAIC_TIMESYNC" channel device */
	struct mhi_device	*qts_ch;
	/* Work queue for tasks related to the MHI "QAIC_TIMESYNC" channel */
	struct workqueue_struct	*qts_wq;
};

struct qaic_drm_device {
	/* The drm device struct of this drm device */
	struct drm_device	drm;
	/* Pointer to the root device struct driven by this driver */
	struct qaic_device	*qdev;
	/*
	 * The physical device can be partitioned into a number of logical
	 * devices, and each logical device is given a partition ID. This
	 * member stores that ID. QAIC_NO_PARTITION is a sentinel used to mark
	 * that this drm device is the actual physical device
	 */
	s32			partition_id;
	/* Head in list of users who have opened this drm device */
	struct list_head	users;
	/* Synchronizes access to users list */
	struct mutex		users_mutex;
};
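
/*
 * Sketch (illustrative): distinguishing the base physical device from a
 * logical partition using the sentinel above:
 *
 *	if (qddev->partition_id == QAIC_NO_PARTITION)
 *		...this drm device represents the whole physical device...
 */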

struct qaic_bo {
	struct drm_gem_object	base;
	/* Scatter/gather table for allocated/imported BO */
	struct sg_table		*sgt;
	/* Head in list of slices of this BO */
	struct list_head	slices;
	/* Total nents, for all slices of this BO */
	int			total_slice_nents;
	/*
	 * Direction of transfer. It can assume only two values,
	 * DMA_TO_DEVICE and DMA_FROM_DEVICE.
	 */
	int			dir;
	/* Pointer to the DBC which operates on this BO */
	struct dma_bridge_chan	*dbc;
	/* Number of slices that belong to this buffer */
	u32			nr_slice;
	/* Number of slices that have been transferred by the DMA engine */
	u32			nr_slice_xfer_done;
	/* true: BO is queued for execution; false: BO is not queued */
	bool			queued;
	/*
	 * If true then the user has attached slicing information to this BO by
	 * calling the DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
	 */
	bool			sliced;
	/* Request ID of this BO if it is queued for execution */
	u16			req_id;
	/* Handle assigned to this BO */
	u32			handle;
	/* Wait on this for completion of DMA transfer of this BO */
	struct completion	xfer_done;
	/*
	 * Node in linked list where head is dbc->xfer_list.
	 * This linked list contains BOs that are queued for DMA transfer.
	 */
	struct list_head	xfer_list;
	/*
	 * Node in linked list where head is dbc->bo_lists.
	 * This linked list contains the BOs associated with that DBC.
	 */
	struct list_head	bo_list;
	struct {
		/*
		 * Latest timestamp (ns) at which the kernel received a request
		 * to execute this BO
		 */
		u64		req_received_ts;
		/*
		 * Latest timestamp (ns) at which the kernel enqueued requests
		 * of this BO for execution in the DMA queue
		 */
		u64		req_submit_ts;
		/*
		 * Latest timestamp (ns) at which the kernel received a
		 * completion interrupt for requests of this BO
		 */
		u64		req_processed_ts;
		/*
		 * Number of elements already enqueued in the DMA queue before
		 * enqueuing requests of this BO
		 */
		u32		queue_level_before;
	} perf_stats;
	/* Synchronizes BO operations */
	struct mutex		lock;
};
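
/*
 * Wait sketch (illustrative): a caller waiting for this BO's DMA transfer
 * to finish blocks on xfer_done, e.g.
 *
 *	ret = wait_for_completion_interruptible_timeout(&bo->xfer_done, timeout);
 *
 * where ret == 0 indicates a timeout and ret < 0 an interruption.
 */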

struct bo_slice {
	/* Mapped pages */
	struct sg_table		*sgt;
	/* Number of requests required to queue in the DMA queue */
	int			nents;
	/* See enum dma_data_direction */
	int			dir;
	/* Actual requests that will be copied into the DMA queue */
	struct dbc_req		*reqs;
	struct kref		ref_count;
	/* true: no DMA transfer required */
	bool			no_xfer;
	/* Pointer to the parent BO handle */
	struct qaic_bo		*bo;
	/* Node in list of slices maintained by the parent BO */
	struct list_head	slice;
	/* Size of this slice in bytes */
	u64			size;
	/* Offset of this slice in buffer */
	u64			offset;
};
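
/*
 * Note (illustrative): each slice describes the byte range
 * [offset, offset + size) of its parent BO, so the nodes on bo->slices
 * together cover the regions of the buffer to be transferred.
 */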

int get_dbc_req_elem_size(void);
int get_dbc_rsp_elem_size(void);
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

int qaic_control_open(struct qaic_device *qdev);
void qaic_control_close(struct qaic_device *qdev);
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);

irqreturn_t dbc_irq_threaded_fn(int irq, void *data);
irqreturn_t dbc_irq_handler(int irq, void *data);
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);

void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void irq_polling_work(struct work_struct *work);

#endif /* _QAIC_H_ */