/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QAIC_H_
#define _QAIC_H_

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>

#define QAIC_DBC_BASE		SZ_128K
#define QAIC_DBC_SIZE		SZ_4K
#define QAIC_SSR_DBC_SENTINEL	U32_MAX /* Sentinel: no SSR in progress */

#define QAIC_NO_PARTITION	-1

#define QAIC_DBC_OFF(i)		((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
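
/*
 * For example, with the sizes above, QAIC_DBC_OFF(0) is SZ_128K and
 * QAIC_DBC_OFF(1) is SZ_128K + SZ_4K: each DBC gets a 4K register window
 * starting 128K into the DBCs BAR.
 */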

#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)

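/*
 * A typical (illustrative) use of the helpers above from an ioctl handler,
 * which receives a struct drm_device pointer:
 *
 *	struct qaic_device *qdev = to_qaic_device(dev);
 */
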
enum aic_families {
	FAMILY_AIC100,
	FAMILY_AIC200,
	FAMILY_MAX,
};

enum __packed dev_states {
	/* Device is offline or will be very soon */
	QAIC_OFFLINE,
	/* Device is booting, not clear if it's in a usable state */
	QAIC_BOOT,
	/* Device is fully operational */
	QAIC_ONLINE,
};

enum dbc_states {
	/* DBC is free and can be activated */
	DBC_STATE_IDLE,
	/* DBC is activated and a workload is running on the device */
	DBC_STATE_ASSIGNED,
	/* Sub-system associated with this workload has crashed and will shut down soon */
	DBC_STATE_BEFORE_SHUTDOWN,
	/* Sub-system associated with this workload has crashed and has shut down */
	DBC_STATE_AFTER_SHUTDOWN,
	/* Sub-system associated with this workload is shut down and will be powered up soon */
	DBC_STATE_BEFORE_POWER_UP,
	/* Sub-system associated with this workload is now powered up */
	DBC_STATE_AFTER_POWER_UP,
	DBC_STATE_MAX,
};

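/*
 * Illustrative lifecycle implied by the state names above; the
 * authoritative transitions live in the SSR handling code:
 *
 *	IDLE -> ASSIGNED -> BEFORE_SHUTDOWN -> AFTER_SHUTDOWN ->
 *	BEFORE_POWER_UP -> AFTER_POWER_UP
 */
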
/* Module parameter: operate the datapath in polling mode instead of using DBC interrupts */
extern bool datapath_polling;

struct qaic_user {
	/* Uniquely identifies this user for the device */
	int			handle;
	struct kref		ref_count;
	/* Char device opened by this user */
	struct qaic_drm_device	*qddev;
	/* Node in list of users that opened this drm device */
	struct list_head	node;
	/* SRCU used to synchronize this user during cleanup */
	struct srcu_struct	qddev_lock;
	atomic_t		chunk_id;
};

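/*
 * A minimal sketch of the refcounting pattern around ref_count above;
 * free_usr() is a hypothetical release callback, not necessarily the
 * driver's:
 *
 *	kref_get(&usr->ref_count);
 *	...
 *	kref_put(&usr->ref_count, free_usr);
 */
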
struct dma_bridge_chan {
	/* Pointer to device struct maintained by driver */
	struct qaic_device	*qdev;
	/* ID of this DMA bridge channel (DBC) */
	unsigned int		id;
	/* Synchronizes access to xfer_list */
	spinlock_t		xfer_lock;
	/* Base address of request queue */
	void			*req_q_base;
	/* Base address of response queue */
	void			*rsp_q_base;
	/*
	 * Base bus address of request queue. Response queue bus address can be
	 * calculated by adding request queue size to this variable
	 */
	dma_addr_t		dma_addr;
	/* Total size of request and response queue in bytes */
	u32			total_size;
	/* Capacity of request/response queue */
	u32			nelem;
	/* The user that opened this DBC */
	struct qaic_user	*usr;
	/*
	 * Request ID of the next memory handle that goes in the request queue.
	 * One memory handle can enqueue more than one request element; all
	 * requests that belong to the same memory handle share the same
	 * request ID
	 */
	u16			next_req_id;
	/* true: DBC is in use; false: DBC not in use */
	bool			in_use;
	/*
	 * Base address of device registers. Used to read/write this DBC's
	 * request and response queue head and tail pointers.
	 */
	void __iomem		*dbc_base;
	/* Synchronizes access to the request queue's head and tail pointers */
	struct mutex		req_lock;
	/* Head of list where each node is a memory handle queued in the request queue */
	struct list_head	xfer_list;
	/* Synchronizes DBC readers during cleanup */
	struct srcu_struct	ch_lock;
	/*
	 * When this DBC is released, any thread waiting on this wait queue is
	 * woken up
	 */
	wait_queue_head_t	dbc_release;
	/* Head of list where each node is a BO associated with this DBC */
	struct list_head	bo_lists;
	/* The irq line for this DBC. Used for polling */
	unsigned int		irq;
	/* Polling work item to simulate interrupts */
	struct work_struct	poll_work;
	/* Current state of this DBC, from enum dbc_states */
	unsigned int		state;
};

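/*
 * Illustrative only: per the dma_addr comment above, the response queue's
 * bus address can be derived from the request queue's, e.g.
 *
 *	dma_addr_t rsp_dma = dbc->dma_addr +
 *			     dbc->nelem * get_dbc_req_elem_size();
 */
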
struct qaic_device {
	/* Pointer to base PCI device struct of our physical device */
	struct pci_dev		*pdev;
	/* Sequence number of the next request queued on the MHI control device */
	u32			next_seq_num;
	/* Base address of the MHI bar */
	void __iomem		*bar_mhi;
	/* Base address of the DBCs bar */
	void __iomem		*bar_dbc;
	/* Controller structure for MHI devices */
	struct mhi_controller	*mhi_cntrl;
	/* MHI control channel device */
	struct mhi_device	*cntl_ch;
	/* List of requests queued in MHI control device */
	struct list_head	cntl_xfer_list;
	/* Synchronizes MHI control device transactions and its xfer list */
	struct mutex		cntl_mutex;
	/* Array of DBC structs for this device */
	struct dma_bridge_chan	*dbc;
	/* Work queue for tasks related to MHI control device */
	struct workqueue_struct	*cntl_wq;
	/* Synchronizes all the users of device during cleanup */
	struct srcu_struct	dev_lock;
	/* Track the state of the device during resets */
	enum dev_states		dev_state;
	/* true: a single MSI is used to operate the device */
	bool			single_msi;
	/*
	 * true: a tx MHI transaction has failed and an rx buffer is still
	 * queued in the control device. Such a buffer is considered a lost
	 * rx buffer
	 * false: no rx buffer is lost in the control device
	 */
	bool			cntl_lost_buf;
	/* Maximum number of DBCs supported by this device */
	u32			num_dbc;
	/* Reference to the drm_device for this device when it is created */
	struct qaic_drm_device	*qddev;
	/* Generate the CRC of a control message */
	u32 (*gen_crc)(void *msg);
	/* Validate the CRC of a control message */
	bool (*valid_crc)(void *msg);
	/* MHI "QAIC_TIMESYNC" channel device */
	struct mhi_device	*qts_ch;
	/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
	struct workqueue_struct	*qts_wq;
	/* MHI "QAIC_TIMESYNC_PERIODIC" channel device */
	struct mhi_device	*mqts_ch;
	/* Head of list of pages allocated by MHI bootlog device */
	struct list_head	bootlog;
	/* MHI bootlog channel device */
	struct mhi_device	*bootlog_ch;
	/* Work queue for tasks related to MHI bootlog device */
	struct workqueue_struct	*bootlog_wq;
	/* Synchronizes access to pages in MHI bootlog device */
	struct mutex		bootlog_mutex;
	/* MHI RAS channel device */
	struct mhi_device	*ras_ch;
	/* Correctable error count */
	unsigned int		ce_count;
	/* Uncorrectable error count */
	unsigned int		ue_count;
	/* Uncorrectable non-fatal error count */
	unsigned int		ue_nf_count;
	/* MHI SSR channel device */
	struct mhi_device	*ssr_ch;
	/* Work queue for tasks related to MHI SSR device */
	struct workqueue_struct	*ssr_wq;
	/* Buffer to collect SSR crashdump via SSR MHI channel */
	void			*ssr_mhi_buf;
	/* DBC which is under SSR. QAIC_SSR_DBC_SENTINEL means no SSR is in progress */
	u32			ssr_dbc;
};

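/*
 * A minimal sketch (not the driver's exact code) of how readers use
 * dev_lock to safely test device state:
 *
 *	int rcu_id = srcu_read_lock(&qdev->dev_lock);
 *
 *	if (qdev->dev_state != QAIC_ONLINE) {
 *		srcu_read_unlock(&qdev->dev_lock, rcu_id);
 *		return -ENODEV;
 *	}
 *	...
 *	srcu_read_unlock(&qdev->dev_lock, rcu_id);
 */
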
struct qaic_drm_device {
	/* The drm device struct of this drm device */
	struct drm_device	drm;
	/* Pointer to the root device struct driven by this driver */
	struct qaic_device	*qdev;
	/*
	 * The physical device can be partitioned into a number of logical
	 * devices, and each logical device is given a partition id. This
	 * member stores that id. QAIC_NO_PARTITION is a sentinel used to
	 * mark that this drm device represents the physical device itself
	 */
	s32			partition_id;
	/* Head of list of users who have opened this drm device */
	struct list_head	users;
	/* Synchronizes access to users list */
	struct mutex		users_mutex;
	/* Pointer to array of DBC sysfs attributes */
	void			*sysfs_attrs;
};

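/*
 * Illustrative check (not taken verbatim from the driver): the drm device
 * that represents the whole physical device can be recognized with
 *
 *	if (qddev->partition_id == QAIC_NO_PARTITION)
 *		...
 */
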
struct qaic_bo {
	struct drm_gem_object	base;
	/* Scatter/gather table for allocated/imported BOs */
	struct sg_table		*sgt;
	/* Head of list of slices of this BO */
	struct list_head	slices;
	/* Total nents across all slices of this BO */
	int			total_slice_nents;
	/*
	 * Direction of transfer. It can assume only two values:
	 * DMA_TO_DEVICE and DMA_FROM_DEVICE.
	 */
	int			dir;
	/* Pointer to the DBC that operates on this BO */
	struct dma_bridge_chan	*dbc;
	/* Number of slices that belong to this buffer */
	u32			nr_slice;
	/* Number of slices that have been transferred by the DMA engine */
	u32			nr_slice_xfer_done;
	/*
	 * If true, the user has attached slicing information to this BO by
	 * calling the DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
	 */
	bool			sliced;
	/* Request ID of this BO if it is queued for execution */
	u16			req_id;
	/* Wait on this for completion of DMA transfer of this BO */
	struct completion	xfer_done;
	/*
	 * Node in linked list whose head is dbc->xfer_list.
	 * This linked list contains BOs that are queued for DMA transfer.
	 */
	struct list_head	xfer_list;
	/*
	 * Node in linked list whose head is dbc->bo_lists.
	 * This linked list contains BOs that are associated with the DBC they
	 * are linked to.
	 */
	struct list_head	bo_list;
	struct {
		/*
		 * Latest timestamp (ns) at which the kernel received a request
		 * to execute this BO
		 */
		u64		req_received_ts;
		/*
		 * Latest timestamp (ns) at which the kernel enqueued requests
		 * of this BO for execution in the DMA queue
		 */
		u64		req_submit_ts;
		/*
		 * Latest timestamp (ns) at which the kernel received a
		 * completion interrupt for requests of this BO
		 */
		u64		req_processed_ts;
		/*
		 * Number of elements already enqueued in the DMA queue before
		 * enqueuing requests of this BO
		 */
		u32		queue_level_before;
	} perf_stats;
	/* Synchronizes BO operations */
	struct mutex		lock;
};

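/*
 * A minimal sketch (assumed, not the driver's exact code) of waiting on
 * xfer_done above; "timeout" is a hypothetical jiffies value. A return of
 * 0 means the wait timed out and a negative value means it was interrupted
 * by a signal:
 *
 *	long ret = wait_for_completion_interruptible_timeout(&bo->xfer_done,
 *							     timeout);
 *
 *	if (!ret)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */
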
struct bo_slice {
	/* Mapped pages */
	struct sg_table		*sgt;
	/* Number of requests that need to be queued in the DMA queue */
	int			nents;
	/* See enum dma_data_direction */
	int			dir;
	/* Actual requests that will be copied into the DMA queue */
	struct dbc_req		*reqs;
	struct kref		ref_count;
	/* true: no DMA transfer required */
	bool			no_xfer;
	/* Pointer to the parent BO handle */
	struct qaic_bo		*bo;
	/* Node in list of slices maintained by the parent BO */
	struct list_head	slice;
	/* Size of this slice in bytes */
	u64			size;
	/* Offset of this slice in the buffer */
	u64			offset;
};

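/*
 * For example (illustrative numbers only), a 1 MiB BO attached as two
 * slices could be described by slice 0 with offset 0 and size SZ_512K,
 * and slice 1 with offset SZ_512K and size SZ_512K.
 */
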
int get_dbc_req_elem_size(void);
int get_dbc_rsp_elem_size(void);
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);
void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

int qaic_control_open(struct qaic_device *qdev);
void qaic_control_close(struct qaic_device *qdev);
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);

irqreturn_t dbc_irq_threaded_fn(int irq, void *data);
irqreturn_t dbc_irq_handler(int irq, void *data);
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail);

void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void qaic_irq_polling_work(struct work_struct *work);
void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id);
void qaic_dbc_exit_ssr(struct qaic_device *qdev);

/* qaic_sysfs.c */
int qaic_sysfs_init(struct qaic_drm_device *qddev);
void qaic_sysfs_remove(struct qaic_drm_device *qddev);
void set_dbc_state(struct qaic_device *qdev, u32 dbc_id, unsigned int state);

#endif /* _QAIC_H_ */