/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_DEVICE_QUEUE_MANAGER_H_
#define KFD_DEVICE_QUEUE_MANAGER_H_

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"


#define VMID_NUM 16

#define KFD_MES_PROCESS_QUANTUM		100000
#define KFD_MES_GANG_QUANTUM		10000
#define USE_DEFAULT_GRACE_PERIOD 0xffffffff

struct device_process_node {
	struct qcm_process_device *qpd;
	struct list_head list;
};

union SQ_CMD_BITS {
	struct {
		uint32_t cmd:3;
		uint32_t:1;
		uint32_t mode:3;
		uint32_t check_vmid:1;
		uint32_t trap_id:3;
		uint32_t:5;
		uint32_t wave_id:4;
		uint32_t simd_id:2;
		uint32_t:2;
		uint32_t queue_id:3;
		uint32_t:1;
		uint32_t vm_id:4;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};

union GRBM_GFX_INDEX_BITS {
	struct {
		uint32_t instance_index:8;
		uint32_t sh_index:8;
		uint32_t se_index:8;
		uint32_t:5;
		uint32_t sh_broadcast_writes:1;
		uint32_t instance_broadcast_writes:1;
		uint32_t se_broadcast_writes:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};

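/*
 * Illustrative sketch, not part of the driver: each union above exposes the
 * same register value both as named bitfields (bits/bitfields) and as a raw
 * dword (u32All), so a value can be composed field by field and then handed
 * to the register write path as a single 32-bit word. The field values and
 * the "vmid" variable below are placeholders only:
 *
 *	union SQ_CMD_BITS reg_sq_cmd = {0};
 *	union GRBM_GFX_INDEX_BITS reg_gfx_index = {0};
 *
 *	reg_sq_cmd.bits.check_vmid = 1;
 *	reg_sq_cmd.bits.vm_id = vmid;
 *
 *	reg_gfx_index.bits.sh_broadcast_writes = 1;
 *	reg_gfx_index.bits.se_broadcast_writes = 1;
 *	reg_gfx_index.bits.instance_broadcast_writes = 1;
 *
 *	... reg_sq_cmd.u32All and reg_gfx_index.u32All are then written
 *	    through the ASIC register interface ...
 */
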
/**
 * struct device_queue_manager_ops
 *
 * @create_queue: Queue creation routine.
 *
 * @destroy_queue: Queue destruction routine.
 *
 * @update_queue: Queue update routine.
 *
 * @execute_queues: Dispatches the queues list to the H/W.
 *
 * @register_process: This routine associates a specific process with the
 * device.
 *
 * @unregister_process: Destroys the association between a process and the
 * device.
 *
 * @initialize: Initializes the pipelines and memory module for that device.
 *
 * @start: Initializes the resources/modules the device needs for queue
 * execution. This function is called on device initialization and after the
 * system wakes up from suspend.
 *
 * @stop: This routine stops execution of all the active queues running on the
 * H/W; it is called on system suspend.
 *
 * @uninitialize: Destroys all the device queue manager resources allocated in
 * the initialize routine.
 *
 * @halt: This routine unmaps queues from the runlist and sets the halt status
 * to true so no more queues are mapped to the runlist until unhalt.
 *
 * @unhalt: This routine sets the halt status to false and maps queues back to
 * the runlist.
 *
 * @create_kernel_queue: Creates kernel queue. Used for debug queue.
 *
 * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
 *
 * @set_cache_memory_policy: Sets memory policy (cached/non-cached) for the
 * memory apertures.
 *
 * @process_termination: Clears all process queues belonging to that device.
 *
 * @evict_process_queues: Evict all active queues of a process.
 *
 * @restore_process_queues: Restore all evicted queues of a process.
 *
 * @get_wave_state: Retrieves context save state and optionally copies the
 * control stack, if kept in the MQD, to the given userspace address.
 *
 * @reset_queues: Reset queues which consume RAS poison.
 *
 * @get_queue_checkpoint_info: Retrieves queue size information for CRIU checkpoint.
 *
 * @checkpoint_mqd: Checkpoint queue MQD contents for CRIU.
 */

struct device_queue_manager_ops {
	int	(*create_queue)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				const struct kfd_criu_queue_priv_data *qd,
				const void *restore_mqd,
				const void *restore_ctl_stack);

	int	(*destroy_queue)(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q);

	int	(*update_queue)(struct device_queue_manager *dqm,
				struct queue *q, struct mqd_update_info *minfo);

	int	(*register_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	int	(*unregister_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	int	(*initialize)(struct device_queue_manager *dqm);
	int	(*start)(struct device_queue_manager *dqm);
	int	(*stop)(struct device_queue_manager *dqm);
	void	(*uninitialize)(struct device_queue_manager *dqm);
	int     (*halt)(struct device_queue_manager *dqm);
	int     (*unhalt)(struct device_queue_manager *dqm);
	int	(*create_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	void	(*destroy_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	bool	(*set_cache_memory_policy)(struct device_queue_manager *dqm,
					   struct qcm_process_device *qpd,
					   enum cache_policy default_policy,
					   enum cache_policy alternate_policy,
					   void __user *alternate_aperture_base,
					   uint64_t alternate_aperture_size);

	int (*process_termination)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);

	int (*evict_process_queues)(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd);
	int (*restore_process_queues)(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd);

	int	(*get_wave_state)(struct device_queue_manager *dqm,
				  struct queue *q,
				  void __user *ctl_stack,
				  u32 *ctl_stack_used_size,
				  u32 *save_area_used_size);

	int (*reset_queues)(struct device_queue_manager *dqm,
					uint16_t pasid);
	void	(*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
				  const struct queue *q, u32 *mqd_size,
				  u32 *ctl_stack_size);

	int	(*checkpoint_mqd)(struct device_queue_manager *dqm,
				  const struct queue *q,
				  void *mqd,
				  void *ctl_stack);
};

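/*
 * Illustrative sketch, not part of the driver: callers go through this ops
 * table instead of calling a scheduler implementation directly, so the same
 * call site works for both HWS and non-HWS scheduling modes. A hypothetical
 * caller creating and later destroying a user queue might look like the
 * following, where dqm, q and qpd are assumed to have been set up elsewhere
 * and the NULL arguments skip the CRIU restore data:
 *
 *	int r = dqm->ops.create_queue(dqm, q, qpd, NULL, NULL, NULL);
 *
 *	if (r)
 *		return r;
 *	...
 *	r = dqm->ops.destroy_queue(dqm, qpd, q);
 */
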
struct device_queue_manager_asic_ops {
	int	(*update_qpd)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);
	bool	(*set_cache_memory_policy)(struct device_queue_manager *dqm,
					   struct qcm_process_device *qpd,
					   enum cache_policy default_policy,
					   enum cache_policy alternate_policy,
					   void __user *alternate_aperture_base,
					   uint64_t alternate_aperture_size);
	void	(*init_sdma_vm)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd);
	struct mqd_manager *	(*mqd_manager_init)(enum KFD_MQD_TYPE type,
				 struct kfd_node *dev);
};

struct dqm_detect_hang_info {
	int pipe_id;
	int queue_id;
	int xcc_id;
	uint64_t queue_address;
};

/**
 * struct device_queue_manager
 *
 * This struct is a base class for the kfd queue scheduler at the
 * device level. The device base class should expose the basic operations
 * for queue creation and queue destruction. This base class hides the
 * scheduling mode of the driver and the specific implementation of the
 * concrete device. This class is the only class in the queue scheduler
 * that configures the H/W.
 *
 */

struct device_queue_manager {
	struct device_queue_manager_ops ops;
	struct device_queue_manager_asic_ops asic_ops;

	struct mqd_manager	*mqd_mgrs[KFD_MQD_TYPE_MAX];
	struct packet_manager	packet_mgr;
	struct kfd_node		*dev;
	struct mutex		lock_hidden; /* use dqm_lock/unlock(dqm) */
	struct list_head	queues;
	unsigned int		saved_flags;
	unsigned int		processes_count;
	unsigned int		active_queue_count;
	unsigned int		active_cp_queue_count;
	unsigned int		gws_queue_count;
	unsigned int		total_queue_count;
	unsigned int		next_pipe_to_allocate;
	unsigned int		*allocated_queues;
	DECLARE_BITMAP(sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	DECLARE_BITMAP(xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	/* the pasid mapping for each kfd vmid */
	uint16_t		vmid_pasid[VMID_NUM];
	uint64_t		pipelines_addr;
	uint64_t		fence_gpu_addr;
	uint64_t		*fence_addr;
	struct kfd_mem_obj	*fence_mem;
	bool			active_runlist;
	int			sched_policy;
	uint32_t		trap_debug_vmid;

	/* hw exception */
	bool			is_hws_hang;
	bool			is_resetting;
	struct work_struct	hw_exception_work;
	struct kfd_mem_obj	hiq_sdma_mqd;
	bool			sched_running;
	bool			sched_halt;

	/* used for GFX 9.4.3 only */
	uint32_t		current_logical_xcc_start;

	uint32_t		wait_times;

	wait_queue_head_t	destroy_wait;

	/* for per-queue reset support */
	struct dqm_detect_hang_info *detect_hang_info;
	size_t detect_hang_info_size;
	int detect_hang_count;
};

void device_queue_manager_init_cik(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v9(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v10(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v11(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v12(
		struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);
unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);
int release_debug_trap_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);
int suspend_queues(struct kfd_process *p,
			uint32_t num_queues,
			uint32_t grace_period,
			uint64_t exception_clear_mask,
			uint32_t *usr_queue_id_array);
int resume_queues(struct kfd_process *p,
		uint32_t num_queues,
		uint32_t *usr_queue_id_array);
void set_queue_snapshot_entry(struct queue *q,
			      uint64_t exception_clear_mask,
			      struct kfd_queue_snapshot_entry *qss_entry);
int debug_lock_and_unmap(struct device_queue_manager *dqm);
int debug_map_and_unlock(struct device_queue_manager *dqm);
int debug_refresh_runlist(struct device_queue_manager *dqm);

static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
	return (pdd->lds_base >> 16) & 0xFF;
}

static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
	return (pdd->lds_base >> 60) & 0x0E;
}

/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void dqm_lock(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock_hidden);
	dqm->saved_flags = memalloc_noreclaim_save();
}
static inline void dqm_unlock(struct device_queue_manager *dqm)
{
	memalloc_noreclaim_restore(dqm->saved_flags);
	mutex_unlock(&dqm->lock_hidden);
}

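/*
 * Illustrative sketch, not part of the driver: any code touching dqm state
 * is expected to take the lock through dqm_lock()/dqm_unlock() so that the
 * critical section is also marked non-reclaiming, per the comment above.
 * A hypothetical helper following that pattern:
 *
 *	static int example_active_queue_count(struct device_queue_manager *dqm)
 *	{
 *		int count;
 *
 *		dqm_lock(dqm);
 *		count = dqm->active_queue_count;
 *		dqm_unlock(dqm);
 *
 *		return count;
 *	}
 */
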
static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val)
{
	/* SDMA activity counter is stored at queue's RPTR + 0x8 location. */
	return get_user(*val, q_rptr + 1);
}
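
/*
 * Illustrative sketch, not part of the driver: get_user() above dereferences
 * a user-space pointer, so the read must happen in the context of the mm that
 * owns the queue. A hypothetical caller, assuming "q" is an SDMA queue whose
 * properties.read_ptr holds the user-mode read pointer:
 *
 *	uint64_t sdma_activity;
 *
 *	if (!read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
 *				     &sdma_activity))
 *		pr_debug("SDMA activity: %llu\n",
 *			 (unsigned long long)sdma_activity);
 */
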
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */