xref: /linux/drivers/gpu/drm/xe/xe_page_reclaim.h (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1b912138dSBrian Nguyen /* SPDX-License-Identifier: MIT */
2b912138dSBrian Nguyen /*
3b912138dSBrian Nguyen  * Copyright © 2025 Intel Corporation
4b912138dSBrian Nguyen  */
5b912138dSBrian Nguyen 
6b912138dSBrian Nguyen #ifndef _XE_PAGE_RECLAIM_H_
7b912138dSBrian Nguyen #define _XE_PAGE_RECLAIM_H_
8b912138dSBrian Nguyen 
9b912138dSBrian Nguyen #include <linux/kref.h>
10b912138dSBrian Nguyen #include <linux/mm.h>
11b912138dSBrian Nguyen #include <linux/slab.h>
12b912138dSBrian Nguyen #include <linux/types.h>
13b912138dSBrian Nguyen #include <linux/workqueue.h>
14b912138dSBrian Nguyen #include <linux/bits.h>
15b912138dSBrian Nguyen 
16b912138dSBrian Nguyen #define XE_PAGE_RECLAIM_MAX_ENTRIES	512
17b912138dSBrian Nguyen #define XE_PAGE_RECLAIM_LIST_MAX_SIZE	SZ_4K
18b912138dSBrian Nguyen 
192b192bebSBrian Nguyen struct xe_tlb_inval;
202b192bebSBrian Nguyen struct xe_tlb_inval_fence;
217c52f13bSBrian Nguyen struct xe_tile;
227a0e86e3SBrian Nguyen struct xe_gt;
237c52f13bSBrian Nguyen struct xe_vma;
242b192bebSBrian Nguyen 
/**
 * struct xe_guc_page_reclaim_entry - one packed GuC page reclaim descriptor
 *
 * Each entry is a single 64-bit word whose fields are packed with the
 * XE_PAGE_RECLAIM_* masks below. An array of these entries forms a page
 * reclaim list (PRL) that is handed to the GuC.
 */
struct xe_guc_page_reclaim_entry {
	/** @qw: packed 64-bit descriptor; build/read via the masks below */
	u64 qw;
/* valid reclaim entry bit */
#define XE_PAGE_RECLAIM_VALID		BIT_ULL(0)
/*
 * offset order of page size to be reclaimed
 * page_size = 1 << (XE_PTE_SHIFT + reclamation_size)
 */
#define XE_PAGE_RECLAIM_SIZE		GENMASK_ULL(6, 1)
/* reserved, must be zero */
#define XE_PAGE_RECLAIM_RSVD_0		GENMASK_ULL(11, 7)
/* lower 20 bits of the physical address */
#define XE_PAGE_RECLAIM_ADDR_LO		GENMASK_ULL(31, 12)
/* upper 20 bits of the physical address */
#define XE_PAGE_RECLAIM_ADDR_HI		GENMASK_ULL(51, 32)
/* reserved, must be zero */
#define XE_PAGE_RECLAIM_RSVD_1		GENMASK_ULL(63, 52)
} __packed;
41b912138dSBrian Nguyen 
/**
 * struct xe_page_reclaim_list - growable list of GuC page reclaim entries
 */
struct xe_page_reclaim_list {
	/** @entries: array of page reclaim entries, page allocated */
	struct xe_guc_page_reclaim_entry *entries;
	/**
	 * @num_entries: number of entries. Zero with a NULL @entries means
	 * the list was never allocated (see xe_page_reclaim_list_is_new());
	 * XE_PAGE_RECLAIM_INVALID_LIST marks the list unusable (see
	 * xe_page_reclaim_list_valid()).
	 */
	int num_entries;
/* sentinel stored in @num_entries to mark an unusable (aborted) list */
#define XE_PAGE_RECLAIM_INVALID_LIST	-1
};
49b912138dSBrian Nguyen 
50b912138dSBrian Nguyen /**
51b912138dSBrian Nguyen  * xe_page_reclaim_list_is_new() - Check if PRL is new allocation
52b912138dSBrian Nguyen  * @prl: Pointer to page reclaim list
53b912138dSBrian Nguyen  *
54b912138dSBrian Nguyen  * PRL indicates it hasn't been allocated through both values being NULL
55b912138dSBrian Nguyen  */
56b912138dSBrian Nguyen static inline bool xe_page_reclaim_list_is_new(struct xe_page_reclaim_list *prl)
57b912138dSBrian Nguyen {
58b912138dSBrian Nguyen 	return !prl->entries && prl->num_entries == 0;
59b912138dSBrian Nguyen }
60b912138dSBrian Nguyen 
61b912138dSBrian Nguyen /**
62b912138dSBrian Nguyen  * xe_page_reclaim_list_valid() - Check if the page reclaim list is valid
63b912138dSBrian Nguyen  * @prl: Pointer to page reclaim list
64b912138dSBrian Nguyen  *
65b912138dSBrian Nguyen  * PRL uses the XE_PAGE_RECLAIM_INVALID_LIST to indicate that a PRL
66b912138dSBrian Nguyen  * is unusable.
67b912138dSBrian Nguyen  */
68b912138dSBrian Nguyen static inline bool xe_page_reclaim_list_valid(struct xe_page_reclaim_list *prl)
69b912138dSBrian Nguyen {
70b912138dSBrian Nguyen 	return !xe_page_reclaim_list_is_new(prl) &&
71b912138dSBrian Nguyen 	       prl->num_entries != XE_PAGE_RECLAIM_INVALID_LIST;
72b912138dSBrian Nguyen }
73b912138dSBrian Nguyen 
/* Decide whether page reclaim should be skipped for @vma on @tile. */
bool xe_page_reclaim_skip(struct xe_tile *tile, struct xe_vma *vma);
/*
 * Create the GuC-visible buffer object backing @prl for a TLB invalidation.
 * NOTE(review): presumably the returned suballocation is released once
 * @fence signals — confirm against xe_page_reclaim.c.
 */
struct drm_suballoc *xe_page_reclaim_create_prl_bo(struct xe_tlb_inval *tlb_inval,
						   struct xe_page_reclaim_list *prl,
						   struct xe_tlb_inval_fence *fence);
/* Mark @prl unusable (num_entries = XE_PAGE_RECLAIM_INVALID_LIST). */
void xe_page_reclaim_list_invalidate(struct xe_page_reclaim_list *prl);
797a0e86e3SBrian Nguyen 
/**
 * xe_page_reclaim_list_abort() - Invalidate a PRL and log an abort reason
 * @gt: GT owning the page reclaim request
 * @prl: Page reclaim list to invalidate
 * @fmt: format string for the log message with args
 *
 * Abort page reclaim process by invalidating PRL and doing any relevant logging.
 * The abort is also counted in the owning GT's stats
 * (XE_GT_STATS_ID_PRL_ABORTED_COUNT). @gt and @prl are each evaluated
 * exactly once.
 */
#define xe_page_reclaim_list_abort(gt, prl, fmt, ...)					\
	do {										\
		struct xe_gt *__gt = (gt);						\
		struct xe_page_reclaim_list *__prl = (prl);				\
											\
		xe_page_reclaim_list_invalidate(__prl);					\
		xe_gt_stats_incr(__gt, XE_GT_STATS_ID_PRL_ABORTED_COUNT, 1);		\
		vm_dbg(&gt_to_xe(__gt)->drm, "PRL aborted: " fmt, ##__VA_ARGS__);	\
	} while (0)
977a0e86e3SBrian Nguyen 
/* Initialize @prl — presumably to the "new" (unallocated) state; see xe_page_reclaim.c. */
void xe_page_reclaim_list_init(struct xe_page_reclaim_list *prl);
/* Allocate @prl->entries; returns 0 on success or a negative errno — TODO confirm. */
int xe_page_reclaim_list_alloc_entries(struct xe_page_reclaim_list *prl);
/**
 * xe_page_reclaim_entries_get() - Increment the reference count of page reclaim entries.
 * @entries: Pointer to the array of page reclaim entries, or NULL (no-op).
 *
 * Takes a reference on the page backing @entries.
 */
static inline void xe_page_reclaim_entries_get(struct xe_guc_page_reclaim_entry *entries)
{
	if (!entries)
		return;

	get_page(virt_to_page(entries));
}
111b912138dSBrian Nguyen 
/**
 * xe_page_reclaim_entries_put() - Decrement the reference count of page reclaim entries.
 * @entries: Pointer to the array of page reclaim entries, or NULL (no-op).
 *
 * Drops a reference on the page backing @entries; the page is freed once
 * the count reaches zero.
 */
static inline void xe_page_reclaim_entries_put(struct xe_guc_page_reclaim_entry *entries)
{
	if (!entries)
		return;

	put_page(virt_to_page(entries));
}
124b912138dSBrian Nguyen 
125b912138dSBrian Nguyen #endif	/* _XE_PAGE_RECLAIM_H_ */
126