xref: /linux/drivers/gpu/drm/xe/xe_page_reclaim.h (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #ifndef _XE_PAGE_RECLAIM_H_
7 #define _XE_PAGE_RECLAIM_H_
8 
9 #include <linux/kref.h>
10 #include <linux/mm.h>
11 #include <linux/slab.h>
12 #include <linux/types.h>
13 #include <linux/workqueue.h>
14 #include <linux/bits.h>
15 
16 #define XE_PAGE_RECLAIM_MAX_ENTRIES	512
17 #define XE_PAGE_RECLAIM_LIST_MAX_SIZE	SZ_4K
18 
19 struct xe_tlb_inval;
20 struct xe_tlb_inval_fence;
21 struct xe_tile;
22 struct xe_gt;
23 struct xe_vma;
24 
/**
 * struct xe_guc_page_reclaim_entry - single packed page reclaim list entry
 *
 * One 64-bit descriptor identifying a physical page (or power-of-two block
 * of pages) to be reclaimed. The field layout within @qw is defined by the
 * mask macros below; use FIELD_PREP()/FIELD_GET()-style helpers to pack and
 * unpack them.
 */
struct xe_guc_page_reclaim_entry {
	/** @qw: packed 64-bit reclaim descriptor, laid out per the masks below */
	u64 qw;
/* valid reclaim entry bit */
#define XE_PAGE_RECLAIM_VALID		BIT_ULL(0)
/*
 * offset order of page size to be reclaimed
 * page_size = 1 << (XE_PTE_SHIFT + reclamation_size)
 */
#define XE_PAGE_RECLAIM_SIZE		GENMASK_ULL(6, 1)
/* reserved bits 11:7 — presumably must be zero; confirm against GuC ABI */
#define XE_PAGE_RECLAIM_RSVD_0		GENMASK_ULL(11, 7)
/* lower 20 bits of the physical address */
#define XE_PAGE_RECLAIM_ADDR_LO		GENMASK_ULL(31, 12)
/* upper 20 bits of the physical address */
#define XE_PAGE_RECLAIM_ADDR_HI		GENMASK_ULL(51, 32)
/* reserved bits 63:52 — presumably must be zero; confirm against GuC ABI */
#define XE_PAGE_RECLAIM_RSVD_1		GENMASK_ULL(63, 52)
} __packed;
41 
/**
 * struct xe_page_reclaim_list - array of pages queued for reclaim (PRL)
 *
 * Holds a page-backed array of &struct xe_guc_page_reclaim_entry plus the
 * count of populated entries. @num_entries doubles as a state marker: see
 * xe_page_reclaim_list_is_new() and xe_page_reclaim_list_valid().
 */
struct xe_page_reclaim_list {
	/** @entries: array of page reclaim entries, page allocated */
	struct xe_guc_page_reclaim_entry *entries;
	/** @num_entries: number of entries */
	int num_entries;
/* sentinel @num_entries value marking the list as unusable */
#define XE_PAGE_RECLAIM_INVALID_LIST	-1
};
49 
50 /**
51  * xe_page_reclaim_list_is_new() - Check if PRL is new allocation
52  * @prl: Pointer to page reclaim list
53  *
54  * PRL indicates it hasn't been allocated through both values being NULL
55  */
56 static inline bool xe_page_reclaim_list_is_new(struct xe_page_reclaim_list *prl)
57 {
58 	return !prl->entries && prl->num_entries == 0;
59 }
60 
61 /**
62  * xe_page_reclaim_list_valid() - Check if the page reclaim list is valid
63  * @prl: Pointer to page reclaim list
64  *
65  * PRL uses the XE_PAGE_RECLAIM_INVALID_LIST to indicate that a PRL
66  * is unusable.
67  */
68 static inline bool xe_page_reclaim_list_valid(struct xe_page_reclaim_list *prl)
69 {
70 	return !xe_page_reclaim_list_is_new(prl) &&
71 	       prl->num_entries != XE_PAGE_RECLAIM_INVALID_LIST;
72 }
73 
74 bool xe_page_reclaim_skip(struct xe_tile *tile, struct xe_vma *vma);
75 struct drm_suballoc *xe_page_reclaim_create_prl_bo(struct xe_tlb_inval *tlb_inval,
76 						   struct xe_page_reclaim_list *prl,
77 						   struct xe_tlb_inval_fence *fence);
78 void xe_page_reclaim_list_invalidate(struct xe_page_reclaim_list *prl);
79 
/**
 * xe_page_reclaim_list_abort() - Invalidate a PRL and log an abort reason
 * @gt: GT owning the page reclaim request
 * @prl: Page reclaim list to invalidate
 * @fmt: format string for the log message with args
 *
 * Abort page reclaim process by invalidating PRL and doing any relevant logging:
 * the per-GT aborted-PRL stat counter is incremented and a debug message with
 * the caller-supplied reason is emitted. @gt and @prl are evaluated exactly
 * once each (captured into local temporaries).
 */
#define xe_page_reclaim_list_abort(gt, prl, fmt, ...)					\
	do {										\
		struct xe_gt *__gt = (gt);						\
		struct xe_page_reclaim_list *__prl = (prl);				\
											\
		xe_page_reclaim_list_invalidate(__prl);					\
		xe_gt_stats_incr(__gt, XE_GT_STATS_ID_PRL_ABORTED_COUNT, 1);		\
		vm_dbg(&gt_to_xe(__gt)->drm, "PRL aborted: " fmt, ##__VA_ARGS__);	\
	} while (0)
97 
98 void xe_page_reclaim_list_init(struct xe_page_reclaim_list *prl);
99 int xe_page_reclaim_list_alloc_entries(struct xe_page_reclaim_list *prl);
/**
 * xe_page_reclaim_entries_get() - Increment the reference count of page reclaim entries.
 * @entries: Pointer to the array of page reclaim entries. May be NULL, in
 *           which case this is a no-op.
 *
 * Takes an additional reference on the page backing @entries.
 */
static inline void xe_page_reclaim_entries_get(struct xe_guc_page_reclaim_entry *entries)
{
	if (!entries)
		return;

	get_page(virt_to_page(entries));
}
111 
/**
 * xe_page_reclaim_entries_put() - Decrement the reference count of page reclaim entries.
 * @entries: Pointer to the array of page reclaim entries. May be NULL, in
 *           which case this is a no-op.
 *
 * Drops a reference on the page backing @entries; the page is freed once
 * the last reference is released.
 */
static inline void xe_page_reclaim_entries_put(struct xe_guc_page_reclaim_entry *entries)
{
	if (!entries)
		return;

	put_page(virt_to_page(entries));
}
124 
125 #endif	/* _XE_PAGE_RECLAIM_H_ */
126