// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "xe_page_reclaim.h"

#include "xe_gt_stats.h"
#include "xe_macros.h"
#include "xe_pat.h"
#include "xe_sa.h"
#include "xe_tlb_inval_types.h"

/**
 * xe_page_reclaim_skip() - Decide whether PRL should be skipped for a VMA
 * @tile: Tile owning the VMA
 * @vma: VMA under consideration
 *
 * PPC flushing may already be handled by HW for specific PAT encodings,
 * making a SW-issued flush redundant. Skip PPC flushing/page reclaim in
 * the cases below:
 * - pat_index maps to transient display (l3_policy == 1)
 *
 * Return: true when page reclamation is unnecessary, false otherwise.
 */
bool xe_page_reclaim_skip(struct xe_tile *tile, struct xe_vma *vma)
{
	u8 l3_policy;

	l3_policy = xe_pat_index_get_l3_policy(tile->xe, vma->attr.pat_index);

	/*
	 * l3_policy: 0 = WB, 1 = XD ("WB - Transient Display"), 3 = UC
	 *
	 * Transient display flushes (l3_policy == 1) are handled by HW,
	 * which sequences them at various sync points; any page
	 * reclamation event will hit one of those sync points before the
	 * reclamation itself could execute.
	 */
	return (l3_policy == XE_L3_POLICY_XD);
}
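
/*
 * Example: a hypothetical caller gating PRL bookkeeping on the helper
 * above. This is a sketch only; xe_page_reclaim_list_append() is an
 * assumed helper name, not an API defined in this file:
 *
 *	if (!xe_page_reclaim_skip(tile, vma))
 *		err = xe_page_reclaim_list_append(&vm->prl, vma);
 */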

/**
 * xe_page_reclaim_create_prl_bo() - Back a PRL with a suballocated GGTT BO
 * @tlb_inval: TLB invalidation frontend associated with the request
 * @prl: Page reclaim list whose CPU copy is written into the BO
 * @fence: TLB invalidation fence the page reclaim action is paired with
 *
 * Suballocates a BO (at most one 4K page) out of the tile reclaim pool,
 * copies the PRL CPU copy into the BO and queues the buffer for release
 * when @fence signals.
 *
 * Return: struct drm_suballoc pointer on success or ERR_PTR on failure.
 */
struct drm_suballoc *xe_page_reclaim_create_prl_bo(struct xe_tlb_inval *tlb_inval,
						   struct xe_page_reclaim_list *prl,
						   struct xe_tlb_inval_fence *fence)
{
	struct xe_gt *gt = container_of(tlb_inval, struct xe_gt, tlb_inval);
	struct xe_tile *tile = gt_to_tile(gt);
	/*
	 * (+1) for a NULL page_reclaim_entry terminating the list; a full
	 * list is implicitly terminated by XE_PAGE_RECLAIM_MAX_ENTRIES.
	 */
	int prl_size = min(prl->num_entries + 1, XE_PAGE_RECLAIM_MAX_ENTRIES) *
		sizeof(struct xe_guc_page_reclaim_entry);
	struct drm_suballoc *prl_sa;

	/* Maximum size of a PRL is one 4K page */
	prl_sa = __xe_sa_bo_new(tile->mem.reclaim_pool, prl_size, GFP_ATOMIC);
	if (IS_ERR(prl_sa))
		return prl_sa;

	memcpy(xe_sa_bo_cpu_addr(prl_sa), prl->entries, prl_size);
	xe_sa_bo_flush_write(prl_sa);
	/* Queue up xe_sa_bo_free() on TLB invalidation fence signal */
	xe_sa_bo_free(prl_sa, &fence->base);

	return prl_sa;
}
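
/*
 * Example: a minimal sketch of how an invalidation path might pair a PRL
 * with its fence. Everything beyond the functions defined in this file
 * (e.g. the GGTT address helper from xe_sa.h) is an assumption for
 * illustration:
 *
 *	struct drm_suballoc *sa;
 *
 *	sa = xe_page_reclaim_create_prl_bo(tlb_inval, prl, fence);
 *	if (IS_ERR(sa))
 *		return PTR_ERR(sa);
 *
 *	// GGTT address of the BO copy, passed along with the inval request
 *	prl_addr = xe_sa_bo_gpu_addr(sa);
 */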

/**
 * xe_page_reclaim_list_invalidate() - Mark a PRL as invalid
 * @prl: Page reclaim list to reset
 *
 * Puts the list's reference on its entries, clears the entries pointer
 * and marks the list as invalid so future users know the PRL is
 * unusable. Callers are expected to be done with the entries by the
 * time this is called.
 */
void xe_page_reclaim_list_invalidate(struct xe_page_reclaim_list *prl)
{
	xe_page_reclaim_entries_put(prl->entries);
	prl->entries = NULL;
	prl->num_entries = XE_PAGE_RECLAIM_INVALID_LIST;
}

/**
 * xe_page_reclaim_list_init() - Initialize a page reclaim list
 * @prl: Page reclaim list to initialize
 *
 * Resets both fields so the list starts out empty, with no entries
 * allocated.
 */
void xe_page_reclaim_list_init(struct xe_page_reclaim_list *prl)
{
	prl->entries = NULL;
	prl->num_entries = 0;
}

/**
 * xe_page_reclaim_list_alloc_entries() - Allocate page reclaim list entries
 * @prl: Page reclaim list to allocate entries for
 *
 * Allocates one zeroed 4K page to back the PRL entries. On failure,
 * prl->entries is left NULL.
 *
 * Return: 0 on success, -ENOMEM if the page allocation fails.
 */
int xe_page_reclaim_list_alloc_entries(struct xe_page_reclaim_list *prl)
{
	struct page *page;

	if (XE_WARN_ON(prl->entries))
		return 0;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	prl->entries = page_address(page);
	prl->num_entries = 0;

	return 0;
}
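
/*
 * Example: expected lifecycle of a PRL using the helpers above (a
 * sketch; the fill step stands in for whatever populates the entries
 * and is not an API defined in this file):
 *
 *	struct xe_page_reclaim_list prl;
 *
 *	xe_page_reclaim_list_init(&prl);
 *	if (xe_page_reclaim_list_alloc_entries(&prl))
 *		return -ENOMEM;
 *
 *	// ... fill prl.entries / bump prl.num_entries while unmapping ...
 *	// ... hand off via xe_page_reclaim_create_prl_bo() ...
 *
 *	xe_page_reclaim_list_invalidate(&prl);
 */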