// SPDX-License-Identifier: MIT
/*
 * Copyright © 2026 Intel Corporation
 */

#include <linux/kernel.h>
#include <linux/slab.h>

#include <drm/drm_managed.h>

#include "instructions/xe_mi_commands.h"
#include "xe_bo.h"
#include "xe_device_types.h"
#include "xe_map.h"
#include "xe_mem_pool.h"
#include "xe_mem_pool_types.h"
#include "xe_tile_printk.h"

/**
 * struct xe_mem_pool - DRM MM pool for sub-allocating memory from a BO on an
 * XE tile.
 *
 * The XE memory pool is a DRM MM manager that provides sub-allocation of memory
 * from a backing buffer object (BO) on a specific XE tile. It is designed to
 * manage memory for GPU workloads, allowing for efficient allocation and
 * deallocation of memory regions within the BO.
 *
 * The memory pool maintains a primary BO that is pinned in the GGTT and mapped
 * into the CPU address space for direct access. Optionally, it can also maintain
 * a shadow BO that can be used for atomic updates to the primary BO's contents.
 *
 * The API provided by the memory pool allows clients to allocate and free memory
 * regions, retrieve GPU and CPU addresses, and synchronize data between the
 * primary and shadow BOs as needed.
 */
struct xe_mem_pool {
	/** @base: Range allocator over the managed [0, size) range, in bytes. */
	struct drm_mm base;
	/** @bo: Active pool BO (GGTT-pinned, CPU-mapped). */
	struct xe_bo *bo;
	/** @shadow: Shadow BO for atomic command updates. */
	struct xe_bo *shadow;
	/** @swap_guard: Mutex serializing swaps and updates of @bo and @shadow. */
	struct mutex swap_guard;
	/** @cpu_addr: CPU virtual address of the active BO. */
	void *cpu_addr;
	/** @is_iomem: Indicates if the BO mapping is I/O memory. */
	bool is_iomem;
};
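
/*
 * A minimal usage sketch (illustrative only, not driver code): create a pool
 * on a tile, carve a node out of it, and release it. The tile pointer and
 * the sizes are assumptions for the example.
 *
 *	struct xe_mem_pool *pool = xe_mem_pool_init(tile, SZ_2M, SZ_4K, 0);
 *	struct xe_mem_pool_node *node = xe_mem_pool_alloc_node();
 *
 *	xe_mem_pool_insert_node(pool, node, SZ_4K);
 *	... use xe_mem_pool_node_cpu_addr(node) ...
 *	xe_mem_pool_free_node(node);
 */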

static struct xe_mem_pool *node_to_pool(struct xe_mem_pool_node *node)
{
	return container_of(node->sa_node.mm, struct xe_mem_pool, base);
}

static struct xe_tile *pool_to_tile(struct xe_mem_pool *pool)
{
	return pool->bo->tile;
}

static void fini_pool_action(struct drm_device *drm, void *arg)
{
	struct xe_mem_pool *pool = arg;

	if (pool->is_iomem)
		kvfree(pool->cpu_addr);

	drm_mm_takedown(&pool->base);
}

static int pool_shadow_init(struct xe_mem_pool *pool)
{
	struct xe_tile *tile = pool->bo->tile;
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *shadow;
	int ret;

	xe_assert(xe, !pool->shadow);

	ret = drmm_mutex_init(&xe->drm, &pool->swap_guard);
	if (ret)
		return ret;

	/*
	 * Prime lockdep: record that swap_guard may be taken in the memory
	 * reclaim path, so allocating while holding it is flagged right away.
	 */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&pool->swap_guard);
		fs_reclaim_release(GFP_KERNEL);
	}

	shadow = xe_managed_bo_create_pin_map(xe, tile,
					      xe_bo_size(pool->bo),
					      XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					      XE_BO_FLAG_GGTT |
					      XE_BO_FLAG_GGTT_INVALIDATE |
					      XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(shadow))
		return PTR_ERR(shadow);

	pool->shadow = shadow;

	return 0;
}

/**
 * xe_mem_pool_init() - Initialize memory pool.
 * @tile: the &xe_tile to allocate the pool memory on.
 * @size: number of bytes to allocate.
 * @guard: the size of the guard region at the end of the BO that is not
 * sub-allocated, in bytes.
 * @flags: flags controlling optional features such as the shadow pool.
 *
 * Initializes a memory pool for sub-allocating memory from a backing BO on the
 * specified XE tile. The backing BO is pinned in the GGTT and mapped into
 * the CPU address space for direct access. Optionally, a shadow BO can also be
 * initialized for atomic updates to the primary BO's contents.
 *
 * Returns: a pointer to the &xe_mem_pool, or an error pointer on failure.
 */
struct xe_mem_pool *xe_mem_pool_init(struct xe_tile *tile, u32 size,
				     u32 guard, int flags)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mem_pool *pool;
	struct xe_bo *bo;
	u32 managed_size;
	int ret;

	xe_tile_assert(tile, size > guard);
	managed_size = size - guard;

	pool = drmm_kzalloc(&xe->drm, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo)) {
		xe_tile_err(tile, "Failed to prepare %uKiB BO for mem pool (%pe)\n",
			    size / SZ_1K, bo);
		return ERR_CAST(bo);
	}
	pool->bo = bo;
	pool->is_iomem = bo->vmap.is_iomem;

	if (pool->is_iomem) {
		pool->cpu_addr = kvzalloc(size, GFP_KERNEL);
		if (!pool->cpu_addr)
			return ERR_PTR(-ENOMEM);
	} else {
		pool->cpu_addr = bo->vmap.vaddr;
	}

	if (flags & XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY) {
		ret = pool_shadow_init(pool);
		if (ret)
			goto out_err;
	}

	drm_mm_init(&pool->base, 0, managed_size);
	ret = drmm_add_action_or_reset(&xe->drm, fini_pool_action, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;

out_err:
	xe_tile_err(tile, "Failed to initialize shadow BO for mem pool (%d)\n", ret);
	if (pool->is_iomem)
		kvfree(pool->cpu_addr);
	return ERR_PTR(ret);
}
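
/*
 * For reference, a hedged sketch of both init modes; the variable names and
 * sizes are assumptions. A plain pool needs no flags, while a pool intended
 * for atomic updates passes XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY:
 *
 *	pool = xe_mem_pool_init(tile, pool_size, guard_size, 0);
 *	shadow_pool = xe_mem_pool_init(tile, pool_size, guard_size,
 *				       XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY);
 */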

/**
 * xe_mem_pool_sync() - Copy the entire contents of the main pool to shadow pool.
 * @pool: the memory pool containing the primary and shadow BOs.
 *
 * Copies the entire contents of the primary pool to the shadow pool. This must
 * be done after xe_mem_pool_init() with the XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY
 * flag to ensure that the shadow pool has the same initial contents as the primary
 * pool. After this initial synchronization, clients can choose to synchronize the
 * shadow pool with the primary pool on a per-node basis using
 * xe_mem_pool_sync_shadow_locked() as needed.
 *
 * Return: None.
 */
void xe_mem_pool_sync(struct xe_mem_pool *pool)
{
	struct xe_tile *tile = pool_to_tile(pool);
	struct xe_device *xe = tile_to_xe(tile);

	xe_tile_assert(tile, pool->shadow);

	xe_map_memcpy_to(xe, &pool->shadow->vmap, 0,
			 pool->cpu_addr, xe_bo_size(pool->bo));
}
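
/*
 * Sketch of the expected call order when a shadow copy is requested
 * (assumed names and error-handling style):
 *
 *	pool = xe_mem_pool_init(tile, size, guard,
 *				XE_MEM_POOL_BO_FLAG_INIT_SHADOW_COPY);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	... populate the pool through its CPU address ...
 *	xe_mem_pool_sync(pool);
 */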

/**
 * xe_mem_pool_swap_shadow_locked() - Swap the primary BO with the shadow BO.
 * @pool: the memory pool containing the primary and shadow BOs.
 *
 * Swaps the primary buffer object with the shadow buffer object in the mem
 * pool. This allows for atomic updates to the contents of the primary BO
 * by first writing to the shadow BO and then swapping it with the primary BO.
 * The pool's swap_guard mutex must be held to synchronize with any concurrent
 * swap operations.
 *
 * Return: None.
 */
void xe_mem_pool_swap_shadow_locked(struct xe_mem_pool *pool)
{
	struct xe_tile *tile = pool_to_tile(pool);

	xe_tile_assert(tile, pool->shadow);
	lockdep_assert_held(&pool->swap_guard);

	swap(pool->bo, pool->shadow);
	if (!pool->bo->vmap.is_iomem)
		pool->cpu_addr = pool->bo->vmap.vaddr;
}

/**
 * xe_mem_pool_sync_shadow_locked() - Copy node from primary pool to shadow pool.
 * @node: the node allocated in the memory pool.
 *
 * Copies the contents of the specified node from the primary pool to the
 * shadow pool. The pool's swap_guard mutex must be held to synchronize with
 * any concurrent swap operations.
 *
 * Return: None.
 */
void xe_mem_pool_sync_shadow_locked(struct xe_mem_pool_node *node)
{
	struct xe_mem_pool *pool = node_to_pool(node);
	struct xe_tile *tile = pool_to_tile(pool);
	struct xe_device *xe = tile_to_xe(tile);
	struct drm_mm_node *sa_node = &node->sa_node;

	xe_tile_assert(tile, pool->shadow);
	lockdep_assert_held(&pool->swap_guard);

	xe_map_memcpy_to(xe, &pool->shadow->vmap,
			 sa_node->start,
			 pool->cpu_addr + sa_node->start,
			 sa_node->size);
}

/**
 * xe_mem_pool_gpu_addr() - Retrieve GPU address of memory pool.
 * @pool: the memory pool
 *
 * Returns: GGTT address of the memory pool.
 */
u64 xe_mem_pool_gpu_addr(struct xe_mem_pool *pool)
{
	return xe_bo_ggtt_addr(pool->bo);
}
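
/*
 * A node's GPU address follows from the pool base, mirroring how
 * xe_mem_pool_node_cpu_addr() offsets the CPU address (sketch, assuming
 * "node" was inserted into this pool):
 *
 *	u64 node_gpu_addr = xe_mem_pool_gpu_addr(pool) + node->sa_node.start;
 */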

/**
 * xe_mem_pool_cpu_addr() - Retrieve CPU address of the memory pool.
 * @pool: the memory pool
 *
 * Returns: CPU virtual address of the memory pool.
 */
void *xe_mem_pool_cpu_addr(struct xe_mem_pool *pool)
{
	return pool->cpu_addr;
}

/**
 * xe_mem_pool_bo_swap_guard() - Retrieve the mutex used to guard swap
 * operations on a memory pool.
 * @pool: the memory pool
 *
 * Returns: Swap guard mutex, or NULL if the shadow pool is not created.
 */
struct mutex *xe_mem_pool_bo_swap_guard(struct xe_mem_pool *pool)
{
	if (!pool->shadow)
		return NULL;

	return &pool->swap_guard;
}
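
/*
 * Sketch of one plausible atomic-update flow using the swap/sync helpers
 * above: under the guard, bring the shadow up to date for the node being
 * changed, then swap so the shadow becomes the active BO. "pool" and "node"
 * are assumed to come from a shadow-enabled pool; the caller is assumed to
 * re-point the GPU at the new GGTT address afterwards.
 *
 *	struct mutex *guard = xe_mem_pool_bo_swap_guard(pool);
 *
 *	... update the node contents via xe_mem_pool_node_cpu_addr(node) ...
 *	mutex_lock(guard);
 *	xe_mem_pool_sync_shadow_locked(node);
 *	xe_mem_pool_swap_shadow_locked(pool);
 *	mutex_unlock(guard);
 */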

/**
 * xe_mem_pool_bo_flush_write() - Copy the data from the sub-allocation
 * to the GPU memory.
 * @node: the node allocated in the memory pool to flush.
 *
 * This is a no-op when the pool BO is not I/O memory, since writes through
 * the CPU address then land directly in the BO.
 */
void xe_mem_pool_bo_flush_write(struct xe_mem_pool_node *node)
{
	struct xe_mem_pool *pool = node_to_pool(node);
	struct xe_tile *tile = pool_to_tile(pool);
	struct xe_device *xe = tile_to_xe(tile);
	struct drm_mm_node *sa_node = &node->sa_node;

	if (!pool->bo->vmap.is_iomem)
		return;

	xe_map_memcpy_to(xe, &pool->bo->vmap, sa_node->start,
			 pool->cpu_addr + sa_node->start,
			 sa_node->size);
}

/**
 * xe_mem_pool_bo_sync_read() - Copy the data from GPU memory to the
 * sub-allocation.
 * @node: the node allocated in the memory pool to read back.
 *
 * This is a no-op when the pool BO is not I/O memory, since reads through
 * the CPU address already see the BO contents.
 */
void xe_mem_pool_bo_sync_read(struct xe_mem_pool_node *node)
{
	struct xe_mem_pool *pool = node_to_pool(node);
	struct xe_tile *tile = pool_to_tile(pool);
	struct xe_device *xe = tile_to_xe(tile);
	struct drm_mm_node *sa_node = &node->sa_node;

	if (!pool->bo->vmap.is_iomem)
		return;

	xe_map_memcpy_from(xe, pool->cpu_addr + sa_node->start,
			   &pool->bo->vmap, sa_node->start, sa_node->size);
}
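
/*
 * Sketch pairing the two helpers above: CPU writes go through the node's
 * CPU address and are flushed to the BO, while readback pulls the BO
 * contents into the CPU copy first. "node" and "xe" are assumed context;
 * MI_BATCH_BUFFER_END comes from xe_mi_commands.h.
 *
 *	u32 *vaddr = xe_mem_pool_node_cpu_addr(node);
 *
 *	vaddr[0] = MI_BATCH_BUFFER_END;
 *	xe_mem_pool_bo_flush_write(node);
 *
 *	xe_mem_pool_bo_sync_read(node);
 *	drm_dbg(&xe->drm, "dw0 = 0x%08x\n", vaddr[0]);
 */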

/**
 * xe_mem_pool_alloc_node() - Allocate a new node for use with xe_mem_pool.
 *
 * Returns: pointer to a new &xe_mem_pool_node, or ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
struct xe_mem_pool_node *xe_mem_pool_alloc_node(void)
{
	struct xe_mem_pool_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return ERR_PTR(-ENOMEM);

	return node;
}

/**
 * xe_mem_pool_insert_node() - Insert a node into the memory pool.
 * @pool: the memory pool to insert into
 * @node: the node to insert
 * @size: the size of the node to be allocated in bytes.
 *
 * Inserts a node into the specified memory pool using drm_mm for
 * allocation.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int xe_mem_pool_insert_node(struct xe_mem_pool *pool,
			    struct xe_mem_pool_node *node, u32 size)
{
	if (!pool)
		return -EINVAL;

	return drm_mm_insert_node(&pool->base, &node->sa_node, size);
}
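
/*
 * Sketch of the two-step allocation (assumed error-handling style). Note
 * that a node which was never inserted must be freed with kfree() directly,
 * since xe_mem_pool_free_node() also removes the node from the allocator:
 *
 *	struct xe_mem_pool_node *node = xe_mem_pool_alloc_node();
 *	int err;
 *
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	err = xe_mem_pool_insert_node(pool, node, SZ_4K);
 *	if (err) {
 *		kfree(node);
 *		return err;
 *	}
 */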

/**
 * xe_mem_pool_free_node() - Free a node allocated from the memory pool.
 * @node: the node to free
 *
 * Returns: None.
 */
void xe_mem_pool_free_node(struct xe_mem_pool_node *node)
{
	if (!node)
		return;

	drm_mm_remove_node(&node->sa_node);
	kfree(node);
}

/**
 * xe_mem_pool_node_cpu_addr() - Retrieve CPU address of the node.
 * @node: the node allocated in the memory pool
 *
 * Returns: CPU virtual address of the node.
 */
void *xe_mem_pool_node_cpu_addr(struct xe_mem_pool_node *node)
{
	struct xe_mem_pool *pool = node_to_pool(node);

	return xe_mem_pool_cpu_addr(pool) + node->sa_node.start;
}

/**
 * xe_mem_pool_dump() - Dump the state of the DRM MM manager for debugging.
 * @pool: the memory pool whose state is to be dumped.
 * @p: The DRM printer to use for output.
 *
 * Only the drm_mm-managed range is dumped, not the state of the BOs or any
 * other pool information.
 *
 * Returns: None.
 */
void xe_mem_pool_dump(struct xe_mem_pool *pool, struct drm_printer *p)
{
	drm_mm_print(&pool->base, p);
}
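
/*
 * Sketch of wiring the dump into kernel-log output (assumed context; a
 * debugfs seq_file printer would work the same way):
 *
 *	struct drm_printer p = drm_info_printer(xe->drm.dev);
 *
 *	xe_mem_pool_dump(pool, &p);
 */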