Lines Matching +full:sub +full:- +full:node
// SPDX-License-Identifier: MIT
/**
 * struct xe_mem_pool - DRM MM pool for sub-allocating memory from a BO on an
 * xe_tile.
 *
 * The XE memory pool is a DRM MM manager that provides sub-allocation of
 * memory from the backing BO.
 */

	/** @bo: Active pool BO (GGTT-pinned, CPU-mapped). */
/* Map a sub-allocation node back to its owning pool via the embedding drm_mm. */
static struct xe_mem_pool *node_to_pool(struct xe_mem_pool_node *node)
{
	return container_of(node->sa_node.mm, struct xe_mem_pool, base);
}

/* in pool_to_tile(): the tile that owns the pool's backing BO. */
	return pool->bo->tile;
/* in fini_pool_action(): */
	if (pool->is_iomem)
		kvfree(pool->cpu_addr);

	drm_mm_takedown(&pool->base);
/* in pool_shadow_init(): */
	struct xe_tile *tile = pool->bo->tile;
	/* ... */
	xe_assert(xe, !pool->shadow);

	ret = drmm_mutex_init(&xe->drm, &pool->swap_guard);
	/* ... */
	might_lock(&pool->swap_guard);
	/* ... shadow BO allocated with xe_bo_size(pool->bo) bytes ... */
	pool->shadow = shadow;
/**
 * xe_mem_pool_init() - Initialize memory pool.
 * ...
 * @size: ... sub-allocated, in bytes.
 *
 * Initializes a memory pool for sub-allocating memory from a backing BO on the
 * tile.
/* in xe_mem_pool_init(): */
	managed_size = size - guard;

	pool = drmm_kzalloc(&xe->drm, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);
	/* ... backing BO is created and pinned here ... */
	pool->bo = bo;
	pool->is_iomem = bo->vmap.is_iomem;

	if (pool->is_iomem) {
		pool->cpu_addr = kvzalloc(size, GFP_KERNEL);
		if (!pool->cpu_addr)
			return ERR_PTR(-ENOMEM);
	} else {
		pool->cpu_addr = bo->vmap.vaddr;
	}
	/* ... */
	drm_mm_init(&pool->base, 0, managed_size);
	ret = drmm_add_action_or_reset(&xe->drm, fini_pool_action, pool);
	/* ... error unwind: ... */
	if (bo->vmap.is_iomem)
		kvfree(pool->cpu_addr);
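/*
 * Minimal pool-creation sketch. The exact xe_mem_pool_init() parameter list
 * is not visible in the fragment above, so the (tile, size) form and the
 * example_create_pool() wrapper below are assumptions for illustration; only
 * the ERR_PTR()-style error handling follows from the returns shown above.
 */
static int example_create_pool(struct xe_tile *tile, struct xe_mem_pool **out)
{
	/* Assumed signature; SZ_64K is an arbitrary example size. */
	struct xe_mem_pool *pool = xe_mem_pool_init(tile, SZ_64K);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	*out = pool;
	return 0;
}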
/**
 * xe_mem_pool_sync() - Copy the entire contents of the primary pool to the
 * shadow pool.
 * ...
 * ... shadow pool with the primary pool on a node basis using
 * xe_mem_pool_sync_shadow_locked().
 */
/* in xe_mem_pool_sync(): */
	xe_tile_assert(tile, pool->shadow);

	xe_map_memcpy_to(xe, &pool->shadow->vmap, 0,
			 pool->cpu_addr, xe_bo_size(pool->bo));
/**
 * xe_mem_pool_swap_shadow_locked() - Swap the primary BO with the shadow BO.
 * ...
 */
/* in xe_mem_pool_swap_shadow_locked(): */
	xe_tile_assert(tile, pool->shadow);
	lockdep_assert_held(&pool->swap_guard);

	swap(pool->bo, pool->shadow);
	if (!pool->bo->vmap.is_iomem)
		pool->cpu_addr = pool->bo->vmap.vaddr;
/**
 * xe_mem_pool_sync_shadow_locked() - Copy a node from the primary pool to the
 * shadow pool.
 * @node: the node allocated in the memory pool.
 * ...
 */
void xe_mem_pool_sync_shadow_locked(struct xe_mem_pool_node *node)
{
	struct xe_mem_pool *pool = node_to_pool(node);
	struct xe_tile *tile = pool_to_tile(pool);
	struct xe_device *xe = tile_to_xe(tile);
	struct drm_mm_node *sa_node = &node->sa_node;

	xe_tile_assert(tile, pool->shadow);
	lockdep_assert_held(&pool->swap_guard);

	xe_map_memcpy_to(xe, &pool->shadow->vmap,
			 sa_node->start,
			 pool->cpu_addr + sa_node->start,
			 sa_node->size);
}
/**
 * xe_mem_pool_gpu_addr() - Retrieve GPU address of memory pool.
 * ...
 */
/* in xe_mem_pool_gpu_addr(): */
	return xe_bo_ggtt_addr(pool->bo);
/**
 * xe_mem_pool_cpu_addr() - Retrieve CPU address of the memory pool.
 * ...
 */
/* in xe_mem_pool_cpu_addr(): */
	return pool->cpu_addr;
/**
 * xe_mem_pool_bo_swap_guard() - Retrieve the mutex used to guard swap
 * operations between the primary and shadow BOs.
 * ...
 */
/* in xe_mem_pool_bo_swap_guard(): */
	if (!pool->shadow)
		return NULL;

	return &pool->swap_guard;
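/*
 * Sketch of the shadow-swap pattern implied by the locked helpers above:
 * take the swap guard if a shadow BO exists, sync the node(s) of interest
 * into the shadow, then swap primary and shadow. example_swap_to_shadow()
 * is hypothetical, and xe_mem_pool_swap_shadow_locked() is assumed to take
 * the pool; which nodes need syncing is caller policy.
 */
static void example_swap_to_shadow(struct xe_mem_pool *pool,
				   struct xe_mem_pool_node *node)
{
	struct mutex *guard = xe_mem_pool_bo_swap_guard(pool);

	if (!guard)
		return;	/* no shadow BO configured for this pool */

	mutex_lock(guard);
	xe_mem_pool_sync_shadow_locked(node);	/* copy this node's contents */
	xe_mem_pool_swap_shadow_locked(pool);	/* shadow becomes the primary */
	mutex_unlock(guard);
}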
/**
 * xe_mem_pool_bo_flush_write() - Copy the data from the sub-allocation
 * to GPU memory.
 * @node: the node allocated in the memory pool to flush.
 */
void xe_mem_pool_bo_flush_write(struct xe_mem_pool_node *node)
{
	struct xe_mem_pool *pool = node_to_pool(node);
	struct xe_tile *tile = pool_to_tile(pool);
	struct xe_device *xe = tile_to_xe(tile);
	struct drm_mm_node *sa_node = &node->sa_node;

	if (!pool->bo->vmap.is_iomem)
		return;

	xe_map_memcpy_to(xe, &pool->bo->vmap, sa_node->start,
			 pool->cpu_addr + sa_node->start,
			 sa_node->size);
}
/**
 * xe_mem_pool_bo_sync_read() - Copy the data from GPU memory to the
 * sub-allocation.
 * @node: the node allocated in the memory pool to read back.
 */
void xe_mem_pool_bo_sync_read(struct xe_mem_pool_node *node)
{
	struct xe_mem_pool *pool = node_to_pool(node);
	struct xe_tile *tile = pool_to_tile(pool);
	struct xe_device *xe = tile_to_xe(tile);
	struct drm_mm_node *sa_node = &node->sa_node;

	if (!pool->bo->vmap.is_iomem)
		return;

	xe_map_memcpy_from(xe, pool->cpu_addr + sa_node->start,
			   &pool->bo->vmap, sa_node->start, sa_node->size);
}
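/*
 * CPU-side write sketch based on the flush/read helpers above: callers write
 * through the node's CPU mirror and flush only when the backing store is I/O
 * memory (the helper itself returns early otherwise). example_write_u32() is
 * a hypothetical caller, not part of the pool API, and assumes the node is at
 * least sizeof(u32) bytes.
 */
static void example_write_u32(struct xe_mem_pool_node *node, u32 value)
{
	u32 *slot = xe_mem_pool_node_cpu_addr(node);

	*slot = value;				/* update the CPU copy */
	xe_mem_pool_bo_flush_write(node);	/* push it to the GPU-visible BO */
}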
/**
 * xe_mem_pool_alloc_node() - Allocate a new node for use with xe_mem_pool.
 * ...
 * Returns: node structure or an ERR_PTR(-ENOMEM).
 */
/* in xe_mem_pool_alloc_node(): */
	struct xe_mem_pool_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return ERR_PTR(-ENOMEM);

	return node;
/**
 * xe_mem_pool_insert_node() - Insert a node into the memory pool.
 * @pool: the memory pool
 * @node: the node to insert
 * @size: the size of the node to be allocated in bytes.
 *
 * Inserts a node into the specified memory pool using drm_mm for
 * sub-allocation.
 * ...
 */
int xe_mem_pool_insert_node(struct xe_mem_pool *pool,
			    struct xe_mem_pool_node *node, u32 size)
{
	/* ... argument validation ... */
		return -EINVAL;

	return drm_mm_insert_node(&pool->base, &node->sa_node, size);
}
/**
 * xe_mem_pool_free_node() - Free a node allocated from the memory pool.
 * @node: the node to free
 * ...
 */
void xe_mem_pool_free_node(struct xe_mem_pool_node *node)
{
	if (!node)
		return;

	drm_mm_remove_node(&node->sa_node);
	kfree(node);
}
/**
 * xe_mem_pool_node_cpu_addr() - Retrieve CPU address of the node.
 * @node: the node allocated in the memory pool
 *
 * Returns: CPU virtual address of the node.
 */
void *xe_mem_pool_node_cpu_addr(struct xe_mem_pool_node *node)
{
	struct xe_mem_pool *pool = node_to_pool(node);

	return xe_mem_pool_cpu_addr(pool) + node->sa_node.start;
}
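/*
 * End-to-end node lifecycle sketch: allocate a node object, insert it into
 * the pool's drm_mm range, touch it through its CPU address, and free it.
 * example_node_lifecycle() is hypothetical; xe_mem_pool_alloc_node() is
 * assumed to take no arguments, and the 256-byte size is arbitrary.
 */
static int example_node_lifecycle(struct xe_mem_pool *pool)
{
	struct xe_mem_pool_node *node = xe_mem_pool_alloc_node();
	int err;

	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_mem_pool_insert_node(pool, node, 256);
	if (err) {
		kfree(node);	/* not inserted yet, so plain kfree() */
		return err;
	}

	memset(xe_mem_pool_node_cpu_addr(node), 0, 256);
	xe_mem_pool_bo_flush_write(node);

	xe_mem_pool_free_node(node);	/* removes the drm_mm node, then frees */
	return 0;
}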
/**
 * xe_mem_pool_dump() - Dump the state of the DRM MM manager for debugging.
 * ...
 */
/* in xe_mem_pool_dump(): */
	drm_mm_print(&pool->base, p);
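/*
 * Debug sketch: since xe_mem_pool_dump() wraps drm_mm_print(), any struct
 * drm_printer can be passed in. The (pool, printer) argument order is an
 * assumption based on the drm_mm_print() call above; example_dump_pool() is
 * hypothetical.
 */
static void example_dump_pool(struct xe_device *xe, struct xe_mem_pool *pool)
{
	struct drm_printer p = drm_info_printer(xe->drm.dev);

	xe_mem_pool_dump(pool, &p);
}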