/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#ifndef __LINUX_DMA_FENCE_UNWRAP_H
#define __LINUX_DMA_FENCE_UNWRAP_H

struct dma_fence;

/**
 * struct dma_fence_unwrap - cursor into the container structure
 *
 * Should be used with the dma_fence_unwrap_for_each() iterator macro.
 */
struct dma_fence_unwrap {
	/**
	 * @chain: potential dma_fence_chain, but can be another fence as well
	 */
	struct dma_fence *chain;
	/**
	 * @array: potential dma_fence_array, but can be another fence as well
	 */
	struct dma_fence *array;
	/**
	 * @index: last returned index if @array is really a dma_fence_array
	 */
	unsigned int index;
};

struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
					 struct dma_fence_unwrap *cursor);
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor);

/**
 * dma_fence_unwrap_for_each - iterate over all fences in containers
 * @fence: current fence
 * @cursor: current position inside the containers
 * @head: starting point for the iterator
 *
 * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
 * potential fences in them. If @head is just a normal fence, only that one is
 * returned.
 */
#define dma_fence_unwrap_for_each(fence, cursor, head)			\
	for (fence = dma_fence_unwrap_first(head, cursor); fence;	\
	     fence = dma_fence_unwrap_next(cursor))
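
/*
 * Illustrative usage sketch (not part of this header): walk every fence
 * wrapped by a potentially nested chain/array container and check whether
 * all of them have signaled.  dma_fence_is_signaled() comes from
 * linux/dma-fence.h; the cursor lives on the caller's stack and needs no
 * cleanup.  The helper name below is purely hypothetical.
 *
 *	static bool all_fences_signaled(struct dma_fence *head)
 *	{
 *		struct dma_fence_unwrap cursor;
 *		struct dma_fence *f;
 *
 *		dma_fence_unwrap_for_each(f, &cursor, head) {
 *			if (!dma_fence_is_signaled(f))
 *				return false;
 *		}
 *		return true;
 *	}
 */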

struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
					   struct dma_fence **fences,
					   struct dma_fence_unwrap *cursors);

/**
 * dma_fence_unwrap_merge - unwrap and merge fences
 *
 * All fences given as parameters are unwrapped and merged back together as a
 * flat dma_fence_array. Useful if multiple containers need to be merged
 * together.
 *
 * Implemented as a macro to allocate the necessary arrays on the stack and
 * account the stack frame size to the caller.
 *
 * Returns NULL on memory allocation failure, otherwise a dma_fence object
 * representing all the given fences.
 */
#define dma_fence_unwrap_merge(...)					\
	({								\
		struct dma_fence *__f[] = { __VA_ARGS__ };		\
		struct dma_fence_unwrap __c[ARRAY_SIZE(__f)];		\
									\
		__dma_fence_unwrap_merge(ARRAY_SIZE(__f), __f, __c);	\
	})
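
/*
 * Illustrative usage sketch (not part of this header): flatten two fences,
 * each of which may itself be a chain or array, into a single fence.  The
 * returned fence holds its own references and must eventually be released
 * with dma_fence_put().  fence_a and fence_b are placeholder names.
 *
 *	struct dma_fence *merged;
 *
 *	merged = dma_fence_unwrap_merge(fence_a, fence_b);
 *	if (!merged)
 *		return -ENOMEM;
 */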

#endif