xref: /linux/include/uapi/linux/dma-buf.h (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1e2be04c7SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2c11e391dSDaniel Vetter /*
3c11e391dSDaniel Vetter  * Framework for buffer objects that can be shared across devices/subsystems.
4c11e391dSDaniel Vetter  *
5c11e391dSDaniel Vetter  * Copyright(C) 2015 Intel Ltd
6c11e391dSDaniel Vetter  *
7c11e391dSDaniel Vetter  * This program is free software; you can redistribute it and/or modify it
8c11e391dSDaniel Vetter  * under the terms of the GNU General Public License version 2 as published by
9c11e391dSDaniel Vetter  * the Free Software Foundation.
10c11e391dSDaniel Vetter  *
11c11e391dSDaniel Vetter  * This program is distributed in the hope that it will be useful, but WITHOUT
12c11e391dSDaniel Vetter  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13c11e391dSDaniel Vetter  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14c11e391dSDaniel Vetter  * more details.
15c11e391dSDaniel Vetter  *
16c11e391dSDaniel Vetter  * You should have received a copy of the GNU General Public License along with
17c11e391dSDaniel Vetter  * this program.  If not, see <http://www.gnu.org/licenses/>.
18c11e391dSDaniel Vetter  */
19c11e391dSDaniel Vetter 
20c11e391dSDaniel Vetter #ifndef _DMA_BUF_UAPI_H_
21c11e391dSDaniel Vetter #define _DMA_BUF_UAPI_H_
22c11e391dSDaniel Vetter 
23c11e391dSDaniel Vetter #include <linux/types.h>
24c11e391dSDaniel Vetter 
/**
 * struct dma_buf_sync - Synchronize with CPU access.
 *
 * When a DMA buffer is accessed from the CPU via mmap, it is not always
 * possible to guarantee coherency between the CPU-visible map and underlying
 * memory.  To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket
 * any CPU access to give the kernel the chance to shuffle memory around if
 * needed.
 *
 * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC
 * with DMA_BUF_SYNC_START and the appropriate read/write flags.  Once the
 * access is complete, the client should call DMA_BUF_IOCTL_SYNC with
 * DMA_BUF_SYNC_END and the same read/write flags.
 *
 * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache
 * coherency.  It does not prevent other processes or devices from
 * accessing the memory at the same time.  If synchronization with a GPU or
 * other device driver is required, it is the client's responsibility to
 * wait for buffer to be ready for reading or writing before calling this
 * ioctl with DMA_BUF_SYNC_START.  Likewise, the client must ensure that
 * follow-up work is not submitted to GPU or other device driver until
 * after this ioctl has been called with DMA_BUF_SYNC_END.
 *
 * If the driver or API with which the client is interacting uses implicit
 * synchronization, waiting for prior work to complete can be done via
 * poll() on the DMA buffer file descriptor.  If the driver or API requires
 * explicit synchronization, the client may have to wait on a sync_file or
 * other synchronization primitive outside the scope of the DMA buffer API.
 */
struct dma_buf_sync {
	/**
	 * @flags: Set of access flags
	 *
	 * DMA_BUF_SYNC_START:
	 *     Indicates the start of a map access session.
	 *
	 * DMA_BUF_SYNC_END:
	 *     Indicates the end of a map access session.
	 *
	 * DMA_BUF_SYNC_READ:
	 *     Indicates that the mapped DMA buffer will be read by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_WRITE:
	 *     Indicates that the mapped DMA buffer will be written by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_RW:
	 *     An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE.
	 */
	__u64 flags;
};
77c11e391dSDaniel Vetter 
/*
 * Access-direction flags for struct dma_buf_sync.flags (bits 0-1).
 * READ and WRITE may be combined; RW is the canonical alias for both.
 */
#define DMA_BUF_SYNC_READ      (1 << 0)
#define DMA_BUF_SYNC_WRITE     (2 << 0)
#define DMA_BUF_SYNC_RW        (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
/* Session marker in bit 2: clear = start of CPU access, set = end. */
#define DMA_BUF_SYNC_START     (0 << 2)
#define DMA_BUF_SYNC_END       (1 << 2)
/* All flag bits the kernel accepts; anything outside this mask is rejected. */
#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
	(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)

/* Buffer-name length limit for the DMA_BUF_SET_NAME* ioctls below. */
#define DMA_BUF_NAME_LEN	32
87bb2bb903SGreg Hackmann 
/**
 * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf
 *
 * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the
 * current set of fences on a dma-buf file descriptor as a sync_file.  CPU
 * waits via poll() or other driver-specific mechanisms typically wait on
 * whatever fences are on the dma-buf at the time the wait begins.  This
 * is similar except that it takes a snapshot of the current fences on the
 * dma-buf for waiting later instead of waiting immediately.  This is
 * useful for modern graphics APIs such as Vulkan which assume an explicit
 * synchronization model but still need to inter-operate with dma-buf.
 *
 * The intended usage pattern is the following:
 *
 *  1. Export a sync_file with flags corresponding to the expected GPU usage
 *     via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
 *
 *  2. Submit rendering work which uses the dma-buf.  The work should wait on
 *     the exported sync file before rendering and produce another sync_file
 *     when complete.
 *
 *  3. Import the rendering-complete sync_file into the dma-buf with flags
 *     corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
 *
 * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
 * the above is not a single atomic operation.  If userspace wants to ensure
 * ordering via these fences, it is the responsibility of userspace to use
 * locks or other mechanisms to ensure that no other context adds fences or
 * submits work between steps 1 and 3 above.
 */
struct dma_buf_export_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * the returned sync file waits on any writers of the dma-buf to
	 * complete.  Waiting on the returned sync file is equivalent to
	 * poll() with POLLIN.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
	 * any users of the dma-buf (read or write) to complete.  Waiting
	 * on the returned sync file is equivalent to poll() with POLLOUT.
	 * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
	 * is equivalent to just DMA_BUF_SYNC_WRITE.
	 */
	__u32 flags;
	/** @fd: Returned sync file descriptor */
	__s32 fd;
};
13920e10881SJason Ekstrand 
/**
 * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
 *
 * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
 * sync_file into a dma-buf for the purposes of implicit synchronization
 * with other dma-buf consumers.  This allows clients using explicitly
 * synchronized APIs such as Vulkan to inter-op with dma-buf consumers
 * which expect implicit synchronization such as OpenGL or most media
 * drivers/video.
 */
struct dma_buf_import_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * this inserts the sync_file as a read-only fence.  Any subsequent
	 * implicitly synchronized writes to this dma-buf will wait on this
	 * fence but reads will not.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
	 * write fence.  All subsequent implicitly synchronized access to
	 * this dma-buf will wait on this fence.
	 */
	__u32 flags;
	/** @fd: Sync file descriptor to import; not consumed by the ioctl */
	__s32 fd;
};
169*59474049SJason Ekstrand 
/* ioctl type ('b') shared by all dma-buf ioctls. */
#define DMA_BUF_BASE		'b'
/* Bracket CPU access to a mapped dma-buf; see struct dma_buf_sync. */
#define DMA_BUF_IOCTL_SYNC	_IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)

/* 32/64bitness of this uapi was botched in android, there's no difference
 * between them in actual uapi, they're just different numbers.
 */
#define DMA_BUF_SET_NAME	_IOW(DMA_BUF_BASE, 1, const char *)
#define DMA_BUF_SET_NAME_A	_IOW(DMA_BUF_BASE, 1, __u32)
#define DMA_BUF_SET_NAME_B	_IOW(DMA_BUF_BASE, 1, __u64)
/* Snapshot current fences as a sync_file; see struct dma_buf_export_sync_file. */
#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE	_IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
/* Attach a sync_file's fence to the dma-buf; see struct dma_buf_import_sync_file. */
#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE	_IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
181c11e391dSDaniel Vetter 
182c11e391dSDaniel Vetter #endif
183