xref: /linux/fs/ceph/ioctl.h (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FS_CEPH_IOCTL_H
#define FS_CEPH_IOCTL_H

#include <linux/ioctl.h>
#include <linux/types.h>

#define CEPH_IOCTL_MAGIC 0x97

/*
 * CEPH_IOC_GET_LAYOUT - get file layout or dir layout policy
 * CEPH_IOC_SET_LAYOUT - set file layout
 * CEPH_IOC_SET_LAYOUT_POLICY - set dir layout policy
 *
 * The file layout specifies how file data is striped over objects in
 * the distributed object store, which object pool they belong to (if
 * it differs from the default), and an optional 'preferred osd' to
 * store them on.
 *
 * Files get a new layout based on the policy set on the containing
 * directory or one of its ancestors.  The GET_LAYOUT ioctl will let
 * you examine the layout for a file or the policy on a directory.
 *
 * SET_LAYOUT will let you set a layout on a newly created file.  This
 * only works immediately after the file is created and before any
 * data is written to it.
 *
 * SET_LAYOUT_POLICY will let you set a layout policy (default layout)
 * on a directory that will apply to any new files created in that
 * directory (or any child directory that doesn't specify a layout of
 * its own).
 */

/* use u64 to align sanely on all archs */
struct ceph_ioctl_layout {
	__u64 stripe_unit;		/* stripe unit size, in bytes */
	__u64 stripe_count;		/* number of objects to stripe across */
	__u64 object_size;		/* object size, in bytes */
	__u64 data_pool;		/* data pool id */

	/* obsolete: new values are ignored, reads always return -1 */
	__s64 preferred_osd;
};

#define CEPH_IOC_GET_LAYOUT _IOR(CEPH_IOCTL_MAGIC, 1,		\
				   struct ceph_ioctl_layout)
#define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2,		\
				   struct ceph_ioctl_layout)
#define CEPH_IOC_SET_LAYOUT_POLICY _IOW(CEPH_IOCTL_MAGIC, 5,	\
				   struct ceph_ioctl_layout)

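/*
 * Illustrative userspace sketch (not part of the original header):
 * reading and setting a file layout via the ioctls above.  Assumes
 * 'fd' is a descriptor for a file on a CephFS mount and that
 * <sys/ioctl.h> and <stdio.h> are included; error handling is mostly
 * omitted for brevity.
 *
 *	struct ceph_ioctl_layout l;
 *
 *	if (ioctl(fd, CEPH_IOC_GET_LAYOUT, &l) == 0)
 *		printf("stripe_unit=%llu stripe_count=%llu object_size=%llu\n",
 *		       (unsigned long long)l.stripe_unit,
 *		       (unsigned long long)l.stripe_count,
 *		       (unsigned long long)l.object_size);
 *
 *	l.stripe_unit = 1 << 20;
 *	if (ioctl(fd, CEPH_IOC_SET_LAYOUT, &l) < 0)
 *		perror("CEPH_IOC_SET_LAYOUT");
 *
 * Remember that SET_LAYOUT only succeeds on a newly created file before
 * any data has been written to it.
 */
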
/*
 * CEPH_IOC_GET_DATALOC - get location of file data in the cluster
 *
 * Extract the identity and address of the OSD and the name of the
 * object that store the data at a given file offset.
 */
struct ceph_ioctl_dataloc {
	__u64 file_offset;           /* in+out: file offset */
	__u64 object_offset;         /* out: offset in object */
	__u64 object_no;             /* out: object # */
	__u64 object_size;           /* out: object size */
	char object_name[64];        /* out: object name */
	__u64 block_offset;          /* out: offset in block */
	__u64 block_size;            /* out: block length */
	__s64 osd;                   /* out: osd # */
	struct sockaddr_storage osd_addr; /* out: osd address */
};

#define CEPH_IOC_GET_DATALOC _IOWR(CEPH_IOCTL_MAGIC, 3,	\
				   struct ceph_ioctl_dataloc)

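/*
 * Illustrative sketch (not part of the original header): mapping a file
 * offset to the object and OSD that store it.  Assumes 'fd' refers to a
 * file on a CephFS mount; error handling is omitted.
 *
 *	struct ceph_ioctl_dataloc dl = { .file_offset = 4 * 1024 * 1024 };
 *
 *	if (ioctl(fd, CEPH_IOC_GET_DATALOC, &dl) == 0)
 *		printf("offset %llu -> object %s on osd %lld\n",
 *		       (unsigned long long)dl.file_offset,
 *		       dl.object_name, (long long)dl.osd);
 */
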
/*
 * CEPH_IOC_LAZYIO - relax consistency
 *
 * Normally Ceph switches to synchronous IO when multiple clients have
 * the file open (and one or more of them has it open for write).  Reads
 * and writes then bypass the page cache and go directly to the OSD.
 * Setting this flag on a file descriptor will allow buffered IO for
 * this file in cases where the application knows it won't interfere
 * with other nodes (or doesn't care).
 */
#define CEPH_IOC_LAZYIO _IO(CEPH_IOCTL_MAGIC, 4)

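/*
 * Illustrative sketch (not part of the original header): enabling lazy
 * IO on an already-open CephFS file descriptor.  The ioctl takes no
 * argument; a negative return indicates failure.
 *
 *	if (ioctl(fd, CEPH_IOC_LAZYIO) < 0)
 *		perror("CEPH_IOC_LAZYIO");
 */
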
/*
 * CEPH_IOC_SYNCIO - force synchronous IO
 *
 * This ioctl sets a file flag that forces synchronous IO, bypassing the
 * page cache even when it is not strictly necessary.  This is
 * essentially the opposite of CEPH_IOC_LAZYIO: it forces the same
 * read/write path used when a file is open on multiple clients and one
 * or more of them has it open for write.
 *
 * Note that this type of sync IO takes a different path than a file
 * opened with O_SYNC/O_DSYNC (where writes hit the page cache and are
 * immediately flushed on page boundaries).  It is very similar to
 * O_DIRECT (writes bypass the page cache), except that O_DIRECT writes
 * are not copied (the user pages must remain stable) and O_DIRECT has
 * alignment restrictions (on the buffer and the file offset).
 */
#define CEPH_IOC_SYNCIO _IO(CEPH_IOCTL_MAGIC, 5)

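/*
 * Illustrative sketch (not part of the original header): forcing
 * synchronous, page-cache-bypassing IO on a descriptor without the
 * alignment requirements of O_DIRECT.
 *
 *	if (ioctl(fd, CEPH_IOC_SYNCIO) < 0)
 *		perror("CEPH_IOC_SYNCIO");
 */
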
#endif