xref: /linux/fs/ceph/locks.c (revision 00a6d7b6762c27d441e9ac8faff36384bc0fc180)
#include <linux/ceph/ceph_debug.h>

#include <linux/file.h>
#include <linux/namei.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/pagelist.h>

static u64 lock_secret;

static inline u64 secure_addr(void *addr)
{
	u64 v = lock_secret ^ (u64)(unsigned long)addr;
	/*
	 * Set the most significant bit, so that the MDS knows the 'owner'
	 * alone is sufficient to identify the owner of a lock. (The old
	 * code used both 'owner' and 'pid'.)
	 */
	v |= (1ULL << 63);
	return v;
}

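/*
 * Seed the module-wide secret that secure_addr() XORs into lock owner
 * pointers before they are sent to the MDS; __init, so this runs once
 * when the ceph module is loaded.
 */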
void __init ceph_flock_init(void)
{
	get_random_bytes(&lock_secret, sizeof(lock_secret));
}

/**
 * Implement fcntl and flock locking functions.
 */
static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
			     int cmd, u8 wait, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_mds_request *req;
	int err;
	u64 length = 0;
	u64 owner;

	req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	/* mds requires start and length rather than start and end */
	if (LLONG_MAX == fl->fl_end)
		length = 0;
	else
		length = fl->fl_end - fl->fl_start + 1;

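	/*
	 * The 'owner' reported to the MDS depends on the lock class: POSIX
	 * (fcntl) locks are keyed by fl_owner (typically the task's
	 * files_struct), while flock locks belong to the open file
	 * description, so fl_file is used as the key instead.
	 */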
	if (lock_type == CEPH_LOCK_FCNTL)
		owner = secure_addr(fl->fl_owner);
	else
		owner = secure_addr(fl->fl_file);

	dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
	     "start: %llu, length: %llu, wait: %d, type: %d", (int)lock_type,
	     (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
	     wait, fl->fl_type);

	req->r_args.filelock_change.rule = lock_type;
	req->r_args.filelock_change.type = cmd;
	req->r_args.filelock_change.owner = cpu_to_le64(owner);
	req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
	req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
	req->r_args.filelock_change.length = cpu_to_le64(length);
	req->r_args.filelock_change.wait = wait;

	err = ceph_mdsc_do_request(mdsc, inode, req);

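	/*
	 * For GETFILELOCK, translate the (start, length) pair in the MDS
	 * reply back into the (start, end) form the VFS expects. Only do
	 * this when the request succeeded; a failed or aborted request has
	 * no filelock reply to parse.
	 */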
	if (operation == CEPH_MDS_OP_GETFILELOCK && !err) {
		fl->fl_pid = le64_to_cpu(req->r_reply_info.filelock_reply->pid);
		if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
			fl->fl_type = F_RDLCK;
		else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
			fl->fl_type = F_WRLCK;
		else
			fl->fl_type = F_UNLCK;

		fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
		length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
			 le64_to_cpu(req->r_reply_info.filelock_reply->length);
		if (length >= 1)
			fl->fl_end = length - 1;
		else
			fl->fl_end = 0;
	}
	ceph_mdsc_put_request(req);
	dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
	     "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type,
	     (int)operation, (u64)fl->fl_pid, fl->fl_start,
	     length, wait, fl->fl_type, err);
	return err;
}

/**
 * Attempt to set an fcntl lock.
 * For now, this simply passes the request through to the MDS; it may grow
 * smarter later.
 */
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
	u8 lock_cmd;
	int err;
	u8 wait = 0;
	u16 op = CEPH_MDS_OP_SETFILELOCK;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	/* No mandatory locks */
	if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	dout("ceph_lock, fl_owner: %p", fl->fl_owner);

	/* set the wait bit as appropriate, then map the command for Ceph */
	if (IS_GETLK(cmd))
		op = CEPH_MDS_OP_GETFILELOCK;
	else if (IS_SETLKW(cmd))
		wait = 1;

	if (F_RDLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_SHARED;
	else if (F_WRLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_EXCL;
	else
		lock_cmd = CEPH_LOCK_UNLOCK;

	err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl);
	if (!err) {
		if (op != CEPH_MDS_OP_GETFILELOCK) {
			dout("mds locked, locking locally");
			err = posix_lock_file(file, fl, NULL);
			if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
				/* undo! This should only happen if
				 * the kernel detects local
				 * deadlock. */
				ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
						  CEPH_LOCK_UNLOCK, 0, fl);
				dout("got %d on posix_lock_file, undid lock",
				     err);
			}
		}

	} else if (err == -ERESTARTSYS) {
		dout("undoing lock\n");
		ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
				  CEPH_LOCK_UNLOCK, 0, fl);
	}
	return err;
}

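/*
 * Attempt to set a flock (BSD) lock. Mirrors ceph_lock(): take the lock on
 * the MDS first, then record it locally, and drop the MDS lock again if the
 * local step fails or the wait is interrupted.
 */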
int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
	u8 lock_cmd;
	int err;
	u8 wait = 0;

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	/* No mandatory locks */
	if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	dout("ceph_flock, fl_file: %p", fl->fl_file);

	if (IS_SETLKW(cmd))
		wait = 1;

	if (F_RDLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_SHARED;
	else if (F_WRLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_EXCL;
	else
		lock_cmd = CEPH_LOCK_UNLOCK;

	err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
				file, lock_cmd, wait, fl);
	if (!err) {
		err = flock_lock_file_wait(file, fl);
		if (err) {
			ceph_lock_message(CEPH_LOCK_FLOCK,
					  CEPH_MDS_OP_SETFILELOCK,
					  file, CEPH_LOCK_UNLOCK, 0, fl);
			dout("got %d on flock_lock_file_wait, undid lock", err);
		}
	} else if (err == -ERESTARTSYS) {
		dout("undoing lock\n");
		ceph_lock_message(CEPH_LOCK_FLOCK,
				  CEPH_MDS_OP_SETFILELOCK,
				  file, CEPH_LOCK_UNLOCK, 0, fl);
	}
	return err;
}

/**
 * Must be called with inode->i_lock already held. Fills in the passed
 * counter variables, so you can prepare pagelist metadata before calling
 * ceph_encode_locks_to_buffer.
 */
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
	struct file_lock *lock;

	*fcntl_count = 0;
	*flock_count = 0;

	for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
		if (lock->fl_flags & FL_POSIX)
			++(*fcntl_count);
		else if (lock->fl_flags & FL_FLOCK)
			++(*flock_count);
	}
	dout("counted %d flock locks and %d fcntl locks",
	     *flock_count, *fcntl_count);
}

/**
 * Encode the flock and fcntl locks for the given inode into the ceph_filelock
 * array. Must be called with inode->i_lock already held.
 * If we encounter more of a specific lock type than expected, return -ENOSPC.
 */
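/*
 * A sketch of the intended call pattern (the caller, e.g. the MDS reconnect
 * path, owns the allocation and the locking; the names below are
 * illustrative):
 *
 *	spin_lock(&inode->i_lock);
 *	ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
 *	spin_unlock(&inode->i_lock);
 *	flocks = kmalloc((num_fcntl_locks + num_flock_locks) *
 *			 sizeof(struct ceph_filelock), GFP_NOFS);
 *	spin_lock(&inode->i_lock);
 *	err = ceph_encode_locks_to_buffer(inode, flocks,
 *					  num_fcntl_locks, num_flock_locks);
 *	spin_unlock(&inode->i_lock);
 *
 * A -ENOSPC return means locks were added between counting and encoding;
 * the caller is expected to recount, reallocate and retry.
 */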
int ceph_encode_locks_to_buffer(struct inode *inode,
				struct ceph_filelock *flocks,
				int num_fcntl_locks, int num_flock_locks)
{
	struct file_lock *lock;
	int err = 0;
	int seen_fcntl = 0;
	int seen_flock = 0;
	int l = 0;

	dout("encoding %d flock and %d fcntl locks", num_flock_locks,
	     num_fcntl_locks);

	for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
		if (lock->fl_flags & FL_POSIX) {
			++seen_fcntl;
			if (seen_fcntl > num_fcntl_locks) {
				err = -ENOSPC;
				goto fail;
			}
			err = lock_to_ceph_filelock(lock, &flocks[l]);
			if (err)
				goto fail;
			++l;
		}
	}
	for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
		if (lock->fl_flags & FL_FLOCK) {
			++seen_flock;
			if (seen_flock > num_flock_locks) {
				err = -ENOSPC;
				goto fail;
			}
			err = lock_to_ceph_filelock(lock, &flocks[l]);
			if (err)
				goto fail;
			++l;
		}
	}
fail:
	return err;
}

/**
 * Copy the encoded flock and fcntl locks into the pagelist.
 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
 * sequential flock locks.
 * Returns zero on success.
 */
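/*
 * Illustrative layout for one fcntl lock and two flock locks (a sketch of
 * what the appends below produce; all fields are little-endian):
 *
 *	__le32                 1          number of fcntl locks
 *	struct ceph_filelock   flocks[0]  the fcntl lock
 *	__le32                 2          number of flock locks
 *	struct ceph_filelock   flocks[1]  first flock lock
 *	struct ceph_filelock   flocks[2]  second flock lock
 */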
int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
			   struct ceph_pagelist *pagelist,
			   int num_fcntl_locks, int num_flock_locks)
{
	int err = 0;
	__le32 nlocks;

	nlocks = cpu_to_le32(num_fcntl_locks);
	err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
	if (err)
		goto out_fail;

	err = ceph_pagelist_append(pagelist, flocks,
				   num_fcntl_locks * sizeof(*flocks));
	if (err)
		goto out_fail;

	nlocks = cpu_to_le32(num_flock_locks);
	err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
	if (err)
		goto out_fail;

	err = ceph_pagelist_append(pagelist,
				   &flocks[num_fcntl_locks],
				   num_flock_locks * sizeof(*flocks));
out_fail:
	return err;
}

/*
 * Given a pointer to a lock, convert it to a ceph filelock
 */
int lock_to_ceph_filelock(struct file_lock *lock,
			  struct ceph_filelock *cephlock)
{
	int err = 0;

	cephlock->start = cpu_to_le64(lock->fl_start);
	cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
	cephlock->client = cpu_to_le64(0);
	cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
	if (lock->fl_flags & FL_POSIX)
		cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));
	else
		cephlock->owner = cpu_to_le64(secure_addr(lock->fl_file));

	switch (lock->fl_type) {
	case F_RDLCK:
		cephlock->type = CEPH_LOCK_SHARED;
		break;
	case F_WRLCK:
		cephlock->type = CEPH_LOCK_EXCL;
		break;
	case F_UNLCK:
		cephlock->type = CEPH_LOCK_UNLOCK;
		break;
	default:
		dout("Have unknown lock type %d", lock->fl_type);
		err = -EINVAL;
	}

	return err;
}
339