1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 	struct TCP_Server_Info *server;
54 	struct cifsFileInfo *open_file = req->cfile;
55 	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
56 	size_t wsize = req->rreq.wsize;
57 	int rc;
58 
59 	if (!wdata->have_xid) {
60 		wdata->xid = get_xid();
61 		wdata->have_xid = true;
62 	}
63 
64 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
65 	wdata->server = server;
66 
67 	if (cifs_sb->ctx->wsize == 0)
68 		cifs_negotiate_wsize(server, cifs_sb->ctx,
69 				     tlink_tcon(req->cfile->tlink));
70 
71 retry:
72 	if (open_file->invalidHandle) {
73 		rc = cifs_reopen_file(open_file, false);
74 		if (rc < 0) {
75 			if (rc == -EAGAIN)
76 				goto retry;
77 			subreq->error = rc;
78 			return netfs_prepare_write_failed(subreq);
79 		}
80 	}
81 
82 	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
83 					   &wdata->credits);
84 	if (rc < 0) {
85 		subreq->error = rc;
86 		return netfs_prepare_write_failed(subreq);
87 	}
88 
89 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
90 	wdata->credits.rreq_debug_index = subreq->debug_index;
91 	wdata->credits.in_flight_check = 1;
92 	trace_smb3_rw_credits(wdata->rreq->debug_id,
93 			      wdata->subreq.debug_index,
94 			      wdata->credits.value,
95 			      server->credits, server->in_flight,
96 			      wdata->credits.value,
97 			      cifs_trace_rw_credits_write_prepare);
98 
99 #ifdef CONFIG_CIFS_SMB_DIRECT
100 	if (server->smbd_conn)
101 		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
102 #endif
103 }
104 
105 /*
106  * Issue a subrequest to upload to the server.
107  */
108 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
109 {
110 	struct cifs_io_subrequest *wdata =
111 		container_of(subreq, struct cifs_io_subrequest, subreq);
112 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
113 	int rc;
114 
115 	if (cifs_forced_shutdown(sbi)) {
116 		rc = -EIO;
117 		goto fail;
118 	}
119 
120 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
121 	if (rc)
122 		goto fail;
123 
124 	rc = -EAGAIN;
125 	if (wdata->req->cfile->invalidHandle)
126 		goto fail;
127 
128 	wdata->server->ops->async_writev(wdata);
129 out:
130 	return;
131 
132 fail:
133 	if (rc == -EAGAIN)
134 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
135 	else
136 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
137 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
138 	cifs_write_subrequest_terminated(wdata, rc);
139 	goto out;
140 }
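/*
 * Illustrative flow (a sketch, not the literal netfs call chain): for each
 * write subrequest the netfs library roughly does:
 *
 *	cifs_prepare_write(subreq);	// pick a channel, reserve credits,
 *					// clamp stream->sreq_max_len
 *	cifs_issue_write(subreq);	// revalidate credits, then
 *					// ->async_writev() sends the data
 *	cifs_write_subrequest_terminated(wdata, rc);	// on completion
 */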
141 
142 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
143 {
144 	cifs_invalidate_cache(wreq->inode, 0);
145 }
146 
147 /*
148  * Negotiate the size of a read operation on behalf of the netfs library.
149  */
150 static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
151 {
152 	struct netfs_io_request *rreq = subreq->rreq;
153 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
154 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
155 	struct TCP_Server_Info *server;
156 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
157 	size_t size;
158 	int rc = 0;
159 
160 	if (!rdata->have_xid) {
161 		rdata->xid = get_xid();
162 		rdata->have_xid = true;
163 	}
164 
165 	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
166 	rdata->server = server;
167 
168 	if (cifs_sb->ctx->rsize == 0)
169 		cifs_negotiate_rsize(server, cifs_sb->ctx,
170 				     tlink_tcon(req->cfile->tlink));
171 
172 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
173 					   &size, &rdata->credits);
174 	if (rc)
175 		return rc;
176 
177 	rreq->io_streams[0].sreq_max_len = size;
178 
179 	rdata->credits.in_flight_check = 1;
180 	rdata->credits.rreq_debug_id = rreq->debug_id;
181 	rdata->credits.rreq_debug_index = subreq->debug_index;
182 
183 	trace_smb3_rw_credits(rdata->rreq->debug_id,
184 			      rdata->subreq.debug_index,
185 			      rdata->credits.value,
186 			      server->credits, server->in_flight, 0,
187 			      cifs_trace_rw_credits_read_submit);
188 
189 #ifdef CONFIG_CIFS_SMB_DIRECT
190 	if (server->smbd_conn)
191 		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
192 #endif
193 	return 0;
194 }
195 
196 /*
197  * Issue a read operation on behalf of the netfs helper functions.  We're asked
198  * to make a read of a certain size at a point in the file.  We are permitted
199  * to only read a portion of that, but as long as we read something, the netfs
200  * helper will call us again so that we can issue another read.
201  */
202 static void cifs_issue_read(struct netfs_io_subrequest *subreq)
203 {
204 	struct netfs_io_request *rreq = subreq->rreq;
205 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
206 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
207 	struct TCP_Server_Info *server = rdata->server;
208 	int rc = 0;
209 
210 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
211 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
212 		 subreq->transferred, subreq->len);
213 
214 	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
215 	if (rc)
216 		goto failed;
217 
218 	if (req->cfile->invalidHandle) {
219 		do {
220 			rc = cifs_reopen_file(req->cfile, true);
221 		} while (rc == -EAGAIN);
222 		if (rc)
223 			goto failed;
224 	}
225 
226 	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
227 	    subreq->rreq->origin != NETFS_DIO_READ)
228 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
229 
230 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
231 	rc = rdata->server->ops->async_readv(rdata);
232 	if (rc)
233 		goto failed;
234 	return;
235 
236 failed:
237 	subreq->error = rc;
238 	netfs_read_subreq_terminated(subreq);
239 }
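/*
 * Note (illustrative): a short read is acceptable here.  If ->async_readv()
 * transfers less than subreq->len, the netfs helpers call back into
 * cifs_prepare_read()/cifs_issue_read() to fetch the remainder.
 */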
240 
241 /*
242  * Writeback calls this when it finds a folio that needs uploading.  This isn't
243  * called if writeback only has copy-to-cache to deal with.
244  */
245 static void cifs_begin_writeback(struct netfs_io_request *wreq)
246 {
247 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
248 	int ret;
249 
250 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
251 	if (ret) {
252 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
253 		return;
254 	}
255 
256 	wreq->io_streams[0].avail = true;
257 }
258 
259 /*
260  * Initialise a request.
261  */
262 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
263 {
264 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
265 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
266 	struct cifsFileInfo *open_file = NULL;
267 
268 	rreq->rsize = cifs_sb->ctx->rsize;
269 	rreq->wsize = cifs_sb->ctx->wsize;
270 	req->pid = current->tgid; // Ummm...  This may be a workqueue
271 
272 	if (file) {
273 		open_file = file->private_data;
274 		rreq->netfs_priv = file->private_data;
275 		req->cfile = cifsFileInfo_get(open_file);
276 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
277 			req->pid = req->cfile->pid;
278 	} else if (rreq->origin != NETFS_WRITEBACK) {
279 		WARN_ON_ONCE(1);
280 		return -EIO;
281 	}
282 
283 	return 0;
284 }
285 
286 /*
287  * Completion of a request operation.
288  */
289 static void cifs_rreq_done(struct netfs_io_request *rreq)
290 {
291 	struct timespec64 atime, mtime;
292 	struct inode *inode = rreq->inode;
293 
294 	/* we do not want atime to be less than mtime, it broke some apps */
295 	atime = inode_set_atime_to_ts(inode, current_time(inode));
296 	mtime = inode_get_mtime(inode);
297 	if (timespec64_compare(&atime, &mtime))
298 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
299 }
300 
301 static void cifs_free_request(struct netfs_io_request *rreq)
302 {
303 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
304 
305 	if (req->cfile)
306 		cifsFileInfo_put(req->cfile);
307 }
308 
309 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
310 {
311 	struct cifs_io_subrequest *rdata =
312 		container_of(subreq, struct cifs_io_subrequest, subreq);
313 	int rc = subreq->error;
314 
315 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
316 #ifdef CONFIG_CIFS_SMB_DIRECT
317 		if (rdata->mr) {
318 			smbd_deregister_mr(rdata->mr);
319 			rdata->mr = NULL;
320 		}
321 #endif
322 	}
323 
324 	if (rdata->credits.value != 0) {
325 		trace_smb3_rw_credits(rdata->rreq->debug_id,
326 				      rdata->subreq.debug_index,
327 				      rdata->credits.value,
328 				      rdata->server ? rdata->server->credits : 0,
329 				      rdata->server ? rdata->server->in_flight : 0,
330 				      -rdata->credits.value,
331 				      cifs_trace_rw_credits_free_subreq);
332 		if (rdata->server)
333 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
334 		else
335 			rdata->credits.value = 0;
336 	}
337 
338 	if (rdata->have_xid)
339 		free_xid(rdata->xid);
340 }
341 
342 const struct netfs_request_ops cifs_req_ops = {
343 	.request_pool		= &cifs_io_request_pool,
344 	.subrequest_pool	= &cifs_io_subrequest_pool,
345 	.init_request		= cifs_init_request,
346 	.free_request		= cifs_free_request,
347 	.free_subrequest	= cifs_free_subrequest,
348 	.prepare_read		= cifs_prepare_read,
349 	.issue_read		= cifs_issue_read,
350 	.done			= cifs_rreq_done,
351 	.begin_writeback	= cifs_begin_writeback,
352 	.prepare_write		= cifs_prepare_write,
353 	.issue_write		= cifs_issue_write,
354 	.invalidate_cache	= cifs_netfs_invalidate_cache,
355 };
356 
357 /*
358  * Mark all open files on the tree connection as invalid, since they
359  * were closed when the session to the server was lost.
360  */
361 void
362 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
363 {
364 	struct cifsFileInfo *open_file = NULL;
365 	struct list_head *tmp;
366 	struct list_head *tmp1;
367 
368 	/* only send once per connect */
369 	spin_lock(&tcon->tc_lock);
370 	if (tcon->need_reconnect)
371 		tcon->status = TID_NEED_RECON;
372 
373 	if (tcon->status != TID_NEED_RECON) {
374 		spin_unlock(&tcon->tc_lock);
375 		return;
376 	}
377 	tcon->status = TID_IN_FILES_INVALIDATE;
378 	spin_unlock(&tcon->tc_lock);
379 
380 	/* list all files open on tree connection and mark them invalid */
381 	spin_lock(&tcon->open_file_lock);
382 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
383 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
384 		open_file->invalidHandle = true;
385 		open_file->oplock_break_cancelled = true;
386 	}
387 	spin_unlock(&tcon->open_file_lock);
388 
389 	invalidate_all_cached_dirs(tcon);
390 	spin_lock(&tcon->tc_lock);
391 	if (tcon->status == TID_IN_FILES_INVALIDATE)
392 		tcon->status = TID_NEED_TCON;
393 	spin_unlock(&tcon->tc_lock);
394 
395 	/*
396 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
397 	 * to this tcon.
398 	 */
399 }
400 
401 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
402 {
403 	if ((flags & O_ACCMODE) == O_RDONLY)
404 		return GENERIC_READ;
405 	else if ((flags & O_ACCMODE) == O_WRONLY)
406 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
407 	else if ((flags & O_ACCMODE) == O_RDWR) {
408 		/* GENERIC_ALL is too much permission to request; it
409 		   can cause an unnecessary access-denied error on create */
410 		/* return GENERIC_ALL; */
411 		return (GENERIC_READ | GENERIC_WRITE);
412 	}
413 
414 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
415 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
416 		FILE_READ_DATA);
417 }
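/*
 * Example (illustrative only): with fscache in use, an O_WRONLY open is
 * widened so the cache can fill in around partial writes:
 *
 *	cifs_convert_flags(O_WRONLY, 0) == GENERIC_WRITE
 *	cifs_convert_flags(O_WRONLY, 1) == (GENERIC_READ | GENERIC_WRITE)
 */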
418 
419 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
420 static u32 cifs_posix_convert_flags(unsigned int flags)
421 {
422 	u32 posix_flags = 0;
423 
424 	if ((flags & O_ACCMODE) == O_RDONLY)
425 		posix_flags = SMB_O_RDONLY;
426 	else if ((flags & O_ACCMODE) == O_WRONLY)
427 		posix_flags = SMB_O_WRONLY;
428 	else if ((flags & O_ACCMODE) == O_RDWR)
429 		posix_flags = SMB_O_RDWR;
430 
431 	if (flags & O_CREAT) {
432 		posix_flags |= SMB_O_CREAT;
433 		if (flags & O_EXCL)
434 			posix_flags |= SMB_O_EXCL;
435 	} else if (flags & O_EXCL)
436 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
437 			 current->comm, current->tgid);
438 
439 	if (flags & O_TRUNC)
440 		posix_flags |= SMB_O_TRUNC;
441 	/* be safe and imply O_SYNC for O_DSYNC */
442 	if (flags & O_DSYNC)
443 		posix_flags |= SMB_O_SYNC;
444 	if (flags & O_DIRECTORY)
445 		posix_flags |= SMB_O_DIRECTORY;
446 	if (flags & O_NOFOLLOW)
447 		posix_flags |= SMB_O_NOFOLLOW;
448 	if (flags & O_DIRECT)
449 		posix_flags |= SMB_O_DIRECT;
450 
451 	return posix_flags;
452 }
453 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
454 
455 static inline int cifs_get_disposition(unsigned int flags)
456 {
457 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
458 		return FILE_CREATE;
459 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
460 		return FILE_OVERWRITE_IF;
461 	else if ((flags & O_CREAT) == O_CREAT)
462 		return FILE_OPEN_IF;
463 	else if ((flags & O_TRUNC) == O_TRUNC)
464 		return FILE_OVERWRITE;
465 	else
466 		return FILE_OPEN;
467 }
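/*
 * Example (illustrative only): creat(2), i.e. O_CREAT | O_WRONLY | O_TRUNC,
 * maps to FILE_OVERWRITE_IF, while a plain open(2) with none of the
 * create/truncate flags maps to FILE_OPEN.
 */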
468 
469 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
470 int cifs_posix_open(const char *full_path, struct inode **pinode,
471 			struct super_block *sb, int mode, unsigned int f_flags,
472 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
473 {
474 	int rc;
475 	FILE_UNIX_BASIC_INFO *presp_data;
476 	__u32 posix_flags = 0;
477 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
478 	struct cifs_fattr fattr;
479 	struct tcon_link *tlink;
480 	struct cifs_tcon *tcon;
481 
482 	cifs_dbg(FYI, "posix open %s\n", full_path);
483 
484 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
485 	if (presp_data == NULL)
486 		return -ENOMEM;
487 
488 	tlink = cifs_sb_tlink(cifs_sb);
489 	if (IS_ERR(tlink)) {
490 		rc = PTR_ERR(tlink);
491 		goto posix_open_ret;
492 	}
493 
494 	tcon = tlink_tcon(tlink);
495 	mode &= ~current_umask();
496 
497 	posix_flags = cifs_posix_convert_flags(f_flags);
498 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
499 			     poplock, full_path, cifs_sb->local_nls,
500 			     cifs_remap(cifs_sb));
501 	cifs_put_tlink(tlink);
502 
503 	if (rc)
504 		goto posix_open_ret;
505 
506 	if (presp_data->Type == cpu_to_le32(-1))
507 		goto posix_open_ret; /* open ok, caller does qpathinfo */
508 
509 	if (!pinode)
510 		goto posix_open_ret; /* caller does not need info */
511 
512 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
513 
514 	/* get new inode and set it up */
515 	if (*pinode == NULL) {
516 		cifs_fill_uniqueid(sb, &fattr);
517 		*pinode = cifs_iget(sb, &fattr);
518 		if (!*pinode) {
519 			rc = -ENOMEM;
520 			goto posix_open_ret;
521 		}
522 	} else {
523 		cifs_revalidate_mapping(*pinode);
524 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
525 	}
526 
527 posix_open_ret:
528 	kfree(presp_data);
529 	return rc;
530 }
531 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
532 
533 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
534 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
535 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
536 {
537 	int rc;
538 	int desired_access;
539 	int disposition;
540 	int create_options = CREATE_NOT_DIR;
541 	struct TCP_Server_Info *server = tcon->ses->server;
542 	struct cifs_open_parms oparms;
543 	int rdwr_for_fscache = 0;
544 
545 	if (!server->ops->open)
546 		return -ENOSYS;
547 
548 	/* If we're caching, we need to be able to fill in around partial writes. */
549 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
550 		rdwr_for_fscache = 1;
551 
552 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
553 
554 /*********************************************************************
555  *  open flag mapping table:
556  *
557  *	POSIX Flag            CIFS Disposition
558  *	----------            ----------------
559  *	O_CREAT               FILE_OPEN_IF
560  *	O_CREAT | O_EXCL      FILE_CREATE
561  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
562  *	O_TRUNC               FILE_OVERWRITE
563  *	none of the above     FILE_OPEN
564  *
565  *	Note that there is no direct match for the disposition
566  *	FILE_SUPERSEDE (ie create whether or not the file exists).
567  *	O_CREAT | O_TRUNC is similar, but it truncates the existing
568  *	file rather than creating a new file as FILE_SUPERSEDE does
569  *	(which uses the attributes / metadata passed in on the open call).
570  *?
571  *?  O_SYNC is a reasonable match to CIFS writethrough flag
572  *?  and the read write flags match reasonably.  O_LARGEFILE
573  *?  is irrelevant because largefile support is always used
574  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
575  *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
576  *********************************************************************/
577 
578 	disposition = cifs_get_disposition(f_flags);
579 
580 	/* BB pass O_SYNC flag through on file attributes .. BB */
581 
582 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
583 	if (f_flags & O_SYNC)
584 		create_options |= CREATE_WRITE_THROUGH;
585 
586 	if (f_flags & O_DIRECT)
587 		create_options |= CREATE_NO_BUFFER;
588 
589 retry_open:
590 	oparms = (struct cifs_open_parms) {
591 		.tcon = tcon,
592 		.cifs_sb = cifs_sb,
593 		.desired_access = desired_access,
594 		.create_options = cifs_create_options(cifs_sb, create_options),
595 		.disposition = disposition,
596 		.path = full_path,
597 		.fid = fid,
598 	};
599 
600 	rc = server->ops->open(xid, &oparms, oplock, buf);
601 	if (rc) {
602 		if (rc == -EACCES && rdwr_for_fscache == 1) {
603 			desired_access = cifs_convert_flags(f_flags, 0);
604 			rdwr_for_fscache = 2;
605 			goto retry_open;
606 		}
607 		return rc;
608 	}
609 	if (rdwr_for_fscache == 2)
610 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
611 
612 	/* TODO: Add support for calling posix query info but with passing in fid */
613 	if (tcon->unix_ext)
614 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
615 					      xid);
616 	else
617 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
618 					 xid, fid);
619 
620 	if (rc) {
621 		server->ops->close(xid, tcon, fid);
622 		if (rc == -ESTALE)
623 			rc = -EOPENSTALE;
624 	}
625 
626 	return rc;
627 }
628 
629 static bool
630 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
631 {
632 	struct cifs_fid_locks *cur;
633 	bool has_locks = false;
634 
635 	down_read(&cinode->lock_sem);
636 	list_for_each_entry(cur, &cinode->llist, llist) {
637 		if (!list_empty(&cur->locks)) {
638 			has_locks = true;
639 			break;
640 		}
641 	}
642 	up_read(&cinode->lock_sem);
643 	return has_locks;
644 }
645 
646 void
647 cifs_down_write(struct rw_semaphore *sem)
648 {
649 	while (!down_write_trylock(sem))
650 		msleep(10);
651 }
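/*
 * Note (sketch): this is a polling down_write(); it retries the trylock
 * every 10ms rather than sleeping as a queued waiter.  Callers use it to
 * take lock_sem for writing, e.g.:
 *
 *	cifs_down_write(&cinode->lock_sem);
 *	list_add(&fdlocks->llist, &cinode->llist);
 *	up_write(&cinode->lock_sem);
 */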
652 
653 static void cifsFileInfo_put_work(struct work_struct *work);
654 void serverclose_work(struct work_struct *work);
655 
656 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
657 				       struct tcon_link *tlink, __u32 oplock,
658 				       const char *symlink_target)
659 {
660 	struct dentry *dentry = file_dentry(file);
661 	struct inode *inode = d_inode(dentry);
662 	struct cifsInodeInfo *cinode = CIFS_I(inode);
663 	struct cifsFileInfo *cfile;
664 	struct cifs_fid_locks *fdlocks;
665 	struct cifs_tcon *tcon = tlink_tcon(tlink);
666 	struct TCP_Server_Info *server = tcon->ses->server;
667 
668 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
669 	if (cfile == NULL)
670 		return cfile;
671 
672 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
673 	if (!fdlocks) {
674 		kfree(cfile);
675 		return NULL;
676 	}
677 
678 	if (symlink_target) {
679 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
680 		if (!cfile->symlink_target) {
681 			kfree(fdlocks);
682 			kfree(cfile);
683 			return NULL;
684 		}
685 	}
686 
687 	INIT_LIST_HEAD(&fdlocks->locks);
688 	fdlocks->cfile = cfile;
689 	cfile->llist = fdlocks;
690 
691 	cfile->count = 1;
692 	cfile->pid = current->tgid;
693 	cfile->uid = current_fsuid();
694 	cfile->dentry = dget(dentry);
695 	cfile->f_flags = file->f_flags;
696 	cfile->invalidHandle = false;
697 	cfile->deferred_close_scheduled = false;
698 	cfile->tlink = cifs_get_tlink(tlink);
699 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
700 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
701 	INIT_WORK(&cfile->serverclose, serverclose_work);
702 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
703 	mutex_init(&cfile->fh_mutex);
704 	spin_lock_init(&cfile->file_info_lock);
705 
706 	cifs_sb_active(inode->i_sb);
707 
708 	/*
709 	 * If the server returned a read oplock and we have mandatory brlocks,
710 	 * set oplock level to None.
711 	 */
712 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
713 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
714 		oplock = 0;
715 	}
716 
717 	cifs_down_write(&cinode->lock_sem);
718 	list_add(&fdlocks->llist, &cinode->llist);
719 	up_write(&cinode->lock_sem);
720 
721 	spin_lock(&tcon->open_file_lock);
722 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
723 		oplock = fid->pending_open->oplock;
724 	list_del(&fid->pending_open->olist);
725 
726 	fid->purge_cache = false;
727 	server->ops->set_fid(cfile, fid, oplock);
728 
729 	list_add(&cfile->tlist, &tcon->openFileList);
730 	atomic_inc(&tcon->num_local_opens);
731 
732 	/* if this is a readable file instance, put it first in the list */
733 	spin_lock(&cinode->open_file_lock);
734 	if (file->f_mode & FMODE_READ)
735 		list_add(&cfile->flist, &cinode->openFileList);
736 	else
737 		list_add_tail(&cfile->flist, &cinode->openFileList);
738 	spin_unlock(&cinode->open_file_lock);
739 	spin_unlock(&tcon->open_file_lock);
740 
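	/*
	 * ->set_fid() above may set fid->purge_cache (presumably when the
	 * granted oplock state requires dropping cached data), which is why
	 * the flag is cleared before the call and re-checked here.
	 */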
741 	if (fid->purge_cache)
742 		cifs_zap_mapping(inode);
743 
744 	file->private_data = cfile;
745 	return cfile;
746 }
747 
748 struct cifsFileInfo *
749 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
750 {
751 	spin_lock(&cifs_file->file_info_lock);
752 	cifsFileInfo_get_locked(cifs_file);
753 	spin_unlock(&cifs_file->file_info_lock);
754 	return cifs_file;
755 }
756 
757 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
758 {
759 	struct inode *inode = d_inode(cifs_file->dentry);
760 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
761 	struct cifsLockInfo *li, *tmp;
762 	struct super_block *sb = inode->i_sb;
763 
764 	/*
765 	 * Delete any outstanding lock records. We'll lose them when the file
766 	 * is closed anyway.
767 	 */
768 	cifs_down_write(&cifsi->lock_sem);
769 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
770 		list_del(&li->llist);
771 		cifs_del_lock_waiters(li);
772 		kfree(li);
773 	}
774 	list_del(&cifs_file->llist->llist);
775 	kfree(cifs_file->llist);
776 	up_write(&cifsi->lock_sem);
777 
778 	cifs_put_tlink(cifs_file->tlink);
779 	dput(cifs_file->dentry);
780 	cifs_sb_deactive(sb);
781 	kfree(cifs_file->symlink_target);
782 	kfree(cifs_file);
783 }
784 
785 static void cifsFileInfo_put_work(struct work_struct *work)
786 {
787 	struct cifsFileInfo *cifs_file = container_of(work,
788 			struct cifsFileInfo, put);
789 
790 	cifsFileInfo_put_final(cifs_file);
791 }
792 
793 void serverclose_work(struct work_struct *work)
794 {
795 	struct cifsFileInfo *cifs_file = container_of(work,
796 			struct cifsFileInfo, serverclose);
797 
798 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
799 
800 	struct TCP_Server_Info *server = tcon->ses->server;
801 	int rc = 0;
802 	int retries = 0;
803 	int MAX_RETRIES = 4;
804 
805 	do {
806 		if (server->ops->close_getattr)
807 			rc = server->ops->close_getattr(0, tcon, cifs_file);
808 		else if (server->ops->close)
809 			rc = server->ops->close(0, tcon, &cifs_file->fid);
810 
811 		if (rc == -EBUSY || rc == -EAGAIN) {
812 			retries++;
813 			msleep(250);
814 		}
815 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));
817 
818 	if (retries == MAX_RETRIES)
819 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
820 
821 	if (cifs_file->offload)
822 		queue_work(fileinfo_put_wq, &cifs_file->put);
823 	else
824 		cifsFileInfo_put_final(cifs_file);
825 }
826 
827 /**
828  * cifsFileInfo_put - release a reference of file priv data
829  *
830  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
831  *
832  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
833  */
834 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
835 {
836 	_cifsFileInfo_put(cifs_file, true, true);
837 }
838 
839 /**
840  * _cifsFileInfo_put - release a reference of file priv data
841  *
842  * This may involve closing the filehandle @cifs_file out on the
843  * server. Must be called without holding tcon->open_file_lock,
844  * cinode->open_file_lock and cifs_file->file_info_lock.
845  *
846  * If @wait_for_oplock_handler is true and we are releasing the last
847  * reference, wait for any running oplock break handler of the file
848  * and cancel any pending one.
849  *
850  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
851  * @wait_oplock_handler: must be false if called from oplock_break_handler
852  * @offload:	not offloaded on close and oplock breaks
853  *
854  */
855 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
856 		       bool wait_oplock_handler, bool offload)
857 {
858 	struct inode *inode = d_inode(cifs_file->dentry);
859 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
860 	struct TCP_Server_Info *server = tcon->ses->server;
861 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
862 	struct super_block *sb = inode->i_sb;
863 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
864 	struct cifs_fid fid = {};
865 	struct cifs_pending_open open;
866 	bool oplock_break_cancelled;
867 	bool serverclose_offloaded = false;
868 
869 	spin_lock(&tcon->open_file_lock);
870 	spin_lock(&cifsi->open_file_lock);
871 	spin_lock(&cifs_file->file_info_lock);
872 
873 	cifs_file->offload = offload;
874 	if (--cifs_file->count > 0) {
875 		spin_unlock(&cifs_file->file_info_lock);
876 		spin_unlock(&cifsi->open_file_lock);
877 		spin_unlock(&tcon->open_file_lock);
878 		return;
879 	}
880 	spin_unlock(&cifs_file->file_info_lock);
881 
882 	if (server->ops->get_lease_key)
883 		server->ops->get_lease_key(inode, &fid);
884 
885 	/* store open in pending opens to make sure we don't miss lease break */
886 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
887 
888 	/* remove it from the lists */
889 	list_del(&cifs_file->flist);
890 	list_del(&cifs_file->tlist);
891 	atomic_dec(&tcon->num_local_opens);
892 
893 	if (list_empty(&cifsi->openFileList)) {
894 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
895 			 d_inode(cifs_file->dentry));
896 		/*
897 		 * In strict cache mode we need to invalidate the mapping on the
898 		 * last close because it may cause an error when we open this
899 		 * file again and get at least a level II oplock.
900 		 */
901 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
902 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
903 		cifs_set_oplock_level(cifsi, 0);
904 	}
905 
906 	spin_unlock(&cifsi->open_file_lock);
907 	spin_unlock(&tcon->open_file_lock);
908 
909 	oplock_break_cancelled = wait_oplock_handler ?
910 		cancel_work_sync(&cifs_file->oplock_break) : false;
911 
912 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
913 		struct TCP_Server_Info *server = tcon->ses->server;
914 		unsigned int xid;
915 		int rc = 0;
916 
917 		xid = get_xid();
918 		if (server->ops->close_getattr)
919 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
920 		else if (server->ops->close)
921 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
922 		_free_xid(xid);
923 
924 		if (rc == -EBUSY || rc == -EAGAIN) {
925 			// Server close failed, hence offloading it as an async op
926 			queue_work(serverclose_wq, &cifs_file->serverclose);
927 			serverclose_offloaded = true;
928 		}
929 	}
930 
931 	if (oplock_break_cancelled)
932 		cifs_done_oplock_break(cifsi);
933 
934 	cifs_del_pending_open(&open);
935 
936 	// if serverclose has been offloaded to the wq (on failure), it will
937 	// handle offloading the put as well. If serverclose was not offloaded,
938 	// we need to handle offloading the put here.
939 	if (!serverclose_offloaded) {
940 		if (offload)
941 			queue_work(fileinfo_put_wq, &cifs_file->put);
942 		else
943 			cifsFileInfo_put_final(cifs_file);
944 	}
945 }
946 
947 int cifs_open(struct inode *inode, struct file *file)
948 
949 {
950 	int rc = -EACCES;
951 	unsigned int xid;
952 	__u32 oplock;
953 	struct cifs_sb_info *cifs_sb;
954 	struct TCP_Server_Info *server;
955 	struct cifs_tcon *tcon;
956 	struct tcon_link *tlink;
957 	struct cifsFileInfo *cfile = NULL;
958 	void *page;
959 	const char *full_path;
960 	bool posix_open_ok = false;
961 	struct cifs_fid fid = {};
962 	struct cifs_pending_open open;
963 	struct cifs_open_info_data data = {};
964 
965 	xid = get_xid();
966 
967 	cifs_sb = CIFS_SB(inode->i_sb);
968 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
969 		free_xid(xid);
970 		return -EIO;
971 	}
972 
973 	tlink = cifs_sb_tlink(cifs_sb);
974 	if (IS_ERR(tlink)) {
975 		free_xid(xid);
976 		return PTR_ERR(tlink);
977 	}
978 	tcon = tlink_tcon(tlink);
979 	server = tcon->ses->server;
980 
981 	page = alloc_dentry_path();
982 	full_path = build_path_from_dentry(file_dentry(file), page);
983 	if (IS_ERR(full_path)) {
984 		rc = PTR_ERR(full_path);
985 		goto out;
986 	}
987 
988 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
989 		 inode, file->f_flags, full_path);
990 
991 	if (file->f_flags & O_DIRECT &&
992 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
993 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
994 			file->f_op = &cifs_file_direct_nobrl_ops;
995 		else
996 			file->f_op = &cifs_file_direct_ops;
997 	}
998 
999 	/* Get the cached handle as SMB2 close is deferred */
1000 	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
1001 		rc = cifs_get_writable_path(tcon, full_path,
1002 					    FIND_WR_FSUID_ONLY |
1003 					    FIND_WR_NO_PENDING_DELETE,
1004 					    &cfile);
1005 	} else {
1006 		rc = cifs_get_readable_path(tcon, full_path, &cfile);
1007 	}
1008 	if (rc == 0) {
1009 		unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
1010 		unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
1011 
1012 		if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
1013 		    (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
1014 			file->private_data = cfile;
1015 			spin_lock(&CIFS_I(inode)->deferred_lock);
1016 			cifs_del_deferred_close(cfile);
1017 			spin_unlock(&CIFS_I(inode)->deferred_lock);
1018 			goto use_cache;
1019 		}
1020 		_cifsFileInfo_put(cfile, true, false);
1021 	} else {
1022 		/* hard link on the deferred close file */
1023 		rc = cifs_get_hardlink_path(tcon, inode, file);
1024 		if (rc)
1025 			cifs_close_deferred_file(CIFS_I(inode));
1026 	}
1027 
1028 	if (server->oplocks)
1029 		oplock = REQ_OPLOCK;
1030 	else
1031 		oplock = 0;
1032 
1033 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1034 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1035 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1036 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1037 		/* can not refresh inode info since size could be stale */
1038 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1039 				cifs_sb->ctx->file_mode /* ignored */,
1040 				file->f_flags, &oplock, &fid.netfid, xid);
1041 		if (rc == 0) {
1042 			cifs_dbg(FYI, "posix open succeeded\n");
1043 			posix_open_ok = true;
1044 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1045 			if (tcon->ses->serverNOS)
1046 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1047 					 tcon->ses->ip_addr,
1048 					 tcon->ses->serverNOS);
1049 			tcon->broken_posix_open = true;
1050 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1051 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1052 			goto out;
1053 		/*
1054 		 * Else fallthrough to retry open the old way on network i/o
1055 		 * or DFS errors.
1056 		 */
1057 	}
1058 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1059 
1060 	if (server->ops->get_lease_key)
1061 		server->ops->get_lease_key(inode, &fid);
1062 
1063 	cifs_add_pending_open(&fid, tlink, &open);
1064 
1065 	if (!posix_open_ok) {
1066 		if (server->ops->get_lease_key)
1067 			server->ops->get_lease_key(inode, &fid);
1068 
1069 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1070 				  xid, &data);
1071 		if (rc) {
1072 			cifs_del_pending_open(&open);
1073 			goto out;
1074 		}
1075 	}
1076 
1077 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1078 	if (cfile == NULL) {
1079 		if (server->ops->close)
1080 			server->ops->close(xid, tcon, &fid);
1081 		cifs_del_pending_open(&open);
1082 		rc = -ENOMEM;
1083 		goto out;
1084 	}
1085 
1086 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1087 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1088 		/*
1089 		 * Time to set mode which we can not set earlier due to
1090 		 * problems creating new read-only files.
1091 		 */
1092 		struct cifs_unix_set_info_args args = {
1093 			.mode	= inode->i_mode,
1094 			.uid	= INVALID_UID, /* no change */
1095 			.gid	= INVALID_GID, /* no change */
1096 			.ctime	= NO_CHANGE_64,
1097 			.atime	= NO_CHANGE_64,
1098 			.mtime	= NO_CHANGE_64,
1099 			.device	= 0,
1100 		};
1101 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1102 				       cfile->pid);
1103 	}
1104 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1105 
1106 use_cache:
1107 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1108 			   file->f_mode & FMODE_WRITE);
1109 	if (!(file->f_flags & O_DIRECT))
1110 		goto out;
1111 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1112 		goto out;
1113 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1114 
1115 out:
1116 	free_dentry_path(page);
1117 	free_xid(xid);
1118 	cifs_put_tlink(tlink);
1119 	cifs_free_open_info(&data);
1120 	return rc;
1121 }
1122 
1123 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1124 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1125 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1126 
1127 /*
1128  * Try to reacquire byte range locks that were released when session
1129  * to server was lost.
1130  */
1131 static int
1132 cifs_relock_file(struct cifsFileInfo *cfile)
1133 {
1134 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1135 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1136 	int rc = 0;
1137 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1138 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1139 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1140 
1141 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1142 	if (cinode->can_cache_brlcks) {
1143 		/* can cache locks - no need to relock */
1144 		up_read(&cinode->lock_sem);
1145 		return rc;
1146 	}
1147 
1148 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1149 	if (cap_unix(tcon->ses) &&
1150 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1151 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1152 		rc = cifs_push_posix_locks(cfile);
1153 	else
1154 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1155 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1156 
1157 	up_read(&cinode->lock_sem);
1158 	return rc;
1159 }
1160 
1161 static int
1162 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1163 {
1164 	int rc = -EACCES;
1165 	unsigned int xid;
1166 	__u32 oplock;
1167 	struct cifs_sb_info *cifs_sb;
1168 	struct cifs_tcon *tcon;
1169 	struct TCP_Server_Info *server;
1170 	struct cifsInodeInfo *cinode;
1171 	struct inode *inode;
1172 	void *page;
1173 	const char *full_path;
1174 	int desired_access;
1175 	int disposition = FILE_OPEN;
1176 	int create_options = CREATE_NOT_DIR;
1177 	struct cifs_open_parms oparms;
1178 	int rdwr_for_fscache = 0;
1179 
1180 	xid = get_xid();
1181 	mutex_lock(&cfile->fh_mutex);
1182 	if (!cfile->invalidHandle) {
1183 		mutex_unlock(&cfile->fh_mutex);
1184 		free_xid(xid);
1185 		return 0;
1186 	}
1187 
1188 	inode = d_inode(cfile->dentry);
1189 	cifs_sb = CIFS_SB(inode->i_sb);
1190 	tcon = tlink_tcon(cfile->tlink);
1191 	server = tcon->ses->server;
1192 
1193 	/*
1194 	 * Cannot grab the rename sem here because various ops, including those
1195 	 * that already hold the rename sem, can end up causing writepage to get
1196 	 * called; if the server was down, that means we end up here, and we
1197 	 * can never tell if the caller already holds the rename_sem.
1198 	 */
1199 	page = alloc_dentry_path();
1200 	full_path = build_path_from_dentry(cfile->dentry, page);
1201 	if (IS_ERR(full_path)) {
1202 		mutex_unlock(&cfile->fh_mutex);
1203 		free_dentry_path(page);
1204 		free_xid(xid);
1205 		return PTR_ERR(full_path);
1206 	}
1207 
1208 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1209 		 inode, cfile->f_flags, full_path);
1210 
1211 	if (tcon->ses->server->oplocks)
1212 		oplock = REQ_OPLOCK;
1213 	else
1214 		oplock = 0;
1215 
1216 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1217 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1218 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1219 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1220 		/*
1221 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1222 		 * original open. Must mask them off for a reopen.
1223 		 */
1224 		unsigned int oflags = cfile->f_flags &
1225 						~(O_CREAT | O_EXCL | O_TRUNC);
1226 
1227 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1228 				     cifs_sb->ctx->file_mode /* ignored */,
1229 				     oflags, &oplock, &cfile->fid.netfid, xid);
1230 		if (rc == 0) {
1231 			cifs_dbg(FYI, "posix reopen succeeded\n");
1232 			oparms.reconnect = true;
1233 			goto reopen_success;
1234 		}
1235 		/*
1236 		 * fallthrough to retry open the old way on errors, especially
1237 		 * fall through to retry the open the old way on errors;
1238 		 * especially in the reconnect path it is important to retry hard
1239 	}
1240 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1241 
1242 	/* If we're caching, we need to be able to fill in around partial writes. */
1243 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1244 		rdwr_for_fscache = 1;
1245 
1246 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1247 
1248 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1249 	if (cfile->f_flags & O_SYNC)
1250 		create_options |= CREATE_WRITE_THROUGH;
1251 
1252 	if (cfile->f_flags & O_DIRECT)
1253 		create_options |= CREATE_NO_BUFFER;
1254 
1255 	if (server->ops->get_lease_key)
1256 		server->ops->get_lease_key(inode, &cfile->fid);
1257 
1258 retry_open:
1259 	oparms = (struct cifs_open_parms) {
1260 		.tcon = tcon,
1261 		.cifs_sb = cifs_sb,
1262 		.desired_access = desired_access,
1263 		.create_options = cifs_create_options(cifs_sb, create_options),
1264 		.disposition = disposition,
1265 		.path = full_path,
1266 		.fid = &cfile->fid,
1267 		.reconnect = true,
1268 	};
1269 
1270 	/*
1271 	 * Can not refresh inode by passing in file_info buf to be returned by
1272 	 * ops->open and then calling get_inode_info with returned buf since
1273 	 * file might have write behind data that needs to be flushed and server
1274 	 * version of file size can be stale. If we knew for sure that inode was
1275 	 * not dirty locally we could do this.
1276 	 */
1277 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1278 	if (rc == -ENOENT && oparms.reconnect == false) {
1279 		/* durable handle timeout is expired - open the file again */
1280 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1281 		/* indicate that we need to relock the file */
1282 		oparms.reconnect = true;
1283 	}
1284 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1285 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1286 		rdwr_for_fscache = 2;
1287 		goto retry_open;
1288 	}
1289 
1290 	if (rc) {
1291 		mutex_unlock(&cfile->fh_mutex);
1292 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1293 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1294 		goto reopen_error_exit;
1295 	}
1296 
1297 	if (rdwr_for_fscache == 2)
1298 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1299 
1300 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1301 reopen_success:
1302 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1303 	cfile->invalidHandle = false;
1304 	mutex_unlock(&cfile->fh_mutex);
1305 	cinode = CIFS_I(inode);
1306 
1307 	if (can_flush) {
1308 		rc = filemap_write_and_wait(inode->i_mapping);
1309 		if (!is_interrupt_error(rc))
1310 			mapping_set_error(inode->i_mapping, rc);
1311 
1312 		if (tcon->posix_extensions) {
1313 			rc = smb311_posix_get_inode_info(&inode, full_path,
1314 							 NULL, inode->i_sb, xid);
1315 		} else if (tcon->unix_ext) {
1316 			rc = cifs_get_inode_info_unix(&inode, full_path,
1317 						      inode->i_sb, xid);
1318 		} else {
1319 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1320 						 inode->i_sb, xid, NULL);
1321 		}
1322 	}
1323 	/*
1324 	 * Else we are writing out data to server already and could deadlock if
1325 	 * we tried to flush data, and since we do not know if we have data that
1326 	 * would invalidate the current end of file on the server we can not go
1327 	 * to the server to get the new inode info.
1328 	 */
1329 
1330 	/*
1331 	 * If the server returned a read oplock and we have mandatory brlocks,
1332 	 * set oplock level to None.
1333 	 */
1334 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1335 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1336 		oplock = 0;
1337 	}
1338 
1339 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1340 	if (oparms.reconnect)
1341 		cifs_relock_file(cfile);
1342 
1343 reopen_error_exit:
1344 	free_dentry_path(page);
1345 	free_xid(xid);
1346 	return rc;
1347 }
1348 
1349 void smb2_deferred_work_close(struct work_struct *work)
1350 {
1351 	struct cifsFileInfo *cfile = container_of(work,
1352 			struct cifsFileInfo, deferred.work);
1353 
1354 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1355 	cifs_del_deferred_close(cfile);
1356 	cfile->deferred_close_scheduled = false;
1357 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1358 	_cifsFileInfo_put(cfile, true, false);
1359 }
1360 
1361 static bool
1362 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1363 {
1364 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1365 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1366 
1367 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1368 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1369 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1370 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1371 
1372 }
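/*
 * Illustrative summary: a close is deferred only when closetimeo is nonzero,
 * a lease was granted, and the inode caches reads (RH) or reads, handles and
 * writes (RHW), with no byte-range lock having set CIFS_INO_CLOSE_ON_LOCK.
 * Otherwise cifs_close() below drops the handle via _cifsFileInfo_put().
 */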
1373 
1374 int cifs_close(struct inode *inode, struct file *file)
1375 {
1376 	struct cifsFileInfo *cfile;
1377 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1378 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1379 	struct cifs_deferred_close *dclose;
1380 
1381 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1382 
1383 	if (file->private_data != NULL) {
1384 		cfile = file->private_data;
1385 		file->private_data = NULL;
1386 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1387 		if ((cfile->status_file_deleted == false) &&
1388 		    (smb2_can_defer_close(inode, dclose))) {
1389 			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
1390 				inode_set_mtime_to_ts(inode,
1391 						      inode_set_ctime_current(inode));
1392 			}
1393 			spin_lock(&cinode->deferred_lock);
1394 			cifs_add_deferred_close(cfile, dclose);
1395 			if (cfile->deferred_close_scheduled &&
1396 			    delayed_work_pending(&cfile->deferred)) {
1397 				/*
1398 				 * If there is no pending work, mod_delayed_work queues new work.
1399 				 * So, increase the ref count to avoid use-after-free.
1400 				 */
1401 				if (!mod_delayed_work(deferredclose_wq,
1402 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1403 					cifsFileInfo_get(cfile);
1404 			} else {
1405 				/* Deferred close for files */
1406 				queue_delayed_work(deferredclose_wq,
1407 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1408 				cfile->deferred_close_scheduled = true;
1409 				spin_unlock(&cinode->deferred_lock);
1410 				return 0;
1411 			}
1412 			spin_unlock(&cinode->deferred_lock);
1413 			_cifsFileInfo_put(cfile, true, false);
1414 		} else {
1415 			_cifsFileInfo_put(cfile, true, false);
1416 			kfree(dclose);
1417 		}
1418 	}
1419 
1420 	/* return code from the ->release op is always ignored */
1421 	return 0;
1422 }
1423 
1424 void
1425 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1426 {
1427 	struct cifsFileInfo *open_file, *tmp;
1428 	LIST_HEAD(tmp_list);
1429 
1430 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1431 		return;
1432 
1433 	tcon->need_reopen_files = false;
1434 
1435 	cifs_dbg(FYI, "Reopen persistent handles\n");
1436 
1437 	/* list all files open on the tree connection, reopen resilient handles */
1438 	spin_lock(&tcon->open_file_lock);
1439 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1440 		if (!open_file->invalidHandle)
1441 			continue;
1442 		cifsFileInfo_get(open_file);
1443 		list_add_tail(&open_file->rlist, &tmp_list);
1444 	}
1445 	spin_unlock(&tcon->open_file_lock);
1446 
1447 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1448 		if (cifs_reopen_file(open_file, false /* do not flush */))
1449 			tcon->need_reopen_files = true;
1450 		list_del_init(&open_file->rlist);
1451 		cifsFileInfo_put(open_file);
1452 	}
1453 }
1454 
1455 int cifs_closedir(struct inode *inode, struct file *file)
1456 {
1457 	int rc = 0;
1458 	unsigned int xid;
1459 	struct cifsFileInfo *cfile = file->private_data;
1460 	struct cifs_tcon *tcon;
1461 	struct TCP_Server_Info *server;
1462 	char *buf;
1463 
1464 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1465 
1466 	if (cfile == NULL)
1467 		return rc;
1468 
1469 	xid = get_xid();
1470 	tcon = tlink_tcon(cfile->tlink);
1471 	server = tcon->ses->server;
1472 
1473 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1474 	spin_lock(&cfile->file_info_lock);
1475 	if (server->ops->dir_needs_close(cfile)) {
1476 		cfile->invalidHandle = true;
1477 		spin_unlock(&cfile->file_info_lock);
1478 		if (server->ops->close_dir)
1479 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1480 		else
1481 			rc = -ENOSYS;
1482 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1483 		/* not much we can do if it fails anyway, ignore rc */
1484 		rc = 0;
1485 	} else
1486 		spin_unlock(&cfile->file_info_lock);
1487 
1488 	buf = cfile->srch_inf.ntwrk_buf_start;
1489 	if (buf) {
1490 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1491 		cfile->srch_inf.ntwrk_buf_start = NULL;
1492 		if (cfile->srch_inf.smallBuf)
1493 			cifs_small_buf_release(buf);
1494 		else
1495 			cifs_buf_release(buf);
1496 	}
1497 
1498 	cifs_put_tlink(cfile->tlink);
1499 	kfree(file->private_data);
1500 	file->private_data = NULL;
1501 	/* BB can we lock the filestruct while this is going on? */
1502 	free_xid(xid);
1503 	return rc;
1504 }
1505 
1506 static struct cifsLockInfo *
1507 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1508 {
1509 	struct cifsLockInfo *lock =
1510 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1511 	if (!lock)
1512 		return lock;
1513 	lock->offset = offset;
1514 	lock->length = length;
1515 	lock->type = type;
1516 	lock->pid = current->tgid;
1517 	lock->flags = flags;
1518 	INIT_LIST_HEAD(&lock->blist);
1519 	init_waitqueue_head(&lock->block_q);
1520 	return lock;
1521 }
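/*
 * Typical usage (a sketch; the exact flow lives in the setlk paths further
 * down this file):
 *
 *	lock = cifs_lock_init(flock->fl_start, length, type,
 *			      flock->c.flc_flags);
 *	if (!lock)
 *		return -ENOMEM;
 *	rc = cifs_lock_add_if(cfile, lock, wait_flag);
 *	if (rc < 0)
 *		kfree(lock);
 */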
1522 
1523 void
1524 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1525 {
1526 	struct cifsLockInfo *li, *tmp;
1527 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1528 		list_del_init(&li->blist);
1529 		wake_up(&li->block_q);
1530 	}
1531 }
1532 
1533 #define CIFS_LOCK_OP	0
1534 #define CIFS_READ_OP	1
1535 #define CIFS_WRITE_OP	2
1536 
1537 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1538 static bool
1539 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1540 			    __u64 length, __u8 type, __u16 flags,
1541 			    struct cifsFileInfo *cfile,
1542 			    struct cifsLockInfo **conf_lock, int rw_check)
1543 {
1544 	struct cifsLockInfo *li;
1545 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1546 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1547 
1548 	list_for_each_entry(li, &fdlocks->locks, llist) {
1549 		if (offset + length <= li->offset ||
1550 		    offset >= li->offset + li->length)
1551 			continue;
1552 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1553 		    server->ops->compare_fids(cfile, cur_cfile)) {
1554 			/* shared lock prevents write op through the same fid */
1555 			if (!(li->type & server->vals->shared_lock_type) ||
1556 			    rw_check != CIFS_WRITE_OP)
1557 				continue;
1558 		}
1559 		if ((type & server->vals->shared_lock_type) &&
1560 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1561 		     current->tgid == li->pid) || type == li->type))
1562 			continue;
1563 		if (rw_check == CIFS_LOCK_OP &&
1564 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1565 		    server->ops->compare_fids(cfile, cur_cfile))
1566 			continue;
1567 		if (conf_lock)
1568 			*conf_lock = li;
1569 		return true;
1570 	}
1571 	return false;
1572 }
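/*
 * Example (illustrative): two shared (read) locks never conflict with each
 * other regardless of owner, while an exclusive lock that overlaps a range
 * held through a different fid (or by a different tgid) is a conflict.
 */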
1573 
1574 bool
1575 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1576 			__u8 type, __u16 flags,
1577 			struct cifsLockInfo **conf_lock, int rw_check)
1578 {
1579 	bool rc = false;
1580 	struct cifs_fid_locks *cur;
1581 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1582 
1583 	list_for_each_entry(cur, &cinode->llist, llist) {
1584 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1585 						 flags, cfile, conf_lock,
1586 						 rw_check);
1587 		if (rc)
1588 			break;
1589 	}
1590 
1591 	return rc;
1592 }
1593 
1594 /*
1595  * Check if there is another lock that prevents us from setting the lock (mandatory
1596  * style). If such a lock exists, update the flock structure with its
1597  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1598  * or leave it the same if we can't. Returns 0 if we don't need to request to
1599  * the server or 1 otherwise.
1600  */
1601 static int
1602 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1603 	       __u8 type, struct file_lock *flock)
1604 {
1605 	int rc = 0;
1606 	struct cifsLockInfo *conf_lock;
1607 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1608 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1609 	bool exist;
1610 
1611 	down_read(&cinode->lock_sem);
1612 
1613 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1614 					flock->c.flc_flags, &conf_lock,
1615 					CIFS_LOCK_OP);
1616 	if (exist) {
1617 		flock->fl_start = conf_lock->offset;
1618 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1619 		flock->c.flc_pid = conf_lock->pid;
1620 		if (conf_lock->type & server->vals->shared_lock_type)
1621 			flock->c.flc_type = F_RDLCK;
1622 		else
1623 			flock->c.flc_type = F_WRLCK;
1624 	} else if (!cinode->can_cache_brlcks)
1625 		rc = 1;
1626 	else
1627 		flock->c.flc_type = F_UNLCK;
1628 
1629 	up_read(&cinode->lock_sem);
1630 	return rc;
1631 }
1632 
1633 static void
1634 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1635 {
1636 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1637 	cifs_down_write(&cinode->lock_sem);
1638 	list_add_tail(&lock->llist, &cfile->llist->locks);
1639 	up_write(&cinode->lock_sem);
1640 }
1641 
1642 /*
1643  * Set the byte-range lock (mandatory style). Returns:
1644  * 1) 0, if we set the lock and don't need to send a request to the server;
1645  * 2) 1, if no locks prevent us but we need to send a request to the server;
1646  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1647  */
1648 static int
1649 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1650 		 bool wait)
1651 {
1652 	struct cifsLockInfo *conf_lock;
1653 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1654 	bool exist;
1655 	int rc = 0;
1656 
1657 try_again:
1658 	exist = false;
1659 	cifs_down_write(&cinode->lock_sem);
1660 
1661 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1662 					lock->type, lock->flags, &conf_lock,
1663 					CIFS_LOCK_OP);
1664 	if (!exist && cinode->can_cache_brlcks) {
1665 		list_add_tail(&lock->llist, &cfile->llist->locks);
1666 		up_write(&cinode->lock_sem);
1667 		return rc;
1668 	}
1669 
1670 	if (!exist)
1671 		rc = 1;
1672 	else if (!wait)
1673 		rc = -EACCES;
1674 	else {
1675 		list_add_tail(&lock->blist, &conf_lock->blist);
1676 		up_write(&cinode->lock_sem);
1677 		rc = wait_event_interruptible(lock->block_q,
1678 					(lock->blist.prev == &lock->blist) &&
1679 					(lock->blist.next == &lock->blist));
1680 		if (!rc)
1681 			goto try_again;
1682 		cifs_down_write(&cinode->lock_sem);
1683 		list_del_init(&lock->blist);
1684 	}
1685 
1686 	up_write(&cinode->lock_sem);
1687 	return rc;
1688 }
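/*
 * Editorial note: the wait condition above - blist.prev and blist.next both
 * pointing back at blist - is the open-coded equivalent of
 * list_empty(&lock->blist); it becomes true once the waiter has been
 * unlinked from the conflicting lock's blist (presumably by
 * cifs_del_lock_waiters() when that lock goes away), i.e.:
 *
 *	rc = wait_event_interruptible(lock->block_q,
 *				      list_empty(&lock->blist));
 */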
1689 
1690 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1691 /*
1692  * Check if there is another lock that prevents us from setting the lock
1693  * (posix style). If such a lock exists, update the flock structure with its
1694  * properties. Otherwise, set the flock type to F_UNLCK if we can cache
1695  * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
1696  * send a request to the server, or 1 otherwise.
1697  */
1698 static int
1699 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1700 {
1701 	int rc = 0;
1702 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1703 	unsigned char saved_type = flock->c.flc_type;
1704 
1705 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1706 		return 1;
1707 
1708 	down_read(&cinode->lock_sem);
1709 	posix_test_lock(file, flock);
1710 
1711 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1712 		flock->c.flc_type = saved_type;
1713 		rc = 1;
1714 	}
1715 
1716 	up_read(&cinode->lock_sem);
1717 	return rc;
1718 }
1719 
1720 /*
1721  * Set the byte-range lock (posix style). Returns:
1722  * 1) <0, if an error occurs while setting the lock;
1723  * 2) 0, if we set the lock and don't need to send a request to the server;
1724  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1725  * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
1726  */
1727 static int
1728 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1729 {
1730 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1731 	int rc = FILE_LOCK_DEFERRED + 1;
1732 
1733 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1734 		return rc;
1735 
1736 	cifs_down_write(&cinode->lock_sem);
1737 	if (!cinode->can_cache_brlcks) {
1738 		up_write(&cinode->lock_sem);
1739 		return rc;
1740 	}
1741 
1742 	rc = posix_lock_file(file, flock, NULL);
1743 	up_write(&cinode->lock_sem);
1744 	return rc;
1745 }
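/*
 * Editorial note: callers discriminate on FILE_LOCK_DEFERRED; cifs_setlk()
 * below, for instance, only proceeds to the server request in the
 * FILE_LOCK_DEFERRED + 1 case:
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;
 */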
1746 
1747 int
1748 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1749 {
1750 	unsigned int xid;
1751 	int rc = 0, stored_rc;
1752 	struct cifsLockInfo *li, *tmp;
1753 	struct cifs_tcon *tcon;
1754 	unsigned int num, max_num, max_buf;
1755 	LOCKING_ANDX_RANGE *buf, *cur;
1756 	static const int types[] = {
1757 		LOCKING_ANDX_LARGE_FILES,
1758 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1759 	};
1760 	int i;
1761 
1762 	xid = get_xid();
1763 	tcon = tlink_tcon(cfile->tlink);
1764 
1765 	/*
1766 	 * Accessing maxBuf is racy with cifs_reconnect - need to store the
1767 	 * value and check it before use.
1768 	 */
1769 	max_buf = tcon->ses->server->maxBuf;
1770 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1771 		free_xid(xid);
1772 		return -EINVAL;
1773 	}
1774 
1775 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1776 		     PAGE_SIZE);
1777 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1778 			PAGE_SIZE);
1779 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1780 						sizeof(LOCKING_ANDX_RANGE);
1781 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1782 	if (!buf) {
1783 		free_xid(xid);
1784 		return -ENOMEM;
1785 	}
1786 
1787 	for (i = 0; i < 2; i++) {
1788 		cur = buf;
1789 		num = 0;
1790 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1791 			if (li->type != types[i])
1792 				continue;
1793 			cur->Pid = cpu_to_le16(li->pid);
1794 			cur->LengthLow = cpu_to_le32((u32)li->length);
1795 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1796 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1797 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1798 			if (++num == max_num) {
1799 				stored_rc = cifs_lockv(xid, tcon,
1800 						       cfile->fid.netfid,
1801 						       (__u8)li->type, 0, num,
1802 						       buf);
1803 				if (stored_rc)
1804 					rc = stored_rc;
1805 				cur = buf;
1806 				num = 0;
1807 			} else
1808 				cur++;
1809 		}
1810 
1811 		if (num) {
1812 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1813 					       (__u8)types[i], 0, num, buf);
1814 			if (stored_rc)
1815 				rc = stored_rc;
1816 		}
1817 	}
1818 
1819 	kfree(buf);
1820 	free_xid(xid);
1821 	return rc;
1822 }
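/*
 * Editorial sketch of the sizing math above, with hypothetical numbers
 * (H and R are stand-ins, not the real struct sizes): if
 * sizeof(struct smb_hdr) == H and sizeof(LOCKING_ANDX_RANGE) == R, then
 *
 *	max_num = (min(max_buf - H, PAGE_SIZE) - H) / R;
 *
 * e.g. H = 32, R = 20 and max_buf = 4356 on a 4K-page system would give
 * max_num = (4096 - 32) / 20 = 203 ranges per LOCKING_ANDX request.
 */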
1823 
1824 static __u32
1825 hash_lockowner(fl_owner_t owner)
1826 {
1827 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1828 }
1829 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1830 
1831 struct lock_to_push {
1832 	struct list_head llist;
1833 	__u64 offset;
1834 	__u64 length;
1835 	__u32 pid;
1836 	__u16 netfid;
1837 	__u8 type;
1838 };
1839 
1840 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1841 static int
1842 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1843 {
1844 	struct inode *inode = d_inode(cfile->dentry);
1845 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1846 	struct file_lock *flock;
1847 	struct file_lock_context *flctx = locks_inode_context(inode);
1848 	unsigned int count = 0, i;
1849 	int rc = 0, xid, type;
1850 	struct list_head locks_to_send, *el;
1851 	struct lock_to_push *lck, *tmp;
1852 	__u64 length;
1853 
1854 	xid = get_xid();
1855 
1856 	if (!flctx)
1857 		goto out;
1858 
1859 	spin_lock(&flctx->flc_lock);
1860 	list_for_each(el, &flctx->flc_posix) {
1861 		count++;
1862 	}
1863 	spin_unlock(&flctx->flc_lock);
1864 
1865 	INIT_LIST_HEAD(&locks_to_send);
1866 
1867 	/*
1868 	 * Allocating count locks is enough because no FL_POSIX locks can be
1869 	 * added to the list while we are holding cinode->lock_sem that
1870 	 * protects locking operations of this inode.
1871 	 */
1872 	for (i = 0; i < count; i++) {
1873 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1874 		if (!lck) {
1875 			rc = -ENOMEM;
1876 			goto err_out;
1877 		}
1878 		list_add_tail(&lck->llist, &locks_to_send);
1879 	}
1880 
1881 	el = locks_to_send.next;
1882 	spin_lock(&flctx->flc_lock);
1883 	for_each_file_lock(flock, &flctx->flc_posix) {
1884 		unsigned char ftype = flock->c.flc_type;
1885 
1886 		if (el == &locks_to_send) {
1887 			/*
1888 			 * The list ended. We don't have enough allocated
1889 			 * structures - something is really wrong.
1890 			 */
1891 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1892 			break;
1893 		}
1894 		length = cifs_flock_len(flock);
1895 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1896 			type = CIFS_RDLCK;
1897 		else
1898 			type = CIFS_WRLCK;
1899 		lck = list_entry(el, struct lock_to_push, llist);
1900 		lck->pid = hash_lockowner(flock->c.flc_owner);
1901 		lck->netfid = cfile->fid.netfid;
1902 		lck->length = length;
1903 		lck->type = type;
1904 		lck->offset = flock->fl_start;
1905 	}
1906 	spin_unlock(&flctx->flc_lock);
1907 
1908 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1909 		int stored_rc;
1910 
1911 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1912 					     lck->offset, lck->length, NULL,
1913 					     lck->type, 0);
1914 		if (stored_rc)
1915 			rc = stored_rc;
1916 		list_del(&lck->llist);
1917 		kfree(lck);
1918 	}
1919 
1920 out:
1921 	free_xid(xid);
1922 	return rc;
1923 err_out:
1924 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1925 		list_del(&lck->llist);
1926 		kfree(lck);
1927 	}
1928 	goto out;
1929 }
1930 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
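/*
 * Editorial note on cifs_push_posix_locks(): the lock count is taken under
 * flc_lock, the lock_to_push structures are allocated with GFP_KERNEL while
 * no spinlock is held (kmalloc may sleep), and the list is then re-walked
 * to fill them in.  The count cannot grow in between because the caller,
 * cifs_push_locks(), holds cinode->lock_sem for write.
 */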
1931 
1932 static int
1933 cifs_push_locks(struct cifsFileInfo *cfile)
1934 {
1935 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1936 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1937 	int rc = 0;
1938 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1939 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1940 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1941 
1942 	/* we are going to update can_cache_brlcks here - need write access */
1943 	cifs_down_write(&cinode->lock_sem);
1944 	if (!cinode->can_cache_brlcks) {
1945 		up_write(&cinode->lock_sem);
1946 		return rc;
1947 	}
1948 
1949 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1950 	if (cap_unix(tcon->ses) &&
1951 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1952 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1953 		rc = cifs_push_posix_locks(cfile);
1954 	else
1955 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1956 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1957 
1958 	cinode->can_cache_brlcks = false;
1959 	up_write(&cinode->lock_sem);
1960 	return rc;
1961 }
1962 
1963 static void
1964 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1965 		bool *wait_flag, struct TCP_Server_Info *server)
1966 {
1967 	if (flock->c.flc_flags & FL_POSIX)
1968 		cifs_dbg(FYI, "Posix\n");
1969 	if (flock->c.flc_flags & FL_FLOCK)
1970 		cifs_dbg(FYI, "Flock\n");
1971 	if (flock->c.flc_flags & FL_SLEEP) {
1972 		cifs_dbg(FYI, "Blocking lock\n");
1973 		*wait_flag = true;
1974 	}
1975 	if (flock->c.flc_flags & FL_ACCESS)
1976 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1977 	if (flock->c.flc_flags & FL_LEASE)
1978 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1979 	if (flock->c.flc_flags &
1980 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1981 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1982 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1983 		         flock->c.flc_flags);
1984 
1985 	*type = server->vals->large_lock_type;
1986 	if (lock_is_write(flock)) {
1987 		cifs_dbg(FYI, "F_WRLCK\n");
1988 		*type |= server->vals->exclusive_lock_type;
1989 		*lock = 1;
1990 	} else if (lock_is_unlock(flock)) {
1991 		cifs_dbg(FYI, "F_UNLCK\n");
1992 		*type |= server->vals->unlock_lock_type;
1993 		*unlock = 1;
1994 		/* Check if unlock includes more than one lock range */
1995 	} else if (lock_is_read(flock)) {
1996 		cifs_dbg(FYI, "F_RDLCK\n");
1997 		*type |= server->vals->shared_lock_type;
1998 		*lock = 1;
1999 	} else if (flock->c.flc_type == F_EXLCK) {
2000 		cifs_dbg(FYI, "F_EXLCK\n");
2001 		*type |= server->vals->exclusive_lock_type;
2002 		*lock = 1;
2003 	} else if (flock->c.flc_type == F_SHLCK) {
2004 		cifs_dbg(FYI, "F_SHLCK\n");
2005 		*type |= server->vals->shared_lock_type;
2006 		*lock = 1;
2007 	} else
2008 		cifs_dbg(FYI, "Unknown type of lock\n");
2009 }
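/*
 * Illustrative example (editorial): for a blocking POSIX write lock, i.e.
 * flc_flags containing FL_POSIX | FL_SLEEP and flc_type F_WRLCK, the
 * decoding above yields roughly:
 *
 *	*type      = large_lock_type | exclusive_lock_type;
 *	*lock      = 1;
 *	*unlock    = 0;
 *	*wait_flag = true;
 */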
2010 
2011 static int
2012 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2013 	   bool wait_flag, bool posix_lck, unsigned int xid)
2014 {
2015 	int rc = 0;
2016 	__u64 length = cifs_flock_len(flock);
2017 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2018 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2019 	struct TCP_Server_Info *server = tcon->ses->server;
2020 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2021 	__u16 netfid = cfile->fid.netfid;
2022 
2023 	if (posix_lck) {
2024 		int posix_lock_type;
2025 
2026 		rc = cifs_posix_lock_test(file, flock);
2027 		if (!rc)
2028 			return rc;
2029 
2030 		if (type & server->vals->shared_lock_type)
2031 			posix_lock_type = CIFS_RDLCK;
2032 		else
2033 			posix_lock_type = CIFS_WRLCK;
2034 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2035 				      hash_lockowner(flock->c.flc_owner),
2036 				      flock->fl_start, length, flock,
2037 				      posix_lock_type, wait_flag);
2038 		return rc;
2039 	}
2040 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2041 
2042 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2043 	if (!rc)
2044 		return rc;
2045 
2046 	/* BB we could chain these into one lock request BB */
2047 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2048 				    1, 0, false);
2049 	if (rc == 0) {
2050 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2051 					    type, 0, 1, false);
2052 		flock->c.flc_type = F_UNLCK;
2053 		if (rc != 0)
2054 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2055 				 rc);
2056 		return 0;
2057 	}
2058 
2059 	if (type & server->vals->shared_lock_type) {
2060 		flock->c.flc_type = F_WRLCK;
2061 		return 0;
2062 	}
2063 
2064 	type &= ~server->vals->exclusive_lock_type;
2065 
2066 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2067 				    type | server->vals->shared_lock_type,
2068 				    1, 0, false);
2069 	if (rc == 0) {
2070 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2071 			type | server->vals->shared_lock_type, 0, 1, false);
2072 		flock->c.flc_type = F_RDLCK;
2073 		if (rc != 0)
2074 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2075 				 rc);
2076 	} else
2077 		flock->c.flc_type = F_WRLCK;
2078 
2079 	return 0;
2080 }
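/*
 * Editorial note: with no true "test lock" operation on the mandatory-lock
 * path, cifs_getlk() probes by actually acquiring and immediately releasing
 * the range (mand_lock with lock=1, then unlock=1).  If the exclusive probe
 * fails, it retries with a shared lock so it can report F_RDLCK instead of
 * F_WRLCK.
 */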
2081 
2082 void
2083 cifs_move_llist(struct list_head *source, struct list_head *dest)
2084 {
2085 	struct list_head *li, *tmp;
2086 	list_for_each_safe(li, tmp, source)
2087 		list_move(li, dest);
2088 }
2089 
2090 int
2091 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2092 				struct file *file)
2093 {
2094 	struct cifsFileInfo *open_file = NULL;
2095 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2096 	int rc = 0;
2097 
2098 	spin_lock(&tcon->open_file_lock);
2099 	spin_lock(&cinode->open_file_lock);
2100 
2101 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2102 		if (file->f_flags == open_file->f_flags) {
2103 			rc = -EINVAL;
2104 			break;
2105 		}
2106 	}
2107 
2108 	spin_unlock(&cinode->open_file_lock);
2109 	spin_unlock(&tcon->open_file_lock);
2110 	return rc;
2111 }
2112 
2113 void
2114 cifs_free_llist(struct list_head *llist)
2115 {
2116 	struct cifsLockInfo *li, *tmp;
2117 	list_for_each_entry_safe(li, tmp, llist, llist) {
2118 		cifs_del_lock_waiters(li);
2119 		list_del(&li->llist);
2120 		kfree(li);
2121 	}
2122 }
2123 
2124 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2125 int
2126 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2127 		  unsigned int xid)
2128 {
2129 	int rc = 0, stored_rc;
2130 	static const int types[] = {
2131 		LOCKING_ANDX_LARGE_FILES,
2132 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2133 	};
2134 	unsigned int i;
2135 	unsigned int max_num, num, max_buf;
2136 	LOCKING_ANDX_RANGE *buf, *cur;
2137 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2138 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2139 	struct cifsLockInfo *li, *tmp;
2140 	__u64 length = cifs_flock_len(flock);
2141 	LIST_HEAD(tmp_llist);
2142 
2143 	/*
2144 	 * Accessing maxBuf is racy with cifs_reconnect - need to store the
2145 	 * value and check it before use.
2146 	 */
2147 	max_buf = tcon->ses->server->maxBuf;
2148 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2149 		return -EINVAL;
2150 
2151 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2152 		     PAGE_SIZE);
2153 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2154 			PAGE_SIZE);
2155 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2156 						sizeof(LOCKING_ANDX_RANGE);
2157 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2158 	if (!buf)
2159 		return -ENOMEM;
2160 
2161 	cifs_down_write(&cinode->lock_sem);
2162 	for (i = 0; i < 2; i++) {
2163 		cur = buf;
2164 		num = 0;
2165 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2166 			if (flock->fl_start > li->offset ||
2167 			    (flock->fl_start + length) <
2168 			    (li->offset + li->length))
2169 				continue;
2170 			if (current->tgid != li->pid)
2171 				continue;
2172 			if (types[i] != li->type)
2173 				continue;
2174 			if (cinode->can_cache_brlcks) {
2175 				/*
2176 				 * We can cache brlock requests - simply remove
2177 				 * a lock from the file's list.
2178 				 */
2179 				list_del(&li->llist);
2180 				cifs_del_lock_waiters(li);
2181 				kfree(li);
2182 				continue;
2183 			}
2184 			cur->Pid = cpu_to_le16(li->pid);
2185 			cur->LengthLow = cpu_to_le32((u32)li->length);
2186 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2187 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2188 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2189 			/*
2190 			 * We need to save a lock here to let us add it again to
2191 			 * the file's list if the unlock range request fails on
2192 			 * the server.
2193 			 */
2194 			list_move(&li->llist, &tmp_llist);
2195 			if (++num == max_num) {
2196 				stored_rc = cifs_lockv(xid, tcon,
2197 						       cfile->fid.netfid,
2198 						       li->type, num, 0, buf);
2199 				if (stored_rc) {
2200 					/*
2201 					 * We failed on the unlock range
2202 					 * request - add all locks from the tmp
2203 					 * list to the head of the file's list.
2204 					 */
2205 					cifs_move_llist(&tmp_llist,
2206 							&cfile->llist->locks);
2207 					rc = stored_rc;
2208 				} else
2209 					/*
2210 					 * The unlock range request succeeded -
2211 					 * free the tmp list.
2212 					 */
2213 					cifs_free_llist(&tmp_llist);
2214 				cur = buf;
2215 				num = 0;
2216 			} else
2217 				cur++;
2218 		}
2219 		if (num) {
2220 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2221 					       types[i], num, 0, buf);
2222 			if (stored_rc) {
2223 				cifs_move_llist(&tmp_llist,
2224 						&cfile->llist->locks);
2225 				rc = stored_rc;
2226 			} else
2227 				cifs_free_llist(&tmp_llist);
2228 		}
2229 	}
2230 
2231 	up_write(&cinode->lock_sem);
2232 	kfree(buf);
2233 	return rc;
2234 }
2235 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
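/*
 * Editorial note: cifs_unlock_range() parks each matching lock on tmp_llist
 * before issuing the unlock request, so a server failure can be rolled back
 * with cifs_move_llist(&tmp_llist, &cfile->llist->locks); on success the
 * staged entries are freed with cifs_free_llist().
 */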
2236 
2237 static int
2238 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2239 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2240 	   unsigned int xid)
2241 {
2242 	int rc = 0;
2243 	__u64 length = cifs_flock_len(flock);
2244 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2245 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2246 	struct TCP_Server_Info *server = tcon->ses->server;
2247 	struct inode *inode = d_inode(cfile->dentry);
2248 
2249 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2250 	if (posix_lck) {
2251 		int posix_lock_type;
2252 
2253 		rc = cifs_posix_lock_set(file, flock);
2254 		if (rc <= FILE_LOCK_DEFERRED)
2255 			return rc;
2256 
2257 		if (type & server->vals->shared_lock_type)
2258 			posix_lock_type = CIFS_RDLCK;
2259 		else
2260 			posix_lock_type = CIFS_WRLCK;
2261 
2262 		if (unlock == 1)
2263 			posix_lock_type = CIFS_UNLCK;
2264 
2265 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2266 				      hash_lockowner(flock->c.flc_owner),
2267 				      flock->fl_start, length,
2268 				      NULL, posix_lock_type, wait_flag);
2269 		goto out;
2270 	}
2271 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2272 	if (lock) {
2273 		struct cifsLockInfo *lock;
2274 
2275 		lock = cifs_lock_init(flock->fl_start, length, type,
2276 				      flock->c.flc_flags);
2277 		if (!lock)
2278 			return -ENOMEM;
2279 
2280 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2281 		if (rc < 0) {
2282 			kfree(lock);
2283 			return rc;
2284 		}
2285 		if (!rc)
2286 			goto out;
2287 
2288 		/*
2289 		 * A Windows 7 server can delay breaking a lease from read to
2290 		 * None if we set a byte-range lock on a file - break it
2291 		 * explicitly before sending the lock to the server, to be sure
2292 		 * the next read won't conflict with non-overlapping locks due
2293 		 * to page reading.
2294 		 */
2295 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2296 					CIFS_CACHE_READ(CIFS_I(inode))) {
2297 			cifs_zap_mapping(inode);
2298 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2299 				 inode);
2300 			CIFS_I(inode)->oplock = 0;
2301 		}
2302 
2303 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2304 					    type, 1, 0, wait_flag);
2305 		if (rc) {
2306 			kfree(lock);
2307 			return rc;
2308 		}
2309 
2310 		cifs_lock_add(cfile, lock);
2311 	} else if (unlock)
2312 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2313 
2314 out:
2315 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2316 		/*
2317 		 * If this is a request to remove all locks because we
2318 		 * are closing the file, it doesn't matter if the
2319 		 * unlocking failed as both cifs.ko and the SMB server
2320 		 * remove the lock on file close.
2321 		 */
2322 		if (rc) {
2323 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2324 			if (!(flock->c.flc_flags & FL_CLOSE))
2325 				return rc;
2326 		}
2327 		rc = locks_lock_file_wait(file, flock);
2328 	}
2329 	return rc;
2330 }
2331 
2332 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2333 {
2334 	int rc, xid;
2335 	int lock = 0, unlock = 0;
2336 	bool wait_flag = false;
2337 	bool posix_lck = false;
2338 	struct cifs_sb_info *cifs_sb;
2339 	struct cifs_tcon *tcon;
2340 	struct cifsFileInfo *cfile;
2341 	__u32 type;
2342 
2343 	xid = get_xid();
2344 
2345 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2346 		rc = -ENOLCK;
2347 		free_xid(xid);
2348 		return rc;
2349 	}
2350 
2351 	cfile = (struct cifsFileInfo *)file->private_data;
2352 	tcon = tlink_tcon(cfile->tlink);
2353 
2354 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2355 			tcon->ses->server);
2356 	cifs_sb = CIFS_FILE_SB(file);
2357 
2358 	if (cap_unix(tcon->ses) &&
2359 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2360 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2361 		posix_lck = true;
2362 
2363 	if (!lock && !unlock) {
2364 		/*
2365 		 * if this is neither a lock nor an unlock request, there is
2366 		 * nothing to do since we do not know what it is
2367 		 */
2368 		rc = -EOPNOTSUPP;
2369 		free_xid(xid);
2370 		return rc;
2371 	}
2372 
2373 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2374 			xid);
2375 	free_xid(xid);
2376 	return rc;
2377 
2378 
2379 }
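/*
 * Illustrative userspace example (editorial assumption: fd is open on a
 * CIFS mount): a whole-file flock(2) arrives here with FL_FLOCK set:
 *
 *	if (flock(fd, LOCK_EX) == -1)
 *		perror("flock");
 */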
2380 
2381 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2382 {
2383 	int rc, xid;
2384 	int lock = 0, unlock = 0;
2385 	bool wait_flag = false;
2386 	bool posix_lck = false;
2387 	struct cifs_sb_info *cifs_sb;
2388 	struct cifs_tcon *tcon;
2389 	struct cifsFileInfo *cfile;
2390 	__u32 type;
2391 
2392 	rc = -EACCES;
2393 	xid = get_xid();
2394 
2395 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2396 		 flock->c.flc_flags, flock->c.flc_type,
2397 		 (long long)flock->fl_start,
2398 		 (long long)flock->fl_end);
2399 
2400 	cfile = (struct cifsFileInfo *)file->private_data;
2401 	tcon = tlink_tcon(cfile->tlink);
2402 
2403 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2404 			tcon->ses->server);
2405 	cifs_sb = CIFS_FILE_SB(file);
2406 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2407 
2408 	if (cap_unix(tcon->ses) &&
2409 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2410 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2411 		posix_lck = true;
2412 	/*
2413 	 * BB add code here to normalize offset and length to account for
2414 	 * negative length, which we cannot accept over the wire.
2415 	 */
2416 	if (IS_GETLK(cmd)) {
2417 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2418 		free_xid(xid);
2419 		return rc;
2420 	}
2421 
2422 	if (!lock && !unlock) {
2423 		/*
2424 		 * if this is neither a lock nor an unlock request, there is
2425 		 * nothing to do since we do not know what it is
2426 		 */
2427 		free_xid(xid);
2428 		return -EOPNOTSUPP;
2429 	}
2430 
2431 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2432 			xid);
2433 	free_xid(xid);
2434 	return rc;
2435 }
2436 
2437 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
2438 {
2439 	struct netfs_io_request *wreq = wdata->rreq;
2440 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2441 	loff_t wrend;
2442 
2443 	if (result > 0) {
2444 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2445 
2446 		if (wrend > ictx->zero_point &&
2447 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2448 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2449 			ictx->zero_point = wrend;
2450 		if (wrend > ictx->remote_i_size)
2451 			netfs_resize_file(ictx, wrend, true);
2452 	}
2453 
2454 	netfs_write_subrequest_terminated(&wdata->subreq, result);
2455 }
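/*
 * Editorial note: on a successful write the end offset of the subrequest is
 * used both to advance ictx->zero_point (only for unbuffered and DIO
 * writes) and, via netfs_resize_file(), to grow the cached remote file size
 * when the write extends the file.
 */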
2456 
2457 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2458 					bool fsuid_only)
2459 {
2460 	struct cifsFileInfo *open_file = NULL;
2461 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2462 
2463 	/* only filter by fsuid on multiuser mounts */
2464 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2465 		fsuid_only = false;
2466 
2467 	spin_lock(&cifs_inode->open_file_lock);
2468 	/* we could simply take the first list entry since write-only entries
2469 	   are always at the end of the list, but since the first entry might
2470 	   have a close pending, we go through the whole list */
2471 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2472 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2473 			continue;
2474 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2475 			if ((!open_file->invalidHandle)) {
2476 				/* found a good file */
2477 				/* lock it so it will not be closed on us */
2478 				cifsFileInfo_get(open_file);
2479 				spin_unlock(&cifs_inode->open_file_lock);
2480 				return open_file;
2481 			} /* else might as well continue, and look for
2482 			     another, or simply have the caller reopen it
2483 			     again rather than trying to fix this handle */
2484 		} else /* write only file */
2485 			break; /* write only files are last so must be done */
2486 	}
2487 	spin_unlock(&cifs_inode->open_file_lock);
2488 	return NULL;
2489 }
2490 
2491 /* Return -EBADF if no handle is found and general rc otherwise */
2492 int
2493 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2494 		       struct cifsFileInfo **ret_file)
2495 {
2496 	struct cifsFileInfo *open_file, *inv_file = NULL;
2497 	struct cifs_sb_info *cifs_sb;
2498 	bool any_available = false;
2499 	int rc = -EBADF;
2500 	unsigned int refind = 0;
2501 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2502 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2503 	*ret_file = NULL;
2504 
2505 	/*
2506 	 * Having a null inode here (because mapping->host was set to zero by
2507 	 * the VFS or MM) should not happen, but we had reports of an oops (due
2508 	 * to it being zero) during stress test cases, so we need to check for it
2509 	 */
2510 
2511 	if (cifs_inode == NULL) {
2512 		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2513 		dump_stack();
2514 		return rc;
2515 	}
2516 
2517 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2518 
2519 	/* only filter by fsuid on multiuser mounts */
2520 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2521 		fsuid_only = false;
2522 
2523 	spin_lock(&cifs_inode->open_file_lock);
2524 refind_writable:
2525 	if (refind > MAX_REOPEN_ATT) {
2526 		spin_unlock(&cifs_inode->open_file_lock);
2527 		return rc;
2528 	}
2529 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2530 		if (!any_available && open_file->pid != current->tgid)
2531 			continue;
2532 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2533 			continue;
2534 		if (with_delete && !(open_file->fid.access & DELETE))
2535 			continue;
2536 		if ((flags & FIND_WR_NO_PENDING_DELETE) &&
2537 		    open_file->status_file_deleted)
2538 			continue;
2539 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2540 			if (!open_file->invalidHandle) {
2541 				/* found a good writable file */
2542 				cifsFileInfo_get(open_file);
2543 				spin_unlock(&cifs_inode->open_file_lock);
2544 				*ret_file = open_file;
2545 				return 0;
2546 			} else {
2547 				if (!inv_file)
2548 					inv_file = open_file;
2549 			}
2550 		}
2551 	}
2552 	/* couldn't find usable FH with same pid, try any available */
2553 	if (!any_available) {
2554 		any_available = true;
2555 		goto refind_writable;
2556 	}
2557 
2558 	if (inv_file) {
2559 		any_available = false;
2560 		cifsFileInfo_get(inv_file);
2561 	}
2562 
2563 	spin_unlock(&cifs_inode->open_file_lock);
2564 
2565 	if (inv_file) {
2566 		rc = cifs_reopen_file(inv_file, false);
2567 		if (!rc) {
2568 			*ret_file = inv_file;
2569 			return 0;
2570 		}
2571 
2572 		spin_lock(&cifs_inode->open_file_lock);
2573 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2574 		spin_unlock(&cifs_inode->open_file_lock);
2575 		cifsFileInfo_put(inv_file);
2576 		++refind;
2577 		inv_file = NULL;
2578 		spin_lock(&cifs_inode->open_file_lock);
2579 		goto refind_writable;
2580 	}
2581 
2582 	return rc;
2583 }
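/*
 * Editorial note on the search above: the open-file list is scanned twice -
 * first restricted to handles opened by the current tgid, then, with
 * any_available set, relaxed to any matching handle.  An invalid handle
 * seen along the way is remembered in inv_file and reopened as a last
 * resort, bounded by MAX_REOPEN_ATT attempts.
 */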
2584 
2585 struct cifsFileInfo *
2586 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2587 {
2588 	struct cifsFileInfo *cfile;
2589 	int rc;
2590 
2591 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2592 	if (rc)
2593 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2594 
2595 	return cfile;
2596 }
2597 
2598 int
2599 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2600 		       int flags,
2601 		       struct cifsFileInfo **ret_file)
2602 {
2603 	struct cifsFileInfo *cfile;
2604 	void *page = alloc_dentry_path();
2605 
2606 	*ret_file = NULL;
2607 
2608 	spin_lock(&tcon->open_file_lock);
2609 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2610 		struct cifsInodeInfo *cinode;
2611 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2612 		if (IS_ERR(full_path)) {
2613 			spin_unlock(&tcon->open_file_lock);
2614 			free_dentry_path(page);
2615 			return PTR_ERR(full_path);
2616 		}
2617 		if (strcmp(full_path, name))
2618 			continue;
2619 
2620 		cinode = CIFS_I(d_inode(cfile->dentry));
2621 		spin_unlock(&tcon->open_file_lock);
2622 		free_dentry_path(page);
2623 		return cifs_get_writable_file(cinode, flags, ret_file);
2624 	}
2625 
2626 	spin_unlock(&tcon->open_file_lock);
2627 	free_dentry_path(page);
2628 	return -ENOENT;
2629 }
2630 
2631 int
2632 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2633 		       struct cifsFileInfo **ret_file)
2634 {
2635 	struct cifsFileInfo *cfile;
2636 	void *page = alloc_dentry_path();
2637 
2638 	*ret_file = NULL;
2639 
2640 	spin_lock(&tcon->open_file_lock);
2641 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2642 		struct cifsInodeInfo *cinode;
2643 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2644 		if (IS_ERR(full_path)) {
2645 			spin_unlock(&tcon->open_file_lock);
2646 			free_dentry_path(page);
2647 			return PTR_ERR(full_path);
2648 		}
2649 		if (strcmp(full_path, name))
2650 			continue;
2651 
2652 		cinode = CIFS_I(d_inode(cfile->dentry));
2653 		spin_unlock(&tcon->open_file_lock);
2654 		free_dentry_path(page);
2655 		*ret_file = find_readable_file(cinode, 0);
2656 		if (*ret_file) {
2657 			spin_lock(&cinode->open_file_lock);
2658 			if ((*ret_file)->status_file_deleted) {
2659 				spin_unlock(&cinode->open_file_lock);
2660 				cifsFileInfo_put(*ret_file);
2661 				*ret_file = NULL;
2662 			} else {
2663 				spin_unlock(&cinode->open_file_lock);
2664 			}
2665 		}
2666 		return *ret_file ? 0 : -ENOENT;
2667 	}
2668 
2669 	spin_unlock(&tcon->open_file_lock);
2670 	free_dentry_path(page);
2671 	return -ENOENT;
2672 }
2673 
2674 /*
2675  * Flush data on a strict file.
2676  */
2677 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2678 		      int datasync)
2679 {
2680 	unsigned int xid;
2681 	int rc = 0;
2682 	struct cifs_tcon *tcon;
2683 	struct TCP_Server_Info *server;
2684 	struct cifsFileInfo *smbfile = file->private_data;
2685 	struct inode *inode = file_inode(file);
2686 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2687 
2688 	rc = file_write_and_wait_range(file, start, end);
2689 	if (rc) {
2690 		trace_cifs_fsync_err(inode->i_ino, rc);
2691 		return rc;
2692 	}
2693 
2694 	xid = get_xid();
2695 
2696 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2697 		 file, datasync);
2698 
2699 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2700 		rc = cifs_zap_mapping(inode);
2701 		if (rc) {
2702 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2703 			rc = 0; /* don't care about it in fsync */
2704 		}
2705 	}
2706 
2707 	tcon = tlink_tcon(smbfile->tlink);
2708 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2709 		server = tcon->ses->server;
2710 		if (server->ops->flush == NULL) {
2711 			rc = -ENOSYS;
2712 			goto strict_fsync_exit;
2713 		}
2714 
2715 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2716 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2717 			if (smbfile) {
2718 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2719 				cifsFileInfo_put(smbfile);
2720 			} else
2721 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2722 		} else
2723 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2724 	}
2725 
2726 strict_fsync_exit:
2727 	free_xid(xid);
2728 	return rc;
2729 }
2730 
2731 /*
2732  * Flush data on a non-strict file.
2733  */
2734 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2735 {
2736 	unsigned int xid;
2737 	int rc = 0;
2738 	struct cifs_tcon *tcon;
2739 	struct TCP_Server_Info *server;
2740 	struct cifsFileInfo *smbfile = file->private_data;
2741 	struct inode *inode = file_inode(file);
2742 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2743 
2744 	rc = file_write_and_wait_range(file, start, end);
2745 	if (rc) {
2746 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2747 		return rc;
2748 	}
2749 
2750 	xid = get_xid();
2751 
2752 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2753 		 file, datasync);
2754 
2755 	tcon = tlink_tcon(smbfile->tlink);
2756 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2757 		server = tcon->ses->server;
2758 		if (server->ops->flush == NULL) {
2759 			rc = -ENOSYS;
2760 			goto fsync_exit;
2761 		}
2762 
2763 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2764 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2765 			if (smbfile) {
2766 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2767 				cifsFileInfo_put(smbfile);
2768 			} else
2769 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2770 		} else
2771 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2772 	}
2773 
2774 fsync_exit:
2775 	free_xid(xid);
2776 	return rc;
2777 }
2778 
2779 /*
2780  * As the file closes, flush all cached write data for this inode, checking
2781  * for write-behind errors.
2782  */
2783 int cifs_flush(struct file *file, fl_owner_t id)
2784 {
2785 	struct inode *inode = file_inode(file);
2786 	int rc = 0;
2787 
2788 	if (file->f_mode & FMODE_WRITE)
2789 		rc = filemap_write_and_wait(inode->i_mapping);
2790 
2791 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2792 	if (rc) {
2793 		/* get more nuanced writeback errors */
2794 		rc = filemap_check_wb_err(file->f_mapping, 0);
2795 		trace_cifs_flush_err(inode->i_ino, rc);
2796 	}
2797 	return rc;
2798 }
2799 
2800 static ssize_t
2801 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2802 {
2803 	struct file *file = iocb->ki_filp;
2804 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2805 	struct inode *inode = file->f_mapping->host;
2806 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2807 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2808 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2809 	ssize_t rc;
2810 
2811 	rc = netfs_start_io_write(inode);
2812 	if (rc < 0)
2813 		return rc;
2814 
2815 	/*
2816 	 * We need to hold the sem to be sure nobody modifies lock list
2817 	 * with a brlock that prevents writing.
2818 	 */
2819 	down_read(&cinode->lock_sem);
2820 
2821 	rc = generic_write_checks(iocb, from);
2822 	if (rc <= 0)
2823 		goto out;
2824 
2825 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2826 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2827 				     server->vals->exclusive_lock_type, 0,
2828 				     NULL, CIFS_WRITE_OP))) {
2829 		rc = -EACCES;
2830 		goto out;
2831 	}
2832 
2833 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2834 
2835 out:
2836 	up_read(&cinode->lock_sem);
2837 	netfs_end_io_write(inode);
2838 	if (rc > 0)
2839 		rc = generic_write_sync(iocb, rc);
2840 	return rc;
2841 }
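/*
 * Editorial note: lock_sem is held for read across the buffered write so no
 * brlock can be added mid-write; the CIFS_WRITE_OP check means that, per
 * cifs_find_fid_lock_conflict() above, even a shared lock held through the
 * same fid blocks the write when mandatory byte-range lock semantics are in
 * force (CIFS_MOUNT_NOPOSIXBRL).
 */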
2842 
2843 ssize_t
2844 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2845 {
2846 	struct inode *inode = file_inode(iocb->ki_filp);
2847 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2848 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2849 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2850 						iocb->ki_filp->private_data;
2851 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2852 	ssize_t written;
2853 
2854 	written = cifs_get_writer(cinode);
2855 	if (written)
2856 		return written;
2857 
2858 	if (CIFS_CACHE_WRITE(cinode)) {
2859 		if (cap_unix(tcon->ses) &&
2860 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2861 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2862 			written = netfs_file_write_iter(iocb, from);
2863 			goto out;
2864 		}
2865 		written = cifs_writev(iocb, from);
2866 		goto out;
2867 	}
2868 	/*
2869 	 * For non-oplocked files in strict cache mode we need to write the data
2870 	 * to the server exactly from pos to pos+len-1 rather than flush all
2871 	 * affected pages, because doing so may cause an error with mandatory
2872 	 * locks on these pages but not on the region from pos to pos+len-1.
2873 	 */
2874 	written = netfs_file_write_iter(iocb, from);
2875 	if (CIFS_CACHE_READ(cinode)) {
2876 		/*
2877 		 * We have read level caching and we have just sent a write
2878 		 * request to the server thus making data in the cache stale.
2879 		 * Zap the cache and set oplock/lease level to NONE to avoid
2880 		 * reading stale data from the cache. All subsequent read
2881 		 * operations will read new data from the server.
2882 		 */
2883 		cifs_zap_mapping(inode);
2884 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2885 			 inode);
2886 		cinode->oplock = 0;
2887 	}
2888 out:
2889 	cifs_put_writer(cinode);
2890 	return written;
2891 }
2892 
2893 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2894 {
2895 	ssize_t rc;
2896 	struct inode *inode = file_inode(iocb->ki_filp);
2897 
2898 	if (iocb->ki_flags & IOCB_DIRECT)
2899 		return netfs_unbuffered_read_iter(iocb, iter);
2900 
2901 	rc = cifs_revalidate_mapping(inode);
2902 	if (rc)
2903 		return rc;
2904 
2905 	return netfs_file_read_iter(iocb, iter);
2906 }
2907 
2908 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2909 {
2910 	struct inode *inode = file_inode(iocb->ki_filp);
2911 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2912 	ssize_t written;
2913 	int rc;
2914 
2915 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2916 		written = netfs_unbuffered_write_iter(iocb, from);
2917 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2918 			cifs_zap_mapping(inode);
2919 			cifs_dbg(FYI,
2920 				 "Set no oplock for inode=%p after a write operation\n",
2921 				 inode);
2922 			cinode->oplock = 0;
2923 		}
2924 		return written;
2925 	}
2926 
2927 	written = cifs_get_writer(cinode);
2928 	if (written)
2929 		return written;
2930 
2931 	written = netfs_file_write_iter(iocb, from);
2932 
2933 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2934 		rc = filemap_fdatawrite(inode->i_mapping);
2935 		if (rc)
2936 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2937 				 rc, inode);
2938 	}
2939 
2940 	cifs_put_writer(cinode);
2941 	return written;
2942 }
2943 
2944 ssize_t
2945 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2946 {
2947 	struct inode *inode = file_inode(iocb->ki_filp);
2948 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2949 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2950 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2951 						iocb->ki_filp->private_data;
2952 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2953 	int rc = -EACCES;
2954 
2955 	/*
2956 	 * In strict cache mode we need to read from the server all the time
2957 	 * if we don't have a level II oplock, because the server can delay the
2958 	 * mtime change - so we can't make a decision about invalidating the
2959 	 * inode. We can also fail with page reading if there are mandatory locks
2960 	 * on pages affected by this read but not on the region from pos to
2961 	 * pos+len-1.
2962 	 */
2963 	if (!CIFS_CACHE_READ(cinode))
2964 		return netfs_unbuffered_read_iter(iocb, to);
2965 
2966 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2967 		if (iocb->ki_flags & IOCB_DIRECT)
2968 			return netfs_unbuffered_read_iter(iocb, to);
2969 		return netfs_buffered_read_iter(iocb, to);
2970 	}
2971 
2972 	/*
2973 	 * We need to hold the sem to be sure nobody modifies lock list
2974 	 * with a brlock that prevents reading.
2975 	 */
2976 	if (iocb->ki_flags & IOCB_DIRECT) {
2977 		rc = netfs_start_io_direct(inode);
2978 		if (rc < 0)
2979 			goto out;
2980 		rc = -EACCES;
2981 		down_read(&cinode->lock_sem);
2982 		if (!cifs_find_lock_conflict(
2983 			    cfile, iocb->ki_pos, iov_iter_count(to),
2984 			    tcon->ses->server->vals->shared_lock_type,
2985 			    0, NULL, CIFS_READ_OP))
2986 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2987 		up_read(&cinode->lock_sem);
2988 		netfs_end_io_direct(inode);
2989 	} else {
2990 		rc = netfs_start_io_read(inode);
2991 		if (rc < 0)
2992 			goto out;
2993 		rc = -EACCES;
2994 		down_read(&cinode->lock_sem);
2995 		if (!cifs_find_lock_conflict(
2996 			    cfile, iocb->ki_pos, iov_iter_count(to),
2997 			    tcon->ses->server->vals->shared_lock_type,
2998 			    0, NULL, CIFS_READ_OP))
2999 			rc = filemap_read(iocb, to, 0);
3000 		up_read(&cinode->lock_sem);
3001 		netfs_end_io_read(inode);
3002 	}
3003 out:
3004 	return rc;
3005 }
3006 
3007 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
3008 {
3009 	return netfs_page_mkwrite(vmf, NULL);
3010 }
3011 
3012 static const struct vm_operations_struct cifs_file_vm_ops = {
3013 	.fault = filemap_fault,
3014 	.map_pages = filemap_map_pages,
3015 	.page_mkwrite = cifs_page_mkwrite,
3016 };
3017 
3018 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3019 {
3020 	int xid, rc = 0;
3021 	struct inode *inode = file_inode(desc->file);
3022 
3023 	xid = get_xid();
3024 
3025 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3026 		rc = cifs_zap_mapping(inode);
3027 	if (!rc)
3028 		rc = generic_file_mmap_prepare(desc);
3029 	if (!rc)
3030 		desc->vm_ops = &cifs_file_vm_ops;
3031 
3032 	free_xid(xid);
3033 	return rc;
3034 }
3035 
3036 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3037 {
3038 	int rc, xid;
3039 
3040 	xid = get_xid();
3041 
3042 	rc = cifs_revalidate_file(desc->file);
3043 	if (rc)
3044 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3045 			 rc);
3046 	if (!rc)
3047 		rc = generic_file_mmap_prepare(desc);
3048 	if (!rc)
3049 		desc->vm_ops = &cifs_file_vm_ops;
3050 
3051 	free_xid(xid);
3052 	return rc;
3053 }
3054 
3055 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3056 {
3057 	struct cifsFileInfo *open_file;
3058 
3059 	spin_lock(&cifs_inode->open_file_lock);
3060 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3061 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3062 			spin_unlock(&cifs_inode->open_file_lock);
3063 			return 1;
3064 		}
3065 	}
3066 	spin_unlock(&cifs_inode->open_file_lock);
3067 	return 0;
3068 }
3069 
3070 /* We do not want to update the file size from the server for inodes
3071    open for write - to avoid races with writepage extending the file.
3072    In the future we could consider allowing the inode to be refreshed
3073    only on increases in the file size, but this is tricky to do without
3074    racing with writebehind page caching in the current Linux kernel
3075    design */
3076 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3077 			    bool from_readdir)
3078 {
3079 	if (!cifsInode)
3080 		return true;
3081 
3082 	if (is_inode_writable(cifsInode) ||
3083 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3084 		/* This inode is open for write at least once */
3085 		struct cifs_sb_info *cifs_sb;
3086 
3087 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3088 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3089 			/* since there is no page cache to corrupt on
3090 			   direct I/O, we can change the size safely */
3091 			return true;
3092 		}
3093 
3094 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3095 			return true;
3096 
3097 		return false;
3098 	} else
3099 		return true;
3100 }
3101 
3102 void cifs_oplock_break(struct work_struct *work)
3103 {
3104 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3105 						  oplock_break);
3106 	struct inode *inode = d_inode(cfile->dentry);
3107 	struct super_block *sb = inode->i_sb;
3108 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
3109 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3110 	struct cifs_tcon *tcon;
3111 	struct TCP_Server_Info *server;
3112 	struct tcon_link *tlink;
3113 	int rc = 0;
3114 	bool purge_cache = false, oplock_break_cancelled;
3115 	__u64 persistent_fid, volatile_fid;
3116 	__u16 net_fid;
3117 
3118 	/*
3119 	 * Hold a reference to the superblock to prevent it and its inodes from
3120 	 * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
3121 	 * may release the last reference to the sb and trigger inode eviction.
3122 	 */
3123 	cifs_sb_active(sb);
3124 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3125 			TASK_UNINTERRUPTIBLE);
3126 
3127 	tlink = cifs_sb_tlink(cifs_sb);
3128 	if (IS_ERR(tlink))
3129 		goto out;
3130 	tcon = tlink_tcon(tlink);
3131 	server = tcon->ses->server;
3132 
3133 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3134 				      cfile->oplock_epoch, &purge_cache);
3135 
3136 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3137 						cifs_has_mand_locks(cinode)) {
3138 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3139 			 inode);
3140 		cinode->oplock = 0;
3141 	}
3142 
3143 	if (S_ISREG(inode->i_mode)) {
3144 		if (CIFS_CACHE_READ(cinode))
3145 			break_lease(inode, O_RDONLY);
3146 		else
3147 			break_lease(inode, O_WRONLY);
3148 		rc = filemap_fdatawrite(inode->i_mapping);
3149 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3150 			rc = filemap_fdatawait(inode->i_mapping);
3151 			mapping_set_error(inode->i_mapping, rc);
3152 			cifs_zap_mapping(inode);
3153 		}
3154 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3155 		if (CIFS_CACHE_WRITE(cinode))
3156 			goto oplock_break_ack;
3157 	}
3158 
3159 	rc = cifs_push_locks(cfile);
3160 	if (rc)
3161 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3162 
3163 oplock_break_ack:
3164 	/*
3165 	 * When an oplock break is received and there are no active file
3166 	 * handles, only cached ones, schedule the deferred close immediately
3167 	 * so that a new open will not use a cached handle.
3168 	 */
3169 
3170 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3171 		cifs_close_deferred_file(cinode);
3172 
3173 	persistent_fid = cfile->fid.persistent_fid;
3174 	volatile_fid = cfile->fid.volatile_fid;
3175 	net_fid = cfile->fid.netfid;
3176 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3177 
3178 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3179 	/*
3180 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3181 	 * an acknowledgment to be sent when the file has already been closed.
3182 	 */
3183 	spin_lock(&cinode->open_file_lock);
3184 	/* check list empty since can race with kill_sb calling tree disconnect */
3185 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3186 		spin_unlock(&cinode->open_file_lock);
3187 		rc = server->ops->oplock_response(tcon, persistent_fid,
3188 						  volatile_fid, net_fid, cinode);
3189 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3190 	} else
3191 		spin_unlock(&cinode->open_file_lock);
3192 
3193 	cifs_put_tlink(tlink);
3194 out:
3195 	cifs_done_oplock_break(cinode);
3196 	cifs_sb_deactive(sb);
3197 }
3198 
3199 static int cifs_swap_activate(struct swap_info_struct *sis,
3200 			      struct file *swap_file, sector_t *span)
3201 {
3202 	struct cifsFileInfo *cfile = swap_file->private_data;
3203 	struct inode *inode = swap_file->f_mapping->host;
3204 	unsigned long blocks;
3205 	long long isize;
3206 
3207 	cifs_dbg(FYI, "swap activate\n");
3208 
3209 	if (!swap_file->f_mapping->a_ops->swap_rw)
3210 		/* Cannot support swap */
3211 		return -EINVAL;
3212 
3213 	spin_lock(&inode->i_lock);
3214 	blocks = inode->i_blocks;
3215 	isize = inode->i_size;
3216 	spin_unlock(&inode->i_lock);
3217 	if (blocks*512 < isize) {
3218 		pr_warn("swap activate: swapfile has holes\n");
3219 		return -EINVAL;
3220 	}
3221 	*span = sis->pages;
3222 
3223 	pr_warn_once("Swap support over SMB3 is experimental\n");
3224 
3225 	/*
3226 	 * TODO: consider adding ACL (or documenting how) to prevent other
3227 	 * users (on this or other systems) from reading it
3228 	 */
3229 
3230 
3231 	/* TODO: add sk_set_memalloc(inet) or similar */
3232 
3233 	if (cfile)
3234 		cfile->swapfile = true;
3235 	/*
3236 	 * TODO: Since file already open, we can't open with DENY_ALL here
3237 	 * but we could add call to grab a byte range lock to prevent others
3238 	 * from reading or writing the file
3239 	 */
3240 
3241 	sis->flags |= SWP_FS_OPS;
3242 	return add_swap_extent(sis, 0, sis->max, 0);
3243 }
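/*
 * Editorial sketch of the holes check above: i_blocks counts 512-byte
 * units, so a fully allocated file satisfies blocks * 512 >= i_size.
 * For example, an 8192-byte file needs at least 16 blocks; if only 8 are
 * accounted for (8 * 512 = 4096 < 8192), the file is treated as having
 * holes and rejected as a swapfile.
 */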
3244 
3245 static void cifs_swap_deactivate(struct file *file)
3246 {
3247 	struct cifsFileInfo *cfile = file->private_data;
3248 
3249 	cifs_dbg(FYI, "swap deactivate\n");
3250 
3251 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3252 
3253 	if (cfile)
3254 		cfile->swapfile = false;
3255 
3256 	/* do we need to unpin (or unlock) the file */
3257 }
3258 
3259 /**
3260  * cifs_swap_rw - SMB3 address space operation for swap I/O
3261  * @iocb: target I/O control block
3262  * @iter: I/O buffer
3263  *
3264  * Perform IO to the swap-file.  This is much like direct IO.
3265  */
3266 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3267 {
3268 	ssize_t ret;
3269 
3270 	if (iov_iter_rw(iter) == READ)
3271 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3272 	else
3273 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3274 	if (ret < 0)
3275 		return ret;
3276 	return 0;
3277 }
3278 
3279 const struct address_space_operations cifs_addr_ops = {
3280 	.read_folio	= netfs_read_folio,
3281 	.readahead	= netfs_readahead,
3282 	.writepages	= netfs_writepages,
3283 	.dirty_folio	= netfs_dirty_folio,
3284 	.release_folio	= netfs_release_folio,
3285 	.direct_IO	= noop_direct_IO,
3286 	.invalidate_folio = netfs_invalidate_folio,
3287 	.migrate_folio	= filemap_migrate_folio,
3288 	/*
3289 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3290 	 * helper if needed
3291 	 */
3292 	.swap_activate	= cifs_swap_activate,
3293 	.swap_deactivate = cifs_swap_deactivate,
3294 	.swap_rw = cifs_swap_rw,
3295 };
3296 
3297 /*
3298  * cifs_readahead requires the server to support a buffer large enough to
3299  * contain the header plus one complete page of data.  Otherwise, we need
3300  * to leave cifs_readahead out of the address space operations.
3301  */
3302 const struct address_space_operations cifs_addr_ops_smallbuf = {
3303 	.read_folio	= netfs_read_folio,
3304 	.writepages	= netfs_writepages,
3305 	.dirty_folio	= netfs_dirty_folio,
3306 	.release_folio	= netfs_release_folio,
3307 	.invalidate_folio = netfs_invalidate_folio,
3308 	.migrate_folio	= filemap_migrate_folio,
3309 };
3310