xref: /linux/fs/smb/client/file.c (revision 49ac6f05ace5bb0070c68a0193aa05d3c25d4c83)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct TCP_Server_Info *server;
53 	struct cifsFileInfo *open_file = req->cfile;
54 	size_t wsize = req->rreq.wsize;
55 	int rc;
56 
57 	if (!wdata->have_xid) {
58 		wdata->xid = get_xid();
59 		wdata->have_xid = true;
60 	}
61 
62 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
63 	wdata->server = server;
64 
65 retry:
66 	if (open_file->invalidHandle) {
67 		rc = cifs_reopen_file(open_file, false);
68 		if (rc < 0) {
69 			if (rc == -EAGAIN)
70 				goto retry;
71 			subreq->error = rc;
72 			return netfs_prepare_write_failed(subreq);
73 		}
74 	}
75 
76 	rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
77 					   &wdata->credits);
78 	if (rc < 0) {
79 		subreq->error = rc;
80 		return netfs_prepare_write_failed(subreq);
81 	}
82 
83 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
84 	wdata->credits.rreq_debug_index = subreq->debug_index;
85 	wdata->credits.in_flight_check = 1;
86 	trace_smb3_rw_credits(wdata->rreq->debug_id,
87 			      wdata->subreq.debug_index,
88 			      wdata->credits.value,
89 			      server->credits, server->in_flight,
90 			      wdata->credits.value,
91 			      cifs_trace_rw_credits_write_prepare);
92 
93 #ifdef CONFIG_CIFS_SMB_DIRECT
94 	if (server->smbd_conn)
95 		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
96 #endif
97 }
98 
99 /*
100  * Issue a subrequest to upload to the server.
101  */
102 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
103 {
104 	struct cifs_io_subrequest *wdata =
105 		container_of(subreq, struct cifs_io_subrequest, subreq);
106 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
107 	int rc;
108 
109 	if (cifs_forced_shutdown(sbi)) {
110 		rc = -EIO;
111 		goto fail;
112 	}
113 
114 	wdata->actual_len = wdata->subreq.len;
115 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
116 	if (rc)
117 		goto fail;
118 
119 	rc = -EAGAIN;
120 	if (wdata->req->cfile->invalidHandle)
121 		goto fail;
122 
123 	wdata->server->ops->async_writev(wdata);
124 out:
125 	return;
126 
127 fail:
128 	if (rc == -EAGAIN)
129 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
130 	else
131 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
132 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
133 	cifs_write_subrequest_terminated(wdata, rc, false);
134 	goto out;
135 }
136 
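/*
 * Invalidate the locally cached data (e.g. fscache contents) for the inode
 * behind this write request.
 */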
137 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
138 {
139 	cifs_invalidate_cache(wreq->inode, 0);
140 }
141 
142 /*
143  * Split the read up according to how many credits we can get for each piece.
144  * It's okay to sleep here if we need to wait for more credit to become
145  * available.
146  *
147  * We also choose the server and allocate an operation ID to be cleaned up
148  * later.
149  */
150 static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
151 {
152 	struct netfs_io_request *rreq = subreq->rreq;
153 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
154 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
155 	struct TCP_Server_Info *server = req->server;
156 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
157 	size_t rsize;
158 	int rc;
159 
160 	rdata->xid = get_xid();
161 	rdata->have_xid = true;
162 	rdata->server = server;
163 
164 	if (cifs_sb->ctx->rsize == 0)
165 		cifs_sb->ctx->rsize =
166 			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
167 						     cifs_sb->ctx);
168 
170 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
171 					   &rsize, &rdata->credits);
172 	if (rc) {
173 		subreq->error = rc;
174 		return false;
175 	}
176 
177 	rdata->credits.in_flight_check = 1;
178 	rdata->credits.rreq_debug_id = rreq->debug_id;
179 	rdata->credits.rreq_debug_index = subreq->debug_index;
180 
181 	trace_smb3_rw_credits(rdata->rreq->debug_id,
182 			      rdata->subreq.debug_index,
183 			      rdata->credits.value,
184 			      server->credits, server->in_flight, 0,
185 			      cifs_trace_rw_credits_read_submit);
186 
187 	subreq->len = umin(subreq->len, rsize);
188 	rdata->actual_len = subreq->len;
189 
190 #ifdef CONFIG_CIFS_SMB_DIRECT
191 	if (server->smbd_conn)
192 		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
193 #endif
194 	return true;
195 }
196 
197 /*
198  * Issue a read operation on behalf of the netfs helper functions.  We're asked
199  * to make a read of a certain size at a point in the file.  We are permitted
200  * to only read a portion of that, but as long as we read something, the netfs
201  * helper will call us again so that we can issue another read.
202  */
203 static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
204 {
205 	struct netfs_io_request *rreq = subreq->rreq;
206 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
207 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
208 	struct TCP_Server_Info *server = req->server;
209 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
210 	int rc = 0;
211 
212 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
213 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
214 		 subreq->transferred, subreq->len);
215 
216 	if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
217 		/*
218 		 * As we're issuing a retry, we need to negotiate some new
219 		 * credits otherwise the server may reject the op with
220 		 * INVALID_PARAMETER.  Note, however, we may get back less
221 		 * credit than we need to complete the op, in which case, we
222 		 * shorten the op and rely on additional rounds of retry.
223 		 */
224 		size_t rsize = umin(subreq->len - subreq->transferred,
225 				    cifs_sb->ctx->rsize);
226 
227 		rc = server->ops->wait_mtu_credits(server, rsize, &rdata->actual_len,
228 						   &rdata->credits);
229 		if (rc)
230 			goto out;
231 
232 		rdata->credits.in_flight_check = 1;
233 
234 		trace_smb3_rw_credits(rdata->rreq->debug_id,
235 				      rdata->subreq.debug_index,
236 				      rdata->credits.value,
237 				      server->credits, server->in_flight, 0,
238 				      cifs_trace_rw_credits_read_resubmit);
239 	}
240 
241 	if (req->cfile->invalidHandle) {
242 		do {
243 			rc = cifs_reopen_file(req->cfile, true);
244 		} while (rc == -EAGAIN);
245 		if (rc)
246 			goto out;
247 	}
248 
249 	if (subreq->rreq->origin != NETFS_DIO_READ)
250 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
251 
252 	rc = rdata->server->ops->async_readv(rdata);
253 out:
254 	if (rc)
255 		netfs_subreq_terminated(subreq, rc, false);
256 }
257 
258 /*
259  * Writeback calls this when it finds a folio that needs uploading.  This isn't
260  * called if writeback only has copy-to-cache to deal with.
261  */
262 static void cifs_begin_writeback(struct netfs_io_request *wreq)
263 {
264 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
265 	int ret;
266 
267 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
268 	if (ret) {
269 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
270 		return;
271 	}
272 
273 	wreq->io_streams[0].avail = true;
274 }
275 
276 /*
277  * Initialise a request.
278  */
279 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
280 {
281 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
282 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
283 	struct cifsFileInfo *open_file = NULL;
284 
285 	rreq->rsize = cifs_sb->ctx->rsize;
286 	rreq->wsize = cifs_sb->ctx->wsize;
287 	req->pid = current->tgid; // Ummm...  This may be a workqueue
288 
289 	if (file) {
290 		open_file = file->private_data;
291 		rreq->netfs_priv = file->private_data;
292 		req->cfile = cifsFileInfo_get(open_file);
293 		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
294 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
295 			req->pid = req->cfile->pid;
296 	} else if (rreq->origin != NETFS_WRITEBACK) {
297 		WARN_ON_ONCE(1);
298 		return -EIO;
299 	}
300 
301 	return 0;
302 }
303 
304 /*
305  * Completion of a request operation.
306  */
307 static void cifs_rreq_done(struct netfs_io_request *rreq)
308 {
309 	struct timespec64 atime, mtime;
310 	struct inode *inode = rreq->inode;
311 
312 	/* we do not want atime to be less than mtime; that broke some apps */
313 	atime = inode_set_atime_to_ts(inode, current_time(inode));
314 	mtime = inode_get_mtime(inode);
315 	if (timespec64_compare(&atime, &mtime) < 0)
316 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
317 }
318 
319 static void cifs_post_modify(struct inode *inode)
320 {
321 	/* Indication to update ctime and mtime as close is deferred */
322 	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
323 }
324 
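/* Free a request: drop the reference taken on the cached open file, if any. */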
325 static void cifs_free_request(struct netfs_io_request *rreq)
326 {
327 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
328 
329 	if (req->cfile)
330 		cifsFileInfo_put(req->cfile);
331 }
332 
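/*
 * Free a subrequest: deregister any RDMA memory registration, return any
 * credits still held to the server, and release the transaction id.
 */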
333 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
334 {
335 	struct cifs_io_subrequest *rdata =
336 		container_of(subreq, struct cifs_io_subrequest, subreq);
337 	int rc = subreq->error;
338 
339 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
340 #ifdef CONFIG_CIFS_SMB_DIRECT
341 		if (rdata->mr) {
342 			smbd_deregister_mr(rdata->mr);
343 			rdata->mr = NULL;
344 		}
345 #endif
346 	}
347 
348 	if (rdata->credits.value != 0) {
349 		trace_smb3_rw_credits(rdata->rreq->debug_id,
350 				      rdata->subreq.debug_index,
351 				      rdata->credits.value,
352 				      rdata->server ? rdata->server->credits : 0,
353 				      rdata->server ? rdata->server->in_flight : 0,
354 				      -rdata->credits.value,
355 				      cifs_trace_rw_credits_free_subreq);
356 		if (rdata->server)
357 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
358 		else
359 			rdata->credits.value = 0;
360 	}
361 
362 	if (rdata->have_xid)
363 		free_xid(rdata->xid);
364 }
365 
366 const struct netfs_request_ops cifs_req_ops = {
367 	.request_pool		= &cifs_io_request_pool,
368 	.subrequest_pool	= &cifs_io_subrequest_pool,
369 	.init_request		= cifs_init_request,
370 	.free_request		= cifs_free_request,
371 	.free_subrequest	= cifs_free_subrequest,
372 	.clamp_length		= cifs_clamp_length,
373 	.issue_read		= cifs_req_issue_read,
374 	.done			= cifs_rreq_done,
375 	.post_modify		= cifs_post_modify,
376 	.begin_writeback	= cifs_begin_writeback,
377 	.prepare_write		= cifs_prepare_write,
378 	.issue_write		= cifs_issue_write,
379 	.invalidate_cache	= cifs_netfs_invalidate_cache,
380 };
381 
382 /*
383  * Mark all open files on tree connections as invalid, since they were
384  * closed when the session to the server was lost.
385  */
386 void
387 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
388 {
389 	struct cifsFileInfo *open_file = NULL;
390 	struct list_head *tmp;
391 	struct list_head *tmp1;
392 
393 	/* only send once per connect */
394 	spin_lock(&tcon->tc_lock);
395 	if (tcon->need_reconnect)
396 		tcon->status = TID_NEED_RECON;
397 
398 	if (tcon->status != TID_NEED_RECON) {
399 		spin_unlock(&tcon->tc_lock);
400 		return;
401 	}
402 	tcon->status = TID_IN_FILES_INVALIDATE;
403 	spin_unlock(&tcon->tc_lock);
404 
405 	/* list all files open on tree connection and mark them invalid */
406 	spin_lock(&tcon->open_file_lock);
407 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
408 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
409 		open_file->invalidHandle = true;
410 		open_file->oplock_break_cancelled = true;
411 	}
412 	spin_unlock(&tcon->open_file_lock);
413 
414 	invalidate_all_cached_dirs(tcon);
415 	spin_lock(&tcon->tc_lock);
416 	if (tcon->status == TID_IN_FILES_INVALIDATE)
417 		tcon->status = TID_NEED_TCON;
418 	spin_unlock(&tcon->tc_lock);
419 
420 	/*
421 	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
422 	 * to this tcon.
423 	 */
424 }
425 
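/*
 * Map the POSIX open flags' access mode to NT desired-access bits.  When
 * @rdwr_for_fscache is 1, a write-only open is widened to read+write so that
 * the local cache can fill in around partial writes (see cifs_nt_open()).
 */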
426 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
427 {
428 	if ((flags & O_ACCMODE) == O_RDONLY)
429 		return GENERIC_READ;
430 	else if ((flags & O_ACCMODE) == O_WRONLY)
431 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
432 	else if ((flags & O_ACCMODE) == O_RDWR) {
433 		/* GENERIC_ALL is too much permission to request; it can
434 		   cause an unnecessary access-denied error on create */
435 		/* return GENERIC_ALL; */
436 		return (GENERIC_READ | GENERIC_WRITE);
437 	}
438 
439 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
440 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
441 		FILE_READ_DATA);
442 }
443 
444 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
445 static u32 cifs_posix_convert_flags(unsigned int flags)
446 {
447 	u32 posix_flags = 0;
448 
449 	if ((flags & O_ACCMODE) == O_RDONLY)
450 		posix_flags = SMB_O_RDONLY;
451 	else if ((flags & O_ACCMODE) == O_WRONLY)
452 		posix_flags = SMB_O_WRONLY;
453 	else if ((flags & O_ACCMODE) == O_RDWR)
454 		posix_flags = SMB_O_RDWR;
455 
456 	if (flags & O_CREAT) {
457 		posix_flags |= SMB_O_CREAT;
458 		if (flags & O_EXCL)
459 			posix_flags |= SMB_O_EXCL;
460 	} else if (flags & O_EXCL)
461 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
462 			 current->comm, current->tgid);
463 
464 	if (flags & O_TRUNC)
465 		posix_flags |= SMB_O_TRUNC;
466 	/* be safe and imply O_SYNC for O_DSYNC */
467 	if (flags & O_DSYNC)
468 		posix_flags |= SMB_O_SYNC;
469 	if (flags & O_DIRECTORY)
470 		posix_flags |= SMB_O_DIRECTORY;
471 	if (flags & O_NOFOLLOW)
472 		posix_flags |= SMB_O_NOFOLLOW;
473 	if (flags & O_DIRECT)
474 		posix_flags |= SMB_O_DIRECT;
475 
476 	return posix_flags;
477 }
478 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
479 
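/*
 * Map O_CREAT/O_EXCL/O_TRUNC combinations to an SMB create disposition; the
 * same mapping is tabulated in the comment block inside cifs_nt_open() below.
 */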
480 static inline int cifs_get_disposition(unsigned int flags)
481 {
482 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
483 		return FILE_CREATE;
484 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
485 		return FILE_OVERWRITE_IF;
486 	else if ((flags & O_CREAT) == O_CREAT)
487 		return FILE_OPEN_IF;
488 	else if ((flags & O_TRUNC) == O_TRUNC)
489 		return FILE_OVERWRITE;
490 	else
491 		return FILE_OPEN;
492 }
493 
494 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
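/*
 * Open a file using the SMB1 POSIX extensions: translate the open flags,
 * issue CIFSPOSIXCreate and, if the caller asked for it, instantiate or
 * refresh the inode from the returned FILE_UNIX_BASIC_INFO.
 */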
495 int cifs_posix_open(const char *full_path, struct inode **pinode,
496 			struct super_block *sb, int mode, unsigned int f_flags,
497 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
498 {
499 	int rc;
500 	FILE_UNIX_BASIC_INFO *presp_data;
501 	__u32 posix_flags = 0;
502 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
503 	struct cifs_fattr fattr;
504 	struct tcon_link *tlink;
505 	struct cifs_tcon *tcon;
506 
507 	cifs_dbg(FYI, "posix open %s\n", full_path);
508 
509 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
510 	if (presp_data == NULL)
511 		return -ENOMEM;
512 
513 	tlink = cifs_sb_tlink(cifs_sb);
514 	if (IS_ERR(tlink)) {
515 		rc = PTR_ERR(tlink);
516 		goto posix_open_ret;
517 	}
518 
519 	tcon = tlink_tcon(tlink);
520 	mode &= ~current_umask();
521 
522 	posix_flags = cifs_posix_convert_flags(f_flags);
523 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
524 			     poplock, full_path, cifs_sb->local_nls,
525 			     cifs_remap(cifs_sb));
526 	cifs_put_tlink(tlink);
527 
528 	if (rc)
529 		goto posix_open_ret;
530 
531 	if (presp_data->Type == cpu_to_le32(-1))
532 		goto posix_open_ret; /* open ok, caller does qpathinfo */
533 
534 	if (!pinode)
535 		goto posix_open_ret; /* caller does not need info */
536 
537 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
538 
539 	/* get new inode and set it up */
540 	if (*pinode == NULL) {
541 		cifs_fill_uniqueid(sb, &fattr);
542 		*pinode = cifs_iget(sb, &fattr);
543 		if (!*pinode) {
544 			rc = -ENOMEM;
545 			goto posix_open_ret;
546 		}
547 	} else {
548 		cifs_revalidate_mapping(*pinode);
549 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
550 	}
551 
552 posix_open_ret:
553 	kfree(presp_data);
554 	return rc;
555 }
556 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
557 
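/*
 * Open a file using NT-style semantics: translate the POSIX open flags into
 * a desired access and create disposition, send the open to the server and
 * then refresh the inode info, closing the handle again if that fails.
 */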
558 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
559 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
560 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
561 {
562 	int rc;
563 	int desired_access;
564 	int disposition;
565 	int create_options = CREATE_NOT_DIR;
566 	struct TCP_Server_Info *server = tcon->ses->server;
567 	struct cifs_open_parms oparms;
568 	int rdwr_for_fscache = 0;
569 
570 	if (!server->ops->open)
571 		return -ENOSYS;
572 
573 	/* If we're caching, we need to be able to fill in around partial writes. */
574 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
575 		rdwr_for_fscache = 1;
576 
577 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
578 
579 /*********************************************************************
580  *  open flag mapping table:
581  *
582  *	POSIX Flag            CIFS Disposition
583  *	----------            ----------------
584  *	O_CREAT               FILE_OPEN_IF
585  *	O_CREAT | O_EXCL      FILE_CREATE
586  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
587  *	O_TRUNC               FILE_OVERWRITE
588  *	none of the above     FILE_OPEN
589  *
590  *	Note that there is no direct POSIX-flag match for the disposition
591  *	FILE_SUPERSEDE (ie create whether or not the file exists).
592  *	O_CREAT | O_TRUNC is similar, but it truncates the existing
593  *	file rather than replacing it as FILE_SUPERSEDE does (which
594  *	applies the attributes / metadata passed in on the open call).
595  *?
596  *?  O_SYNC is a reasonable match to CIFS writethrough flag
597  *?  and the read write flags match reasonably.  O_LARGEFILE
598  *?  is irrelevant because largefile support is always used
599  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
600  *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
601  *********************************************************************/
602 
603 	disposition = cifs_get_disposition(f_flags);
604 
605 	/* BB pass O_SYNC flag through on file attributes .. BB */
606 
607 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
608 	if (f_flags & O_SYNC)
609 		create_options |= CREATE_WRITE_THROUGH;
610 
611 	if (f_flags & O_DIRECT)
612 		create_options |= CREATE_NO_BUFFER;
613 
614 retry_open:
615 	oparms = (struct cifs_open_parms) {
616 		.tcon = tcon,
617 		.cifs_sb = cifs_sb,
618 		.desired_access = desired_access,
619 		.create_options = cifs_create_options(cifs_sb, create_options),
620 		.disposition = disposition,
621 		.path = full_path,
622 		.fid = fid,
623 	};
624 
625 	rc = server->ops->open(xid, &oparms, oplock, buf);
626 	if (rc) {
627 		if (rc == -EACCES && rdwr_for_fscache == 1) {
628 			desired_access = cifs_convert_flags(f_flags, 0);
629 			rdwr_for_fscache = 2;
630 			goto retry_open;
631 		}
632 		return rc;
633 	}
634 	if (rdwr_for_fscache == 2)
635 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
636 
637 	/* TODO: Add support for calling posix query info but with passing in fid */
638 	if (tcon->unix_ext)
639 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
640 					      xid);
641 	else
642 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
643 					 xid, fid);
644 
645 	if (rc) {
646 		server->ops->close(xid, tcon, fid);
647 		if (rc == -ESTALE)
648 			rc = -EOPENSTALE;
649 	}
650 
651 	return rc;
652 }
653 
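/* Return true if any byte-range locks are currently recorded on the inode. */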
654 static bool
655 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
656 {
657 	struct cifs_fid_locks *cur;
658 	bool has_locks = false;
659 
660 	down_read(&cinode->lock_sem);
661 	list_for_each_entry(cur, &cinode->llist, llist) {
662 		if (!list_empty(&cur->locks)) {
663 			has_locks = true;
664 			break;
665 		}
666 	}
667 	up_read(&cinode->lock_sem);
668 	return has_locks;
669 }
670 
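/*
 * Take @sem for writing, polling with trylock every 10ms rather than
 * blocking in down_write().
 */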
671 void
672 cifs_down_write(struct rw_semaphore *sem)
673 {
674 	while (!down_write_trylock(sem))
675 		msleep(10);
676 }
677 
678 static void cifsFileInfo_put_work(struct work_struct *work);
679 void serverclose_work(struct work_struct *work);
680 
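/*
 * Allocate and initialise the cifsFileInfo for a newly opened file, hook it
 * into the per-inode and per-tcon open-file lists and attach it to @file.
 * Returns NULL on allocation failure, in which case the caller is expected
 * to close the server handle itself.
 */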
681 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
682 				       struct tcon_link *tlink, __u32 oplock,
683 				       const char *symlink_target)
684 {
685 	struct dentry *dentry = file_dentry(file);
686 	struct inode *inode = d_inode(dentry);
687 	struct cifsInodeInfo *cinode = CIFS_I(inode);
688 	struct cifsFileInfo *cfile;
689 	struct cifs_fid_locks *fdlocks;
690 	struct cifs_tcon *tcon = tlink_tcon(tlink);
691 	struct TCP_Server_Info *server = tcon->ses->server;
692 
693 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
694 	if (cfile == NULL)
695 		return cfile;
696 
697 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
698 	if (!fdlocks) {
699 		kfree(cfile);
700 		return NULL;
701 	}
702 
703 	if (symlink_target) {
704 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
705 		if (!cfile->symlink_target) {
706 			kfree(fdlocks);
707 			kfree(cfile);
708 			return NULL;
709 		}
710 	}
711 
712 	INIT_LIST_HEAD(&fdlocks->locks);
713 	fdlocks->cfile = cfile;
714 	cfile->llist = fdlocks;
715 
716 	cfile->count = 1;
717 	cfile->pid = current->tgid;
718 	cfile->uid = current_fsuid();
719 	cfile->dentry = dget(dentry);
720 	cfile->f_flags = file->f_flags;
721 	cfile->invalidHandle = false;
722 	cfile->deferred_close_scheduled = false;
723 	cfile->tlink = cifs_get_tlink(tlink);
724 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
725 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
726 	INIT_WORK(&cfile->serverclose, serverclose_work);
727 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
728 	mutex_init(&cfile->fh_mutex);
729 	spin_lock_init(&cfile->file_info_lock);
730 
731 	cifs_sb_active(inode->i_sb);
732 
733 	/*
734 	 * If the server returned a read oplock and we have mandatory brlocks,
735 	 * set oplock level to None.
736 	 */
737 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
738 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
739 		oplock = 0;
740 	}
741 
742 	cifs_down_write(&cinode->lock_sem);
743 	list_add(&fdlocks->llist, &cinode->llist);
744 	up_write(&cinode->lock_sem);
745 
746 	spin_lock(&tcon->open_file_lock);
747 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
748 		oplock = fid->pending_open->oplock;
749 	list_del(&fid->pending_open->olist);
750 
751 	fid->purge_cache = false;
752 	server->ops->set_fid(cfile, fid, oplock);
753 
754 	list_add(&cfile->tlist, &tcon->openFileList);
755 	atomic_inc(&tcon->num_local_opens);
756 
757 	/* if readable file instance, put it first in the list */
758 	spin_lock(&cinode->open_file_lock);
759 	if (file->f_mode & FMODE_READ)
760 		list_add(&cfile->flist, &cinode->openFileList);
761 	else
762 		list_add_tail(&cfile->flist, &cinode->openFileList);
763 	spin_unlock(&cinode->open_file_lock);
764 	spin_unlock(&tcon->open_file_lock);
765 
766 	if (fid->purge_cache)
767 		cifs_zap_mapping(inode);
768 
769 	file->private_data = cfile;
770 	return cfile;
771 }
772 
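/* Take an extra reference on the file's private data under file_info_lock. */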
773 struct cifsFileInfo *
774 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
775 {
776 	spin_lock(&cifs_file->file_info_lock);
777 	cifsFileInfo_get_locked(cifs_file);
778 	spin_unlock(&cifs_file->file_info_lock);
779 	return cifs_file;
780 }
781 
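/*
 * Final teardown of a cifsFileInfo: delete any remaining lock records, drop
 * the tlink and dentry references and free the structure.
 */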
782 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
783 {
784 	struct inode *inode = d_inode(cifs_file->dentry);
785 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
786 	struct cifsLockInfo *li, *tmp;
787 	struct super_block *sb = inode->i_sb;
788 
789 	/*
790 	 * Delete any outstanding lock records. We'll lose them when the file
791 	 * is closed anyway.
792 	 */
793 	cifs_down_write(&cifsi->lock_sem);
794 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
795 		list_del(&li->llist);
796 		cifs_del_lock_waiters(li);
797 		kfree(li);
798 	}
799 	list_del(&cifs_file->llist->llist);
800 	kfree(cifs_file->llist);
801 	up_write(&cifsi->lock_sem);
802 
803 	cifs_put_tlink(cifs_file->tlink);
804 	dput(cifs_file->dentry);
805 	cifs_sb_deactive(sb);
806 	kfree(cifs_file->symlink_target);
807 	kfree(cifs_file);
808 }
809 
810 static void cifsFileInfo_put_work(struct work_struct *work)
811 {
812 	struct cifsFileInfo *cifs_file = container_of(work,
813 			struct cifsFileInfo, put);
814 
815 	cifsFileInfo_put_final(cifs_file);
816 }
817 
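/*
 * Work item that retries a server-side close which failed with -EBUSY or
 * -EAGAIN, giving up after MAX_RETRIES attempts, then releases the file info.
 */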
818 void serverclose_work(struct work_struct *work)
819 {
820 	struct cifsFileInfo *cifs_file = container_of(work,
821 			struct cifsFileInfo, serverclose);
822 
823 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
824 
825 	struct TCP_Server_Info *server = tcon->ses->server;
826 	int rc = 0;
827 	int retries = 0;
828 	int MAX_RETRIES = 4;
829 
830 	do {
831 		if (server->ops->close_getattr)
832 			rc = server->ops->close_getattr(0, tcon, cifs_file);
833 		else if (server->ops->close)
834 			rc = server->ops->close(0, tcon, &cifs_file->fid);
835 
836 		if (rc == -EBUSY || rc == -EAGAIN) {
837 			retries++;
838 			msleep(250);
839 		}
840 	} while ((rc == -EBUSY || rc == -EAGAIN) &&
841 		 (retries < MAX_RETRIES));
842 
843 	if (retries == MAX_RETRIES)
844 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
845 
846 	if (cifs_file->offload)
847 		queue_work(fileinfo_put_wq, &cifs_file->put);
848 	else
849 		cifsFileInfo_put_final(cifs_file);
850 }
851 
852 /**
853  * cifsFileInfo_put - release a reference of file priv data
854  *
855  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
856  *
857  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
858  */
859 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
860 {
861 	_cifsFileInfo_put(cifs_file, true, true);
862 }
863 
864 /**
865  * _cifsFileInfo_put - release a reference of file priv data
866  *
867  * This may involve closing the filehandle @cifs_file out on the
868  * server. Must be called without holding tcon->open_file_lock,
869  * cinode->open_file_lock and cifs_file->file_info_lock.
870  *
871  * If @wait_for_oplock_handler is true and we are releasing the last
872  * reference, wait for any running oplock break handler of the file
873  * and cancel any pending one.
874  *
875  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
876  * @wait_oplock_handler: must be false if called from oplock_break_handler
877  * @offload:	if true, queue the final release on a workqueue (false on close and oplock breaks)
878  *
879  */
880 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
881 		       bool wait_oplock_handler, bool offload)
882 {
883 	struct inode *inode = d_inode(cifs_file->dentry);
884 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
885 	struct TCP_Server_Info *server = tcon->ses->server;
886 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
887 	struct super_block *sb = inode->i_sb;
888 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
889 	struct cifs_fid fid = {};
890 	struct cifs_pending_open open;
891 	bool oplock_break_cancelled;
892 	bool serverclose_offloaded = false;
893 
894 	spin_lock(&tcon->open_file_lock);
895 	spin_lock(&cifsi->open_file_lock);
896 	spin_lock(&cifs_file->file_info_lock);
897 
898 	cifs_file->offload = offload;
899 	if (--cifs_file->count > 0) {
900 		spin_unlock(&cifs_file->file_info_lock);
901 		spin_unlock(&cifsi->open_file_lock);
902 		spin_unlock(&tcon->open_file_lock);
903 		return;
904 	}
905 	spin_unlock(&cifs_file->file_info_lock);
906 
907 	if (server->ops->get_lease_key)
908 		server->ops->get_lease_key(inode, &fid);
909 
910 	/* store open in pending opens to make sure we don't miss lease break */
911 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
912 
913 	/* remove it from the lists */
914 	list_del(&cifs_file->flist);
915 	list_del(&cifs_file->tlist);
916 	atomic_dec(&tcon->num_local_opens);
917 
918 	if (list_empty(&cifsi->openFileList)) {
919 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
920 			 d_inode(cifs_file->dentry));
921 		/*
922 		 * In strict cache mode we need to invalidate the mapping on the
923 		 * last close because it may cause an error when we open this
924 		 * file again and get at least a level II oplock.
925 		 */
926 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
927 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
928 		cifs_set_oplock_level(cifsi, 0);
929 	}
930 
931 	spin_unlock(&cifsi->open_file_lock);
932 	spin_unlock(&tcon->open_file_lock);
933 
934 	oplock_break_cancelled = wait_oplock_handler ?
935 		cancel_work_sync(&cifs_file->oplock_break) : false;
936 
937 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
938 		struct TCP_Server_Info *server = tcon->ses->server;
939 		unsigned int xid;
940 		int rc = 0;
941 
942 		xid = get_xid();
943 		if (server->ops->close_getattr)
944 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
945 		else if (server->ops->close)
946 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
947 		_free_xid(xid);
948 
949 		if (rc == -EBUSY || rc == -EAGAIN) {
950 			// Server close failed, hence offloading it as an async op
951 			queue_work(serverclose_wq, &cifs_file->serverclose);
952 			serverclose_offloaded = true;
953 		}
954 	}
955 
956 	if (oplock_break_cancelled)
957 		cifs_done_oplock_break(cifsi);
958 
959 	cifs_del_pending_open(&open);
960 
961 	// if serverclose has been offloaded to wq (on failure), it will
962 	// handle offloading put as well. If serverclose was not offloaded,
963 	// we need to handle offloading put here.
964 	if (!serverclose_offloaded) {
965 		if (offload)
966 			queue_work(fileinfo_put_wq, &cifs_file->put);
967 		else
968 			cifsFileInfo_put_final(cifs_file);
969 	}
970 }
971 
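/*
 * VFS ->open() for cifs files: reuse a cached handle whose close was
 * deferred when possible, try POSIX open where the server supports it, and
 * fall back to an NT-style open otherwise.
 */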
972 int cifs_open(struct inode *inode, struct file *file)
974 {
975 	int rc = -EACCES;
976 	unsigned int xid;
977 	__u32 oplock;
978 	struct cifs_sb_info *cifs_sb;
979 	struct TCP_Server_Info *server;
980 	struct cifs_tcon *tcon;
981 	struct tcon_link *tlink;
982 	struct cifsFileInfo *cfile = NULL;
983 	void *page;
984 	const char *full_path;
985 	bool posix_open_ok = false;
986 	struct cifs_fid fid = {};
987 	struct cifs_pending_open open;
988 	struct cifs_open_info_data data = {};
989 
990 	xid = get_xid();
991 
992 	cifs_sb = CIFS_SB(inode->i_sb);
993 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
994 		free_xid(xid);
995 		return -EIO;
996 	}
997 
998 	tlink = cifs_sb_tlink(cifs_sb);
999 	if (IS_ERR(tlink)) {
1000 		free_xid(xid);
1001 		return PTR_ERR(tlink);
1002 	}
1003 	tcon = tlink_tcon(tlink);
1004 	server = tcon->ses->server;
1005 
1006 	page = alloc_dentry_path();
1007 	full_path = build_path_from_dentry(file_dentry(file), page);
1008 	if (IS_ERR(full_path)) {
1009 		rc = PTR_ERR(full_path);
1010 		goto out;
1011 	}
1012 
1013 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
1014 		 inode, file->f_flags, full_path);
1015 
1016 	if (file->f_flags & O_DIRECT &&
1017 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
1018 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
1019 			file->f_op = &cifs_file_direct_nobrl_ops;
1020 		else
1021 			file->f_op = &cifs_file_direct_ops;
1022 	}
1023 
1024 	/* Get the cached handle as SMB2 close is deferred */
1025 	rc = cifs_get_readable_path(tcon, full_path, &cfile);
1026 	if (rc == 0) {
1027 		if (file->f_flags == cfile->f_flags) {
1028 			file->private_data = cfile;
1029 			spin_lock(&CIFS_I(inode)->deferred_lock);
1030 			cifs_del_deferred_close(cfile);
1031 			spin_unlock(&CIFS_I(inode)->deferred_lock);
1032 			goto use_cache;
1033 		} else {
1034 			_cifsFileInfo_put(cfile, true, false);
1035 		}
1036 	}
1037 
1038 	if (server->oplocks)
1039 		oplock = REQ_OPLOCK;
1040 	else
1041 		oplock = 0;
1042 
1043 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1044 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1045 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1046 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1047 		/* can not refresh inode info since size could be stale */
1048 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1049 				cifs_sb->ctx->file_mode /* ignored */,
1050 				file->f_flags, &oplock, &fid.netfid, xid);
1051 		if (rc == 0) {
1052 			cifs_dbg(FYI, "posix open succeeded\n");
1053 			posix_open_ok = true;
1054 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1055 			if (tcon->ses->serverNOS)
1056 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1057 					 tcon->ses->ip_addr,
1058 					 tcon->ses->serverNOS);
1059 			tcon->broken_posix_open = true;
1060 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1061 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1062 			goto out;
1063 		/*
1064 		 * Else fallthrough to retry open the old way on network i/o
1065 		 * or DFS errors.
1066 		 */
1067 	}
1068 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1069 
1070 	if (server->ops->get_lease_key)
1071 		server->ops->get_lease_key(inode, &fid);
1072 
1073 	cifs_add_pending_open(&fid, tlink, &open);
1074 
1075 	if (!posix_open_ok) {
1076 		if (server->ops->get_lease_key)
1077 			server->ops->get_lease_key(inode, &fid);
1078 
1079 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1080 				  xid, &data);
1081 		if (rc) {
1082 			cifs_del_pending_open(&open);
1083 			goto out;
1084 		}
1085 	}
1086 
1087 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1088 	if (cfile == NULL) {
1089 		if (server->ops->close)
1090 			server->ops->close(xid, tcon, &fid);
1091 		cifs_del_pending_open(&open);
1092 		rc = -ENOMEM;
1093 		goto out;
1094 	}
1095 
1096 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1097 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1098 		/*
1099 		 * Time to set mode which we can not set earlier due to
1100 		 * problems creating new read-only files.
1101 		 */
1102 		struct cifs_unix_set_info_args args = {
1103 			.mode	= inode->i_mode,
1104 			.uid	= INVALID_UID, /* no change */
1105 			.gid	= INVALID_GID, /* no change */
1106 			.ctime	= NO_CHANGE_64,
1107 			.atime	= NO_CHANGE_64,
1108 			.mtime	= NO_CHANGE_64,
1109 			.device	= 0,
1110 		};
1111 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1112 				       cfile->pid);
1113 	}
1114 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1115 
1116 use_cache:
1117 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1118 			   file->f_mode & FMODE_WRITE);
1119 	if (!(file->f_flags & O_DIRECT))
1120 		goto out;
1121 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1122 		goto out;
1123 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1124 
1125 out:
1126 	free_dentry_path(page);
1127 	free_xid(xid);
1128 	cifs_put_tlink(tlink);
1129 	cifs_free_open_info(&data);
1130 	return rc;
1131 }
1132 
1133 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1134 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1135 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1136 
1137 /*
1138  * Try to reacquire byte range locks that were released when session
1139  * to server was lost.
1140  */
1141 static int
1142 cifs_relock_file(struct cifsFileInfo *cfile)
1143 {
1144 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1145 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1146 	int rc = 0;
1147 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1148 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1149 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1150 
1151 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1152 	if (cinode->can_cache_brlcks) {
1153 		/* can cache locks - no need to relock */
1154 		up_read(&cinode->lock_sem);
1155 		return rc;
1156 	}
1157 
1158 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1159 	if (cap_unix(tcon->ses) &&
1160 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1161 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1162 		rc = cifs_push_posix_locks(cfile);
1163 	else
1164 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1165 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1166 
1167 	up_read(&cinode->lock_sem);
1168 	return rc;
1169 }
1170 
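/*
 * Reopen @cfile after its handle was invalidated (typically by reconnect).
 * If @can_flush is true, dirty pages are written back and the inode info is
 * refreshed once the new handle is in place; byte-range locks are reacquired
 * when reconnecting.
 */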
1171 static int
1172 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1173 {
1174 	int rc = -EACCES;
1175 	unsigned int xid;
1176 	__u32 oplock;
1177 	struct cifs_sb_info *cifs_sb;
1178 	struct cifs_tcon *tcon;
1179 	struct TCP_Server_Info *server;
1180 	struct cifsInodeInfo *cinode;
1181 	struct inode *inode;
1182 	void *page;
1183 	const char *full_path;
1184 	int desired_access;
1185 	int disposition = FILE_OPEN;
1186 	int create_options = CREATE_NOT_DIR;
1187 	struct cifs_open_parms oparms;
1188 	int rdwr_for_fscache = 0;
1189 
1190 	xid = get_xid();
1191 	mutex_lock(&cfile->fh_mutex);
1192 	if (!cfile->invalidHandle) {
1193 		mutex_unlock(&cfile->fh_mutex);
1194 		free_xid(xid);
1195 		return 0;
1196 	}
1197 
1198 	inode = d_inode(cfile->dentry);
1199 	cifs_sb = CIFS_SB(inode->i_sb);
1200 	tcon = tlink_tcon(cfile->tlink);
1201 	server = tcon->ses->server;
1202 
1203 	/*
1204 	 * Cannot grab the rename sem here, because various ops, including
1205 	 * those that already hold the rename sem, can end up causing writepage
1206 	 * to get called; if the server was down, that means we end up here and
1207 	 * we can never tell if the caller already has the rename_sem.
1208 	 */
1209 	page = alloc_dentry_path();
1210 	full_path = build_path_from_dentry(cfile->dentry, page);
1211 	if (IS_ERR(full_path)) {
1212 		mutex_unlock(&cfile->fh_mutex);
1213 		free_dentry_path(page);
1214 		free_xid(xid);
1215 		return PTR_ERR(full_path);
1216 	}
1217 
1218 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1219 		 inode, cfile->f_flags, full_path);
1220 
1221 	if (tcon->ses->server->oplocks)
1222 		oplock = REQ_OPLOCK;
1223 	else
1224 		oplock = 0;
1225 
1226 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1227 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1228 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1229 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1230 		/*
1231 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1232 		 * original open. Must mask them off for a reopen.
1233 		 */
1234 		unsigned int oflags = cfile->f_flags &
1235 						~(O_CREAT | O_EXCL | O_TRUNC);
1236 
1237 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1238 				     cifs_sb->ctx->file_mode /* ignored */,
1239 				     oflags, &oplock, &cfile->fid.netfid, xid);
1240 		if (rc == 0) {
1241 			cifs_dbg(FYI, "posix reopen succeeded\n");
1242 			oparms.reconnect = true;
1243 			goto reopen_success;
1244 		}
1245 		/*
1246 		 * fall through to retry the open the old way on errors;
1247 		 * especially in the reconnect path it is important to retry hard
1248 		 */
1249 	}
1250 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1251 
1252 	/* If we're caching, we need to be able to fill in around partial writes. */
1253 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1254 		rdwr_for_fscache = 1;
1255 
1256 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1257 
1258 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1259 	if (cfile->f_flags & O_SYNC)
1260 		create_options |= CREATE_WRITE_THROUGH;
1261 
1262 	if (cfile->f_flags & O_DIRECT)
1263 		create_options |= CREATE_NO_BUFFER;
1264 
1265 	if (server->ops->get_lease_key)
1266 		server->ops->get_lease_key(inode, &cfile->fid);
1267 
1268 retry_open:
1269 	oparms = (struct cifs_open_parms) {
1270 		.tcon = tcon,
1271 		.cifs_sb = cifs_sb,
1272 		.desired_access = desired_access,
1273 		.create_options = cifs_create_options(cifs_sb, create_options),
1274 		.disposition = disposition,
1275 		.path = full_path,
1276 		.fid = &cfile->fid,
1277 		.reconnect = true,
1278 	};
1279 
1280 	/*
1281 	 * Can not refresh inode by passing in file_info buf to be returned by
1282 	 * ops->open and then calling get_inode_info with returned buf since
1283 	 * file might have write behind data that needs to be flushed and server
1284 	 * version of file size can be stale. If we knew for sure that inode was
1285 	 * not dirty locally we could do this.
1286 	 */
1287 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1288 	if (rc == -ENOENT && oparms.reconnect == false) {
1289 		/* durable handle timeout is expired - open the file again */
1290 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1291 		/* indicate that we need to relock the file */
1292 		oparms.reconnect = true;
1293 	}
1294 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1295 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1296 		rdwr_for_fscache = 2;
1297 		goto retry_open;
1298 	}
1299 
1300 	if (rc) {
1301 		mutex_unlock(&cfile->fh_mutex);
1302 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1303 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1304 		goto reopen_error_exit;
1305 	}
1306 
1307 	if (rdwr_for_fscache == 2)
1308 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1309 
1310 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1311 reopen_success:
1312 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1313 	cfile->invalidHandle = false;
1314 	mutex_unlock(&cfile->fh_mutex);
1315 	cinode = CIFS_I(inode);
1316 
1317 	if (can_flush) {
1318 		rc = filemap_write_and_wait(inode->i_mapping);
1319 		if (!is_interrupt_error(rc))
1320 			mapping_set_error(inode->i_mapping, rc);
1321 
1322 		if (tcon->posix_extensions) {
1323 			rc = smb311_posix_get_inode_info(&inode, full_path,
1324 							 NULL, inode->i_sb, xid);
1325 		} else if (tcon->unix_ext) {
1326 			rc = cifs_get_inode_info_unix(&inode, full_path,
1327 						      inode->i_sb, xid);
1328 		} else {
1329 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1330 						 inode->i_sb, xid, NULL);
1331 		}
1332 	}
1333 	/*
1334 	 * Else we are already writing out data to the server and could
1335 	 * deadlock if we tried to flush it; and since we do not know whether
1336 	 * we have data that would invalidate the current end of file on the
1337 	 * server, we cannot go to the server to get the new inode info.
1338 	 */
1339 
1340 	/*
1341 	 * If the server returned a read oplock and we have mandatory brlocks,
1342 	 * set oplock level to None.
1343 	 */
1344 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1345 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1346 		oplock = 0;
1347 	}
1348 
1349 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1350 	if (oparms.reconnect)
1351 		cifs_relock_file(cfile);
1352 
1353 reopen_error_exit:
1354 	free_dentry_path(page);
1355 	free_xid(xid);
1356 	return rc;
1357 }
1358 
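/* Work item that carries out a close that was deferred by cifs_close(). */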
1359 void smb2_deferred_work_close(struct work_struct *work)
1360 {
1361 	struct cifsFileInfo *cfile = container_of(work,
1362 			struct cifsFileInfo, deferred.work);
1363 
1364 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1365 	cifs_del_deferred_close(cfile);
1366 	cfile->deferred_close_scheduled = false;
1367 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1368 	_cifsFileInfo_put(cfile, true, false);
1369 }
1370 
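/*
 * A close can be deferred only if a close timeout is configured, the inode
 * holds a lease with at least read-handle caching, and CIFS_INO_CLOSE_ON_LOCK
 * is not set.
 */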
1371 static bool
1372 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1373 {
1374 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1375 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1376 
1377 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1378 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1379 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1380 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1382 }
1383 
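/*
 * VFS ->release() for cifs files.  Where possible the server-side close is
 * deferred for closetimeo jiffies so that a quick reopen can reuse the
 * handle; otherwise the last reference is dropped immediately.
 */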
1384 int cifs_close(struct inode *inode, struct file *file)
1385 {
1386 	struct cifsFileInfo *cfile;
1387 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1388 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1389 	struct cifs_deferred_close *dclose;
1390 
1391 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1392 
1393 	if (file->private_data != NULL) {
1394 		cfile = file->private_data;
1395 		file->private_data = NULL;
1396 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1397 		if ((cfile->status_file_deleted == false) &&
1398 		    (smb2_can_defer_close(inode, dclose))) {
1399 			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
1400 				inode_set_mtime_to_ts(inode,
1401 						      inode_set_ctime_current(inode));
1402 			}
1403 			spin_lock(&cinode->deferred_lock);
1404 			cifs_add_deferred_close(cfile, dclose);
1405 			if (cfile->deferred_close_scheduled &&
1406 			    delayed_work_pending(&cfile->deferred)) {
1407 				/*
1408 				 * If there is no pending work, mod_delayed_work queues new work.
1409 				 * So, increase the ref count to avoid a use-after-free.
1410 				 */
1411 				if (!mod_delayed_work(deferredclose_wq,
1412 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1413 					cifsFileInfo_get(cfile);
1414 			} else {
1415 				/* Deferred close for files */
1416 				queue_delayed_work(deferredclose_wq,
1417 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1418 				cfile->deferred_close_scheduled = true;
1419 				spin_unlock(&cinode->deferred_lock);
1420 				return 0;
1421 			}
1422 			spin_unlock(&cinode->deferred_lock);
1423 			_cifsFileInfo_put(cfile, true, false);
1424 		} else {
1425 			_cifsFileInfo_put(cfile, true, false);
1426 			kfree(dclose);
1427 		}
1428 	}
1429 
1430 	/* return code from the ->release op is always ignored */
1431 	return 0;
1432 }
1433 
1434 void
1435 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1436 {
1437 	struct cifsFileInfo *open_file, *tmp;
1438 	struct list_head tmp_list;
1439 
1440 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1441 		return;
1442 
1443 	tcon->need_reopen_files = false;
1444 
1445 	cifs_dbg(FYI, "Reopen persistent handles\n");
1446 	INIT_LIST_HEAD(&tmp_list);
1447 
1448 	/* list all files open on tree connection, reopen resilient handles  */
1449 	spin_lock(&tcon->open_file_lock);
1450 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1451 		if (!open_file->invalidHandle)
1452 			continue;
1453 		cifsFileInfo_get(open_file);
1454 		list_add_tail(&open_file->rlist, &tmp_list);
1455 	}
1456 	spin_unlock(&tcon->open_file_lock);
1457 
1458 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1459 		if (cifs_reopen_file(open_file, false /* do not flush */))
1460 			tcon->need_reopen_files = true;
1461 		list_del_init(&open_file->rlist);
1462 		cifsFileInfo_put(open_file);
1463 	}
1464 }
1465 
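/* Close a directory handle and free the associated search buffer. */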
1466 int cifs_closedir(struct inode *inode, struct file *file)
1467 {
1468 	int rc = 0;
1469 	unsigned int xid;
1470 	struct cifsFileInfo *cfile = file->private_data;
1471 	struct cifs_tcon *tcon;
1472 	struct TCP_Server_Info *server;
1473 	char *buf;
1474 
1475 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1476 
1477 	if (cfile == NULL)
1478 		return rc;
1479 
1480 	xid = get_xid();
1481 	tcon = tlink_tcon(cfile->tlink);
1482 	server = tcon->ses->server;
1483 
1484 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1485 	spin_lock(&cfile->file_info_lock);
1486 	if (server->ops->dir_needs_close(cfile)) {
1487 		cfile->invalidHandle = true;
1488 		spin_unlock(&cfile->file_info_lock);
1489 		if (server->ops->close_dir)
1490 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1491 		else
1492 			rc = -ENOSYS;
1493 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1494 		/* not much we can do if it fails anyway, ignore rc */
1495 		rc = 0;
1496 	} else
1497 		spin_unlock(&cfile->file_info_lock);
1498 
1499 	buf = cfile->srch_inf.ntwrk_buf_start;
1500 	if (buf) {
1501 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1502 		cfile->srch_inf.ntwrk_buf_start = NULL;
1503 		if (cfile->srch_inf.smallBuf)
1504 			cifs_small_buf_release(buf);
1505 		else
1506 			cifs_buf_release(buf);
1507 	}
1508 
1509 	cifs_put_tlink(cfile->tlink);
1510 	kfree(file->private_data);
1511 	file->private_data = NULL;
1512 	/* BB can we lock the filestruct while this is going on? */
1513 	free_xid(xid);
1514 	return rc;
1515 }
1516 
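/* Allocate and initialise a byte-range lock record for the current process. */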
1517 static struct cifsLockInfo *
1518 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1519 {
1520 	struct cifsLockInfo *lock =
1521 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1522 	if (!lock)
1523 		return lock;
1524 	lock->offset = offset;
1525 	lock->length = length;
1526 	lock->type = type;
1527 	lock->pid = current->tgid;
1528 	lock->flags = flags;
1529 	INIT_LIST_HEAD(&lock->blist);
1530 	init_waitqueue_head(&lock->block_q);
1531 	return lock;
1532 }
1533 
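/* Wake up every lock request that is blocked on @lock. */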
1534 void
1535 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1536 {
1537 	struct cifsLockInfo *li, *tmp;
1538 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1539 		list_del_init(&li->blist);
1540 		wake_up(&li->block_q);
1541 	}
1542 }
1543 
1544 #define CIFS_LOCK_OP	0
1545 #define CIFS_READ_OP	1
1546 #define CIFS_WRITE_OP	2
1547 
1548 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1549 static bool
1550 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1551 			    __u64 length, __u8 type, __u16 flags,
1552 			    struct cifsFileInfo *cfile,
1553 			    struct cifsLockInfo **conf_lock, int rw_check)
1554 {
1555 	struct cifsLockInfo *li;
1556 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1557 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1558 
1559 	list_for_each_entry(li, &fdlocks->locks, llist) {
1560 		if (offset + length <= li->offset ||
1561 		    offset >= li->offset + li->length)
1562 			continue;
1563 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1564 		    server->ops->compare_fids(cfile, cur_cfile)) {
1565 			/* shared lock prevents write op through the same fid */
1566 			if (!(li->type & server->vals->shared_lock_type) ||
1567 			    rw_check != CIFS_WRITE_OP)
1568 				continue;
1569 		}
1570 		if ((type & server->vals->shared_lock_type) &&
1571 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1572 		     current->tgid == li->pid) || type == li->type))
1573 			continue;
1574 		if (rw_check == CIFS_LOCK_OP &&
1575 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1576 		    server->ops->compare_fids(cfile, cur_cfile))
1577 			continue;
1578 		if (conf_lock)
1579 			*conf_lock = li;
1580 		return true;
1581 	}
1582 	return false;
1583 }
1584 
1585 bool
1586 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1587 			__u8 type, __u16 flags,
1588 			struct cifsLockInfo **conf_lock, int rw_check)
1589 {
1590 	bool rc = false;
1591 	struct cifs_fid_locks *cur;
1592 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1593 
1594 	list_for_each_entry(cur, &cinode->llist, llist) {
1595 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1596 						 flags, cfile, conf_lock,
1597 						 rw_check);
1598 		if (rc)
1599 			break;
1600 	}
1601 
1602 	return rc;
1603 }
1604 
1605 /*
1606  * Check if there is another lock that prevents us from setting the lock
1607  * (mandatory style). If such a lock exists, update the flock structure with
1608  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1609  * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
1610  * send a request to the server, or 1 otherwise.
1611  */
1612 static int
1613 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1614 	       __u8 type, struct file_lock *flock)
1615 {
1616 	int rc = 0;
1617 	struct cifsLockInfo *conf_lock;
1618 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1619 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1620 	bool exist;
1621 
1622 	down_read(&cinode->lock_sem);
1623 
1624 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1625 					flock->c.flc_flags, &conf_lock,
1626 					CIFS_LOCK_OP);
1627 	if (exist) {
1628 		flock->fl_start = conf_lock->offset;
1629 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1630 		flock->c.flc_pid = conf_lock->pid;
1631 		if (conf_lock->type & server->vals->shared_lock_type)
1632 			flock->c.flc_type = F_RDLCK;
1633 		else
1634 			flock->c.flc_type = F_WRLCK;
1635 	} else if (!cinode->can_cache_brlcks)
1636 		rc = 1;
1637 	else
1638 		flock->c.flc_type = F_UNLCK;
1639 
1640 	up_read(&cinode->lock_sem);
1641 	return rc;
1642 }
1643 
1644 static void
1645 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1646 {
1647 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1648 	cifs_down_write(&cinode->lock_sem);
1649 	list_add_tail(&lock->llist, &cfile->llist->locks);
1650 	up_write(&cinode->lock_sem);
1651 }
1652 
1653 /*
1654  * Set the byte-range lock (mandatory style). Returns:
1655  * 1) 0, if we set the lock and don't need to send a request to the server;
1656  * 2) 1, if no locks prevent us but we need to send a request to the server;
1657  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1658  */
1659 static int
1660 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1661 		 bool wait)
1662 {
1663 	struct cifsLockInfo *conf_lock;
1664 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1665 	bool exist;
1666 	int rc = 0;
1667 
1668 try_again:
1669 	exist = false;
1670 	cifs_down_write(&cinode->lock_sem);
1671 
1672 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1673 					lock->type, lock->flags, &conf_lock,
1674 					CIFS_LOCK_OP);
1675 	if (!exist && cinode->can_cache_brlcks) {
1676 		list_add_tail(&lock->llist, &cfile->llist->locks);
1677 		up_write(&cinode->lock_sem);
1678 		return rc;
1679 	}
1680 
1681 	if (!exist)
1682 		rc = 1;
1683 	else if (!wait)
1684 		rc = -EACCES;
1685 	else {
1686 		list_add_tail(&lock->blist, &conf_lock->blist);
1687 		up_write(&cinode->lock_sem);
1688 		rc = wait_event_interruptible(lock->block_q,
1689 					(lock->blist.prev == &lock->blist) &&
1690 					(lock->blist.next == &lock->blist));
1691 		if (!rc)
1692 			goto try_again;
1693 		cifs_down_write(&cinode->lock_sem);
1694 		list_del_init(&lock->blist);
1695 	}
1696 
1697 	up_write(&cinode->lock_sem);
1698 	return rc;
1699 }
1700 
1701 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1702 /*
1703  * Check if there is another lock that prevents us from setting the lock
1704  * (posix style). If such a lock exists, update the flock structure with
1705  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1706  * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
1707  * send a request to the server, or 1 otherwise.
1708  */
1709 static int
1710 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1711 {
1712 	int rc = 0;
1713 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1714 	unsigned char saved_type = flock->c.flc_type;
1715 
1716 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1717 		return 1;
1718 
1719 	down_read(&cinode->lock_sem);
1720 	posix_test_lock(file, flock);
1721 
1722 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1723 		flock->c.flc_type = saved_type;
1724 		rc = 1;
1725 	}
1726 
1727 	up_read(&cinode->lock_sem);
1728 	return rc;
1729 }
1730 
1731 /*
1732  * Set the byte-range lock (posix style). Returns:
1733  * 1) <0, if an error occurs while setting the lock;
1734  * 2) 0, if we set the lock and don't need to send a request to the server;
1735  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1736  * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
1737  */
1738 static int
1739 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1740 {
1741 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1742 	int rc = FILE_LOCK_DEFERRED + 1;
1743 
1744 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1745 		return rc;
1746 
1747 	cifs_down_write(&cinode->lock_sem);
1748 	if (!cinode->can_cache_brlcks) {
1749 		up_write(&cinode->lock_sem);
1750 		return rc;
1751 	}
1752 
1753 	rc = posix_lock_file(file, flock, NULL);
1754 	up_write(&cinode->lock_sem);
1755 	return rc;
1756 }
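/*
 * Illustrative caller sketch (cf. cifs_setlk() below): return values up
 * to FILE_LOCK_DEFERRED mean the lock was handled (or deferred) locally,
 * while FILE_LOCK_DEFERRED + 1 means it must still be sent to the server:
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;
 *	rc = CIFSSMBPosixLock(xid, tcon, netfid, ...);
 */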
1757 
1758 int
1759 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1760 {
1761 	unsigned int xid;
1762 	int rc = 0, stored_rc;
1763 	struct cifsLockInfo *li, *tmp;
1764 	struct cifs_tcon *tcon;
1765 	unsigned int num, max_num, max_buf;
1766 	LOCKING_ANDX_RANGE *buf, *cur;
1767 	static const int types[] = {
1768 		LOCKING_ANDX_LARGE_FILES,
1769 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1770 	};
1771 	int i;
1772 
1773 	xid = get_xid();
1774 	tcon = tlink_tcon(cfile->tlink);
1775 
1776 	/*
1777 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1778 	 * and check it before using.
1779 	 */
1780 	max_buf = tcon->ses->server->maxBuf;
1781 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1782 		free_xid(xid);
1783 		return -EINVAL;
1784 	}
1785 
1786 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1787 		     PAGE_SIZE);
1788 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1789 			PAGE_SIZE);
1790 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1791 						sizeof(LOCKING_ANDX_RANGE);
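	/*
	 * Worked example (illustrative; struct sizes vary by arch/config):
	 * with 4 KiB pages, a 32-byte smb_hdr and a 20-byte
	 * LOCKING_ANDX_RANGE this gives max_num = (4096 - 32) / 20 = 203
	 * ranges per LOCKING_ANDX request.  Note that max_buf already had
	 * the header subtracted once before the min_t() clamp above, so
	 * this second subtraction leaves a little extra headroom.
	 */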
1792 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1793 	if (!buf) {
1794 		free_xid(xid);
1795 		return -ENOMEM;
1796 	}
1797 
1798 	for (i = 0; i < 2; i++) {
1799 		cur = buf;
1800 		num = 0;
1801 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1802 			if (li->type != types[i])
1803 				continue;
1804 			cur->Pid = cpu_to_le16(li->pid);
1805 			cur->LengthLow = cpu_to_le32((u32)li->length);
1806 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1807 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1808 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1809 			if (++num == max_num) {
1810 				stored_rc = cifs_lockv(xid, tcon,
1811 						       cfile->fid.netfid,
1812 						       (__u8)li->type, 0, num,
1813 						       buf);
1814 				if (stored_rc)
1815 					rc = stored_rc;
1816 				cur = buf;
1817 				num = 0;
1818 			} else
1819 				cur++;
1820 		}
1821 
1822 		if (num) {
1823 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1824 					       (__u8)types[i], 0, num, buf);
1825 			if (stored_rc)
1826 				rc = stored_rc;
1827 		}
1828 	}
1829 
1830 	kfree(buf);
1831 	free_xid(xid);
1832 	return rc;
1833 }
1834 
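/*
 * POSIX locks on the wire carry a 32-bit pid, but the local lock owner
 * (fl_owner_t) is a kernel pointer.  XORing a 32-bit hash of that pointer
 * with the randomly generated cifs_lock_secret yields a stable wire pid
 * per owner without leaking kernel addresses to the server.
 */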
1835 static __u32
1836 hash_lockowner(fl_owner_t owner)
1837 {
1838 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1839 }
1840 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1841 
1842 struct lock_to_push {
1843 	struct list_head llist;
1844 	__u64 offset;
1845 	__u64 length;
1846 	__u32 pid;
1847 	__u16 netfid;
1848 	__u8 type;
1849 };
1850 
1851 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1852 static int
1853 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1854 {
1855 	struct inode *inode = d_inode(cfile->dentry);
1856 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1857 	struct file_lock *flock;
1858 	struct file_lock_context *flctx = locks_inode_context(inode);
1859 	unsigned int count = 0, i;
1860 	int rc = 0, xid, type;
1861 	struct list_head locks_to_send, *el;
1862 	struct lock_to_push *lck, *tmp;
1863 	__u64 length;
1864 
1865 	xid = get_xid();
1866 
1867 	if (!flctx)
1868 		goto out;
1869 
1870 	spin_lock(&flctx->flc_lock);
1871 	list_for_each(el, &flctx->flc_posix) {
1872 		count++;
1873 	}
1874 	spin_unlock(&flctx->flc_lock);
1875 
1876 	INIT_LIST_HEAD(&locks_to_send);
1877 
1878 	/*
1879 	 * Allocating count locks is enough because no FL_POSIX locks can be
1880 	 * added to the list while we are holding cinode->lock_sem that
1881 	 * protects locking operations of this inode.
1882 	 */
1883 	for (i = 0; i < count; i++) {
1884 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1885 		if (!lck) {
1886 			rc = -ENOMEM;
1887 			goto err_out;
1888 		}
1889 		list_add_tail(&lck->llist, &locks_to_send);
1890 	}
1891 
1892 	el = locks_to_send.next;
1893 	spin_lock(&flctx->flc_lock);
1894 	for_each_file_lock(flock, &flctx->flc_posix) {
1895 		unsigned char ftype = flock->c.flc_type;
1896 
1897 		if (el == &locks_to_send) {
1898 			/*
1899 			 * The list ended. We don't have enough allocated
1900 			 * structures - something is really wrong.
1901 			 */
1902 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1903 			break;
1904 		}
1905 		length = cifs_flock_len(flock);
1906 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1907 			type = CIFS_RDLCK;
1908 		else
1909 			type = CIFS_WRLCK;
1910 		lck = list_entry(el, struct lock_to_push, llist);
1911 		lck->pid = hash_lockowner(flock->c.flc_owner);
1912 		lck->netfid = cfile->fid.netfid;
1913 		lck->length = length;
1914 		lck->type = type;
1915 		lck->offset = flock->fl_start;
1916 	}
1917 	spin_unlock(&flctx->flc_lock);
1918 
1919 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1920 		int stored_rc;
1921 
1922 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1923 					     lck->offset, lck->length, NULL,
1924 					     lck->type, 0);
1925 		if (stored_rc)
1926 			rc = stored_rc;
1927 		list_del(&lck->llist);
1928 		kfree(lck);
1929 	}
1930 
1931 out:
1932 	free_xid(xid);
1933 	return rc;
1934 err_out:
1935 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1936 		list_del(&lck->llist);
1937 		kfree(lck);
1938 	}
1939 	goto out;
1940 }
1941 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1942 
1943 static int
1944 cifs_push_locks(struct cifsFileInfo *cfile)
1945 {
1946 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1947 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1948 	int rc = 0;
1949 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1950 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1951 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1952 
1953 	/* we are going to update can_cache_brlcks here - need write access */
1954 	cifs_down_write(&cinode->lock_sem);
1955 	if (!cinode->can_cache_brlcks) {
1956 		up_write(&cinode->lock_sem);
1957 		return rc;
1958 	}
1959 
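	/*
	 * With the legacy Unix extensions and the FCNTL capability we can
	 * push the cached locks as POSIX byte-range locks, unless posix
	 * brlocks were disabled at mount time; otherwise fall back to the
	 * protocol's mandatory-style locks.
	 */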
1960 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1961 	if (cap_unix(tcon->ses) &&
1962 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1963 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1964 		rc = cifs_push_posix_locks(cfile);
1965 	else
1966 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1967 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1968 
1969 	cinode->can_cache_brlcks = false;
1970 	up_write(&cinode->lock_sem);
1971 	return rc;
1972 }
1973 
1974 static void
1975 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1976 		bool *wait_flag, struct TCP_Server_Info *server)
1977 {
1978 	if (flock->c.flc_flags & FL_POSIX)
1979 		cifs_dbg(FYI, "Posix\n");
1980 	if (flock->c.flc_flags & FL_FLOCK)
1981 		cifs_dbg(FYI, "Flock\n");
1982 	if (flock->c.flc_flags & FL_SLEEP) {
1983 		cifs_dbg(FYI, "Blocking lock\n");
1984 		*wait_flag = true;
1985 	}
1986 	if (flock->c.flc_flags & FL_ACCESS)
1987 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1988 	if (flock->c.flc_flags & FL_LEASE)
1989 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1990 	if (flock->c.flc_flags &
1991 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1992 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1993 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1994 		         flock->c.flc_flags);
1995 
1996 	*type = server->vals->large_lock_type;
1997 	if (lock_is_write(flock)) {
1998 		cifs_dbg(FYI, "F_WRLCK\n");
1999 		*type |= server->vals->exclusive_lock_type;
2000 		*lock = 1;
2001 	} else if (lock_is_unlock(flock)) {
2002 		cifs_dbg(FYI, "F_UNLCK\n");
2003 		*type |= server->vals->unlock_lock_type;
2004 		*unlock = 1;
2005 		/* Check if unlock includes more than one lock range */
2006 	} else if (lock_is_read(flock)) {
2007 		cifs_dbg(FYI, "F_RDLCK\n");
2008 		*type |= server->vals->shared_lock_type;
2009 		*lock = 1;
2010 	} else if (flock->c.flc_type == F_EXLCK) {
2011 		cifs_dbg(FYI, "F_EXLCK\n");
2012 		*type |= server->vals->exclusive_lock_type;
2013 		*lock = 1;
2014 	} else if (flock->c.flc_type == F_SHLCK) {
2015 		cifs_dbg(FYI, "F_SHLCK\n");
2016 		*type |= server->vals->shared_lock_type;
2017 		*lock = 1;
2018 	} else
2019 		cifs_dbg(FYI, "Unknown type of lock\n");
2020 }
2021 
2022 static int
2023 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2024 	   bool wait_flag, bool posix_lck, unsigned int xid)
2025 {
2026 	int rc = 0;
2027 	__u64 length = cifs_flock_len(flock);
2028 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2029 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2030 	struct TCP_Server_Info *server = tcon->ses->server;
2031 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2032 	__u16 netfid = cfile->fid.netfid;
2033 
2034 	if (posix_lck) {
2035 		int posix_lock_type;
2036 
2037 		rc = cifs_posix_lock_test(file, flock);
2038 		if (!rc)
2039 			return rc;
2040 
2041 		if (type & server->vals->shared_lock_type)
2042 			posix_lock_type = CIFS_RDLCK;
2043 		else
2044 			posix_lock_type = CIFS_WRLCK;
2045 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2046 				      hash_lockowner(flock->c.flc_owner),
2047 				      flock->fl_start, length, flock,
2048 				      posix_lock_type, wait_flag);
2049 		return rc;
2050 	}
2051 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2052 
2053 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2054 	if (!rc)
2055 		return rc;
2056 
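	/*
	 * SMB mandatory locks have no "test" operation, so probe by trying
	 * to take the requested lock: on success, unlock it again at once
	 * and report F_UNLCK.  If an exclusive probe fails, retry with a
	 * shared one below to tell whether the conflicting lock is a read
	 * (F_RDLCK) or a write (F_WRLCK) lock.
	 */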
2057 	/* BB we could chain these into one lock request BB */
2058 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2059 				    1, 0, false);
2060 	if (rc == 0) {
2061 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2062 					    type, 0, 1, false);
2063 		flock->c.flc_type = F_UNLCK;
2064 		if (rc != 0)
2065 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2066 				 rc);
2067 		return 0;
2068 	}
2069 
2070 	if (type & server->vals->shared_lock_type) {
2071 		flock->c.flc_type = F_WRLCK;
2072 		return 0;
2073 	}
2074 
2075 	type &= ~server->vals->exclusive_lock_type;
2076 
2077 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2078 				    type | server->vals->shared_lock_type,
2079 				    1, 0, false);
2080 	if (rc == 0) {
2081 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2082 			type | server->vals->shared_lock_type, 0, 1, false);
2083 		flock->c.flc_type = F_RDLCK;
2084 		if (rc != 0)
2085 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2086 				 rc);
2087 	} else
2088 		flock->c.flc_type = F_WRLCK;
2089 
2090 	return 0;
2091 }
2092 
2093 void
2094 cifs_move_llist(struct list_head *source, struct list_head *dest)
2095 {
2096 	struct list_head *li, *tmp;
2097 	list_for_each_safe(li, tmp, source)
2098 		list_move(li, dest);
2099 }
2100 
2101 void
2102 cifs_free_llist(struct list_head *llist)
2103 {
2104 	struct cifsLockInfo *li, *tmp;
2105 	list_for_each_entry_safe(li, tmp, llist, llist) {
2106 		cifs_del_lock_waiters(li);
2107 		list_del(&li->llist);
2108 		kfree(li);
2109 	}
2110 }
2111 
2112 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2113 int
2114 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2115 		  unsigned int xid)
2116 {
2117 	int rc = 0, stored_rc;
2118 	static const int types[] = {
2119 		LOCKING_ANDX_LARGE_FILES,
2120 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2121 	};
2122 	unsigned int i;
2123 	unsigned int max_num, num, max_buf;
2124 	LOCKING_ANDX_RANGE *buf, *cur;
2125 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2126 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2127 	struct cifsLockInfo *li, *tmp;
2128 	__u64 length = cifs_flock_len(flock);
2129 	struct list_head tmp_llist;
2130 
2131 	INIT_LIST_HEAD(&tmp_llist);
2132 
2133 	/*
2134 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2135 	 * and check it before using.
2136 	 */
2137 	max_buf = tcon->ses->server->maxBuf;
2138 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2139 		return -EINVAL;
2140 
2141 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2142 		     PAGE_SIZE);
2143 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2144 			PAGE_SIZE);
2145 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2146 						sizeof(LOCKING_ANDX_RANGE);
2147 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2148 	if (!buf)
2149 		return -ENOMEM;
2150 
2151 	cifs_down_write(&cinode->lock_sem);
2152 	for (i = 0; i < 2; i++) {
2153 		cur = buf;
2154 		num = 0;
2155 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2156 			if (flock->fl_start > li->offset ||
2157 			    (flock->fl_start + length) <
2158 			    (li->offset + li->length))
2159 				continue;
2160 			if (current->tgid != li->pid)
2161 				continue;
2162 			if (types[i] != li->type)
2163 				continue;
2164 			if (cinode->can_cache_brlcks) {
2165 				/*
2166 				 * We can cache brlock requests - simply remove
2167 				 * a lock from the file's list.
2168 				 */
2169 				list_del(&li->llist);
2170 				cifs_del_lock_waiters(li);
2171 				kfree(li);
2172 				continue;
2173 			}
2174 			cur->Pid = cpu_to_le16(li->pid);
2175 			cur->LengthLow = cpu_to_le32((u32)li->length);
2176 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2177 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2178 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2179 			/*
2180 			 * We need to save a lock here to let us add it again to
2181 			 * the file's list if the unlock range request fails on
2182 			 * the server.
2183 			 */
2184 			list_move(&li->llist, &tmp_llist);
2185 			if (++num == max_num) {
2186 				stored_rc = cifs_lockv(xid, tcon,
2187 						       cfile->fid.netfid,
2188 						       li->type, num, 0, buf);
2189 				if (stored_rc) {
2190 					/*
2191 					 * We failed on the unlock range
2192 					 * request - add all locks from the tmp
2193 					 * list to the head of the file's list.
2194 					 */
2195 					cifs_move_llist(&tmp_llist,
2196 							&cfile->llist->locks);
2197 					rc = stored_rc;
2198 				} else
2199 					/*
2200 					 * The unlock range request succeeded -
2201 					 * free the tmp list.
2202 					 */
2203 					cifs_free_llist(&tmp_llist);
2204 				cur = buf;
2205 				num = 0;
2206 			} else
2207 				cur++;
2208 		}
2209 		if (num) {
2210 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2211 					       types[i], num, 0, buf);
2212 			if (stored_rc) {
2213 				cifs_move_llist(&tmp_llist,
2214 						&cfile->llist->locks);
2215 				rc = stored_rc;
2216 			} else
2217 				cifs_free_llist(&tmp_llist);
2218 		}
2219 	}
2220 
2221 	up_write(&cinode->lock_sem);
2222 	kfree(buf);
2223 	return rc;
2224 }
2225 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2226 
2227 static int
2228 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2229 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2230 	   unsigned int xid)
2231 {
2232 	int rc = 0;
2233 	__u64 length = cifs_flock_len(flock);
2234 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2235 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2236 	struct TCP_Server_Info *server = tcon->ses->server;
2237 	struct inode *inode = d_inode(cfile->dentry);
2238 
2239 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2240 	if (posix_lck) {
2241 		int posix_lock_type;
2242 
2243 		rc = cifs_posix_lock_set(file, flock);
2244 		if (rc <= FILE_LOCK_DEFERRED)
2245 			return rc;
2246 
2247 		if (type & server->vals->shared_lock_type)
2248 			posix_lock_type = CIFS_RDLCK;
2249 		else
2250 			posix_lock_type = CIFS_WRLCK;
2251 
2252 		if (unlock == 1)
2253 			posix_lock_type = CIFS_UNLCK;
2254 
2255 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2256 				      hash_lockowner(flock->c.flc_owner),
2257 				      flock->fl_start, length,
2258 				      NULL, posix_lock_type, wait_flag);
2259 		goto out;
2260 	}
2261 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2262 	if (lock) {
2263 		struct cifsLockInfo *lock;
2264 
2265 		lock = cifs_lock_init(flock->fl_start, length, type,
2266 				      flock->c.flc_flags);
2267 		if (!lock)
2268 			return -ENOMEM;
2269 
2270 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2271 		if (rc < 0) {
2272 			kfree(lock);
2273 			return rc;
2274 		}
2275 		if (!rc)
2276 			goto out;
2277 
2278 		/*
2279 		 * A Windows 7 server can delay breaking a lease from read to
2280 		 * None if we set a byte-range lock on a file - break it
2281 		 * explicitly before sending the lock to the server to be sure
2282 		 * the next read won't conflict with non-overlapping locks due
2283 		 * to page reads.
2284 		 */
2285 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2286 					CIFS_CACHE_READ(CIFS_I(inode))) {
2287 			cifs_zap_mapping(inode);
2288 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2289 				 inode);
2290 			CIFS_I(inode)->oplock = 0;
2291 		}
2292 
2293 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2294 					    type, 1, 0, wait_flag);
2295 		if (rc) {
2296 			kfree(lock);
2297 			return rc;
2298 		}
2299 
2300 		cifs_lock_add(cfile, lock);
2301 	} else if (unlock)
2302 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2303 
2304 out:
2305 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2306 		/*
2307 		 * If this is a request to remove all locks because we
2308 		 * are closing the file, it doesn't matter if the
2309 		 * unlocking failed as both cifs.ko and the SMB server
2310 		 * remove the lock on file close
2311 		 */
2312 		if (rc) {
2313 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2314 			if (!(flock->c.flc_flags & FL_CLOSE))
2315 				return rc;
2316 		}
2317 		rc = locks_lock_file_wait(file, flock);
2318 	}
2319 	return rc;
2320 }
2321 
2322 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2323 {
2324 	int rc, xid;
2325 	int lock = 0, unlock = 0;
2326 	bool wait_flag = false;
2327 	bool posix_lck = false;
2328 	struct cifs_sb_info *cifs_sb;
2329 	struct cifs_tcon *tcon;
2330 	struct cifsFileInfo *cfile;
2331 	__u32 type;
2332 
2333 	xid = get_xid();
2334 
2335 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2336 		rc = -ENOLCK;
2337 		free_xid(xid);
2338 		return rc;
2339 	}
2340 
2341 	cfile = (struct cifsFileInfo *)file->private_data;
2342 	tcon = tlink_tcon(cfile->tlink);
2343 
2344 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2345 			tcon->ses->server);
2346 	cifs_sb = CIFS_FILE_SB(file);
2347 
2348 	if (cap_unix(tcon->ses) &&
2349 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2350 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2351 		posix_lck = true;
2352 
2353 	if (!lock && !unlock) {
2354 		/*
2355 		 * If neither lock nor unlock was requested, there is nothing
2356 		 * to do since we do not know what kind of operation it is.
2357 		 */
2358 		rc = -EOPNOTSUPP;
2359 		free_xid(xid);
2360 		return rc;
2361 	}
2362 
2363 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2364 			xid);
2365 	free_xid(xid);
2366 	return rc;
2369 }
2370 
2371 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2372 {
2373 	int rc, xid;
2374 	int lock = 0, unlock = 0;
2375 	bool wait_flag = false;
2376 	bool posix_lck = false;
2377 	struct cifs_sb_info *cifs_sb;
2378 	struct cifs_tcon *tcon;
2379 	struct cifsFileInfo *cfile;
2380 	__u32 type;
2381 
2382 	rc = -EACCES;
2383 	xid = get_xid();
2384 
2385 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2386 		 flock->c.flc_flags, flock->c.flc_type,
2387 		 (long long)flock->fl_start,
2388 		 (long long)flock->fl_end);
2389 
2390 	cfile = (struct cifsFileInfo *)file->private_data;
2391 	tcon = tlink_tcon(cfile->tlink);
2392 
2393 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2394 			tcon->ses->server);
2395 	cifs_sb = CIFS_FILE_SB(file);
2396 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2397 
2398 	if (cap_unix(tcon->ses) &&
2399 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2400 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2401 		posix_lck = true;
2402 	/*
2403 	 * BB add code here to normalize offset and length to account for
2404 	 * negative length which we can not accept over the wire.
2405 	 */
2406 	if (IS_GETLK(cmd)) {
2407 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2408 		free_xid(xid);
2409 		return rc;
2410 	}
2411 
2412 	if (!lock && !unlock) {
2413 		/*
2414 		 * If neither lock nor unlock was requested, there is nothing
2415 		 * to do since we do not know what kind of operation it is.
2416 		 */
2417 		free_xid(xid);
2418 		return -EOPNOTSUPP;
2419 	}
2420 
2421 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2422 			xid);
2423 	free_xid(xid);
2424 	return rc;
2425 }
2426 
2427 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2428 				      bool was_async)
2429 {
2430 	struct netfs_io_request *wreq = wdata->rreq;
2431 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2432 	loff_t wrend;
2433 
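	/*
	 * On a successful write, advance the netfs bookkeeping: push
	 * zero_point (the offset beyond which the server is assumed to hold
	 * no data) past the bytes just written for unbuffered/direct I/O,
	 * and grow the cached remote_i_size if the file was extended.
	 */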
2434 	if (result > 0) {
2435 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2436 
2437 		if (wrend > ictx->zero_point &&
2438 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2439 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2440 			ictx->zero_point = wrend;
2441 		if (wrend > ictx->remote_i_size)
2442 			netfs_resize_file(ictx, wrend, true);
2443 	}
2444 
2445 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2446 }
2447 
2448 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2449 					bool fsuid_only)
2450 {
2451 	struct cifsFileInfo *open_file = NULL;
2452 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2453 
2454 	/* only filter by fsuid on multiuser mounts */
2455 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2456 		fsuid_only = false;
2457 
2458 	spin_lock(&cifs_inode->open_file_lock);
2459 	/* We could simply get the first list entry since write-only entries
2460 	   are always at the end of the list, but the first entry might have
2461 	   a close pending, so we go through the whole list */
2462 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2463 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2464 			continue;
2465 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2466 			if (!open_file->invalidHandle) {
2467 				/* found a good file */
2468 				/* lock it so it will not be closed on us */
2469 				cifsFileInfo_get(open_file);
2470 				spin_unlock(&cifs_inode->open_file_lock);
2471 				return open_file;
2472 			} /* else might as well continue, and look for
2473 			     another, or simply have the caller reopen it
2474 			     again rather than trying to fix this handle */
2475 		} else /* write only file */
2476 			break; /* write only files are last so must be done */
2477 	}
2478 	spin_unlock(&cifs_inode->open_file_lock);
2479 	return NULL;
2480 }
2481 
2482 /* Return -EBADF if no handle is found and general rc otherwise */
2483 int
2484 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2485 		       struct cifsFileInfo **ret_file)
2486 {
2487 	struct cifsFileInfo *open_file, *inv_file = NULL;
2488 	struct cifs_sb_info *cifs_sb;
2489 	bool any_available = false;
2490 	int rc = -EBADF;
2491 	unsigned int refind = 0;
2492 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2493 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2494 	*ret_file = NULL;
2495 
2496 	/*
2497 	 * Having a null inode here (because mapping->host was set to zero by
2498 	 * the VFS or MM) should not happen but we had reports of an oops (due
2499 	 * to it being zero) during stress testcases so we need to check for it
2500 	 */
2501 
2502 	if (cifs_inode == NULL) {
2503 		cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2504 		dump_stack();
2505 		return rc;
2506 	}
2507 
2508 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2509 
2510 	/* only filter by fsuid on multiuser mounts */
2511 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2512 		fsuid_only = false;
2513 
2514 	spin_lock(&cifs_inode->open_file_lock);
2515 refind_writable:
2516 	if (refind > MAX_REOPEN_ATT) {
2517 		spin_unlock(&cifs_inode->open_file_lock);
2518 		return rc;
2519 	}
2520 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2521 		if (!any_available && open_file->pid != current->tgid)
2522 			continue;
2523 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2524 			continue;
2525 		if (with_delete && !(open_file->fid.access & DELETE))
2526 			continue;
2527 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2528 			if (!open_file->invalidHandle) {
2529 				/* found a good writable file */
2530 				cifsFileInfo_get(open_file);
2531 				spin_unlock(&cifs_inode->open_file_lock);
2532 				*ret_file = open_file;
2533 				return 0;
2534 			} else {
2535 				if (!inv_file)
2536 					inv_file = open_file;
2537 			}
2538 		}
2539 	}
2540 	/* couldn't find a usable FH with the same pid, try any available */
2541 	if (!any_available) {
2542 		any_available = true;
2543 		goto refind_writable;
2544 	}
2545 
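	/*
	 * Only invalidated handles were found.  Take a reference and drop
	 * the spinlock before reopening, since cifs_reopen_file() may sleep
	 * doing network I/O; retries are bounded by MAX_REOPEN_ATT.
	 */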
2546 	if (inv_file) {
2547 		any_available = false;
2548 		cifsFileInfo_get(inv_file);
2549 	}
2550 
2551 	spin_unlock(&cifs_inode->open_file_lock);
2552 
2553 	if (inv_file) {
2554 		rc = cifs_reopen_file(inv_file, false);
2555 		if (!rc) {
2556 			*ret_file = inv_file;
2557 			return 0;
2558 		}
2559 
2560 		spin_lock(&cifs_inode->open_file_lock);
2561 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2562 		spin_unlock(&cifs_inode->open_file_lock);
2563 		cifsFileInfo_put(inv_file);
2564 		++refind;
2565 		inv_file = NULL;
2566 		spin_lock(&cifs_inode->open_file_lock);
2567 		goto refind_writable;
2568 	}
2569 
2570 	return rc;
2571 }
2572 
2573 struct cifsFileInfo *
2574 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2575 {
2576 	struct cifsFileInfo *cfile;
2577 	int rc;
2578 
2579 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2580 	if (rc)
2581 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2582 
2583 	return cfile;
2584 }
2585 
2586 int
2587 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2588 		       int flags,
2589 		       struct cifsFileInfo **ret_file)
2590 {
2591 	struct cifsFileInfo *cfile;
2592 	void *page = alloc_dentry_path();
2593 
2594 	*ret_file = NULL;
2595 
2596 	spin_lock(&tcon->open_file_lock);
2597 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2598 		struct cifsInodeInfo *cinode;
2599 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2600 		if (IS_ERR(full_path)) {
2601 			spin_unlock(&tcon->open_file_lock);
2602 			free_dentry_path(page);
2603 			return PTR_ERR(full_path);
2604 		}
2605 		if (strcmp(full_path, name))
2606 			continue;
2607 
2608 		cinode = CIFS_I(d_inode(cfile->dentry));
2609 		spin_unlock(&tcon->open_file_lock);
2610 		free_dentry_path(page);
2611 		return cifs_get_writable_file(cinode, flags, ret_file);
2612 	}
2613 
2614 	spin_unlock(&tcon->open_file_lock);
2615 	free_dentry_path(page);
2616 	return -ENOENT;
2617 }
2618 
2619 int
2620 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2621 		       struct cifsFileInfo **ret_file)
2622 {
2623 	struct cifsFileInfo *cfile;
2624 	void *page = alloc_dentry_path();
2625 
2626 	*ret_file = NULL;
2627 
2628 	spin_lock(&tcon->open_file_lock);
2629 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2630 		struct cifsInodeInfo *cinode;
2631 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2632 		if (IS_ERR(full_path)) {
2633 			spin_unlock(&tcon->open_file_lock);
2634 			free_dentry_path(page);
2635 			return PTR_ERR(full_path);
2636 		}
2637 		if (strcmp(full_path, name))
2638 			continue;
2639 
2640 		cinode = CIFS_I(d_inode(cfile->dentry));
2641 		spin_unlock(&tcon->open_file_lock);
2642 		free_dentry_path(page);
2643 		*ret_file = find_readable_file(cinode, 0);
2644 		return *ret_file ? 0 : -ENOENT;
2645 	}
2646 
2647 	spin_unlock(&tcon->open_file_lock);
2648 	free_dentry_path(page);
2649 	return -ENOENT;
2650 }
2651 
2652 /*
2653  * Flush data on a strict file.
2654  */
2655 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2656 		      int datasync)
2657 {
2658 	unsigned int xid;
2659 	int rc = 0;
2660 	struct cifs_tcon *tcon;
2661 	struct TCP_Server_Info *server;
2662 	struct cifsFileInfo *smbfile = file->private_data;
2663 	struct inode *inode = file_inode(file);
2664 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2665 
2666 	rc = file_write_and_wait_range(file, start, end);
2667 	if (rc) {
2668 		trace_cifs_fsync_err(inode->i_ino, rc);
2669 		return rc;
2670 	}
2671 
2672 	xid = get_xid();
2673 
2674 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2675 		 file, datasync);
2676 
2677 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2678 		rc = cifs_zap_mapping(inode);
2679 		if (rc) {
2680 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2681 			rc = 0; /* don't care about it in fsync */
2682 		}
2683 	}
2684 
2685 	tcon = tlink_tcon(smbfile->tlink);
2686 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2687 		server = tcon->ses->server;
2688 		if (server->ops->flush == NULL) {
2689 			rc = -ENOSYS;
2690 			goto strict_fsync_exit;
2691 		}
2692 
2693 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2694 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2695 			if (smbfile) {
2696 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2697 				cifsFileInfo_put(smbfile);
2698 			} else
2699 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2700 		} else
2701 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2702 	}
2703 
2704 strict_fsync_exit:
2705 	free_xid(xid);
2706 	return rc;
2707 }
2708 
2709 /*
2710  * Flush data on a non-strict file.
2711  */
2712 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2713 {
2714 	unsigned int xid;
2715 	int rc = 0;
2716 	struct cifs_tcon *tcon;
2717 	struct TCP_Server_Info *server;
2718 	struct cifsFileInfo *smbfile = file->private_data;
2719 	struct inode *inode = file_inode(file);
2720 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2721 
2722 	rc = file_write_and_wait_range(file, start, end);
2723 	if (rc) {
2724 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2725 		return rc;
2726 	}
2727 
2728 	xid = get_xid();
2729 
2730 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2731 		 file, datasync);
2732 
2733 	tcon = tlink_tcon(smbfile->tlink);
2734 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2735 		server = tcon->ses->server;
2736 		if (server->ops->flush == NULL) {
2737 			rc = -ENOSYS;
2738 			goto fsync_exit;
2739 		}
2740 
2741 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2742 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2743 			if (smbfile) {
2744 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2745 				cifsFileInfo_put(smbfile);
2746 			} else
2747 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2748 		} else
2749 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2750 	}
2751 
2752 fsync_exit:
2753 	free_xid(xid);
2754 	return rc;
2755 }
2756 
2757 /*
2758  * As file closes, flush all cached write data for this inode checking
2759  * for write behind errors.
2760  */
2761 int cifs_flush(struct file *file, fl_owner_t id)
2762 {
2763 	struct inode *inode = file_inode(file);
2764 	int rc = 0;
2765 
2766 	if (file->f_mode & FMODE_WRITE)
2767 		rc = filemap_write_and_wait(inode->i_mapping);
2768 
2769 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2770 	if (rc) {
2771 		/* get more nuanced writeback errors */
2772 		rc = filemap_check_wb_err(file->f_mapping, 0);
2773 		trace_cifs_flush_err(inode->i_ino, rc);
2774 	}
2775 	return rc;
2776 }
2777 
2778 static ssize_t
2779 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2780 {
2781 	struct file *file = iocb->ki_filp;
2782 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2783 	struct inode *inode = file->f_mapping->host;
2784 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2785 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2786 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2787 	ssize_t rc;
2788 
2789 	rc = netfs_start_io_write(inode);
2790 	if (rc < 0)
2791 		return rc;
2792 
2793 	/*
2794 	 * We need to hold the sem to be sure nobody modifies the lock list
2795 	 * with a brlock that prevents writing.
2796 	 */
2797 	down_read(&cinode->lock_sem);
2798 
2799 	rc = generic_write_checks(iocb, from);
2800 	if (rc <= 0)
2801 		goto out;
2802 
2803 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2804 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2805 				     server->vals->exclusive_lock_type, 0,
2806 				     NULL, CIFS_WRITE_OP))) {
2807 		rc = -EACCES;
2808 		goto out;
2809 	}
2810 
2811 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2812 
2813 out:
2814 	up_read(&cinode->lock_sem);
2815 	netfs_end_io_write(inode);
2816 	if (rc > 0)
2817 		rc = generic_write_sync(iocb, rc);
2818 	return rc;
2819 }
2820 
2821 ssize_t
2822 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2823 {
2824 	struct inode *inode = file_inode(iocb->ki_filp);
2825 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2826 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2827 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2828 						iocb->ki_filp->private_data;
2829 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2830 	ssize_t written;
2831 
2832 	written = cifs_get_writer(cinode);
2833 	if (written)
2834 		return written;
2835 
2836 	if (CIFS_CACHE_WRITE(cinode)) {
2837 		if (cap_unix(tcon->ses) &&
2838 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2839 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2840 			written = netfs_file_write_iter(iocb, from);
2841 			goto out;
2842 		}
2843 		written = cifs_writev(iocb, from);
2844 		goto out;
2845 	}
2846 	/*
2847 	 * For non-oplocked files in strict cache mode we need to write the data
2848 	 * to the server exactly from pos to pos+len-1 rather than flush all
2849 	 * affected pages because it may cause an error with mandatory locks on
2850 	 * these pages but not on the region from pos to pos+len-1.
2851 	 */
2852 	written = netfs_file_write_iter(iocb, from);
2853 	if (CIFS_CACHE_READ(cinode)) {
2854 		/*
2855 		 * We have read level caching and we have just sent a write
2856 		 * request to the server thus making data in the cache stale.
2857 		 * Zap the cache and set oplock/lease level to NONE to avoid
2858 		 * reading stale data from the cache. All subsequent read
2859 		 * operations will read new data from the server.
2860 		 */
2861 		cifs_zap_mapping(inode);
2862 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2863 			 inode);
2864 		cinode->oplock = 0;
2865 	}
2866 out:
2867 	cifs_put_writer(cinode);
2868 	return written;
2869 }
2870 
2871 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2872 {
2873 	ssize_t rc;
2874 	struct inode *inode = file_inode(iocb->ki_filp);
2875 
2876 	if (iocb->ki_flags & IOCB_DIRECT)
2877 		return netfs_unbuffered_read_iter(iocb, iter);
2878 
2879 	rc = cifs_revalidate_mapping(inode);
2880 	if (rc)
2881 		return rc;
2882 
2883 	return netfs_file_read_iter(iocb, iter);
2884 }
2885 
2886 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2887 {
2888 	struct inode *inode = file_inode(iocb->ki_filp);
2889 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2890 	ssize_t written;
2891 	int rc;
2892 
2893 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2894 		written = netfs_unbuffered_write_iter(iocb, from);
2895 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2896 			cifs_zap_mapping(inode);
2897 			cifs_dbg(FYI,
2898 				 "Set no oplock for inode=%p after a write operation\n",
2899 				 inode);
2900 			cinode->oplock = 0;
2901 		}
2902 		return written;
2903 	}
2904 
2905 	written = cifs_get_writer(cinode);
2906 	if (written)
2907 		return written;
2908 
2909 	written = netfs_file_write_iter(iocb, from);
2910 
2911 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2912 		rc = filemap_fdatawrite(inode->i_mapping);
2913 		if (rc)
2914 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2915 				 rc, inode);
2916 	}
2917 
2918 	cifs_put_writer(cinode);
2919 	return written;
2920 }
2921 
2922 ssize_t
2923 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2924 {
2925 	struct inode *inode = file_inode(iocb->ki_filp);
2926 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2927 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2928 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2929 						iocb->ki_filp->private_data;
2930 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2931 	int rc = -EACCES;
2932 
2933 	/*
2934 	 * In strict cache mode we need to read from the server all the time
2935 	 * if we don't have level II oplock because the server can delay mtime
2936 	 * change - so we can't make a decision about inode invalidating.
2937 	 * And we can also fail with page reading if there are mandatory locks
2938 	 * on pages affected by this read but not on the region from pos to
2939 	 * pos+len-1.
2940 	 */
2941 	if (!CIFS_CACHE_READ(cinode))
2942 		return netfs_unbuffered_read_iter(iocb, to);
2943 
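	/*
	 * NOPOSIXBRL clear means byte-range locks are posix (advisory)
	 * style, so cached reads cannot collide with mandatory locks and
	 * the generic read paths can be used directly.
	 */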
2944 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2945 		if (iocb->ki_flags & IOCB_DIRECT)
2946 			return netfs_unbuffered_read_iter(iocb, to);
2947 		return netfs_buffered_read_iter(iocb, to);
2948 	}
2949 
2950 	/*
2951 	 * We need to hold the sem to be sure nobody modifies the lock list
2952 	 * with a brlock that prevents reading.
2953 	 */
2954 	if (iocb->ki_flags & IOCB_DIRECT) {
2955 		rc = netfs_start_io_direct(inode);
2956 		if (rc < 0)
2957 			goto out;
2958 		rc = -EACCES;
2959 		down_read(&cinode->lock_sem);
2960 		if (!cifs_find_lock_conflict(
2961 			    cfile, iocb->ki_pos, iov_iter_count(to),
2962 			    tcon->ses->server->vals->shared_lock_type,
2963 			    0, NULL, CIFS_READ_OP))
2964 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2965 		up_read(&cinode->lock_sem);
2966 		netfs_end_io_direct(inode);
2967 	} else {
2968 		rc = netfs_start_io_read(inode);
2969 		if (rc < 0)
2970 			goto out;
2971 		rc = -EACCES;
2972 		down_read(&cinode->lock_sem);
2973 		if (!cifs_find_lock_conflict(
2974 			    cfile, iocb->ki_pos, iov_iter_count(to),
2975 			    tcon->ses->server->vals->shared_lock_type,
2976 			    0, NULL, CIFS_READ_OP))
2977 			rc = filemap_read(iocb, to, 0);
2978 		up_read(&cinode->lock_sem);
2979 		netfs_end_io_read(inode);
2980 	}
2981 out:
2982 	return rc;
2983 }
2984 
2985 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2986 {
2987 	return netfs_page_mkwrite(vmf, NULL);
2988 }
2989 
2990 static const struct vm_operations_struct cifs_file_vm_ops = {
2991 	.fault = filemap_fault,
2992 	.map_pages = filemap_map_pages,
2993 	.page_mkwrite = cifs_page_mkwrite,
2994 };
2995 
2996 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2997 {
2998 	int xid, rc = 0;
2999 	struct inode *inode = file_inode(file);
3000 
3001 	xid = get_xid();
3002 
3003 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3004 		rc = cifs_zap_mapping(inode);
3005 	if (!rc)
3006 		rc = generic_file_mmap(file, vma);
3007 	if (!rc)
3008 		vma->vm_ops = &cifs_file_vm_ops;
3009 
3010 	free_xid(xid);
3011 	return rc;
3012 }
3013 
3014 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3015 {
3016 	int rc, xid;
3017 
3018 	xid = get_xid();
3019 
3020 	rc = cifs_revalidate_file(file);
3021 	if (rc)
3022 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3023 			 rc);
3024 	if (!rc)
3025 		rc = generic_file_mmap(file, vma);
3026 	if (!rc)
3027 		vma->vm_ops = &cifs_file_vm_ops;
3028 
3029 	free_xid(xid);
3030 	return rc;
3031 }
3032 
3033 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3034 {
3035 	struct cifsFileInfo *open_file;
3036 
3037 	spin_lock(&cifs_inode->open_file_lock);
3038 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3039 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3040 			spin_unlock(&cifs_inode->open_file_lock);
3041 			return 1;
3042 		}
3043 	}
3044 	spin_unlock(&cifs_inode->open_file_lock);
3045 	return 0;
3046 }
3047 
3048 /* We do not want to update the file size from the server for inodes
3049    open for write - to avoid races with writepage extending
3050    the file. In the future we could consider allowing
3051    refreshing the inode only on increases in the file size,
3052    but this is tricky to do without racing with writebehind
3053    page caching in the current Linux kernel design */
3054 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3055 			    bool from_readdir)
3056 {
3057 	if (!cifsInode)
3058 		return true;
3059 
3060 	if (is_inode_writable(cifsInode) ||
3061 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3062 		/* This inode is open for write at least once */
3063 		struct cifs_sb_info *cifs_sb;
3064 
3065 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3066 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3067 			/* since there is no page cache to corrupt on
3068 			   directio we can change the size safely */
3069 			return true;
3070 		}
3071 
3072 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3073 			return true;
3074 
3075 		return false;
3076 	} else
3077 		return true;
3078 }
3079 
3080 void cifs_oplock_break(struct work_struct *work)
3081 {
3082 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3083 						  oplock_break);
3084 	struct inode *inode = d_inode(cfile->dentry);
3085 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3086 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3087 	struct cifs_tcon *tcon;
3088 	struct TCP_Server_Info *server;
3089 	struct tcon_link *tlink;
3090 	int rc = 0;
3091 	bool purge_cache = false, oplock_break_cancelled;
3092 	__u64 persistent_fid, volatile_fid;
3093 	__u16 net_fid;
3094 
3095 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3096 			TASK_UNINTERRUPTIBLE);
3097 
3098 	tlink = cifs_sb_tlink(cifs_sb);
3099 	if (IS_ERR(tlink))
3100 		goto out;
3101 	tcon = tlink_tcon(tlink);
3102 	server = tcon->ses->server;
3103 
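	/*
	 * Overall sequence: downgrade the cached oplock state, write back
	 * and/or zap the page cache as the new level requires, push any
	 * cached byte-range locks to the server, then acknowledge the break
	 * unless the handle has already been closed.
	 */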
3104 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3105 				      cfile->oplock_epoch, &purge_cache);
3106 
3107 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3108 						cifs_has_mand_locks(cinode)) {
3109 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3110 			 inode);
3111 		cinode->oplock = 0;
3112 	}
3113 
3114 	if (inode && S_ISREG(inode->i_mode)) {
3115 		if (CIFS_CACHE_READ(cinode))
3116 			break_lease(inode, O_RDONLY);
3117 		else
3118 			break_lease(inode, O_WRONLY);
3119 		rc = filemap_fdatawrite(inode->i_mapping);
3120 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3121 			rc = filemap_fdatawait(inode->i_mapping);
3122 			mapping_set_error(inode->i_mapping, rc);
3123 			cifs_zap_mapping(inode);
3124 		}
3125 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3126 		if (CIFS_CACHE_WRITE(cinode))
3127 			goto oplock_break_ack;
3128 	}
3129 
3130 	rc = cifs_push_locks(cfile);
3131 	if (rc)
3132 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3133 
3134 oplock_break_ack:
3135 	/*
3136 	 * When an oplock break is received and there are no active file
3137 	 * handles, only cached ones, schedule the deferred close immediately
3138 	 * so that a new open will not use the cached handle.
3139 	 */
3140 
3141 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3142 		cifs_close_deferred_file(cinode);
3143 
3144 	persistent_fid = cfile->fid.persistent_fid;
3145 	volatile_fid = cfile->fid.volatile_fid;
3146 	net_fid = cfile->fid.netfid;
3147 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3148 
3149 	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3150 	/*
3151 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3152 	 * an acknowledgment to be sent when the file has already been closed.
3153 	 */
3154 	spin_lock(&cinode->open_file_lock);
3155 	/* check list empty since can race with kill_sb calling tree disconnect */
3156 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3157 		spin_unlock(&cinode->open_file_lock);
3158 		rc = server->ops->oplock_response(tcon, persistent_fid,
3159 						  volatile_fid, net_fid, cinode);
3160 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3161 	} else
3162 		spin_unlock(&cinode->open_file_lock);
3163 
3164 	cifs_put_tlink(tlink);
3165 out:
3166 	cifs_done_oplock_break(cinode);
3167 }
3168 
3169 static int cifs_swap_activate(struct swap_info_struct *sis,
3170 			      struct file *swap_file, sector_t *span)
3171 {
3172 	struct cifsFileInfo *cfile = swap_file->private_data;
3173 	struct inode *inode = swap_file->f_mapping->host;
3174 	unsigned long blocks;
3175 	long long isize;
3176 
3177 	cifs_dbg(FYI, "swap activate\n");
3178 
3179 	if (!swap_file->f_mapping->a_ops->swap_rw)
3180 		/* Cannot support swap */
3181 		return -EINVAL;
3182 
3183 	spin_lock(&inode->i_lock);
3184 	blocks = inode->i_blocks;
3185 	isize = inode->i_size;
3186 	spin_unlock(&inode->i_lock);
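	/*
	 * i_blocks counts 512-byte sectors; if they cover less than i_size,
	 * the file is sparse.  Swap I/O is done directly against mapped
	 * extents and cannot fill holes, so refuse sparse swapfiles.
	 */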
3187 	if (blocks * 512 < isize) {
3188 		pr_warn("swap activate: swapfile has holes\n");
3189 		return -EINVAL;
3190 	}
3191 	*span = sis->pages;
3192 
3193 	pr_warn_once("Swap support over SMB3 is experimental\n");
3194 
3195 	/*
3196 	 * TODO: consider adding ACL (or documenting how) to prevent other
3197 	 * users (on this or other systems) from reading it
3198 	 */
3199 
3201 	/* TODO: add sk_set_memalloc(inet) or similar */
3202 
3203 	if (cfile)
3204 		cfile->swapfile = true;
3205 	/*
3206 	 * TODO: Since file already open, we can't open with DENY_ALL here
3207 	 * but we could add call to grab a byte range lock to prevent others
3208 	 * from reading or writing the file
3209 	 */
3210 
3211 	sis->flags |= SWP_FS_OPS;
3212 	return add_swap_extent(sis, 0, sis->max, 0);
3213 }
3214 
3215 static void cifs_swap_deactivate(struct file *file)
3216 {
3217 	struct cifsFileInfo *cfile = file->private_data;
3218 
3219 	cifs_dbg(FYI, "swap deactivate\n");
3220 
3221 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3222 
3223 	if (cfile)
3224 		cfile->swapfile = false;
3225 
3226 	/* do we need to unpin (or unlock) the file */
3227 }
3228 
3229 /**
3230  * cifs_swap_rw - SMB3 address space operation for swap I/O
3231  * @iocb: target I/O control block
3232  * @iter: I/O buffer
3233  *
3234  * Perform IO to the swap-file.  This is much like direct IO.
3235  */
3236 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3237 {
3238 	ssize_t ret;
3239 
3240 	if (iov_iter_rw(iter) == READ)
3241 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3242 	else
3243 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3244 	if (ret < 0)
3245 		return ret;
3246 	return 0;
3247 }
3248 
3249 const struct address_space_operations cifs_addr_ops = {
3250 	.read_folio	= netfs_read_folio,
3251 	.readahead	= netfs_readahead,
3252 	.writepages	= netfs_writepages,
3253 	.dirty_folio	= netfs_dirty_folio,
3254 	.release_folio	= netfs_release_folio,
3255 	.direct_IO	= noop_direct_IO,
3256 	.invalidate_folio = netfs_invalidate_folio,
3257 	.migrate_folio	= filemap_migrate_folio,
3258 	/*
3259 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3260 	 * helper if needed
3261 	 */
3262 	.swap_activate	= cifs_swap_activate,
3263 	.swap_deactivate = cifs_swap_deactivate,
3264 	.swap_rw = cifs_swap_rw,
3265 };
3266 
3267 /*
3268  * cifs_readahead requires the server to support a buffer large enough to
3269  * contain the header plus one complete page of data.  Otherwise, we need
3270  * to leave cifs_readahead out of the address space operations.
3271  */
3272 const struct address_space_operations cifs_addr_ops_smallbuf = {
3273 	.read_folio	= netfs_read_folio,
3274 	.writepages	= netfs_writepages,
3275 	.dirty_folio	= netfs_dirty_folio,
3276 	.release_folio	= netfs_release_folio,
3277 	.invalidate_folio = netfs_invalidate_folio,
3278 	.migrate_folio	= filemap_migrate_folio,
3279 };
3280