// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, &wdata->credits, wdata->subreq.len);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc, false);
	goto out;
}

/*
 * Split the read up according to how many credits we can get for each piece.
 * It's okay to sleep here if we need to wait for more credit to become
 * available.
 *
 * We also choose the server and allocate an operation ID to be cleaned up
 * later.
 */
static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = req->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t rsize = 0;
	int rc;

	rdata->xid = get_xid();
	rdata->have_xid = true;
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
						     cifs_sb->ctx);

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
					   &rdata->credits);
	if (rc) {
		subreq->error = rc;
		return false;
	}

	subreq->len = min_t(size_t, subreq->len, rsize);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
#endif
	return true;
}

/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto out;
	}

	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	rc = rdata->server->ops->async_readv(rdata);
out:
	if (rc)
		netfs_subreq_terminated(subreq, rc, false);
}

/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; /* may be running on a workqueue, not the originating task */

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}

/*
 * Expand the size of a readahead to the size of the rsize, if at least as
 * large as a page, allowing for the possibility that rsize is not pow-2
 * aligned.
 */
static void cifs_expand_readahead(struct netfs_io_request *rreq)
{
	unsigned int rsize = rreq->rsize;
	loff_t misalignment, i_size = i_size_read(rreq->inode);

	if (rsize < PAGE_SIZE)
		return;

	if (rsize < INT_MAX)
		rsize = roundup_pow_of_two(rsize);
	else
		rsize = ((unsigned int)INT_MAX + 1) / 2;

	misalignment = rreq->start & (rsize - 1);
	if (misalignment) {
		rreq->start -= misalignment;
		rreq->len += misalignment;
	}

	rreq->len = round_up(rreq->len, rsize);
	if (rreq->start < i_size && rreq->len > i_size - rreq->start)
		rreq->len = i_size - rreq->start;
}

/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime, it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime) < 0)
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

static void cifs_post_modify(struct inode *inode)
{
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
}

static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}

static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
	if (rdata->have_xid)
		free_xid(rdata->xid);
}

const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.expand_readahead	= cifs_expand_readahead,
	.clamp_length		= cifs_clamp_length,
	.issue_read		= cifs_req_issue_read,
	.done			= cifs_rreq_done,
	.post_modify		= cifs_post_modify,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
};

/*
 * Mark all open files on the tree connection as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

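/*
 * Open @full_path on the server with NT create semantics and then refresh
 * the local inode metadata from the server's response.
 */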
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the FILE_SUPERSEDE
 *	disposition (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing
 *	file rather than creating a new one as FILE_SUPERSEDE does
 *	(FILE_SUPERSEDE uses the attributes / metadata passed in on
 *	the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/
562 
563 	disposition = cifs_get_disposition(f_flags);
564 
565 	/* BB pass O_SYNC flag through on file attributes .. BB */
566 
567 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
568 	if (f_flags & O_SYNC)
569 		create_options |= CREATE_WRITE_THROUGH;
570 
571 	if (f_flags & O_DIRECT)
572 		create_options |= CREATE_NO_BUFFER;
573 
574 retry_open:
575 	oparms = (struct cifs_open_parms) {
576 		.tcon = tcon,
577 		.cifs_sb = cifs_sb,
578 		.desired_access = desired_access,
579 		.create_options = cifs_create_options(cifs_sb, create_options),
580 		.disposition = disposition,
581 		.path = full_path,
582 		.fid = fid,
583 	};
584 
585 	rc = server->ops->open(xid, &oparms, oplock, buf);
586 	if (rc) {
587 		if (rc == -EACCES && rdwr_for_fscache == 1) {
588 			desired_access = cifs_convert_flags(f_flags, 0);
589 			rdwr_for_fscache = 2;
590 			goto retry_open;
591 		}
592 		return rc;
593 	}
594 	if (rdwr_for_fscache == 2)
595 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
596 
597 	/* TODO: Add support for calling posix query info but with passing in fid */
598 	if (tcon->unix_ext)
599 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
600 					      xid);
601 	else
602 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
603 					 xid, fid);
604 
605 	if (rc) {
606 		server->ops->close(xid, tcon, fid);
607 		if (rc == -ESTALE)
608 			rc = -EOPENSTALE;
609 	}
610 
611 	return rc;
612 }
613 
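/*
 * Return true if any of the open fids on this inode still holds cached
 * byte-range locks.
 */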
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

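/* Take @sem for writing, polling in 10ms steps rather than blocking. */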
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

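/*
 * Allocate a cifsFileInfo for a freshly opened @fid, link it into the
 * per-inode and per-tcon open-file lists and attach it to @file.
 */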
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if this is a readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

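/*
 * Work handler that retries a failed server-side close (up to MAX_RETRIES
 * times while the server keeps returning -EBUSY or -EAGAIN) and then
 * releases the cifsFileInfo.
 */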
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);

	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);

	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference to file priv data
 *
 * May wait for the file's oplock break handler to finish; see
 * _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference to file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	if true, queue the final release on a workqueue; the close
 *		and oplock break paths pass false
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// if serverclose has been offloaded to wq (on failure), it will
	// handle offloading put as well. If serverclose not offloaded,
	// we need to handle offloading put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

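/*
 * ->open() for regular files: reuse a cached handle whose close was
 * deferred if the open flags match, otherwise open the file on the server.
 */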
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Set the mode now, since we could not set it earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

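/*
 * Reopen a file handle that was invalidated by a reconnect.  If @can_flush
 * is set, dirty pages are flushed and the inode metadata is refreshed once
 * the handle has been reestablished.
 */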
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here: various ops, including some that
	 * already hold it, can end up causing writepage to get called, and if
	 * the server was down that means we end up here; we can never tell
	 * whether the caller already holds the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * We cannot refresh the inode by passing in a file_info buf to be
	 * returned by ops->open and then calling get_inode_info with the
	 * returned buf, since the file might have write-behind data that
	 * needs to be flushed and the server's version of the file size can
	 * be stale. If we knew for sure that the inode was not dirty locally
	 * we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are already writing out data to the server and could
	 * deadlock if we tried to flush data; and since we do not know
	 * whether we have data that would invalidate the current end of
	 * file on the server, we cannot go to the server to get the new
	 * inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

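/* Delayed-work handler that performs a previously deferred close. */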
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

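/*
 * A close may be deferred only while a read-caching lease (RH or RHW) is
 * held, a close timeout is configured, and no lock has been taken on the
 * file.
 */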
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}

int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * mod_delayed_work() returns false when it has
				 * queued new work, so take an extra reference
				 * to avoid a use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

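/*
 * ->release() for directories: close any in-progress search handle on the
 * server and free the cached readdir buffer.
 */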
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

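/*
 * Allocate and initialise a byte-range lock record owned by the current
 * thread group.
 */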
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
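		/* ranges that do not overlap [offset, offset + length) cannot conflict */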
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->c.flc_type;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
		flock->c.flc_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) <0, if an error occurs while setting the lock;
 * 2) 0, if we set the lock and don't need to request to the server;
 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}

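/*
 * Push all cached byte-range locks for @cfile to the server, packing as
 * many LOCKING_ANDX ranges into each request as the negotiated buffer
 * size allows.
 */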
1718 int
1719 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1720 {
1721 	unsigned int xid;
1722 	int rc = 0, stored_rc;
1723 	struct cifsLockInfo *li, *tmp;
1724 	struct cifs_tcon *tcon;
1725 	unsigned int num, max_num, max_buf;
1726 	LOCKING_ANDX_RANGE *buf, *cur;
1727 	static const int types[] = {
1728 		LOCKING_ANDX_LARGE_FILES,
1729 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1730 	};
1731 	int i;
1732 
1733 	xid = get_xid();
1734 	tcon = tlink_tcon(cfile->tlink);
1735 
1736 	/*
1737 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1738 	 * and check it before using.
1739 	 */
1740 	max_buf = tcon->ses->server->maxBuf;
1741 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1742 		free_xid(xid);
1743 		return -EINVAL;
1744 	}
1745 
1746 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1747 		     PAGE_SIZE);
1748 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1749 			PAGE_SIZE);
1750 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1751 						sizeof(LOCKING_ANDX_RANGE);
1752 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1753 	if (!buf) {
1754 		free_xid(xid);
1755 		return -ENOMEM;
1756 	}
1757 
1758 	for (i = 0; i < 2; i++) {
1759 		cur = buf;
1760 		num = 0;
1761 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1762 			if (li->type != types[i])
1763 				continue;
1764 			cur->Pid = cpu_to_le16(li->pid);
1765 			cur->LengthLow = cpu_to_le32((u32)li->length);
1766 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1767 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1768 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1769 			if (++num == max_num) {
1770 				stored_rc = cifs_lockv(xid, tcon,
1771 						       cfile->fid.netfid,
1772 						       (__u8)li->type, 0, num,
1773 						       buf);
1774 				if (stored_rc)
1775 					rc = stored_rc;
1776 				cur = buf;
1777 				num = 0;
1778 			} else
1779 				cur++;
1780 		}
1781 
1782 		if (num) {
1783 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1784 					       (__u8)types[i], 0, num, buf);
1785 			if (stored_rc)
1786 				rc = stored_rc;
1787 		}
1788 	}
1789 
1790 	kfree(buf);
1791 	free_xid(xid);
1792 	return rc;
1793 }
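
/*
 * Worked example for the sizing above (illustrative figures only): with a
 * 4 KiB PAGE_SIZE the request buffer is capped at 4096 bytes, and
 * assuming, say, a ~32-byte SMB header and 20-byte LOCKING_ANDX_RANGE
 * entries, roughly (4096 - 32) / 20 = 203 ranges fit in one LOCKING_ANDX
 * request; once that many are queued the batch is sent and the buffer
 * reused.
 */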
1794 
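/*
 * Lock owners are kernel pointers; hashing them and XORing with the
 * random cifs_lock_secret means a raw pointer value is never sent as the
 * PID field of a POSIX lock request on the wire.
 */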
1795 static __u32
1796 hash_lockowner(fl_owner_t owner)
1797 {
1798 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1799 }
1800 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1801 
1802 struct lock_to_push {
1803 	struct list_head llist;
1804 	__u64 offset;
1805 	__u64 length;
1806 	__u32 pid;
1807 	__u16 netfid;
1808 	__u8 type;
1809 };
1810 
1811 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1812 static int
1813 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1814 {
1815 	struct inode *inode = d_inode(cfile->dentry);
1816 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1817 	struct file_lock *flock;
1818 	struct file_lock_context *flctx = locks_inode_context(inode);
1819 	unsigned int count = 0, i;
1820 	int rc = 0, xid, type;
1821 	struct list_head locks_to_send, *el;
1822 	struct lock_to_push *lck, *tmp;
1823 	__u64 length;
1824 
1825 	xid = get_xid();
1826 
1827 	if (!flctx)
1828 		goto out;
1829 
1830 	spin_lock(&flctx->flc_lock);
1831 	list_for_each(el, &flctx->flc_posix) {
1832 		count++;
1833 	}
1834 	spin_unlock(&flctx->flc_lock);
1835 
1836 	INIT_LIST_HEAD(&locks_to_send);
1837 
1838 	/*
1839 	 * Allocating count locks is enough because no FL_POSIX locks can be
1840 	 * added to the list while we are holding cinode->lock_sem, which
1841 	 * protects locking operations on this inode.
1842 	 */
1843 	for (i = 0; i < count; i++) {
1844 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1845 		if (!lck) {
1846 			rc = -ENOMEM;
1847 			goto err_out;
1848 		}
1849 		list_add_tail(&lck->llist, &locks_to_send);
1850 	}
1851 
1852 	el = locks_to_send.next;
1853 	spin_lock(&flctx->flc_lock);
1854 	for_each_file_lock(flock, &flctx->flc_posix) {
1855 		unsigned char ftype = flock->c.flc_type;
1856 
1857 		if (el == &locks_to_send) {
1858 			/*
1859 			 * The list ended. We don't have enough allocated
1860 			 * structures - something is really wrong.
1861 			 */
1862 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1863 			break;
1864 		}
1865 		length = cifs_flock_len(flock);
1866 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1867 			type = CIFS_RDLCK;
1868 		else
1869 			type = CIFS_WRLCK;
1870 		lck = list_entry(el, struct lock_to_push, llist);
1871 		lck->pid = hash_lockowner(flock->c.flc_owner);
1872 		lck->netfid = cfile->fid.netfid;
1873 		lck->length = length;
1874 		lck->type = type;
1875 		lck->offset = flock->fl_start;
1876 	}
1877 	spin_unlock(&flctx->flc_lock);
1878 
1879 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1880 		int stored_rc;
1881 
1882 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1883 					     lck->offset, lck->length, NULL,
1884 					     lck->type, 0);
1885 		if (stored_rc)
1886 			rc = stored_rc;
1887 		list_del(&lck->llist);
1888 		kfree(lck);
1889 	}
1890 
1891 out:
1892 	free_xid(xid);
1893 	return rc;
1894 err_out:
1895 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1896 		list_del(&lck->llist);
1897 		kfree(lck);
1898 	}
1899 	goto out;
1900 }
1901 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1902 
1903 static int
1904 cifs_push_locks(struct cifsFileInfo *cfile)
1905 {
1906 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1907 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1908 	int rc = 0;
1909 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1910 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1911 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1912 
1913 	/* we are going to update can_cache_brlcks here - need write access */
1914 	cifs_down_write(&cinode->lock_sem);
1915 	if (!cinode->can_cache_brlcks) {
1916 		up_write(&cinode->lock_sem);
1917 		return rc;
1918 	}
1919 
1920 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1921 	if (cap_unix(tcon->ses) &&
1922 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1923 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1924 		rc = cifs_push_posix_locks(cfile);
1925 	else
1926 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1927 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1928 
1929 	cinode->can_cache_brlcks = false;
1930 	up_write(&cinode->lock_sem);
1931 	return rc;
1932 }
1933 
1934 static void
1935 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1936 		bool *wait_flag, struct TCP_Server_Info *server)
1937 {
1938 	if (flock->c.flc_flags & FL_POSIX)
1939 		cifs_dbg(FYI, "Posix\n");
1940 	if (flock->c.flc_flags & FL_FLOCK)
1941 		cifs_dbg(FYI, "Flock\n");
1942 	if (flock->c.flc_flags & FL_SLEEP) {
1943 		cifs_dbg(FYI, "Blocking lock\n");
1944 		*wait_flag = true;
1945 	}
1946 	if (flock->c.flc_flags & FL_ACCESS)
1947 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1948 	if (flock->c.flc_flags & FL_LEASE)
1949 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1950 	if (flock->c.flc_flags &
1951 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1952 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1953 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1954 		         flock->c.flc_flags);
1955 
1956 	*type = server->vals->large_lock_type;
1957 	if (lock_is_write(flock)) {
1958 		cifs_dbg(FYI, "F_WRLCK\n");
1959 		*type |= server->vals->exclusive_lock_type;
1960 		*lock = 1;
1961 	} else if (lock_is_unlock(flock)) {
1962 		cifs_dbg(FYI, "F_UNLCK\n");
1963 		*type |= server->vals->unlock_lock_type;
1964 		*unlock = 1;
1965 		/* Check if unlock includes more than one lock range */
1966 	} else if (lock_is_read(flock)) {
1967 		cifs_dbg(FYI, "F_RDLCK\n");
1968 		*type |= server->vals->shared_lock_type;
1969 		*lock = 1;
1970 	} else if (flock->c.flc_type == F_EXLCK) {
1971 		cifs_dbg(FYI, "F_EXLCK\n");
1972 		*type |= server->vals->exclusive_lock_type;
1973 		*lock = 1;
1974 	} else if (flock->c.flc_type == F_SHLCK) {
1975 		cifs_dbg(FYI, "F_SHLCK\n");
1976 		*type |= server->vals->shared_lock_type;
1977 		*lock = 1;
1978 	} else
1979 		cifs_dbg(FYI, "Unknown type of lock\n");
1980 }
1981 
1982 static int
1983 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1984 	   bool wait_flag, bool posix_lck, unsigned int xid)
1985 {
1986 	int rc = 0;
1987 	__u64 length = cifs_flock_len(flock);
1988 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1989 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1990 	struct TCP_Server_Info *server = tcon->ses->server;
1991 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1992 	__u16 netfid = cfile->fid.netfid;
1993 
1994 	if (posix_lck) {
1995 		int posix_lock_type;
1996 
1997 		rc = cifs_posix_lock_test(file, flock);
1998 		if (!rc)
1999 			return rc;
2000 
2001 		if (type & server->vals->shared_lock_type)
2002 			posix_lock_type = CIFS_RDLCK;
2003 		else
2004 			posix_lock_type = CIFS_WRLCK;
2005 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2006 				      hash_lockowner(flock->c.flc_owner),
2007 				      flock->fl_start, length, flock,
2008 				      posix_lock_type, wait_flag);
2009 		return rc;
2010 	}
2011 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2012 
2013 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2014 	if (!rc)
2015 		return rc;
2016 
2017 	/* BB we could chain these into one lock request BB */
2018 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2019 				    1, 0, false);
2020 	if (rc == 0) {
2021 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2022 					    type, 0, 1, false);
2023 		flock->c.flc_type = F_UNLCK;
2024 		if (rc != 0)
2025 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2026 				 rc);
2027 		return 0;
2028 	}
2029 
2030 	if (type & server->vals->shared_lock_type) {
2031 		flock->c.flc_type = F_WRLCK;
2032 		return 0;
2033 	}
2034 
2035 	type &= ~server->vals->exclusive_lock_type;
2036 
2037 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2038 				    type | server->vals->shared_lock_type,
2039 				    1, 0, false);
2040 	if (rc == 0) {
2041 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2042 			type | server->vals->shared_lock_type, 0, 1, false);
2043 		flock->c.flc_type = F_RDLCK;
2044 		if (rc != 0)
2045 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2046 				 rc);
2047 	} else
2048 		flock->c.flc_type = F_WRLCK;
2049 
2050 	return 0;
2051 }
2052 
2053 void
2054 cifs_move_llist(struct list_head *source, struct list_head *dest)
2055 {
2056 	struct list_head *li, *tmp;
2057 	list_for_each_safe(li, tmp, source)
2058 		list_move(li, dest);
2059 }
2060 
2061 void
2062 cifs_free_llist(struct list_head *llist)
2063 {
2064 	struct cifsLockInfo *li, *tmp;
2065 	list_for_each_entry_safe(li, tmp, llist, llist) {
2066 		cifs_del_lock_waiters(li);
2067 		list_del(&li->llist);
2068 		kfree(li);
2069 	}
2070 }
2071 
2072 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2073 int
2074 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2075 		  unsigned int xid)
2076 {
2077 	int rc = 0, stored_rc;
2078 	static const int types[] = {
2079 		LOCKING_ANDX_LARGE_FILES,
2080 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2081 	};
2082 	unsigned int i;
2083 	unsigned int max_num, num, max_buf;
2084 	LOCKING_ANDX_RANGE *buf, *cur;
2085 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2086 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2087 	struct cifsLockInfo *li, *tmp;
2088 	__u64 length = cifs_flock_len(flock);
2089 	struct list_head tmp_llist;
2090 
2091 	INIT_LIST_HEAD(&tmp_llist);
2092 
2093 	/*
2094 	 * Accessing maxBuf is racy with cifs_reconnect - need to store the
2095 	 * value and check it before use.
2096 	 */
2097 	max_buf = tcon->ses->server->maxBuf;
2098 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2099 		return -EINVAL;
2100 
2101 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2102 		     PAGE_SIZE);
2103 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2104 			PAGE_SIZE);
2105 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2106 						sizeof(LOCKING_ANDX_RANGE);
2107 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2108 	if (!buf)
2109 		return -ENOMEM;
2110 
2111 	cifs_down_write(&cinode->lock_sem);
2112 	for (i = 0; i < 2; i++) {
2113 		cur = buf;
2114 		num = 0;
2115 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2116 			if (flock->fl_start > li->offset ||
2117 			    (flock->fl_start + length) <
2118 			    (li->offset + li->length))
2119 				continue;
2120 			if (current->tgid != li->pid)
2121 				continue;
2122 			if (types[i] != li->type)
2123 				continue;
2124 			if (cinode->can_cache_brlcks) {
2125 				/*
2126 				 * We can cache brlock requests - simply remove
2127 				 * a lock from the file's list.
2128 				 */
2129 				list_del(&li->llist);
2130 				cifs_del_lock_waiters(li);
2131 				kfree(li);
2132 				continue;
2133 			}
2134 			cur->Pid = cpu_to_le16(li->pid);
2135 			cur->LengthLow = cpu_to_le32((u32)li->length);
2136 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2137 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2138 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2139 			/*
2140 			 * We need to save a lock here to let us add it again to
2141 			 * the file's list if the unlock range request fails on
2142 			 * the server.
2143 			 */
2144 			list_move(&li->llist, &tmp_llist);
2145 			if (++num == max_num) {
2146 				stored_rc = cifs_lockv(xid, tcon,
2147 						       cfile->fid.netfid,
2148 						       li->type, num, 0, buf);
2149 				if (stored_rc) {
2150 					/*
2151 					 * We failed on the unlock range
2152 					 * request - add all locks from the tmp
2153 					 * list to the head of the file's list.
2154 					 */
2155 					cifs_move_llist(&tmp_llist,
2156 							&cfile->llist->locks);
2157 					rc = stored_rc;
2158 				} else
2159 					/*
2160 					 * The unlock range request succeeded -
2161 					 * free the tmp list.
2162 					 */
2163 					cifs_free_llist(&tmp_llist);
2164 				cur = buf;
2165 				num = 0;
2166 			} else
2167 				cur++;
2168 		}
2169 		if (num) {
2170 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2171 					       types[i], num, 0, buf);
2172 			if (stored_rc) {
2173 				cifs_move_llist(&tmp_llist,
2174 						&cfile->llist->locks);
2175 				rc = stored_rc;
2176 			} else
2177 				cifs_free_llist(&tmp_llist);
2178 		}
2179 	}
2180 
2181 	up_write(&cinode->lock_sem);
2182 	kfree(buf);
2183 	return rc;
2184 }
2185 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2186 
2187 static int
2188 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2189 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2190 	   unsigned int xid)
2191 {
2192 	int rc = 0;
2193 	__u64 length = cifs_flock_len(flock);
2194 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2195 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2196 	struct TCP_Server_Info *server = tcon->ses->server;
2197 	struct inode *inode = d_inode(cfile->dentry);
2198 
2199 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2200 	if (posix_lck) {
2201 		int posix_lock_type;
2202 
2203 		rc = cifs_posix_lock_set(file, flock);
2204 		if (rc <= FILE_LOCK_DEFERRED)
2205 			return rc;
2206 
2207 		if (type & server->vals->shared_lock_type)
2208 			posix_lock_type = CIFS_RDLCK;
2209 		else
2210 			posix_lock_type = CIFS_WRLCK;
2211 
2212 		if (unlock == 1)
2213 			posix_lock_type = CIFS_UNLCK;
2214 
2215 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2216 				      hash_lockowner(flock->c.flc_owner),
2217 				      flock->fl_start, length,
2218 				      NULL, posix_lock_type, wait_flag);
2219 		goto out;
2220 	}
2221 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2222 	if (lock) {
2223 		struct cifsLockInfo *lock;
2224 
2225 		lock = cifs_lock_init(flock->fl_start, length, type,
2226 				      flock->c.flc_flags);
2227 		if (!lock)
2228 			return -ENOMEM;
2229 
2230 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2231 		if (rc < 0) {
2232 			kfree(lock);
2233 			return rc;
2234 		}
2235 		if (!rc)
2236 			goto out;
2237 
2238 		/*
2239 		 * A Windows 7 server can delay breaking a lease from read to None
2240 		 * if we set a byte-range lock on a file - break it explicitly
2241 		 * before sending the lock to the server to be sure the next
2242 		 * read won't conflict with non-overlapping locks due to
2243 		 * page reading.
2244 		 */
2245 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2246 					CIFS_CACHE_READ(CIFS_I(inode))) {
2247 			cifs_zap_mapping(inode);
2248 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2249 				 inode);
2250 			CIFS_I(inode)->oplock = 0;
2251 		}
2252 
2253 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2254 					    type, 1, 0, wait_flag);
2255 		if (rc) {
2256 			kfree(lock);
2257 			return rc;
2258 		}
2259 
2260 		cifs_lock_add(cfile, lock);
2261 	} else if (unlock)
2262 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2263 
2264 out:
2265 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2266 		/*
2267 		 * If this is a request to remove all locks because we
2268 		 * are closing the file, it doesn't matter if the
2269 		 * unlocking failed as both cifs.ko and the SMB server
2270 		 * remove the lock on file close
2271 		 */
2272 		if (rc) {
2273 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2274 			if (!(flock->c.flc_flags & FL_CLOSE))
2275 				return rc;
2276 		}
2277 		rc = locks_lock_file_wait(file, flock);
2278 	}
2279 	return rc;
2280 }
2281 
2282 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2283 {
2284 	int rc, xid;
2285 	int lock = 0, unlock = 0;
2286 	bool wait_flag = false;
2287 	bool posix_lck = false;
2288 	struct cifs_sb_info *cifs_sb;
2289 	struct cifs_tcon *tcon;
2290 	struct cifsFileInfo *cfile;
2291 	__u32 type;
2292 
2293 	xid = get_xid();
2294 
2295 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2296 		rc = -ENOLCK;
2297 		free_xid(xid);
2298 		return rc;
2299 	}
2300 
2301 	cfile = (struct cifsFileInfo *)file->private_data;
2302 	tcon = tlink_tcon(cfile->tlink);
2303 
2304 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2305 			tcon->ses->server);
2306 	cifs_sb = CIFS_FILE_SB(file);
2307 
2308 	if (cap_unix(tcon->ses) &&
2309 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2310 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2311 		posix_lck = true;
2312 
2313 	if (!lock && !unlock) {
2314 		/*
2315 		 * if this is neither a lock nor an unlock request then there is
2316 		 * nothing to do since we do not know what it is
2317 		 */
2318 		rc = -EOPNOTSUPP;
2319 		free_xid(xid);
2320 		return rc;
2321 	}
2322 
2323 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2324 			xid);
2325 	free_xid(xid);
2326 	return rc;
2329 }
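
/*
 * Illustrative userspace sketch (not part of this file): flock(2)
 * requests arrive in cifs_flock() above with FL_FLOCK set.  LOCK_EX maps
 * to a write-type lock, LOCK_SH to a read-type one and LOCK_UN to an
 * unlock:
 *
 *	#include <sys/file.h>
 *
 *	if (flock(fd, LOCK_EX) == -1)	// exclusive; blocks on conflict
 *		perror("flock");
 *	...
 *	flock(fd, LOCK_UN);		// or simply close(fd)
 */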
2330 
2331 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2332 {
2333 	int rc, xid;
2334 	int lock = 0, unlock = 0;
2335 	bool wait_flag = false;
2336 	bool posix_lck = false;
2337 	struct cifs_sb_info *cifs_sb;
2338 	struct cifs_tcon *tcon;
2339 	struct cifsFileInfo *cfile;
2340 	__u32 type;
2341 
2342 	rc = -EACCES;
2343 	xid = get_xid();
2344 
2345 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2346 		 flock->c.flc_flags, flock->c.flc_type,
2347 		 (long long)flock->fl_start,
2348 		 (long long)flock->fl_end);
2349 
2350 	cfile = (struct cifsFileInfo *)file->private_data;
2351 	tcon = tlink_tcon(cfile->tlink);
2352 
2353 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2354 			tcon->ses->server);
2355 	cifs_sb = CIFS_FILE_SB(file);
2356 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2357 
2358 	if (cap_unix(tcon->ses) &&
2359 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2360 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2361 		posix_lck = true;
2362 	/*
2363 	 * BB add code here to normalize offset and length to account for
2364 	 * negative length, which we cannot accept over the wire.
2365 	 */
2366 	if (IS_GETLK(cmd)) {
2367 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2368 		free_xid(xid);
2369 		return rc;
2370 	}
2371 
2372 	if (!lock && !unlock) {
2373 		/*
2374 		 * if this is neither a lock nor an unlock request then there is
2375 		 * nothing to do since we do not know what it is
2376 		 */
2377 		free_xid(xid);
2378 		return -EOPNOTSUPP;
2379 	}
2380 
2381 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2382 			xid);
2383 	free_xid(xid);
2384 	return rc;
2385 }
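
/*
 * Illustrative userspace sketch (not part of this file): a POSIX
 * byte-range lock serviced by cifs_lock() above when taken on a CIFS
 * mount.  F_SETLKW makes the request blocking (FL_SLEEP), so wait_flag
 * ends up true on the kernel side; F_GETLK takes the cifs_getlk() path
 * instead:
 *
 *	#include <fcntl.h>
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,	// exclusive lock
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 4096,	// lock the first 4096 bytes
 *	};
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)	// blocks on conflict
 *		perror("fcntl");
 */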
2386 
2387 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2388 				      bool was_async)
2389 {
2390 	struct netfs_io_request *wreq = wdata->rreq;
2391 	loff_t new_server_eof;
2392 
2393 	if (result > 0) {
2394 		new_server_eof = wdata->subreq.start + wdata->subreq.transferred + result;
2395 
2396 		if (new_server_eof > netfs_inode(wreq->inode)->remote_i_size)
2397 			netfs_resize_file(netfs_inode(wreq->inode), new_server_eof, true);
2398 	}
2399 
2400 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2401 }
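
/*
 * Example of the bookkeeping above: a subrequest starting at 1 MiB that
 * had already transferred 64 KiB and just wrote another 32 KiB yields
 * new_server_eof = 1 MiB + 96 KiB.  Note the cached remote file size only
 * ever grows here; shrinking it is left to the truncate paths.
 */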
2402 
2403 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2404 					bool fsuid_only)
2405 {
2406 	struct cifsFileInfo *open_file = NULL;
2407 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2408 
2409 	/* only filter by fsuid on multiuser mounts */
2410 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2411 		fsuid_only = false;
2412 
2413 	spin_lock(&cifs_inode->open_file_lock);
2414 	/* we could simply take the first list entry, since write-only entries
2415 	   are always at the end of the list, but the first entry might
2416 	   have a close pending, so we go through the whole list */
2417 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2418 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2419 			continue;
2420 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2421 			if (!open_file->invalidHandle) {
2422 				/* found a good file */
2423 				/* lock it so it will not be closed on us */
2424 				cifsFileInfo_get(open_file);
2425 				spin_unlock(&cifs_inode->open_file_lock);
2426 				return open_file;
2427 			} /* else might as well continue, and look for
2428 			     another, or simply have the caller reopen it
2429 			     again rather than trying to fix this handle */
2430 		} else /* write only file */
2431 			break; /* write only files are last so must be done */
2432 	}
2433 	spin_unlock(&cifs_inode->open_file_lock);
2434 	return NULL;
2435 }
2436 
2437 /* Return -EBADF if no handle is found, or a general rc otherwise */
2438 int
2439 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2440 		       struct cifsFileInfo **ret_file)
2441 {
2442 	struct cifsFileInfo *open_file, *inv_file = NULL;
2443 	struct cifs_sb_info *cifs_sb;
2444 	bool any_available = false;
2445 	int rc = -EBADF;
2446 	unsigned int refind = 0;
2447 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2448 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2449 	*ret_file = NULL;
2450 
2451 	/*
2452 	 * Having a null inode here (because mapping->host was set to zero by
2453 	 * the VFS or MM) should not happen, but we had reports of an oops (due
2454 	 * to it being zero) during stress testcases, so we need to check for it
2455 	 */
2456 
2457 	if (cifs_inode == NULL) {
2458 		cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2459 		dump_stack();
2460 		return rc;
2461 	}
2462 
2463 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2464 
2465 	/* only filter by fsuid on multiuser mounts */
2466 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2467 		fsuid_only = false;
2468 
2469 	spin_lock(&cifs_inode->open_file_lock);
2470 refind_writable:
2471 	if (refind > MAX_REOPEN_ATT) {
2472 		spin_unlock(&cifs_inode->open_file_lock);
2473 		return rc;
2474 	}
2475 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2476 		if (!any_available && open_file->pid != current->tgid)
2477 			continue;
2478 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2479 			continue;
2480 		if (with_delete && !(open_file->fid.access & DELETE))
2481 			continue;
2482 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2483 			if (!open_file->invalidHandle) {
2484 				/* found a good writable file */
2485 				cifsFileInfo_get(open_file);
2486 				spin_unlock(&cifs_inode->open_file_lock);
2487 				*ret_file = open_file;
2488 				return 0;
2489 			} else {
2490 				if (!inv_file)
2491 					inv_file = open_file;
2492 			}
2493 		}
2494 	}
2495 	/* couldn't find usable FH with same pid, try any available */
2496 	if (!any_available) {
2497 		any_available = true;
2498 		goto refind_writable;
2499 	}
2500 
2501 	if (inv_file) {
2502 		any_available = false;
2503 		cifsFileInfo_get(inv_file);
2504 	}
2505 
2506 	spin_unlock(&cifs_inode->open_file_lock);
2507 
2508 	if (inv_file) {
2509 		rc = cifs_reopen_file(inv_file, false);
2510 		if (!rc) {
2511 			*ret_file = inv_file;
2512 			return 0;
2513 		}
2514 
2515 		spin_lock(&cifs_inode->open_file_lock);
2516 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2517 		spin_unlock(&cifs_inode->open_file_lock);
2518 		cifsFileInfo_put(inv_file);
2519 		++refind;
2520 		inv_file = NULL;
2521 		spin_lock(&cifs_inode->open_file_lock);
2522 		goto refind_writable;
2523 	}
2524 
2525 	return rc;
2526 }
2527 
2528 struct cifsFileInfo *
2529 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2530 {
2531 	struct cifsFileInfo *cfile;
2532 	int rc;
2533 
2534 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2535 	if (rc)
2536 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2537 
2538 	return cfile;
2539 }
2540 
2541 int
2542 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2543 		       int flags,
2544 		       struct cifsFileInfo **ret_file)
2545 {
2546 	struct cifsFileInfo *cfile;
2547 	void *page = alloc_dentry_path();
2548 
2549 	*ret_file = NULL;
2550 
2551 	spin_lock(&tcon->open_file_lock);
2552 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2553 		struct cifsInodeInfo *cinode;
2554 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2555 		if (IS_ERR(full_path)) {
2556 			spin_unlock(&tcon->open_file_lock);
2557 			free_dentry_path(page);
2558 			return PTR_ERR(full_path);
2559 		}
2560 		if (strcmp(full_path, name))
2561 			continue;
2562 
2563 		cinode = CIFS_I(d_inode(cfile->dentry));
2564 		spin_unlock(&tcon->open_file_lock);
2565 		free_dentry_path(page);
2566 		return cifs_get_writable_file(cinode, flags, ret_file);
2567 	}
2568 
2569 	spin_unlock(&tcon->open_file_lock);
2570 	free_dentry_path(page);
2571 	return -ENOENT;
2572 }
2573 
2574 int
2575 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2576 		       struct cifsFileInfo **ret_file)
2577 {
2578 	struct cifsFileInfo *cfile;
2579 	void *page = alloc_dentry_path();
2580 
2581 	*ret_file = NULL;
2582 
2583 	spin_lock(&tcon->open_file_lock);
2584 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2585 		struct cifsInodeInfo *cinode;
2586 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2587 		if (IS_ERR(full_path)) {
2588 			spin_unlock(&tcon->open_file_lock);
2589 			free_dentry_path(page);
2590 			return PTR_ERR(full_path);
2591 		}
2592 		if (strcmp(full_path, name))
2593 			continue;
2594 
2595 		cinode = CIFS_I(d_inode(cfile->dentry));
2596 		spin_unlock(&tcon->open_file_lock);
2597 		free_dentry_path(page);
2598 		*ret_file = find_readable_file(cinode, 0);
2599 		return *ret_file ? 0 : -ENOENT;
2600 	}
2601 
2602 	spin_unlock(&tcon->open_file_lock);
2603 	free_dentry_path(page);
2604 	return -ENOENT;
2605 }
2606 
2607 /*
2608  * Flush data for a file opened with strict caching semantics.
2609  */
2610 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2611 		      int datasync)
2612 {
2613 	unsigned int xid;
2614 	int rc = 0;
2615 	struct cifs_tcon *tcon;
2616 	struct TCP_Server_Info *server;
2617 	struct cifsFileInfo *smbfile = file->private_data;
2618 	struct inode *inode = file_inode(file);
2619 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2620 
2621 	rc = file_write_and_wait_range(file, start, end);
2622 	if (rc) {
2623 		trace_cifs_fsync_err(inode->i_ino, rc);
2624 		return rc;
2625 	}
2626 
2627 	xid = get_xid();
2628 
2629 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2630 		 file, datasync);
2631 
2632 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2633 		rc = cifs_zap_mapping(inode);
2634 		if (rc) {
2635 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2636 			rc = 0; /* don't care about it in fsync */
2637 		}
2638 	}
2639 
2640 	tcon = tlink_tcon(smbfile->tlink);
2641 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2642 		server = tcon->ses->server;
2643 		if (server->ops->flush == NULL) {
2644 			rc = -ENOSYS;
2645 			goto strict_fsync_exit;
2646 		}
2647 
2648 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2649 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2650 			if (smbfile) {
2651 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2652 				cifsFileInfo_put(smbfile);
2653 			} else
2654 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2655 		} else
2656 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2657 	}
2658 
2659 strict_fsync_exit:
2660 	free_xid(xid);
2661 	return rc;
2662 }
2663 
2664 /*
2665  * Flush data for a file not using strict caching semantics.
2666  */
2667 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2668 {
2669 	unsigned int xid;
2670 	int rc = 0;
2671 	struct cifs_tcon *tcon;
2672 	struct TCP_Server_Info *server;
2673 	struct cifsFileInfo *smbfile = file->private_data;
2674 	struct inode *inode = file_inode(file);
2675 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2676 
2677 	rc = file_write_and_wait_range(file, start, end);
2678 	if (rc) {
2679 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2680 		return rc;
2681 	}
2682 
2683 	xid = get_xid();
2684 
2685 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2686 		 file, datasync);
2687 
2688 	tcon = tlink_tcon(smbfile->tlink);
2689 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2690 		server = tcon->ses->server;
2691 		if (server->ops->flush == NULL) {
2692 			rc = -ENOSYS;
2693 			goto fsync_exit;
2694 		}
2695 
2696 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2697 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2698 			if (smbfile) {
2699 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2700 				cifsFileInfo_put(smbfile);
2701 			} else
2702 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2703 		} else
2704 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2705 	}
2706 
2707 fsync_exit:
2708 	free_xid(xid);
2709 	return rc;
2710 }
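
/*
 * Illustrative userspace sketch (not part of this file): both fsync paths
 * above are reached through the VFS - fsync(2) calls in with
 * datasync == 0 and fdatasync(2) with datasync == 1.  Either way dirty
 * pages are written back first and the server is then asked to flush its
 * copy:
 *
 *	#include <unistd.h>
 *
 *	if (fsync(fd) == -1)	// or fdatasync(fd)
 *		perror("fsync");
 */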
2711 
2712 /*
2713  * As the file closes, flush all cached write data for this inode, checking
2714  * for write-behind errors.
2715  */
2716 int cifs_flush(struct file *file, fl_owner_t id)
2717 {
2718 	struct inode *inode = file_inode(file);
2719 	int rc = 0;
2720 
2721 	if (file->f_mode & FMODE_WRITE)
2722 		rc = filemap_write_and_wait(inode->i_mapping);
2723 
2724 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2725 	if (rc) {
2726 		/* get more nuanced writeback errors */
2727 		rc = filemap_check_wb_err(file->f_mapping, 0);
2728 		trace_cifs_flush_err(inode->i_ino, rc);
2729 	}
2730 	return rc;
2731 }
2732 
2733 static ssize_t
2734 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2735 {
2736 	struct file *file = iocb->ki_filp;
2737 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2738 	struct inode *inode = file->f_mapping->host;
2739 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2740 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2741 	ssize_t rc;
2742 
2743 	rc = netfs_start_io_write(inode);
2744 	if (rc < 0)
2745 		return rc;
2746 
2747 	/*
2748 	 * We need to hold the sem to be sure nobody modifies the lock list
2749 	 * with a brlock that prevents writing.
2750 	 */
2751 	down_read(&cinode->lock_sem);
2752 
2753 	rc = generic_write_checks(iocb, from);
2754 	if (rc <= 0)
2755 		goto out;
2756 
2757 	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2758 				     server->vals->exclusive_lock_type, 0,
2759 				     NULL, CIFS_WRITE_OP))
2760 		rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2761 	else
2762 		rc = -EACCES;
2763 out:
2764 	up_read(&cinode->lock_sem);
2765 	netfs_end_io_write(inode);
2766 	if (rc > 0)
2767 		rc = generic_write_sync(iocb, rc);
2768 	return rc;
2769 }
2770 
2771 ssize_t
2772 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2773 {
2774 	struct inode *inode = file_inode(iocb->ki_filp);
2775 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2776 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2777 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2778 						iocb->ki_filp->private_data;
2779 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2780 	ssize_t written;
2781 
2782 	written = cifs_get_writer(cinode);
2783 	if (written)
2784 		return written;
2785 
2786 	if (CIFS_CACHE_WRITE(cinode)) {
2787 		if (cap_unix(tcon->ses) &&
2788 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2789 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2790 			written = netfs_file_write_iter(iocb, from);
2791 			goto out;
2792 		}
2793 		written = cifs_writev(iocb, from);
2794 		goto out;
2795 	}
2796 	/*
2797 	 * For non-oplocked files in strict cache mode we need to write the data
2798 	 * to the server exactly from pos to pos+len-1 rather than flush all
2799 	 * affected pages because it may cause an error with mandatory locks on
2800 	 * these pages but not on the region from pos to pos+len-1.
2801 	 */
2802 	written = netfs_file_write_iter(iocb, from);
2803 	if (CIFS_CACHE_READ(cinode)) {
2804 		/*
2805 		 * We have read level caching and we have just sent a write
2806 		 * request to the server thus making data in the cache stale.
2807 		 * Zap the cache and set oplock/lease level to NONE to avoid
2808 		 * reading stale data from the cache. All subsequent read
2809 		 * operations will read new data from the server.
2810 		 */
2811 		cifs_zap_mapping(inode);
2812 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2813 			 inode);
2814 		cinode->oplock = 0;
2815 	}
2816 out:
2817 	cifs_put_writer(cinode);
2818 	return written;
2819 }
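
/*
 * Note: the strict read/write variants in this file are used when the
 * share is mounted with strict caching (cache=strict, the default), e.g.:
 *
 *	mount -t cifs //server/share /mnt -o cache=strict
 *
 * With a write-caching oplock or lease the write above can stay buffered;
 * without one it goes to the server unbuffered.
 */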
2820 
2821 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2822 {
2823 	ssize_t rc;
2824 	struct inode *inode = file_inode(iocb->ki_filp);
2825 
2826 	if (iocb->ki_flags & IOCB_DIRECT)
2827 		return netfs_unbuffered_read_iter(iocb, iter);
2828 
2829 	rc = cifs_revalidate_mapping(inode);
2830 	if (rc)
2831 		return rc;
2832 
2833 	return netfs_file_read_iter(iocb, iter);
2834 }
2835 
2836 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2837 {
2838 	struct inode *inode = file_inode(iocb->ki_filp);
2839 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2840 	ssize_t written;
2841 	int rc;
2842 
2843 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2844 		written = netfs_unbuffered_write_iter(iocb, from);
2845 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2846 			cifs_zap_mapping(inode);
2847 			cifs_dbg(FYI,
2848 				 "Set no oplock for inode=%p after a write operation\n",
2849 				 inode);
2850 			cinode->oplock = 0;
2851 		}
2852 		return written;
2853 	}
2854 
2855 	written = cifs_get_writer(cinode);
2856 	if (written)
2857 		return written;
2858 
2859 	written = netfs_file_write_iter(iocb, from);
2860 
2861 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2862 		rc = filemap_fdatawrite(inode->i_mapping);
2863 		if (rc)
2864 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2865 				 rc, inode);
2866 	}
2867 
2868 	cifs_put_writer(cinode);
2869 	return written;
2870 }
2871 
2872 ssize_t
2873 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2874 {
2875 	struct inode *inode = file_inode(iocb->ki_filp);
2876 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2877 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2878 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2879 						iocb->ki_filp->private_data;
2880 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2881 	int rc = -EACCES;
2882 
2883 	/*
2884 	 * In strict cache mode we need to read from the server all the time
2885 	 * if we don't have a level II oplock because the server can delay mtime
2886 	 * changes - so we can't make a decision about invalidating the inode.
2887 	 * Page reads can also fail if there are mandatory locks
2888 	 * on pages affected by this read but not on the region from pos to
2889 	 * pos+len-1.
2890 	 */
2891 	if (!CIFS_CACHE_READ(cinode))
2892 		return netfs_unbuffered_read_iter(iocb, to);
2893 
2894 	if (cap_unix(tcon->ses) &&
2895 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2896 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2897 		if (iocb->ki_flags & IOCB_DIRECT)
2898 			return netfs_unbuffered_read_iter(iocb, to);
2899 		return netfs_buffered_read_iter(iocb, to);
2900 	}
2901 
2902 	/*
2903 	 * We need to hold the sem to be sure nobody modifies the lock list
2904 	 * with a brlock that prevents reading.
2905 	 */
2906 	if (iocb->ki_flags & IOCB_DIRECT) {
2907 		rc = netfs_start_io_direct(inode);
2908 		if (rc < 0)
2909 			goto out;
2910 		down_read(&cinode->lock_sem);
2911 		if (!cifs_find_lock_conflict(
2912 			    cfile, iocb->ki_pos, iov_iter_count(to),
2913 			    tcon->ses->server->vals->shared_lock_type,
2914 			    0, NULL, CIFS_READ_OP))
2915 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2916 		up_read(&cinode->lock_sem);
2917 		netfs_end_io_direct(inode);
2918 	} else {
2919 		rc = netfs_start_io_read(inode);
2920 		if (rc < 0)
2921 			goto out;
2922 		down_read(&cinode->lock_sem);
2923 		if (!cifs_find_lock_conflict(
2924 			    cfile, iocb->ki_pos, iov_iter_count(to),
2925 			    tcon->ses->server->vals->shared_lock_type,
2926 			    0, NULL, CIFS_READ_OP))
2927 			rc = filemap_read(iocb, to, 0);
2928 		up_read(&cinode->lock_sem);
2929 		netfs_end_io_read(inode);
2930 	}
2931 out:
2932 	return rc;
2933 }
2934 
2935 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2936 {
2937 	return netfs_page_mkwrite(vmf, NULL);
2938 }
2939 
2940 static const struct vm_operations_struct cifs_file_vm_ops = {
2941 	.fault = filemap_fault,
2942 	.map_pages = filemap_map_pages,
2943 	.page_mkwrite = cifs_page_mkwrite,
2944 };
2945 
2946 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2947 {
2948 	int xid, rc = 0;
2949 	struct inode *inode = file_inode(file);
2950 
2951 	xid = get_xid();
2952 
2953 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
2954 		rc = cifs_zap_mapping(inode);
2955 	if (!rc)
2956 		rc = generic_file_mmap(file, vma);
2957 	if (!rc)
2958 		vma->vm_ops = &cifs_file_vm_ops;
2959 
2960 	free_xid(xid);
2961 	return rc;
2962 }
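
/*
 * Illustrative userspace sketch (not part of this file): a shared
 * writable mapping of a CIFS file goes through the mmap handlers here,
 * and the first store into it faults into cifs_page_mkwrite() via the
 * vm_ops above:
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	if (p != MAP_FAILED)
 *		p[0] = 1;	// triggers ->page_mkwrite
 */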
2963 
2964 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2965 {
2966 	int rc, xid;
2967 
2968 	xid = get_xid();
2969 
2970 	rc = cifs_revalidate_file(file);
2971 	if (rc)
2972 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2973 			 rc);
2974 	if (!rc)
2975 		rc = generic_file_mmap(file, vma);
2976 	if (!rc)
2977 		vma->vm_ops = &cifs_file_vm_ops;
2978 
2979 	free_xid(xid);
2980 	return rc;
2981 }
2982 
2983 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2984 {
2985 	struct cifsFileInfo *open_file;
2986 
2987 	spin_lock(&cifs_inode->open_file_lock);
2988 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2989 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2990 			spin_unlock(&cifs_inode->open_file_lock);
2991 			return 1;
2992 		}
2993 	}
2994 	spin_unlock(&cifs_inode->open_file_lock);
2995 	return 0;
2996 }
2997 
2998 /* We do not want to update the file size from the server for inodes
2999    open for write - to avoid races with writepage extending
3000    the file. In the future we could consider allowing
3001    refreshing the inode only on increases in the file size,
3002    but this is tricky to do without racing with writebehind
3003    page caching in the current Linux kernel design. */
3004 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3005 			    bool from_readdir)
3006 {
3007 	if (!cifsInode)
3008 		return true;
3009 
3010 	if (is_inode_writable(cifsInode) ||
3011 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3012 		/* This inode is open for write at least once */
3013 		struct cifs_sb_info *cifs_sb;
3014 
3015 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3016 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3017 			/* since there is no page cache to corrupt on
3018 			   direct I/O we can change the size safely */
3019 			return true;
3020 		}
3021 
3022 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3023 			return true;
3024 
3025 		return false;
3026 	} else
3027 		return true;
3028 }
3029 
3030 void cifs_oplock_break(struct work_struct *work)
3031 {
3032 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3033 						  oplock_break);
3034 	struct inode *inode = d_inode(cfile->dentry);
3035 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3036 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3037 	struct cifs_tcon *tcon;
3038 	struct TCP_Server_Info *server;
3039 	struct tcon_link *tlink;
3040 	int rc = 0;
3041 	bool purge_cache = false, oplock_break_cancelled;
3042 	__u64 persistent_fid, volatile_fid;
3043 	__u16 net_fid;
3044 
3045 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3046 			TASK_UNINTERRUPTIBLE);
3047 
3048 	tlink = cifs_sb_tlink(cifs_sb);
3049 	if (IS_ERR(tlink))
3050 		goto out;
3051 	tcon = tlink_tcon(tlink);
3052 	server = tcon->ses->server;
3053 
3054 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3055 				      cfile->oplock_epoch, &purge_cache);
3056 
3057 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3058 						cifs_has_mand_locks(cinode)) {
3059 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3060 			 inode);
3061 		cinode->oplock = 0;
3062 	}
3063 
3064 	if (inode && S_ISREG(inode->i_mode)) {
3065 		if (CIFS_CACHE_READ(cinode))
3066 			break_lease(inode, O_RDONLY);
3067 		else
3068 			break_lease(inode, O_WRONLY);
3069 		rc = filemap_fdatawrite(inode->i_mapping);
3070 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3071 			rc = filemap_fdatawait(inode->i_mapping);
3072 			mapping_set_error(inode->i_mapping, rc);
3073 			cifs_zap_mapping(inode);
3074 		}
3075 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3076 		if (CIFS_CACHE_WRITE(cinode))
3077 			goto oplock_break_ack;
3078 	}
3079 
3080 	rc = cifs_push_locks(cfile);
3081 	if (rc)
3082 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3083 
3084 oplock_break_ack:
3085 	/*
3086 	 * When an oplock break is received and there are no active
3087 	 * file handles, only cached ones, schedule the deferred close
3088 	 * immediately so that a new open will not use a cached handle.
3089 	 */
3090 
3091 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3092 		cifs_close_deferred_file(cinode);
3093 
3094 	persistent_fid = cfile->fid.persistent_fid;
3095 	volatile_fid = cfile->fid.volatile_fid;
3096 	net_fid = cfile->fid.netfid;
3097 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3098 
3099 	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3100 	/*
3101 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3102 	 * an acknowledgment to be sent when the file has already been closed.
3103 	 */
3104 	spin_lock(&cinode->open_file_lock);
3105 	/* check the list is non-empty, since this can race with kill_sb calling tree disconnect */
3106 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3107 		spin_unlock(&cinode->open_file_lock);
3108 		rc = server->ops->oplock_response(tcon, persistent_fid,
3109 						  volatile_fid, net_fid, cinode);
3110 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3111 	} else
3112 		spin_unlock(&cinode->open_file_lock);
3113 
3114 	cifs_put_tlink(tlink);
3115 out:
3116 	cifs_done_oplock_break(cinode);
3117 }
3118 
3119 static int cifs_swap_activate(struct swap_info_struct *sis,
3120 			      struct file *swap_file, sector_t *span)
3121 {
3122 	struct cifsFileInfo *cfile = swap_file->private_data;
3123 	struct inode *inode = swap_file->f_mapping->host;
3124 	unsigned long blocks;
3125 	long long isize;
3126 
3127 	cifs_dbg(FYI, "swap activate\n");
3128 
3129 	if (!swap_file->f_mapping->a_ops->swap_rw)
3130 		/* Cannot support swap */
3131 		return -EINVAL;
3132 
3133 	spin_lock(&inode->i_lock);
3134 	blocks = inode->i_blocks;
3135 	isize = inode->i_size;
3136 	spin_unlock(&inode->i_lock);
3137 	if (blocks * 512 < isize) {
3138 		pr_warn("swap activate: swapfile has holes\n");
3139 		return -EINVAL;
3140 	}
3141 	*span = sis->pages;
3142 
3143 	pr_warn_once("Swap support over SMB3 is experimental\n");
3144 
3145 	/*
3146 	 * TODO: consider adding ACL (or documenting how) to prevent other
3147 	 * users (on this or other systems) from reading it
3148 	 */
3149 
3151 	/* TODO: add sk_set_memalloc(inet) or similar */
3152 
3153 	if (cfile)
3154 		cfile->swapfile = true;
3155 	/*
3156 	 * TODO: Since the file is already open, we can't open with DENY_ALL here,
3157 	 * but we could add a call to grab a byte-range lock to prevent others
3158 	 * from reading or writing the file
3159 	 */
3160 
3161 	sis->flags |= SWP_FS_OPS;
3162 	return add_swap_extent(sis, 0, sis->max, 0);
3163 }
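
/*
 * Note on the holes check above: i_blocks counts 512-byte units, so a
 * fully allocated 1 MiB file must show at least 1048576 / 512 = 2048
 * blocks; fewer means the file is sparse, which swap cannot tolerate.
 * A typical userspace setup therefore preallocates the file, e.g.:
 *
 *	dd if=/dev/zero of=/mnt/swapfile bs=1M count=1024
 *	mkswap /mnt/swapfile && swapon /mnt/swapfile
 */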
3164 
3165 static void cifs_swap_deactivate(struct file *file)
3166 {
3167 	struct cifsFileInfo *cfile = file->private_data;
3168 
3169 	cifs_dbg(FYI, "swap deactivate\n");
3170 
3171 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3172 
3173 	if (cfile)
3174 		cfile->swapfile = false;
3175 
3176 	/* do we need to unpin (or unlock) the file? */
3177 }
3178 
3179 /**
3180  * cifs_swap_rw - SMB3 address space operation for swap I/O
3181  * @iocb: target I/O control block
3182  * @iter: I/O buffer
3183  *
3184  * Perform IO to the swap-file.  This is much like direct IO.
3185  */
3186 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3187 {
3188 	ssize_t ret;
3189 
3190 	if (iov_iter_rw(iter) == READ)
3191 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3192 	else
3193 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3194 	if (ret < 0)
3195 		return ret;
3196 	return 0;
3197 }
3198 
3199 const struct address_space_operations cifs_addr_ops = {
3200 	.read_folio	= netfs_read_folio,
3201 	.readahead	= netfs_readahead,
3202 	.writepages	= netfs_writepages,
3203 	.dirty_folio	= netfs_dirty_folio,
3204 	.release_folio	= netfs_release_folio,
3205 	.direct_IO	= noop_direct_IO,
3206 	.invalidate_folio = netfs_invalidate_folio,
3207 	.migrate_folio	= filemap_migrate_folio,
3208 	/*
3209 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3210 	 * helper if needed
3211 	 */
3212 	.swap_activate	= cifs_swap_activate,
3213 	.swap_deactivate = cifs_swap_deactivate,
3214 	.swap_rw = cifs_swap_rw,
3215 };
3216 
3217 /*
3218  * Readahead requires the server to support a buffer large enough to
3219  * contain the header plus one complete page of data.  Otherwise, we need
3220  * to leave readahead out of the address space operations.
3221  */
3222 const struct address_space_operations cifs_addr_ops_smallbuf = {
3223 	.read_folio	= netfs_read_folio,
3224 	.writepages	= netfs_writepages,
3225 	.dirty_folio	= netfs_dirty_folio,
3226 	.release_folio	= netfs_release_folio,
3227 	.invalidate_folio = netfs_invalidate_folio,
3228 	.migrate_folio	= filemap_migrate_folio,
3229 };
3230