xref: /linux/fs/smb/client/file.c (revision 9acb51e9617c28a92f9ce2af767db6bd660a6d4f)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct TCP_Server_Info *server;
53 	struct cifsFileInfo *open_file = req->cfile;
54 	size_t wsize = req->rreq.wsize;
55 	int rc;
56 
57 	if (!wdata->have_xid) {
58 		wdata->xid = get_xid();
59 		wdata->have_xid = true;
60 	}
61 
62 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
63 	wdata->server = server;
64 
65 retry:
66 	if (open_file->invalidHandle) {
67 		rc = cifs_reopen_file(open_file, false);
68 		if (rc < 0) {
69 			if (rc == -EAGAIN)
70 				goto retry;
71 			subreq->error = rc;
72 			return netfs_prepare_write_failed(subreq);
73 		}
74 	}
75 
76 	rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
77 					   &wdata->credits);
78 	if (rc < 0) {
79 		subreq->error = rc;
80 		return netfs_prepare_write_failed(subreq);
81 	}
82 
83 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
84 	wdata->credits.rreq_debug_index = subreq->debug_index;
85 	wdata->credits.in_flight_check = 1;
86 	trace_smb3_rw_credits(wdata->rreq->debug_id,
87 			      wdata->subreq.debug_index,
88 			      wdata->credits.value,
89 			      server->credits, server->in_flight,
90 			      wdata->credits.value,
91 			      cifs_trace_rw_credits_write_prepare);
92 
93 #ifdef CONFIG_CIFS_SMB_DIRECT
94 	if (server->smbd_conn)
95 		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
96 #endif
97 }
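
/*
 * A minimal sketch of the credit flow used above (illustration only;
 * error handling elided, names taken from this file).  Credits bound how
 * much data may be in flight per connection:
 *
 *	rc = server->ops->wait_mtu_credits(server, wsize, &max_len, &credits);
 *	if (rc < 0)
 *		fail;				// may sleep waiting for credits
 *	// max_len now caps this subrequest's payload
 *	...issue the I/O...
 *	add_credits_and_wake_if(server, &credits, 0);	// on failure paths
 */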
98 
99 /*
100  * Issue a subrequest to upload to the server.
101  */
102 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
103 {
104 	struct cifs_io_subrequest *wdata =
105 		container_of(subreq, struct cifs_io_subrequest, subreq);
106 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
107 	int rc;
108 
109 	if (cifs_forced_shutdown(sbi)) {
110 		rc = -EIO;
111 		goto fail;
112 	}
113 
114 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
115 	if (rc)
116 		goto fail;
117 
118 	rc = -EAGAIN;
119 	if (wdata->req->cfile->invalidHandle)
120 		goto fail;
121 
122 	wdata->server->ops->async_writev(wdata);
123 out:
124 	return;
125 
126 fail:
127 	if (rc == -EAGAIN)
128 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
129 	else
130 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
131 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
132 	cifs_write_subrequest_terminated(wdata, rc, false);
133 	goto out;
134 }
135 
136 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
137 {
138 	cifs_invalidate_cache(wreq->inode, 0);
139 }
140 
141 /*
142  * Split the read up according to how many credits we can get for each piece.
143  * It's okay to sleep here if we need to wait for more credit to become
144  * available.
145  *
146  * We also choose the server and allocate an operation ID to be cleaned up
147  * later.
148  */
149 static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
150 {
151 	struct netfs_io_request *rreq = subreq->rreq;
152 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
153 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
154 	struct TCP_Server_Info *server = req->server;
155 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
156 	size_t rsize = 0;
157 	int rc;
158 
159 	rdata->xid = get_xid();
160 	rdata->have_xid = true;
161 	rdata->server = server;
162 
163 	if (cifs_sb->ctx->rsize == 0)
164 		cifs_sb->ctx->rsize =
165 			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
166 						     cifs_sb->ctx);
167
169 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
170 					   &rdata->credits);
171 	if (rc) {
172 		subreq->error = rc;
173 		return false;
174 	}
175 
176 	rdata->credits.in_flight_check = 1;
177 	rdata->credits.rreq_debug_id = rreq->debug_id;
178 	rdata->credits.rreq_debug_index = subreq->debug_index;
179 
180 	trace_smb3_rw_credits(rdata->rreq->debug_id,
181 			      rdata->subreq.debug_index,
182 			      rdata->credits.value,
183 			      server->credits, server->in_flight, 0,
184 			      cifs_trace_rw_credits_read_submit);
185 
186 	subreq->len = min_t(size_t, subreq->len, rsize);
187 
188 #ifdef CONFIG_CIFS_SMB_DIRECT
189 	if (server->smbd_conn)
190 		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
191 #endif
192 	return true;
193 }
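
/*
 * Worked example of the clamp above (illustration only): if the netfs
 * core proposes a 1 MiB subrequest but wait_mtu_credits() grants an
 * rsize of only 64 KiB, subreq->len drops to 64 KiB here and the netfs
 * helpers issue further subrequests for the rest (see the comment on
 * cifs_req_issue_read() below).
 */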
194 
195 /*
196  * Issue a read operation on behalf of the netfs helper functions.  We're asked
197  * to make a read of a certain size at a point in the file.  We are permitted
198  * to only read a portion of that, but as long as we read something, the netfs
199  * helper will call us again so that we can issue another read.
200  */
201 static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
202 {
203 	struct netfs_io_request *rreq = subreq->rreq;
204 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
205 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
206 	int rc = 0;
207 
208 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
209 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
210 		 subreq->transferred, subreq->len);
211 
212 	if (req->cfile->invalidHandle) {
213 		do {
214 			rc = cifs_reopen_file(req->cfile, true);
215 		} while (rc == -EAGAIN);
216 		if (rc)
217 			goto out;
218 	}
219 
220 	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
221 
222 	rc = rdata->server->ops->async_readv(rdata);
223 out:
224 	if (rc)
225 		netfs_subreq_terminated(subreq, rc, false);
226 }
227 
228 /*
229  * Writeback calls this when it finds a folio that needs uploading.  This isn't
230  * called if writeback only has copy-to-cache to deal with.
231  */
232 static void cifs_begin_writeback(struct netfs_io_request *wreq)
233 {
234 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
235 	int ret;
236 
237 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
238 	if (ret) {
239 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
240 		return;
241 	}
242 
243 	wreq->io_streams[0].avail = true;
244 }
245 
246 /*
247  * Initialise a request.
248  */
249 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
250 {
251 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
252 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
253 	struct cifsFileInfo *open_file = NULL;
254 
255 	rreq->rsize = cifs_sb->ctx->rsize;
256 	rreq->wsize = cifs_sb->ctx->wsize;
257 	req->pid = current->tgid; // NB: may run from a workqueue, so this may not be the originating task's tgid
258 
259 	if (file) {
260 		open_file = file->private_data;
261 		rreq->netfs_priv = file->private_data;
262 		req->cfile = cifsFileInfo_get(open_file);
263 		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
264 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
265 			req->pid = req->cfile->pid;
266 	} else if (rreq->origin != NETFS_WRITEBACK) {
267 		WARN_ON_ONCE(1);
268 		return -EIO;
269 	}
270 
271 	return 0;
272 }
273 
274 /*
275  * Completion of a request operation.
276  */
277 static void cifs_rreq_done(struct netfs_io_request *rreq)
278 {
279 	struct timespec64 atime, mtime;
280 	struct inode *inode = rreq->inode;
281 
282 	/* we do not want atime to be less than mtime; that broke some apps */
283 	atime = inode_set_atime_to_ts(inode, current_time(inode));
284 	mtime = inode_get_mtime(inode);
285 	if (timespec64_compare(&atime, &mtime) < 0)
286 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
287 }
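
/*
 * Example (illustration only): if the server's clock runs ahead, mtime
 * can land later than the client's current_time(); without the check
 * above a read would leave atime behind mtime, which broke some apps,
 * so atime is bumped up to mtime in that case.
 */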
288 
289 static void cifs_post_modify(struct inode *inode)
290 {
291 	/* Indication to update ctime and mtime as close is deferred */
292 	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
293 }
294 
295 static void cifs_free_request(struct netfs_io_request *rreq)
296 {
297 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
298 
299 	if (req->cfile)
300 		cifsFileInfo_put(req->cfile);
301 }
302 
303 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
304 {
305 	struct cifs_io_subrequest *rdata =
306 		container_of(subreq, struct cifs_io_subrequest, subreq);
307 	int rc = subreq->error;
308 
309 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
310 #ifdef CONFIG_CIFS_SMB_DIRECT
311 		if (rdata->mr) {
312 			smbd_deregister_mr(rdata->mr);
313 			rdata->mr = NULL;
314 		}
315 #endif
316 	}
317 
318 	if (rdata->credits.value != 0)
319 		trace_smb3_rw_credits(rdata->rreq->debug_id,
320 				      rdata->subreq.debug_index,
321 				      rdata->credits.value,
322 				      rdata->server ? rdata->server->credits : 0,
323 				      rdata->server ? rdata->server->in_flight : 0,
324 				      -rdata->credits.value,
325 				      cifs_trace_rw_credits_free_subreq);
326 
327 	add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
328 	if (rdata->have_xid)
329 		free_xid(rdata->xid);
330 }
331 
332 const struct netfs_request_ops cifs_req_ops = {
333 	.request_pool		= &cifs_io_request_pool,
334 	.subrequest_pool	= &cifs_io_subrequest_pool,
335 	.init_request		= cifs_init_request,
336 	.free_request		= cifs_free_request,
337 	.free_subrequest	= cifs_free_subrequest,
338 	.clamp_length		= cifs_clamp_length,
339 	.issue_read		= cifs_req_issue_read,
340 	.done			= cifs_rreq_done,
341 	.post_modify		= cifs_post_modify,
342 	.begin_writeback	= cifs_begin_writeback,
343 	.prepare_write		= cifs_prepare_write,
344 	.issue_write		= cifs_issue_write,
345 	.invalidate_cache	= cifs_netfs_invalidate_cache,
346 };
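
/*
 * Rough lifecycle of a read through the ops above (illustration only):
 *
 *	cifs_init_request()	pick a server channel, take a cfile ref
 *	cifs_clamp_length()	allocate credits, bound the subrequest
 *	cifs_req_issue_read()	reopen stale handles, call ->async_readv()
 *	cifs_rreq_done()	fix up atime vs mtime on completion
 *	cifs_free_subrequest()	return unused credits, free the xid
 *	cifs_free_request()	drop the cfile reference
 */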
347 
348 /*
349  * Mark all open files on the tree connection as invalid, since they
350  * were closed when the session to the server was lost.
351  */
352 void
353 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
354 {
355 	struct cifsFileInfo *open_file = NULL;
356 	struct list_head *tmp;
357 	struct list_head *tmp1;
358 
359 	/* only send once per connect */
360 	spin_lock(&tcon->tc_lock);
361 	if (tcon->need_reconnect)
362 		tcon->status = TID_NEED_RECON;
363 
364 	if (tcon->status != TID_NEED_RECON) {
365 		spin_unlock(&tcon->tc_lock);
366 		return;
367 	}
368 	tcon->status = TID_IN_FILES_INVALIDATE;
369 	spin_unlock(&tcon->tc_lock);
370 
371 	/* list all files open on tree connection and mark them invalid */
372 	spin_lock(&tcon->open_file_lock);
373 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
374 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
375 		open_file->invalidHandle = true;
376 		open_file->oplock_break_cancelled = true;
377 	}
378 	spin_unlock(&tcon->open_file_lock);
379 
380 	invalidate_all_cached_dirs(tcon);
381 	spin_lock(&tcon->tc_lock);
382 	if (tcon->status == TID_IN_FILES_INVALIDATE)
383 		tcon->status = TID_NEED_TCON;
384 	spin_unlock(&tcon->tc_lock);
385 
386 	/*
387 	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
388 	 * to this tcon.
389 	 */
390 }
391 
392 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
393 {
394 	if ((flags & O_ACCMODE) == O_RDONLY)
395 		return GENERIC_READ;
396 	else if ((flags & O_ACCMODE) == O_WRONLY)
397 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
398 	else if ((flags & O_ACCMODE) == O_RDWR) {
399 		/* GENERIC_ALL is too much permission to request; it
400 		   can cause unnecessary access-denied errors on create */
401 		/* return GENERIC_ALL; */
402 		return (GENERIC_READ | GENERIC_WRITE);
403 	}
404 
405 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
406 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
407 		FILE_READ_DATA);
408 }
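
/*
 * A minimal userspace sketch (not kernel code) of the O_ACCMODE test
 * that cifs_convert_flags() switches on; O_ACCMODE masks off everything
 * but the access-mode bits:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int flags = O_RDWR | O_CREAT | O_TRUNC;
 *
 *		switch (flags & O_ACCMODE) {
 *		case O_RDONLY: printf("read-only\n");  break;
 *		case O_WRONLY: printf("write-only\n"); break;
 *		case O_RDWR:   printf("read-write\n"); break;
 *		}
 *		return 0;	// prints "read-write"
 *	}
 */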
409 
410 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
411 static u32 cifs_posix_convert_flags(unsigned int flags)
412 {
413 	u32 posix_flags = 0;
414 
415 	if ((flags & O_ACCMODE) == O_RDONLY)
416 		posix_flags = SMB_O_RDONLY;
417 	else if ((flags & O_ACCMODE) == O_WRONLY)
418 		posix_flags = SMB_O_WRONLY;
419 	else if ((flags & O_ACCMODE) == O_RDWR)
420 		posix_flags = SMB_O_RDWR;
421 
422 	if (flags & O_CREAT) {
423 		posix_flags |= SMB_O_CREAT;
424 		if (flags & O_EXCL)
425 			posix_flags |= SMB_O_EXCL;
426 	} else if (flags & O_EXCL)
427 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
428 			 current->comm, current->tgid);
429 
430 	if (flags & O_TRUNC)
431 		posix_flags |= SMB_O_TRUNC;
432 	/* be safe and imply O_SYNC for O_DSYNC */
433 	if (flags & O_DSYNC)
434 		posix_flags |= SMB_O_SYNC;
435 	if (flags & O_DIRECTORY)
436 		posix_flags |= SMB_O_DIRECTORY;
437 	if (flags & O_NOFOLLOW)
438 		posix_flags |= SMB_O_NOFOLLOW;
439 	if (flags & O_DIRECT)
440 		posix_flags |= SMB_O_DIRECT;
441 
442 	return posix_flags;
443 }
444 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
445 
446 static inline int cifs_get_disposition(unsigned int flags)
447 {
448 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
449 		return FILE_CREATE;
450 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
451 		return FILE_OVERWRITE_IF;
452 	else if ((flags & O_CREAT) == O_CREAT)
453 		return FILE_OPEN_IF;
454 	else if ((flags & O_TRUNC) == O_TRUNC)
455 		return FILE_OVERWRITE;
456 	else
457 		return FILE_OPEN;
458 }
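
/*
 * Worked examples for cifs_get_disposition() above (illustration only):
 *
 *	O_CREAT | O_EXCL   ->  FILE_CREATE        fail if the file exists
 *	O_CREAT | O_TRUNC  ->  FILE_OVERWRITE_IF  create or truncate
 *	O_CREAT            ->  FILE_OPEN_IF       open or create
 *	O_TRUNC            ->  FILE_OVERWRITE     truncate, must exist
 *	none of the above  ->  FILE_OPEN          open, must exist
 */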
459 
460 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
461 int cifs_posix_open(const char *full_path, struct inode **pinode,
462 			struct super_block *sb, int mode, unsigned int f_flags,
463 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
464 {
465 	int rc;
466 	FILE_UNIX_BASIC_INFO *presp_data;
467 	__u32 posix_flags = 0;
468 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
469 	struct cifs_fattr fattr;
470 	struct tcon_link *tlink;
471 	struct cifs_tcon *tcon;
472 
473 	cifs_dbg(FYI, "posix open %s\n", full_path);
474 
475 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
476 	if (presp_data == NULL)
477 		return -ENOMEM;
478 
479 	tlink = cifs_sb_tlink(cifs_sb);
480 	if (IS_ERR(tlink)) {
481 		rc = PTR_ERR(tlink);
482 		goto posix_open_ret;
483 	}
484 
485 	tcon = tlink_tcon(tlink);
486 	mode &= ~current_umask();
487 
488 	posix_flags = cifs_posix_convert_flags(f_flags);
489 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
490 			     poplock, full_path, cifs_sb->local_nls,
491 			     cifs_remap(cifs_sb));
492 	cifs_put_tlink(tlink);
493 
494 	if (rc)
495 		goto posix_open_ret;
496 
497 	if (presp_data->Type == cpu_to_le32(-1))
498 		goto posix_open_ret; /* open ok, caller does qpathinfo */
499 
500 	if (!pinode)
501 		goto posix_open_ret; /* caller does not need info */
502 
503 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
504 
505 	/* get new inode and set it up */
506 	if (*pinode == NULL) {
507 		cifs_fill_uniqueid(sb, &fattr);
508 		*pinode = cifs_iget(sb, &fattr);
509 		if (!*pinode) {
510 			rc = -ENOMEM;
511 			goto posix_open_ret;
512 		}
513 	} else {
514 		cifs_revalidate_mapping(*pinode);
515 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
516 	}
517 
518 posix_open_ret:
519 	kfree(presp_data);
520 	return rc;
521 }
522 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
523 
524 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
525 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
526 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
527 {
528 	int rc;
529 	int desired_access;
530 	int disposition;
531 	int create_options = CREATE_NOT_DIR;
532 	struct TCP_Server_Info *server = tcon->ses->server;
533 	struct cifs_open_parms oparms;
534 	int rdwr_for_fscache = 0;
535 
536 	if (!server->ops->open)
537 		return -ENOSYS;
538 
539 	/* If we're caching, we need to be able to fill in around partial writes. */
540 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
541 		rdwr_for_fscache = 1;
542 
543 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
544 
545 /*********************************************************************
546  *  open flag mapping table:
547  *
548  *	POSIX Flag            CIFS Disposition
549  *	----------            ----------------
550  *	O_CREAT               FILE_OPEN_IF
551  *	O_CREAT | O_EXCL      FILE_CREATE
552  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
553  *	O_TRUNC               FILE_OVERWRITE
554  *	none of the above     FILE_OPEN
555  *
556  *	Note that no POSIX flag maps directly to the FILE_SUPERSEDE
557  *	disposition (ie create whether or not the file exists);
558  *	O_CREAT | O_TRUNC is similar, but it truncates an existing
559  *	file rather than replacing it with a new one as FILE_SUPERSEDE
560  *	does (which uses the attributes / metadata passed in on open).
561  *
562  *	O_SYNC is a reasonable match to the CIFS writethrough flag
563  *	and the read/write flags match reasonably.  O_LARGEFILE
564  *	is irrelevant because largefile support is always used
565  *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
566  *	O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
567  *********************************************************************/
568 
569 	disposition = cifs_get_disposition(f_flags);
570 
571 	/* BB pass O_SYNC flag through on file attributes .. BB */
572 
573 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
574 	if (f_flags & O_SYNC)
575 		create_options |= CREATE_WRITE_THROUGH;
576 
577 	if (f_flags & O_DIRECT)
578 		create_options |= CREATE_NO_BUFFER;
579 
580 retry_open:
581 	oparms = (struct cifs_open_parms) {
582 		.tcon = tcon,
583 		.cifs_sb = cifs_sb,
584 		.desired_access = desired_access,
585 		.create_options = cifs_create_options(cifs_sb, create_options),
586 		.disposition = disposition,
587 		.path = full_path,
588 		.fid = fid,
589 	};
590 
591 	rc = server->ops->open(xid, &oparms, oplock, buf);
592 	if (rc) {
593 		if (rc == -EACCES && rdwr_for_fscache == 1) {
594 			desired_access = cifs_convert_flags(f_flags, 0);
595 			rdwr_for_fscache = 2;
596 			goto retry_open;
597 		}
598 		return rc;
599 	}
600 	if (rdwr_for_fscache == 2)
601 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
602 
603 	/* TODO: Add support for calling posix query info, but passing in the fid */
604 	if (tcon->unix_ext)
605 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
606 					      xid);
607 	else
608 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
609 					 xid, fid);
610 
611 	if (rc) {
612 		server->ops->close(xid, tcon, fid);
613 		if (rc == -ESTALE)
614 			rc = -EOPENSTALE;
615 	}
616 
617 	return rc;
618 }
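
/*
 * The rdwr_for_fscache variable above acts as a small state machine
 * (illustration only):
 *
 *	0  no fscache upgrade: request exactly what O_ACCMODE implies
 *	1  O_WRONLY open with fscache enabled: ask for read+write so the
 *	   cache can fill in around partial writes
 *	2  the upgraded open got -EACCES: retried as plain write-only,
 *	   so DIO writes must invalidate the cache afterwards
 */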
619 
620 static bool
621 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
622 {
623 	struct cifs_fid_locks *cur;
624 	bool has_locks = false;
625 
626 	down_read(&cinode->lock_sem);
627 	list_for_each_entry(cur, &cinode->llist, llist) {
628 		if (!list_empty(&cur->locks)) {
629 			has_locks = true;
630 			break;
631 		}
632 	}
633 	up_read(&cinode->lock_sem);
634 	return has_locks;
635 }
636 
637 void
638 cifs_down_write(struct rw_semaphore *sem)
639 {
640 	while (!down_write_trylock(sem))
641 		msleep(10);
642 }
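
/*
 * Usage sketch (taken from cifs_new_fileinfo() below): lock_sem is
 * always taken for writing through this helper before cinode->llist or
 * the per-fid lock lists are modified, e.g.:
 *
 *	cifs_down_write(&cinode->lock_sem);
 *	list_add(&fdlocks->llist, &cinode->llist);
 *	up_write(&cinode->lock_sem);
 */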
643 
644 static void cifsFileInfo_put_work(struct work_struct *work);
645 void serverclose_work(struct work_struct *work);
646 
647 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
648 				       struct tcon_link *tlink, __u32 oplock,
649 				       const char *symlink_target)
650 {
651 	struct dentry *dentry = file_dentry(file);
652 	struct inode *inode = d_inode(dentry);
653 	struct cifsInodeInfo *cinode = CIFS_I(inode);
654 	struct cifsFileInfo *cfile;
655 	struct cifs_fid_locks *fdlocks;
656 	struct cifs_tcon *tcon = tlink_tcon(tlink);
657 	struct TCP_Server_Info *server = tcon->ses->server;
658 
659 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
660 	if (cfile == NULL)
661 		return cfile;
662 
663 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
664 	if (!fdlocks) {
665 		kfree(cfile);
666 		return NULL;
667 	}
668 
669 	if (symlink_target) {
670 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
671 		if (!cfile->symlink_target) {
672 			kfree(fdlocks);
673 			kfree(cfile);
674 			return NULL;
675 		}
676 	}
677 
678 	INIT_LIST_HEAD(&fdlocks->locks);
679 	fdlocks->cfile = cfile;
680 	cfile->llist = fdlocks;
681 
682 	cfile->count = 1;
683 	cfile->pid = current->tgid;
684 	cfile->uid = current_fsuid();
685 	cfile->dentry = dget(dentry);
686 	cfile->f_flags = file->f_flags;
687 	cfile->invalidHandle = false;
688 	cfile->deferred_close_scheduled = false;
689 	cfile->tlink = cifs_get_tlink(tlink);
690 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
691 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
692 	INIT_WORK(&cfile->serverclose, serverclose_work);
693 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
694 	mutex_init(&cfile->fh_mutex);
695 	spin_lock_init(&cfile->file_info_lock);
696 
697 	cifs_sb_active(inode->i_sb);
698 
699 	/*
700 	 * If the server returned a read oplock and we have mandatory brlocks,
701 	 * set oplock level to None.
702 	 */
703 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
704 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
705 		oplock = 0;
706 	}
707 
708 	cifs_down_write(&cinode->lock_sem);
709 	list_add(&fdlocks->llist, &cinode->llist);
710 	up_write(&cinode->lock_sem);
711 
712 	spin_lock(&tcon->open_file_lock);
713 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
714 		oplock = fid->pending_open->oplock;
715 	list_del(&fid->pending_open->olist);
716 
717 	fid->purge_cache = false;
718 	server->ops->set_fid(cfile, fid, oplock);
719 
720 	list_add(&cfile->tlist, &tcon->openFileList);
721 	atomic_inc(&tcon->num_local_opens);
722 
723 	/* if this is a readable file instance, put it first in the list */
724 	spin_lock(&cinode->open_file_lock);
725 	if (file->f_mode & FMODE_READ)
726 		list_add(&cfile->flist, &cinode->openFileList);
727 	else
728 		list_add_tail(&cfile->flist, &cinode->openFileList);
729 	spin_unlock(&cinode->open_file_lock);
730 	spin_unlock(&tcon->open_file_lock);
731 
732 	if (fid->purge_cache)
733 		cifs_zap_mapping(inode);
734 
735 	file->private_data = cfile;
736 	return cfile;
737 }
738 
739 struct cifsFileInfo *
740 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
741 {
742 	spin_lock(&cifs_file->file_info_lock);
743 	cifsFileInfo_get_locked(cifs_file);
744 	spin_unlock(&cifs_file->file_info_lock);
745 	return cifs_file;
746 }
747 
748 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
749 {
750 	struct inode *inode = d_inode(cifs_file->dentry);
751 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
752 	struct cifsLockInfo *li, *tmp;
753 	struct super_block *sb = inode->i_sb;
754 
755 	/*
756 	 * Delete any outstanding lock records. We'll lose them when the file
757 	 * is closed anyway.
758 	 */
759 	cifs_down_write(&cifsi->lock_sem);
760 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
761 		list_del(&li->llist);
762 		cifs_del_lock_waiters(li);
763 		kfree(li);
764 	}
765 	list_del(&cifs_file->llist->llist);
766 	kfree(cifs_file->llist);
767 	up_write(&cifsi->lock_sem);
768 
769 	cifs_put_tlink(cifs_file->tlink);
770 	dput(cifs_file->dentry);
771 	cifs_sb_deactive(sb);
772 	kfree(cifs_file->symlink_target);
773 	kfree(cifs_file);
774 }
775 
776 static void cifsFileInfo_put_work(struct work_struct *work)
777 {
778 	struct cifsFileInfo *cifs_file = container_of(work,
779 			struct cifsFileInfo, put);
780 
781 	cifsFileInfo_put_final(cifs_file);
782 }
783 
784 void serverclose_work(struct work_struct *work)
785 {
786 	struct cifsFileInfo *cifs_file = container_of(work,
787 			struct cifsFileInfo, serverclose);
788 
789 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
790 	struct TCP_Server_Info *server = tcon->ses->server;
792 	int rc = 0;
793 	int retries = 0;
794 	int MAX_RETRIES = 4;
795 
796 	do {
797 		if (server->ops->close_getattr)
798 			rc = server->ops->close_getattr(0, tcon, cifs_file);
799 		else if (server->ops->close)
800 			rc = server->ops->close(0, tcon, &cifs_file->fid);
801 
802 		if (rc == -EBUSY || rc == -EAGAIN) {
803 			retries++;
804 			msleep(250);
805 		}
806 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));
808 
809 	if (retries == MAX_RETRIES)
810 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
811 
812 	if (cifs_file->offload)
813 		queue_work(fileinfo_put_wq, &cifs_file->put);
814 	else
815 		cifsFileInfo_put_final(cifs_file);
816 }
817 
818 /**
819  * cifsFileInfo_put - release a reference of file priv data
820  *
821  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
822  *
823  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
824  */
825 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
826 {
827 	_cifsFileInfo_put(cifs_file, true, true);
828 }
829 
830 /**
831  * _cifsFileInfo_put - release a reference of file priv data
832  *
833  * This may involve closing the filehandle @cifs_file out on the
834  * server. Must be called without holding tcon->open_file_lock,
835  * cinode->open_file_lock and cifs_file->file_info_lock.
836  *
837  * If @wait_for_oplock_handler is true and we are releasing the last
838  * reference, wait for any running oplock break handler of the file
839  * and cancel any pending one.
840  *
841  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
842  * @wait_oplock_handler: must be false if called from oplock_break_handler
843  * @offload:	if true, defer the final release to a work queue; callers pass false on close and oplock breaks
844  *
845  */
846 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
847 		       bool wait_oplock_handler, bool offload)
848 {
849 	struct inode *inode = d_inode(cifs_file->dentry);
850 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
851 	struct TCP_Server_Info *server = tcon->ses->server;
852 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
853 	struct super_block *sb = inode->i_sb;
854 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
855 	struct cifs_fid fid = {};
856 	struct cifs_pending_open open;
857 	bool oplock_break_cancelled;
858 	bool serverclose_offloaded = false;
859 
860 	spin_lock(&tcon->open_file_lock);
861 	spin_lock(&cifsi->open_file_lock);
862 	spin_lock(&cifs_file->file_info_lock);
863 
864 	cifs_file->offload = offload;
865 	if (--cifs_file->count > 0) {
866 		spin_unlock(&cifs_file->file_info_lock);
867 		spin_unlock(&cifsi->open_file_lock);
868 		spin_unlock(&tcon->open_file_lock);
869 		return;
870 	}
871 	spin_unlock(&cifs_file->file_info_lock);
872 
873 	if (server->ops->get_lease_key)
874 		server->ops->get_lease_key(inode, &fid);
875 
876 	/* store open in pending opens to make sure we don't miss lease break */
877 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
878 
879 	/* remove it from the lists */
880 	list_del(&cifs_file->flist);
881 	list_del(&cifs_file->tlist);
882 	atomic_dec(&tcon->num_local_opens);
883 
884 	if (list_empty(&cifsi->openFileList)) {
885 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
886 			 d_inode(cifs_file->dentry));
887 		/*
888 		 * In strict cache mode we need to invalidate the mapping on the
889 		 * last close because stale pages may cause an error when we open
890 		 * this file again and get at least a level II oplock.
891 		 */
892 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
893 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
894 		cifs_set_oplock_level(cifsi, 0);
895 	}
896 
897 	spin_unlock(&cifsi->open_file_lock);
898 	spin_unlock(&tcon->open_file_lock);
899 
900 	oplock_break_cancelled = wait_oplock_handler ?
901 		cancel_work_sync(&cifs_file->oplock_break) : false;
902 
903 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
904 		struct TCP_Server_Info *server = tcon->ses->server;
905 		unsigned int xid;
906 		int rc = 0;
907 
908 		xid = get_xid();
909 		if (server->ops->close_getattr)
910 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
911 		else if (server->ops->close)
912 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
913 		_free_xid(xid);
914 
915 		if (rc == -EBUSY || rc == -EAGAIN) {
916 			// Server close failed, hence offloading it as an async op
917 			queue_work(serverclose_wq, &cifs_file->serverclose);
918 			serverclose_offloaded = true;
919 		}
920 	}
921 
922 	if (oplock_break_cancelled)
923 		cifs_done_oplock_break(cifsi);
924 
925 	cifs_del_pending_open(&open);
926 
927 	// If serverclose has been offloaded to the workqueue (on failure), it
928 	// will handle offloading the put as well. If serverclose was not
929 	// offloaded, we need to handle offloading the put here.
930 	if (!serverclose_offloaded) {
931 		if (offload)
932 			queue_work(fileinfo_put_wq, &cifs_file->put);
933 		else
934 			cifsFileInfo_put_final(cifs_file);
935 	}
936 }
937 
938 int cifs_open(struct inode *inode, struct file *file)
939 {
941 	int rc = -EACCES;
942 	unsigned int xid;
943 	__u32 oplock;
944 	struct cifs_sb_info *cifs_sb;
945 	struct TCP_Server_Info *server;
946 	struct cifs_tcon *tcon;
947 	struct tcon_link *tlink;
948 	struct cifsFileInfo *cfile = NULL;
949 	void *page;
950 	const char *full_path;
951 	bool posix_open_ok = false;
952 	struct cifs_fid fid = {};
953 	struct cifs_pending_open open;
954 	struct cifs_open_info_data data = {};
955 
956 	xid = get_xid();
957 
958 	cifs_sb = CIFS_SB(inode->i_sb);
959 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
960 		free_xid(xid);
961 		return -EIO;
962 	}
963 
964 	tlink = cifs_sb_tlink(cifs_sb);
965 	if (IS_ERR(tlink)) {
966 		free_xid(xid);
967 		return PTR_ERR(tlink);
968 	}
969 	tcon = tlink_tcon(tlink);
970 	server = tcon->ses->server;
971 
972 	page = alloc_dentry_path();
973 	full_path = build_path_from_dentry(file_dentry(file), page);
974 	if (IS_ERR(full_path)) {
975 		rc = PTR_ERR(full_path);
976 		goto out;
977 	}
978 
979 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
980 		 inode, file->f_flags, full_path);
981 
982 	if (file->f_flags & O_DIRECT &&
983 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
984 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
985 			file->f_op = &cifs_file_direct_nobrl_ops;
986 		else
987 			file->f_op = &cifs_file_direct_ops;
988 	}
989 
990 	/* Get the cached handle as SMB2 close is deferred */
991 	rc = cifs_get_readable_path(tcon, full_path, &cfile);
992 	if (rc == 0) {
993 		if (file->f_flags == cfile->f_flags) {
994 			file->private_data = cfile;
995 			spin_lock(&CIFS_I(inode)->deferred_lock);
996 			cifs_del_deferred_close(cfile);
997 			spin_unlock(&CIFS_I(inode)->deferred_lock);
998 			goto use_cache;
999 		} else {
1000 			_cifsFileInfo_put(cfile, true, false);
1001 		}
1002 	}
1003 
1004 	if (server->oplocks)
1005 		oplock = REQ_OPLOCK;
1006 	else
1007 		oplock = 0;
1008 
1009 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1010 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1011 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1012 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1013 		/* can not refresh inode info since size could be stale */
1014 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1015 				cifs_sb->ctx->file_mode /* ignored */,
1016 				file->f_flags, &oplock, &fid.netfid, xid);
1017 		if (rc == 0) {
1018 			cifs_dbg(FYI, "posix open succeeded\n");
1019 			posix_open_ok = true;
1020 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1021 			if (tcon->ses->serverNOS)
1022 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1023 					 tcon->ses->ip_addr,
1024 					 tcon->ses->serverNOS);
1025 			tcon->broken_posix_open = true;
1026 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1027 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1028 			goto out;
1029 		/*
1030 		 * Else fallthrough to retry open the old way on network i/o
1031 		 * or DFS errors.
1032 		 */
1033 	}
1034 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1035 
1036 	if (server->ops->get_lease_key)
1037 		server->ops->get_lease_key(inode, &fid);
1038 
1039 	cifs_add_pending_open(&fid, tlink, &open);
1040 
1041 	if (!posix_open_ok) {
1042 		if (server->ops->get_lease_key)
1043 			server->ops->get_lease_key(inode, &fid);
1044 
1045 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1046 				  xid, &data);
1047 		if (rc) {
1048 			cifs_del_pending_open(&open);
1049 			goto out;
1050 		}
1051 	}
1052 
1053 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1054 	if (cfile == NULL) {
1055 		if (server->ops->close)
1056 			server->ops->close(xid, tcon, &fid);
1057 		cifs_del_pending_open(&open);
1058 		rc = -ENOMEM;
1059 		goto out;
1060 	}
1061 
1062 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1063 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1064 		/*
1065 		 * Time to set mode which we can not set earlier due to
1066 		 * problems creating new read-only files.
1067 		 */
1068 		struct cifs_unix_set_info_args args = {
1069 			.mode	= inode->i_mode,
1070 			.uid	= INVALID_UID, /* no change */
1071 			.gid	= INVALID_GID, /* no change */
1072 			.ctime	= NO_CHANGE_64,
1073 			.atime	= NO_CHANGE_64,
1074 			.mtime	= NO_CHANGE_64,
1075 			.device	= 0,
1076 		};
1077 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1078 				       cfile->pid);
1079 	}
1080 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1081 
1082 use_cache:
1083 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1084 			   file->f_mode & FMODE_WRITE);
1085 	if (!(file->f_flags & O_DIRECT))
1086 		goto out;
1087 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1088 		goto out;
1089 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1090 
1091 out:
1092 	free_dentry_path(page);
1093 	free_xid(xid);
1094 	cifs_put_tlink(tlink);
1095 	cifs_free_open_info(&data);
1096 	return rc;
1097 }
1098 
1099 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1100 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1101 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1102 
1103 /*
1104  * Try to reacquire byte range locks that were released when session
1105  * to server was lost.
1106  */
1107 static int
1108 cifs_relock_file(struct cifsFileInfo *cfile)
1109 {
1110 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1111 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1112 	int rc = 0;
1113 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1114 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1115 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1116 
1117 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1118 	if (cinode->can_cache_brlcks) {
1119 		/* can cache locks - no need to relock */
1120 		up_read(&cinode->lock_sem);
1121 		return rc;
1122 	}
1123 
1124 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1125 	if (cap_unix(tcon->ses) &&
1126 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1127 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1128 		rc = cifs_push_posix_locks(cfile);
1129 	else
1130 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1131 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1132 
1133 	up_read(&cinode->lock_sem);
1134 	return rc;
1135 }
1136 
1137 static int
1138 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1139 {
1140 	int rc = -EACCES;
1141 	unsigned int xid;
1142 	__u32 oplock;
1143 	struct cifs_sb_info *cifs_sb;
1144 	struct cifs_tcon *tcon;
1145 	struct TCP_Server_Info *server;
1146 	struct cifsInodeInfo *cinode;
1147 	struct inode *inode;
1148 	void *page;
1149 	const char *full_path;
1150 	int desired_access;
1151 	int disposition = FILE_OPEN;
1152 	int create_options = CREATE_NOT_DIR;
1153 	struct cifs_open_parms oparms;
1154 	int rdwr_for_fscache = 0;
1155 
1156 	xid = get_xid();
1157 	mutex_lock(&cfile->fh_mutex);
1158 	if (!cfile->invalidHandle) {
1159 		mutex_unlock(&cfile->fh_mutex);
1160 		free_xid(xid);
1161 		return 0;
1162 	}
1163 
1164 	inode = d_inode(cfile->dentry);
1165 	cifs_sb = CIFS_SB(inode->i_sb);
1166 	tcon = tlink_tcon(cfile->tlink);
1167 	server = tcon->ses->server;
1168 
1169 	/*
1170 	 * We cannot grab the rename sem here because various ops, including
1171 	 * some that already hold it, can end up causing writepage to get
1172 	 * called; if the server was down, that means we end up here and we
1173 	 * can never tell whether the caller already holds the rename_sem.
1174 	 */
1175 	page = alloc_dentry_path();
1176 	full_path = build_path_from_dentry(cfile->dentry, page);
1177 	if (IS_ERR(full_path)) {
1178 		mutex_unlock(&cfile->fh_mutex);
1179 		free_dentry_path(page);
1180 		free_xid(xid);
1181 		return PTR_ERR(full_path);
1182 	}
1183 
1184 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1185 		 inode, cfile->f_flags, full_path);
1186 
1187 	if (tcon->ses->server->oplocks)
1188 		oplock = REQ_OPLOCK;
1189 	else
1190 		oplock = 0;
1191 
1192 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1193 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1194 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1195 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1196 		/*
1197 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1198 		 * original open. Must mask them off for a reopen.
1199 		 */
1200 		unsigned int oflags = cfile->f_flags &
1201 						~(O_CREAT | O_EXCL | O_TRUNC);
1202 
1203 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1204 				     cifs_sb->ctx->file_mode /* ignored */,
1205 				     oflags, &oplock, &cfile->fid.netfid, xid);
1206 		if (rc == 0) {
1207 			cifs_dbg(FYI, "posix reopen succeeded\n");
1208 			oparms.reconnect = true;
1209 			goto reopen_success;
1210 		}
1211 		/*
1212 		 * Fall through to retry the open the old way on errors; in the
1213 		 * reconnect path especially, it is important to retry hard.
1214 		 */
1215 	}
1216 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1217 
1218 	/* If we're caching, we need to be able to fill in around partial writes. */
1219 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1220 		rdwr_for_fscache = 1;
1221 
1222 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1223 
1224 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1225 	if (cfile->f_flags & O_SYNC)
1226 		create_options |= CREATE_WRITE_THROUGH;
1227 
1228 	if (cfile->f_flags & O_DIRECT)
1229 		create_options |= CREATE_NO_BUFFER;
1230 
1231 	if (server->ops->get_lease_key)
1232 		server->ops->get_lease_key(inode, &cfile->fid);
1233 
1234 retry_open:
1235 	oparms = (struct cifs_open_parms) {
1236 		.tcon = tcon,
1237 		.cifs_sb = cifs_sb,
1238 		.desired_access = desired_access,
1239 		.create_options = cifs_create_options(cifs_sb, create_options),
1240 		.disposition = disposition,
1241 		.path = full_path,
1242 		.fid = &cfile->fid,
1243 		.reconnect = true,
1244 	};
1245 
1246 	/*
1247 	 * We cannot refresh the inode by passing in a file_info buf returned
1248 	 * by ops->open and then calling get_inode_info with that buf, since
1249 	 * the file might have write-behind data that needs to be flushed and
1250 	 * the server's version of the file size can be stale. If we knew for
1251 	 * sure that the inode was not dirty locally, we could do this.
1252 	 */
1253 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1254 	if (rc == -ENOENT && oparms.reconnect == false) {
1255 		/* durable handle timeout is expired - open the file again */
1256 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1257 		/* indicate that we need to relock the file */
1258 		oparms.reconnect = true;
1259 	}
1260 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1261 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1262 		rdwr_for_fscache = 2;
1263 		goto retry_open;
1264 	}
1265 
1266 	if (rc) {
1267 		mutex_unlock(&cfile->fh_mutex);
1268 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1269 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1270 		goto reopen_error_exit;
1271 	}
1272 
1273 	if (rdwr_for_fscache == 2)
1274 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1275 
1276 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1277 reopen_success:
1278 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1279 	cfile->invalidHandle = false;
1280 	mutex_unlock(&cfile->fh_mutex);
1281 	cinode = CIFS_I(inode);
1282 
1283 	if (can_flush) {
1284 		rc = filemap_write_and_wait(inode->i_mapping);
1285 		if (!is_interrupt_error(rc))
1286 			mapping_set_error(inode->i_mapping, rc);
1287 
1288 		if (tcon->posix_extensions) {
1289 			rc = smb311_posix_get_inode_info(&inode, full_path,
1290 							 NULL, inode->i_sb, xid);
1291 		} else if (tcon->unix_ext) {
1292 			rc = cifs_get_inode_info_unix(&inode, full_path,
1293 						      inode->i_sb, xid);
1294 		} else {
1295 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1296 						 inode->i_sb, xid, NULL);
1297 		}
1298 	}
1299 	/*
1300 	 * Else we are already writing data out to the server and could
1301 	 * deadlock if we tried to flush; and since we do not know whether we
1302 	 * have data that would invalidate the current end of file on the
1303 	 * server, we cannot go to the server to get the new inode info.
1304 	 */
1305 
1306 	/*
1307 	 * If the server returned a read oplock and we have mandatory brlocks,
1308 	 * set oplock level to None.
1309 	 */
1310 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1311 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1312 		oplock = 0;
1313 	}
1314 
1315 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1316 	if (oparms.reconnect)
1317 		cifs_relock_file(cfile);
1318 
1319 reopen_error_exit:
1320 	free_dentry_path(page);
1321 	free_xid(xid);
1322 	return rc;
1323 }
1324 
1325 void smb2_deferred_work_close(struct work_struct *work)
1326 {
1327 	struct cifsFileInfo *cfile = container_of(work,
1328 			struct cifsFileInfo, deferred.work);
1329 
1330 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1331 	cifs_del_deferred_close(cfile);
1332 	cfile->deferred_close_scheduled = false;
1333 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1334 	_cifsFileInfo_put(cfile, true, false);
1335 }
1336 
1337 static bool
1338 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1339 {
1340 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1341 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1342 
1343 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1344 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1345 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1346 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1348 }
1349 
1350 int cifs_close(struct inode *inode, struct file *file)
1351 {
1352 	struct cifsFileInfo *cfile;
1353 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1354 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1355 	struct cifs_deferred_close *dclose;
1356 
1357 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1358 
1359 	if (file->private_data != NULL) {
1360 		cfile = file->private_data;
1361 		file->private_data = NULL;
1362 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1363 		if ((cfile->status_file_deleted == false) &&
1364 		    (smb2_can_defer_close(inode, dclose))) {
1365 			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
1366 				inode_set_mtime_to_ts(inode,
1367 						      inode_set_ctime_current(inode));
1368 			}
1369 			spin_lock(&cinode->deferred_lock);
1370 			cifs_add_deferred_close(cfile, dclose);
1371 			if (cfile->deferred_close_scheduled &&
1372 			    delayed_work_pending(&cfile->deferred)) {
1373 				/*
1374 				 * If there is no pending work, mod_delayed_work queues new work.
1375 				 * So, increase the ref count to avoid use-after-free.
1376 				 */
1377 				if (!mod_delayed_work(deferredclose_wq,
1378 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1379 					cifsFileInfo_get(cfile);
1380 			} else {
1381 				/* Deferred close for files */
1382 				queue_delayed_work(deferredclose_wq,
1383 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1384 				cfile->deferred_close_scheduled = true;
1385 				spin_unlock(&cinode->deferred_lock);
1386 				return 0;
1387 			}
1388 			spin_unlock(&cinode->deferred_lock);
1389 			_cifsFileInfo_put(cfile, true, false);
1390 		} else {
1391 			_cifsFileInfo_put(cfile, true, false);
1392 			kfree(dclose);
1393 		}
1394 	}
1395 
1396 	/* return code from the ->release op is always ignored */
1397 	return 0;
1398 }
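
/*
 * Deferred-close timeline (illustration; closetimeo is a mount option):
 * close(2) returns 0 immediately, the server handle stays open for up to
 * closetimeo of delay, and a reopen of the same path within that window
 * can reuse the cached handle via cifs_get_readable_path() in
 * cifs_open() rather than doing another round trip.
 */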
1399 
1400 void
1401 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1402 {
1403 	struct cifsFileInfo *open_file, *tmp;
1404 	struct list_head tmp_list;
1405 
1406 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1407 		return;
1408 
1409 	tcon->need_reopen_files = false;
1410 
1411 	cifs_dbg(FYI, "Reopen persistent handles\n");
1412 	INIT_LIST_HEAD(&tmp_list);
1413 
1414 	/* list all files open on tree connection, reopen resilient handles */
1415 	spin_lock(&tcon->open_file_lock);
1416 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1417 		if (!open_file->invalidHandle)
1418 			continue;
1419 		cifsFileInfo_get(open_file);
1420 		list_add_tail(&open_file->rlist, &tmp_list);
1421 	}
1422 	spin_unlock(&tcon->open_file_lock);
1423 
1424 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1425 		if (cifs_reopen_file(open_file, false /* do not flush */))
1426 			tcon->need_reopen_files = true;
1427 		list_del_init(&open_file->rlist);
1428 		cifsFileInfo_put(open_file);
1429 	}
1430 }
1431 
1432 int cifs_closedir(struct inode *inode, struct file *file)
1433 {
1434 	int rc = 0;
1435 	unsigned int xid;
1436 	struct cifsFileInfo *cfile = file->private_data;
1437 	struct cifs_tcon *tcon;
1438 	struct TCP_Server_Info *server;
1439 	char *buf;
1440 
1441 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1442 
1443 	if (cfile == NULL)
1444 		return rc;
1445 
1446 	xid = get_xid();
1447 	tcon = tlink_tcon(cfile->tlink);
1448 	server = tcon->ses->server;
1449 
1450 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1451 	spin_lock(&cfile->file_info_lock);
1452 	if (server->ops->dir_needs_close(cfile)) {
1453 		cfile->invalidHandle = true;
1454 		spin_unlock(&cfile->file_info_lock);
1455 		if (server->ops->close_dir)
1456 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1457 		else
1458 			rc = -ENOSYS;
1459 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1460 		/* not much we can do if it fails anyway, ignore rc */
1461 		rc = 0;
1462 	} else
1463 		spin_unlock(&cfile->file_info_lock);
1464 
1465 	buf = cfile->srch_inf.ntwrk_buf_start;
1466 	if (buf) {
1467 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1468 		cfile->srch_inf.ntwrk_buf_start = NULL;
1469 		if (cfile->srch_inf.smallBuf)
1470 			cifs_small_buf_release(buf);
1471 		else
1472 			cifs_buf_release(buf);
1473 	}
1474 
1475 	cifs_put_tlink(cfile->tlink);
1476 	kfree(file->private_data);
1477 	file->private_data = NULL;
1478 	/* BB can we lock the filestruct while this is going on? */
1479 	free_xid(xid);
1480 	return rc;
1481 }
1482 
1483 static struct cifsLockInfo *
1484 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1485 {
1486 	struct cifsLockInfo *lock =
1487 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1488 	if (!lock)
1489 		return lock;
1490 	lock->offset = offset;
1491 	lock->length = length;
1492 	lock->type = type;
1493 	lock->pid = current->tgid;
1494 	lock->flags = flags;
1495 	INIT_LIST_HEAD(&lock->blist);
1496 	init_waitqueue_head(&lock->block_q);
1497 	return lock;
1498 }
1499 
1500 void
1501 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1502 {
1503 	struct cifsLockInfo *li, *tmp;
1504 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1505 		list_del_init(&li->blist);
1506 		wake_up(&li->block_q);
1507 	}
1508 }
1509 
1510 #define CIFS_LOCK_OP	0
1511 #define CIFS_READ_OP	1
1512 #define CIFS_WRITE_OP	2
1513 
1514 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1515 static bool
1516 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1517 			    __u64 length, __u8 type, __u16 flags,
1518 			    struct cifsFileInfo *cfile,
1519 			    struct cifsLockInfo **conf_lock, int rw_check)
1520 {
1521 	struct cifsLockInfo *li;
1522 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1523 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1524 
1525 	list_for_each_entry(li, &fdlocks->locks, llist) {
1526 		if (offset + length <= li->offset ||
1527 		    offset >= li->offset + li->length)
1528 			continue;
1529 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1530 		    server->ops->compare_fids(cfile, cur_cfile)) {
1531 			/* shared lock prevents write op through the same fid */
1532 			if (!(li->type & server->vals->shared_lock_type) ||
1533 			    rw_check != CIFS_WRITE_OP)
1534 				continue;
1535 		}
1536 		if ((type & server->vals->shared_lock_type) &&
1537 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1538 		     current->tgid == li->pid) || type == li->type))
1539 			continue;
1540 		if (rw_check == CIFS_LOCK_OP &&
1541 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1542 		    server->ops->compare_fids(cfile, cur_cfile))
1543 			continue;
1544 		if (conf_lock)
1545 			*conf_lock = li;
1546 		return true;
1547 	}
1548 	return false;
1549 }
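
/*
 * The range test above skips only fully disjoint locks; everything else
 * is a potential conflict.  Worked example (illustration only): an
 * existing lock at offset 100, length 50 covers bytes 100..149; a
 * request at offset 140, length 20 (bytes 140..159) overlaps it and
 * falls through to the type/fid checks, while a request starting at
 * offset 150 is skipped as disjoint.
 */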
1550 
1551 bool
1552 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1553 			__u8 type, __u16 flags,
1554 			struct cifsLockInfo **conf_lock, int rw_check)
1555 {
1556 	bool rc = false;
1557 	struct cifs_fid_locks *cur;
1558 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1559 
1560 	list_for_each_entry(cur, &cinode->llist, llist) {
1561 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1562 						 flags, cfile, conf_lock,
1563 						 rw_check);
1564 		if (rc)
1565 			break;
1566 	}
1567 
1568 	return rc;
1569 }
1570 
1571 /*
1572  * Check if there is another lock that prevents us from setting the lock
1573  * (mandatory style). If such a lock exists, update the flock structure with
1574  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1575  * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
1576  * ask the server, or 1 otherwise.
1577  */
1578 static int
1579 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1580 	       __u8 type, struct file_lock *flock)
1581 {
1582 	int rc = 0;
1583 	struct cifsLockInfo *conf_lock;
1584 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1585 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1586 	bool exist;
1587 
1588 	down_read(&cinode->lock_sem);
1589 
1590 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1591 					flock->c.flc_flags, &conf_lock,
1592 					CIFS_LOCK_OP);
1593 	if (exist) {
1594 		flock->fl_start = conf_lock->offset;
1595 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1596 		flock->c.flc_pid = conf_lock->pid;
1597 		if (conf_lock->type & server->vals->shared_lock_type)
1598 			flock->c.flc_type = F_RDLCK;
1599 		else
1600 			flock->c.flc_type = F_WRLCK;
1601 	} else if (!cinode->can_cache_brlcks)
1602 		rc = 1;
1603 	else
1604 		flock->c.flc_type = F_UNLCK;
1605 
1606 	up_read(&cinode->lock_sem);
1607 	return rc;
1608 }
1609 
1610 static void
1611 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1612 {
1613 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1614 	cifs_down_write(&cinode->lock_sem);
1615 	list_add_tail(&lock->llist, &cfile->llist->locks);
1616 	up_write(&cinode->lock_sem);
1617 }
1618 
1619 /*
1620  * Set the byte-range lock (mandatory style). Returns:
1621  * 1) 0, if we set the lock and don't need to request to the server;
1622  * 2) 1, if no locks prevent us but we need to request to the server;
1623  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1624  */
1625 static int
1626 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1627 		 bool wait)
1628 {
1629 	struct cifsLockInfo *conf_lock;
1630 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1631 	bool exist;
1632 	int rc = 0;
1633 
1634 try_again:
1635 	exist = false;
1636 	cifs_down_write(&cinode->lock_sem);
1637 
1638 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1639 					lock->type, lock->flags, &conf_lock,
1640 					CIFS_LOCK_OP);
1641 	if (!exist && cinode->can_cache_brlcks) {
1642 		list_add_tail(&lock->llist, &cfile->llist->locks);
1643 		up_write(&cinode->lock_sem);
1644 		return rc;
1645 	}
1646 
1647 	if (!exist)
1648 		rc = 1;
1649 	else if (!wait)
1650 		rc = -EACCES;
1651 	else {
1652 		list_add_tail(&lock->blist, &conf_lock->blist);
1653 		up_write(&cinode->lock_sem);
1654 		rc = wait_event_interruptible(lock->block_q,
1655 					(lock->blist.prev == &lock->blist) &&
1656 					(lock->blist.next == &lock->blist));
1657 		if (!rc)
1658 			goto try_again;
1659 		cifs_down_write(&cinode->lock_sem);
1660 		list_del_init(&lock->blist);
1661 	}
1662 
1663 	up_write(&cinode->lock_sem);
1664 	return rc;
1665 }
1666 
1667 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1668 /*
1669  * Check if there is another lock that prevents us from setting the lock
1670  * (posix style). If such a lock exists, update the flock structure with its
1671  * properties. Otherwise, set the flock type to F_UNLCK if we can cache
1672  * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
1673  * ask the server, or 1 otherwise.
1674  */
1675 static int
1676 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1677 {
1678 	int rc = 0;
1679 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1680 	unsigned char saved_type = flock->c.flc_type;
1681 
1682 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1683 		return 1;
1684 
1685 	down_read(&cinode->lock_sem);
1686 	posix_test_lock(file, flock);
1687 
1688 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1689 		flock->c.flc_type = saved_type;
1690 		rc = 1;
1691 	}
1692 
1693 	up_read(&cinode->lock_sem);
1694 	return rc;
1695 }
1696 
1697 /*
1698  * Set the byte-range lock (posix style). Returns:
1699  * 1) <0, if the error occurs while setting the lock;
1700  * 2) 0, if we set the lock and don't need to request to the server;
1701  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1702  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1703  */
1704 static int
1705 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1706 {
1707 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1708 	int rc = FILE_LOCK_DEFERRED + 1;
1709 
1710 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1711 		return rc;
1712 
1713 	cifs_down_write(&cinode->lock_sem);
1714 	if (!cinode->can_cache_brlcks) {
1715 		up_write(&cinode->lock_sem);
1716 		return rc;
1717 	}
1718 
1719 	rc = posix_lock_file(file, flock, NULL);
1720 	up_write(&cinode->lock_sem);
1721 	return rc;
1722 }
1723 
1724 int
1725 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1726 {
1727 	unsigned int xid;
1728 	int rc = 0, stored_rc;
1729 	struct cifsLockInfo *li, *tmp;
1730 	struct cifs_tcon *tcon;
1731 	unsigned int num, max_num, max_buf;
1732 	LOCKING_ANDX_RANGE *buf, *cur;
1733 	static const int types[] = {
1734 		LOCKING_ANDX_LARGE_FILES,
1735 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1736 	};
1737 	int i;
1738 
1739 	xid = get_xid();
1740 	tcon = tlink_tcon(cfile->tlink);
1741 
1742 	/*
1743 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1744 	 * and check it before using.
1745 	 */
1746 	max_buf = tcon->ses->server->maxBuf;
1747 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1748 		free_xid(xid);
1749 		return -EINVAL;
1750 	}
1751 
1752 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1753 		     PAGE_SIZE);
1754 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1755 			PAGE_SIZE);
1756 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1757 						sizeof(LOCKING_ANDX_RANGE);
1758 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1759 	if (!buf) {
1760 		free_xid(xid);
1761 		return -ENOMEM;
1762 	}
1763 
1764 	for (i = 0; i < 2; i++) {
1765 		cur = buf;
1766 		num = 0;
1767 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1768 			if (li->type != types[i])
1769 				continue;
1770 			cur->Pid = cpu_to_le16(li->pid);
1771 			cur->LengthLow = cpu_to_le32((u32)li->length);
1772 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1773 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1774 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1775 			if (++num == max_num) {
1776 				stored_rc = cifs_lockv(xid, tcon,
1777 						       cfile->fid.netfid,
1778 						       (__u8)li->type, 0, num,
1779 						       buf);
1780 				if (stored_rc)
1781 					rc = stored_rc;
1782 				cur = buf;
1783 				num = 0;
1784 			} else
1785 				cur++;
1786 		}
1787 
1788 		if (num) {
1789 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1790 					       (__u8)types[i], 0, num, buf);
1791 			if (stored_rc)
1792 				rc = stored_rc;
1793 		}
1794 	}
1795 
1796 	kfree(buf);
1797 	free_xid(xid);
1798 	return rc;
1799 }
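
/*
 * Editor's note with illustrative numbers (the real values are negotiated
 * and arch-dependent, so treat them as assumptions): with PAGE_SIZE == 4096,
 * maxBuf == 16384, a 32-byte struct smb_hdr and a 20-byte
 * LOCKING_ANDX_RANGE, the sizing logic above gives
 *
 *	max_buf = min(16384 - 32, 4096) = 4096
 *	max_num = (4096 - 32) / 20      = 203
 *
 * i.e. up to 203 ranges are batched into a single cifs_lockv() call per
 * lock type.
 */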
1800 
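/*
 * Editor's note: hash_lockowner() folds the opaque lock-owner pointer into
 * a stable 32-bit token, xored with cifs_lock_secret so the raw kernel
 * pointer is never sent on the wire. The same owner always hashes to the
 * same value, which is what lets a later unlock match its lock.
 */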
1801 static __u32
1802 hash_lockowner(fl_owner_t owner)
1803 {
1804 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1805 }
1806 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1807 
1808 struct lock_to_push {
1809 	struct list_head llist;
1810 	__u64 offset;
1811 	__u64 length;
1812 	__u32 pid;
1813 	__u16 netfid;
1814 	__u8 type;
1815 };
1816 
1817 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1818 static int
1819 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1820 {
1821 	struct inode *inode = d_inode(cfile->dentry);
1822 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1823 	struct file_lock *flock;
1824 	struct file_lock_context *flctx = locks_inode_context(inode);
1825 	unsigned int count = 0, i;
1826 	int rc = 0, xid, type;
1827 	struct list_head locks_to_send, *el;
1828 	struct lock_to_push *lck, *tmp;
1829 	__u64 length;
1830 
1831 	xid = get_xid();
1832 
1833 	if (!flctx)
1834 		goto out;
1835 
1836 	spin_lock(&flctx->flc_lock);
1837 	list_for_each(el, &flctx->flc_posix) {
1838 		count++;
1839 	}
1840 	spin_unlock(&flctx->flc_lock);
1841 
1842 	INIT_LIST_HEAD(&locks_to_send);
1843 
1844 	/*
1845 	 * Allocating count locks is enough because no FL_POSIX locks can be
1846 	 * added to the list while we are holding cinode->lock_sem, which
1847 	 * protects the locking operations on this inode.
1848 	 */
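	/*
	 * Editor's note: the allocations must also happen here, outside
	 * flc_lock, because flc_lock is a spinlock and GFP_KERNEL
	 * allocations may sleep - hence the count-first, fill-later
	 * two-pass scheme.
	 */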
1849 	for (i = 0; i < count; i++) {
1850 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1851 		if (!lck) {
1852 			rc = -ENOMEM;
1853 			goto err_out;
1854 		}
1855 		list_add_tail(&lck->llist, &locks_to_send);
1856 	}
1857 
1858 	el = locks_to_send.next;
1859 	spin_lock(&flctx->flc_lock);
1860 	for_each_file_lock(flock, &flctx->flc_posix) {
1861 		unsigned char ftype = flock->c.flc_type;
1862 
1863 		if (el == &locks_to_send) {
1864 			/*
1865 			 * The list ended. We don't have enough allocated
1866 			 * structures - something is really wrong.
1867 			 */
1868 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1869 			break;
1870 		}
1871 		length = cifs_flock_len(flock);
1872 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1873 			type = CIFS_RDLCK;
1874 		else
1875 			type = CIFS_WRLCK;
1876 		lck = list_entry(el, struct lock_to_push, llist);
1877 		lck->pid = hash_lockowner(flock->c.flc_owner);
1878 		lck->netfid = cfile->fid.netfid;
1879 		lck->length = length;
1880 		lck->type = type;
1881 		lck->offset = flock->fl_start;
1882 	}
1883 	spin_unlock(&flctx->flc_lock);
1884 
1885 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1886 		int stored_rc;
1887 
1888 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1889 					     lck->offset, lck->length, NULL,
1890 					     lck->type, 0);
1891 		if (stored_rc)
1892 			rc = stored_rc;
1893 		list_del(&lck->llist);
1894 		kfree(lck);
1895 	}
1896 
1897 out:
1898 	free_xid(xid);
1899 	return rc;
1900 err_out:
1901 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1902 		list_del(&lck->llist);
1903 		kfree(lck);
1904 	}
1905 	goto out;
1906 }
1907 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1908 
1909 static int
1910 cifs_push_locks(struct cifsFileInfo *cfile)
1911 {
1912 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1913 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1914 	int rc = 0;
1915 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1916 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1917 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1918 
1919 	/* we are going to update can_cache_brlcks here - need write access */
1920 	cifs_down_write(&cinode->lock_sem);
1921 	if (!cinode->can_cache_brlcks) {
1922 		up_write(&cinode->lock_sem);
1923 		return rc;
1924 	}
1925 
1926 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1927 	if (cap_unix(tcon->ses) &&
1928 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1929 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1930 		rc = cifs_push_posix_locks(cfile);
1931 	else
1932 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1933 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1934 
1935 	cinode->can_cache_brlcks = false;
1936 	up_write(&cinode->lock_sem);
1937 	return rc;
1938 }
1939 
1940 static void
1941 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1942 		bool *wait_flag, struct TCP_Server_Info *server)
1943 {
1944 	if (flock->c.flc_flags & FL_POSIX)
1945 		cifs_dbg(FYI, "Posix\n");
1946 	if (flock->c.flc_flags & FL_FLOCK)
1947 		cifs_dbg(FYI, "Flock\n");
1948 	if (flock->c.flc_flags & FL_SLEEP) {
1949 		cifs_dbg(FYI, "Blocking lock\n");
1950 		*wait_flag = true;
1951 	}
1952 	if (flock->c.flc_flags & FL_ACCESS)
1953 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1954 	if (flock->c.flc_flags & FL_LEASE)
1955 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1956 	if (flock->c.flc_flags &
1957 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1958 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1959 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1960 		         flock->c.flc_flags);
1961 
1962 	*type = server->vals->large_lock_type;
1963 	if (lock_is_write(flock)) {
1964 		cifs_dbg(FYI, "F_WRLCK\n");
1965 		*type |= server->vals->exclusive_lock_type;
1966 		*lock = 1;
1967 	} else if (lock_is_unlock(flock)) {
1968 		cifs_dbg(FYI, "F_UNLCK\n");
1969 		*type |= server->vals->unlock_lock_type;
1970 		*unlock = 1;
1971 		/* Check if unlock includes more than one lock range */
1972 	} else if (lock_is_read(flock)) {
1973 		cifs_dbg(FYI, "F_RDLCK\n");
1974 		*type |= server->vals->shared_lock_type;
1975 		*lock = 1;
1976 	} else if (flock->c.flc_type == F_EXLCK) {
1977 		cifs_dbg(FYI, "F_EXLCK\n");
1978 		*type |= server->vals->exclusive_lock_type;
1979 		*lock = 1;
1980 	} else if (flock->c.flc_type == F_SHLCK) {
1981 		cifs_dbg(FYI, "F_SHLCK\n");
1982 		*type |= server->vals->shared_lock_type;
1983 		*lock = 1;
1984 	} else
1985 		cifs_dbg(FYI, "Unknown type of lock\n");
1986 }
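
/*
 * Editor's summary of the decoding above (the *_lock_type values come from
 * server->vals, so the actual wire bits are dialect-specific):
 *
 *	F_WRLCK / F_EXLCK  ->  exclusive_lock_type,  *lock   = 1
 *	F_RDLCK / F_SHLCK  ->  shared_lock_type,     *lock   = 1
 *	F_UNLCK            ->  unlock_lock_type,     *unlock = 1
 *
 * with large_lock_type always or'd in first, and FL_SLEEP turning into
 * *wait_flag = true.
 */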
1987 
1988 static int
1989 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1990 	   bool wait_flag, bool posix_lck, unsigned int xid)
1991 {
1992 	int rc = 0;
1993 	__u64 length = cifs_flock_len(flock);
1994 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1995 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1996 	struct TCP_Server_Info *server = tcon->ses->server;
1997 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1998 	__u16 netfid = cfile->fid.netfid;
1999 
2000 	if (posix_lck) {
2001 		int posix_lock_type;
2002 
2003 		rc = cifs_posix_lock_test(file, flock);
2004 		if (!rc)
2005 			return rc;
2006 
2007 		if (type & server->vals->shared_lock_type)
2008 			posix_lock_type = CIFS_RDLCK;
2009 		else
2010 			posix_lock_type = CIFS_WRLCK;
2011 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2012 				      hash_lockowner(flock->c.flc_owner),
2013 				      flock->fl_start, length, flock,
2014 				      posix_lock_type, wait_flag);
2015 		return rc;
2016 	}
2017 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2018 
2019 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2020 	if (!rc)
2021 		return rc;
2022 
2023 	/* BB we could chain these into one lock request BB */
2024 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2025 				    1, 0, false);
2026 	if (rc == 0) {
2027 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2028 					    type, 0, 1, false);
2029 		flock->c.flc_type = F_UNLCK;
2030 		if (rc != 0)
2031 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2032 				 rc);
2033 		return 0;
2034 	}
2035 
2036 	if (type & server->vals->shared_lock_type) {
2037 		flock->c.flc_type = F_WRLCK;
2038 		return 0;
2039 	}
2040 
2041 	type &= ~server->vals->exclusive_lock_type;
2042 
2043 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2044 				    type | server->vals->shared_lock_type,
2045 				    1, 0, false);
2046 	if (rc == 0) {
2047 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2048 			type | server->vals->shared_lock_type, 0, 1, false);
2049 		flock->c.flc_type = F_RDLCK;
2050 		if (rc != 0)
2051 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2052 				 rc);
2053 	} else
2054 		flock->c.flc_type = F_WRLCK;
2055 
2056 	return 0;
2057 }
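
/*
 * Editor's sketch of the probe above: the lock test is implemented by
 * briefly taking a non-blocking mandatory lock and releasing it again:
 *
 *	take requested lock (no wait) -> success: no conflict, so unlock
 *	                                 and report F_UNLCK
 *	                              -> failure: if a shared-lock probe
 *	                                 also fails the conflict is
 *	                                 exclusive (report F_WRLCK),
 *	                                 otherwise a read lock is in the
 *	                                 way (report F_RDLCK)
 */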
2058 
2059 void
2060 cifs_move_llist(struct list_head *source, struct list_head *dest)
2061 {
2062 	struct list_head *li, *tmp;
2063 	list_for_each_safe(li, tmp, source)
2064 		list_move(li, dest);
2065 }
2066 
2067 void
2068 cifs_free_llist(struct list_head *llist)
2069 {
2070 	struct cifsLockInfo *li, *tmp;
2071 	list_for_each_entry_safe(li, tmp, llist, llist) {
2072 		cifs_del_lock_waiters(li);
2073 		list_del(&li->llist);
2074 		kfree(li);
2075 	}
2076 }
2077 
2078 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2079 int
2080 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2081 		  unsigned int xid)
2082 {
2083 	int rc = 0, stored_rc;
2084 	static const int types[] = {
2085 		LOCKING_ANDX_LARGE_FILES,
2086 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2087 	};
2088 	unsigned int i;
2089 	unsigned int max_num, num, max_buf;
2090 	LOCKING_ANDX_RANGE *buf, *cur;
2091 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2092 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2093 	struct cifsLockInfo *li, *tmp;
2094 	__u64 length = cifs_flock_len(flock);
2095 	struct list_head tmp_llist;
2096 
2097 	INIT_LIST_HEAD(&tmp_llist);
2098 
2099 	/*
2100 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2101 	 * and check it before using.
2102 	 */
2103 	max_buf = tcon->ses->server->maxBuf;
2104 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2105 		return -EINVAL;
2106 
2107 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2108 		     PAGE_SIZE);
2109 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2110 			PAGE_SIZE);
2111 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2112 						sizeof(LOCKING_ANDX_RANGE);
2113 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2114 	if (!buf)
2115 		return -ENOMEM;
2116 
2117 	cifs_down_write(&cinode->lock_sem);
2118 	for (i = 0; i < 2; i++) {
2119 		cur = buf;
2120 		num = 0;
2121 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2122 			if (flock->fl_start > li->offset ||
2123 			    (flock->fl_start + length) <
2124 			    (li->offset + li->length))
2125 				continue;
2126 			if (current->tgid != li->pid)
2127 				continue;
2128 			if (types[i] != li->type)
2129 				continue;
2130 			if (cinode->can_cache_brlcks) {
2131 				/*
2132 				 * We can cache brlock requests - simply remove
2133 				 * a lock from the file's list.
2134 				 */
2135 				list_del(&li->llist);
2136 				cifs_del_lock_waiters(li);
2137 				kfree(li);
2138 				continue;
2139 			}
2140 			cur->Pid = cpu_to_le16(li->pid);
2141 			cur->LengthLow = cpu_to_le32((u32)li->length);
2142 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2143 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2144 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2145 			/*
2146 			 * We need to save the lock here so that we can add it back
2147 			 * to the file's list if the unlock range request fails on
2148 			 * the server.
2149 			 */
2150 			list_move(&li->llist, &tmp_llist);
2151 			if (++num == max_num) {
2152 				stored_rc = cifs_lockv(xid, tcon,
2153 						       cfile->fid.netfid,
2154 						       li->type, num, 0, buf);
2155 				if (stored_rc) {
2156 					/*
2157 					 * We failed on the unlock range
2158 					 * request - add all locks from the tmp
2159 					 * list to the head of the file's list.
2160 					 */
2161 					cifs_move_llist(&tmp_llist,
2162 							&cfile->llist->locks);
2163 					rc = stored_rc;
2164 				} else
2165 					/*
2166 					 * The unlock range request succeed -
2167 					 * free the tmp list.
2168 					 */
2169 					cifs_free_llist(&tmp_llist);
2170 				cur = buf;
2171 				num = 0;
2172 			} else
2173 				cur++;
2174 		}
2175 		if (num) {
2176 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2177 					       types[i], num, 0, buf);
2178 			if (stored_rc) {
2179 				cifs_move_llist(&tmp_llist,
2180 						&cfile->llist->locks);
2181 				rc = stored_rc;
2182 			} else
2183 				cifs_free_llist(&tmp_llist);
2184 		}
2185 	}
2186 
2187 	up_write(&cinode->lock_sem);
2188 	kfree(buf);
2189 	return rc;
2190 }
2191 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2192 
2193 static int
2194 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2195 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2196 	   unsigned int xid)
2197 {
2198 	int rc = 0;
2199 	__u64 length = cifs_flock_len(flock);
2200 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2201 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2202 	struct TCP_Server_Info *server = tcon->ses->server;
2203 	struct inode *inode = d_inode(cfile->dentry);
2204 
2205 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2206 	if (posix_lck) {
2207 		int posix_lock_type;
2208 
2209 		rc = cifs_posix_lock_set(file, flock);
2210 		if (rc <= FILE_LOCK_DEFERRED)
2211 			return rc;
2212 
2213 		if (type & server->vals->shared_lock_type)
2214 			posix_lock_type = CIFS_RDLCK;
2215 		else
2216 			posix_lock_type = CIFS_WRLCK;
2217 
2218 		if (unlock == 1)
2219 			posix_lock_type = CIFS_UNLCK;
2220 
2221 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2222 				      hash_lockowner(flock->c.flc_owner),
2223 				      flock->fl_start, length,
2224 				      NULL, posix_lock_type, wait_flag);
2225 		goto out;
2226 	}
2227 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2228 	if (lock) {
2229 		struct cifsLockInfo *lock;
2230 
2231 		lock = cifs_lock_init(flock->fl_start, length, type,
2232 				      flock->c.flc_flags);
2233 		if (!lock)
2234 			return -ENOMEM;
2235 
2236 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2237 		if (rc < 0) {
2238 			kfree(lock);
2239 			return rc;
2240 		}
2241 		if (!rc)
2242 			goto out;
2243 
2244 		/*
2245 		 * A Windows 7 server can delay breaking a lease from read to None
2246 		 * if we set a byte-range lock on a file - break it explicitly
2247 		 * before sending the lock to the server to be sure the next
2248 		 * read won't conflict with non-overlapping locks due to
2249 		 * page reading.
2250 		 */
2251 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2252 					CIFS_CACHE_READ(CIFS_I(inode))) {
2253 			cifs_zap_mapping(inode);
2254 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2255 				 inode);
2256 			CIFS_I(inode)->oplock = 0;
2257 		}
2258 
2259 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2260 					    type, 1, 0, wait_flag);
2261 		if (rc) {
2262 			kfree(lock);
2263 			return rc;
2264 		}
2265 
2266 		cifs_lock_add(cfile, lock);
2267 	} else if (unlock)
2268 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2269 
2270 out:
2271 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2272 		/*
2273 		 * If this is a request to remove all locks because we
2274 		 * are closing the file, it doesn't matter if the
2275 		 * unlocking failed as both cifs.ko and the SMB server
2276 		 * remove the lock on file close
2277 		 */
2278 		if (rc) {
2279 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2280 			if (!(flock->c.flc_flags & FL_CLOSE))
2281 				return rc;
2282 		}
2283 		rc = locks_lock_file_wait(file, flock);
2284 	}
2285 	return rc;
2286 }
2287 
2288 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2289 {
2290 	int rc, xid;
2291 	int lock = 0, unlock = 0;
2292 	bool wait_flag = false;
2293 	bool posix_lck = false;
2294 	struct cifs_sb_info *cifs_sb;
2295 	struct cifs_tcon *tcon;
2296 	struct cifsFileInfo *cfile;
2297 	__u32 type;
2298 
2299 	xid = get_xid();
2300 
2301 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2302 		rc = -ENOLCK;
2303 		free_xid(xid);
2304 		return rc;
2305 	}
2306 
2307 	cfile = (struct cifsFileInfo *)file->private_data;
2308 	tcon = tlink_tcon(cfile->tlink);
2309 
2310 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2311 			tcon->ses->server);
2312 	cifs_sb = CIFS_FILE_SB(file);
2313 
2314 	if (cap_unix(tcon->ses) &&
2315 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2316 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2317 		posix_lck = true;
2318 
2319 	if (!lock && !unlock) {
2320 		/*
2321 		 * if the request is neither a lock nor an unlock, we do not
2322 		 * know what to do with it
2323 		 */
2324 		rc = -EOPNOTSUPP;
2325 		free_xid(xid);
2326 		return rc;
2327 	}
2328 
2329 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2330 			xid);
2331 	free_xid(xid);
2332 	return rc;
2335 }
2336 
2337 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2338 {
2339 	int rc, xid;
2340 	int lock = 0, unlock = 0;
2341 	bool wait_flag = false;
2342 	bool posix_lck = false;
2343 	struct cifs_sb_info *cifs_sb;
2344 	struct cifs_tcon *tcon;
2345 	struct cifsFileInfo *cfile;
2346 	__u32 type;
2347 
2348 	rc = -EACCES;
2349 	xid = get_xid();
2350 
2351 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x flags=0x%x type=0x%x r=%lld:%lld\n", __func__, file, cmd,
2352 		 flock->c.flc_flags, flock->c.flc_type,
2353 		 (long long)flock->fl_start,
2354 		 (long long)flock->fl_end);
2355 
2356 	cfile = (struct cifsFileInfo *)file->private_data;
2357 	tcon = tlink_tcon(cfile->tlink);
2358 
2359 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2360 			tcon->ses->server);
2361 	cifs_sb = CIFS_FILE_SB(file);
2362 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2363 
2364 	if (cap_unix(tcon->ses) &&
2365 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2366 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2367 		posix_lck = true;
2368 	/*
2369 	 * BB add code here to normalize offset and length to account for
2370 	 * negative length which we can not accept over the wire.
2371 	 */
2372 	if (IS_GETLK(cmd)) {
2373 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2374 		free_xid(xid);
2375 		return rc;
2376 	}
2377 
2378 	if (!lock && !unlock) {
2379 		/*
2380 		 * if the request is neither a lock nor an unlock, we do not
2381 		 * know what to do with it
2382 		 */
2383 		free_xid(xid);
2384 		return -EOPNOTSUPP;
2385 	}
2386 
2387 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2388 			xid);
2389 	free_xid(xid);
2390 	return rc;
2391 }
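
/*
 * Editor's example (hypothetical userspace snippet, for context): a POSIX
 * byte-range lock such as the one below enters the kernel via fcntl(2) and
 * reaches cifs_lock() with FL_POSIX set (plus FL_SLEEP for the blocking
 * F_SETLKW variant), while flock(2) calls arrive with FL_FLOCK set and are
 * routed to cifs_flock() above.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 4096,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);
 */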
2392 
2393 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2394 				      bool was_async)
2395 {
2396 	struct netfs_io_request *wreq = wdata->rreq;
2397 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2398 	loff_t wrend;
2399 
2400 	if (result > 0) {
2401 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2402 
2403 		if (wrend > ictx->zero_point &&
2404 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2405 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2406 			ictx->zero_point = wrend;
2407 		if (wrend > ictx->remote_i_size)
2408 			netfs_resize_file(ictx, wrend, true);
2409 	}
2410 
2411 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2412 }
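
/*
 * Editor's worked example (made-up numbers): if the subrequest started at
 * file offset 4096 with 512 bytes already transferred and this call reports
 * result == 1024, then wrend = 4096 + 512 + 1024 = 5632; for an unbuffered
 * or DIO write a zero_point below 5632 is pulled up to it, and
 * remote_i_size grows to 5632 if the file just got longer.
 */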
2413 
2414 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2415 					bool fsuid_only)
2416 {
2417 	struct cifsFileInfo *open_file = NULL;
2418 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2419 
2420 	/* only filter by fsuid on multiuser mounts */
2421 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2422 		fsuid_only = false;
2423 
2424 	spin_lock(&cifs_inode->open_file_lock);
2425 	/* we could simply take the first list entry since write-only entries
2426 	   are always at the end of the list, but the first entry might have
2427 	   a close pending, so we walk the whole list */
2428 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2429 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2430 			continue;
2431 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2432 			if (!open_file->invalidHandle) {
2433 				/* found a good file */
2434 				/* lock it so it will not be closed on us */
2435 				cifsFileInfo_get(open_file);
2436 				spin_unlock(&cifs_inode->open_file_lock);
2437 				return open_file;
2438 			} /* else might as well continue, and look for
2439 			     another, or simply have the caller reopen it
2440 			     again rather than trying to fix this handle */
2441 		} else /* write only file */
2442 			break; /* write only files are last so must be done */
2443 	}
2444 	spin_unlock(&cifs_inode->open_file_lock);
2445 	return NULL;
2446 }
2447 
2448 /* Return -EBADF if no handle is found, and a general rc otherwise */
2449 int
2450 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2451 		       struct cifsFileInfo **ret_file)
2452 {
2453 	struct cifsFileInfo *open_file, *inv_file = NULL;
2454 	struct cifs_sb_info *cifs_sb;
2455 	bool any_available = false;
2456 	int rc = -EBADF;
2457 	unsigned int refind = 0;
2458 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2459 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2460 	*ret_file = NULL;
2461 
2462 	/*
2463 	 * Having a null inode here (because mapping->host was set to zero by
2464 	 * the VFS or MM) should not happen, but we had reports of an oops (due
2465 	 * to it being zero) during stress testcases, so we need to check for it
2466 	 */
2467 
2468 	if (cifs_inode == NULL) {
2469 		cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2470 		dump_stack();
2471 		return rc;
2472 	}
2473 
2474 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2475 
2476 	/* only filter by fsuid on multiuser mounts */
2477 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2478 		fsuid_only = false;
2479 
2480 	spin_lock(&cifs_inode->open_file_lock);
2481 refind_writable:
2482 	if (refind > MAX_REOPEN_ATT) {
2483 		spin_unlock(&cifs_inode->open_file_lock);
2484 		return rc;
2485 	}
2486 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2487 		if (!any_available && open_file->pid != current->tgid)
2488 			continue;
2489 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2490 			continue;
2491 		if (with_delete && !(open_file->fid.access & DELETE))
2492 			continue;
2493 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2494 			if (!open_file->invalidHandle) {
2495 				/* found a good writable file */
2496 				cifsFileInfo_get(open_file);
2497 				spin_unlock(&cifs_inode->open_file_lock);
2498 				*ret_file = open_file;
2499 				return 0;
2500 			} else {
2501 				if (!inv_file)
2502 					inv_file = open_file;
2503 			}
2504 		}
2505 	}
2506 	/* couldn't find a usable FH with the same pid, try any available */
2507 	if (!any_available) {
2508 		any_available = true;
2509 		goto refind_writable;
2510 	}
2511 
2512 	if (inv_file) {
2513 		any_available = false;
2514 		cifsFileInfo_get(inv_file);
2515 	}
2516 
2517 	spin_unlock(&cifs_inode->open_file_lock);
2518 
2519 	if (inv_file) {
2520 		rc = cifs_reopen_file(inv_file, false);
2521 		if (!rc) {
2522 			*ret_file = inv_file;
2523 			return 0;
2524 		}
2525 
2526 		spin_lock(&cifs_inode->open_file_lock);
2527 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2528 		spin_unlock(&cifs_inode->open_file_lock);
2529 		cifsFileInfo_put(inv_file);
2530 		++refind;
2531 		inv_file = NULL;
2532 		spin_lock(&cifs_inode->open_file_lock);
2533 		goto refind_writable;
2534 	}
2535 
2536 	return rc;
2537 }
2538 
2539 struct cifsFileInfo *
2540 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2541 {
2542 	struct cifsFileInfo *cfile;
2543 	int rc;
2544 
2545 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2546 	if (rc)
2547 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2548 
2549 	return cfile;
2550 }
2551 
2552 int
2553 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2554 		       int flags,
2555 		       struct cifsFileInfo **ret_file)
2556 {
2557 	struct cifsFileInfo *cfile;
2558 	void *page = alloc_dentry_path();
2559 
2560 	*ret_file = NULL;
2561 
2562 	spin_lock(&tcon->open_file_lock);
2563 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2564 		struct cifsInodeInfo *cinode;
2565 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2566 		if (IS_ERR(full_path)) {
2567 			spin_unlock(&tcon->open_file_lock);
2568 			free_dentry_path(page);
2569 			return PTR_ERR(full_path);
2570 		}
2571 		if (strcmp(full_path, name))
2572 			continue;
2573 
2574 		cinode = CIFS_I(d_inode(cfile->dentry));
2575 		spin_unlock(&tcon->open_file_lock);
2576 		free_dentry_path(page);
2577 		return cifs_get_writable_file(cinode, flags, ret_file);
2578 	}
2579 
2580 	spin_unlock(&tcon->open_file_lock);
2581 	free_dentry_path(page);
2582 	return -ENOENT;
2583 }
2584 
2585 int
2586 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2587 		       struct cifsFileInfo **ret_file)
2588 {
2589 	struct cifsFileInfo *cfile;
2590 	void *page = alloc_dentry_path();
2591 
2592 	*ret_file = NULL;
2593 
2594 	spin_lock(&tcon->open_file_lock);
2595 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2596 		struct cifsInodeInfo *cinode;
2597 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2598 		if (IS_ERR(full_path)) {
2599 			spin_unlock(&tcon->open_file_lock);
2600 			free_dentry_path(page);
2601 			return PTR_ERR(full_path);
2602 		}
2603 		if (strcmp(full_path, name))
2604 			continue;
2605 
2606 		cinode = CIFS_I(d_inode(cfile->dentry));
2607 		spin_unlock(&tcon->open_file_lock);
2608 		free_dentry_path(page);
2609 		*ret_file = find_readable_file(cinode, 0);
2610 		return *ret_file ? 0 : -ENOENT;
2611 	}
2612 
2613 	spin_unlock(&tcon->open_file_lock);
2614 	free_dentry_path(page);
2615 	return -ENOENT;
2616 }
2617 
2618 /*
2619  * Flush data on a strict file.
2620  */
2621 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2622 		      int datasync)
2623 {
2624 	unsigned int xid;
2625 	int rc = 0;
2626 	struct cifs_tcon *tcon;
2627 	struct TCP_Server_Info *server;
2628 	struct cifsFileInfo *smbfile = file->private_data;
2629 	struct inode *inode = file_inode(file);
2630 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2631 
2632 	rc = file_write_and_wait_range(file, start, end);
2633 	if (rc) {
2634 		trace_cifs_fsync_err(inode->i_ino, rc);
2635 		return rc;
2636 	}
2637 
2638 	xid = get_xid();
2639 
2640 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2641 		 file, datasync);
2642 
2643 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2644 		rc = cifs_zap_mapping(inode);
2645 		if (rc) {
2646 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2647 			rc = 0; /* don't care about it in fsync */
2648 		}
2649 	}
2650 
2651 	tcon = tlink_tcon(smbfile->tlink);
2652 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2653 		server = tcon->ses->server;
2654 		if (server->ops->flush == NULL) {
2655 			rc = -ENOSYS;
2656 			goto strict_fsync_exit;
2657 		}
2658 
2659 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2660 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2661 			if (smbfile) {
2662 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2663 				cifsFileInfo_put(smbfile);
2664 			} else
2665 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2666 		} else
2667 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2668 	}
2669 
2670 strict_fsync_exit:
2671 	free_xid(xid);
2672 	return rc;
2673 }
2674 
2675 /*
2676  * Flush data on a non-strict file.
2677  */
2678 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2679 {
2680 	unsigned int xid;
2681 	int rc = 0;
2682 	struct cifs_tcon *tcon;
2683 	struct TCP_Server_Info *server;
2684 	struct cifsFileInfo *smbfile = file->private_data;
2685 	struct inode *inode = file_inode(file);
2686 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2687 
2688 	rc = file_write_and_wait_range(file, start, end);
2689 	if (rc) {
2690 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2691 		return rc;
2692 	}
2693 
2694 	xid = get_xid();
2695 
2696 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2697 		 file, datasync);
2698 
2699 	tcon = tlink_tcon(smbfile->tlink);
2700 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2701 		server = tcon->ses->server;
2702 		if (server->ops->flush == NULL) {
2703 			rc = -ENOSYS;
2704 			goto fsync_exit;
2705 		}
2706 
2707 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2708 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2709 			if (smbfile) {
2710 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2711 				cifsFileInfo_put(smbfile);
2712 			} else
2713 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2714 		} else
2715 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2716 	}
2717 
2718 fsync_exit:
2719 	free_xid(xid);
2720 	return rc;
2721 }
2722 
2723 /*
2724  * As file closes, flush all cached write data for this inode checking
2725  * for write behind errors.
2726  */
2727 int cifs_flush(struct file *file, fl_owner_t id)
2728 {
2729 	struct inode *inode = file_inode(file);
2730 	int rc = 0;
2731 
2732 	if (file->f_mode & FMODE_WRITE)
2733 		rc = filemap_write_and_wait(inode->i_mapping);
2734 
2735 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2736 	if (rc) {
2737 		/* get more nuanced writeback errors */
2738 		rc = filemap_check_wb_err(file->f_mapping, 0);
2739 		trace_cifs_flush_err(inode->i_ino, rc);
2740 	}
2741 	return rc;
2742 }
2743 
2744 static ssize_t
2745 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2746 {
2747 	struct file *file = iocb->ki_filp;
2748 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2749 	struct inode *inode = file->f_mapping->host;
2750 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2751 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2752 	ssize_t rc;
2753 
2754 	rc = netfs_start_io_write(inode);
2755 	if (rc < 0)
2756 		return rc;
2757 
2758 	/*
2759 	 * We need to hold the sem to be sure nobody modifies the lock list
2760 	 * with a brlock that prevents writing.
2761 	 */
2762 	down_read(&cinode->lock_sem);
2763 
2764 	rc = generic_write_checks(iocb, from);
2765 	if (rc <= 0)
2766 		goto out;
2767 
2768 	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2769 				     server->vals->exclusive_lock_type, 0,
2770 				     NULL, CIFS_WRITE_OP))
2771 		rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2772 	else
2773 		rc = -EACCES;
2774 out:
2775 	up_read(&cinode->lock_sem);
2776 	netfs_end_io_write(inode);
2777 	if (rc > 0)
2778 		rc = generic_write_sync(iocb, rc);
2779 	return rc;
2780 }
2781 
2782 ssize_t
2783 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2784 {
2785 	struct inode *inode = file_inode(iocb->ki_filp);
2786 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2787 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2788 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2789 						iocb->ki_filp->private_data;
2790 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2791 	ssize_t written;
2792 
2793 	written = cifs_get_writer(cinode);
2794 	if (written)
2795 		return written;
2796 
2797 	if (CIFS_CACHE_WRITE(cinode)) {
2798 		if (cap_unix(tcon->ses) &&
2799 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2800 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2801 			written = netfs_file_write_iter(iocb, from);
2802 			goto out;
2803 		}
2804 		written = cifs_writev(iocb, from);
2805 		goto out;
2806 	}
2807 	/*
2808 	 * For non-oplocked files in strict cache mode we need to write the data
2809 	 * to the server exactly from pos to pos+len-1 rather than flush all
2810 	 * affected pages, because it may cause an error with mandatory locks on
2811 	 * these pages but not on the region from pos to pos+len-1.
2812 	 */
2813 	written = netfs_file_write_iter(iocb, from);
2814 	if (CIFS_CACHE_READ(cinode)) {
2815 		/*
2816 		 * We have read level caching and we have just sent a write
2817 		 * request to the server thus making data in the cache stale.
2818 		 * Zap the cache and set oplock/lease level to NONE to avoid
2819 		 * reading stale data from the cache. All subsequent read
2820 		 * operations will read new data from the server.
2821 		 */
2822 		cifs_zap_mapping(inode);
2823 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2824 			 inode);
2825 		cinode->oplock = 0;
2826 	}
2827 out:
2828 	cifs_put_writer(cinode);
2829 	return written;
2830 }
2831 
2832 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2833 {
2834 	ssize_t rc;
2835 	struct inode *inode = file_inode(iocb->ki_filp);
2836 
2837 	if (iocb->ki_flags & IOCB_DIRECT)
2838 		return netfs_unbuffered_read_iter(iocb, iter);
2839 
2840 	rc = cifs_revalidate_mapping(inode);
2841 	if (rc)
2842 		return rc;
2843 
2844 	return netfs_file_read_iter(iocb, iter);
2845 }
2846 
2847 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2848 {
2849 	struct inode *inode = file_inode(iocb->ki_filp);
2850 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2851 	ssize_t written;
2852 	int rc;
2853 
2854 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2855 		written = netfs_unbuffered_write_iter(iocb, from);
2856 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2857 			cifs_zap_mapping(inode);
2858 			cifs_dbg(FYI,
2859 				 "Set no oplock for inode=%p after a write operation\n",
2860 				 inode);
2861 			cinode->oplock = 0;
2862 		}
2863 		return written;
2864 	}
2865 
2866 	written = cifs_get_writer(cinode);
2867 	if (written)
2868 		return written;
2869 
2870 	written = netfs_file_write_iter(iocb, from);
2871 
2872 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2873 		rc = filemap_fdatawrite(inode->i_mapping);
2874 		if (rc)
2875 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2876 				 rc, inode);
2877 	}
2878 
2879 	cifs_put_writer(cinode);
2880 	return written;
2881 }
2882 
2883 ssize_t
2884 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2885 {
2886 	struct inode *inode = file_inode(iocb->ki_filp);
2887 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2888 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2889 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2890 						iocb->ki_filp->private_data;
2891 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2892 	int rc = -EACCES;
2893 
2894 	/*
2895 	 * In strict cache mode we need to read from the server all the time
2896 	 * if we don't have a level II oplock, because the server can delay the
2897 	 * mtime change - so we can't make a decision about invalidating the
2898 	 * inode. We can also fail with page reading if there are mandatory locks
2899 	 * on pages affected by this read but not on the region from pos to
2900 	 * pos+len-1.
2901 	 */
2902 	if (!CIFS_CACHE_READ(cinode))
2903 		return netfs_unbuffered_read_iter(iocb, to);
2904 
2905 	if (cap_unix(tcon->ses) &&
2906 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2907 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2908 		if (iocb->ki_flags & IOCB_DIRECT)
2909 			return netfs_unbuffered_read_iter(iocb, to);
2910 		return netfs_buffered_read_iter(iocb, to);
2911 	}
2912 
2913 	/*
2914 	 * We need to hold the sem to be sure nobody modifies the lock list
2915 	 * with a brlock that prevents reading.
2916 	 */
2917 	if (iocb->ki_flags & IOCB_DIRECT) {
2918 		rc = netfs_start_io_direct(inode);
2919 		if (rc < 0)
2920 			goto out;
2921 		rc = -EACCES;
2922 		down_read(&cinode->lock_sem);
2923 		if (!cifs_find_lock_conflict(
2924 			    cfile, iocb->ki_pos, iov_iter_count(to),
2925 			    tcon->ses->server->vals->shared_lock_type,
2926 			    0, NULL, CIFS_READ_OP))
2927 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2928 		up_read(&cinode->lock_sem);
2929 		netfs_end_io_direct(inode);
2930 	} else {
2931 		rc = netfs_start_io_read(inode);
2932 		if (rc < 0)
2933 			goto out;
2934 		rc = -EACCES;
2935 		down_read(&cinode->lock_sem);
2936 		if (!cifs_find_lock_conflict(
2937 			    cfile, iocb->ki_pos, iov_iter_count(to),
2938 			    tcon->ses->server->vals->shared_lock_type,
2939 			    0, NULL, CIFS_READ_OP))
2940 			rc = filemap_read(iocb, to, 0);
2941 		up_read(&cinode->lock_sem);
2942 		netfs_end_io_read(inode);
2943 	}
2944 out:
2945 	return rc;
2946 }
2947 
2948 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2949 {
2950 	return netfs_page_mkwrite(vmf, NULL);
2951 }
2952 
2953 static const struct vm_operations_struct cifs_file_vm_ops = {
2954 	.fault = filemap_fault,
2955 	.map_pages = filemap_map_pages,
2956 	.page_mkwrite = cifs_page_mkwrite,
2957 };
2958 
2959 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2960 {
2961 	int xid, rc = 0;
2962 	struct inode *inode = file_inode(file);
2963 
2964 	xid = get_xid();
2965 
2966 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
2967 		rc = cifs_zap_mapping(inode);
2968 	if (!rc)
2969 		rc = generic_file_mmap(file, vma);
2970 	if (!rc)
2971 		vma->vm_ops = &cifs_file_vm_ops;
2972 
2973 	free_xid(xid);
2974 	return rc;
2975 }
2976 
2977 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2978 {
2979 	int rc, xid;
2980 
2981 	xid = get_xid();
2982 
2983 	rc = cifs_revalidate_file(file);
2984 	if (rc)
2985 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2986 			 rc);
2987 	if (!rc)
2988 		rc = generic_file_mmap(file, vma);
2989 	if (!rc)
2990 		vma->vm_ops = &cifs_file_vm_ops;
2991 
2992 	free_xid(xid);
2993 	return rc;
2994 }
2995 
2996 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2997 {
2998 	struct cifsFileInfo *open_file;
2999 
3000 	spin_lock(&cifs_inode->open_file_lock);
3001 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3002 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3003 			spin_unlock(&cifs_inode->open_file_lock);
3004 			return 1;
3005 		}
3006 	}
3007 	spin_unlock(&cifs_inode->open_file_lock);
3008 	return 0;
3009 }
3010 
3011 /* We do not want to update the file size from the server for inodes
3012    open for write, to avoid races with writepage extending the file.
3013    In the future we could consider allowing a refresh of the inode
3014    only on increases in the file size, but this is tricky to do
3015    without racing with writebehind page caching in the current
3016    Linux kernel design */
3017 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3018 			    bool from_readdir)
3019 {
3020 	if (!cifsInode)
3021 		return true;
3022 
3023 	if (is_inode_writable(cifsInode) ||
3024 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3025 		/* This inode is open for write at least once */
3026 		struct cifs_sb_info *cifs_sb;
3027 
3028 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3029 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3030 			/* since no page cache to corrupt on directio
3031 			we can change size safely */
3032 			return true;
3033 		}
3034 
3035 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3036 			return true;
3037 
3038 		return false;
3039 	} else
3040 		return true;
3041 }
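
/*
 * Editor's summary of the decision above:
 *
 *	inode not open for write (and no RW oplock via readdir) -> safe
 *	direct I/O mount (no page cache to corrupt)             -> safe
 *	new size larger than the cached i_size                  -> safe
 *	otherwise (writable, cached pages, shrink or no growth) -> not safe
 */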
3042 
3043 void cifs_oplock_break(struct work_struct *work)
3044 {
3045 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3046 						  oplock_break);
3047 	struct inode *inode = d_inode(cfile->dentry);
3048 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3049 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3050 	struct cifs_tcon *tcon;
3051 	struct TCP_Server_Info *server;
3052 	struct tcon_link *tlink;
3053 	int rc = 0;
3054 	bool purge_cache = false, oplock_break_cancelled;
3055 	__u64 persistent_fid, volatile_fid;
3056 	__u16 net_fid;
3057 
3058 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3059 			TASK_UNINTERRUPTIBLE);
3060 
3061 	tlink = cifs_sb_tlink(cifs_sb);
3062 	if (IS_ERR(tlink))
3063 		goto out;
3064 	tcon = tlink_tcon(tlink);
3065 	server = tcon->ses->server;
3066 
3067 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3068 				      cfile->oplock_epoch, &purge_cache);
3069 
3070 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3071 						cifs_has_mand_locks(cinode)) {
3072 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3073 			 inode);
3074 		cinode->oplock = 0;
3075 	}
3076 
3077 	if (inode && S_ISREG(inode->i_mode)) {
3078 		if (CIFS_CACHE_READ(cinode))
3079 			break_lease(inode, O_RDONLY);
3080 		else
3081 			break_lease(inode, O_WRONLY);
3082 		rc = filemap_fdatawrite(inode->i_mapping);
3083 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3084 			rc = filemap_fdatawait(inode->i_mapping);
3085 			mapping_set_error(inode->i_mapping, rc);
3086 			cifs_zap_mapping(inode);
3087 		}
3088 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3089 		if (CIFS_CACHE_WRITE(cinode))
3090 			goto oplock_break_ack;
3091 	}
3092 
3093 	rc = cifs_push_locks(cfile);
3094 	if (rc)
3095 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3096 
3097 oplock_break_ack:
3098 	/*
3099 	 * When an oplock break is received and there are no active file
3100 	 * handles, only cached ones, schedule the deferred close immediately
3101 	 * so that a new open will not use the cached handle.
3102 	 */
3103 
3104 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3105 		cifs_close_deferred_file(cinode);
3106 
3107 	persistent_fid = cfile->fid.persistent_fid;
3108 	volatile_fid = cfile->fid.volatile_fid;
3109 	net_fid = cfile->fid.netfid;
3110 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3111 
3112 	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3113 	/*
3114 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3115 	 * an acknowledgment to be sent when the file has already been closed.
3116 	 */
3117 	spin_lock(&cinode->open_file_lock);
3118 	/* check list empty since this can race with kill_sb calling tree disconnect */
3119 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3120 		spin_unlock(&cinode->open_file_lock);
3121 		rc = server->ops->oplock_response(tcon, persistent_fid,
3122 						  volatile_fid, net_fid, cinode);
3123 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3124 	} else
3125 		spin_unlock(&cinode->open_file_lock);
3126 
3127 	cifs_put_tlink(tlink);
3128 out:
3129 	cifs_done_oplock_break(cinode);
3130 }
3131 
3132 static int cifs_swap_activate(struct swap_info_struct *sis,
3133 			      struct file *swap_file, sector_t *span)
3134 {
3135 	struct cifsFileInfo *cfile = swap_file->private_data;
3136 	struct inode *inode = swap_file->f_mapping->host;
3137 	unsigned long blocks;
3138 	long long isize;
3139 
3140 	cifs_dbg(FYI, "swap activate\n");
3141 
3142 	if (!swap_file->f_mapping->a_ops->swap_rw)
3143 		/* Cannot support swap */
3144 		return -EINVAL;
3145 
3146 	spin_lock(&inode->i_lock);
3147 	blocks = inode->i_blocks;
3148 	isize = inode->i_size;
3149 	spin_unlock(&inode->i_lock);
3150 	if (blocks * 512 < isize) {
3151 		pr_warn("swap activate: swapfile has holes\n");
3152 		return -EINVAL;
3153 	}
3154 	*span = sis->pages;
3155 
3156 	pr_warn_once("Swap support over SMB3 is experimental\n");
3157 
3158 	/*
3159 	 * TODO: consider adding ACL (or documenting how) to prevent other
3160 	 * users (on this or other systems) from reading it
3161 	 */
3162 
3164 	/* TODO: add sk_set_memalloc(inet) or similar */
3165 
3166 	if (cfile)
3167 		cfile->swapfile = true;
3168 	/*
3169 	 * TODO: Since file already open, we can't open with DENY_ALL here
3170 	 * but we could add call to grab a byte range lock to prevent others
3171 	 * from reading or writing the file
3172 	 */
3173 
3174 	sis->flags |= SWP_FS_OPS;
3175 	return add_swap_extent(sis, 0, sis->max, 0);
3176 }
3177 
3178 static void cifs_swap_deactivate(struct file *file)
3179 {
3180 	struct cifsFileInfo *cfile = file->private_data;
3181 
3182 	cifs_dbg(FYI, "swap deactivate\n");
3183 
3184 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3185 
3186 	if (cfile)
3187 		cfile->swapfile = false;
3188 
3189 	/* do we need to unpin (or unlock) the file? */
3190 }
3191 
3192 /**
3193  * cifs_swap_rw - SMB3 address space operation for swap I/O
3194  * @iocb: target I/O control block
3195  * @iter: I/O buffer
3196  *
3197  * Perform IO to the swap-file.  This is much like direct IO.
3198  */
3199 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3200 {
3201 	ssize_t ret;
3202 
3203 	if (iov_iter_rw(iter) == READ)
3204 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3205 	else
3206 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3207 	if (ret < 0)
3208 		return ret;
3209 	return 0;
3210 }
3211 
3212 const struct address_space_operations cifs_addr_ops = {
3213 	.read_folio	= netfs_read_folio,
3214 	.readahead	= netfs_readahead,
3215 	.writepages	= netfs_writepages,
3216 	.dirty_folio	= netfs_dirty_folio,
3217 	.release_folio	= netfs_release_folio,
3218 	.direct_IO	= noop_direct_IO,
3219 	.invalidate_folio = netfs_invalidate_folio,
3220 	.migrate_folio	= filemap_migrate_folio,
3221 	/*
3222 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3223 	 * helper if needed
3224 	 */
3225 	.swap_activate	= cifs_swap_activate,
3226 	.swap_deactivate = cifs_swap_deactivate,
3227 	.swap_rw = cifs_swap_rw,
3228 };
3229 
3230 /*
3231  * cifs_readahead requires the server to support a buffer large enough to
3232  * contain the header plus one complete page of data.  Otherwise, we need
3233  * to leave cifs_readahead out of the address space operations.
3234  */
3235 const struct address_space_operations cifs_addr_ops_smallbuf = {
3236 	.read_folio	= netfs_read_folio,
3237 	.writepages	= netfs_writepages,
3238 	.dirty_folio	= netfs_dirty_folio,
3239 	.release_folio	= netfs_release_folio,
3240 	.invalidate_folio = netfs_invalidate_folio,
3241 	.migrate_folio	= filemap_migrate_folio,
3242 };
3243