xref: /linux/fs/smb/client/file.c (revision 332d2c1d713e232e163386c35a3ba0c1b90df83f)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct TCP_Server_Info *server;
53 	struct cifsFileInfo *open_file = req->cfile;
54 	size_t wsize = req->rreq.wsize;
55 	int rc;
56 
57 	if (!wdata->have_xid) {
58 		wdata->xid = get_xid();
59 		wdata->have_xid = true;
60 	}
61 
62 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
63 	wdata->server = server;
64 
65 retry:
66 	if (open_file->invalidHandle) {
67 		rc = cifs_reopen_file(open_file, false);
68 		if (rc < 0) {
69 			if (rc == -EAGAIN)
70 				goto retry;
71 			subreq->error = rc;
72 			return netfs_prepare_write_failed(subreq);
73 		}
74 	}
75 
76 	rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
77 					   &wdata->credits);
78 	if (rc < 0) {
79 		subreq->error = rc;
80 		return netfs_prepare_write_failed(subreq);
81 	}
82 
83 #ifdef CONFIG_CIFS_SMB_DIRECT
84 	if (server->smbd_conn)
85 		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
86 #endif
87 }
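
/*
 * Rough shape of the write path (a sketch; the netfs library drives these
 * hooks): for each outgoing subrequest it first calls ->prepare_write()
 * above to pick a channel and reserve credits, which bounds
 * subreq->max_len, and then ->issue_write() below, which re-checks the
 * credit grant against the final subrequest length before handing the
 * data to ->async_writev().
 */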
88 
89 /*
90  * Issue a subrequest to upload to the server.
91  */
92 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
93 {
94 	struct cifs_io_subrequest *wdata =
95 		container_of(subreq, struct cifs_io_subrequest, subreq);
96 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
97 	int rc;
98 
99 	if (cifs_forced_shutdown(sbi)) {
100 		rc = -EIO;
101 		goto fail;
102 	}
103 
104 	rc = adjust_credits(wdata->server, &wdata->credits, wdata->subreq.len);
105 	if (rc)
106 		goto fail;
107 
108 	rc = -EAGAIN;
109 	if (wdata->req->cfile->invalidHandle)
110 		goto fail;
111 
112 	wdata->server->ops->async_writev(wdata);
113 out:
114 	return;
115 
116 fail:
117 	if (rc == -EAGAIN)
118 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
119 	else
120 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
121 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
122 	cifs_write_subrequest_terminated(wdata, rc, false);
123 	goto out;
124 }
125 
126 /*
127  * Split the read up according to how many credits we can get for each piece.
128  * It's okay to sleep here if we need to wait for more credit to become
129  * available.
130  *
131  * We also choose the server and allocate an operation ID to be cleaned up
132  * later.
133  */
134 static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
135 {
136 	struct netfs_io_request *rreq = subreq->rreq;
137 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
138 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
139 	struct TCP_Server_Info *server = req->server;
140 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
141 	size_t rsize = 0;
142 	int rc;
143 
144 	rdata->xid = get_xid();
145 	rdata->have_xid = true;
146 	rdata->server = server;
147 
148 	if (cifs_sb->ctx->rsize == 0)
149 		cifs_sb->ctx->rsize =
150 			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
151 						     cifs_sb->ctx);
152 
154 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
155 					   &rdata->credits);
156 	if (rc) {
157 		subreq->error = rc;
158 		return false;
159 	}
160 
161 	subreq->len = min_t(size_t, subreq->len, rsize);
162 #ifdef CONFIG_CIFS_SMB_DIRECT
163 	if (server->smbd_conn)
164 		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
165 #endif
166 	return true;
167 }
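
/*
 * Worked example with made-up numbers: for a 4 MiB read, if
 * wait_mtu_credits() grants credits for only 1 MiB, subreq->len is
 * clamped to 1 MiB here and the netfs layer issues follow-up
 * subrequests until the whole range has been read.
 */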
168 
169 /*
170  * Issue a read operation on behalf of the netfs helper functions.  We're asked
171  * to make a read of a certain size at a point in the file.  We are permitted
172  * to only read a portion of that, but as long as we read something, the netfs
173  * helper will call us again so that we can issue another read.
174  */
175 static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
176 {
177 	struct netfs_io_request *rreq = subreq->rreq;
178 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
179 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
180 	int rc = 0;
181 
182 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
183 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
184 		 subreq->transferred, subreq->len);
185 
186 	if (req->cfile->invalidHandle) {
187 		do {
188 			rc = cifs_reopen_file(req->cfile, true);
189 		} while (rc == -EAGAIN);
190 		if (rc)
191 			goto out;
192 	}
193 
194 	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
195 
196 	rc = rdata->server->ops->async_readv(rdata);
197 out:
198 	if (rc)
199 		netfs_subreq_terminated(subreq, rc, false);
200 }
201 
202 /*
203  * Writeback calls this when it finds a folio that needs uploading.  This isn't
204  * called if writeback only has copy-to-cache to deal with.
205  */
206 static void cifs_begin_writeback(struct netfs_io_request *wreq)
207 {
208 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
209 	int ret;
210 
211 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
212 	if (ret) {
213 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
214 		return;
215 	}
216 
217 	wreq->io_streams[0].avail = true;
218 }
219 
220 /*
221  * Initialise a request.
222  */
223 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
224 {
225 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
226 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
227 	struct cifsFileInfo *open_file = NULL;
228 
229 	rreq->rsize = cifs_sb->ctx->rsize;
230 	rreq->wsize = cifs_sb->ctx->wsize;
231 	req->pid = current->tgid; // NB: may be called from a workqueue, not the issuing task
232 
233 	if (file) {
234 		open_file = file->private_data;
235 		rreq->netfs_priv = file->private_data;
236 		req->cfile = cifsFileInfo_get(open_file);
237 		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
238 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
239 			req->pid = req->cfile->pid;
240 	} else if (rreq->origin != NETFS_WRITEBACK) {
241 		WARN_ON_ONCE(1);
242 		return -EIO;
243 	}
244 
245 	return 0;
246 }
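
/*
 * Note on req->pid above: with CIFS_MOUNT_RWPIDFORWARD (going by the flag
 * name, this corresponds to the rwpidforward mount option) the pid
 * recorded when the file was opened is sent on reads and writes instead
 * of the caller's tgid, so servers doing pid-based lock checks see a
 * consistent lock owner.
 */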
247 
248 /*
249  * Completion of a request operation.
250  */
251 static void cifs_rreq_done(struct netfs_io_request *rreq)
252 {
253 	struct timespec64 atime, mtime;
254 	struct inode *inode = rreq->inode;
255 
256 	/* we do not want atime to be less than mtime, it broke some apps */
257 	atime = inode_set_atime_to_ts(inode, current_time(inode));
258 	mtime = inode_get_mtime(inode);
259 	if (timespec64_compare(&atime, &mtime) < 0)
260 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
261 }
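
/*
 * In other words: atime is first set to "now" and then raised to mtime if
 * it would otherwise lag behind it, so atime never ends up older than the
 * last modification.
 */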
262 
263 static void cifs_post_modify(struct inode *inode)
264 {
265 	/* Indication to update ctime and mtime as close is deferred */
266 	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
267 }
268 
269 static void cifs_free_request(struct netfs_io_request *rreq)
270 {
271 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
272 
273 	if (req->cfile)
274 		cifsFileInfo_put(req->cfile);
275 }
276 
277 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
278 {
279 	struct cifs_io_subrequest *rdata =
280 		container_of(subreq, struct cifs_io_subrequest, subreq);
281 	int rc = subreq->error;
282 
283 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
284 #ifdef CONFIG_CIFS_SMB_DIRECT
285 		if (rdata->mr) {
286 			smbd_deregister_mr(rdata->mr);
287 			rdata->mr = NULL;
288 		}
289 #endif
290 	}
291 
292 	add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
293 	if (rdata->have_xid)
294 		free_xid(rdata->xid);
295 }
296 
297 const struct netfs_request_ops cifs_req_ops = {
298 	.request_pool		= &cifs_io_request_pool,
299 	.subrequest_pool	= &cifs_io_subrequest_pool,
300 	.init_request		= cifs_init_request,
301 	.free_request		= cifs_free_request,
302 	.free_subrequest	= cifs_free_subrequest,
303 	.clamp_length		= cifs_clamp_length,
304 	.issue_read		= cifs_req_issue_read,
305 	.done			= cifs_rreq_done,
306 	.post_modify		= cifs_post_modify,
307 	.begin_writeback	= cifs_begin_writeback,
308 	.prepare_write		= cifs_prepare_write,
309 	.issue_write		= cifs_issue_write,
310 };
311 
312 /*
313  * Mark all open files on the tree connection as invalid, since they
314  * were closed when the session to the server was lost.
315  */
316 void
317 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
318 {
319 	struct cifsFileInfo *open_file = NULL;
320 	struct list_head *tmp;
321 	struct list_head *tmp1;
322 
323 	/* only send once per connect */
324 	spin_lock(&tcon->tc_lock);
325 	if (tcon->need_reconnect)
326 		tcon->status = TID_NEED_RECON;
327 
328 	if (tcon->status != TID_NEED_RECON) {
329 		spin_unlock(&tcon->tc_lock);
330 		return;
331 	}
332 	tcon->status = TID_IN_FILES_INVALIDATE;
333 	spin_unlock(&tcon->tc_lock);
334 
335 	/* list all files open on tree connection and mark them invalid */
336 	spin_lock(&tcon->open_file_lock);
337 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
338 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
339 		open_file->invalidHandle = true;
340 		open_file->oplock_break_cancelled = true;
341 	}
342 	spin_unlock(&tcon->open_file_lock);
343 
344 	invalidate_all_cached_dirs(tcon);
345 	spin_lock(&tcon->tc_lock);
346 	if (tcon->status == TID_IN_FILES_INVALIDATE)
347 		tcon->status = TID_NEED_TCON;
348 	spin_unlock(&tcon->tc_lock);
349 
350 	/*
351 	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
352 	 * to this tcon.
353 	 */
354 }
355 
356 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
357 {
358 	if ((flags & O_ACCMODE) == O_RDONLY)
359 		return GENERIC_READ;
360 	else if ((flags & O_ACCMODE) == O_WRONLY)
361 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
362 	else if ((flags & O_ACCMODE) == O_RDWR) {
363 		/* Requesting GENERIC_ALL is more permission than needed and
364 		   can cause an unnecessary access-denied error on create */
365 		/* return GENERIC_ALL; */
366 		return (GENERIC_READ | GENERIC_WRITE);
367 	}
368 
369 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
370 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
371 		FILE_READ_DATA);
372 }
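
/*
 * Example (hypothetical open): O_WRONLY|O_CREAT on an fscache-enabled
 * mount reaches this function with rdwr_for_fscache == 1 and therefore
 * requests GENERIC_READ | GENERIC_WRITE, so the cache can be filled in
 * around partial writes; on a mount without fscache the same flags
 * request only GENERIC_WRITE.
 */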
373 
374 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
375 static u32 cifs_posix_convert_flags(unsigned int flags)
376 {
377 	u32 posix_flags = 0;
378 
379 	if ((flags & O_ACCMODE) == O_RDONLY)
380 		posix_flags = SMB_O_RDONLY;
381 	else if ((flags & O_ACCMODE) == O_WRONLY)
382 		posix_flags = SMB_O_WRONLY;
383 	else if ((flags & O_ACCMODE) == O_RDWR)
384 		posix_flags = SMB_O_RDWR;
385 
386 	if (flags & O_CREAT) {
387 		posix_flags |= SMB_O_CREAT;
388 		if (flags & O_EXCL)
389 			posix_flags |= SMB_O_EXCL;
390 	} else if (flags & O_EXCL)
391 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
392 			 current->comm, current->tgid);
393 
394 	if (flags & O_TRUNC)
395 		posix_flags |= SMB_O_TRUNC;
396 	/* be safe and imply O_SYNC for O_DSYNC */
397 	if (flags & O_DSYNC)
398 		posix_flags |= SMB_O_SYNC;
399 	if (flags & O_DIRECTORY)
400 		posix_flags |= SMB_O_DIRECTORY;
401 	if (flags & O_NOFOLLOW)
402 		posix_flags |= SMB_O_NOFOLLOW;
403 	if (flags & O_DIRECT)
404 		posix_flags |= SMB_O_DIRECT;
405 
406 	return posix_flags;
407 }
408 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
409 
410 static inline int cifs_get_disposition(unsigned int flags)
411 {
412 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
413 		return FILE_CREATE;
414 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
415 		return FILE_OVERWRITE_IF;
416 	else if ((flags & O_CREAT) == O_CREAT)
417 		return FILE_OPEN_IF;
418 	else if ((flags & O_TRUNC) == O_TRUNC)
419 		return FILE_OVERWRITE;
420 	else
421 		return FILE_OPEN;
422 }
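
/*
 * For instance, open(path, O_CREAT | O_TRUNC) maps to FILE_OVERWRITE_IF
 * (create the file if absent, truncate it if present), while a bare
 * O_TRUNC maps to FILE_OVERWRITE, which fails if the file does not exist.
 */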
423 
424 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
425 int cifs_posix_open(const char *full_path, struct inode **pinode,
426 			struct super_block *sb, int mode, unsigned int f_flags,
427 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
428 {
429 	int rc;
430 	FILE_UNIX_BASIC_INFO *presp_data;
431 	__u32 posix_flags = 0;
432 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
433 	struct cifs_fattr fattr;
434 	struct tcon_link *tlink;
435 	struct cifs_tcon *tcon;
436 
437 	cifs_dbg(FYI, "posix open %s\n", full_path);
438 
439 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
440 	if (presp_data == NULL)
441 		return -ENOMEM;
442 
443 	tlink = cifs_sb_tlink(cifs_sb);
444 	if (IS_ERR(tlink)) {
445 		rc = PTR_ERR(tlink);
446 		goto posix_open_ret;
447 	}
448 
449 	tcon = tlink_tcon(tlink);
450 	mode &= ~current_umask();
451 
452 	posix_flags = cifs_posix_convert_flags(f_flags);
453 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
454 			     poplock, full_path, cifs_sb->local_nls,
455 			     cifs_remap(cifs_sb));
456 	cifs_put_tlink(tlink);
457 
458 	if (rc)
459 		goto posix_open_ret;
460 
461 	if (presp_data->Type == cpu_to_le32(-1))
462 		goto posix_open_ret; /* open ok, caller does qpathinfo */
463 
464 	if (!pinode)
465 		goto posix_open_ret; /* caller does not need info */
466 
467 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
468 
469 	/* get new inode and set it up */
470 	if (*pinode == NULL) {
471 		cifs_fill_uniqueid(sb, &fattr);
472 		*pinode = cifs_iget(sb, &fattr);
473 		if (!*pinode) {
474 			rc = -ENOMEM;
475 			goto posix_open_ret;
476 		}
477 	} else {
478 		cifs_revalidate_mapping(*pinode);
479 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
480 	}
481 
482 posix_open_ret:
483 	kfree(presp_data);
484 	return rc;
485 }
486 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
487 
488 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
489 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
490 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
491 {
492 	int rc;
493 	int desired_access;
494 	int disposition;
495 	int create_options = CREATE_NOT_DIR;
496 	struct TCP_Server_Info *server = tcon->ses->server;
497 	struct cifs_open_parms oparms;
498 	int rdwr_for_fscache = 0;
499 
500 	if (!server->ops->open)
501 		return -ENOSYS;
502 
503 	/* If we're caching, we need to be able to fill in around partial writes. */
504 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
505 		rdwr_for_fscache = 1;
506 
507 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
508 
509 /*********************************************************************
510  *  open flag mapping table:
511  *
512  *	POSIX Flag            CIFS Disposition
513  *	----------            ----------------
514  *	O_CREAT               FILE_OPEN_IF
515  *	O_CREAT | O_EXCL      FILE_CREATE
516  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
517  *	O_TRUNC               FILE_OVERWRITE
518  *	none of the above     FILE_OPEN
519  *
520  *	Note that there is no direct match for the FILE_SUPERSEDE
521  *	disposition (i.e. create whether or not the file exists);
522  *	O_CREAT | O_TRUNC is similar, but truncates the existing file
523  *	rather than replacing it with a new one as FILE_SUPERSEDE does
524  *	(the latter uses the attributes / metadata passed in on open).
525  *
526  *	O_SYNC is a reasonable match to the CIFS writethrough flag and
527  *	the read/write flags match reasonably.  O_LARGEFILE is
528  *	irrelevant because large file support is always used by this
529  *	client.  The flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
530  *	O_NOFOLLOW and O_NONBLOCK need further investigation.
531  *********************************************************************/
532 
533 	disposition = cifs_get_disposition(f_flags);
534 
535 	/* BB pass O_SYNC flag through on file attributes .. BB */
536 
537 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
538 	if (f_flags & O_SYNC)
539 		create_options |= CREATE_WRITE_THROUGH;
540 
541 	if (f_flags & O_DIRECT)
542 		create_options |= CREATE_NO_BUFFER;
543 
544 retry_open:
545 	oparms = (struct cifs_open_parms) {
546 		.tcon = tcon,
547 		.cifs_sb = cifs_sb,
548 		.desired_access = desired_access,
549 		.create_options = cifs_create_options(cifs_sb, create_options),
550 		.disposition = disposition,
551 		.path = full_path,
552 		.fid = fid,
553 	};
554 
555 	rc = server->ops->open(xid, &oparms, oplock, buf);
556 	if (rc) {
557 		if (rc == -EACCES && rdwr_for_fscache == 1) {
558 			desired_access = cifs_convert_flags(f_flags, 0);
559 			rdwr_for_fscache = 2;
560 			goto retry_open;
561 		}
562 		return rc;
563 	}
564 	if (rdwr_for_fscache == 2)
565 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
566 
567 	/* TODO: Add support for calling posix query info, passing in the fid */
568 	if (tcon->unix_ext)
569 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
570 					      xid);
571 	else
572 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
573 					 xid, fid);
574 
575 	if (rc) {
576 		server->ops->close(xid, tcon, fid);
577 		if (rc == -ESTALE)
578 			rc = -EOPENSTALE;
579 	}
580 
581 	return rc;
582 }
583 
584 static bool
585 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
586 {
587 	struct cifs_fid_locks *cur;
588 	bool has_locks = false;
589 
590 	down_read(&cinode->lock_sem);
591 	list_for_each_entry(cur, &cinode->llist, llist) {
592 		if (!list_empty(&cur->locks)) {
593 			has_locks = true;
594 			break;
595 		}
596 	}
597 	up_read(&cinode->lock_sem);
598 	return has_locks;
599 }
600 
601 void
602 cifs_down_write(struct rw_semaphore *sem)
603 {
604 	while (!down_write_trylock(sem))
605 		msleep(10);
606 }
607 
608 static void cifsFileInfo_put_work(struct work_struct *work);
609 void serverclose_work(struct work_struct *work);
610 
611 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
612 				       struct tcon_link *tlink, __u32 oplock,
613 				       const char *symlink_target)
614 {
615 	struct dentry *dentry = file_dentry(file);
616 	struct inode *inode = d_inode(dentry);
617 	struct cifsInodeInfo *cinode = CIFS_I(inode);
618 	struct cifsFileInfo *cfile;
619 	struct cifs_fid_locks *fdlocks;
620 	struct cifs_tcon *tcon = tlink_tcon(tlink);
621 	struct TCP_Server_Info *server = tcon->ses->server;
622 
623 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
624 	if (cfile == NULL)
625 		return cfile;
626 
627 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
628 	if (!fdlocks) {
629 		kfree(cfile);
630 		return NULL;
631 	}
632 
633 	if (symlink_target) {
634 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
635 		if (!cfile->symlink_target) {
636 			kfree(fdlocks);
637 			kfree(cfile);
638 			return NULL;
639 		}
640 	}
641 
642 	INIT_LIST_HEAD(&fdlocks->locks);
643 	fdlocks->cfile = cfile;
644 	cfile->llist = fdlocks;
645 
646 	cfile->count = 1;
647 	cfile->pid = current->tgid;
648 	cfile->uid = current_fsuid();
649 	cfile->dentry = dget(dentry);
650 	cfile->f_flags = file->f_flags;
651 	cfile->invalidHandle = false;
652 	cfile->deferred_close_scheduled = false;
653 	cfile->tlink = cifs_get_tlink(tlink);
654 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
655 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
656 	INIT_WORK(&cfile->serverclose, serverclose_work);
657 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
658 	mutex_init(&cfile->fh_mutex);
659 	spin_lock_init(&cfile->file_info_lock);
660 
661 	cifs_sb_active(inode->i_sb);
662 
663 	/*
664 	 * If the server returned a read oplock and we have mandatory brlocks,
665 	 * set oplock level to None.
666 	 */
667 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
668 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
669 		oplock = 0;
670 	}
671 
672 	cifs_down_write(&cinode->lock_sem);
673 	list_add(&fdlocks->llist, &cinode->llist);
674 	up_write(&cinode->lock_sem);
675 
676 	spin_lock(&tcon->open_file_lock);
677 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
678 		oplock = fid->pending_open->oplock;
679 	list_del(&fid->pending_open->olist);
680 
681 	fid->purge_cache = false;
682 	server->ops->set_fid(cfile, fid, oplock);
683 
684 	list_add(&cfile->tlist, &tcon->openFileList);
685 	atomic_inc(&tcon->num_local_opens);
686 
687 	/* if the file instance is readable, put it first in the list */
688 	spin_lock(&cinode->open_file_lock);
689 	if (file->f_mode & FMODE_READ)
690 		list_add(&cfile->flist, &cinode->openFileList);
691 	else
692 		list_add_tail(&cfile->flist, &cinode->openFileList);
693 	spin_unlock(&cinode->open_file_lock);
694 	spin_unlock(&tcon->open_file_lock);
695 
696 	if (fid->purge_cache)
697 		cifs_zap_mapping(inode);
698 
699 	file->private_data = cfile;
700 	return cfile;
701 }
702 
703 struct cifsFileInfo *
704 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
705 {
706 	spin_lock(&cifs_file->file_info_lock);
707 	cifsFileInfo_get_locked(cifs_file);
708 	spin_unlock(&cifs_file->file_info_lock);
709 	return cifs_file;
710 }
711 
712 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
713 {
714 	struct inode *inode = d_inode(cifs_file->dentry);
715 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
716 	struct cifsLockInfo *li, *tmp;
717 	struct super_block *sb = inode->i_sb;
718 
719 	/*
720 	 * Delete any outstanding lock records. We'll lose them when the file
721 	 * is closed anyway.
722 	 */
723 	cifs_down_write(&cifsi->lock_sem);
724 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
725 		list_del(&li->llist);
726 		cifs_del_lock_waiters(li);
727 		kfree(li);
728 	}
729 	list_del(&cifs_file->llist->llist);
730 	kfree(cifs_file->llist);
731 	up_write(&cifsi->lock_sem);
732 
733 	cifs_put_tlink(cifs_file->tlink);
734 	dput(cifs_file->dentry);
735 	cifs_sb_deactive(sb);
736 	kfree(cifs_file->symlink_target);
737 	kfree(cifs_file);
738 }
739 
740 static void cifsFileInfo_put_work(struct work_struct *work)
741 {
742 	struct cifsFileInfo *cifs_file = container_of(work,
743 			struct cifsFileInfo, put);
744 
745 	cifsFileInfo_put_final(cifs_file);
746 }
747 
748 void serverclose_work(struct work_struct *work)
749 {
750 	struct cifsFileInfo *cifs_file = container_of(work,
751 			struct cifsFileInfo, serverclose);
752 
753 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
754 
755 	struct TCP_Server_Info *server = tcon->ses->server;
756 	int rc = 0;
757 	int retries = 0;
758 	int MAX_RETRIES = 4;
759 
760 	do {
761 		if (server->ops->close_getattr)
762 			rc = server->ops->close_getattr(0, tcon, cifs_file);
763 		else if (server->ops->close)
764 			rc = server->ops->close(0, tcon, &cifs_file->fid);
765 
766 		if (rc == -EBUSY || rc == -EAGAIN) {
767 			retries++;
768 			msleep(250);
769 		}
770 	} while ((rc == -EBUSY || rc == -EAGAIN) &&
771 		 (retries < MAX_RETRIES));
772 
773 	if (retries == MAX_RETRIES)
774 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
775 
776 	if (cifs_file->offload)
777 		queue_work(fileinfo_put_wq, &cifs_file->put);
778 	else
779 		cifsFileInfo_put_final(cifs_file);
780 }
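
/*
 * Timing note: with MAX_RETRIES = 4 and a 250 ms sleep between attempts,
 * a persistently busy server gets roughly one second of retries before
 * the close is abandoned; the local cifsFileInfo is released either way.
 */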
781 
782 /**
783  * cifsFileInfo_put - release a reference of file priv data
784  *
785  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
786  *
787  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
788  */
789 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
790 {
791 	_cifsFileInfo_put(cifs_file, true, true);
792 }
793 
794 /**
795  * _cifsFileInfo_put - release a reference of file priv data
796  *
797  * This may involve closing the filehandle @cifs_file out on the
798  * server. Must be called without holding tcon->open_file_lock,
799  * cinode->open_file_lock and cifs_file->file_info_lock.
800  *
801  * If @wait_for_oplock_handler is true and we are releasing the last
802  * reference, wait for any running oplock break handler of the file
803  * and cancel any pending one.
804  *
805  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
806  * @wait_oplock_handler: must be false if called from oplock_break_handler
807  * @offload:	if true, defer the final release to a workqueue;
808  *		the close and oplock break paths pass false
809  */
810 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
811 		       bool wait_oplock_handler, bool offload)
812 {
813 	struct inode *inode = d_inode(cifs_file->dentry);
814 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
815 	struct TCP_Server_Info *server = tcon->ses->server;
816 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
817 	struct super_block *sb = inode->i_sb;
818 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
819 	struct cifs_fid fid = {};
820 	struct cifs_pending_open open;
821 	bool oplock_break_cancelled;
822 	bool serverclose_offloaded = false;
823 
824 	spin_lock(&tcon->open_file_lock);
825 	spin_lock(&cifsi->open_file_lock);
826 	spin_lock(&cifs_file->file_info_lock);
827 
828 	cifs_file->offload = offload;
829 	if (--cifs_file->count > 0) {
830 		spin_unlock(&cifs_file->file_info_lock);
831 		spin_unlock(&cifsi->open_file_lock);
832 		spin_unlock(&tcon->open_file_lock);
833 		return;
834 	}
835 	spin_unlock(&cifs_file->file_info_lock);
836 
837 	if (server->ops->get_lease_key)
838 		server->ops->get_lease_key(inode, &fid);
839 
840 	/* store open in pending opens to make sure we don't miss lease break */
841 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
842 
843 	/* remove it from the lists */
844 	list_del(&cifs_file->flist);
845 	list_del(&cifs_file->tlist);
846 	atomic_dec(&tcon->num_local_opens);
847 
848 	if (list_empty(&cifsi->openFileList)) {
849 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
850 			 d_inode(cifs_file->dentry));
851 		/*
852 		 * In strict cache mode we need to invalidate the mapping on
853 		 * the last close because it may cause an error when we open
854 		 * this file again and get at least a level II oplock.
855 		 */
856 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
857 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
858 		cifs_set_oplock_level(cifsi, 0);
859 	}
860 
861 	spin_unlock(&cifsi->open_file_lock);
862 	spin_unlock(&tcon->open_file_lock);
863 
864 	oplock_break_cancelled = wait_oplock_handler ?
865 		cancel_work_sync(&cifs_file->oplock_break) : false;
866 
867 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
868 		struct TCP_Server_Info *server = tcon->ses->server;
869 		unsigned int xid;
870 		int rc = 0;
871 
872 		xid = get_xid();
873 		if (server->ops->close_getattr)
874 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
875 		else if (server->ops->close)
876 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
877 		_free_xid(xid);
878 
879 		if (rc == -EBUSY || rc == -EAGAIN) {
880 			// Server close failed, hence offloading it as an async op
881 			queue_work(serverclose_wq, &cifs_file->serverclose);
882 			serverclose_offloaded = true;
883 		}
884 	}
885 
886 	if (oplock_break_cancelled)
887 		cifs_done_oplock_break(cifsi);
888 
889 	cifs_del_pending_open(&open);
890 
891 	// If the server close was offloaded to the workqueue (on failure),
892 	// it will handle the final put as well. If it was not offloaded,
893 	// we need to handle the put here.
894 	if (!serverclose_offloaded) {
895 		if (offload)
896 			queue_work(fileinfo_put_wq, &cifs_file->put);
897 		else
898 			cifsFileInfo_put_final(cifs_file);
899 	}
900 }
901 
902 int cifs_open(struct inode *inode, struct file *file)
904 {
905 	int rc = -EACCES;
906 	unsigned int xid;
907 	__u32 oplock;
908 	struct cifs_sb_info *cifs_sb;
909 	struct TCP_Server_Info *server;
910 	struct cifs_tcon *tcon;
911 	struct tcon_link *tlink;
912 	struct cifsFileInfo *cfile = NULL;
913 	void *page;
914 	const char *full_path;
915 	bool posix_open_ok = false;
916 	struct cifs_fid fid = {};
917 	struct cifs_pending_open open;
918 	struct cifs_open_info_data data = {};
919 
920 	xid = get_xid();
921 
922 	cifs_sb = CIFS_SB(inode->i_sb);
923 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
924 		free_xid(xid);
925 		return -EIO;
926 	}
927 
928 	tlink = cifs_sb_tlink(cifs_sb);
929 	if (IS_ERR(tlink)) {
930 		free_xid(xid);
931 		return PTR_ERR(tlink);
932 	}
933 	tcon = tlink_tcon(tlink);
934 	server = tcon->ses->server;
935 
936 	page = alloc_dentry_path();
937 	full_path = build_path_from_dentry(file_dentry(file), page);
938 	if (IS_ERR(full_path)) {
939 		rc = PTR_ERR(full_path);
940 		goto out;
941 	}
942 
943 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
944 		 inode, file->f_flags, full_path);
945 
946 	if (file->f_flags & O_DIRECT &&
947 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
948 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
949 			file->f_op = &cifs_file_direct_nobrl_ops;
950 		else
951 			file->f_op = &cifs_file_direct_ops;
952 	}
953 
954 	/* Get the cached handle as SMB2 close is deferred */
955 	rc = cifs_get_readable_path(tcon, full_path, &cfile);
956 	if (rc == 0) {
957 		if (file->f_flags == cfile->f_flags) {
958 			file->private_data = cfile;
959 			spin_lock(&CIFS_I(inode)->deferred_lock);
960 			cifs_del_deferred_close(cfile);
961 			spin_unlock(&CIFS_I(inode)->deferred_lock);
962 			goto use_cache;
963 		} else {
964 			_cifsFileInfo_put(cfile, true, false);
965 		}
966 	}
967 
968 	if (server->oplocks)
969 		oplock = REQ_OPLOCK;
970 	else
971 		oplock = 0;
972 
973 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
974 	if (!tcon->broken_posix_open && tcon->unix_ext &&
975 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
976 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
977 		/* can not refresh inode info since size could be stale */
978 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
979 				cifs_sb->ctx->file_mode /* ignored */,
980 				file->f_flags, &oplock, &fid.netfid, xid);
981 		if (rc == 0) {
982 			cifs_dbg(FYI, "posix open succeeded\n");
983 			posix_open_ok = true;
984 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
985 			if (tcon->ses->serverNOS)
986 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
987 					 tcon->ses->ip_addr,
988 					 tcon->ses->serverNOS);
989 			tcon->broken_posix_open = true;
990 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
991 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
992 			goto out;
993 		/*
994 		 * Else fallthrough to retry open the old way on network i/o
995 		 * or DFS errors.
996 		 */
997 	}
998 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
999 
1000 	if (server->ops->get_lease_key)
1001 		server->ops->get_lease_key(inode, &fid);
1002 
1003 	cifs_add_pending_open(&fid, tlink, &open);
1004 
1005 	if (!posix_open_ok) {
1006 		if (server->ops->get_lease_key)
1007 			server->ops->get_lease_key(inode, &fid);
1008 
1009 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1010 				  xid, &data);
1011 		if (rc) {
1012 			cifs_del_pending_open(&open);
1013 			goto out;
1014 		}
1015 	}
1016 
1017 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1018 	if (cfile == NULL) {
1019 		if (server->ops->close)
1020 			server->ops->close(xid, tcon, &fid);
1021 		cifs_del_pending_open(&open);
1022 		rc = -ENOMEM;
1023 		goto out;
1024 	}
1025 
1026 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1027 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1028 		/*
1029 		 * Time to set mode which we can not set earlier due to
1030 		 * problems creating new read-only files.
1031 		 */
1032 		struct cifs_unix_set_info_args args = {
1033 			.mode	= inode->i_mode,
1034 			.uid	= INVALID_UID, /* no change */
1035 			.gid	= INVALID_GID, /* no change */
1036 			.ctime	= NO_CHANGE_64,
1037 			.atime	= NO_CHANGE_64,
1038 			.mtime	= NO_CHANGE_64,
1039 			.device	= 0,
1040 		};
1041 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1042 				       cfile->pid);
1043 	}
1044 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1045 
1046 use_cache:
1047 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1048 			   file->f_mode & FMODE_WRITE);
1049 	if (!(file->f_flags & O_DIRECT))
1050 		goto out;
1051 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1052 		goto out;
1053 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1054 
1055 out:
1056 	free_dentry_path(page);
1057 	free_xid(xid);
1058 	cifs_put_tlink(tlink);
1059 	cifs_free_open_info(&data);
1060 	return rc;
1061 }
1062 
1063 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1064 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1065 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1066 
1067 /*
1068  * Try to reacquire byte range locks that were released when the
1069  * session to the server was lost.
1070  */
1071 static int
1072 cifs_relock_file(struct cifsFileInfo *cfile)
1073 {
1074 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1075 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1076 	int rc = 0;
1077 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1078 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1079 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1080 
1081 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1082 	if (cinode->can_cache_brlcks) {
1083 		/* can cache locks - no need to relock */
1084 		up_read(&cinode->lock_sem);
1085 		return rc;
1086 	}
1087 
1088 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1089 	if (cap_unix(tcon->ses) &&
1090 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1091 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1092 		rc = cifs_push_posix_locks(cfile);
1093 	else
1094 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1095 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1096 
1097 	up_read(&cinode->lock_sem);
1098 	return rc;
1099 }
1100 
1101 static int
1102 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1103 {
1104 	int rc = -EACCES;
1105 	unsigned int xid;
1106 	__u32 oplock;
1107 	struct cifs_sb_info *cifs_sb;
1108 	struct cifs_tcon *tcon;
1109 	struct TCP_Server_Info *server;
1110 	struct cifsInodeInfo *cinode;
1111 	struct inode *inode;
1112 	void *page;
1113 	const char *full_path;
1114 	int desired_access;
1115 	int disposition = FILE_OPEN;
1116 	int create_options = CREATE_NOT_DIR;
1117 	struct cifs_open_parms oparms;
1118 	int rdwr_for_fscache = 0;
1119 
1120 	xid = get_xid();
1121 	mutex_lock(&cfile->fh_mutex);
1122 	if (!cfile->invalidHandle) {
1123 		mutex_unlock(&cfile->fh_mutex);
1124 		free_xid(xid);
1125 		return 0;
1126 	}
1127 
1128 	inode = d_inode(cfile->dentry);
1129 	cifs_sb = CIFS_SB(inode->i_sb);
1130 	tcon = tlink_tcon(cfile->tlink);
1131 	server = tcon->ses->server;
1132 
1133 	/*
1134 	 * Cannot grab the rename sem here: various ops, including those that
1135 	 * already hold the rename sem, can end up causing writepage to get
1136 	 * called, and if the server was down that means we end up here, and
1137 	 * we can never tell if the caller already holds the rename_sem.
1138 	 */
1139 	page = alloc_dentry_path();
1140 	full_path = build_path_from_dentry(cfile->dentry, page);
1141 	if (IS_ERR(full_path)) {
1142 		mutex_unlock(&cfile->fh_mutex);
1143 		free_dentry_path(page);
1144 		free_xid(xid);
1145 		return PTR_ERR(full_path);
1146 	}
1147 
1148 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1149 		 inode, cfile->f_flags, full_path);
1150 
1151 	if (tcon->ses->server->oplocks)
1152 		oplock = REQ_OPLOCK;
1153 	else
1154 		oplock = 0;
1155 
1156 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1157 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1158 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1159 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1160 		/*
1161 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1162 		 * original open. Must mask them off for a reopen.
1163 		 */
1164 		unsigned int oflags = cfile->f_flags &
1165 						~(O_CREAT | O_EXCL | O_TRUNC);
1166 
1167 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1168 				     cifs_sb->ctx->file_mode /* ignored */,
1169 				     oflags, &oplock, &cfile->fid.netfid, xid);
1170 		if (rc == 0) {
1171 			cifs_dbg(FYI, "posix reopen succeeded\n");
1172 			oparms.reconnect = true;
1173 			goto reopen_success;
1174 		}
1175 		/*
1176 		 * Fall through to retry the open the old way on errors;
1177 		 * especially in the reconnect path it is important to retry hard.
1178 		 */
1179 	}
1180 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1181 
1182 	/* If we're caching, we need to be able to fill in around partial writes. */
1183 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1184 		rdwr_for_fscache = 1;
1185 
1186 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1187 
1188 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1189 	if (cfile->f_flags & O_SYNC)
1190 		create_options |= CREATE_WRITE_THROUGH;
1191 
1192 	if (cfile->f_flags & O_DIRECT)
1193 		create_options |= CREATE_NO_BUFFER;
1194 
1195 	if (server->ops->get_lease_key)
1196 		server->ops->get_lease_key(inode, &cfile->fid);
1197 
1198 retry_open:
1199 	oparms = (struct cifs_open_parms) {
1200 		.tcon = tcon,
1201 		.cifs_sb = cifs_sb,
1202 		.desired_access = desired_access,
1203 		.create_options = cifs_create_options(cifs_sb, create_options),
1204 		.disposition = disposition,
1205 		.path = full_path,
1206 		.fid = &cfile->fid,
1207 		.reconnect = true,
1208 	};
1209 
1210 	/*
1211 	 * Can not refresh inode by passing in file_info buf to be returned by
1212 	 * ops->open and then calling get_inode_info with returned buf since
1213 	 * file might have write behind data that needs to be flushed and server
1214 	 * version of file size can be stale. If we knew for sure that inode was
1215 	 * not dirty locally we could do this.
1216 	 */
1217 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1218 	if (rc == -ENOENT && oparms.reconnect == false) {
1219 		/* durable handle timeout is expired - open the file again */
1220 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1221 		/* indicate that we need to relock the file */
1222 		oparms.reconnect = true;
1223 	}
1224 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1225 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1226 		rdwr_for_fscache = 2;
1227 		goto retry_open;
1228 	}
1229 
1230 	if (rc) {
1231 		mutex_unlock(&cfile->fh_mutex);
1232 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1233 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1234 		goto reopen_error_exit;
1235 	}
1236 
1237 	if (rdwr_for_fscache == 2)
1238 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1239 
1240 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1241 reopen_success:
1242 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1243 	cfile->invalidHandle = false;
1244 	mutex_unlock(&cfile->fh_mutex);
1245 	cinode = CIFS_I(inode);
1246 
1247 	if (can_flush) {
1248 		rc = filemap_write_and_wait(inode->i_mapping);
1249 		if (!is_interrupt_error(rc))
1250 			mapping_set_error(inode->i_mapping, rc);
1251 
1252 		if (tcon->posix_extensions) {
1253 			rc = smb311_posix_get_inode_info(&inode, full_path,
1254 							 NULL, inode->i_sb, xid);
1255 		} else if (tcon->unix_ext) {
1256 			rc = cifs_get_inode_info_unix(&inode, full_path,
1257 						      inode->i_sb, xid);
1258 		} else {
1259 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1260 						 inode->i_sb, xid, NULL);
1261 		}
1262 	}
1263 	/*
1264 	 * Else we are writing out data to server already and could deadlock if
1265 	 * we tried to flush data, and since we do not know if we have data that
1266 	 * would invalidate the current end of file on the server we can not go
1267 	 * to the server to get the new inode info.
1268 	 */
1269 
1270 	/*
1271 	 * If the server returned a read oplock and we have mandatory brlocks,
1272 	 * set oplock level to None.
1273 	 */
1274 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1275 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1276 		oplock = 0;
1277 	}
1278 
1279 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1280 	if (oparms.reconnect)
1281 		cifs_relock_file(cfile);
1282 
1283 reopen_error_exit:
1284 	free_dentry_path(page);
1285 	free_xid(xid);
1286 	return rc;
1287 }
1288 
1289 void smb2_deferred_work_close(struct work_struct *work)
1290 {
1291 	struct cifsFileInfo *cfile = container_of(work,
1292 			struct cifsFileInfo, deferred.work);
1293 
1294 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1295 	cifs_del_deferred_close(cfile);
1296 	cfile->deferred_close_scheduled = false;
1297 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1298 	_cifsFileInfo_put(cfile, true, false);
1299 }
1300 
1301 static bool
1302 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1303 {
1304 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1305 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1306 
1307 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1308 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1309 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1310 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1311 
1312 }
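
/*
 * In short, a close may be deferred only when all of the following hold:
 * a close timeout is configured (closetimeo), the handle is backed by a
 * lease, the inode is cached for read+handle (CIFS_CACHE_RH_FLG) or
 * read+handle+write (CIFS_CACHE_RHW_FLG), and the inode is not flagged
 * CIFS_INO_CLOSE_ON_LOCK.
 */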
1313 
1314 int cifs_close(struct inode *inode, struct file *file)
1315 {
1316 	struct cifsFileInfo *cfile;
1317 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1318 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1319 	struct cifs_deferred_close *dclose;
1320 
1321 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1322 
1323 	if (file->private_data != NULL) {
1324 		cfile = file->private_data;
1325 		file->private_data = NULL;
1326 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1327 		if ((cfile->status_file_deleted == false) &&
1328 		    (smb2_can_defer_close(inode, dclose))) {
1329 			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
1330 				inode_set_mtime_to_ts(inode,
1331 						      inode_set_ctime_current(inode));
1332 			}
1333 			spin_lock(&cinode->deferred_lock);
1334 			cifs_add_deferred_close(cfile, dclose);
1335 			if (cfile->deferred_close_scheduled &&
1336 			    delayed_work_pending(&cfile->deferred)) {
1337 				/*
1338 				 * If there is no pending work, mod_delayed_work queues new work.
1339 				 * So, increase the ref count to avoid use-after-free.
1340 				 */
1341 				if (!mod_delayed_work(deferredclose_wq,
1342 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1343 					cifsFileInfo_get(cfile);
1344 			} else {
1345 				/* Deferred close for files */
1346 				queue_delayed_work(deferredclose_wq,
1347 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1348 				cfile->deferred_close_scheduled = true;
1349 				spin_unlock(&cinode->deferred_lock);
1350 				return 0;
1351 			}
1352 			spin_unlock(&cinode->deferred_lock);
1353 			_cifsFileInfo_put(cfile, true, false);
1354 		} else {
1355 			_cifsFileInfo_put(cfile, true, false);
1356 			kfree(dclose);
1357 		}
1358 	}
1359 
1360 	/* return code from the ->release op is always ignored */
1361 	return 0;
1362 }
1363 
1364 void
1365 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1366 {
1367 	struct cifsFileInfo *open_file, *tmp;
1368 	struct list_head tmp_list;
1369 
1370 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1371 		return;
1372 
1373 	tcon->need_reopen_files = false;
1374 
1375 	cifs_dbg(FYI, "Reopen persistent handles\n");
1376 	INIT_LIST_HEAD(&tmp_list);
1377 
1378 	/* list all files open on tree connection, reopen resilient handles  */
1379 	spin_lock(&tcon->open_file_lock);
1380 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1381 		if (!open_file->invalidHandle)
1382 			continue;
1383 		cifsFileInfo_get(open_file);
1384 		list_add_tail(&open_file->rlist, &tmp_list);
1385 	}
1386 	spin_unlock(&tcon->open_file_lock);
1387 
1388 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1389 		if (cifs_reopen_file(open_file, false /* do not flush */))
1390 			tcon->need_reopen_files = true;
1391 		list_del_init(&open_file->rlist);
1392 		cifsFileInfo_put(open_file);
1393 	}
1394 }
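
/*
 * The two passes above are deliberate: handles are collected onto
 * tmp_list under the open_file_lock spinlock and only reopened after it
 * is dropped, because cifs_reopen_file() can sleep on network I/O.
 */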
1395 
1396 int cifs_closedir(struct inode *inode, struct file *file)
1397 {
1398 	int rc = 0;
1399 	unsigned int xid;
1400 	struct cifsFileInfo *cfile = file->private_data;
1401 	struct cifs_tcon *tcon;
1402 	struct TCP_Server_Info *server;
1403 	char *buf;
1404 
1405 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1406 
1407 	if (cfile == NULL)
1408 		return rc;
1409 
1410 	xid = get_xid();
1411 	tcon = tlink_tcon(cfile->tlink);
1412 	server = tcon->ses->server;
1413 
1414 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1415 	spin_lock(&cfile->file_info_lock);
1416 	if (server->ops->dir_needs_close(cfile)) {
1417 		cfile->invalidHandle = true;
1418 		spin_unlock(&cfile->file_info_lock);
1419 		if (server->ops->close_dir)
1420 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1421 		else
1422 			rc = -ENOSYS;
1423 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1424 		/* not much we can do if it fails anyway, ignore rc */
1425 		rc = 0;
1426 	} else
1427 		spin_unlock(&cfile->file_info_lock);
1428 
1429 	buf = cfile->srch_inf.ntwrk_buf_start;
1430 	if (buf) {
1431 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1432 		cfile->srch_inf.ntwrk_buf_start = NULL;
1433 		if (cfile->srch_inf.smallBuf)
1434 			cifs_small_buf_release(buf);
1435 		else
1436 			cifs_buf_release(buf);
1437 	}
1438 
1439 	cifs_put_tlink(cfile->tlink);
1440 	kfree(file->private_data);
1441 	file->private_data = NULL;
1442 	/* BB can we lock the filestruct while this is going on? */
1443 	free_xid(xid);
1444 	return rc;
1445 }
1446 
1447 static struct cifsLockInfo *
1448 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1449 {
1450 	struct cifsLockInfo *lock =
1451 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1452 	if (!lock)
1453 		return lock;
1454 	lock->offset = offset;
1455 	lock->length = length;
1456 	lock->type = type;
1457 	lock->pid = current->tgid;
1458 	lock->flags = flags;
1459 	INIT_LIST_HEAD(&lock->blist);
1460 	init_waitqueue_head(&lock->block_q);
1461 	return lock;
1462 }
1463 
1464 void
1465 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1466 {
1467 	struct cifsLockInfo *li, *tmp;
1468 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1469 		list_del_init(&li->blist);
1470 		wake_up(&li->block_q);
1471 	}
1472 }
1473 
1474 #define CIFS_LOCK_OP	0
1475 #define CIFS_READ_OP	1
1476 #define CIFS_WRITE_OP	2
1477 
1478 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1479 static bool
1480 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1481 			    __u64 length, __u8 type, __u16 flags,
1482 			    struct cifsFileInfo *cfile,
1483 			    struct cifsLockInfo **conf_lock, int rw_check)
1484 {
1485 	struct cifsLockInfo *li;
1486 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1487 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1488 
1489 	list_for_each_entry(li, &fdlocks->locks, llist) {
1490 		if (offset + length <= li->offset ||
1491 		    offset >= li->offset + li->length)
1492 			continue;
1493 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1494 		    server->ops->compare_fids(cfile, cur_cfile)) {
1495 			/* shared lock prevents write op through the same fid */
1496 			if (!(li->type & server->vals->shared_lock_type) ||
1497 			    rw_check != CIFS_WRITE_OP)
1498 				continue;
1499 		}
1500 		if ((type & server->vals->shared_lock_type) &&
1501 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1502 		     current->tgid == li->pid) || type == li->type))
1503 			continue;
1504 		if (rw_check == CIFS_LOCK_OP &&
1505 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1506 		    server->ops->compare_fids(cfile, cur_cfile))
1507 			continue;
1508 		if (conf_lock)
1509 			*conf_lock = li;
1510 		return true;
1511 	}
1512 	return false;
1513 }
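
/*
 * Hypothetical example: a shared (read) lock on bytes [0, 4096) taken by
 * this tgid through this fid does not conflict with a CIFS_READ_OP check
 * on an overlapping range, but does conflict with a CIFS_WRITE_OP check,
 * since a shared lock still forbids writes through the same fid.
 */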
1514 
1515 bool
1516 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1517 			__u8 type, __u16 flags,
1518 			struct cifsLockInfo **conf_lock, int rw_check)
1519 {
1520 	bool rc = false;
1521 	struct cifs_fid_locks *cur;
1522 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1523 
1524 	list_for_each_entry(cur, &cinode->llist, llist) {
1525 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1526 						 flags, cfile, conf_lock,
1527 						 rw_check);
1528 		if (rc)
1529 			break;
1530 	}
1531 
1532 	return rc;
1533 }
1534 
1535 /*
1536  * Check if there is another lock that prevents us from setting the lock (mandatory
1537  * style). If such a lock exists, update the flock structure with its
1538  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1539  * or leave it the same if we can't. Returns 0 if we don't need to request to
1540  * the server or 1 otherwise.
1541  */
1542 static int
1543 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1544 	       __u8 type, struct file_lock *flock)
1545 {
1546 	int rc = 0;
1547 	struct cifsLockInfo *conf_lock;
1548 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1549 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1550 	bool exist;
1551 
1552 	down_read(&cinode->lock_sem);
1553 
1554 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1555 					flock->c.flc_flags, &conf_lock,
1556 					CIFS_LOCK_OP);
1557 	if (exist) {
1558 		flock->fl_start = conf_lock->offset;
1559 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1560 		flock->c.flc_pid = conf_lock->pid;
1561 		if (conf_lock->type & server->vals->shared_lock_type)
1562 			flock->c.flc_type = F_RDLCK;
1563 		else
1564 			flock->c.flc_type = F_WRLCK;
1565 	} else if (!cinode->can_cache_brlcks)
1566 		rc = 1;
1567 	else
1568 		flock->c.flc_type = F_UNLCK;
1569 
1570 	up_read(&cinode->lock_sem);
1571 	return rc;
1572 }
1573 
1574 static void
1575 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1576 {
1577 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1578 	cifs_down_write(&cinode->lock_sem);
1579 	list_add_tail(&lock->llist, &cfile->llist->locks);
1580 	up_write(&cinode->lock_sem);
1581 }
1582 
1583 /*
1584  * Set the byte-range lock (mandatory style). Returns:
1585  * 1) 0, if we set the lock and don't need to request to the server;
1586  * 2) 1, if no locks prevent us but we need to request to the server;
1587  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1588  */
1589 static int
1590 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1591 		 bool wait)
1592 {
1593 	struct cifsLockInfo *conf_lock;
1594 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1595 	bool exist;
1596 	int rc = 0;
1597 
1598 try_again:
1599 	exist = false;
1600 	cifs_down_write(&cinode->lock_sem);
1601 
1602 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1603 					lock->type, lock->flags, &conf_lock,
1604 					CIFS_LOCK_OP);
1605 	if (!exist && cinode->can_cache_brlcks) {
1606 		list_add_tail(&lock->llist, &cfile->llist->locks);
1607 		up_write(&cinode->lock_sem);
1608 		return rc;
1609 	}
1610 
1611 	if (!exist)
1612 		rc = 1;
1613 	else if (!wait)
1614 		rc = -EACCES;
1615 	else {
1616 		list_add_tail(&lock->blist, &conf_lock->blist);
1617 		up_write(&cinode->lock_sem);
1618 		rc = wait_event_interruptible(lock->block_q,
1619 					(lock->blist.prev == &lock->blist) &&
1620 					(lock->blist.next == &lock->blist));
1621 		if (!rc)
1622 			goto try_again;
1623 		cifs_down_write(&cinode->lock_sem);
1624 		list_del_init(&lock->blist);
1625 	}
1626 
1627 	up_write(&cinode->lock_sem);
1628 	return rc;
1629 }
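
/*
 * Note the open-coded list_empty() test on lock->blist in the wait above:
 * cifs_del_lock_waiters() removes the waiter from the conflicting lock's
 * block list, so once both list pointers point back at the lock itself
 * the conflict is gone and the function retries from try_again.
 */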
1630 
1631 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1632 /*
1633  * Check if there is another lock that prevents us from setting the lock (posix
1634  * style). If such a lock exists, update the flock structure with its
1635  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1636  * or leave it the same if we can't. Returns 0 if we don't need to request to
1637  * the server or 1 otherwise.
1638  */
1639 static int
1640 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1641 {
1642 	int rc = 0;
1643 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1644 	unsigned char saved_type = flock->c.flc_type;
1645 
1646 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1647 		return 1;
1648 
1649 	down_read(&cinode->lock_sem);
1650 	posix_test_lock(file, flock);
1651 
1652 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1653 		flock->c.flc_type = saved_type;
1654 		rc = 1;
1655 	}
1656 
1657 	up_read(&cinode->lock_sem);
1658 	return rc;
1659 }
1660 
1661 /*
1662  * Set the byte-range lock (posix style). Returns:
1663  * 1) <0, if the error occurs while setting the lock;
1664  * 2) 0, if we set the lock and don't need to request to the server;
1665  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1666  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1667  */
1668 static int
1669 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1670 {
1671 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1672 	int rc = FILE_LOCK_DEFERRED + 1;
1673 
1674 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1675 		return rc;
1676 
1677 	cifs_down_write(&cinode->lock_sem);
1678 	if (!cinode->can_cache_brlcks) {
1679 		up_write(&cinode->lock_sem);
1680 		return rc;
1681 	}
1682 
1683 	rc = posix_lock_file(file, flock, NULL);
1684 	up_write(&cinode->lock_sem);
1685 	return rc;
1686 }
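
/*
 * A caller would dispatch on the return value roughly like this (a
 * sketch, not an actual call site):
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;	// error, handled locally, or deferred
 *	// FILE_LOCK_DEFERRED + 1: send the lock request to the server
 */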
1687 
1688 int
1689 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1690 {
1691 	unsigned int xid;
1692 	int rc = 0, stored_rc;
1693 	struct cifsLockInfo *li, *tmp;
1694 	struct cifs_tcon *tcon;
1695 	unsigned int num, max_num, max_buf;
1696 	LOCKING_ANDX_RANGE *buf, *cur;
1697 	static const int types[] = {
1698 		LOCKING_ANDX_LARGE_FILES,
1699 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1700 	};
1701 	int i;
1702 
1703 	xid = get_xid();
1704 	tcon = tlink_tcon(cfile->tlink);
1705 
1706 	/*
1707 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1708 	 * and check it before using.
1709 	 */
1710 	max_buf = tcon->ses->server->maxBuf;
1711 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1712 		free_xid(xid);
1713 		return -EINVAL;
1714 	}
1715 
1716 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1717 		     PAGE_SIZE);
1718 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1719 			PAGE_SIZE);
1720 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1721 						sizeof(LOCKING_ANDX_RANGE);
1722 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1723 	if (!buf) {
1724 		free_xid(xid);
1725 		return -ENOMEM;
1726 	}
1727 
1728 	for (i = 0; i < 2; i++) {
1729 		cur = buf;
1730 		num = 0;
1731 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1732 			if (li->type != types[i])
1733 				continue;
1734 			cur->Pid = cpu_to_le16(li->pid);
1735 			cur->LengthLow = cpu_to_le32((u32)li->length);
1736 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1737 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1738 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1739 			if (++num == max_num) {
1740 				stored_rc = cifs_lockv(xid, tcon,
1741 						       cfile->fid.netfid,
1742 						       (__u8)li->type, 0, num,
1743 						       buf);
1744 				if (stored_rc)
1745 					rc = stored_rc;
1746 				cur = buf;
1747 				num = 0;
1748 			} else
1749 				cur++;
1750 		}
1751 
1752 		if (num) {
1753 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1754 					       (__u8)types[i], 0, num, buf);
1755 			if (stored_rc)
1756 				rc = stored_rc;
1757 		}
1758 	}
1759 
1760 	kfree(buf);
1761 	free_xid(xid);
1762 	return rc;
1763 }
1764 
1765 static __u32
1766 hash_lockowner(fl_owner_t owner)
1767 {
1768 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1769 }
1770 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1771 
1772 struct lock_to_push {
1773 	struct list_head llist;
1774 	__u64 offset;
1775 	__u64 length;
1776 	__u32 pid;
1777 	__u16 netfid;
1778 	__u8 type;
1779 };
1780 
1781 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1782 static int
1783 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1784 {
1785 	struct inode *inode = d_inode(cfile->dentry);
1786 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1787 	struct file_lock *flock;
1788 	struct file_lock_context *flctx = locks_inode_context(inode);
1789 	unsigned int count = 0, i;
1790 	int rc = 0, xid, type;
1791 	struct list_head locks_to_send, *el;
1792 	struct lock_to_push *lck, *tmp;
1793 	__u64 length;
1794 
1795 	xid = get_xid();
1796 
1797 	if (!flctx)
1798 		goto out;
1799 
1800 	spin_lock(&flctx->flc_lock);
1801 	list_for_each(el, &flctx->flc_posix) {
1802 		count++;
1803 	}
1804 	spin_unlock(&flctx->flc_lock);
1805 
1806 	INIT_LIST_HEAD(&locks_to_send);
1807 
1808 	/*
1809 	 * Allocating count locks is enough because no FL_POSIX locks can be
1810 	 * added to the list while we are holding cinode->lock_sem, which
1811 	 * protects locking operations on this inode.
1812 	 */
1813 	for (i = 0; i < count; i++) {
1814 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1815 		if (!lck) {
1816 			rc = -ENOMEM;
1817 			goto err_out;
1818 		}
1819 		list_add_tail(&lck->llist, &locks_to_send);
1820 	}
1821 
1822 	el = locks_to_send.next;
1823 	spin_lock(&flctx->flc_lock);
1824 	for_each_file_lock(flock, &flctx->flc_posix) {
1825 		unsigned char ftype = flock->c.flc_type;
1826 
1827 		if (el == &locks_to_send) {
1828 			/*
1829 			 * The list ended. We don't have enough allocated
1830 			 * structures - something is really wrong.
1831 			 */
1832 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1833 			break;
1834 		}
1835 		length = cifs_flock_len(flock);
1836 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1837 			type = CIFS_RDLCK;
1838 		else
1839 			type = CIFS_WRLCK;
1840 		lck = list_entry(el, struct lock_to_push, llist);
1841 		lck->pid = hash_lockowner(flock->c.flc_owner);
1842 		lck->netfid = cfile->fid.netfid;
1843 		lck->length = length;
1844 		lck->type = type;
1845 		lck->offset = flock->fl_start;
1846 	}
1847 	spin_unlock(&flctx->flc_lock);
1848 
1849 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1850 		int stored_rc;
1851 
1852 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1853 					     lck->offset, lck->length, NULL,
1854 					     lck->type, 0);
1855 		if (stored_rc)
1856 			rc = stored_rc;
1857 		list_del(&lck->llist);
1858 		kfree(lck);
1859 	}
1860 
1861 out:
1862 	free_xid(xid);
1863 	return rc;
1864 err_out:
1865 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1866 		list_del(&lck->llist);
1867 		kfree(lck);
1868 	}
1869 	goto out;
1870 }
1871 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1872 
1873 static int
1874 cifs_push_locks(struct cifsFileInfo *cfile)
1875 {
1876 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1877 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1878 	int rc = 0;
1879 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1880 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1881 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1882 
1883 	/* we are going to update can_cache_brlcks here - need write access */
1884 	cifs_down_write(&cinode->lock_sem);
1885 	if (!cinode->can_cache_brlcks) {
1886 		up_write(&cinode->lock_sem);
1887 		return rc;
1888 	}
1889 
1890 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1891 	if (cap_unix(tcon->ses) &&
1892 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1893 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1894 		rc = cifs_push_posix_locks(cfile);
1895 	else
1896 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1897 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1898 
1899 	cinode->can_cache_brlcks = false;
1900 	up_write(&cinode->lock_sem);
1901 	return rc;
1902 }
1903 
1904 static void
1905 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1906 		bool *wait_flag, struct TCP_Server_Info *server)
1907 {
1908 	if (flock->c.flc_flags & FL_POSIX)
1909 		cifs_dbg(FYI, "Posix\n");
1910 	if (flock->c.flc_flags & FL_FLOCK)
1911 		cifs_dbg(FYI, "Flock\n");
1912 	if (flock->c.flc_flags & FL_SLEEP) {
1913 		cifs_dbg(FYI, "Blocking lock\n");
1914 		*wait_flag = true;
1915 	}
1916 	if (flock->c.flc_flags & FL_ACCESS)
1917 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1918 	if (flock->c.flc_flags & FL_LEASE)
1919 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1920 	if (flock->c.flc_flags &
1921 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1922 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1923 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1924 		         flock->c.flc_flags);
1925 
1926 	*type = server->vals->large_lock_type;
1927 	if (lock_is_write(flock)) {
1928 		cifs_dbg(FYI, "F_WRLCK\n");
1929 		*type |= server->vals->exclusive_lock_type;
1930 		*lock = 1;
1931 	} else if (lock_is_unlock(flock)) {
1932 		cifs_dbg(FYI, "F_UNLCK\n");
1933 		*type |= server->vals->unlock_lock_type;
1934 		*unlock = 1;
1935 		/* Check if unlock includes more than one lock range */
1936 	} else if (lock_is_read(flock)) {
1937 		cifs_dbg(FYI, "F_RDLCK\n");
1938 		*type |= server->vals->shared_lock_type;
1939 		*lock = 1;
1940 	} else if (flock->c.flc_type == F_EXLCK) {
1941 		cifs_dbg(FYI, "F_EXLCK\n");
1942 		*type |= server->vals->exclusive_lock_type;
1943 		*lock = 1;
1944 	} else if (flock->c.flc_type == F_SHLCK) {
1945 		cifs_dbg(FYI, "F_SHLCK\n");
1946 		*type |= server->vals->shared_lock_type;
1947 		*lock = 1;
1948 	} else
1949 		cifs_dbg(FYI, "Unknown type of lock\n");
1950 }
1951 
1952 static int
1953 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1954 	   bool wait_flag, bool posix_lck, unsigned int xid)
1955 {
1956 	int rc = 0;
1957 	__u64 length = cifs_flock_len(flock);
1958 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1959 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1960 	struct TCP_Server_Info *server = tcon->ses->server;
1961 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1962 	__u16 netfid = cfile->fid.netfid;
1963 
1964 	if (posix_lck) {
1965 		int posix_lock_type;
1966 
1967 		rc = cifs_posix_lock_test(file, flock);
1968 		if (!rc)
1969 			return rc;
1970 
1971 		if (type & server->vals->shared_lock_type)
1972 			posix_lock_type = CIFS_RDLCK;
1973 		else
1974 			posix_lock_type = CIFS_WRLCK;
1975 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
1976 				      hash_lockowner(flock->c.flc_owner),
1977 				      flock->fl_start, length, flock,
1978 				      posix_lock_type, wait_flag);
1979 		return rc;
1980 	}
1981 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1982 
1983 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
1984 	if (!rc)
1985 		return rc;
1986 
1987 	/* BB we could chain these into one lock request BB */
1988 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1989 				    1, 0, false);
1990 	if (rc == 0) {
1991 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1992 					    type, 0, 1, false);
1993 		flock->c.flc_type = F_UNLCK;
1994 		if (rc != 0)
1995 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1996 				 rc);
1997 		return 0;
1998 	}
1999 
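	/*
	 * The probe above failed, so something conflicts with the requested
	 * lock. If the caller asked about a shared lock, the conflicting lock
	 * must be exclusive; otherwise probe again with a shared lock to tell
	 * read-lock holders apart from write-lock holders.
	 */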
2000 	if (type & server->vals->shared_lock_type) {
2001 		flock->c.flc_type = F_WRLCK;
2002 		return 0;
2003 	}
2004 
2005 	type &= ~server->vals->exclusive_lock_type;
2006 
2007 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2008 				    type | server->vals->shared_lock_type,
2009 				    1, 0, false);
2010 	if (rc == 0) {
2011 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2012 			type | server->vals->shared_lock_type, 0, 1, false);
2013 		flock->c.flc_type = F_RDLCK;
2014 		if (rc != 0)
2015 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2016 				 rc);
2017 	} else
2018 		flock->c.flc_type = F_WRLCK;
2019 
2020 	return 0;
2021 }
2022 
2023 void
2024 cifs_move_llist(struct list_head *source, struct list_head *dest)
2025 {
2026 	struct list_head *li, *tmp;
2027 	list_for_each_safe(li, tmp, source)
2028 		list_move(li, dest);
2029 }
2030 
2031 void
2032 cifs_free_llist(struct list_head *llist)
2033 {
2034 	struct cifsLockInfo *li, *tmp;
2035 	list_for_each_entry_safe(li, tmp, llist, llist) {
2036 		cifs_del_lock_waiters(li);
2037 		list_del(&li->llist);
2038 		kfree(li);
2039 	}
2040 }
2041 
2042 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2043 int
2044 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2045 		  unsigned int xid)
2046 {
2047 	int rc = 0, stored_rc;
2048 	static const int types[] = {
2049 		LOCKING_ANDX_LARGE_FILES,
2050 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2051 	};
2052 	unsigned int i;
2053 	unsigned int max_num, num, max_buf;
2054 	LOCKING_ANDX_RANGE *buf, *cur;
2055 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2056 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2057 	struct cifsLockInfo *li, *tmp;
2058 	__u64 length = cifs_flock_len(flock);
2059 	struct list_head tmp_llist;
2060 
2061 	INIT_LIST_HEAD(&tmp_llist);
2062 
2063 	/*
2064 	 * Accessing maxBuf is racy with cifs_reconnect - need to store the
2065 	 * value and check it before using it.
2066 	 */
2067 	max_buf = tcon->ses->server->maxBuf;
2068 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2069 		return -EINVAL;
2070 
2071 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2072 		     PAGE_SIZE);
2073 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2074 			PAGE_SIZE);
2075 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2076 						sizeof(LOCKING_ANDX_RANGE);
2077 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2078 	if (!buf)
2079 		return -ENOMEM;
2080 
2081 	cifs_down_write(&cinode->lock_sem);
2082 	for (i = 0; i < 2; i++) {
2083 		cur = buf;
2084 		num = 0;
2085 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2086 			if (flock->fl_start > li->offset ||
2087 			    (flock->fl_start + length) <
2088 			    (li->offset + li->length))
2089 				continue;
2090 			if (current->tgid != li->pid)
2091 				continue;
2092 			if (types[i] != li->type)
2093 				continue;
2094 			if (cinode->can_cache_brlcks) {
2095 				/*
2096 				 * We can cache brlock requests - simply remove
2097 			 * the lock from the file's list.
2098 				 */
2099 				list_del(&li->llist);
2100 				cifs_del_lock_waiters(li);
2101 				kfree(li);
2102 				continue;
2103 			}
2104 			cur->Pid = cpu_to_le16(li->pid);
2105 			cur->LengthLow = cpu_to_le32((u32)li->length);
2106 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2107 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2108 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2109 			/*
2110 			 * We need to save the lock here so that we can add it
2111 			 * back to the file's list if the unlock range request
2112 			 * fails on the server.
2113 			 */
2114 			list_move(&li->llist, &tmp_llist);
2115 			if (++num == max_num) {
2116 				stored_rc = cifs_lockv(xid, tcon,
2117 						       cfile->fid.netfid,
2118 						       li->type, num, 0, buf);
2119 				if (stored_rc) {
2120 					/*
2121 					 * We failed on the unlock range
2122 					 * request - add all locks from the tmp
2123 					 * list to the head of the file's list.
2124 					 */
2125 					cifs_move_llist(&tmp_llist,
2126 							&cfile->llist->locks);
2127 					rc = stored_rc;
2128 				} else
2129 					/*
2130 					 * The unlock range request succeeded -
2131 					 * free the tmp list.
2132 					 */
2133 					cifs_free_llist(&tmp_llist);
2134 				cur = buf;
2135 				num = 0;
2136 			} else
2137 				cur++;
2138 		}
2139 		if (num) {
2140 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2141 					       types[i], num, 0, buf);
2142 			if (stored_rc) {
2143 				cifs_move_llist(&tmp_llist,
2144 						&cfile->llist->locks);
2145 				rc = stored_rc;
2146 			} else
2147 				cifs_free_llist(&tmp_llist);
2148 		}
2149 	}
2150 
2151 	up_write(&cinode->lock_sem);
2152 	kfree(buf);
2153 	return rc;
2154 }
2155 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2156 
2157 static int
2158 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2159 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2160 	   unsigned int xid)
2161 {
2162 	int rc = 0;
2163 	__u64 length = cifs_flock_len(flock);
2164 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2165 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2166 	struct TCP_Server_Info *server = tcon->ses->server;
2167 	struct inode *inode = d_inode(cfile->dentry);
2168 
2169 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2170 	if (posix_lck) {
2171 		int posix_lock_type;
2172 
2173 		rc = cifs_posix_lock_set(file, flock);
2174 		if (rc <= FILE_LOCK_DEFERRED)
2175 			return rc;
2176 
2177 		if (type & server->vals->shared_lock_type)
2178 			posix_lock_type = CIFS_RDLCK;
2179 		else
2180 			posix_lock_type = CIFS_WRLCK;
2181 
2182 		if (unlock == 1)
2183 			posix_lock_type = CIFS_UNLCK;
2184 
2185 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2186 				      hash_lockowner(flock->c.flc_owner),
2187 				      flock->fl_start, length,
2188 				      NULL, posix_lock_type, wait_flag);
2189 		goto out;
2190 	}
2191 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2192 	if (lock) {
2193 		struct cifsLockInfo *lock;
2194 
2195 		lock = cifs_lock_init(flock->fl_start, length, type,
2196 				      flock->c.flc_flags);
2197 		if (!lock)
2198 			return -ENOMEM;
2199 
2200 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2201 		if (rc < 0) {
2202 			kfree(lock);
2203 			return rc;
2204 		}
2205 		if (!rc)
2206 			goto out;
2207 
2208 		/*
2209 		 * A Windows 7 server can delay breaking a lease from read to
2210 		 * None if we set a byte-range lock on a file - break it
2211 		 * explicitly before sending the lock to the server to be sure
2212 		 * the next read won't conflict with non-overlapping locks due
2213 		 * to page reading.
2214 		 */
2215 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2216 					CIFS_CACHE_READ(CIFS_I(inode))) {
2217 			cifs_zap_mapping(inode);
2218 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2219 				 inode);
2220 			CIFS_I(inode)->oplock = 0;
2221 		}
2222 
2223 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2224 					    type, 1, 0, wait_flag);
2225 		if (rc) {
2226 			kfree(lock);
2227 			return rc;
2228 		}
2229 
2230 		cifs_lock_add(cfile, lock);
2231 	} else if (unlock)
2232 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2233 
2234 out:
2235 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2236 		/*
2237 		 * If this is a request to remove all locks because we
2238 		 * are closing the file, it doesn't matter if the
2239 		 * unlocking failed as both cifs.ko and the SMB server
2240 		 * remove the lock on file close.
2241 		 */
2242 		if (rc) {
2243 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2244 			if (!(flock->c.flc_flags & FL_CLOSE))
2245 				return rc;
2246 		}
2247 		rc = locks_lock_file_wait(file, flock);
2248 	}
2249 	return rc;
2250 }
2251 
2252 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2253 {
2254 	int rc, xid;
2255 	int lock = 0, unlock = 0;
2256 	bool wait_flag = false;
2257 	bool posix_lck = false;
2258 	struct cifs_sb_info *cifs_sb;
2259 	struct cifs_tcon *tcon;
2260 	struct cifsFileInfo *cfile;
2261 	__u32 type;
2262 
2263 	xid = get_xid();
2264 
2265 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2266 		rc = -ENOLCK;
2267 		free_xid(xid);
2268 		return rc;
2269 	}
2270 
2271 	cfile = (struct cifsFileInfo *)file->private_data;
2272 	tcon = tlink_tcon(cfile->tlink);
2273 
2274 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2275 			tcon->ses->server);
2276 	cifs_sb = CIFS_FILE_SB(file);
2277 
2278 	if (cap_unix(tcon->ses) &&
2279 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2280 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2281 		posix_lck = true;
2282 
2283 	if (!lock && !unlock) {
2284 		/*
2285 		 * If this is neither a lock nor an unlock request, there is
2286 		 * nothing to do since we do not know what it is.
2287 		 */
2288 		rc = -EOPNOTSUPP;
2289 		free_xid(xid);
2290 		return rc;
2291 	}
2292 
2293 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2294 			xid);
2295 	free_xid(xid);
2296 	return rc;
2299 }
2300 
2301 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2302 {
2303 	int rc, xid;
2304 	int lock = 0, unlock = 0;
2305 	bool wait_flag = false;
2306 	bool posix_lck = false;
2307 	struct cifs_sb_info *cifs_sb;
2308 	struct cifs_tcon *tcon;
2309 	struct cifsFileInfo *cfile;
2310 	__u32 type;
2311 
2312 	rc = -EACCES;
2313 	xid = get_xid();
2314 
2315 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2316 		 flock->c.flc_flags, flock->c.flc_type,
2317 		 (long long)flock->fl_start,
2318 		 (long long)flock->fl_end);
2319 
2320 	cfile = (struct cifsFileInfo *)file->private_data;
2321 	tcon = tlink_tcon(cfile->tlink);
2322 
2323 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2324 			tcon->ses->server);
2325 	cifs_sb = CIFS_FILE_SB(file);
2326 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2327 
2328 	if (cap_unix(tcon->ses) &&
2329 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2330 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2331 		posix_lck = true;
2332 	/*
2333 	 * BB add code here to normalize offset and length to account for
2334 	 * negative length, which we cannot accept over the wire.
2335 	 */
2336 	if (IS_GETLK(cmd)) {
2337 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2338 		free_xid(xid);
2339 		return rc;
2340 	}
2341 
2342 	if (!lock && !unlock) {
2343 		/*
2344 		 * If this is neither a lock nor an unlock request, there is
2345 		 * nothing to do since we do not know what it is.
2346 		 */
2347 		free_xid(xid);
2348 		return -EOPNOTSUPP;
2349 	}
2350 
2351 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2352 			xid);
2353 	free_xid(xid);
2354 	return rc;
2355 }
2356 
2357 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2358 				      bool was_async)
2359 {
2360 	struct netfs_io_request *wreq = wdata->rreq;
2361 	loff_t new_server_eof;
2362 
2363 	if (result > 0) {
2364 		new_server_eof = wdata->subreq.start + wdata->subreq.transferred + result;
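		/*
		 * e.g. a subrequest that started at offset 8192, had already
		 * transferred 4096 bytes and just completed another 4096
		 * implies the server's EOF is now at least 16384.
		 */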
2365 
2366 		if (new_server_eof > netfs_inode(wreq->inode)->remote_i_size)
2367 			netfs_resize_file(netfs_inode(wreq->inode), new_server_eof, true);
2368 	}
2369 
2370 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2371 }
2372 
2373 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2374 					bool fsuid_only)
2375 {
2376 	struct cifsFileInfo *open_file = NULL;
2377 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2378 
2379 	/* only filter by fsuid on multiuser mounts */
2380 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2381 		fsuid_only = false;
2382 
2383 	spin_lock(&cifs_inode->open_file_lock);
2384 	/* We could simply take the first list entry since write-only entries
2385 	   are always at the end of the list, but since the first entry might
2386 	   have a close pending, we go through the whole list. */
2387 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2388 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2389 			continue;
2390 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2391 			if (!open_file->invalidHandle) {
2392 				/* found a good file */
2393 				/* lock it so it will not be closed on us */
2394 				cifsFileInfo_get(open_file);
2395 				spin_unlock(&cifs_inode->open_file_lock);
2396 				return open_file;
2397 			} /* else might as well continue, and look for
2398 			     another, or simply have the caller reopen it
2399 			     again rather than trying to fix this handle */
2400 		} else /* write only file */
2401 			break; /* write only files are last so must be done */
2402 	}
2403 	spin_unlock(&cifs_inode->open_file_lock);
2404 	return NULL;
2405 }
2406 
2407 /* Return -EBADF if no handle is found, or the general rc otherwise */
2408 int
2409 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2410 		       struct cifsFileInfo **ret_file)
2411 {
2412 	struct cifsFileInfo *open_file, *inv_file = NULL;
2413 	struct cifs_sb_info *cifs_sb;
2414 	bool any_available = false;
2415 	int rc = -EBADF;
2416 	unsigned int refind = 0;
2417 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2418 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2419 	*ret_file = NULL;
2420 
2421 	/*
2422 	 * Having a null inode here (because mapping->host was set to zero by
2423 	 * the VFS or MM) should not happen, but we had reports of an oops (due
2424 	 * to it being zero) during stress test cases, so we need to check for it.
2425 	 */
2426 
2427 	if (cifs_inode == NULL) {
2428 		cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2429 		dump_stack();
2430 		return rc;
2431 	}
2432 
2433 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2434 
2435 	/* only filter by fsuid on multiuser mounts */
2436 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2437 		fsuid_only = false;
2438 
2439 	spin_lock(&cifs_inode->open_file_lock);
2440 refind_writable:
2441 	if (refind > MAX_REOPEN_ATT) {
2442 		spin_unlock(&cifs_inode->open_file_lock);
2443 		return rc;
2444 	}
2445 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2446 		if (!any_available && open_file->pid != current->tgid)
2447 			continue;
2448 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2449 			continue;
2450 		if (with_delete && !(open_file->fid.access & DELETE))
2451 			continue;
2452 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2453 			if (!open_file->invalidHandle) {
2454 				/* found a good writable file */
2455 				cifsFileInfo_get(open_file);
2456 				spin_unlock(&cifs_inode->open_file_lock);
2457 				*ret_file = open_file;
2458 				return 0;
2459 			} else {
2460 				if (!inv_file)
2461 					inv_file = open_file;
2462 			}
2463 		}
2464 	}
2465 	/* couldn't find a usable FH with the same pid, try any available */
2466 	if (!any_available) {
2467 		any_available = true;
2468 		goto refind_writable;
2469 	}
2470 
2471 	if (inv_file) {
2472 		any_available = false;
2473 		cifsFileInfo_get(inv_file);
2474 	}
2475 
2476 	spin_unlock(&cifs_inode->open_file_lock);
2477 
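	/*
	 * No currently valid handle was found; as a last resort try to
	 * reopen an invalidated one, and if the reopen fails move it to the
	 * back of the list and retry the search.
	 */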
2478 	if (inv_file) {
2479 		rc = cifs_reopen_file(inv_file, false);
2480 		if (!rc) {
2481 			*ret_file = inv_file;
2482 			return 0;
2483 		}
2484 
2485 		spin_lock(&cifs_inode->open_file_lock);
2486 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2487 		spin_unlock(&cifs_inode->open_file_lock);
2488 		cifsFileInfo_put(inv_file);
2489 		++refind;
2490 		inv_file = NULL;
2491 		spin_lock(&cifs_inode->open_file_lock);
2492 		goto refind_writable;
2493 	}
2494 
2495 	return rc;
2496 }
2497 
2498 struct cifsFileInfo *
2499 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2500 {
2501 	struct cifsFileInfo *cfile;
2502 	int rc;
2503 
2504 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2505 	if (rc)
2506 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2507 
2508 	return cfile;
2509 }
2510 
2511 int
2512 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2513 		       int flags,
2514 		       struct cifsFileInfo **ret_file)
2515 {
2516 	struct cifsFileInfo *cfile;
2517 	void *page = alloc_dentry_path();
2518 
2519 	*ret_file = NULL;
2520 
2521 	spin_lock(&tcon->open_file_lock);
2522 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2523 		struct cifsInodeInfo *cinode;
2524 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2525 		if (IS_ERR(full_path)) {
2526 			spin_unlock(&tcon->open_file_lock);
2527 			free_dentry_path(page);
2528 			return PTR_ERR(full_path);
2529 		}
2530 		if (strcmp(full_path, name))
2531 			continue;
2532 
2533 		cinode = CIFS_I(d_inode(cfile->dentry));
2534 		spin_unlock(&tcon->open_file_lock);
2535 		free_dentry_path(page);
2536 		return cifs_get_writable_file(cinode, flags, ret_file);
2537 	}
2538 
2539 	spin_unlock(&tcon->open_file_lock);
2540 	free_dentry_path(page);
2541 	return -ENOENT;
2542 }
2543 
2544 int
2545 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2546 		       struct cifsFileInfo **ret_file)
2547 {
2548 	struct cifsFileInfo *cfile;
2549 	void *page = alloc_dentry_path();
2550 
2551 	*ret_file = NULL;
2552 
2553 	spin_lock(&tcon->open_file_lock);
2554 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2555 		struct cifsInodeInfo *cinode;
2556 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2557 		if (IS_ERR(full_path)) {
2558 			spin_unlock(&tcon->open_file_lock);
2559 			free_dentry_path(page);
2560 			return PTR_ERR(full_path);
2561 		}
2562 		if (strcmp(full_path, name))
2563 			continue;
2564 
2565 		cinode = CIFS_I(d_inode(cfile->dentry));
2566 		spin_unlock(&tcon->open_file_lock);
2567 		free_dentry_path(page);
2568 		*ret_file = find_readable_file(cinode, 0);
2569 		return *ret_file ? 0 : -ENOENT;
2570 	}
2571 
2572 	spin_unlock(&tcon->open_file_lock);
2573 	free_dentry_path(page);
2574 	return -ENOENT;
2575 }
2576 
2577 /*
2578  * Flush data on a strict file.
2579  */
2580 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2581 		      int datasync)
2582 {
2583 	unsigned int xid;
2584 	int rc = 0;
2585 	struct cifs_tcon *tcon;
2586 	struct TCP_Server_Info *server;
2587 	struct cifsFileInfo *smbfile = file->private_data;
2588 	struct inode *inode = file_inode(file);
2589 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2590 
2591 	rc = file_write_and_wait_range(file, start, end);
2592 	if (rc) {
2593 		trace_cifs_fsync_err(inode->i_ino, rc);
2594 		return rc;
2595 	}
2596 
2597 	xid = get_xid();
2598 
2599 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2600 		 file, datasync);
2601 
2602 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2603 		rc = cifs_zap_mapping(inode);
2604 		if (rc) {
2605 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2606 			rc = 0; /* don't care about it in fsync */
2607 		}
2608 	}
2609 
2610 	tcon = tlink_tcon(smbfile->tlink);
2611 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2612 		server = tcon->ses->server;
2613 		if (server->ops->flush == NULL) {
2614 			rc = -ENOSYS;
2615 			goto strict_fsync_exit;
2616 		}
2617 
2618 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2619 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2620 			if (smbfile) {
2621 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2622 				cifsFileInfo_put(smbfile);
2623 			} else
2624 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2625 		} else
2626 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2627 	}
2628 
2629 strict_fsync_exit:
2630 	free_xid(xid);
2631 	return rc;
2632 }
2633 
2634 /*
2635  * Flush data on a non-strict file.
2636  */
2637 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2638 {
2639 	unsigned int xid;
2640 	int rc = 0;
2641 	struct cifs_tcon *tcon;
2642 	struct TCP_Server_Info *server;
2643 	struct cifsFileInfo *smbfile = file->private_data;
2644 	struct inode *inode = file_inode(file);
2645 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2646 
2647 	rc = file_write_and_wait_range(file, start, end);
2648 	if (rc) {
2649 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2650 		return rc;
2651 	}
2652 
2653 	xid = get_xid();
2654 
2655 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2656 		 file, datasync);
2657 
2658 	tcon = tlink_tcon(smbfile->tlink);
2659 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2660 		server = tcon->ses->server;
2661 		if (server->ops->flush == NULL) {
2662 			rc = -ENOSYS;
2663 			goto fsync_exit;
2664 		}
2665 
2666 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2667 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2668 			if (smbfile) {
2669 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2670 				cifsFileInfo_put(smbfile);
2671 			} else
2672 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2673 		} else
2674 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2675 	}
2676 
2677 fsync_exit:
2678 	free_xid(xid);
2679 	return rc;
2680 }
2681 
2682 /*
2683  * As the file closes, flush all cached write data for this inode,
2684  * checking for write-behind errors.
2685  */
2686 int cifs_flush(struct file *file, fl_owner_t id)
2687 {
2688 	struct inode *inode = file_inode(file);
2689 	int rc = 0;
2690 
2691 	if (file->f_mode & FMODE_WRITE)
2692 		rc = filemap_write_and_wait(inode->i_mapping);
2693 
2694 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2695 	if (rc) {
2696 		/* get more nuanced writeback errors */
2697 		rc = filemap_check_wb_err(file->f_mapping, 0);
2698 		trace_cifs_flush_err(inode->i_ino, rc);
2699 	}
2700 	return rc;
2701 }
2702 
2703 static ssize_t
2704 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2705 {
2706 	struct file *file = iocb->ki_filp;
2707 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2708 	struct inode *inode = file->f_mapping->host;
2709 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2710 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2711 	ssize_t rc;
2712 
2713 	rc = netfs_start_io_write(inode);
2714 	if (rc < 0)
2715 		return rc;
2716 
2717 	/*
2718 	 * We need to hold the sem to be sure nobody modifies the lock list
2719 	 * with a brlock that prevents writing.
2720 	 */
2721 	down_read(&cinode->lock_sem);
2722 
2723 	rc = generic_write_checks(iocb, from);
2724 	if (rc <= 0)
2725 		goto out;
2726 
2727 	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2728 				     server->vals->exclusive_lock_type, 0,
2729 				     NULL, CIFS_WRITE_OP))
2730 		rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2731 	else
2732 		rc = -EACCES;
2733 out:
2734 	up_read(&cinode->lock_sem);
2735 	netfs_end_io_write(inode);
2736 	if (rc > 0)
2737 		rc = generic_write_sync(iocb, rc);
2738 	return rc;
2739 }
2740 
2741 ssize_t
2742 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2743 {
2744 	struct inode *inode = file_inode(iocb->ki_filp);
2745 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2746 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2747 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2748 						iocb->ki_filp->private_data;
2749 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2750 	ssize_t written;
2751 
2752 	written = cifs_get_writer(cinode);
2753 	if (written)
2754 		return written;
2755 
2756 	if (CIFS_CACHE_WRITE(cinode)) {
2757 		if (cap_unix(tcon->ses) &&
2758 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2759 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2760 			written = netfs_file_write_iter(iocb, from);
2761 			goto out;
2762 		}
2763 		written = cifs_writev(iocb, from);
2764 		goto out;
2765 	}
2766 	/*
2767 	 * For non-oplocked files in strict cache mode we need to write the data
2768 	 * to the server exactly from pos to pos+len-1 rather than flush all
2769 	 * affected pages, because doing so may cause an error with mandatory
2770 	 * locks on these pages but not on the region from pos to pos+len-1.
2771 	 */
2772 	written = netfs_file_write_iter(iocb, from);
2773 	if (CIFS_CACHE_READ(cinode)) {
2774 		/*
2775 		 * We have read level caching and we have just sent a write
2776 		 * request to the server thus making data in the cache stale.
2777 		 * Zap the cache and set oplock/lease level to NONE to avoid
2778 		 * reading stale data from the cache. All subsequent read
2779 		 * operations will read new data from the server.
2780 		 */
2781 		cifs_zap_mapping(inode);
2782 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2783 			 inode);
2784 		cinode->oplock = 0;
2785 	}
2786 out:
2787 	cifs_put_writer(cinode);
2788 	return written;
2789 }
2790 
2791 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2792 {
2793 	ssize_t rc;
2794 	struct inode *inode = file_inode(iocb->ki_filp);
2795 
2796 	if (iocb->ki_flags & IOCB_DIRECT)
2797 		return netfs_unbuffered_read_iter(iocb, iter);
2798 
2799 	rc = cifs_revalidate_mapping(inode);
2800 	if (rc)
2801 		return rc;
2802 
2803 	return netfs_file_read_iter(iocb, iter);
2804 }
2805 
2806 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2807 {
2808 	struct inode *inode = file_inode(iocb->ki_filp);
2809 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2810 	ssize_t written;
2811 	int rc;
2812 
2813 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2814 		written = netfs_unbuffered_write_iter(iocb, from);
2815 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2816 			cifs_zap_mapping(inode);
2817 			cifs_dbg(FYI,
2818 				 "Set no oplock for inode=%p after a write operation\n",
2819 				 inode);
2820 			cinode->oplock = 0;
2821 		}
2822 		return written;
2823 	}
2824 
2825 	written = cifs_get_writer(cinode);
2826 	if (written)
2827 		return written;
2828 
2829 	written = netfs_file_write_iter(iocb, from);
2830 
2831 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2832 		rc = filemap_fdatawrite(inode->i_mapping);
2833 		if (rc)
2834 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2835 				 rc, inode);
2836 	}
2837 
2838 	cifs_put_writer(cinode);
2839 	return written;
2840 }
2841 
2842 ssize_t
2843 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2844 {
2845 	struct inode *inode = file_inode(iocb->ki_filp);
2846 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2847 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2848 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2849 						iocb->ki_filp->private_data;
2850 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2851 	int rc = -EACCES;
2852 
2853 	/*
2854 	 * In strict cache mode we need to read from the server all the time
2855 	 * if we don't have a level II oplock because the server can delay the
2856 	 * mtime change - so we can't decide whether to invalidate the inode.
2857 	 * We can also fail with page reading if there are mandatory locks
2858 	 * on pages affected by this read but not on the region from pos to
2859 	 * pos+len-1.
2860 	 */
2861 	if (!CIFS_CACHE_READ(cinode))
2862 		return netfs_unbuffered_read_iter(iocb, to);
2863 
2864 	if (cap_unix(tcon->ses) &&
2865 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2866 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2867 		if (iocb->ki_flags & IOCB_DIRECT)
2868 			return netfs_unbuffered_read_iter(iocb, to);
2869 		return netfs_buffered_read_iter(iocb, to);
2870 	}
2871 
2872 	/*
2873 	 * We need to hold the sem to be sure nobody modifies the lock list
2874 	 * with a brlock that prevents reading.
2875 	 */
2876 	if (iocb->ki_flags & IOCB_DIRECT) {
2877 		rc = netfs_start_io_direct(inode);
2878 		if (rc < 0)
2879 			goto out;
2880 		down_read(&cinode->lock_sem);
2881 		if (!cifs_find_lock_conflict(
2882 			    cfile, iocb->ki_pos, iov_iter_count(to),
2883 			    tcon->ses->server->vals->shared_lock_type,
2884 			    0, NULL, CIFS_READ_OP))
2885 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2886 		up_read(&cinode->lock_sem);
2887 		netfs_end_io_direct(inode);
2888 	} else {
2889 		rc = netfs_start_io_read(inode);
2890 		if (rc < 0)
2891 			goto out;
2892 		down_read(&cinode->lock_sem);
2893 		if (!cifs_find_lock_conflict(
2894 			    cfile, iocb->ki_pos, iov_iter_count(to),
2895 			    tcon->ses->server->vals->shared_lock_type,
2896 			    0, NULL, CIFS_READ_OP))
2897 			rc = filemap_read(iocb, to, 0);
2898 		up_read(&cinode->lock_sem);
2899 		netfs_end_io_read(inode);
2900 	}
2901 out:
2902 	return rc;
2903 }
2904 
2905 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2906 {
2907 	return netfs_page_mkwrite(vmf, NULL);
2908 }
2909 
2910 static const struct vm_operations_struct cifs_file_vm_ops = {
2911 	.fault = filemap_fault,
2912 	.map_pages = filemap_map_pages,
2913 	.page_mkwrite = cifs_page_mkwrite,
2914 };
2915 
2916 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2917 {
2918 	int xid, rc = 0;
2919 	struct inode *inode = file_inode(file);
2920 
2921 	xid = get_xid();
2922 
2923 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
2924 		rc = cifs_zap_mapping(inode);
2925 	if (!rc)
2926 		rc = generic_file_mmap(file, vma);
2927 	if (!rc)
2928 		vma->vm_ops = &cifs_file_vm_ops;
2929 
2930 	free_xid(xid);
2931 	return rc;
2932 }
2933 
2934 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2935 {
2936 	int rc, xid;
2937 
2938 	xid = get_xid();
2939 
2940 	rc = cifs_revalidate_file(file);
2941 	if (rc)
2942 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2943 			 rc);
2944 	if (!rc)
2945 		rc = generic_file_mmap(file, vma);
2946 	if (!rc)
2947 		vma->vm_ops = &cifs_file_vm_ops;
2948 
2949 	free_xid(xid);
2950 	return rc;
2951 }
2952 
2953 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2954 {
2955 	struct cifsFileInfo *open_file;
2956 
2957 	spin_lock(&cifs_inode->open_file_lock);
2958 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2959 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2960 			spin_unlock(&cifs_inode->open_file_lock);
2961 			return 1;
2962 		}
2963 	}
2964 	spin_unlock(&cifs_inode->open_file_lock);
2965 	return 0;
2966 }
2967 
2968 /* We do not want to update the file size from the server for inodes
2969    open for write, to avoid races with writepage extending the file.
2970    In the future we could consider allowing the inode to be refreshed
2971    only on increases in the file size, but this is tricky to do
2972    without racing with write-behind page caching in the current
2973    Linux kernel design. */
2974 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
2975 			    bool from_readdir)
2976 {
2977 	if (!cifsInode)
2978 		return true;
2979 
2980 	if (is_inode_writable(cifsInode) ||
2981 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
2982 		/* This inode is open for write at least once */
2983 		struct cifs_sb_info *cifs_sb;
2984 
2985 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
2986 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2987 			/* since no page cache to corrupt on directio
2988 			/* since there is no page cache to corrupt on direct
2989 			   I/O, we can change the size safely */
2990 		}
2991 
2992 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
2993 			return true;
2994 
2995 		return false;
2996 	} else
2997 		return true;
2998 }
2999 
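/*
 * Illustrative sketch (hypothetical caller, not part of this file): an
 * attribute-refresh path would be expected to gate server-reported size
 * updates on the check above, along these lines.
 */
#if 0	/* example only; cifs_i, inode, end_of_file are assumed names */
	if (is_size_safe_to_change(cifs_i, end_of_file, from_readdir))
		i_size_write(inode, end_of_file);	/* accept server size */
	/* otherwise keep the locally cached size */
#endif
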
3000 void cifs_oplock_break(struct work_struct *work)
3001 {
3002 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3003 						  oplock_break);
3004 	struct inode *inode = d_inode(cfile->dentry);
3005 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3006 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3007 	struct cifs_tcon *tcon;
3008 	struct TCP_Server_Info *server;
3009 	struct tcon_link *tlink;
3010 	int rc = 0;
3011 	bool purge_cache = false, oplock_break_cancelled;
3012 	__u64 persistent_fid, volatile_fid;
3013 	__u16 net_fid;
3014 
3015 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3016 			TASK_UNINTERRUPTIBLE);
3017 
3018 	tlink = cifs_sb_tlink(cifs_sb);
3019 	if (IS_ERR(tlink))
3020 		goto out;
3021 	tcon = tlink_tcon(tlink);
3022 	server = tcon->ses->server;
3023 
3024 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3025 				      cfile->oplock_epoch, &purge_cache);
3026 
3027 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3028 						cifs_has_mand_locks(cinode)) {
3029 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3030 			 inode);
3031 		cinode->oplock = 0;
3032 	}
3033 
3034 	if (inode && S_ISREG(inode->i_mode)) {
3035 		if (CIFS_CACHE_READ(cinode))
3036 			break_lease(inode, O_RDONLY);
3037 		else
3038 			break_lease(inode, O_WRONLY);
3039 		rc = filemap_fdatawrite(inode->i_mapping);
3040 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3041 			rc = filemap_fdatawait(inode->i_mapping);
3042 			mapping_set_error(inode->i_mapping, rc);
3043 			cifs_zap_mapping(inode);
3044 		}
3045 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3046 		if (CIFS_CACHE_WRITE(cinode))
3047 			goto oplock_break_ack;
3048 	}
3049 
3050 	rc = cifs_push_locks(cfile);
3051 	if (rc)
3052 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3053 
3054 oplock_break_ack:
3055 	/*
3056 	 * When an oplock break is received and there are no active file
3057 	 * handles, only cached ones, schedule the deferred close immediately
3058 	 * so that a new open will not use the cached handle.
3059 	 */
3060 
3061 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3062 		cifs_close_deferred_file(cinode);
3063 
3064 	persistent_fid = cfile->fid.persistent_fid;
3065 	volatile_fid = cfile->fid.volatile_fid;
3066 	net_fid = cfile->fid.netfid;
3067 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3068 
3069 	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3070 	/*
3071 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3072 	 * an acknowledgment to be sent when the file has already been closed.
3073 	 */
3074 	spin_lock(&cinode->open_file_lock);
3075 	/* check list non-empty as this can race with kill_sb calling tree disconnect */
3076 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3077 		spin_unlock(&cinode->open_file_lock);
3078 		rc = server->ops->oplock_response(tcon, persistent_fid,
3079 						  volatile_fid, net_fid, cinode);
3080 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3081 	} else
3082 		spin_unlock(&cinode->open_file_lock);
3083 
3084 	cifs_put_tlink(tlink);
3085 out:
3086 	cifs_done_oplock_break(cinode);
3087 }
3088 
3089 static int cifs_swap_activate(struct swap_info_struct *sis,
3090 			      struct file *swap_file, sector_t *span)
3091 {
3092 	struct cifsFileInfo *cfile = swap_file->private_data;
3093 	struct inode *inode = swap_file->f_mapping->host;
3094 	unsigned long blocks;
3095 	long long isize;
3096 
3097 	cifs_dbg(FYI, "swap activate\n");
3098 
3099 	if (!swap_file->f_mapping->a_ops->swap_rw)
3100 		/* Cannot support swap */
3101 		return -EINVAL;
3102 
3103 	spin_lock(&inode->i_lock);
3104 	blocks = inode->i_blocks;
3105 	isize = inode->i_size;
3106 	spin_unlock(&inode->i_lock);
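	/*
	 * Reject swapfiles with holes. Worked example (illustrative numbers):
	 * a fully allocated 1 MiB file must report i_blocks >= 1048576 / 512
	 * = 2048, since i_blocks counts 512-byte units.
	 */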
3107 	if (blocks * 512 < isize) {
3108 		pr_warn("swap activate: swapfile has holes\n");
3109 		return -EINVAL;
3110 	}
3111 	*span = sis->pages;
3112 
3113 	pr_warn_once("Swap support over SMB3 is experimental\n");
3114 
3115 	/*
3116 	 * TODO: consider adding ACL (or documenting how) to prevent other
3117 	 * users (on this or other systems) from reading it
3118 	 */
3119 
3121 	/* TODO: add sk_set_memalloc(inet) or similar */
3122 
3123 	if (cfile)
3124 		cfile->swapfile = true;
3125 	/*
3126 	 * TODO: Since file already open, we can't open with DENY_ALL here
3127 	 * but we could add call to grab a byte range lock to prevent others
3128 	 * from reading or writing the file
3129 	 */
3130 
3131 	sis->flags |= SWP_FS_OPS;
3132 	return add_swap_extent(sis, 0, sis->max, 0);
3133 }
3134 
3135 static void cifs_swap_deactivate(struct file *file)
3136 {
3137 	struct cifsFileInfo *cfile = file->private_data;
3138 
3139 	cifs_dbg(FYI, "swap deactivate\n");
3140 
3141 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3142 
3143 	if (cfile)
3144 		cfile->swapfile = false;
3145 
3146 	/* do we need to unpin (or unlock) the file? */
3147 }
3148 
3149 /**
3150  * cifs_swap_rw - SMB3 address space operation for swap I/O
3151  * @iocb: target I/O control block
3152  * @iter: I/O buffer
3153  *
3154  * Perform I/O to the swapfile.  This is much like direct I/O.
3155  */
3156 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3157 {
3158 	ssize_t ret;
3159 
3160 	if (iov_iter_rw(iter) == READ)
3161 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3162 	else
3163 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3164 	if (ret < 0)
3165 		return ret;
3166 	return 0;
3167 }
3168 
3169 const struct address_space_operations cifs_addr_ops = {
3170 	.read_folio	= netfs_read_folio,
3171 	.readahead	= netfs_readahead,
3172 	.writepages	= netfs_writepages,
3173 	.dirty_folio	= netfs_dirty_folio,
3174 	.release_folio	= netfs_release_folio,
3175 	.direct_IO	= noop_direct_IO,
3176 	.invalidate_folio = netfs_invalidate_folio,
3177 	.migrate_folio	= filemap_migrate_folio,
3178 	/*
3179 	 * TODO: investigate whether adding an is_dirty_writeback helper
3180 	 * would be useful here
3181 	 */
3182 	.swap_activate	= cifs_swap_activate,
3183 	.swap_deactivate = cifs_swap_deactivate,
3184 	.swap_rw = cifs_swap_rw,
3185 };
3186 
3187 /*
3188  * cifs_readahead requires the server to support a buffer large enough to
3189  * contain the header plus one complete page of data.  Otherwise, we need
3190  * to leave cifs_readahead out of the address space operations.
3191  */
3192 const struct address_space_operations cifs_addr_ops_smallbuf = {
3193 	.read_folio	= netfs_read_folio,
3194 	.writepages	= netfs_writepages,
3195 	.dirty_folio	= netfs_dirty_folio,
3196 	.release_folio	= netfs_release_folio,
3197 	.invalidate_folio = netfs_invalidate_folio,
3198 	.migrate_folio	= filemap_migrate_folio,
3199 };
3200