xref: /linux/fs/smb/client/file.c (revision f990ad67f0febc51274adb604d5bdeab0d06d024)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/fs_struct.h>
13 #include <linux/filelock.h>
14 #include <linux/backing-dev.h>
15 #include <linux/stat.h>
16 #include <linux/fcntl.h>
17 #include <linux/pagemap.h>
18 #include <linux/pagevec.h>
19 #include <linux/writeback.h>
20 #include <linux/task_io_accounting_ops.h>
21 #include <linux/delay.h>
22 #include <linux/mount.h>
23 #include <linux/slab.h>
24 #include <linux/swap.h>
25 #include <linux/mm.h>
26 #include <asm/div64.h>
27 #include "cifsfs.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
	size_t wsize = req->rreq.wsize;
	int rc;

	/* Allocate an xid lazily, once per subrequest; it is released in
	 * cifs_free_subrequest().
	 */
	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	/* Pick a channel of the session to send this write over. */
	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

	/* wsize == 0 means the write size has not been negotiated yet;
	 * negotiate it before reserving credits below.
	 */
	if (cifs_sb->ctx->wsize == 0)
		cifs_negotiate_wsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

retry:
	/* A stale handle must be reopened before we can write through it.
	 * -EAGAIN from the reopen means "try again"; any other error fails
	 * the subrequest.
	 */
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	/* Reserve credits; this also caps the stream's maximum subrequest
	 * length to what the granted credits allow.
	 */
	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* smbdirect caps the number of segments at the FRMR depth. */
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		stream->sreq_max_segs = sp->max_frmr_depth;
	}
#endif
}
108 
/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	/* Refuse new I/O once the superblock has been forcibly shut down. */
	if (cifs_forced_shutdown(sbi)) {
		rc = smb_EIO(smb_eio_trace_forced_shutdown);
		goto fail;
	}

	/* Trim the credits reserved in cifs_prepare_write() to the actual
	 * size of this subrequest.
	 */
	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	/* The handle went stale since prepare; fail with -EAGAIN so the
	 * write is retried rather than erroring out.
	 */
	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	/* Return any reserved credits and complete the subrequest with rc. */
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc);
	goto out;
}
145 
/*
 * netfs ->invalidate_cache hook: invalidate the cache contents for the
 * inode backing this write request.
 */
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}
150 
/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	/* Allocate an xid lazily, once per subrequest; it is released in
	 * cifs_free_subrequest().
	 */
	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	/* Pick a channel of the session to send this read over. */
	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	/* rsize == 0 means the read size has not been negotiated yet;
	 * negotiate it before reserving credits below.
	 */
	if (cifs_sb->ctx->rsize == 0)
		cifs_negotiate_rsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

	/* Reserve credits; @size comes back as the amount the credits
	 * actually cover and caps the subrequest length below.
	 */
	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* smbdirect caps the number of segments at the FRMR depth. */
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
	}
#endif
	return 0;
}
203 
/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	/* Resize the credit reservation to match this subrequest. */
	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	/* Reopen a stale handle, retrying the reopen for as long as it
	 * keeps returning -EAGAIN.
	 */
	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	/* For buffered reads, let netfs clear the tail of the buffer past
	 * what was transferred; DIO/unbuffered reads must not touch the
	 * user buffer beyond the amount actually read.
	 */
	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
	    subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	subreq->error = rc;
	netfs_read_subreq_terminated(subreq);
}
248 
/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	/* Borrow any writable open handle for the inode.  Without one the
	 * upload stream is left unavailable and nothing gets written.
	 */
	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}
266 
/*
 * Initialise a request: set the negotiated read/write sizes, record the
 * issuing pid and pin the open file handle (when there is one).
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	/* NOTE(review): this may run from a workqueue, in which case
	 * current->tgid is the worker's pid, not the originating task's --
	 * confirm whether that matters for the callers of req->pid.
	 */
	req->pid = current->tgid;

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		/* Pin the handle for the life of the request; dropped in
		 * cifs_free_request().
		 */
		req->cfile = cifsFileInfo_get(open_file);
		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		/* Only writeback may arrive without a file; writeback gets
		 * its handle later in cifs_begin_writeback().
		 */
		WARN_ON_ONCE(1);
		return smb_EIO1(smb_eio_trace_not_netfs_writeback, rreq->origin);
	}

	return 0;
}
293 
294 /*
295  * Completion of a request operation.
296  */
297 static void cifs_rreq_done(struct netfs_io_request *rreq)
298 {
299 	struct timespec64 atime, mtime;
300 	struct inode *inode = rreq->inode;
301 
302 	/* we do not want atime to be less than mtime, it broke some apps */
303 	atime = inode_set_atime_to_ts(inode, current_time(inode));
304 	mtime = inode_get_mtime(inode);
305 	if (timespec64_compare(&atime, &mtime))
306 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
307 }
308 
/*
 * Clean up a request: drop the handle reference taken by
 * cifs_init_request()/cifs_begin_writeback(), if any.
 */
static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}
316 
317 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
318 {
319 	struct cifs_io_subrequest *rdata =
320 		container_of(subreq, struct cifs_io_subrequest, subreq);
321 	int rc = subreq->error;
322 
323 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
324 #ifdef CONFIG_CIFS_SMB_DIRECT
325 		if (rdata->mr) {
326 			smbd_deregister_mr(rdata->mr);
327 			rdata->mr = NULL;
328 		}
329 #endif
330 	}
331 
332 	if (rdata->credits.value != 0) {
333 		trace_smb3_rw_credits(rdata->rreq->debug_id,
334 				      rdata->subreq.debug_index,
335 				      rdata->credits.value,
336 				      rdata->server ? rdata->server->credits : 0,
337 				      rdata->server ? rdata->server->in_flight : 0,
338 				      -rdata->credits.value,
339 				      cifs_trace_rw_credits_free_subreq);
340 		if (rdata->server)
341 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
342 		else
343 			rdata->credits.value = 0;
344 	}
345 
346 	if (rdata->have_xid)
347 		free_xid(rdata->xid);
348 }
349 
/*
 * Hooks connecting cifs to the netfs library, covering both reads
 * (prepare/issue/done) and writeback (begin/prepare/issue/invalidate),
 * plus the request/subrequest memory pools and teardown callbacks.
 */
const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};
364 
365 /*
366  * Mark as invalid, all open files on tree connections since they
367  * were closed when session to server was lost.
368  */
369 void
370 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
371 {
372 	struct cifsFileInfo *open_file = NULL;
373 	struct list_head *tmp;
374 	struct list_head *tmp1;
375 
376 	/* only send once per connect */
377 	spin_lock(&tcon->tc_lock);
378 	if (tcon->need_reconnect)
379 		tcon->status = TID_NEED_RECON;
380 
381 	if (tcon->status != TID_NEED_RECON) {
382 		spin_unlock(&tcon->tc_lock);
383 		return;
384 	}
385 	tcon->status = TID_IN_FILES_INVALIDATE;
386 	spin_unlock(&tcon->tc_lock);
387 
388 	/* list all files open on tree connection and mark them invalid */
389 	spin_lock(&tcon->open_file_lock);
390 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
391 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
392 		open_file->invalidHandle = true;
393 		open_file->oplock_break_cancelled = true;
394 	}
395 	spin_unlock(&tcon->open_file_lock);
396 
397 	invalidate_all_cached_dirs(tcon);
398 	spin_lock(&tcon->tc_lock);
399 	if (tcon->status == TID_IN_FILES_INVALIDATE)
400 		tcon->status = TID_NEED_TCON;
401 	spin_unlock(&tcon->tc_lock);
402 
403 	/*
404 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
405 	 * to this tcon.
406 	 */
407 }
408 
409 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
410 {
411 	if ((flags & O_ACCMODE) == O_RDONLY)
412 		return GENERIC_READ;
413 	else if ((flags & O_ACCMODE) == O_WRONLY)
414 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
415 	else if ((flags & O_ACCMODE) == O_RDWR) {
416 		/* GENERIC_ALL is too much permission to request
417 		   can cause unnecessary access denied on create */
418 		/* return GENERIC_ALL; */
419 		return (GENERIC_READ | GENERIC_WRITE);
420 	}
421 
422 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
423 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
424 		FILE_READ_DATA);
425 }
426 
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Translate VFS open flags into the SMB_O_* flags used by the legacy
 * SMB1 POSIX open/create call.
 */
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		posix_flags = SMB_O_RDONLY;
		break;
	case O_WRONLY:
		posix_flags = SMB_O_WRONLY;
		break;
	case O_RDWR:
		posix_flags = SMB_O_RDWR;
		break;
	default:
		posix_flags = 0;
		break;
	}

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL) {
		/* O_EXCL without O_CREAT has no defined effect; ignore it. */
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);
	}

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
462 
463 static inline int cifs_get_disposition(unsigned int flags)
464 {
465 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
466 		return FILE_CREATE;
467 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
468 		return FILE_OVERWRITE_IF;
469 	else if ((flags & O_CREAT) == O_CREAT)
470 		return FILE_OPEN_IF;
471 	else if ((flags & O_TRUNC) == O_TRUNC)
472 		return FILE_OVERWRITE;
473 	else
474 		return FILE_OPEN;
475 }
476 
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Open a file using the legacy SMB1 POSIX extensions.
 *
 * @full_path:	path of the file on the share
 * @pinode:	in/out inode slot: NULL if the caller needs no inode info;
 *		*pinode == NULL to get a fresh inode; otherwise the existing
 *		inode is revalidated from the returned attributes
 * @sb:		superblock of the mount
 * @mode:	create mode (the current umask is applied below)
 * @f_flags:	VFS open flags, translated via cifs_posix_convert_flags()
 * @poplock:	out: oplock granted by the server
 * @pnetfid:	out: file handle returned by the server
 * @xid:	transaction id for this operation
 *
 * Returns 0 on success, negative errno on failure.
 */
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc_obj(FILE_UNIX_BASIC_INFO);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server returned no usable file info. */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* Refresh an existing inode from the returned attributes. */
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
540 
/*
 * Open a file using the NT-style (non-POSIX) create call.
 *
 * Translates the VFS flags into desired access and create disposition,
 * performs the open via the dialect's ->open op, and then fetches inode
 * metadata for the opened path.  When fscache is active on the inode, a
 * write-only open is first attempted as read+write (so the cache can be
 * populated); on -EACCES it falls back to the plain write-only access
 * and invalidates the cache for direct writes.
 *
 * Returns 0 on success; on metadata-fetch failure the handle is closed
 * again and -ESTALE is mapped to -EOPENSTALE.
 */
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		/* The read+write-for-fscache upgrade was refused; retry
		 * once with the access the caller actually asked for.
		 */
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		/* Metadata fetch failed: give the handle back and report a
		 * stale open distinctly from a generic -ESTALE.
		 */
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}
636 
637 static bool
638 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
639 {
640 	struct cifs_fid_locks *cur;
641 	bool has_locks = false;
642 
643 	down_read(&cinode->lock_sem);
644 	list_for_each_entry(cur, &cinode->llist, llist) {
645 		if (!list_empty(&cur->locks)) {
646 			has_locks = true;
647 			break;
648 		}
649 	}
650 	up_read(&cinode->lock_sem);
651 	return has_locks;
652 }
653 
/*
 * Acquire a lock_sem for writing by polling with down_write_trylock()
 * and a 10ms sleep instead of blocking in down_write().
 *
 * NOTE(review): presumably this sidesteps a lock-ordering/blocking
 * problem with another lock_sem user -- confirm the rationale before
 * changing this to a plain down_write().
 */
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}
660 
661 static void cifsFileInfo_put_work(struct work_struct *work);
662 void serverclose_work(struct work_struct *work);
663 
/*
 * Allocate and initialise a cifsFileInfo for a freshly opened handle,
 * link it into the tcon's and inode's open-file lists, consume the
 * pending-open record and store the fid via the dialect's ->set_fid op.
 *
 * @fid:		server file handle plus its pending_open record
 * @file:		the VFS file being opened; gets ->private_data set
 * @tlink:		tree-connect link; a reference is taken
 * @oplock:		oplock/lease level granted by the server
 * @symlink_target:	optional symlink target string (duplicated)
 *
 * Returns the new cifsFileInfo (refcount 1) or NULL on allocation failure.
 */
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc_obj(struct cifsFileInfo);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc_obj(struct cifs_fid_locks);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	/* Per-handle lock list, hung off the inode's llist below. */
	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	/* Lock order: tcon->open_file_lock, then cinode->open_file_lock. */
	spin_lock(&tcon->open_file_lock);
	/* A lease break may have updated the pending_open's oplock level
	 * in the meantime; take the newer value.
	 */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	/* Cleared before set_fid, which may set it; checked after unlock. */
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
753 
/*
 * Take an extra reference on an open file's private data, under its
 * file_info_lock.  Pairs with cifsFileInfo_put().
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
762 
/*
 * Final teardown once the refcount has dropped to zero: discard any
 * remaining byte-range lock records, then release the tlink, dentry and
 * symlink-target references and free the structure itself.
 */
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	/* Unhook this handle's lock list from the inode and free it. */
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}
788 
/*
 * Workqueue wrapper for cifsFileInfo_put_final(), used when the final
 * put has been offloaded to fileinfo_put_wq.
 */
static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}
796 
797 void serverclose_work(struct work_struct *work)
798 {
799 	struct cifsFileInfo *cifs_file = container_of(work,
800 			struct cifsFileInfo, serverclose);
801 
802 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
803 
804 	struct TCP_Server_Info *server = tcon->ses->server;
805 	int rc = 0;
806 	int retries = 0;
807 	int MAX_RETRIES = 4;
808 
809 	do {
810 		if (server->ops->close_getattr)
811 			rc = server->ops->close_getattr(0, tcon, cifs_file);
812 		else if (server->ops->close)
813 			rc = server->ops->close(0, tcon, &cifs_file->fid);
814 
815 		if (rc == -EBUSY || rc == -EAGAIN) {
816 			retries++;
817 			msleep(250);
818 		}
819 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
820 	);
821 
822 	if (retries == MAX_RETRIES)
823 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
824 
825 	if (cifs_file->offload)
826 		queue_work(fileinfo_put_wq, &cifs_file->put);
827 	else
828 		cifsFileInfo_put_final(cifs_file);
829 }
830 
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 * The final put (and any server-side close) is offloaded to a workqueue.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}
842 
843 /**
844  * _cifsFileInfo_put - release a reference of file priv data
845  *
846  * This may involve closing the filehandle @cifs_file out on the
847  * server. Must be called without holding tcon->open_file_lock,
848  * cinode->open_file_lock and cifs_file->file_info_lock.
849  *
850  * If @wait_for_oplock_handler is true and we are releasing the last
851  * reference, wait for any running oplock break handler of the file
852  * and cancel any pending one.
853  *
854  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
855  * @wait_oplock_handler: must be false if called from oplock_break_handler
856  * @offload:	not offloaded on close and oplock breaks
857  *
858  */
859 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
860 		       bool wait_oplock_handler, bool offload)
861 {
862 	struct inode *inode = d_inode(cifs_file->dentry);
863 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
864 	struct TCP_Server_Info *server = tcon->ses->server;
865 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
866 	struct super_block *sb = inode->i_sb;
867 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
868 	struct cifs_fid fid = {};
869 	struct cifs_pending_open open;
870 	bool oplock_break_cancelled;
871 	bool serverclose_offloaded = false;
872 
873 	spin_lock(&tcon->open_file_lock);
874 	spin_lock(&cifsi->open_file_lock);
875 	spin_lock(&cifs_file->file_info_lock);
876 
877 	cifs_file->offload = offload;
878 	if (--cifs_file->count > 0) {
879 		spin_unlock(&cifs_file->file_info_lock);
880 		spin_unlock(&cifsi->open_file_lock);
881 		spin_unlock(&tcon->open_file_lock);
882 		return;
883 	}
884 	spin_unlock(&cifs_file->file_info_lock);
885 
886 	if (server->ops->get_lease_key)
887 		server->ops->get_lease_key(inode, &fid);
888 
889 	/* store open in pending opens to make sure we don't miss lease break */
890 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
891 
892 	/* remove it from the lists */
893 	list_del(&cifs_file->flist);
894 	list_del(&cifs_file->tlist);
895 	atomic_dec(&tcon->num_local_opens);
896 
897 	if (list_empty(&cifsi->openFileList)) {
898 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
899 			 d_inode(cifs_file->dentry));
900 		/*
901 		 * In strict cache mode we need invalidate mapping on the last
902 		 * close  because it may cause a error when we open this file
903 		 * again and get at least level II oplock.
904 		 */
905 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_STRICT_IO)
906 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
907 		cifs_set_oplock_level(cifsi, 0);
908 	}
909 
910 	spin_unlock(&cifsi->open_file_lock);
911 	spin_unlock(&tcon->open_file_lock);
912 
913 	oplock_break_cancelled = wait_oplock_handler ?
914 		cancel_work_sync(&cifs_file->oplock_break) : false;
915 
916 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
917 		struct TCP_Server_Info *server = tcon->ses->server;
918 		unsigned int xid;
919 		int rc = 0;
920 
921 		xid = get_xid();
922 		if (server->ops->close_getattr)
923 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
924 		else if (server->ops->close)
925 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
926 		_free_xid(xid);
927 
928 		if (rc == -EBUSY || rc == -EAGAIN) {
929 			// Server close failed, hence offloading it as an async op
930 			queue_work(serverclose_wq, &cifs_file->serverclose);
931 			serverclose_offloaded = true;
932 		}
933 	}
934 
935 	if (oplock_break_cancelled)
936 		cifs_done_oplock_break(cifsi);
937 
938 	cifs_del_pending_open(&open);
939 
940 	// if serverclose has been offloaded to wq (on failure), it will
941 	// handle offloading put as well. If serverclose not offloaded,
942 	// we need to handle offloading put here.
943 	if (!serverclose_offloaded) {
944 		if (offload)
945 			queue_work(fileinfo_put_wq, &cifs_file->put);
946 		else
947 			cifsFileInfo_put_final(cifs_file);
948 	}
949 }
950 
/*
 * Flush a file's data on the server.
 *
 * A no-op if the mount has the CIFS_MOUNT_NOSSYNC flag set.  Uses
 * @cfile's handle if it is open for write; otherwise borrows any
 * writable handle for the inode.  -EBADF from the handle lookup (no
 * writable handle at all) is treated as success since there is nothing
 * to flush through.
 */
int cifs_file_flush(const unsigned int xid, struct inode *inode,
		    struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifs_tcon *tcon;
	int rc;

	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)
		return 0;

	if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
		tcon = tlink_tcon(cfile->tlink);
		return tcon->ses->server->ops->flush(xid, tcon,
						     &cfile->fid);
	}
	/* Caller's handle is not writable; find one that is. */
	rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
	if (!rc) {
		tcon = tlink_tcon(cfile->tlink);
		rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
		cifsFileInfo_put(cfile);
	} else if (rc == -EBADF) {
		rc = 0;
	}
	return rc;
}
976 
/*
 * Truncate the file behind @dentry to zero length: write back and flush
 * dirty data, set the size to 0 on the server through a writable handle
 * (when one exists), then shrink the local inode and netfs state.
 *
 * NOTE(review): if no writable handle is found, only the local size is
 * reset -- presumably the server-side truncation happens elsewhere in
 * that path; confirm against the caller.
 */
static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
	struct inode *inode = d_inode(dentry);
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	int rc;

	rc = filemap_write_and_wait(inode->i_mapping);
	if (is_interrupt_error(rc))
		return -ERESTARTSYS;
	mapping_set_error(inode->i_mapping, rc);

	cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
	rc = cifs_file_flush(xid, inode, cfile);
	if (!rc) {
		if (cfile) {
			tcon = tlink_tcon(cfile->tlink);
			server = tcon->ses->server;
			rc = server->ops->set_file_size(xid, tcon,
							cfile, 0, false);
		}
		if (!rc) {
			/* Mirror the new zero size locally. */
			netfs_resize_file(&cinode->netfs, 0, true);
			cifs_setsize(inode, 0);
			inode->i_blocks = 0;
		}
	}
	if (cfile)
		cifsFileInfo_put(cfile);
	return rc;
}
1010 
/*
 * VFS ->open() for regular files on a CIFS/SMB mount.
 *
 * Handles O_DIRECT f_op selection, O_TRUNC via cifs_do_truncate(), reuse
 * of a deferred-close cached handle when the open flags are compatible,
 * the legacy SMB1 POSIX open path, and finally a regular NT-style open.
 * On success the new cifsFileInfo is stored in file->private_data.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifs_open_info_data data = {};
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_pending_open open;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	const char *full_path;
	unsigned int sbflags;
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	void *page;

	xid = get_xid();

	/* Refuse all new opens once the superblock has been shut down. */
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return smb_EIO(smb_eio_trace_forced_shutdown);
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* strictcache + O_DIRECT mounts need the direct-I/O file ops. */
	sbflags = cifs_sb_flags(cifs_sb);
	if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) {
		if (sbflags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (file->f_flags & O_TRUNC) {
		rc = cifs_do_truncate(xid, file_dentry(file));
		if (rc)
			goto out;
	}

	/* Get the cached handle as SMB2 close is deferred */
	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
		rc = cifs_get_writable_path(tcon, full_path,
					    FIND_WR_FSUID_ONLY |
					    FIND_WR_NO_PENDING_DELETE,
					    &cfile);
	} else {
		rc = cifs_get_readable_path(tcon, full_path, &cfile);
	}
	if (rc == 0) {
		/*
		 * Reuse the deferred-close handle only when the access it
		 * was opened with matches this open (ignoring the creation
		 * flags) and the O_SYNC/O_DIRECT semantics agree.
		 */
		unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
		unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);

		if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
		    (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		}
		_cifsFileInfo_put(cfile, true, false);
	} else {
		/* hard link on the deferred close file */
		rc = cifs_get_hardlink_path(tcon, inode, file);
		if (rc)
			cifs_close_deferred_file(CIFS_I(inode));
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* Register the open so an oplock break racing with it is not lost. */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		/* Undo the server-side open; nobody will ever use the handle. */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	/* O_DIRECT writers must not leave stale data in the fscache. */
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}
1192 
1193 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1194 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1195 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1196 
1197 /*
1198  * Try to reacquire byte range locks that were released when session
1199  * to server was lost.
1200  */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/*
	 * Taken with SINGLE_DEPTH_NESTING because the caller may already
	 * hold another inode's lock_sem; read access suffices since we
	 * only replay already-recorded locks.
	 */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* Unix extensions + fcntl capability => replay POSIX-style locks. */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
1230 
/*
 * Reopen a file handle that was invalidated (e.g. by reconnect or durable
 * handle timeout).  @can_flush indicates whether it is safe to write back
 * dirty pages and refresh inode metadata from the server; callers already
 * in the writeback path must pass false to avoid deadlock.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	/* fh_mutex serializes reopen attempts on this handle. */
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* Someone else reopened it while we waited on the mutex. */
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		/* Upgraded RDWR-for-fscache access was refused; retry as opened. */
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	scoped_guard(spinlock, &cinode->open_file_lock)
		server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
1419 
1420 void smb2_deferred_work_close(struct work_struct *work)
1421 {
1422 	struct cifsFileInfo *cfile = container_of(work,
1423 			struct cifsFileInfo, deferred.work);
1424 
1425 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1426 	cifs_del_deferred_close(cfile);
1427 	cfile->deferred_close_scheduled = false;
1428 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1429 	_cifsFileInfo_put(cfile, true, false);
1430 }
1431 
1432 static bool
1433 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1434 {
1435 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1436 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1437 	unsigned int oplock = READ_ONCE(cinode->oplock);
1438 
1439 	return cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1440 		(oplock == CIFS_CACHE_RHW_FLG || oplock == CIFS_CACHE_RH_FLG) &&
1441 		!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags);
1442 
1443 }
1444 
/*
 * VFS ->release() for CIFS files.  When a lease permits it, the actual
 * server close is deferred for closetimeo jiffies so a quick re-open can
 * reuse the handle; otherwise the reference is dropped immediately.
 */
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		/* May be NULL; smb2_can_defer_close() then returns false. */
		dclose = kmalloc_obj(struct cifs_deferred_close);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			/* Flush pending mtime/ctime updates before deferring. */
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, Increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				/* Reference is now owned by the queued work. */
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			/* No deferral possible: drop the handle right away. */
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
1494 
/*
 * After a reconnect, reopen every invalidated persistent handle on @tcon.
 * Handles are first collected under open_file_lock (with an extra ref so
 * they cannot vanish) and then reopened without the lock held; any failure
 * re-arms need_reopen_files for a later retry.
 */
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	/* Phase 2: reopen outside the spinlock (reopen can sleep). */
	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
1525 
/*
 * VFS ->release() for directories: close the server-side search handle if
 * one is still open, free any cached network search buffer and the
 * cifsFileInfo itself.  Always returns 0 to the VFS in practice.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		/* Mark invalid before dropping the lock, then close remotely. */
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* Release the search-results buffer left over from readdir. */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
1576 
1577 static struct cifsLockInfo *
1578 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1579 {
1580 	struct cifsLockInfo *lock =
1581 		kmalloc_obj(struct cifsLockInfo);
1582 	if (!lock)
1583 		return lock;
1584 	lock->offset = offset;
1585 	lock->length = length;
1586 	lock->type = type;
1587 	lock->pid = current->tgid;
1588 	lock->flags = flags;
1589 	INIT_LIST_HEAD(&lock->blist);
1590 	init_waitqueue_head(&lock->block_q);
1591 	return lock;
1592 }
1593 
1594 void
1595 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1596 {
1597 	struct cifsLockInfo *li, *tmp;
1598 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1599 		list_del_init(&li->blist);
1600 		wake_up(&li->block_q);
1601 	}
1602 }
1603 
1604 #define CIFS_LOCK_OP	0
1605 #define CIFS_READ_OP	1
1606 #define CIFS_WRITE_OP	2
1607 
1608 /* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* Skip locks whose ranges do not overlap [offset, offset+length). */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		/*
		 * For read/write checks, our own locks (same tgid, same fid)
		 * normally do not conflict with us ...
		 */
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* Two shared locks coexist if same owner+fid or same type. */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		/* OFD locks on the same fid never conflict with each other. */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
1644 
1645 bool
1646 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1647 			__u8 type, __u16 flags,
1648 			struct cifsLockInfo **conf_lock, int rw_check)
1649 {
1650 	bool rc = false;
1651 	struct cifs_fid_locks *cur;
1652 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1653 
1654 	list_for_each_entry(cur, &cinode->llist, llist) {
1655 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1656 						 flags, cfile, conf_lock,
1657 						 rw_check);
1658 		if (rc)
1659 			break;
1660 	}
1661 
1662 	return rc;
1663 }
1664 
1665 /*
1666  * Check if there is another lock that prevents us to set the lock (mandatory
1667  * style). If such a lock exists, update the flock structure with its
1668  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1669  * or leave it the same if we can't. Returns 0 if we don't need to request to
1670  * the server or 1 otherwise.
1671  */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* Report the conflicting lock's range, owner and type. */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* No local conflict, but locks aren't cached: ask the server. */
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
1703 
1704 static void
1705 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1706 {
1707 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1708 	cifs_down_write(&cinode->lock_sem);
1709 	list_add_tail(&lock->llist, &cfile->llist->locks);
1710 	up_write(&cinode->lock_sem);
1711 }
1712 
1713 /*
1714  * Set the byte-range lock (mandatory style). Returns:
1715  * 1) 0, if we set the lock and don't need to request to the server;
1716  * 2) 1, if no locks prevent us but we need to request to the server;
1717  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1718  */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* Cacheable and unopposed: record locally, no server round trip. */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* Block on the conflicting lock until its holder wakes us. */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		/* Woken when removed from the blocker's list (empty blist). */
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* Interrupted: unhook ourselves from the blocker's list. */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1760 
1761 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1762 /*
1763  * Check if there is another lock that prevents us to set the lock (posix
1764  * style). If such a lock exists, update the flock structure with its
1765  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1766  * or leave it the same if we can't. Returns 0 if we don't need to request to
1767  * the server or 1 otherwise.
1768  */
1769 static int
1770 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1771 {
1772 	int rc = 0;
1773 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1774 	unsigned char saved_type = flock->c.flc_type;
1775 
1776 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1777 		return 1;
1778 
1779 	down_read(&cinode->lock_sem);
1780 	posix_test_lock(file, flock);
1781 
1782 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1783 		flock->c.flc_type = saved_type;
1784 		rc = 1;
1785 	}
1786 
1787 	up_read(&cinode->lock_sem);
1788 	return rc;
1789 }
1790 
1791 /*
1792  * Set the byte-range lock (posix style). Returns:
1793  * 1) <0, if the error occurs while setting the lock;
1794  * 2) 0, if we set the lock and don't need to request to the server;
1795  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1796  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1797  */
1798 static int
1799 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1800 {
1801 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1802 	int rc = FILE_LOCK_DEFERRED + 1;
1803 
1804 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1805 		return rc;
1806 
1807 	cifs_down_write(&cinode->lock_sem);
1808 	if (!cinode->can_cache_brlcks) {
1809 		up_write(&cinode->lock_sem);
1810 		return rc;
1811 	}
1812 
1813 	rc = posix_lock_file(file, flock, NULL);
1814 	up_write(&cinode->lock_sem);
1815 	return rc;
1816 }
1817 
/*
 * Replay all cached mandatory byte-range locks for @cfile to the server
 * using batched LOCKING_ANDX requests (exclusive ranges first, then
 * shared).  Returns 0, or the last non-zero server status seen.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* How many ranges fit in one request after the SMB header. */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	/* One pass per lock type; each pass batches up to max_num ranges. */
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* Batch full: flush it and start a new one. */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* Send the final partial batch, if any. */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1894 
1895 static __u32
1896 hash_lockowner(fl_owner_t owner)
1897 {
1898 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1899 }
1900 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1901 
/* One POSIX lock queued for replay to the server by cifs_push_posix_locks(). */
struct lock_to_push {
	struct list_head llist;		/* entry on locks_to_send list */
	__u64 offset;			/* start of locked range */
	__u64 length;			/* length of locked range */
	__u32 pid;			/* hashed lock owner id */
	__u16 netfid;			/* SMB1 file handle */
	__u8 type;			/* CIFS_RDLCK or CIFS_WRLCK */
};
1910 
1911 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Replay all cached POSIX (fcntl-style) locks on @cfile's inode to the
 * server.  Lock records are preallocated outside flc_lock (which cannot
 * sleep), then filled in under it, then sent without it held.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = locks_inode_context(inode);
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	/* Count current POSIX locks so we know how many records to allocate. */
	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc_obj(struct lock_to_push);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* Fill the preallocated records from the live lock list. */
	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	for_each_file_lock(flock, &flctx->flc_posix) {
		unsigned char ftype = flock->c.flc_type;

		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = cifs_flock_len(flock);
		if (ftype == F_RDLCK || ftype == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->c.flc_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
	}
	spin_unlock(&flctx->flc_lock);

	/* Send each lock to the server; remember the last failure, if any. */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
2001 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2002 
/*
 * Send all locally cached byte-range locks for @cfile to the server and
 * stop caching new ones (clears cinode->can_cache_brlcks).  POSIX-style
 * pushing is used on Unix-extension mounts unless NOPOSIXBRL is set;
 * otherwise the server's mandatory-lock push op is used.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* locks were already pushed (or never cached) - nothing to do */
		up_write(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
2033 
2034 static void
2035 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
2036 		bool *wait_flag, struct TCP_Server_Info *server)
2037 {
2038 	if (flock->c.flc_flags & FL_POSIX)
2039 		cifs_dbg(FYI, "Posix\n");
2040 	if (flock->c.flc_flags & FL_FLOCK)
2041 		cifs_dbg(FYI, "Flock\n");
2042 	if (flock->c.flc_flags & FL_SLEEP) {
2043 		cifs_dbg(FYI, "Blocking lock\n");
2044 		*wait_flag = true;
2045 	}
2046 	if (flock->c.flc_flags & FL_ACCESS)
2047 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
2048 	if (flock->c.flc_flags & FL_LEASE)
2049 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
2050 	if (flock->c.flc_flags &
2051 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
2052 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
2053 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
2054 		         flock->c.flc_flags);
2055 
2056 	*type = server->vals->large_lock_type;
2057 	if (lock_is_write(flock)) {
2058 		cifs_dbg(FYI, "F_WRLCK\n");
2059 		*type |= server->vals->exclusive_lock_type;
2060 		*lock = 1;
2061 	} else if (lock_is_unlock(flock)) {
2062 		cifs_dbg(FYI, "F_UNLCK\n");
2063 		*type |= server->vals->unlock_lock_type;
2064 		*unlock = 1;
2065 		/* Check if unlock includes more than one lock range */
2066 	} else if (lock_is_read(flock)) {
2067 		cifs_dbg(FYI, "F_RDLCK\n");
2068 		*type |= server->vals->shared_lock_type;
2069 		*lock = 1;
2070 	} else if (flock->c.flc_type == F_EXLCK) {
2071 		cifs_dbg(FYI, "F_EXLCK\n");
2072 		*type |= server->vals->exclusive_lock_type;
2073 		*lock = 1;
2074 	} else if (flock->c.flc_type == F_SHLCK) {
2075 		cifs_dbg(FYI, "F_SHLCK\n");
2076 		*type |= server->vals->shared_lock_type;
2077 		*lock = 1;
2078 	} else
2079 		cifs_dbg(FYI, "Unknown type of lock\n");
2080 }
2081 
/*
 * Implement the F_GETLK side of cifs_lock(): decide whether the range
 * described by @flock could be locked.  On a grantable range,
 * flock->c.flc_type is set to F_UNLCK; on conflict it is set to the
 * conflicting type (F_WRLCK / F_RDLCK).  Returns 0 on a definitive
 * answer, or a server error code.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* a local conflict answers the query without a round trip */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* check our locally cached mandatory locks first */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/*
	 * Probe the server: take the lock non-blocking and, on success,
	 * release it again - the range is then known to be free.
	 */
	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->c.flc_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	/* a failed shared request implies an exclusive lock conflicts */
	if (type & server->vals->shared_lock_type) {
		flock->c.flc_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed - retry shared to classify the conflict */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->c.flc_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->c.flc_type = F_WRLCK;

	return 0;
}
2152 
2153 void
2154 cifs_move_llist(struct list_head *source, struct list_head *dest)
2155 {
2156 	struct list_head *li, *tmp;
2157 	list_for_each_safe(li, tmp, source)
2158 		list_move(li, dest);
2159 }
2160 
2161 int
2162 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2163 				struct file *file)
2164 {
2165 	struct cifsFileInfo *open_file = NULL;
2166 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2167 	int rc = 0;
2168 
2169 	spin_lock(&tcon->open_file_lock);
2170 	spin_lock(&cinode->open_file_lock);
2171 
2172 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2173 		if (file->f_flags == open_file->f_flags) {
2174 			rc = -EINVAL;
2175 			break;
2176 		}
2177 	}
2178 
2179 	spin_unlock(&cinode->open_file_lock);
2180 	spin_unlock(&tcon->open_file_lock);
2181 	return rc;
2182 }
2183 
2184 void
2185 cifs_free_llist(struct list_head *llist)
2186 {
2187 	struct cifsLockInfo *li, *tmp;
2188 	list_for_each_entry_safe(li, tmp, llist, llist) {
2189 		cifs_del_lock_waiters(li);
2190 		list_del(&li->llist);
2191 		kfree(li);
2192 	}
2193 }
2194 
2195 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2196 int
2197 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2198 		  unsigned int xid)
2199 {
2200 	int rc = 0, stored_rc;
2201 	static const int types[] = {
2202 		LOCKING_ANDX_LARGE_FILES,
2203 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2204 	};
2205 	unsigned int i;
2206 	unsigned int max_num, num, max_buf;
2207 	LOCKING_ANDX_RANGE *buf, *cur;
2208 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2209 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2210 	struct cifsLockInfo *li, *tmp;
2211 	__u64 length = cifs_flock_len(flock);
2212 	LIST_HEAD(tmp_llist);
2213 
2214 	/*
2215 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2216 	 * and check it before using.
2217 	 */
2218 	max_buf = tcon->ses->server->maxBuf;
2219 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2220 		return -EINVAL;
2221 
2222 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2223 		     PAGE_SIZE);
2224 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2225 			PAGE_SIZE);
2226 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2227 						sizeof(LOCKING_ANDX_RANGE);
2228 	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
2229 	if (!buf)
2230 		return -ENOMEM;
2231 
2232 	cifs_down_write(&cinode->lock_sem);
2233 	for (i = 0; i < 2; i++) {
2234 		cur = buf;
2235 		num = 0;
2236 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2237 			if (flock->fl_start > li->offset ||
2238 			    (flock->fl_start + length) <
2239 			    (li->offset + li->length))
2240 				continue;
2241 			if (current->tgid != li->pid)
2242 				continue;
2243 			if (types[i] != li->type)
2244 				continue;
2245 			if (cinode->can_cache_brlcks) {
2246 				/*
2247 				 * We can cache brlock requests - simply remove
2248 				 * a lock from the file's list.
2249 				 */
2250 				list_del(&li->llist);
2251 				cifs_del_lock_waiters(li);
2252 				kfree(li);
2253 				continue;
2254 			}
2255 			cur->Pid = cpu_to_le16(li->pid);
2256 			cur->LengthLow = cpu_to_le32((u32)li->length);
2257 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2258 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2259 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2260 			/*
2261 			 * We need to save a lock here to let us add it again to
2262 			 * the file's list if the unlock range request fails on
2263 			 * the server.
2264 			 */
2265 			list_move(&li->llist, &tmp_llist);
2266 			if (++num == max_num) {
2267 				stored_rc = cifs_lockv(xid, tcon,
2268 						       cfile->fid.netfid,
2269 						       li->type, num, 0, buf);
2270 				if (stored_rc) {
2271 					/*
2272 					 * We failed on the unlock range
2273 					 * request - add all locks from the tmp
2274 					 * list to the head of the file's list.
2275 					 */
2276 					cifs_move_llist(&tmp_llist,
2277 							&cfile->llist->locks);
2278 					rc = stored_rc;
2279 				} else
2280 					/*
2281 					 * The unlock range request succeed -
2282 					 * free the tmp list.
2283 					 */
2284 					cifs_free_llist(&tmp_llist);
2285 				cur = buf;
2286 				num = 0;
2287 			} else
2288 				cur++;
2289 		}
2290 		if (num) {
2291 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2292 					       types[i], num, 0, buf);
2293 			if (stored_rc) {
2294 				cifs_move_llist(&tmp_llist,
2295 						&cfile->llist->locks);
2296 				rc = stored_rc;
2297 			} else
2298 				cifs_free_llist(&tmp_llist);
2299 		}
2300 	}
2301 
2302 	up_write(&cinode->lock_sem);
2303 	kfree(buf);
2304 	return rc;
2305 }
2306 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2307 
/*
 * Implement the F_SETLK/F_SETLKW side of cifs_lock()/cifs_flock():
 * apply (@lock) or remove (@unlock) a byte-range lock on the server,
 * using POSIX locks on Unix-extension mounts and mandatory locks
 * otherwise, then mirror the result into the local VFS lock state.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (posix_lck) {
		int posix_lock_type;

		/* may be satisfied (or deferred) purely locally */
		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->c.flc_flags);
		if (!lock)
			return -ENOMEM;

		/* rc == 0 here means the lock was merely cached locally */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			cifs_reset_oplock(CIFS_I(inode));
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			/* server refused - drop the unused local record */
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->c.flc_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
2402 
2403 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2404 {
2405 	int rc, xid;
2406 	int lock = 0, unlock = 0;
2407 	bool wait_flag = false;
2408 	bool posix_lck = false;
2409 	struct cifs_sb_info *cifs_sb;
2410 	struct cifs_tcon *tcon;
2411 	struct cifsFileInfo *cfile;
2412 	__u32 type;
2413 
2414 	xid = get_xid();
2415 
2416 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2417 		rc = -ENOLCK;
2418 		free_xid(xid);
2419 		return rc;
2420 	}
2421 
2422 	cfile = (struct cifsFileInfo *)file->private_data;
2423 	tcon = tlink_tcon(cfile->tlink);
2424 
2425 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2426 			tcon->ses->server);
2427 	cifs_sb = CIFS_SB(file);
2428 
2429 	if (cap_unix(tcon->ses) &&
2430 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2431 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2432 		posix_lck = true;
2433 
2434 	if (!lock && !unlock) {
2435 		/*
2436 		 * if no lock or unlock then nothing to do since we do not
2437 		 * know what it is
2438 		 */
2439 		rc = -EOPNOTSUPP;
2440 		free_xid(xid);
2441 		return rc;
2442 	}
2443 
2444 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2445 			xid);
2446 	free_xid(xid);
2447 	return rc;
2448 
2449 
2450 }
2451 
/*
 * fcntl(2) byte-range lock entry point (F_GETLK / F_SETLK / F_SETLKW).
 * Decodes the request, dispatches F_GETLK to cifs_getlk() and set/unset
 * requests to cifs_setlk().  Marks the inode CIFS_INO_CLOSE_ON_LOCK so
 * the handle is not kept in the cached-close path once locked.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(file);
	struct cifsFileInfo *cfile;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_tcon *tcon;
	__u32 type;
	int rc, xid;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
		 flock->c.flc_flags, flock->c.flc_type,
		 (long long)flock->fl_start,
		 (long long)flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);

	/* POSIX lock semantics apply on Unix-extension mounts w/o noposixbrl */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
2506 
2507 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
2508 {
2509 	struct netfs_io_request *wreq = wdata->rreq;
2510 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2511 	loff_t wrend;
2512 
2513 	if (result > 0) {
2514 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2515 
2516 		if (wrend > ictx->zero_point &&
2517 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2518 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2519 			ictx->zero_point = wrend;
2520 		if (wrend > ictx->remote_i_size)
2521 			netfs_resize_file(ictx, wrend, true);
2522 	}
2523 
2524 	netfs_write_subrequest_terminated(&wdata->subreq, result);
2525 }
2526 
2527 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2528 					bool fsuid_only)
2529 {
2530 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode);
2531 	struct cifsFileInfo *open_file = NULL;
2532 
2533 	/* only filter by fsuid on multiuser mounts */
2534 	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
2535 		fsuid_only = false;
2536 
2537 	spin_lock(&cifs_inode->open_file_lock);
2538 	/* we could simply get the first_list_entry since write-only entries
2539 	   are always at the end of the list but since the first entry might
2540 	   have a close pending, we go through the whole list */
2541 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2542 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2543 			continue;
2544 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2545 			if ((!open_file->invalidHandle)) {
2546 				/* found a good file */
2547 				/* lock it so it will not be closed on us */
2548 				cifsFileInfo_get(open_file);
2549 				spin_unlock(&cifs_inode->open_file_lock);
2550 				return open_file;
2551 			} /* else might as well continue, and look for
2552 			     another, or simply have the caller reopen it
2553 			     again rather than trying to fix this handle */
2554 		} else /* write only file */
2555 			break; /* write only files are last so must be done */
2556 	}
2557 	spin_unlock(&cifs_inode->open_file_lock);
2558 	return NULL;
2559 }
2560 
2561 /* Return -EBADF if no handle is found and general rc otherwise */
/*
 * Find an open handle on @cifs_inode usable for writing, filtered by
 * @flags (FIND_WR_FSUID_ONLY, FIND_WR_WITH_DELETE,
 * FIND_WR_NO_PENDING_DELETE).  Prefers a valid handle owned by the
 * current tgid, then any valid handle, then tries to reopen an
 * invalidated one (up to MAX_REOPEN_ATT attempts).  On success *ret_file
 * holds a referenced handle and 0 is returned; otherwise -EBADF.
 */
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;
	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen but we had reports of on oops (due
	 * to it being zero) during stress testcases so we need to check for it
	 */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	/* give up after too many failed reopen attempts */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if ((flags & FIND_WR_NO_PENDING_DELETE) &&
		    open_file->status_file_deleted)
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				/* remember the first invalidated candidate */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		/* pin the candidate before dropping the list lock */
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		/* try to revive the invalidated handle (may sleep) */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		/* reopen failed: push it to the tail and rescan the list */
		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
2654 
2655 struct cifsFileInfo *
2656 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2657 {
2658 	struct cifsFileInfo *cfile;
2659 	int rc;
2660 
2661 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2662 	if (rc)
2663 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2664 
2665 	return cfile;
2666 }
2667 
2668 int
2669 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2670 		       int flags,
2671 		       struct cifsFileInfo **ret_file)
2672 {
2673 	struct cifsFileInfo *cfile;
2674 	void *page = alloc_dentry_path();
2675 
2676 	*ret_file = NULL;
2677 
2678 	spin_lock(&tcon->open_file_lock);
2679 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2680 		struct cifsInodeInfo *cinode;
2681 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2682 		if (IS_ERR(full_path)) {
2683 			spin_unlock(&tcon->open_file_lock);
2684 			free_dentry_path(page);
2685 			return PTR_ERR(full_path);
2686 		}
2687 		if (strcmp(full_path, name))
2688 			continue;
2689 
2690 		cinode = CIFS_I(d_inode(cfile->dentry));
2691 		spin_unlock(&tcon->open_file_lock);
2692 		free_dentry_path(page);
2693 		return cifs_get_writable_file(cinode, flags, ret_file);
2694 	}
2695 
2696 	spin_unlock(&tcon->open_file_lock);
2697 	free_dentry_path(page);
2698 	return -ENOENT;
2699 }
2700 
/*
 * Look up an open handle on @tcon whose dentry path equals @name and,
 * if found, fetch a readable handle for that inode.  Handles whose file
 * is already deleted on the server (status_file_deleted) are rejected.
 * Returns 0 with *ret_file referenced, -ENOENT otherwise, or a
 * path-building error.
 */
int
cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		const char *full_path = build_path_from_dentry(cfile->dentry, page);
		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		*ret_file = find_readable_file(cinode, 0);
		if (*ret_file) {
			/* re-check deletion status under the inode lock */
			spin_lock(&cinode->open_file_lock);
			if ((*ret_file)->status_file_deleted) {
				spin_unlock(&cinode->open_file_lock);
				cifsFileInfo_put(*ret_file);
				*ret_file = NULL;
			} else {
				spin_unlock(&cinode->open_file_lock);
			}
		}
		return *ret_file ? 0 : -ENOENT;
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
2743 
2744 /*
2745  * Flush data on a strict file.
2746  */
2747 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2748 		      int datasync)
2749 {
2750 	struct cifsFileInfo *smbfile = file->private_data;
2751 	struct inode *inode = file_inode(file);
2752 	unsigned int xid;
2753 	int rc;
2754 
2755 	rc = file_write_and_wait_range(file, start, end);
2756 	if (rc) {
2757 		trace_cifs_fsync_err(inode->i_ino, rc);
2758 		return rc;
2759 	}
2760 
2761 	cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync);
2762 
2763 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2764 		rc = cifs_zap_mapping(inode);
2765 		cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc);
2766 	}
2767 
2768 	xid = get_xid();
2769 	rc = cifs_file_flush(xid, inode, smbfile);
2770 	free_xid(xid);
2771 	return rc;
2772 }
2773 
2774 /*
2775  * Flush data on a non-strict data.
2776  */
/*
 * fsync for non-strict mounts: write back and wait on the dirty range,
 * then (unless "nosfsync"/NOSSYNC is set) ask the server to flush.  If
 * our own handle is not writable, borrow any writable handle on the
 * inode for the flush.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(file);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto fsync_exit;
		}

		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			/* our handle is read-only - flush via another one */
			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

fsync_exit:
	free_xid(xid);
	return rc;
}
2821 
2822 /*
2823  * As file closes, flush all cached write data for this inode checking
2824  * for write behind errors.
2825  */
2826 int cifs_flush(struct file *file, fl_owner_t id)
2827 {
2828 	struct inode *inode = file_inode(file);
2829 	int rc = 0;
2830 
2831 	if (file->f_mode & FMODE_WRITE)
2832 		rc = filemap_write_and_wait(inode->i_mapping);
2833 
2834 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2835 	if (rc) {
2836 		/* get more nuanced writeback errors */
2837 		rc = filemap_check_wb_err(file->f_mapping, 0);
2838 		trace_cifs_flush_err(inode->i_ino, rc);
2839 	}
2840 	return rc;
2841 }
2842 
/*
 * Buffered write used when mandatory byte-range locks may be cached:
 * takes the netfs write lock, then holds cinode->lock_sem for read so
 * no conflicting brlock can be added while checking/writing.  Fails
 * with -EACCES if an exclusive lock conflicts with the write range.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	ssize_t rc;

	rc = netfs_start_io_write(inode);
	if (rc < 0)
		return rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	/* mandatory-lock mounts: refuse writes into a locked range */
	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) &&
	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))) {
		rc = -EACCES;
		goto out;
	}

	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);

out:
	up_read(&cinode->lock_sem);
	netfs_end_io_write(inode);
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
2885 
/*
 * Write path for strict-cache mounts.  With a write-caching oplock the
 * data can be buffered (POSIX-lock mounts go straight to netfs,
 * mandatory-lock mounts go through cifs_writev() for conflict checks);
 * without one the write goes to the server and any read-cached pages
 * are zapped so subsequent reads refetch fresh data.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* non-zero means an oplock break is in progress - bail out */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
		    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = netfs_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = netfs_file_write_iter(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cifs_reset_oplock(cinode);
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2935 
2936 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2937 {
2938 	ssize_t rc;
2939 	struct inode *inode = file_inode(iocb->ki_filp);
2940 
2941 	if (iocb->ki_flags & IOCB_DIRECT)
2942 		return netfs_unbuffered_read_iter(iocb, iter);
2943 
2944 	rc = cifs_revalidate_mapping(inode);
2945 	if (rc)
2946 		return rc;
2947 
2948 	return netfs_file_read_iter(iocb, iter);
2949 }
2950 
/*
 * Default (loose-cache) write path.  O_DIRECT writes go unbuffered and,
 * if read caching was granted, zap the now-stale pagecache and drop the
 * oplock.  Buffered writes are bracketed by cifs_get_writer()/
 * cifs_put_writer() and kicked out to the server immediately when no
 * write-caching oplock is held.
 */
ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = netfs_unbuffered_write_iter(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			/* cached pages are stale after a direct write */
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cifs_reset_oplock(cinode);
		}
		return written;
	}

	/* non-zero means an oplock break is in progress - bail out */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = netfs_file_write_iter(iocb, from);

	/* no write caching: start writeback to the server right away */
	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
		rc = filemap_fdatawrite(inode->i_mapping);
		if (rc)
			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
				 rc, inode);
	}

	cifs_put_writer(cinode);
	return written;
}
2986 
/*
 * Strict-cache read. Only reads from the page cache when a read (level II)
 * oplock/lease is held; otherwise all data comes from the server. When
 * mandatory byte-range locks may be in force, the read is performed under
 * lock_sem after checking for a conflicting brlock.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return netfs_unbuffered_read_iter(iocb, to);

	/* POSIX-style brlocks in use: no mandatory-lock conflict possible. */
	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			return netfs_unbuffered_read_iter(iocb, to);
		return netfs_buffered_read_iter(iocb, to);
	}

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	if (iocb->ki_flags & IOCB_DIRECT) {
		rc = netfs_start_io_direct(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		/* Only read if no mandatory lock conflicts with the range. */
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = netfs_unbuffered_read_iter_locked(iocb, to);
		up_read(&cinode->lock_sem);
		netfs_end_io_direct(inode);
	} else {
		rc = netfs_start_io_read(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		/* Only read if no mandatory lock conflicts with the range. */
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = filemap_read(iocb, to, 0);
		up_read(&cinode->lock_sem);
		netfs_end_io_read(inode);
	}
out:
	return rc;
}
3049 
/*
 * mmap write fault: let the netfs library make the folio writable
 * (it handles waiting for writeback and dirtying the folio).
 */
static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);
}
3054 
/* VM operations for cifs mmap'd files; reads are plain filemap faults. */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3060 
3061 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3062 {
3063 	int xid, rc = 0;
3064 	struct inode *inode = file_inode(desc->file);
3065 
3066 	xid = get_xid();
3067 
3068 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3069 		rc = cifs_zap_mapping(inode);
3070 	if (!rc)
3071 		rc = generic_file_mmap_prepare(desc);
3072 	if (!rc)
3073 		desc->vm_ops = &cifs_file_vm_ops;
3074 
3075 	free_xid(xid);
3076 	return rc;
3077 }
3078 
3079 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3080 {
3081 	int rc, xid;
3082 
3083 	xid = get_xid();
3084 
3085 	rc = cifs_revalidate_file(desc->file);
3086 	if (rc)
3087 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3088 			 rc);
3089 	if (!rc)
3090 		rc = generic_file_mmap_prepare(desc);
3091 	if (!rc)
3092 		desc->vm_ops = &cifs_file_vm_ops;
3093 
3094 	free_xid(xid);
3095 	return rc;
3096 }
3097 
3098 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3099 {
3100 	struct cifsFileInfo *open_file;
3101 
3102 	spin_lock(&cifs_inode->open_file_lock);
3103 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3104 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3105 			spin_unlock(&cifs_inode->open_file_lock);
3106 			return 1;
3107 		}
3108 	}
3109 	spin_unlock(&cifs_inode->open_file_lock);
3110 	return 0;
3111 }
3112 
3113 /* We do not want to update the file size from server for inodes
3114    open for write - to avoid races with writepage extending
3115    the file - in the future we could consider allowing
3116    refreshing the inode only on increases in the file size
3117    but this is tricky to do without racing with writebehind
3118    page caching in the current Linux kernel design */
3119 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3120 			    bool from_readdir)
3121 {
3122 	if (!cifsInode)
3123 		return true;
3124 
3125 	if (is_inode_writable(cifsInode) ||
3126 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3127 		/* This inode is open for write at least once */
3128 		struct cifs_sb_info *cifs_sb = CIFS_SB(cifsInode);
3129 
3130 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DIRECT_IO) {
3131 			/* since no page cache to corrupt on directio
3132 			we can change size safely */
3133 			return true;
3134 		}
3135 
3136 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3137 			return true;
3138 
3139 		return false;
3140 	} else
3141 		return true;
3142 }
3143 
/*
 * Work item run when the server sends an oplock/lease break. Downgrades
 * the cached oplock state, flushes/invalidates the page cache as needed,
 * pushes byte-range locks to the server, and (unless cancelled or the
 * file is already closed) acknowledges the break to the server.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	bool cache_read, cache_write, cache_handle;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct tcon_link *tlink;
	unsigned int oplock;
	int rc = 0;
	bool purge_cache = false, oplock_break_cancelled;
	__u64 persistent_fid, volatile_fid;
	__u16 net_fid;

	/* Wait until in-flight writers (cifs_get_writer holders) drain. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	/* Downgrade the oplock and snapshot what caching remains allowed. */
	scoped_guard(spinlock, &cinode->open_file_lock) {
		unsigned int sbflags = cifs_sb_flags(cifs_sb);

		server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
					      cfile->oplock_epoch, &purge_cache);
		oplock = READ_ONCE(cinode->oplock);
		cache_read = (oplock & CIFS_CACHE_READ_FLG) ||
			(sbflags & CIFS_MOUNT_RO_CACHE);
		cache_write = (oplock & CIFS_CACHE_WRITE_FLG) ||
			(sbflags & CIFS_MOUNT_RW_CACHE);
		cache_handle = oplock & CIFS_CACHE_HANDLE_FLG;
	}

	/* Mandatory locks and read caching don't mix; drop the oplock. */
	if (!cache_write && cache_read && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cifs_reset_oplock(cinode);
		oplock = 0;
		cache_read = cache_write = cache_handle = false;
	}

	if (S_ISREG(inode->i_mode)) {
		/* Propagate the break to any local leases held via the VFS. */
		if (cache_read)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!cache_read || purge_cache) {
			/* No read caching left: flush fully and drop pages. */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (cache_write)
			goto oplock_break_ack;
	}

	/* Locks can no longer be cached locally; push them to the server. */
	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When oplock break is received and there are no active
	 * file handles but cached, then schedule deferred close immediately.
	 * So, new open will not use cached handle.
	 */

	if (!cache_handle && !list_empty(&cinode->deferred_closes))
		cifs_close_deferred_file(cinode);

	/* Snapshot the fids before dropping our reference to cfile. */
	persistent_fid = cfile->fid.persistent_fid;
	volatile_fid = cfile->fid.volatile_fid;
	net_fid = cfile->fid.netfid;
	oplock_break_cancelled = cfile->oplock_break_cancelled;

	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	/*
	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
	 * an acknowledgment to be sent when the file has already been closed.
	 */
	spin_lock(&cinode->open_file_lock);
	/* check list empty since can race with kill_sb calling tree disconnect */
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid,
						  cinode, oplock);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	} else
		spin_unlock(&cinode->open_file_lock);

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
}
3247 
3248 static int cifs_swap_activate(struct swap_info_struct *sis,
3249 			      struct file *swap_file, sector_t *span)
3250 {
3251 	struct cifsFileInfo *cfile = swap_file->private_data;
3252 	struct inode *inode = swap_file->f_mapping->host;
3253 	unsigned long blocks;
3254 	long long isize;
3255 
3256 	cifs_dbg(FYI, "swap activate\n");
3257 
3258 	if (!swap_file->f_mapping->a_ops->swap_rw)
3259 		/* Cannot support swap */
3260 		return -EINVAL;
3261 
3262 	spin_lock(&inode->i_lock);
3263 	blocks = inode->i_blocks;
3264 	isize = inode->i_size;
3265 	spin_unlock(&inode->i_lock);
3266 	if (blocks*512 < isize) {
3267 		pr_warn("swap activate: swapfile has holes\n");
3268 		return -EINVAL;
3269 	}
3270 	*span = sis->pages;
3271 
3272 	pr_warn_once("Swap support over SMB3 is experimental\n");
3273 
3274 	/*
3275 	 * TODO: consider adding ACL (or documenting how) to prevent other
3276 	 * users (on this or other systems) from reading it
3277 	 */
3278 
3279 
3280 	/* TODO: add sk_set_memalloc(inet) or similar */
3281 
3282 	if (cfile)
3283 		cfile->swapfile = true;
3284 	/*
3285 	 * TODO: Since file already open, we can't open with DENY_ALL here
3286 	 * but we could add call to grab a byte range lock to prevent others
3287 	 * from reading or writing the file
3288 	 */
3289 
3290 	sis->flags |= SWP_FS_OPS;
3291 	return add_swap_extent(sis, 0, sis->max, 0);
3292 }
3293 
/* Undo cifs_swap_activate: clear the swapfile marker on the open handle. */
static void cifs_swap_deactivate(struct file *file)
{
	struct cifsFileInfo *cfile = file->private_data;

	cifs_dbg(FYI, "swap deactivate\n");

	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */

	if (cfile)
		cfile->swapfile = false;

	/* do we need to unpin (or unlock) the file */
}
3307 
3308 /**
3309  * cifs_swap_rw - SMB3 address space operation for swap I/O
3310  * @iocb: target I/O control block
3311  * @iter: I/O buffer
3312  *
3313  * Perform IO to the swap-file.  This is much like direct IO.
3314  */
3315 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3316 {
3317 	ssize_t ret;
3318 
3319 	if (iov_iter_rw(iter) == READ)
3320 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3321 	else
3322 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3323 	if (ret < 0)
3324 		return ret;
3325 	return 0;
3326 }
3327 
/* Address space operations for cifs; netfs library handles most paths. */
const struct address_space_operations cifs_addr_ops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	.writepages	= netfs_writepages,
	.dirty_folio	= netfs_dirty_folio,
	.release_folio	= netfs_release_folio,
	.direct_IO	= noop_direct_IO,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
	/*
	 * TODO: investigate and if useful we could add an is_dirty_writeback
	 * helper if needed
	 */
	.swap_activate	= cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
	.swap_rw = cifs_swap_rw,
};
3345 
3346 /*
3347  * cifs_readahead requires the server to support a buffer large enough to
3348  * contain the header plus one complete page of data.  Otherwise, we need
3349  * to leave cifs_readahead out of the address space operations.
3350  */
3351 const struct address_space_operations cifs_addr_ops_smallbuf = {
3352 	.read_folio	= netfs_read_folio,
3353 	.writepages	= netfs_writepages,
3354 	.dirty_folio	= netfs_dirty_folio,
3355 	.release_folio	= netfs_release_folio,
3356 	.invalidate_folio = netfs_invalidate_folio,
3357 	.migrate_folio	= filemap_migrate_folio,
3358 };
3359