xref: /linux/fs/smb/client/file.c (revision d874ca0522389405e26bc2ba38b59c9849c52cc1)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/fs_struct.h>
13 #include <linux/filelock.h>
14 #include <linux/backing-dev.h>
15 #include <linux/stat.h>
16 #include <linux/fcntl.h>
17 #include <linux/pagemap.h>
18 #include <linux/pagevec.h>
19 #include <linux/writeback.h>
20 #include <linux/task_io_accounting_ops.h>
21 #include <linux/delay.h>
22 #include <linux/mount.h>
23 #include <linux/slab.h>
24 #include <linux/swap.h>
25 #include <linux/mm.h>
26 #include <asm/div64.h>
27 #include "cifsfs.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
cifs_prepare_write(struct netfs_io_subrequest * subreq)47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 	struct TCP_Server_Info *server;
54 	struct cifsFileInfo *open_file = req->cfile;
55 	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
56 	size_t wsize = req->rreq.wsize;
57 	int rc;
58 
	/* Allocate an xid lazily; it is released in cifs_free_subrequest(). */
59 	if (!wdata->have_xid) {
60 		wdata->xid = get_xid();
61 		wdata->have_xid = true;
62 	}
63 
	/* Each subrequest picks its own server channel for this session. */
64 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
65 	wdata->server = server;
66 
	/* wsize has not been negotiated yet for this mount context. */
67 	if (cifs_sb->ctx->wsize == 0)
68 		cifs_negotiate_wsize(server, cifs_sb->ctx,
69 				     tlink_tcon(req->cfile->tlink));
70 
71 retry:
	/* Reopen a stale handle first; -EAGAIN means try the reopen again. */
72 	if (open_file->invalidHandle) {
73 		rc = cifs_reopen_file(open_file, false);
74 		if (rc < 0) {
75 			if (rc == -EAGAIN)
76 				goto retry;
77 			subreq->error = rc;
78 			return netfs_prepare_write_failed(subreq);
79 		}
80 	}
81 
	/*
	 * Wait for credits; the granted size is written to
	 * stream->sreq_max_len and caps how much this subrequest may carry.
	 */
82 	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
83 					   &wdata->credits);
84 	if (rc < 0) {
85 		subreq->error = rc;
86 		return netfs_prepare_write_failed(subreq);
87 	}
88 
89 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
90 	wdata->credits.rreq_debug_index = subreq->debug_index;
91 	wdata->credits.in_flight_check = 1;
92 	trace_smb3_rw_credits(wdata->rreq->debug_id,
93 			      wdata->subreq.debug_index,
94 			      wdata->credits.value,
95 			      server->credits, server->in_flight,
96 			      wdata->credits.value,
97 			      cifs_trace_rw_credits_write_prepare);
98 
99 #ifdef CONFIG_CIFS_SMB_DIRECT
	/* RDMA transport: segment count is bounded by the FRMR page depth. */
100 	if (server->smbd_conn) {
101 		const struct smbdirect_socket_parameters *sp =
102 			smbd_get_parameters(server->smbd_conn);
103 
104 		stream->sreq_max_segs = sp->max_frmr_depth;
105 	}
106 #endif
107 }
108 
109 /*
110  * Issue a subrequest to upload to the server.
111  */
cifs_issue_write(struct netfs_io_subrequest * subreq)112 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
113 {
114 	struct cifs_io_subrequest *wdata =
115 		container_of(subreq, struct cifs_io_subrequest, subreq);
116 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
117 	int rc;
118 
	/* Refuse new I/O after a forced shutdown of the filesystem. */
119 	if (cifs_forced_shutdown(sbi)) {
120 		rc = smb_EIO(smb_eio_trace_forced_shutdown);
121 		goto fail;
122 	}
123 
	/* Re-balance the credits reserved in cifs_prepare_write(). */
124 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
125 	if (rc)
126 		goto fail;
127 
	/* Handle went stale since prepare: fail with -EAGAIN (traced as retry). */
128 	rc = -EAGAIN;
129 	if (wdata->req->cfile->invalidHandle)
130 		goto fail;
131 
	/* Hand the subrequest to the asynchronous write path. */
132 	wdata->server->ops->async_writev(wdata);
133 out:
134 	return;
135 
136 fail:
137 	if (rc == -EAGAIN)
138 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
139 	else
140 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	/* Return unused credits, then complete the subrequest with the error. */
141 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
142 	cifs_write_subrequest_terminated(wdata, rc);
143 	goto out;
144 }
145 
cifs_netfs_invalidate_cache(struct netfs_io_request * wreq)146 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
147 {
148 	cifs_invalidate_cache(wreq->inode, 0);
149 }
150 
151 /*
152  * Negotiate the size of a read operation on behalf of the netfs library.
153  */
cifs_prepare_read(struct netfs_io_subrequest * subreq)154 static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
155 {
156 	struct netfs_io_request *rreq = subreq->rreq;
157 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
158 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
159 	struct TCP_Server_Info *server;
160 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
161 	size_t size;
162 	int rc = 0;
163 
	/* Allocate an xid lazily; it is released in cifs_free_subrequest(). */
164 	if (!rdata->have_xid) {
165 		rdata->xid = get_xid();
166 		rdata->have_xid = true;
167 	}
168 
	/* Each subrequest picks its own server channel for this session. */
169 	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
170 	rdata->server = server;
171 
	/* rsize has not been negotiated yet for this mount context. */
172 	if (cifs_sb->ctx->rsize == 0)
173 		cifs_negotiate_rsize(server, cifs_sb->ctx,
174 				     tlink_tcon(req->cfile->tlink));
175 
	/* Wait for credits; the granted size caps this subrequest's length. */
176 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
177 					   &size, &rdata->credits);
178 	if (rc)
179 		return rc;
180 
181 	rreq->io_streams[0].sreq_max_len = size;
182 
183 	rdata->credits.in_flight_check = 1;
184 	rdata->credits.rreq_debug_id = rreq->debug_id;
185 	rdata->credits.rreq_debug_index = subreq->debug_index;
186 
187 	trace_smb3_rw_credits(rdata->rreq->debug_id,
188 			      rdata->subreq.debug_index,
189 			      rdata->credits.value,
190 			      server->credits, server->in_flight, 0,
191 			      cifs_trace_rw_credits_read_submit);
192 
193 #ifdef CONFIG_CIFS_SMB_DIRECT
	/* RDMA transport: segment count is bounded by the FRMR page depth. */
194 	if (server->smbd_conn) {
195 		const struct smbdirect_socket_parameters *sp =
196 			smbd_get_parameters(server->smbd_conn);
197 
198 		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
199 	}
200 #endif
201 	return 0;
202 }
203 
204 /*
205  * Issue a read operation on behalf of the netfs helper functions.  We're asked
206  * to make a read of a certain size at a point in the file.  We are permitted
207  * to only read a portion of that, but as long as we read something, the netfs
208  * helper will call us again so that we can issue another read.
209  */
cifs_issue_read(struct netfs_io_subrequest * subreq)210 static void cifs_issue_read(struct netfs_io_subrequest *subreq)
211 {
212 	struct netfs_io_request *rreq = subreq->rreq;
213 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
214 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
215 	struct TCP_Server_Info *server = rdata->server;
216 	int rc = 0;
217 
218 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
219 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
220 		 subreq->transferred, subreq->len);
221 
	/* Re-balance the credits reserved in cifs_prepare_read(). */
222 	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
223 	if (rc)
224 		goto failed;
225 
	/* Reopen a stale handle, retrying for as long as we get -EAGAIN. */
226 	if (req->cfile->invalidHandle) {
227 		do {
228 			rc = cifs_reopen_file(req->cfile, true);
229 		} while (rc == -EAGAIN);
230 		if (rc)
231 			goto failed;
232 	}
233 
	/*
	 * Buffered reads let netfs clear the part of the buffer beyond what
	 * the server returns; unbuffered/direct reads must not be padded.
	 */
234 	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
235 	    subreq->rreq->origin != NETFS_DIO_READ)
236 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
237 
238 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
239 	rc = rdata->server->ops->async_readv(rdata);
240 	if (rc)
241 		goto failed;
242 	return;
243 
244 failed:
245 	subreq->error = rc;
246 	netfs_read_subreq_terminated(subreq);
247 }
248 
249 /*
250  * Writeback calls this when it finds a folio that needs uploading.  This isn't
251  * called if writeback only has copy-to-cache to deal with.
252  */
cifs_begin_writeback(struct netfs_io_request * wreq)253 static void cifs_begin_writeback(struct netfs_io_request *wreq)
254 {
255 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
256 	int ret;
257 
258 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_ANY, &req->cfile);
259 	if (ret) {
260 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
261 		return;
262 	}
263 
264 	wreq->io_streams[0].avail = true;
265 }
266 
267 /*
268  * Initialise a request.
269  */
cifs_init_request(struct netfs_io_request * rreq,struct file * file)270 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
271 {
272 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
273 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode);
274 	struct cifsFileInfo *open_file = NULL;
275 
276 	rreq->rsize = cifs_sb->ctx->rsize;
277 	rreq->wsize = cifs_sb->ctx->wsize;
278 	req->pid = current->tgid; // Ummm...  This may be a workqueue
279 
	/* Pin a file reference for the request; dropped in cifs_free_request(). */
280 	if (file) {
281 		open_file = file->private_data;
282 		rreq->netfs_priv = file->private_data;
283 		req->cfile = cifsFileInfo_get(open_file);
		/* rwpidforward mount flag: issue I/O under the opener's pid */
284 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RWPIDFORWARD)
285 			req->pid = req->cfile->pid;
286 	} else if (rreq->origin != NETFS_WRITEBACK) {
		/* Only writeback may legitimately arrive without a file. */
287 		WARN_ON_ONCE(1);
288 		return smb_EIO1(smb_eio_trace_not_netfs_writeback, rreq->origin);
289 	}
290 
291 	return 0;
292 }
293 
294 /*
295  * Completion of a request operation.
296  */
cifs_rreq_done(struct netfs_io_request * rreq)297 static void cifs_rreq_done(struct netfs_io_request *rreq)
298 {
299 	struct timespec64 atime, mtime;
300 	struct inode *inode = rreq->inode;
301 
302 	/* we do not want atime to be less than mtime, it broke some apps */
303 	atime = inode_set_atime_to_ts(inode, current_time(inode));
304 	mtime = inode_get_mtime(inode);
305 	if (timespec64_compare(&atime, &mtime))
306 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
307 }
308 
cifs_free_request(struct netfs_io_request * rreq)309 static void cifs_free_request(struct netfs_io_request *rreq)
310 {
311 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
312 
313 	if (req->cfile)
314 		cifsFileInfo_put(req->cfile);
315 }
316 
cifs_free_subrequest(struct netfs_io_subrequest * subreq)317 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
318 {
319 	struct cifs_io_subrequest *rdata =
320 		container_of(subreq, struct cifs_io_subrequest, subreq);
321 	int rc = subreq->error;
322 
323 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
324 #ifdef CONFIG_CIFS_SMB_DIRECT
325 		if (rdata->mr) {
326 			smbd_deregister_mr(rdata->mr);
327 			rdata->mr = NULL;
328 		}
329 #endif
330 	}
331 
332 	if (rdata->credits.value != 0) {
333 		trace_smb3_rw_credits(rdata->rreq->debug_id,
334 				      rdata->subreq.debug_index,
335 				      rdata->credits.value,
336 				      rdata->server ? rdata->server->credits : 0,
337 				      rdata->server ? rdata->server->in_flight : 0,
338 				      -rdata->credits.value,
339 				      cifs_trace_rw_credits_free_subreq);
340 		if (rdata->server)
341 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
342 		else
343 			rdata->credits.value = 0;
344 	}
345 
346 	if (rdata->have_xid)
347 		free_xid(rdata->xid);
348 }
349 
/*
 * Operations table handed to the netfs library; these hooks drive the
 * read and write I/O paths defined above.
 */
350 const struct netfs_request_ops cifs_req_ops = {
351 	.request_pool		= &cifs_io_request_pool,
352 	.subrequest_pool	= &cifs_io_subrequest_pool,
353 	.init_request		= cifs_init_request,
354 	.free_request		= cifs_free_request,
355 	.free_subrequest	= cifs_free_subrequest,
356 	.prepare_read		= cifs_prepare_read,
357 	.issue_read		= cifs_issue_read,
358 	.done			= cifs_rreq_done,
359 	.begin_writeback	= cifs_begin_writeback,
360 	.prepare_write		= cifs_prepare_write,
361 	.issue_write		= cifs_issue_write,
362 	.invalidate_cache	= cifs_netfs_invalidate_cache,
363 };
364 
365 /*
366  * Mark as invalid, all open files on tree connections since they
367  * were closed when session to server was lost.
368  */
369 void
cifs_mark_open_files_invalid(struct cifs_tcon * tcon)370 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
371 {
372 	struct cifsFileInfo *open_file = NULL;
373 	struct list_head *tmp;
374 	struct list_head *tmp1;
375 
376 	/* only send once per connect */
377 	spin_lock(&tcon->tc_lock);
378 	if (tcon->need_reconnect)
379 		tcon->status = TID_NEED_RECON;
380 
381 	if (tcon->status != TID_NEED_RECON) {
382 		spin_unlock(&tcon->tc_lock);
383 		return;
384 	}
385 	tcon->status = TID_IN_FILES_INVALIDATE;
386 	spin_unlock(&tcon->tc_lock);
387 
388 	/* list all files open on tree connection and mark them invalid */
389 	spin_lock(&tcon->open_file_lock);
390 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
391 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
392 		open_file->invalidHandle = true;
393 		open_file->oplock_break_cancelled = true;
394 	}
395 	spin_unlock(&tcon->open_file_lock);
396 
397 	invalidate_all_cached_dirs(tcon);
398 	spin_lock(&tcon->tc_lock);
399 	if (tcon->status == TID_IN_FILES_INVALIDATE)
400 		tcon->status = TID_NEED_TCON;
401 	spin_unlock(&tcon->tc_lock);
402 
403 	/*
404 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
405 	 * to this tcon.
406 	 */
407 }
408 
cifs_convert_flags(unsigned int flags,int rdwr_for_fscache)409 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
410 {
411 	if ((flags & O_ACCMODE) == O_RDONLY)
412 		return GENERIC_READ;
413 	else if ((flags & O_ACCMODE) == O_WRONLY)
414 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
415 	else if ((flags & O_ACCMODE) == O_RDWR) {
416 		/* GENERIC_ALL is too much permission to request
417 		   can cause unnecessary access denied on create */
418 		/* return GENERIC_ALL; */
419 		return (GENERIC_READ | GENERIC_WRITE);
420 	}
421 
422 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
423 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
424 		FILE_READ_DATA);
425 }
426 
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Translate POSIX open(2) flags into the SMB_O_* flag set used by the
 * legacy SMB1 POSIX open protocol extension.
 */
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		posix_flags = SMB_O_RDONLY;
		break;
	case O_WRONLY:
		posix_flags = SMB_O_WRONLY;
		break;
	case O_RDWR:
		posix_flags = SMB_O_RDWR;
		break;
	default:
		posix_flags = 0;
		break;
	}

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL) {
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);
	}

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
462 
cifs_get_disposition(unsigned int flags)463 static inline int cifs_get_disposition(unsigned int flags)
464 {
465 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
466 		return FILE_CREATE;
467 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
468 		return FILE_OVERWRITE_IF;
469 	else if ((flags & O_CREAT) == O_CREAT)
470 		return FILE_OPEN_IF;
471 	else if ((flags & O_TRUNC) == O_TRUNC)
472 		return FILE_OVERWRITE;
473 	else
474 		return FILE_OPEN;
475 }
476 
477 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Open a file via the legacy SMB1 POSIX protocol extension.  On success
 * *pnetfid and *poplock are filled in by the create call, and when the
 * server returned usable metadata and @pinode is non-NULL, the inode is
 * looked up (or revalidated) from that metadata.
 */
cifs_posix_open(const char * full_path,struct inode ** pinode,struct super_block * sb,int mode,unsigned int f_flags,__u32 * poplock,__u16 * pnetfid,unsigned int xid)478 int cifs_posix_open(const char *full_path, struct inode **pinode,
479 			struct super_block *sb, int mode, unsigned int f_flags,
480 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
481 {
482 	int rc;
483 	FILE_UNIX_BASIC_INFO *presp_data;
484 	__u32 posix_flags = 0;
485 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
486 	struct cifs_fattr fattr;
487 	struct tcon_link *tlink;
488 	struct cifs_tcon *tcon;
489 
490 	cifs_dbg(FYI, "posix open %s\n", full_path);
491 
	/* Response buffer for the server's returned UNIX metadata. */
492 	presp_data = kzalloc_obj(FILE_UNIX_BASIC_INFO);
493 	if (presp_data == NULL)
494 		return -ENOMEM;
495 
496 	tlink = cifs_sb_tlink(cifs_sb);
497 	if (IS_ERR(tlink)) {
498 		rc = PTR_ERR(tlink);
499 		goto posix_open_ret;
500 	}
501 
502 	tcon = tlink_tcon(tlink);
	/* Apply the process umask before sending the mode to the server. */
503 	mode &= ~current_umask();
504 
505 	posix_flags = cifs_posix_convert_flags(f_flags);
506 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
507 			     poplock, full_path, cifs_sb->local_nls,
508 			     cifs_remap(cifs_sb));
509 	cifs_put_tlink(tlink);
510 
511 	if (rc)
512 		goto posix_open_ret;
513 
	/* Type of -1 means the server sent no usable metadata back. */
514 	if (presp_data->Type == cpu_to_le32(-1))
515 		goto posix_open_ret; /* open ok, caller does qpathinfo */
516 
517 	if (!pinode)
518 		goto posix_open_ret; /* caller does not need info */
519 
520 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
521 
522 	/* get new inode and set it up */
523 	if (*pinode == NULL) {
524 		cifs_fill_uniqueid(sb, &fattr);
525 		*pinode = cifs_iget(sb, &fattr);
526 		if (!*pinode) {
527 			rc = -ENOMEM;
528 			goto posix_open_ret;
529 		}
530 	} else {
531 		cifs_revalidate_mapping(*pinode);
532 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
533 	}
534 
535 posix_open_ret:
536 	kfree(presp_data);
537 	return rc;
538 }
539 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
540 
/*
 * Open a file using the standard (non-POSIX-extension) path: derive NT
 * desired access, disposition and create options from the POSIX open
 * flags, issue the open via the server's ->open op, then fetch inode
 * metadata for the new handle.
 */
cifs_nt_open(const char * full_path,struct inode * inode,struct cifs_sb_info * cifs_sb,struct cifs_tcon * tcon,unsigned int f_flags,__u32 * oplock,struct cifs_fid * fid,unsigned int xid,struct cifs_open_info_data * buf)541 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
542 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
543 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
544 {
545 	int rc;
546 	int desired_access;
547 	int disposition;
548 	int create_options = CREATE_NOT_DIR;
549 	struct TCP_Server_Info *server = tcon->ses->server;
550 	struct cifs_open_parms oparms;
551 	int rdwr_for_fscache = 0;
552 
553 	if (!server->ops->open)
554 		return -ENOSYS;
555 
556 	/* If we're caching, we need to be able to fill in around partial writes. */
557 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
558 		rdwr_for_fscache = 1;
559 
560 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
561 
562 /*********************************************************************
563  *  open flag mapping table:
564  *
565  *	POSIX Flag            CIFS Disposition
566  *	----------            ----------------
567  *	O_CREAT               FILE_OPEN_IF
568  *	O_CREAT | O_EXCL      FILE_CREATE
569  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
570  *	O_TRUNC               FILE_OVERWRITE
571  *	none of the above     FILE_OPEN
572  *
573  *	Note that there is not a direct match between disposition
574  *	FILE_SUPERSEDE (ie create whether or not file exists although
575  *	O_CREAT | O_TRUNC is similar but truncates the existing
576  *	file rather than creating a new file as FILE_SUPERSEDE does
577  *	(which uses the attributes / metadata passed in on open call)
578  *?
579  *?  O_SYNC is a reasonable match to CIFS writethrough flag
580  *?  and the read write flags match reasonably.  O_LARGEFILE
581  *?  is irrelevant because largefile support is always used
582  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
583  *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
584  *********************************************************************/
585 
586 	disposition = cifs_get_disposition(f_flags);
587 	/* BB pass O_SYNC flag through on file attributes .. BB */
	/* Fold in flag-derived options on top of CREATE_NOT_DIR. */
588 	create_options |= cifs_open_create_options(f_flags, create_options);
589 
590 retry_open:
591 	oparms = (struct cifs_open_parms) {
592 		.tcon = tcon,
593 		.cifs_sb = cifs_sb,
594 		.desired_access = desired_access,
595 		.create_options = cifs_create_options(cifs_sb, create_options),
596 		.disposition = disposition,
597 		.path = full_path,
598 		.fid = fid,
599 	};
600 
601 	rc = server->ops->open(xid, &oparms, oplock, buf);
602 	if (rc) {
		/*
		 * The read/write promotion for fscache was refused; retry
		 * once with plain write-only access (rdwr_for_fscache == 2
		 * records that we fell back).
		 */
603 		if (rc == -EACCES && rdwr_for_fscache == 1) {
604 			desired_access = cifs_convert_flags(f_flags, 0);
605 			rdwr_for_fscache = 2;
606 			goto retry_open;
607 		}
608 		return rc;
609 	}
	/* Fallback path taken: the local cache cannot be filled, drop it. */
610 	if (rdwr_for_fscache == 2)
611 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
612 
613 	/* TODO: Add support for calling posix query info but with passing in fid */
614 	if (tcon->unix_ext)
615 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
616 					      xid);
617 	else
618 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
619 					 xid, fid);
620 
	/* Metadata fetch failed: close the just-opened handle again. */
621 	if (rc) {
622 		server->ops->close(xid, tcon, fid);
		/* -ESTALE here means the open raced a delete; remap for VFS. */
623 		if (rc == -ESTALE)
624 			rc = -EOPENSTALE;
625 	}
626 
627 	return rc;
628 }
629 
630 static bool
cifs_has_mand_locks(struct cifsInodeInfo * cinode)631 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
632 {
633 	struct cifs_fid_locks *cur;
634 	bool has_locks = false;
635 
636 	down_read(&cinode->lock_sem);
637 	list_for_each_entry(cur, &cinode->llist, llist) {
638 		if (!list_empty(&cur->locks)) {
639 			has_locks = true;
640 			break;
641 		}
642 	}
643 	up_read(&cinode->lock_sem);
644 	return has_locks;
645 }
646 
/*
 * Acquire @sem for writing by polling with down_write_trylock() and
 * sleeping 10ms between attempts, rather than blocking in down_write().
 */
void
cifs_down_write(struct rw_semaphore *sem)
{
	for (;;) {
		if (down_write_trylock(sem))
			return;
		msleep(10);
	}
}
653 
654 static void cifsFileInfo_put_work(struct work_struct *work);
655 void serverclose_work(struct work_struct *work);
656 
/*
 * Allocate and initialise the per-open-file private data (cifsFileInfo)
 * for a freshly opened handle, link it onto the tcon and inode open-file
 * lists, and install it as file->private_data.  Returns NULL on
 * allocation failure.
 */
cifs_new_fileinfo(struct cifs_fid * fid,struct file * file,struct tcon_link * tlink,__u32 oplock,const char * symlink_target)657 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
658 				       struct tcon_link *tlink, __u32 oplock,
659 				       const char *symlink_target)
660 {
661 	struct dentry *dentry = file_dentry(file);
662 	struct inode *inode = d_inode(dentry);
663 	struct cifsInodeInfo *cinode = CIFS_I(inode);
664 	struct cifsFileInfo *cfile;
665 	struct cifs_fid_locks *fdlocks;
666 	struct cifs_tcon *tcon = tlink_tcon(tlink);
667 	struct TCP_Server_Info *server = tcon->ses->server;
668 
669 	cfile = kzalloc_obj(struct cifsFileInfo);
670 	if (cfile == NULL)
671 		return cfile;
672 
673 	fdlocks = kzalloc_obj(struct cifs_fid_locks);
674 	if (!fdlocks) {
675 		kfree(cfile);
676 		return NULL;
677 	}
678 
679 	if (symlink_target) {
680 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
681 		if (!cfile->symlink_target) {
682 			kfree(fdlocks);
683 			kfree(cfile);
684 			return NULL;
685 		}
686 	}
687 
688 	INIT_LIST_HEAD(&fdlocks->locks);
689 	fdlocks->cfile = cfile;
690 	cfile->llist = fdlocks;
691 
	/* Initial reference; released through cifsFileInfo_put(). */
692 	cfile->count = 1;
693 	cfile->pid = current->tgid;
694 	cfile->uid = current_fsuid();
695 	cfile->dentry = dget(dentry);
696 	cfile->f_flags = file->f_flags;
697 	cfile->invalidHandle = false;
698 	cfile->deferred_close_scheduled = false;
699 	cfile->tlink = cifs_get_tlink(tlink);
700 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
701 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
702 	INIT_WORK(&cfile->serverclose, serverclose_work);
703 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
704 	mutex_init(&cfile->fh_mutex);
705 	spin_lock_init(&cfile->file_info_lock);
706 
707 	/*
708 	 * If the server returned a read oplock and we have mandatory brlocks,
709 	 * set oplock level to None.
710 	 */
711 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
712 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
713 		oplock = 0;
714 	}
715 
716 	cifs_down_write(&cinode->lock_sem);
717 	list_add(&fdlocks->llist, &cinode->llist);
718 	up_write(&cinode->lock_sem);
719 
720 	spin_lock(&tcon->open_file_lock);
	/* Prefer an oplock delivered while the open was still pending. */
721 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
722 		oplock = fid->pending_open->oplock;
723 	list_del(&fid->pending_open->olist);
724 
725 	list_add(&cfile->tlist, &tcon->openFileList);
726 	atomic_inc(&tcon->num_local_opens);
727 
728 	/* if readable file instance put first in list*/
729 	spin_lock(&cinode->open_file_lock);
	/* set_fid may flip purge_cache back on; checked below after unlock. */
730 	fid->purge_cache = false;
731 	server->ops->set_fid(cfile, fid, oplock);
732 
733 	if (file->f_mode & FMODE_READ)
734 		list_add(&cfile->flist, &cinode->openFileList);
735 	else
736 		list_add_tail(&cfile->flist, &cinode->openFileList);
737 	spin_unlock(&cinode->open_file_lock);
738 	spin_unlock(&tcon->open_file_lock);
739 
740 	if (fid->purge_cache)
741 		cifs_zap_mapping(inode);
742 
743 	file->private_data = cfile;
744 	return cfile;
745 }
746 
/* Take an additional reference on an open file's private data. */
747 struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo * cifs_file)748 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
749 {
	/* Bump the refcount under file_info_lock; paired with cifsFileInfo_put(). */
750 	spin_lock(&cifs_file->file_info_lock);
751 	cifsFileInfo_get_locked(cifs_file);
752 	spin_unlock(&cifs_file->file_info_lock);
753 	return cifs_file;
754 }
755 
/* Final teardown of a cifsFileInfo once no references remain. */
cifsFileInfo_put_final(struct cifsFileInfo * cifs_file)756 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
757 {
758 	struct inode *inode = d_inode(cifs_file->dentry);
759 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
760 	struct cifsLockInfo *li, *tmp;
761 
762 	/*
763 	 * Delete any outstanding lock records. We'll lose them when the file
764 	 * is closed anyway.
765 	 */
766 	cifs_down_write(&cifsi->lock_sem);
767 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
768 		list_del(&li->llist);
769 		cifs_del_lock_waiters(li);
770 		kfree(li);
771 	}
772 	list_del(&cifs_file->llist->llist);
773 	kfree(cifs_file->llist);
774 	up_write(&cifsi->lock_sem);
775 
	/* Drop the tlink/dentry references taken in cifs_new_fileinfo(). */
776 	cifs_put_tlink(cifs_file->tlink);
777 	dput(cifs_file->dentry);
778 	kfree(cifs_file->symlink_target);
779 	kfree(cifs_file);
780 }
781 
cifsFileInfo_put_work(struct work_struct * work)782 static void cifsFileInfo_put_work(struct work_struct *work)
783 {
784 	struct cifsFileInfo *cifs_file = container_of(work,
785 			struct cifsFileInfo, put);
786 
787 	cifsFileInfo_put_final(cifs_file);
788 }
789 
serverclose_work(struct work_struct * work)790 void serverclose_work(struct work_struct *work)
791 {
792 	struct cifsFileInfo *cifs_file = container_of(work,
793 			struct cifsFileInfo, serverclose);
794 
795 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
796 
797 	struct TCP_Server_Info *server = tcon->ses->server;
798 	int rc = 0;
799 	int retries = 0;
800 	int MAX_RETRIES = 4;
801 
802 	do {
803 		if (server->ops->close_getattr)
804 			rc = server->ops->close_getattr(0, tcon, cifs_file);
805 		else if (server->ops->close)
806 			rc = server->ops->close(0, tcon, &cifs_file->fid);
807 
808 		if (rc == -EBUSY || rc == -EAGAIN) {
809 			retries++;
810 			msleep(250);
811 		}
812 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
813 	);
814 
815 	if (retries == MAX_RETRIES)
816 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
817 
818 	if (cifs_file->offload)
819 		queue_work(fileinfo_put_wq, &cifs_file->put);
820 	else
821 		cifsFileInfo_put_final(cifs_file);
822 }
823 
824 /**
825  * cifsFileInfo_put - release a reference of file priv data
826  *
827  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
828  *
829  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
830  */
cifsFileInfo_put(struct cifsFileInfo * cifs_file)831 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
832 {
	/* See _cifsFileInfo_put() for the meaning of the two bool flags. */
833 	_cifsFileInfo_put(cifs_file, true, true);
834 }
835 
836 /**
837  * _cifsFileInfo_put - release a reference of file priv data
838  *
839  * This may involve closing the filehandle @cifs_file out on the
840  * server. Must be called without holding tcon->open_file_lock,
841  * cinode->open_file_lock and cifs_file->file_info_lock.
842  *
843  * If @wait_for_oplock_handler is true and we are releasing the last
844  * reference, wait for any running oplock break handler of the file
845  * and cancel any pending one.
846  *
847  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
848  * @wait_oplock_handler: must be false if called from oplock_break_handler
849  * @offload:	not offloaded on close and oplock breaks
850  *
851  */
_cifsFileInfo_put(struct cifsFileInfo * cifs_file,bool wait_oplock_handler,bool offload)852 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
853 		       bool wait_oplock_handler, bool offload)
854 {
855 	struct inode *inode = d_inode(cifs_file->dentry);
856 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
857 	struct TCP_Server_Info *server = tcon->ses->server;
858 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
859 	struct super_block *sb = inode->i_sb;
860 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
861 	struct cifs_fid fid = {};
862 	struct cifs_pending_open open;
863 	bool oplock_break_cancelled;
864 	bool serverclose_offloaded = false;
865 
866 	spin_lock(&tcon->open_file_lock);
867 	spin_lock(&cifsi->open_file_lock);
868 	spin_lock(&cifs_file->file_info_lock);
869 
870 	cifs_file->offload = offload;
871 	if (--cifs_file->count > 0) {
872 		spin_unlock(&cifs_file->file_info_lock);
873 		spin_unlock(&cifsi->open_file_lock);
874 		spin_unlock(&tcon->open_file_lock);
875 		return;
876 	}
877 	spin_unlock(&cifs_file->file_info_lock);
878 
879 	if (server->ops->get_lease_key)
880 		server->ops->get_lease_key(inode, &fid);
881 
882 	/* store open in pending opens to make sure we don't miss lease break */
883 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
884 
885 	/* remove it from the lists */
886 	list_del(&cifs_file->flist);
887 	list_del(&cifs_file->tlist);
888 	atomic_dec(&tcon->num_local_opens);
889 
890 	if (list_empty(&cifsi->openFileList)) {
891 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
892 			 d_inode(cifs_file->dentry));
893 		/*
894 		 * In strict cache mode we need invalidate mapping on the last
895 		 * close  because it may cause a error when we open this file
896 		 * again and get at least level II oplock.
897 		 */
898 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_STRICT_IO)
899 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
900 		cifs_set_oplock_level(cifsi, 0);
901 	}
902 
903 	spin_unlock(&cifsi->open_file_lock);
904 	spin_unlock(&tcon->open_file_lock);
905 
906 	oplock_break_cancelled = wait_oplock_handler ?
907 		cancel_work_sync(&cifs_file->oplock_break) : false;
908 
909 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
910 		struct TCP_Server_Info *server = tcon->ses->server;
911 		unsigned int xid;
912 		int rc = 0;
913 
914 		xid = get_xid();
915 		if (server->ops->close_getattr)
916 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
917 		else if (server->ops->close)
918 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
919 		_free_xid(xid);
920 
921 		if (rc == -EBUSY || rc == -EAGAIN) {
922 			// Server close failed, hence offloading it as an async op
923 			queue_work(serverclose_wq, &cifs_file->serverclose);
924 			serverclose_offloaded = true;
925 		}
926 	}
927 
928 	if (oplock_break_cancelled)
929 		cifs_done_oplock_break(cifsi);
930 
931 	cifs_del_pending_open(&open);
932 
933 	// if serverclose has been offloaded to wq (on failure), it will
934 	// handle offloading put as well. If serverclose not offloaded,
935 	// we need to handle offloading put here.
936 	if (!serverclose_offloaded) {
937 		if (offload)
938 			queue_work(fileinfo_put_wq, &cifs_file->put);
939 		else
940 			cifsFileInfo_put_final(cifs_file);
941 	}
942 }
943 
cifs_file_flush(const unsigned int xid,struct inode * inode,struct cifsFileInfo * cfile)944 int cifs_file_flush(const unsigned int xid, struct inode *inode,
945 		    struct cifsFileInfo *cfile)
946 {
947 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
948 	struct cifs_tcon *tcon;
949 	int rc;
950 
951 	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)
952 		return 0;
953 
954 	if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
955 		tcon = tlink_tcon(cfile->tlink);
956 		return tcon->ses->server->ops->flush(xid, tcon,
957 						     &cfile->fid);
958 	}
959 	rc = cifs_get_writable_file(CIFS_I(inode), FIND_ANY, &cfile);
960 	if (!rc) {
961 		tcon = tlink_tcon(cfile->tlink);
962 		rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
963 		cifsFileInfo_put(cfile);
964 	} else if (rc == -EBADF) {
965 		rc = 0;
966 	}
967 	return rc;
968 }
969 
/*
 * Truncate a file to zero length as part of an O_TRUNC open.
 *
 * Dirty pagecache is written back first so cached data does not race the
 * server-side truncate, then the data is flushed and the server asked to set
 * the file size to 0.  On success the local inode size and block accounting
 * are updated to match.
 *
 * Returns 0 on success or a negative errno.
 */
static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
	struct inode *inode = d_inode(dentry);
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	int rc;

	rc = filemap_write_and_wait(inode->i_mapping);
	/* writeback interrupted by a signal: let the caller restart */
	if (is_interrupt_error(rc))
		return -ERESTARTSYS;
	mapping_set_error(inode->i_mapping, rc);

	/* prefer an existing writable handle owned by this fsuid, if any */
	cfile = find_writable_file(cinode, FIND_FSUID_ONLY);
	rc = cifs_file_flush(xid, inode, cfile);
	if (!rc) {
		/*
		 * NOTE(review): with no writable handle only the local state
		 * is zeroed here — presumably a path-based setattr elsewhere
		 * covers the server side; confirm against callers.
		 */
		if (cfile) {
			tcon = tlink_tcon(cfile->tlink);
			server = tcon->ses->server;
			rc = server->ops->set_file_size(xid, tcon,
							cfile, 0, false);
		}
		if (!rc) {
			/* mirror the new zero size into the local caches */
			netfs_resize_file(&cinode->netfs, 0, true);
			cifs_setsize(inode, 0);
			inode->i_blocks = 0;
		}
	}
	if (cfile)
		cifsFileInfo_put(cfile);	/* ref from find_writable_file() */
	return rc;
}
1003 
/*
 * VFS ->open() for regular files on a CIFS/SMB mount.
 *
 * Tries, in order: reuse of a cached handle whose close was deferred, a
 * legacy SMB1 unix/posix open, and finally a regular NT create.  On success
 * the resulting cifsFileInfo is stashed in file->private_data.  O_TRUNC is
 * honored by truncating first; O_DIRECT on a strict-cache mount switches
 * file->f_op to the direct-I/O operations.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifs_open_info_data data = {};
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_pending_open open;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	const char *full_path;
	unsigned int sbflags;
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	void *page;

	xid = get_xid();

	/* fail fast if the superblock has been forcibly shut down */
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return smb_EIO(smb_eio_trace_forced_shutdown);
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* strict-cache O_DIRECT opens use the uncached file_operations */
	sbflags = cifs_sb_flags(cifs_sb);
	if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) {
		if (sbflags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (file->f_flags & O_TRUNC) {
		rc = cifs_do_truncate(xid, file_dentry(file));
		if (rc)
			goto out;
	}

	/* Get the cached handle as SMB2 close is deferred */
	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
		rc = __cifs_get_writable_file(CIFS_I(inode),
					      FIND_FSUID_ONLY |
					      FIND_NO_PENDING_DELETE |
					      FIND_OPEN_FLAGS,
					      file->f_flags, &cfile);
	} else {
		cfile = __find_readable_file(CIFS_I(inode),
					     FIND_NO_PENDING_DELETE |
					     FIND_OPEN_FLAGS,
					     file->f_flags);
		rc = cfile ? 0 : -ENOENT;
	}
	if (rc == 0) {
		/* reusable handle found: cancel its deferred close and reuse */
		file->private_data = cfile;
		spin_lock(&CIFS_I(inode)->deferred_lock);
		cifs_del_deferred_close(cfile);
		spin_unlock(&CIFS_I(inode)->deferred_lock);
		goto use_cache;
	}
	/* hard link on the deferred close file */
	rc = cifs_get_hardlink_path(tcon, inode, file);
	if (rc)
		cifs_close_deferred_file(CIFS_I(inode));

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* record in pending opens so a lease break is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		/* undo the server-side open; nothing tracks it any more */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	/* an O_DIRECT writer bypasses the cache: drop any cached data */
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}
1182 
1183 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1184 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1185 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1186 
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.  Called from cifs_reopen_file() after a handle has
 * been re-established.  Returns 0 or a negative errno from the push.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* _nested: lockdep annotation since callers may hold lock_sem-class locks */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* posix-style relock only when unix extensions allow it and it is not disabled */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
1220 
/*
 * Re-establish a server handle for @cfile after it was invalidated
 * (reconnect, durable handle timeout, persistent handle recovery).
 *
 * Serialized against other users of the handle by cfile->fh_mutex; a handle
 * that is already valid makes this a no-op.  When @can_flush is true, dirty
 * data is written back and the inode metadata refreshed from the server;
 * when false (writeback path) flushing would deadlock, so it is skipped.
 *
 * Returns 0 on success or a negative errno.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened it */
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
	create_options |= cifs_open_create_options(cfile->f_flags,
						   create_options);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		/* server refused the widened (RDWR) access: retry as requested */
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	scoped_guard(spinlock, &cinode->open_file_lock)
		server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
1404 
smb2_deferred_work_close(struct work_struct * work)1405 void smb2_deferred_work_close(struct work_struct *work)
1406 {
1407 	struct cifsFileInfo *cfile = container_of(work,
1408 			struct cifsFileInfo, deferred.work);
1409 
1410 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1411 	cifs_del_deferred_close(cfile);
1412 	cfile->deferred_close_scheduled = false;
1413 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1414 	_cifsFileInfo_put(cfile, true, false);
1415 }
1416 
1417 static bool
smb2_can_defer_close(struct inode * inode,struct cifs_deferred_close * dclose)1418 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1419 {
1420 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1421 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1422 	unsigned int oplock = READ_ONCE(cinode->oplock);
1423 
1424 	return cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1425 		(oplock == CIFS_CACHE_RHW_FLG || oplock == CIFS_CACHE_RH_FLG) &&
1426 		!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags);
1427 
1428 }
1429 
/*
 * VFS ->release() for regular files.
 *
 * Instead of closing the SMB handle immediately, the close is deferred for
 * ctx->closetimeo when we hold a suitable lease (see smb2_can_defer_close),
 * so a quickly-following re-open can reuse the handle.  The dclose record is
 * allocated up front because cifs_add_deferred_close() runs under the
 * deferred_lock spinlock; if the allocation fails, dclose is NULL and
 * smb2_can_defer_close() rejects deferral, falling back to immediate close.
 */
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc_obj(struct cifs_deferred_close);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			/* flush pending attribute changes into the timestamps */
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new
				 * work, so take an extra reference to avoid use-after-free
				 * when that work later fires and puts the file.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				/* reference is handed over to the queued work */
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			/* no deferral possible: close now */
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
1479 
1480 void
cifs_reopen_persistent_handles(struct cifs_tcon * tcon)1481 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1482 {
1483 	struct cifsFileInfo *open_file, *tmp;
1484 	LIST_HEAD(tmp_list);
1485 
1486 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1487 		return;
1488 
1489 	tcon->need_reopen_files = false;
1490 
1491 	cifs_dbg(FYI, "Reopen persistent handles\n");
1492 
1493 	/* list all files open on tree connection, reopen resilient handles  */
1494 	spin_lock(&tcon->open_file_lock);
1495 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1496 		if (!open_file->invalidHandle)
1497 			continue;
1498 		cifsFileInfo_get(open_file);
1499 		list_add_tail(&open_file->rlist, &tmp_list);
1500 	}
1501 	spin_unlock(&tcon->open_file_lock);
1502 
1503 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1504 		if (cifs_reopen_file(open_file, false /* do not flush */))
1505 			tcon->need_reopen_files = true;
1506 		list_del_init(&open_file->rlist);
1507 		cifsFileInfo_put(open_file);
1508 	}
1509 }
1510 
/*
 * VFS ->release() for directories: close any uncompleted readdir handle on
 * the server, free the cached search buffer, and tear down the private
 * cifsFileInfo.  Server-side close failures are ignored.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		/* mark invalid before dropping the lock and issuing the close */
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* release the buffered search results, if any */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
1561 
1562 static struct cifsLockInfo *
cifs_lock_init(__u64 offset,__u64 length,__u8 type,__u16 flags)1563 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1564 {
1565 	struct cifsLockInfo *lock =
1566 		kmalloc_obj(struct cifsLockInfo);
1567 	if (!lock)
1568 		return lock;
1569 	lock->offset = offset;
1570 	lock->length = length;
1571 	lock->type = type;
1572 	lock->pid = current->tgid;
1573 	lock->flags = flags;
1574 	INIT_LIST_HEAD(&lock->blist);
1575 	init_waitqueue_head(&lock->block_q);
1576 	return lock;
1577 }
1578 
1579 void
cifs_del_lock_waiters(struct cifsLockInfo * lock)1580 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1581 {
1582 	struct cifsLockInfo *li, *tmp;
1583 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1584 		list_del_init(&li->blist);
1585 		wake_up(&li->block_q);
1586 	}
1587 }
1588 
1589 #define CIFS_LOCK_OP	0
1590 #define CIFS_READ_OP	1
1591 #define CIFS_WRITE_OP	2
1592 
/*
 * Scan one fid's lock list for a lock conflicting with the given range.
 *
 * @rw_check selects the semantics: CIFS_LOCK_OP (0) for a lock request,
 * CIFS_READ_OP (1) for a read, CIFS_WRITE_OP (2) for a write.  On conflict,
 * *conf_lock (if non-NULL) is set to the conflicting lock and true is
 * returned.  Caller must hold the inode's lock_sem.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip locks whose range does not overlap ours */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		/* for I/O: our own locks (same tgid, same fid) don't conflict ... */
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* shared locks coexist with our own, or with other shared locks */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		/* OFD locks on the same fid never conflict with each other */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
1629 
1630 bool
cifs_find_lock_conflict(struct cifsFileInfo * cfile,__u64 offset,__u64 length,__u8 type,__u16 flags,struct cifsLockInfo ** conf_lock,int rw_check)1631 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1632 			__u8 type, __u16 flags,
1633 			struct cifsLockInfo **conf_lock, int rw_check)
1634 {
1635 	bool rc = false;
1636 	struct cifs_fid_locks *cur;
1637 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1638 
1639 	list_for_each_entry(cur, &cinode->llist, llist) {
1640 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1641 						 flags, cfile, conf_lock,
1642 						 rw_check);
1643 		if (rc)
1644 			break;
1645 	}
1646 
1647 	return rc;
1648 }
1649 
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's range/owner/type via flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;	/* no local conflict, but the server must be asked */
	else
		flock->c.flc_type = F_UNLCK;	/* cached locally: definitely free */

	up_read(&cinode->lock_sem);
	return rc;
}
1688 
1689 static void
cifs_lock_add(struct cifsFileInfo * cfile,struct cifsLockInfo * lock)1690 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1691 {
1692 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1693 	cifs_down_write(&cinode->lock_sem);
1694 	list_add_tail(&lock->llist, &cfile->llist->locks);
1695 	up_write(&cinode->lock_sem);
1696 }
1697 
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed: record locally, done */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* block on the conflicting lock's waiter list ... */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		/*
		 * ... until cifs_del_lock_waiters() detaches us (blist becomes
		 * a self-referencing empty list) and wakes block_q, then retry.
		 */
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: unhook ourselves from the waiter list */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1745 
1746 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1747 /*
1748  * Check if there is another lock that prevents us to set the lock (posix
1749  * style). If such a lock exists, update the flock structure with its
1750  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1751  * or leave it the same if we can't. Returns 0 if we don't need to request to
1752  * the server or 1 otherwise.
1753  */
1754 static int
cifs_posix_lock_test(struct file * file,struct file_lock * flock)1755 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1756 {
1757 	int rc = 0;
1758 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1759 	unsigned char saved_type = flock->c.flc_type;
1760 
1761 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1762 		return 1;
1763 
1764 	down_read(&cinode->lock_sem);
1765 	posix_test_lock(file, flock);
1766 
1767 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1768 		flock->c.flc_type = saved_type;
1769 		rc = 1;
1770 	}
1771 
1772 	up_read(&cinode->lock_sem);
1773 	return rc;
1774 }
1775 
1776 /*
1777  * Set the byte-range lock (posix style). Returns:
1778  * 1) <0, if the error occurs while setting the lock;
1779  * 2) 0, if we set the lock and don't need to request to the server;
1780  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1781  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1782  */
1783 static int
cifs_posix_lock_set(struct file * file,struct file_lock * flock)1784 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1785 {
1786 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1787 	int rc = FILE_LOCK_DEFERRED + 1;
1788 
1789 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1790 		return rc;
1791 
1792 	cifs_down_write(&cinode->lock_sem);
1793 	if (!cinode->can_cache_brlcks) {
1794 		up_write(&cinode->lock_sem);
1795 		return rc;
1796 	}
1797 
1798 	rc = posix_lock_file(file, flock, NULL);
1799 	up_write(&cinode->lock_sem);
1800 	return rc;
1801 }
1802 
/*
 * Re-send all locally cached mandatory byte-range locks for @cfile to the
 * server (SMB1 LOCKING_ANDX), batching up to max_num ranges per request and
 * issuing one pass per lock type (exclusive, then shared).  The first
 * per-batch error is remembered and returned; remaining batches are still
 * attempted.  Caller must hold the inode's lock_sem (see cifs_relock_file).
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* how many lock ranges fit in one request */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			/* marshal the range in little-endian wire format */
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* batch full: send it and start a new one */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* flush the final partial batch for this type */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1879 
1880 static __u32
hash_lockowner(fl_owner_t owner)1881 hash_lockowner(fl_owner_t owner)
1882 {
1883 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1884 }
1885 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1886 
/*
 * Detached copy of one POSIX lock, queued on a local list so the lock can
 * be sent to the server after the VFS flc_lock spinlock has been dropped
 * (see cifs_push_posix_locks()).
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the caller's locks_to_send list */
	__u64 offset;		/* start offset of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* hashed lock owner, sent as the lock pid */
	__u16 netfid;		/* server file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1895 
1896 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1897 static int
cifs_push_posix_locks(struct cifsFileInfo * cfile)1898 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1899 {
1900 	struct inode *inode = d_inode(cfile->dentry);
1901 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1902 	struct file_lock *flock;
1903 	struct file_lock_context *flctx = locks_inode_context(inode);
1904 	unsigned int count = 0, i;
1905 	int rc = 0, xid, type;
1906 	struct list_head locks_to_send, *el;
1907 	struct lock_to_push *lck, *tmp;
1908 	__u64 length;
1909 
1910 	xid = get_xid();
1911 
1912 	if (!flctx)
1913 		goto out;
1914 
1915 	spin_lock(&flctx->flc_lock);
1916 	list_for_each(el, &flctx->flc_posix) {
1917 		count++;
1918 	}
1919 	spin_unlock(&flctx->flc_lock);
1920 
1921 	INIT_LIST_HEAD(&locks_to_send);
1922 
1923 	/*
1924 	 * Allocating count locks is enough because no FL_POSIX locks can be
1925 	 * added to the list while we are holding cinode->lock_sem that
1926 	 * protects locking operations of this inode.
1927 	 */
1928 	for (i = 0; i < count; i++) {
1929 		lck = kmalloc_obj(struct lock_to_push);
1930 		if (!lck) {
1931 			rc = -ENOMEM;
1932 			goto err_out;
1933 		}
1934 		list_add_tail(&lck->llist, &locks_to_send);
1935 	}
1936 
1937 	el = locks_to_send.next;
1938 	spin_lock(&flctx->flc_lock);
1939 	for_each_file_lock(flock, &flctx->flc_posix) {
1940 		unsigned char ftype = flock->c.flc_type;
1941 
1942 		if (el == &locks_to_send) {
1943 			/*
1944 			 * The list ended. We don't have enough allocated
1945 			 * structures - something is really wrong.
1946 			 */
1947 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1948 			break;
1949 		}
1950 		length = cifs_flock_len(flock);
1951 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1952 			type = CIFS_RDLCK;
1953 		else
1954 			type = CIFS_WRLCK;
1955 		lck = list_entry(el, struct lock_to_push, llist);
1956 		lck->pid = hash_lockowner(flock->c.flc_owner);
1957 		lck->netfid = cfile->fid.netfid;
1958 		lck->length = length;
1959 		lck->type = type;
1960 		lck->offset = flock->fl_start;
1961 	}
1962 	spin_unlock(&flctx->flc_lock);
1963 
1964 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1965 		int stored_rc;
1966 
1967 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1968 					     lck->offset, lck->length, NULL,
1969 					     lck->type, 0);
1970 		if (stored_rc)
1971 			rc = stored_rc;
1972 		list_del(&lck->llist);
1973 		kfree(lck);
1974 	}
1975 
1976 out:
1977 	free_xid(xid);
1978 	return rc;
1979 err_out:
1980 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1981 		list_del(&lck->llist);
1982 		kfree(lck);
1983 	}
1984 	goto out;
1985 }
1986 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1987 
/*
 * Push this file's cached byte-range locks to the server, choosing POSIX
 * locks (unix extensions) or mandatory locks depending on what the
 * session and mount allow.  Clears can_cache_brlcks so subsequent lock
 * requests go straight to the server.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* Nothing is cached locally - locks are already pushed. */
		up_write(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
2018 
/*
 * Decode a VFS file_lock into CIFS terms: report the on-the-wire lock
 * bits in *@type, whether this is a lock or an unlock request in
 * *@lock/*@unlock, and whether the caller may block in *@wait_flag.
 */
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->c.flc_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->c.flc_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->c.flc_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->c.flc_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->c.flc_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->c.flc_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
		         flock->c.flc_flags);

	/* Map the VFS lock type onto the server's lock-type bits. */
	*type = server->vals->large_lock_type;
	if (lock_is_write(flock)) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (lock_is_unlock(flock)) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (lock_is_read(flock)) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
2066 
/*
 * Handle F_GETLK: test whether @flock could be set.  POSIX-capable mounts
 * test via cifs_posix_lock_test()/CIFSSMBPosixLock(); otherwise we probe
 * the server by briefly acquiring and releasing a mandatory lock.  The
 * answer is reported in flock->c.flc_type: F_UNLCK if the range is free,
 * F_WRLCK or F_RDLCK if a conflicting lock exists.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* Test against locally known POSIX locks first. */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* A locally cached conflicting lock answers the query directly. */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* Probe lock succeeded - range is free; undo the probe. */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->c.flc_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* Even a shared probe failed - report an exclusive holder. */
		flock->c.flc_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* Retry as a shared probe to distinguish read from write conflicts. */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->c.flc_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->c.flc_type = F_WRLCK;

	return 0;
}
2137 
2138 void
cifs_move_llist(struct list_head * source,struct list_head * dest)2139 cifs_move_llist(struct list_head *source, struct list_head *dest)
2140 {
2141 	struct list_head *li, *tmp;
2142 	list_for_each_safe(li, tmp, source)
2143 		list_move(li, dest);
2144 }
2145 
2146 int
cifs_get_hardlink_path(struct cifs_tcon * tcon,struct inode * inode,struct file * file)2147 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2148 				struct file *file)
2149 {
2150 	struct cifsFileInfo *open_file = NULL;
2151 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2152 	int rc = 0;
2153 
2154 	spin_lock(&tcon->open_file_lock);
2155 	spin_lock(&cinode->open_file_lock);
2156 
2157 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2158 		if (file->f_flags == open_file->f_flags) {
2159 			rc = -EINVAL;
2160 			break;
2161 		}
2162 	}
2163 
2164 	spin_unlock(&cinode->open_file_lock);
2165 	spin_unlock(&tcon->open_file_lock);
2166 	return rc;
2167 }
2168 
2169 void
cifs_free_llist(struct list_head * llist)2170 cifs_free_llist(struct list_head *llist)
2171 {
2172 	struct cifsLockInfo *li, *tmp;
2173 	list_for_each_entry_safe(li, tmp, llist, llist) {
2174 		cifs_del_lock_waiters(li);
2175 		list_del(&li->llist);
2176 		kfree(li);
2177 	}
2178 }
2179 
2180 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Remove all cached mandatory byte-range locks of this file that fall
 * entirely within the range described by @flock, batching the unlock
 * ranges into as few SMB1 LOCKING_ANDX requests as the server's maxBuf
 * allows.  Locks being unlocked are parked on a temporary list so they
 * can be re-inserted into the file's list if the server rejects the
 * request.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* One pass per on-the-wire lock type: exclusive, then shared. */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = cifs_flock_len(flock);
	LIST_HEAD(tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* Number of unlock ranges that fit into a single request. */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* Only locks fully contained in the range qualify. */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			/* Skip locks owned by other thread groups. */
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* Buffer full - send this unlock batch now. */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* Flush the final partially-filled batch. */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
2291 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2292 
/*
 * Handle F_SETLK/F_SETLKW (and flock): set or clear a byte-range lock.
 * POSIX-capable mounts use CIFSSMBPosixLock(); otherwise a mandatory lock
 * is cached locally and/or sent to the server.  For FL_POSIX/FL_FLOCK
 * requests the result is also registered with the VFS via
 * locks_lock_file_wait().
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (posix_lck) {
		int posix_lock_type;

		/* Try to handle the request locally first. */
		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->c.flc_flags);
		if (!lock)
			return -ENOMEM;

		/* rc < 0: error; rc == 0: handled locally, we are done;
		 * otherwise fall through and ask the server. */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			cifs_reset_oplock(CIFS_I(inode));
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* Server granted the lock - record it in the local cache. */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->c.flc_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
2387 
cifs_flock(struct file * file,int cmd,struct file_lock * fl)2388 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2389 {
2390 	int rc, xid;
2391 	int lock = 0, unlock = 0;
2392 	bool wait_flag = false;
2393 	bool posix_lck = false;
2394 	struct cifs_sb_info *cifs_sb;
2395 	struct cifs_tcon *tcon;
2396 	struct cifsFileInfo *cfile;
2397 	__u32 type;
2398 
2399 	xid = get_xid();
2400 
2401 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2402 		rc = -ENOLCK;
2403 		free_xid(xid);
2404 		return rc;
2405 	}
2406 
2407 	cfile = (struct cifsFileInfo *)file->private_data;
2408 	tcon = tlink_tcon(cfile->tlink);
2409 
2410 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2411 			tcon->ses->server);
2412 	cifs_sb = CIFS_SB(file);
2413 
2414 	if (cap_unix(tcon->ses) &&
2415 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2416 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2417 		posix_lck = true;
2418 
2419 	if (!lock && !unlock) {
2420 		/*
2421 		 * if no lock or unlock then nothing to do since we do not
2422 		 * know what it is
2423 		 */
2424 		rc = -EOPNOTSUPP;
2425 		free_xid(xid);
2426 		return rc;
2427 	}
2428 
2429 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2430 			xid);
2431 	free_xid(xid);
2432 	return rc;
2433 
2434 
2435 }
2436 
cifs_lock(struct file * file,int cmd,struct file_lock * flock)2437 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2438 {
2439 	struct cifs_sb_info *cifs_sb = CIFS_SB(file);
2440 	struct cifsFileInfo *cfile;
2441 	int lock = 0, unlock = 0;
2442 	bool wait_flag = false;
2443 	bool posix_lck = false;
2444 	struct cifs_tcon *tcon;
2445 	__u32 type;
2446 	int rc, xid;
2447 
2448 	rc = -EACCES;
2449 	xid = get_xid();
2450 
2451 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2452 		 flock->c.flc_flags, flock->c.flc_type,
2453 		 (long long)flock->fl_start,
2454 		 (long long)flock->fl_end);
2455 
2456 	cfile = (struct cifsFileInfo *)file->private_data;
2457 	tcon = tlink_tcon(cfile->tlink);
2458 
2459 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2460 			tcon->ses->server);
2461 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2462 
2463 	if (cap_unix(tcon->ses) &&
2464 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2465 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2466 		posix_lck = true;
2467 	/*
2468 	 * BB add code here to normalize offset and length to account for
2469 	 * negative length which we can not accept over the wire.
2470 	 */
2471 	if (IS_GETLK(cmd)) {
2472 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2473 		free_xid(xid);
2474 		return rc;
2475 	}
2476 
2477 	if (!lock && !unlock) {
2478 		/*
2479 		 * if no lock or unlock then nothing to do since we do not
2480 		 * know what it is
2481 		 */
2482 		free_xid(xid);
2483 		return -EOPNOTSUPP;
2484 	}
2485 
2486 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2487 			xid);
2488 	free_xid(xid);
2489 	return rc;
2490 }
2491 
/*
 * Completion handler for a write subrequest.  On a successful (positive)
 * result, advance the netfs zero_point for unbuffered/direct writes and
 * grow the cached remote file size if the write extended the file, then
 * pass the result on to netfslib.
 */
void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
{
	struct netfs_io_request *wreq = wdata->rreq;
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	loff_t wrend;

	if (result > 0) {
		/* File position just past the bytes this subrequest wrote. */
		wrend = wdata->subreq.start + wdata->subreq.transferred + result;

		/* NOTE(review): zero_point marks where netfs treats remote
		 * data as zero; direct writes past it invalidate that. */
		if (wrend > ictx->zero_point &&
		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
		     wdata->rreq->origin == NETFS_DIO_WRITE))
			ictx->zero_point = wrend;
		if (wrend > ictx->remote_i_size)
			netfs_resize_file(ictx, wrend, true);
	}

	netfs_write_subrequest_terminated(&wdata->subreq, result);
}
2511 
open_flags_match(struct cifsInodeInfo * cinode,unsigned int oflags,unsigned int cflags)2512 static bool open_flags_match(struct cifsInodeInfo *cinode,
2513 			     unsigned int oflags, unsigned int cflags)
2514 {
2515 	struct inode *inode = &cinode->netfs.inode;
2516 	int crw = 0, orw = 0;
2517 
2518 	oflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2519 	cflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2520 
2521 	if (cifs_fscache_enabled(inode)) {
2522 		if (OPEN_FMODE(cflags) & FMODE_WRITE)
2523 			crw = 1;
2524 		if (OPEN_FMODE(oflags) & FMODE_WRITE)
2525 			orw = 1;
2526 	}
2527 	if (cifs_convert_flags(oflags, orw) != cifs_convert_flags(cflags, crw))
2528 		return false;
2529 
2530 	return (oflags & (O_SYNC | O_DIRECT)) == (cflags & (O_SYNC | O_DIRECT));
2531 }
2532 
/*
 * Find an open handle on @cifs_inode usable for reading, subject to the
 * FIND_* constraints in @find_flags (with FIND_OPEN_FLAGS, also to
 * compatibility with @open_flags).  Returns a referenced cifsFileInfo
 * (caller must cifsFileInfo_put()) or NULL if none is suitable.
 */
struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode,
					  unsigned int find_flags,
					  unsigned int open_flags)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode);
	bool fsuid_only = find_flags & FIND_FSUID_ONLY;
	struct cifsFileInfo *open_file = NULL;

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		/* Skip handles whose file has a pending delete. */
		if ((find_flags & FIND_NO_PENDING_DELETE) &&
		    open_file->status_file_deleted)
			continue;
		if ((find_flags & FIND_OPEN_FLAGS) &&
		    !open_flags_match(cifs_inode, open_flags,
				      open_file->f_flags))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if ((!open_file->invalidHandle)) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}
2575 
/* Return -EBADF if no handle is found and general rc otherwise */
/*
 * Search @cifs_inode's open handles for one usable for writing, subject
 * to the FIND_* bits in @find_flags.  A handle belonging to the current
 * thread group is preferred; failing that any matching handle is used,
 * and as a last resort an invalid (needs-reopen) handle is reopened,
 * retrying up to MAX_REOPEN_ATT times.  On success, *ret_file holds a
 * referenced handle (caller must cifsFileInfo_put()) and 0 is returned.
 */
int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
			     unsigned int find_flags, unsigned int open_flags,
			     struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = find_flags & FIND_FSUID_ONLY;
	bool with_delete = find_flags & FIND_WITH_DELETE;
	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen but we had reports of on oops (due
	 * to it being zero) during stress testcases so we need to check for it
	 */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* First pass only considers the current thread group. */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if ((find_flags & FIND_NO_PENDING_DELETE) &&
		    open_file->status_file_deleted)
			continue;
		if ((find_flags & FIND_OPEN_FLAGS) &&
		    !open_flags_match(cifs_inode, open_flags,
				      open_file->f_flags))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				/* Remember an invalid handle as a fallback. */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		/* Try to revive the invalid handle by reopening it. */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		/* Reopen failed: demote this handle and search again. */
		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
2673 
2674 struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo * cifs_inode,int flags)2675 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2676 {
2677 	struct cifsFileInfo *cfile;
2678 	int rc;
2679 
2680 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2681 	if (rc)
2682 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2683 
2684 	return cfile;
2685 }
2686 
2687 int
cifs_get_writable_path(struct cifs_tcon * tcon,const char * name,int flags,struct cifsFileInfo ** ret_file)2688 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2689 		       int flags,
2690 		       struct cifsFileInfo **ret_file)
2691 {
2692 	struct cifsFileInfo *cfile;
2693 	void *page = alloc_dentry_path();
2694 
2695 	*ret_file = NULL;
2696 
2697 	spin_lock(&tcon->open_file_lock);
2698 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2699 		struct cifsInodeInfo *cinode;
2700 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2701 		if (IS_ERR(full_path)) {
2702 			spin_unlock(&tcon->open_file_lock);
2703 			free_dentry_path(page);
2704 			return PTR_ERR(full_path);
2705 		}
2706 		if (strcmp(full_path, name))
2707 			continue;
2708 
2709 		cinode = CIFS_I(d_inode(cfile->dentry));
2710 		spin_unlock(&tcon->open_file_lock);
2711 		free_dentry_path(page);
2712 		return cifs_get_writable_file(cinode, flags, ret_file);
2713 	}
2714 
2715 	spin_unlock(&tcon->open_file_lock);
2716 	free_dentry_path(page);
2717 	return -ENOENT;
2718 }
2719 
2720 int
cifs_get_readable_path(struct cifs_tcon * tcon,const char * name,struct cifsFileInfo ** ret_file)2721 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2722 		       struct cifsFileInfo **ret_file)
2723 {
2724 	struct cifsFileInfo *cfile;
2725 	void *page = alloc_dentry_path();
2726 
2727 	*ret_file = NULL;
2728 
2729 	spin_lock(&tcon->open_file_lock);
2730 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2731 		struct cifsInodeInfo *cinode;
2732 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2733 		if (IS_ERR(full_path)) {
2734 			spin_unlock(&tcon->open_file_lock);
2735 			free_dentry_path(page);
2736 			return PTR_ERR(full_path);
2737 		}
2738 		if (strcmp(full_path, name))
2739 			continue;
2740 
2741 		cinode = CIFS_I(d_inode(cfile->dentry));
2742 		spin_unlock(&tcon->open_file_lock);
2743 		free_dentry_path(page);
2744 		*ret_file = find_readable_file(cinode, FIND_ANY);
2745 		return *ret_file ? 0 : -ENOENT;
2746 	}
2747 
2748 	spin_unlock(&tcon->open_file_lock);
2749 	free_dentry_path(page);
2750 	return -ENOENT;
2751 }
2752 
/*
 * Flush data on a strict file.
 *
 * fsync handler for strict cache mounts: write back and wait for the
 * dirty pagecache in [start, end], drop the cache if we no longer hold a
 * read lease/oplock, then ask the server to flush the handle.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	unsigned int xid;
	int rc;

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(inode->i_ino, rc);
		return rc;
	}

	cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync);

	/* Without read caching rights the pagecache may be stale - zap it. */
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc);
	}

	xid = get_xid();
	rc = cifs_file_flush(xid, inode, smbfile);
	free_xid(xid);
	return rc;
}
2782 
/*
 * Flush data for a file on a non-strict-cache mount.
 *
 * Writes back and waits for the dirty pagecache in [start, end], then
 * (unless the mount disables server flushes) asks the server to flush
 * the handle - borrowing a writable handle when this one is read-only.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(file);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto fsync_exit;
		}

		/* A read-only handle can't be flushed - borrow a writable one. */
		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			smbfile = find_writable_file(CIFS_I(inode), FIND_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

fsync_exit:
	free_xid(xid);
	return rc;
}
2830 
2831 /*
2832  * As file closes, flush all cached write data for this inode checking
2833  * for write behind errors.
2834  */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	/* Only handles opened for writing can have write-behind data. */
	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
	if (!rc)
		return 0;

	/* get more nuanced writeback errors */
	rc = filemap_check_wb_err(file->f_mapping, 0);
	trace_cifs_flush_err(inode->i_ino, rc);
	return rc;
}
2851 
/*
 * Buffered write used in strict cache mode when mandatory byte-range
 * locking may be in effect: the write is refused with -EACCES if it
 * overlaps a conflicting mandatory lock held by another handle.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	ssize_t rc;

	/* Take the netfs write I/O lock; lock_sem nests inside it. */
	rc = netfs_start_io_write(inode);
	if (rc < 0)
		return rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	/*
	 * With POSIX brlock semantics disabled, locks are mandatory: fail
	 * the write if [pos, pos+len) conflicts with an exclusive lock.
	 */
	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) &&
	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))) {
		rc = -EACCES;
		goto out;
	}

	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);

out:
	up_read(&cinode->lock_sem);
	netfs_end_io_write(inode);
	/* Honour O_SYNC/O_DSYNC semantics for successful writes. */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
2894 
/*
 * Write path for mounts using strict cache mode: only write through the
 * pagecache when we hold a write oplock/lease; otherwise write through
 * to the server and invalidate any read-cached data afterwards.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* Blocks while an oplock break is in progress; may return error. */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		/*
		 * With POSIX (advisory) byte-range lock semantics there is
		 * no mandatory-lock conflict to check, so the plain netfs
		 * buffered write can be used directly.
		 */
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
		    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = netfs_file_write_iter(iocb, from);
			goto out;
		}
		/* Mandatory locking: go through the conflict-checking path. */
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause an error with mandatory locks on
	 * these pages but not on the region from pos to pos+len-1.
	 */
	written = netfs_file_write_iter(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cifs_reset_oplock(cinode);
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2944 
cifs_loose_read_iter(struct kiocb * iocb,struct iov_iter * iter)2945 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2946 {
2947 	ssize_t rc;
2948 	struct inode *inode = file_inode(iocb->ki_filp);
2949 
2950 	if (iocb->ki_flags & IOCB_DIRECT)
2951 		return netfs_unbuffered_read_iter(iocb, iter);
2952 
2953 	rc = cifs_revalidate_mapping(inode);
2954 	if (rc)
2955 		return rc;
2956 
2957 	return netfs_file_read_iter(iocb, iter);
2958 }
2959 
/*
 * Default (non-strict) write path: buffered write through the pagecache,
 * with immediate writeback kick-off when no write oplock/lease is held,
 * or an unbuffered write for O_DIRECT opens.
 */
ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = netfs_unbuffered_write_iter(iocb, from);
		/*
		 * The direct write went around the pagecache, so any
		 * read-cached data is now stale: zap it and drop the
		 * oplock/lease to NONE.
		 */
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cifs_reset_oplock(cinode);
		}
		return written;
	}

	/* Blocks while an oplock break is in progress; may return error. */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = netfs_file_write_iter(iocb, from);

	/*
	 * Without a write oplock/lease we cannot hold dirty data in the
	 * cache, so start writeback immediately (failure is only logged;
	 * the data is already accepted).
	 */
	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
		rc = filemap_fdatawrite(inode->i_mapping);
		if (rc)
			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
				 rc, inode);
	}

	cifs_put_writer(cinode);
	return written;
}
2995 
/*
 * Read path for mounts using strict cache mode: only read from the
 * pagecache when we hold at least a read oplock/lease, and honour
 * mandatory byte-range locks when POSIX lock semantics are disabled.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return netfs_unbuffered_read_iter(iocb, to);

	/* POSIX (advisory) locks: no mandatory-lock conflict to check. */
	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			return netfs_unbuffered_read_iter(iocb, to);
		return netfs_buffered_read_iter(iocb, to);
	}

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	if (iocb->ki_flags & IOCB_DIRECT) {
		rc = netfs_start_io_direct(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		/* Fail with -EACCES if a conflicting mandatory lock exists. */
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = netfs_unbuffered_read_iter_locked(iocb, to);
		up_read(&cinode->lock_sem);
		netfs_end_io_direct(inode);
	} else {
		rc = netfs_start_io_read(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		/* Same conflict check, but serve from the pagecache. */
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = filemap_read(iocb, to, 0);
		up_read(&cinode->lock_sem);
		netfs_end_io_read(inode);
	}
out:
	return rc;
}
3058 
/*
 * Handle a page being made writable via mmap: delegate to netfslib
 * (no filesystem-private folio group data, hence the NULL).
 */
static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);
}
3063 
/* VM operations installed by both the strict and loose mmap paths. */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3069 
cifs_file_strict_mmap_prepare(struct vm_area_desc * desc)3070 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3071 {
3072 	int xid, rc = 0;
3073 	struct inode *inode = file_inode(desc->file);
3074 
3075 	xid = get_xid();
3076 
3077 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3078 		rc = cifs_zap_mapping(inode);
3079 	if (!rc)
3080 		rc = generic_file_mmap_prepare(desc);
3081 	if (!rc)
3082 		desc->vm_ops = &cifs_file_vm_ops;
3083 
3084 	free_xid(xid);
3085 	return rc;
3086 }
3087 
cifs_file_mmap_prepare(struct vm_area_desc * desc)3088 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3089 {
3090 	int rc, xid;
3091 
3092 	xid = get_xid();
3093 
3094 	rc = cifs_revalidate_file(desc->file);
3095 	if (rc)
3096 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3097 			 rc);
3098 	if (!rc)
3099 		rc = generic_file_mmap_prepare(desc);
3100 	if (!rc)
3101 		desc->vm_ops = &cifs_file_vm_ops;
3102 
3103 	free_xid(xid);
3104 	return rc;
3105 }
3106 
is_inode_writable(struct cifsInodeInfo * cifs_inode)3107 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3108 {
3109 	struct cifsFileInfo *open_file;
3110 
3111 	spin_lock(&cifs_inode->open_file_lock);
3112 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3113 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3114 			spin_unlock(&cifs_inode->open_file_lock);
3115 			return 1;
3116 		}
3117 	}
3118 	spin_unlock(&cifs_inode->open_file_lock);
3119 	return 0;
3120 }
3121 
3122 /* We do not want to update the file size from server for inodes
3123    open for write - to avoid races with writepage extending
3124    the file - in the future we could consider allowing
3125    refreshing the inode only on increases in the file size
3126    but this is tricky to do without racing with writebehind
3127    page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
			    bool from_readdir)
{
	struct cifs_sb_info *cifs_sb;

	if (!cifsInode)
		return true;

	/*
	 * If the inode is not open for write (and not RW-cached during a
	 * readdir), there is no write-behind race and changing the size
	 * is always safe.
	 */
	if (!is_inode_writable(cifsInode) &&
	    !((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir))
		return true;

	/* This inode is open for write at least once */
	cifs_sb = CIFS_SB(cifsInode);

	/* since no page cache to corrupt on directio we can change size safely */
	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DIRECT_IO)
		return true;

	/* Growing the file is safe; only shrinking below cached data is not. */
	if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
		return true;

	return false;
}
3152 
/*
 * Work item run when the server breaks our oplock/lease: downgrade the
 * local caching level, flush/zap the pagecache as required, push any
 * cached byte-range locks to the server, then acknowledge the break.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	bool cache_read, cache_write, cache_handle;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct tcon_link *tlink;
	unsigned int oplock;
	int rc = 0;
	bool purge_cache = false, oplock_break_cancelled;
	__u64 persistent_fid, volatile_fid;
	__u16 net_fid;

	/* Wait until in-flight writers that rely on the oplock drain. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	/*
	 * Downgrade the cached oplock state under open_file_lock and take
	 * a consistent snapshot of what we may still cache afterwards
	 * (mount-wide cache=ro/rw options can keep caching enabled).
	 */
	scoped_guard(spinlock, &cinode->open_file_lock) {
		unsigned int sbflags = cifs_sb_flags(cifs_sb);

		server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
					      cfile->oplock_epoch, &purge_cache);
		oplock = READ_ONCE(cinode->oplock);
		cache_read = (oplock & CIFS_CACHE_READ_FLG) ||
			(sbflags & CIFS_MOUNT_RO_CACHE);
		cache_write = (oplock & CIFS_CACHE_WRITE_FLG) ||
			(sbflags & CIFS_MOUNT_RW_CACHE);
		cache_handle = oplock & CIFS_CACHE_HANDLE_FLG;
	}

	/* Mandatory locks are incompatible with read-only caching. */
	if (!cache_write && cache_read && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cifs_reset_oplock(cinode);
		oplock = 0;
		cache_read = cache_write = cache_handle = false;
	}

	if (S_ISREG(inode->i_mode)) {
		/* Propagate the break to any local leases held via the VFS. */
		if (cache_read)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		/* Push dirty data; if read caching is gone, drop the cache. */
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!cache_read || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		/* Still holding a write oplock: no need to push locks. */
		if (cache_write)
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When oplock break is received and there are no active
	 * file handles but cached, then schedule deferred close immediately.
	 * So, new open will not use cached handle.
	 */

	if (!cache_handle && !list_empty(&cinode->deferred_closes))
		cifs_close_deferred_file(cinode);

	/* Snapshot the fids before dropping our reference to cfile. */
	persistent_fid = cfile->fid.persistent_fid;
	volatile_fid = cfile->fid.volatile_fid;
	net_fid = cfile->fid.netfid;
	oplock_break_cancelled = cfile->oplock_break_cancelled;

	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	/*
	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
	 * an acknowledgment to be sent when the file has already been closed.
	 */
	spin_lock(&cinode->open_file_lock);
	/* check list empty since can race with kill_sb calling tree disconnect */
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid,
						  cinode, oplock);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	} else
		spin_unlock(&cinode->open_file_lock);

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
}
3256 
/*
 * Enable use of this file as a swapfile.  Verifies that direct swap I/O
 * is possible and that the file has no holes, then registers a single
 * swap extent covering the whole file.  Returns 0 or a negative errno.
 */
static int cifs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	struct cifsFileInfo *cfile = swap_file->private_data;
	struct inode *inode = swap_file->f_mapping->host;
	unsigned long blocks;
	long long isize;

	cifs_dbg(FYI, "swap activate\n");

	if (!swap_file->f_mapping->a_ops->swap_rw)
		/* Cannot support swap */
		return -EINVAL;

	/* Snapshot size/blocks consistently under i_lock. */
	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	/* Fewer allocated blocks than the size implies means holes. */
	if (blocks*512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}
	*span = sis->pages;

	pr_warn_once("Swap support over SMB3 is experimental\n");

	/*
	 * TODO: consider adding ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
	 */


	/* TODO: add sk_set_memalloc(inet) or similar */

	/* Mark the handle so other paths know it backs swap. */
	if (cfile)
		cfile->swapfile = true;
	/*
	 * TODO: Since file already open, we can't open with DENY_ALL here
	 * but we could add call to grab a byte range lock to prevent others
	 * from reading or writing the file
	 */

	sis->flags |= SWP_FS_OPS;
	return add_swap_extent(sis, 0, sis->max, 0);
}
3302 
cifs_swap_deactivate(struct file * file)3303 static void cifs_swap_deactivate(struct file *file)
3304 {
3305 	struct cifsFileInfo *cfile = file->private_data;
3306 
3307 	cifs_dbg(FYI, "swap deactivate\n");
3308 
3309 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3310 
3311 	if (cfile)
3312 		cfile->swapfile = false;
3313 
3314 	/* do we need to unpin (or unlock) the file */
3315 }
3316 
3317 /**
3318  * cifs_swap_rw - SMB3 address space operation for swap I/O
3319  * @iocb: target I/O control block
3320  * @iter: I/O buffer
3321  *
3322  * Perform IO to the swap-file.  This is much like direct IO.
3323  */
cifs_swap_rw(struct kiocb * iocb,struct iov_iter * iter)3324 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3325 {
3326 	ssize_t ret;
3327 
3328 	if (iov_iter_rw(iter) == READ)
3329 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3330 	else
3331 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3332 	if (ret < 0)
3333 		return ret;
3334 	return 0;
3335 }
3336 
/*
 * Address-space operations for the normal case, mostly delegating to
 * netfslib; includes swapfile support.
 */
const struct address_space_operations cifs_addr_ops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	.writepages	= netfs_writepages,
	.dirty_folio	= netfs_dirty_folio,
	.release_folio	= netfs_release_folio,
	.direct_IO	= noop_direct_IO,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
	/*
	 * TODO: investigate and if useful we could add an is_dirty_writeback
	 * helper if needed
	 */
	.swap_activate	= cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
	.swap_rw = cifs_swap_rw,
};
3354 
3355 /*
3356  * cifs_readahead requires the server to support a buffer large enough to
3357  * contain the header plus one complete page of data.  Otherwise, we need
3358  * to leave cifs_readahead out of the address space operations.
3359  */
/* Reduced aops set (no readahead, direct I/O or swap) for small buffers. */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.read_folio	= netfs_read_folio,
	.writepages	= netfs_writepages,
	.dirty_folio	= netfs_dirty_folio,
	.release_folio	= netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
};
3368