// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server. We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

	if (cifs_sb->ctx->wsize == 0)
		cifs_negotiate_wsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		stream->sreq_max_segs = sp->max_frmr_depth;
	}
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc);
	goto out;
}

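/*
 * Invalidate the locally cached (fscache) data for an inode, on behalf of
 * the netfs library.
 */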
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}

/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_negotiate_rsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
	}
#endif
	return 0;
}

/*
 * Issue a read operation on behalf of the netfs helper functions. We're asked
 * to make a read of a certain size at a point in the file. We are permitted
 * to read only a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
	    subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	subreq->error = rc;
	netfs_read_subreq_terminated(subreq);
}

/*
 * Writeback calls this when it finds a folio that needs uploading. This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; // Ummm... This may be a workqueue

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}

/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime; it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime))
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

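/*
 * Release resources attached to a completed request, dropping the file
 * reference taken in cifs_init_request().
 */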
static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}

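/*
 * Clean up a subrequest: deregister any RDMA memory registration, return
 * unused credits to the server and release the xid.
 */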
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}

const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};

/*
 * Mark all open files on this tree connection as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to evict_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

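/*
 * Convert POSIX open flags into the NT desired access bits used by the SMB
 * open call. If rdwr_for_fscache is set, a write-only open is widened to
 * read/write so the local cache can be filled in around partial writes.
 */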
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause an unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

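/*
 * Map the O_CREAT/O_EXCL/O_TRUNC combination in the open flags to the
 * corresponding SMB create disposition (see the mapping table in
 * cifs_nt_open() below).
 */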
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

	/*********************************************************************
	 *  open flag mapping table:
	 *
	 *	POSIX Flag		CIFS Disposition
	 *	----------		----------------
	 *	O_CREAT			FILE_OPEN_IF
	 *	O_CREAT | O_EXCL	FILE_CREATE
	 *	O_CREAT | O_TRUNC	FILE_OVERWRITE_IF
	 *	O_TRUNC			FILE_OVERWRITE
	 *	none of the above	FILE_OPEN
	 *
	 *	Note that there is no direct match for the disposition
	 *	FILE_SUPERSEDE (ie create whether or not the file exists).
	 *	O_CREAT | O_TRUNC is similar, but truncates the existing
	 *	file rather than creating a new file as FILE_SUPERSEDE does
	 *	(which uses the attributes / metadata passed in on the open
	 *	call).
	 *
	 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
	 *	and the read/write flags match reasonably. O_LARGEFILE
	 *	is irrelevant because largefile support is always used
	 *	by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
	 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
	 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

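/*
 * Take lock_sem for write by polling with down_write_trylock() rather than
 * blocking in down_write(), presumably to avoid deadlocks with paths that
 * hold the semaphore for read across server calls.
 */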
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

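/*
 * Allocate and initialise the private file data for a newly opened file,
 * wire it up to the inode and tcon open-file lists, and apply the granted
 * oplock (downgraded to None if mandatory brlocks are already held).
 */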
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if it is a readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

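/*
 * Final teardown when the last reference to the file private data is
 * dropped: free any remaining lock records and release the tlink, dentry
 * and superblock references.
 */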
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

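/*
 * Retry a server-side close that previously failed with -EBUSY or -EAGAIN,
 * backing off between attempts, then hand off the final put as appropriate.
 */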
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);

	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);

	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	not offloaded on close and oplock breaks
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// If the serverclose has been offloaded to the workqueue (on failure),
	// it will handle the offloaded put as well. If the serverclose was not
	// offloaded, we need to handle offloading the put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

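/*
 * Flush dirty data for an inode on the server. Prefer the caller's writable
 * handle if one was supplied; otherwise find (and put) a writable handle of
 * our own. A missing handle (-EBADF) is not treated as an error.
 */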
int cifs_file_flush(const unsigned int xid, struct inode *inode,
		    struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_tcon *tcon;
	int rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		return 0;

	if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
		tcon = tlink_tcon(cfile->tlink);
		return tcon->ses->server->ops->flush(xid, tcon,
						     &cfile->fid);
	}
	rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
	if (!rc) {
		tcon = tlink_tcon(cfile->tlink);
		rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
		cifsFileInfo_put(cfile);
	} else if (rc == -EBADF) {
		rc = 0;
	}
	return rc;
}

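/*
 * Truncate a file to zero length on open with O_TRUNC: write back and flush
 * any cached data, ask the server to set the file size to 0, then update the
 * local inode and netfs state to match.
 */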
static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
	struct inode *inode = d_inode(dentry);
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	int rc;

	rc = filemap_write_and_wait(inode->i_mapping);
	if (is_interrupt_error(rc))
		return -ERESTARTSYS;
	mapping_set_error(inode->i_mapping, rc);

	cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
	rc = cifs_file_flush(xid, inode, cfile);
	if (!rc) {
		if (cfile) {
			tcon = tlink_tcon(cfile->tlink);
			server = tcon->ses->server;
			rc = server->ops->set_file_size(xid, tcon,
							cfile, 0, false);
		}
		if (!rc) {
			netfs_resize_file(&cinode->netfs, 0, true);
			cifs_setsize(inode, 0);
			inode->i_blocks = 0;
		}
	}
	if (cfile)
		cifsFileInfo_put(cfile);
	return rc;
}

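/*
 * The VFS ->open() handler. Reuses a cached (deferred-close) handle when the
 * open flags are compatible; otherwise performs a POSIX open (legacy unix
 * extensions) or an NT-style open on the server.
 */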
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (file->f_flags & O_TRUNC) {
		rc = cifs_do_truncate(xid, file_dentry(file));
		if (rc)
			goto out;
	}

	/* Get the cached handle as SMB2 close is deferred */
	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
		rc = cifs_get_writable_path(tcon, full_path,
					    FIND_WR_FSUID_ONLY |
					    FIND_WR_NO_PENDING_DELETE,
					    &cfile);
	} else {
		rc = cifs_get_readable_path(tcon, full_path, &cfile);
	}
	if (rc == 0) {
		unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
		unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);

		if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
		    (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		}
		_cifsFileInfo_put(cfile, true, false);
	} else {
		/* hard link on the deferred close file */
		rc = cifs_get_hardlink_path(tcon, inode, file);
		if (rc)
			cifs_close_deferred_file(CIFS_I(inode));
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				    le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			   (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
			~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

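/*
 * Deferred-close timer has expired: drop the cached handle by removing the
 * deferred close entry and putting the file reference it held.
 */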
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

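/*
 * A close can be deferred only if the mount has a close timeout, we hold a
 * read-caching lease on the inode and no lock forced the handle closed.
 */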
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
		(cinode->oplock == CIFS_CACHE_RHW_FLG ||
		 cinode->oplock == CIFS_CACHE_RH_FLG) &&
		!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}

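/*
 * The VFS ->release() handler. Where possible, keep the handle open on the
 * server for a while (deferred close) so that a quick reopen can reuse it;
 * otherwise drop the reference immediately.
 */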
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues
				 * new work. So, increase the ref count to avoid
				 * use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						      &cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						   &cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

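/*
 * After a reconnect, walk the open files on a tree connection and reopen any
 * invalidated persistent/resilient handles so their state survives.
 */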
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

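/*
 * The VFS ->release() handler for directories: close any uncompleted search
 * handle on the server and free the readdir buffers and private data.
 */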
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

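/*
 * Allocate and initialise a byte-range lock record for the current task.
 */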
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

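/*
 * Wake up every waiter blocked on this lock and unlink it from the
 * blocked list.
 */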
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		      current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

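/*
 * Unconditionally add a lock record to the file's lock list under lock_sem.
 */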
1708 static void
cifs_lock_add(struct cifsFileInfo * cfile,struct cifsLockInfo * lock)1709 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1710 {
1711 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1712 cifs_down_write(&cinode->lock_sem);
1713 list_add_tail(&lock->llist, &cfile->llist->locks);
1714 up_write(&cinode->lock_sem);
1715 }
1716
1717 /*
1718 * Set the byte-range lock (mandatory style). Returns:
1719 * 1) 0, if we set the lock and don't need to request to the server;
1720 * 2) 1, if no locks prevent us but we need to request to the server;
1721 * 3) -EACCES, if there is a lock that prevents us and wait is false.
1722 */
1723 static int
cifs_lock_add_if(struct cifsFileInfo * cfile,struct cifsLockInfo * lock,bool wait)1724 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1725 bool wait)
1726 {
1727 struct cifsLockInfo *conf_lock;
1728 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1729 bool exist;
1730 int rc = 0;
1731
1732 try_again:
1733 exist = false;
1734 cifs_down_write(&cinode->lock_sem);
1735
1736 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1737 lock->type, lock->flags, &conf_lock,
1738 CIFS_LOCK_OP);
1739 if (!exist && cinode->can_cache_brlcks) {
1740 list_add_tail(&lock->llist, &cfile->llist->locks);
1741 up_write(&cinode->lock_sem);
1742 return rc;
1743 }
1744
1745 if (!exist)
1746 rc = 1;
1747 else if (!wait)
1748 rc = -EACCES;
1749 else {
1750 list_add_tail(&lock->blist, &conf_lock->blist);
1751 up_write(&cinode->lock_sem);
1752 rc = wait_event_interruptible(lock->block_q,
1753 (lock->blist.prev == &lock->blist) &&
1754 (lock->blist.next == &lock->blist));
1755 if (!rc)
1756 goto try_again;
1757 cifs_down_write(&cinode->lock_sem);
1758 list_del_init(&lock->blist);
1759 }
1760
1761 up_write(&cinode->lock_sem);
1762 return rc;
1763 }
1764
1765 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1766 /*
1767  * Check if there is another lock that prevents us from setting the lock
1768  * (posix style). If such a lock exists, update the flock structure with
1769  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1770  * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
1771  * send a request to the server, or 1 otherwise.
1772 */
1773 static int
1774 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1775 {
1776 int rc = 0;
1777 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1778 unsigned char saved_type = flock->c.flc_type;
1779
1780 if ((flock->c.flc_flags & FL_POSIX) == 0)
1781 return 1;
1782
1783 down_read(&cinode->lock_sem);
1784 posix_test_lock(file, flock);
1785
1786 if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1787 flock->c.flc_type = saved_type;
1788 rc = 1;
1789 }
1790
1791 up_read(&cinode->lock_sem);
1792 return rc;
1793 }
1794
1795 /*
1796 * Set the byte-range lock (posix style). Returns:
1797  * 1) <0, if an error occurs while setting the lock;
1798  * 2) 0, if we set the lock and don't need to send a request to the server;
1799  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1800  * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
1801 */
1802 static int
1803 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1804 {
1805 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1806 int rc = FILE_LOCK_DEFERRED + 1;
1807
1808 if ((flock->c.flc_flags & FL_POSIX) == 0)
1809 return rc;
1810
1811 cifs_down_write(&cinode->lock_sem);
1812 if (!cinode->can_cache_brlcks) {
1813 up_write(&cinode->lock_sem);
1814 return rc;
1815 }
1816
1817 rc = posix_lock_file(file, flock, NULL);
1818 up_write(&cinode->lock_sem);
1819 return rc;
1820 }
1821
1822 int
1823 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1824 {
1825 unsigned int xid;
1826 int rc = 0, stored_rc;
1827 struct cifsLockInfo *li, *tmp;
1828 struct cifs_tcon *tcon;
1829 unsigned int num, max_num, max_buf;
1830 LOCKING_ANDX_RANGE *buf, *cur;
1831 static const int types[] = {
1832 LOCKING_ANDX_LARGE_FILES,
1833 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1834 };
1835 int i;
1836
1837 xid = get_xid();
1838 tcon = tlink_tcon(cfile->tlink);
1839
1840 /*
1841 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1842 * and check it before using.
1843 */
1844 max_buf = tcon->ses->server->maxBuf;
1845 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1846 free_xid(xid);
1847 return -EINVAL;
1848 }
1849
1850 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1851 PAGE_SIZE);
1852 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1853 PAGE_SIZE);
1854 max_num = (max_buf - sizeof(struct smb_hdr)) /
1855 sizeof(LOCKING_ANDX_RANGE);
1856 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1857 if (!buf) {
1858 free_xid(xid);
1859 return -ENOMEM;
1860 }
1861
1862 for (i = 0; i < 2; i++) {
1863 cur = buf;
1864 num = 0;
1865 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1866 if (li->type != types[i])
1867 continue;
1868 cur->Pid = cpu_to_le16(li->pid);
1869 cur->LengthLow = cpu_to_le32((u32)li->length);
1870 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1871 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1872 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1873 if (++num == max_num) {
1874 stored_rc = cifs_lockv(xid, tcon,
1875 cfile->fid.netfid,
1876 (__u8)li->type, 0, num,
1877 buf);
1878 if (stored_rc)
1879 rc = stored_rc;
1880 cur = buf;
1881 num = 0;
1882 } else
1883 cur++;
1884 }
1885
1886 if (num) {
1887 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1888 (__u8)types[i], 0, num, buf);
1889 if (stored_rc)
1890 rc = stored_rc;
1891 }
1892 }
1893
1894 kfree(buf);
1895 free_xid(xid);
1896 return rc;
1897 }
1898
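/*
 * Derive a 32-bit lock-owner id for posix locks, mixing the owner pointer
 * with cifs_lock_secret so that raw kernel addresses are not sent to the
 * server.
 */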
1899 static __u32
1900 hash_lockowner(fl_owner_t owner)
1901 {
1902 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1903 }
1904 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1905
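/*
 * Snapshot of a posix lock to push to the server; built up under the
 * flc_lock spinlock, which cannot be held across the network calls.
 */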
1906 struct lock_to_push {
1907 struct list_head llist;
1908 __u64 offset;
1909 __u64 length;
1910 __u32 pid;
1911 __u16 netfid;
1912 __u8 type;
1913 };
1914
1915 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1916 static int
1917 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1918 {
1919 struct inode *inode = d_inode(cfile->dentry);
1920 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1921 struct file_lock *flock;
1922 struct file_lock_context *flctx = locks_inode_context(inode);
1923 unsigned int count = 0, i;
1924 int rc = 0, xid, type;
1925 struct list_head locks_to_send, *el;
1926 struct lock_to_push *lck, *tmp;
1927 __u64 length;
1928
1929 xid = get_xid();
1930
1931 if (!flctx)
1932 goto out;
1933
1934 spin_lock(&flctx->flc_lock);
1935 list_for_each(el, &flctx->flc_posix) {
1936 count++;
1937 }
1938 spin_unlock(&flctx->flc_lock);
1939
1940 INIT_LIST_HEAD(&locks_to_send);
1941
1942 /*
1943 * Allocating count locks is enough because no FL_POSIX locks can be
1944 * added to the list while we are holding cinode->lock_sem that
1945 * protects locking operations of this inode.
1946 */
1947 for (i = 0; i < count; i++) {
1948 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1949 if (!lck) {
1950 rc = -ENOMEM;
1951 goto err_out;
1952 }
1953 list_add_tail(&lck->llist, &locks_to_send);
1954 }
1955
1956 el = locks_to_send.next;
1957 spin_lock(&flctx->flc_lock);
1958 for_each_file_lock(flock, &flctx->flc_posix) {
1959 unsigned char ftype = flock->c.flc_type;
1960
1961 if (el == &locks_to_send) {
1962 /*
1963 * The list ended. We don't have enough allocated
1964 * structures - something is really wrong.
1965 */
1966 cifs_dbg(VFS, "Can't push all brlocks!\n");
1967 break;
1968 }
1969 length = cifs_flock_len(flock);
1970 if (ftype == F_RDLCK || ftype == F_SHLCK)
1971 type = CIFS_RDLCK;
1972 else
1973 type = CIFS_WRLCK;
1974 lck = list_entry(el, struct lock_to_push, llist);
1975 lck->pid = hash_lockowner(flock->c.flc_owner);
1976 lck->netfid = cfile->fid.netfid;
1977 lck->length = length;
1978 lck->type = type;
1979 lck->offset = flock->fl_start;
1980 }
1981 spin_unlock(&flctx->flc_lock);
1982
1983 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1984 int stored_rc;
1985
1986 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1987 lck->offset, lck->length, NULL,
1988 lck->type, 0);
1989 if (stored_rc)
1990 rc = stored_rc;
1991 list_del(&lck->llist);
1992 kfree(lck);
1993 }
1994
1995 out:
1996 free_xid(xid);
1997 return rc;
1998 err_out:
1999 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
2000 list_del(&lck->llist);
2001 kfree(lck);
2002 }
2003 goto out;
2004 }
2005 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2006
2007 static int
2008 cifs_push_locks(struct cifsFileInfo *cfile)
2009 {
2010 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2011 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2012 int rc = 0;
2013 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2014 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2015 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2016
2017 	/* we are going to update can_cache_brlcks here - need write access */
2018 cifs_down_write(&cinode->lock_sem);
2019 if (!cinode->can_cache_brlcks) {
2020 up_write(&cinode->lock_sem);
2021 return rc;
2022 }
2023
2024 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2025 if (cap_unix(tcon->ses) &&
2026 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2027 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2028 rc = cifs_push_posix_locks(cfile);
2029 else
2030 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2031 rc = tcon->ses->server->ops->push_mand_locks(cfile);
2032
2033 cinode->can_cache_brlcks = false;
2034 up_write(&cinode->lock_sem);
2035 return rc;
2036 }
2037
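/*
 * Decode a VFS file_lock into the protocol lock type and the
 * lock/unlock/wait flags, logging any flags we do not recognize.
 */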
2038 static void
2039 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
2040 bool *wait_flag, struct TCP_Server_Info *server)
2041 {
2042 if (flock->c.flc_flags & FL_POSIX)
2043 cifs_dbg(FYI, "Posix\n");
2044 if (flock->c.flc_flags & FL_FLOCK)
2045 cifs_dbg(FYI, "Flock\n");
2046 if (flock->c.flc_flags & FL_SLEEP) {
2047 cifs_dbg(FYI, "Blocking lock\n");
2048 *wait_flag = true;
2049 }
2050 if (flock->c.flc_flags & FL_ACCESS)
2051 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
2052 if (flock->c.flc_flags & FL_LEASE)
2053 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
2054 if (flock->c.flc_flags &
2055 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
2056 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
2057 cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
2058 flock->c.flc_flags);
2059
2060 *type = server->vals->large_lock_type;
2061 if (lock_is_write(flock)) {
2062 cifs_dbg(FYI, "F_WRLCK\n");
2063 *type |= server->vals->exclusive_lock_type;
2064 *lock = 1;
2065 } else if (lock_is_unlock(flock)) {
2066 cifs_dbg(FYI, "F_UNLCK\n");
2067 *type |= server->vals->unlock_lock_type;
2068 *unlock = 1;
2069 /* Check if unlock includes more than one lock range */
2070 } else if (lock_is_read(flock)) {
2071 cifs_dbg(FYI, "F_RDLCK\n");
2072 *type |= server->vals->shared_lock_type;
2073 *lock = 1;
2074 } else if (flock->c.flc_type == F_EXLCK) {
2075 cifs_dbg(FYI, "F_EXLCK\n");
2076 *type |= server->vals->exclusive_lock_type;
2077 *lock = 1;
2078 } else if (flock->c.flc_type == F_SHLCK) {
2079 cifs_dbg(FYI, "F_SHLCK\n");
2080 *type |= server->vals->shared_lock_type;
2081 *lock = 1;
2082 } else
2083 cifs_dbg(FYI, "Unknown type of lock\n");
2084 }
2085
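/*
 * Handle F_GETLK: check the local lock list first, then probe the server
 * by taking the range and immediately unlocking it again.
 */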
2086 static int
2087 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2088 bool wait_flag, bool posix_lck, unsigned int xid)
2089 {
2090 int rc = 0;
2091 __u64 length = cifs_flock_len(flock);
2092 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2093 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2094 struct TCP_Server_Info *server = tcon->ses->server;
2095 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2096 __u16 netfid = cfile->fid.netfid;
2097
2098 if (posix_lck) {
2099 int posix_lock_type;
2100
2101 rc = cifs_posix_lock_test(file, flock);
2102 if (!rc)
2103 return rc;
2104
2105 if (type & server->vals->shared_lock_type)
2106 posix_lock_type = CIFS_RDLCK;
2107 else
2108 posix_lock_type = CIFS_WRLCK;
2109 rc = CIFSSMBPosixLock(xid, tcon, netfid,
2110 hash_lockowner(flock->c.flc_owner),
2111 flock->fl_start, length, flock,
2112 posix_lock_type, wait_flag);
2113 return rc;
2114 }
2115 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2116
2117 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2118 if (!rc)
2119 return rc;
2120
2121 /* BB we could chain these into one lock request BB */
2122 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2123 1, 0, false);
2124 if (rc == 0) {
2125 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2126 type, 0, 1, false);
2127 flock->c.flc_type = F_UNLCK;
2128 if (rc != 0)
2129 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2130 rc);
2131 return 0;
2132 }
2133
2134 if (type & server->vals->shared_lock_type) {
2135 flock->c.flc_type = F_WRLCK;
2136 return 0;
2137 }
2138
2139 type &= ~server->vals->exclusive_lock_type;
2140
2141 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2142 type | server->vals->shared_lock_type,
2143 1, 0, false);
2144 if (rc == 0) {
2145 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2146 type | server->vals->shared_lock_type, 0, 1, false);
2147 flock->c.flc_type = F_RDLCK;
2148 if (rc != 0)
2149 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2150 rc);
2151 } else
2152 flock->c.flc_type = F_WRLCK;
2153
2154 return 0;
2155 }
2156
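/* Move every lock entry from one list to another. */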
2157 void
2158 cifs_move_llist(struct list_head *source, struct list_head *dest)
2159 {
2160 struct list_head *li, *tmp;
2161 list_for_each_safe(li, tmp, source)
2162 list_move(li, dest);
2163 }
2164
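/*
 * Return -EINVAL if this inode already has an open handle with the same
 * open flags as @file, 0 otherwise.
 */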
2165 int
2166 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2167 struct file *file)
2168 {
2169 struct cifsFileInfo *open_file = NULL;
2170 struct cifsInodeInfo *cinode = CIFS_I(inode);
2171 int rc = 0;
2172
2173 spin_lock(&tcon->open_file_lock);
2174 spin_lock(&cinode->open_file_lock);
2175
2176 list_for_each_entry(open_file, &cinode->openFileList, flist) {
2177 if (file->f_flags == open_file->f_flags) {
2178 rc = -EINVAL;
2179 break;
2180 }
2181 }
2182
2183 spin_unlock(&cinode->open_file_lock);
2184 spin_unlock(&tcon->open_file_lock);
2185 return rc;
2186 }
2187
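/* Free every lock entry on the list, waking up any waiters beforehand. */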
2188 void
2189 cifs_free_llist(struct list_head *llist)
2190 {
2191 struct cifsLockInfo *li, *tmp;
2192 list_for_each_entry_safe(li, tmp, llist, llist) {
2193 cifs_del_lock_waiters(li);
2194 list_del(&li->llist);
2195 kfree(li);
2196 }
2197 }
2198
2199 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
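/*
 * Unlock a range for SMB1 mandatory locking: batch as many matching
 * LOCKING_ANDX ranges per request as fit in the negotiated buffer,
 * keeping removed entries on a temporary list so they can be restored
 * if the server rejects the unlock.
 */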
2200 int
2201 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2202 unsigned int xid)
2203 {
2204 int rc = 0, stored_rc;
2205 static const int types[] = {
2206 LOCKING_ANDX_LARGE_FILES,
2207 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2208 };
2209 unsigned int i;
2210 unsigned int max_num, num, max_buf;
2211 LOCKING_ANDX_RANGE *buf, *cur;
2212 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2213 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2214 struct cifsLockInfo *li, *tmp;
2215 __u64 length = cifs_flock_len(flock);
2216 LIST_HEAD(tmp_llist);
2217
2218 /*
2219 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2220 * and check it before using.
2221 */
2222 max_buf = tcon->ses->server->maxBuf;
2223 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2224 return -EINVAL;
2225
2226 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2227 PAGE_SIZE);
2228 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2229 PAGE_SIZE);
2230 max_num = (max_buf - sizeof(struct smb_hdr)) /
2231 sizeof(LOCKING_ANDX_RANGE);
2232 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2233 if (!buf)
2234 return -ENOMEM;
2235
2236 cifs_down_write(&cinode->lock_sem);
2237 for (i = 0; i < 2; i++) {
2238 cur = buf;
2239 num = 0;
2240 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2241 if (flock->fl_start > li->offset ||
2242 (flock->fl_start + length) <
2243 (li->offset + li->length))
2244 continue;
2245 if (current->tgid != li->pid)
2246 continue;
2247 if (types[i] != li->type)
2248 continue;
2249 if (cinode->can_cache_brlcks) {
2250 /*
2251 * We can cache brlock requests - simply remove
2252 * a lock from the file's list.
2253 */
2254 list_del(&li->llist);
2255 cifs_del_lock_waiters(li);
2256 kfree(li);
2257 continue;
2258 }
2259 cur->Pid = cpu_to_le16(li->pid);
2260 cur->LengthLow = cpu_to_le32((u32)li->length);
2261 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2262 cur->OffsetLow = cpu_to_le32((u32)li->offset);
2263 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2264 /*
2265 * We need to save a lock here to let us add it again to
2266 * the file's list if the unlock range request fails on
2267 * the server.
2268 */
2269 list_move(&li->llist, &tmp_llist);
2270 if (++num == max_num) {
2271 stored_rc = cifs_lockv(xid, tcon,
2272 cfile->fid.netfid,
2273 li->type, num, 0, buf);
2274 if (stored_rc) {
2275 /*
2276 * We failed on the unlock range
2277 * request - add all locks from the tmp
2278 * list to the head of the file's list.
2279 */
2280 cifs_move_llist(&tmp_llist,
2281 &cfile->llist->locks);
2282 rc = stored_rc;
2283 } else
2284 /*
2285 				 * The unlock range request succeeded -
2286 * free the tmp list.
2287 */
2288 cifs_free_llist(&tmp_llist);
2289 cur = buf;
2290 num = 0;
2291 } else
2292 cur++;
2293 }
2294 if (num) {
2295 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2296 types[i], num, 0, buf);
2297 if (stored_rc) {
2298 cifs_move_llist(&tmp_llist,
2299 &cfile->llist->locks);
2300 rc = stored_rc;
2301 } else
2302 cifs_free_llist(&tmp_llist);
2303 }
2304 }
2305
2306 up_write(&cinode->lock_sem);
2307 kfree(buf);
2308 return rc;
2309 }
2310 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2311
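/*
 * Handle F_SETLK/F_SETLKW and flock requests: take or release the range
 * locally and on the server as the protocol and caching state require.
 */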
2312 static int
2313 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2314 bool wait_flag, bool posix_lck, int lock, int unlock,
2315 unsigned int xid)
2316 {
2317 int rc = 0;
2318 __u64 length = cifs_flock_len(flock);
2319 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2320 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2321 struct TCP_Server_Info *server = tcon->ses->server;
2322 struct inode *inode = d_inode(cfile->dentry);
2323
2324 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2325 if (posix_lck) {
2326 int posix_lock_type;
2327
2328 rc = cifs_posix_lock_set(file, flock);
2329 if (rc <= FILE_LOCK_DEFERRED)
2330 return rc;
2331
2332 if (type & server->vals->shared_lock_type)
2333 posix_lock_type = CIFS_RDLCK;
2334 else
2335 posix_lock_type = CIFS_WRLCK;
2336
2337 if (unlock == 1)
2338 posix_lock_type = CIFS_UNLCK;
2339
2340 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2341 hash_lockowner(flock->c.flc_owner),
2342 flock->fl_start, length,
2343 NULL, posix_lock_type, wait_flag);
2344 goto out;
2345 }
2346 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2347 if (lock) {
2348 struct cifsLockInfo *lock;
2349
2350 lock = cifs_lock_init(flock->fl_start, length, type,
2351 flock->c.flc_flags);
2352 if (!lock)
2353 return -ENOMEM;
2354
2355 rc = cifs_lock_add_if(cfile, lock, wait_flag);
2356 if (rc < 0) {
2357 kfree(lock);
2358 return rc;
2359 }
2360 if (!rc)
2361 goto out;
2362
2363 /*
2364 		 * A Windows 7 server can delay breaking a lease from read to None
2365 		 * if we set a byte-range lock on a file - break it explicitly
2366 		 * before sending the lock to the server to be sure the next
2367 		 * read won't conflict with non-overlapping locks due to
2368 		 * page reading.
2369 */
2370 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2371 CIFS_CACHE_READ(CIFS_I(inode))) {
2372 cifs_zap_mapping(inode);
2373 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2374 inode);
2375 CIFS_I(inode)->oplock = 0;
2376 }
2377
2378 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2379 type, 1, 0, wait_flag);
2380 if (rc) {
2381 kfree(lock);
2382 return rc;
2383 }
2384
2385 cifs_lock_add(cfile, lock);
2386 } else if (unlock)
2387 rc = server->ops->mand_unlock_range(cfile, flock, xid);
2388
2389 out:
2390 if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2391 /*
2392 * If this is a request to remove all locks because we
2393 * are closing the file, it doesn't matter if the
2394 * unlocking failed as both cifs.ko and the SMB server
2395 * remove the lock on file close
2396 */
2397 if (rc) {
2398 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2399 if (!(flock->c.flc_flags & FL_CLOSE))
2400 return rc;
2401 }
2402 rc = locks_lock_file_wait(file, flock);
2403 }
2404 return rc;
2405 }
2406
2407 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2408 {
2409 int rc, xid;
2410 int lock = 0, unlock = 0;
2411 bool wait_flag = false;
2412 bool posix_lck = false;
2413 struct cifs_sb_info *cifs_sb;
2414 struct cifs_tcon *tcon;
2415 struct cifsFileInfo *cfile;
2416 __u32 type;
2417
2418 xid = get_xid();
2419
2420 if (!(fl->c.flc_flags & FL_FLOCK)) {
2421 rc = -ENOLCK;
2422 free_xid(xid);
2423 return rc;
2424 }
2425
2426 cfile = (struct cifsFileInfo *)file->private_data;
2427 tcon = tlink_tcon(cfile->tlink);
2428
2429 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2430 tcon->ses->server);
2431 cifs_sb = CIFS_FILE_SB(file);
2432
2433 if (cap_unix(tcon->ses) &&
2434 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2435 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2436 posix_lck = true;
2437
2438 if (!lock && !unlock) {
2439 /*
2440 * if no lock or unlock then nothing to do since we do not
2441 * know what it is
2442 */
2443 rc = -EOPNOTSUPP;
2444 free_xid(xid);
2445 return rc;
2446 }
2447
2448 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2449 xid);
2450 free_xid(xid);
2451 return rc;
2454 }
2455
2456 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2457 {
2458 int rc, xid;
2459 int lock = 0, unlock = 0;
2460 bool wait_flag = false;
2461 bool posix_lck = false;
2462 struct cifs_sb_info *cifs_sb;
2463 struct cifs_tcon *tcon;
2464 struct cifsFileInfo *cfile;
2465 __u32 type;
2466
2467 rc = -EACCES;
2468 xid = get_xid();
2469
2470 cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2471 flock->c.flc_flags, flock->c.flc_type,
2472 (long long)flock->fl_start,
2473 (long long)flock->fl_end);
2474
2475 cfile = (struct cifsFileInfo *)file->private_data;
2476 tcon = tlink_tcon(cfile->tlink);
2477
2478 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2479 tcon->ses->server);
2480 cifs_sb = CIFS_FILE_SB(file);
2481 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2482
2483 if (cap_unix(tcon->ses) &&
2484 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2485 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2486 posix_lck = true;
2487 /*
2488 * BB add code here to normalize offset and length to account for
2489 	 * negative length, which we cannot accept over the wire.
2490 */
2491 if (IS_GETLK(cmd)) {
2492 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2493 free_xid(xid);
2494 return rc;
2495 }
2496
2497 if (!lock && !unlock) {
2498 /*
2499 * if no lock or unlock then nothing to do since we do not
2500 * know what it is
2501 */
2502 free_xid(xid);
2503 return -EOPNOTSUPP;
2504 }
2505
2506 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2507 xid);
2508 free_xid(xid);
2509 return rc;
2510 }
2511
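/*
 * Completion handler for a write subrequest: for unbuffered and direct
 * writes, advance the cached zero point, and extend the remote file size
 * if the write reached beyond it.
 */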
2512 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
2513 {
2514 struct netfs_io_request *wreq = wdata->rreq;
2515 struct netfs_inode *ictx = netfs_inode(wreq->inode);
2516 loff_t wrend;
2517
2518 if (result > 0) {
2519 wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2520
2521 if (wrend > ictx->zero_point &&
2522 (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2523 wdata->rreq->origin == NETFS_DIO_WRITE))
2524 ictx->zero_point = wrend;
2525 if (wrend > ictx->remote_i_size)
2526 netfs_resize_file(ictx, wrend, true);
2527 }
2528
2529 netfs_write_subrequest_terminated(&wdata->subreq, result);
2530 }
2531
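/*
 * Find a readable open handle for this inode, taking a reference on it;
 * returns NULL if every candidate is write-only or invalid.
 */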
2532 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2533 bool fsuid_only)
2534 {
2535 struct cifsFileInfo *open_file = NULL;
2536 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2537
2538 /* only filter by fsuid on multiuser mounts */
2539 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2540 fsuid_only = false;
2541
2542 spin_lock(&cifs_inode->open_file_lock);
2543 /* we could simply get the first_list_entry since write-only entries
2544 are always at the end of the list but since the first entry might
2545 have a close pending, we go through the whole list */
2546 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2547 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2548 continue;
2549 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2550 			if (!open_file->invalidHandle) {
2551 /* found a good file */
2552 /* lock it so it will not be closed on us */
2553 cifsFileInfo_get(open_file);
2554 spin_unlock(&cifs_inode->open_file_lock);
2555 return open_file;
2556 } /* else might as well continue, and look for
2557 another, or simply have the caller reopen it
2558 again rather than trying to fix this handle */
2559 } else /* write only file */
2560 break; /* write only files are last so must be done */
2561 }
2562 spin_unlock(&cifs_inode->open_file_lock);
2563 return NULL;
2564 }
2565
2566 /* Return -EBADF if no handle is found and general rc otherwise */
2567 int
2568 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2569 struct cifsFileInfo **ret_file)
2570 {
2571 struct cifsFileInfo *open_file, *inv_file = NULL;
2572 struct cifs_sb_info *cifs_sb;
2573 bool any_available = false;
2574 int rc = -EBADF;
2575 unsigned int refind = 0;
2576 bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2577 bool with_delete = flags & FIND_WR_WITH_DELETE;
2578 *ret_file = NULL;
2579
2580 /*
2581 * Having a null inode here (because mapping->host was set to zero by
2582 	 * the VFS or MM) should not happen but we had reports of an oops (due
2583 * to it being zero) during stress testcases so we need to check for it
2584 */
2585
2586 if (cifs_inode == NULL) {
2587 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2588 dump_stack();
2589 return rc;
2590 }
2591
2592 cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2593
2594 /* only filter by fsuid on multiuser mounts */
2595 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2596 fsuid_only = false;
2597
2598 spin_lock(&cifs_inode->open_file_lock);
2599 refind_writable:
2600 if (refind > MAX_REOPEN_ATT) {
2601 spin_unlock(&cifs_inode->open_file_lock);
2602 return rc;
2603 }
2604 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2605 if (!any_available && open_file->pid != current->tgid)
2606 continue;
2607 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2608 continue;
2609 if (with_delete && !(open_file->fid.access & DELETE))
2610 continue;
2611 if ((flags & FIND_WR_NO_PENDING_DELETE) &&
2612 open_file->status_file_deleted)
2613 continue;
2614 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2615 if (!open_file->invalidHandle) {
2616 /* found a good writable file */
2617 cifsFileInfo_get(open_file);
2618 spin_unlock(&cifs_inode->open_file_lock);
2619 *ret_file = open_file;
2620 return 0;
2621 } else {
2622 if (!inv_file)
2623 inv_file = open_file;
2624 }
2625 }
2626 }
2627 /* couldn't find usable FH with same pid, try any available */
2628 if (!any_available) {
2629 any_available = true;
2630 goto refind_writable;
2631 }
2632
2633 if (inv_file) {
2634 any_available = false;
2635 cifsFileInfo_get(inv_file);
2636 }
2637
2638 spin_unlock(&cifs_inode->open_file_lock);
2639
2640 if (inv_file) {
2641 rc = cifs_reopen_file(inv_file, false);
2642 if (!rc) {
2643 *ret_file = inv_file;
2644 return 0;
2645 }
2646
2647 spin_lock(&cifs_inode->open_file_lock);
2648 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2649 spin_unlock(&cifs_inode->open_file_lock);
2650 cifsFileInfo_put(inv_file);
2651 ++refind;
2652 inv_file = NULL;
2653 spin_lock(&cifs_inode->open_file_lock);
2654 goto refind_writable;
2655 }
2656
2657 return rc;
2658 }
2659
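/* Wrapper around cifs_get_writable_file() returning the handle or NULL. */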
2660 struct cifsFileInfo *
2661 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2662 {
2663 struct cifsFileInfo *cfile;
2664 int rc;
2665
2666 rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2667 if (rc)
2668 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2669
2670 return cfile;
2671 }
2672
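/*
 * Look the path up among the tcon's open files and return a writable
 * handle for the matching inode via cifs_get_writable_file().
 */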
2673 int
2674 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2675 int flags,
2676 struct cifsFileInfo **ret_file)
2677 {
2678 struct cifsFileInfo *cfile;
2679 void *page = alloc_dentry_path();
2680
2681 *ret_file = NULL;
2682
2683 spin_lock(&tcon->open_file_lock);
2684 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2685 struct cifsInodeInfo *cinode;
2686 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2687 if (IS_ERR(full_path)) {
2688 spin_unlock(&tcon->open_file_lock);
2689 free_dentry_path(page);
2690 return PTR_ERR(full_path);
2691 }
2692 if (strcmp(full_path, name))
2693 continue;
2694
2695 cinode = CIFS_I(d_inode(cfile->dentry));
2696 spin_unlock(&tcon->open_file_lock);
2697 free_dentry_path(page);
2698 return cifs_get_writable_file(cinode, flags, ret_file);
2699 }
2700
2701 spin_unlock(&tcon->open_file_lock);
2702 free_dentry_path(page);
2703 return -ENOENT;
2704 }
2705
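/*
 * Look the path up among the tcon's open files and return a readable
 * handle for the matching inode, skipping handles whose file the server
 * has already deleted.
 */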
2706 int
2707 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2708 struct cifsFileInfo **ret_file)
2709 {
2710 struct cifsFileInfo *cfile;
2711 void *page = alloc_dentry_path();
2712
2713 *ret_file = NULL;
2714
2715 spin_lock(&tcon->open_file_lock);
2716 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2717 struct cifsInodeInfo *cinode;
2718 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2719 if (IS_ERR(full_path)) {
2720 spin_unlock(&tcon->open_file_lock);
2721 free_dentry_path(page);
2722 return PTR_ERR(full_path);
2723 }
2724 if (strcmp(full_path, name))
2725 continue;
2726
2727 cinode = CIFS_I(d_inode(cfile->dentry));
2728 spin_unlock(&tcon->open_file_lock);
2729 free_dentry_path(page);
2730 *ret_file = find_readable_file(cinode, 0);
2731 if (*ret_file) {
2732 spin_lock(&cinode->open_file_lock);
2733 if ((*ret_file)->status_file_deleted) {
2734 spin_unlock(&cinode->open_file_lock);
2735 cifsFileInfo_put(*ret_file);
2736 *ret_file = NULL;
2737 } else {
2738 spin_unlock(&cinode->open_file_lock);
2739 }
2740 }
2741 return *ret_file ? 0 : -ENOENT;
2742 }
2743
2744 spin_unlock(&tcon->open_file_lock);
2745 free_dentry_path(page);
2746 return -ENOENT;
2747 }
2748
2749 /*
2750 * Flush data on a strict file.
2751 */
2752 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2753 int datasync)
2754 {
2755 struct cifsFileInfo *smbfile = file->private_data;
2756 struct inode *inode = file_inode(file);
2757 unsigned int xid;
2758 int rc;
2759
2760 rc = file_write_and_wait_range(file, start, end);
2761 if (rc) {
2762 trace_cifs_fsync_err(inode->i_ino, rc);
2763 return rc;
2764 }
2765
2766 cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync);
2767
2768 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2769 rc = cifs_zap_mapping(inode);
2770 cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc);
2771 }
2772
2773 xid = get_xid();
2774 rc = cifs_file_flush(xid, inode, smbfile);
2775 free_xid(xid);
2776 return rc;
2777 }
2778
2779 /*
2780  * Flush data on a non-strict file.
2781 */
2782 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2783 {
2784 unsigned int xid;
2785 int rc = 0;
2786 struct cifs_tcon *tcon;
2787 struct TCP_Server_Info *server;
2788 struct cifsFileInfo *smbfile = file->private_data;
2789 struct inode *inode = file_inode(file);
2790 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2791
2792 rc = file_write_and_wait_range(file, start, end);
2793 if (rc) {
2794 trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2795 return rc;
2796 }
2797
2798 xid = get_xid();
2799
2800 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2801 file, datasync);
2802
2803 tcon = tlink_tcon(smbfile->tlink);
2804 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2805 server = tcon->ses->server;
2806 if (server->ops->flush == NULL) {
2807 rc = -ENOSYS;
2808 goto fsync_exit;
2809 }
2810
2811 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2812 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2813 if (smbfile) {
2814 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2815 cifsFileInfo_put(smbfile);
2816 } else
2817 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2818 } else
2819 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2820 }
2821
2822 fsync_exit:
2823 free_xid(xid);
2824 return rc;
2825 }
2826
2827 /*
2828 * As file closes, flush all cached write data for this inode checking
2829 * for write behind errors.
2830 */
2831 int cifs_flush(struct file *file, fl_owner_t id)
2832 {
2833 struct inode *inode = file_inode(file);
2834 int rc = 0;
2835
2836 if (file->f_mode & FMODE_WRITE)
2837 rc = filemap_write_and_wait(inode->i_mapping);
2838
2839 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2840 if (rc) {
2841 /* get more nuanced writeback errors */
2842 rc = filemap_check_wb_err(file->f_mapping, 0);
2843 trace_cifs_flush_err(inode->i_ino, rc);
2844 }
2845 return rc;
2846 }
2847
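/*
 * Buffered write used when mandatory byte-range locks may be in force:
 * lock_sem is held shared across the write so that no conflicting brlock
 * can be added while the I/O is in flight.
 */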
2848 static ssize_t
2849 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2850 {
2851 struct file *file = iocb->ki_filp;
2852 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2853 struct inode *inode = file->f_mapping->host;
2854 struct cifsInodeInfo *cinode = CIFS_I(inode);
2855 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2856 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2857 ssize_t rc;
2858
2859 rc = netfs_start_io_write(inode);
2860 if (rc < 0)
2861 return rc;
2862
2863 /*
2864 * We need to hold the sem to be sure nobody modifies lock list
2865 * with a brlock that prevents writing.
2866 */
2867 down_read(&cinode->lock_sem);
2868
2869 rc = generic_write_checks(iocb, from);
2870 if (rc <= 0)
2871 goto out;
2872
2873 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2874 (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2875 server->vals->exclusive_lock_type, 0,
2876 NULL, CIFS_WRITE_OP))) {
2877 rc = -EACCES;
2878 goto out;
2879 }
2880
2881 rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2882
2883 out:
2884 up_read(&cinode->lock_sem);
2885 netfs_end_io_write(inode);
2886 if (rc > 0)
2887 rc = generic_write_sync(iocb, rc);
2888 return rc;
2889 }
2890
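/*
 * Write path for strict cache mode: go through the page cache only when
 * we hold a write-caching oplock/lease; otherwise the data goes to the
 * server and any read-cached pages are invalidated afterwards.
 */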
2891 ssize_t
2892 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2893 {
2894 struct inode *inode = file_inode(iocb->ki_filp);
2895 struct cifsInodeInfo *cinode = CIFS_I(inode);
2896 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2897 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2898 iocb->ki_filp->private_data;
2899 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2900 ssize_t written;
2901
2902 written = cifs_get_writer(cinode);
2903 if (written)
2904 return written;
2905
2906 if (CIFS_CACHE_WRITE(cinode)) {
2907 if (cap_unix(tcon->ses) &&
2908 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2909 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2910 written = netfs_file_write_iter(iocb, from);
2911 goto out;
2912 }
2913 written = cifs_writev(iocb, from);
2914 goto out;
2915 }
2916 /*
2917 * For non-oplocked files in strict cache mode we need to write the data
2918 	 * to the server exactly from pos to pos+len-1 rather than flush all
2919 	 * affected pages because it may cause an error with mandatory locks on
2920 	 * these pages but not on the region from pos to pos+len-1.
2921 */
2922 written = netfs_file_write_iter(iocb, from);
2923 if (CIFS_CACHE_READ(cinode)) {
2924 /*
2925 * We have read level caching and we have just sent a write
2926 * request to the server thus making data in the cache stale.
2927 * Zap the cache and set oplock/lease level to NONE to avoid
2928 * reading stale data from the cache. All subsequent read
2929 * operations will read new data from the server.
2930 */
2931 cifs_zap_mapping(inode);
2932 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2933 inode);
2934 cinode->oplock = 0;
2935 }
2936 out:
2937 cifs_put_writer(cinode);
2938 return written;
2939 }
2940
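/*
 * Read path for loose cache mode: revalidate the mapping first so that
 * stale cached pages are dropped before the buffered read.
 */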
2941 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2942 {
2943 ssize_t rc;
2944 struct inode *inode = file_inode(iocb->ki_filp);
2945
2946 if (iocb->ki_flags & IOCB_DIRECT)
2947 return netfs_unbuffered_read_iter(iocb, iter);
2948
2949 rc = cifs_revalidate_mapping(inode);
2950 if (rc)
2951 return rc;
2952
2953 return netfs_file_read_iter(iocb, iter);
2954 }
2955
2956 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2957 {
2958 struct inode *inode = file_inode(iocb->ki_filp);
2959 struct cifsInodeInfo *cinode = CIFS_I(inode);
2960 ssize_t written;
2961 int rc;
2962
2963 if (iocb->ki_filp->f_flags & O_DIRECT) {
2964 written = netfs_unbuffered_write_iter(iocb, from);
2965 if (written > 0 && CIFS_CACHE_READ(cinode)) {
2966 cifs_zap_mapping(inode);
2967 cifs_dbg(FYI,
2968 "Set no oplock for inode=%p after a write operation\n",
2969 inode);
2970 cinode->oplock = 0;
2971 }
2972 return written;
2973 }
2974
2975 written = cifs_get_writer(cinode);
2976 if (written)
2977 return written;
2978
2979 written = netfs_file_write_iter(iocb, from);
2980
2981 if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2982 rc = filemap_fdatawrite(inode->i_mapping);
2983 if (rc)
2984 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2985 rc, inode);
2986 }
2987
2988 cifs_put_writer(cinode);
2989 return written;
2990 }
2991
2992 ssize_t
2993 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2994 {
2995 struct inode *inode = file_inode(iocb->ki_filp);
2996 struct cifsInodeInfo *cinode = CIFS_I(inode);
2997 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2998 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2999 iocb->ki_filp->private_data;
3000 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3001 int rc = -EACCES;
3002
3003 /*
3004 * In strict cache mode we need to read from the server all the time
3005 * if we don't have level II oplock because the server can delay mtime
3006 * change - so we can't make a decision about inode invalidating.
3007  * And we can also fail with page reading if there are mandatory locks
3008 * on pages affected by this read but not on the region from pos to
3009 * pos+len-1.
3010 */
3011 if (!CIFS_CACHE_READ(cinode))
3012 return netfs_unbuffered_read_iter(iocb, to);
3013
3014 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
3015 if (iocb->ki_flags & IOCB_DIRECT)
3016 return netfs_unbuffered_read_iter(iocb, to);
3017 return netfs_buffered_read_iter(iocb, to);
3018 }
3019
3020 /*
3021 * We need to hold the sem to be sure nobody modifies lock list
3022 * with a brlock that prevents reading.
3023 */
3024 if (iocb->ki_flags & IOCB_DIRECT) {
3025 rc = netfs_start_io_direct(inode);
3026 if (rc < 0)
3027 goto out;
3028 rc = -EACCES;
3029 down_read(&cinode->lock_sem);
3030 if (!cifs_find_lock_conflict(
3031 cfile, iocb->ki_pos, iov_iter_count(to),
3032 tcon->ses->server->vals->shared_lock_type,
3033 0, NULL, CIFS_READ_OP))
3034 rc = netfs_unbuffered_read_iter_locked(iocb, to);
3035 up_read(&cinode->lock_sem);
3036 netfs_end_io_direct(inode);
3037 } else {
3038 rc = netfs_start_io_read(inode);
3039 if (rc < 0)
3040 goto out;
3041 rc = -EACCES;
3042 down_read(&cinode->lock_sem);
3043 if (!cifs_find_lock_conflict(
3044 cfile, iocb->ki_pos, iov_iter_count(to),
3045 tcon->ses->server->vals->shared_lock_type,
3046 0, NULL, CIFS_READ_OP))
3047 rc = filemap_read(iocb, to, 0);
3048 up_read(&cinode->lock_sem);
3049 netfs_end_io_read(inode);
3050 }
3051 out:
3052 return rc;
3053 }
3054
3055 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
3056 {
3057 return netfs_page_mkwrite(vmf, NULL);
3058 }
3059
3060 static const struct vm_operations_struct cifs_file_vm_ops = {
3061 .fault = filemap_fault,
3062 .map_pages = filemap_map_pages,
3063 .page_mkwrite = cifs_page_mkwrite,
3064 };
3065
3066 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3067 {
3068 int xid, rc = 0;
3069 struct inode *inode = file_inode(desc->file);
3070
3071 xid = get_xid();
3072
3073 if (!CIFS_CACHE_READ(CIFS_I(inode)))
3074 rc = cifs_zap_mapping(inode);
3075 if (!rc)
3076 rc = generic_file_mmap_prepare(desc);
3077 if (!rc)
3078 desc->vm_ops = &cifs_file_vm_ops;
3079
3080 free_xid(xid);
3081 return rc;
3082 }
3083
3084 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3085 {
3086 int rc, xid;
3087
3088 xid = get_xid();
3089
3090 rc = cifs_revalidate_file(desc->file);
3091 if (rc)
3092 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3093 rc);
3094 if (!rc)
3095 rc = generic_file_mmap_prepare(desc);
3096 if (!rc)
3097 desc->vm_ops = &cifs_file_vm_ops;
3098
3099 free_xid(xid);
3100 return rc;
3101 }
3102
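/* Return 1 if any handle on this inode is open for writing, else 0. */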
3103 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3104 {
3105 struct cifsFileInfo *open_file;
3106
3107 spin_lock(&cifs_inode->open_file_lock);
3108 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3109 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3110 spin_unlock(&cifs_inode->open_file_lock);
3111 return 1;
3112 }
3113 }
3114 spin_unlock(&cifs_inode->open_file_lock);
3115 return 0;
3116 }
3117
3118 /* We do not want to update the file size from server for inodes
3119 open for write - to avoid races with writepage extending
3120 the file - in the future we could consider allowing
3121 refreshing the inode only on increases in the file size
3122 but this is tricky to do without racing with writebehind
3123 page caching in the current Linux kernel design */
3124 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3125 bool from_readdir)
3126 {
3127 if (!cifsInode)
3128 return true;
3129
3130 if (is_inode_writable(cifsInode) ||
3131 ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3132 /* This inode is open for write at least once */
3133 struct cifs_sb_info *cifs_sb;
3134
3135 cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3136 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3137 /* since no page cache to corrupt on directio
3138 we can change size safely */
3139 return true;
3140 }
3141
3142 if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3143 return true;
3144
3145 return false;
3146 } else
3147 return true;
3148 }
3149
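/*
 * Work item run when the server breaks our oplock or lease: downgrade
 * the caching level, flush (and possibly invalidate) cached data, push
 * cached byte-range locks to the server, then acknowledge the break.
 */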
3150 void cifs_oplock_break(struct work_struct *work)
3151 {
3152 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3153 oplock_break);
3154 struct inode *inode = d_inode(cfile->dentry);
3155 struct super_block *sb = inode->i_sb;
3156 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
3157 struct cifsInodeInfo *cinode = CIFS_I(inode);
3158 struct cifs_tcon *tcon;
3159 struct TCP_Server_Info *server;
3160 struct tcon_link *tlink;
3161 int rc = 0;
3162 bool purge_cache = false, oplock_break_cancelled;
3163 __u64 persistent_fid, volatile_fid;
3164 __u16 net_fid;
3165
3166 /*
3167 * Hold a reference to the superblock to prevent it and its inodes from
3168 * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
3169 * may release the last reference to the sb and trigger inode eviction.
3170 */
3171 cifs_sb_active(sb);
3172 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3173 TASK_UNINTERRUPTIBLE);
3174
3175 tlink = cifs_sb_tlink(cifs_sb);
3176 if (IS_ERR(tlink))
3177 goto out;
3178 tcon = tlink_tcon(tlink);
3179 server = tcon->ses->server;
3180
3181 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3182 cfile->oplock_epoch, &purge_cache);
3183
3184 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3185 cifs_has_mand_locks(cinode)) {
3186 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3187 inode);
3188 cinode->oplock = 0;
3189 }
3190
3191 if (S_ISREG(inode->i_mode)) {
3192 if (CIFS_CACHE_READ(cinode))
3193 break_lease(inode, O_RDONLY);
3194 else
3195 break_lease(inode, O_WRONLY);
3196 rc = filemap_fdatawrite(inode->i_mapping);
3197 if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3198 rc = filemap_fdatawait(inode->i_mapping);
3199 mapping_set_error(inode->i_mapping, rc);
3200 cifs_zap_mapping(inode);
3201 }
3202 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3203 if (CIFS_CACHE_WRITE(cinode))
3204 goto oplock_break_ack;
3205 }
3206
3207 rc = cifs_push_locks(cfile);
3208 if (rc)
3209 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3210
3211 oplock_break_ack:
3212 /*
3213 	 * When an oplock break is received and there are no active
3214 	 * file handles, only cached ones, schedule the deferred close
3215 	 * immediately so that a new open will not use the cached handle.
3216 */
3217
3218 if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3219 cifs_close_deferred_file(cinode);
3220
3221 persistent_fid = cfile->fid.persistent_fid;
3222 volatile_fid = cfile->fid.volatile_fid;
3223 net_fid = cfile->fid.netfid;
3224 oplock_break_cancelled = cfile->oplock_break_cancelled;
3225
3226 _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3227 /*
3228 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3229 * an acknowledgment to be sent when the file has already been closed.
3230 */
3231 spin_lock(&cinode->open_file_lock);
3232 /* check list empty since can race with kill_sb calling tree disconnect */
3233 if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3234 spin_unlock(&cinode->open_file_lock);
3235 rc = server->ops->oplock_response(tcon, persistent_fid,
3236 volatile_fid, net_fid, cinode);
3237 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3238 } else
3239 spin_unlock(&cinode->open_file_lock);
3240
3241 cifs_put_tlink(tlink);
3242 out:
3243 cifs_done_oplock_break(cinode);
3244 cifs_sb_deactive(sb);
3245 }
3246
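/*
 * Activate a swapfile over SMB3: refuse files with holes, flag the open
 * handle as backing swap, and register one extent covering the file.
 */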
3247 static int cifs_swap_activate(struct swap_info_struct *sis,
3248 struct file *swap_file, sector_t *span)
3249 {
3250 struct cifsFileInfo *cfile = swap_file->private_data;
3251 struct inode *inode = swap_file->f_mapping->host;
3252 unsigned long blocks;
3253 long long isize;
3254
3255 cifs_dbg(FYI, "swap activate\n");
3256
3257 if (!swap_file->f_mapping->a_ops->swap_rw)
3258 /* Cannot support swap */
3259 return -EINVAL;
3260
3261 spin_lock(&inode->i_lock);
3262 blocks = inode->i_blocks;
3263 isize = inode->i_size;
3264 spin_unlock(&inode->i_lock);
3265 if (blocks*512 < isize) {
3266 pr_warn("swap activate: swapfile has holes\n");
3267 return -EINVAL;
3268 }
3269 *span = sis->pages;
3270
3271 pr_warn_once("Swap support over SMB3 is experimental\n");
3272
3273 /*
3274 * TODO: consider adding ACL (or documenting how) to prevent other
3275 * users (on this or other systems) from reading it
3276 */
3277
3279 /* TODO: add sk_set_memalloc(inet) or similar */
3280
3281 if (cfile)
3282 cfile->swapfile = true;
3283 /*
3284 * TODO: Since file already open, we can't open with DENY_ALL here
3285 * but we could add call to grab a byte range lock to prevent others
3286 * from reading or writing the file
3287 */
3288
3289 sis->flags |= SWP_FS_OPS;
3290 return add_swap_extent(sis, 0, sis->max, 0);
3291 }
3292
3293 static void cifs_swap_deactivate(struct file *file)
3294 {
3295 struct cifsFileInfo *cfile = file->private_data;
3296
3297 cifs_dbg(FYI, "swap deactivate\n");
3298
3299 /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3300
3301 if (cfile)
3302 cfile->swapfile = false;
3303
3304 /* do we need to unpin (or unlock) the file */
3305 }
3306
3307 /**
3308 * cifs_swap_rw - SMB3 address space operation for swap I/O
3309 * @iocb: target I/O control block
3310 * @iter: I/O buffer
3311 *
3312 * Perform IO to the swap-file. This is much like direct IO.
3313 */
3314 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3315 {
3316 ssize_t ret;
3317
3318 if (iov_iter_rw(iter) == READ)
3319 ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3320 else
3321 ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3322 if (ret < 0)
3323 return ret;
3324 return 0;
3325 }
3326
3327 const struct address_space_operations cifs_addr_ops = {
3328 .read_folio = netfs_read_folio,
3329 .readahead = netfs_readahead,
3330 .writepages = netfs_writepages,
3331 .dirty_folio = netfs_dirty_folio,
3332 .release_folio = netfs_release_folio,
3333 .direct_IO = noop_direct_IO,
3334 .invalidate_folio = netfs_invalidate_folio,
3335 .migrate_folio = filemap_migrate_folio,
3336 /*
3337 * TODO: investigate and if useful we could add an is_dirty_writeback
3338 * helper if needed
3339 */
3340 .swap_activate = cifs_swap_activate,
3341 .swap_deactivate = cifs_swap_deactivate,
3342 .swap_rw = cifs_swap_rw,
3343 };
3344
3345 /*
3346 * cifs_readahead requires the server to support a buffer large enough to
3347 * contain the header plus one complete page of data. Otherwise, we need
3348 * to leave cifs_readahead out of the address space operations.
3349 */
3350 const struct address_space_operations cifs_addr_ops_smallbuf = {
3351 .read_folio = netfs_read_folio,
3352 .writepages = netfs_writepages,
3353 .dirty_folio = netfs_dirty_folio,
3354 .release_folio = netfs_release_folio,
3355 .invalidate_folio = netfs_invalidate_folio,
3356 .migrate_folio = filemap_migrate_folio,
3357 };
3358