1 // SPDX-License-Identifier: LGPL-2.1 2 /* 3 * 4 * vfs operations that deal with files 5 * 6 * Copyright (C) International Business Machines Corp., 2002,2010 7 * Author(s): Steve French (sfrench@us.ibm.com) 8 * Jeremy Allison (jra@samba.org) 9 * 10 */ 11 #include <linux/fs.h> 12 #include <linux/fs_struct.h> 13 #include <linux/filelock.h> 14 #include <linux/backing-dev.h> 15 #include <linux/stat.h> 16 #include <linux/fcntl.h> 17 #include <linux/pagemap.h> 18 #include <linux/pagevec.h> 19 #include <linux/writeback.h> 20 #include <linux/task_io_accounting_ops.h> 21 #include <linux/delay.h> 22 #include <linux/mount.h> 23 #include <linux/slab.h> 24 #include <linux/swap.h> 25 #include <linux/mm.h> 26 #include <asm/div64.h> 27 #include "cifsfs.h" 28 #include "cifsglob.h" 29 #include "cifsproto.h" 30 #include "smb2proto.h" 31 #include "cifs_unicode.h" 32 #include "cifs_debug.h" 33 #include "cifs_fs_sb.h" 34 #include "fscache.h" 35 #include "smbdirect.h" 36 #include "fs_context.h" 37 #include "cifs_ioctl.h" 38 #include "cached_dir.h" 39 #include <trace/events/netfs.h> 40 41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush); 42 43 /* 44 * Prepare a subrequest to upload to the server. We need to allocate credits 45 * so that we know the maximum amount of data that we can include in it. 
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
	size_t wsize = req->rreq.wsize;
	int rc;

	/* Take an xid once per subrequest; released in cifs_free_subrequest(). */
	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	/* Pick a channel for this subrequest and remember it for issue time. */
	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

	/* wsize not negotiated yet (e.g. first I/O after mount/reconnect). */
	if (cifs_sb->ctx->wsize == 0)
		cifs_negotiate_wsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

retry:
	/* Reopen a handle invalidated by reconnect before asking for credits. */
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	/*
	 * Reserve credits; this also caps sreq_max_len, which tells netfs how
	 * much data may go into this subrequest.
	 */
	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* RDMA path: segment count is bounded by the FRMR depth. */
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		stream->sreq_max_segs = sp->max_frmr_depth;
	}
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	/* Refuse new I/O once the superblock has been forcibly shut down. */
	if (cifs_forced_shutdown(sbi)) {
		rc = smb_EIO(smb_eio_trace_forced_shutdown);
		goto fail;
	}

	/* Credit needs may have changed since cifs_prepare_write(). */
	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	/* Handle went invalid after prepare; let netfs retry the subrequest. */
	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	/* Completion is reported asynchronously by the writev callback. */
	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	/* Return any credits we still hold before terminating the subreq. */
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc);
	goto out;
}

/* Tell the local fscache that cached data for this inode is now stale. */
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}

/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	/* Take an xid once per subrequest; released in cifs_free_subrequest(). */
	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	/* Pick a channel now so credits and issue use the same server. */
	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	/* rsize not negotiated yet (e.g. first I/O after mount/reconnect). */
	if (cifs_sb->ctx->rsize == 0)
		cifs_negotiate_rsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

	/* Reserve credits; @size comes back as the permitted read length. */
	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* RDMA path: segment count is bounded by the FRMR depth. */
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
	}
#endif
	return 0;
}

/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	/* Credit needs may have changed since cifs_prepare_read(). */
	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	/* Handle was invalidated by a reconnect; reopen before reading. */
	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	/*
	 * For buffered reads, let netfs zero-fill any short tail of the
	 * folio; direct/unbuffered reads must report exact lengths instead.
	 */
	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
	    subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	subreq->error = rc;
	netfs_read_subreq_terminated(subreq);
}

/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	/* Find any open handle with write permission to perform the upload. */
	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		/* Leaving the stream unavailable means nothing gets written. */
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
269 */ 270 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file) 271 { 272 struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq); 273 struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); 274 struct cifsFileInfo *open_file = NULL; 275 276 rreq->rsize = cifs_sb->ctx->rsize; 277 rreq->wsize = cifs_sb->ctx->wsize; 278 req->pid = current->tgid; // Ummm... This may be a workqueue 279 280 if (file) { 281 open_file = file->private_data; 282 rreq->netfs_priv = file->private_data; 283 req->cfile = cifsFileInfo_get(open_file); 284 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 285 req->pid = req->cfile->pid; 286 } else if (rreq->origin != NETFS_WRITEBACK) { 287 WARN_ON_ONCE(1); 288 return smb_EIO1(smb_eio_trace_not_netfs_writeback, rreq->origin); 289 } 290 291 return 0; 292 } 293 294 /* 295 * Completion of a request operation. 296 */ 297 static void cifs_rreq_done(struct netfs_io_request *rreq) 298 { 299 struct timespec64 atime, mtime; 300 struct inode *inode = rreq->inode; 301 302 /* we do not want atime to be less than mtime, it broke some apps */ 303 atime = inode_set_atime_to_ts(inode, current_time(inode)); 304 mtime = inode_get_mtime(inode); 305 if (timespec64_compare(&atime, &mtime)) 306 inode_set_atime_to_ts(inode, inode_get_mtime(inode)); 307 } 308 309 static void cifs_free_request(struct netfs_io_request *rreq) 310 { 311 struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq); 312 313 if (req->cfile) 314 cifsFileInfo_put(req->cfile); 315 } 316 317 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq) 318 { 319 struct cifs_io_subrequest *rdata = 320 container_of(subreq, struct cifs_io_subrequest, subreq); 321 int rc = subreq->error; 322 323 if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) { 324 #ifdef CONFIG_CIFS_SMB_DIRECT 325 if (rdata->mr) { 326 smbd_deregister_mr(rdata->mr); 327 rdata->mr = NULL; 328 } 329 #endif 330 } 331 332 if (rdata->credits.value != 0) 
{ 333 trace_smb3_rw_credits(rdata->rreq->debug_id, 334 rdata->subreq.debug_index, 335 rdata->credits.value, 336 rdata->server ? rdata->server->credits : 0, 337 rdata->server ? rdata->server->in_flight : 0, 338 -rdata->credits.value, 339 cifs_trace_rw_credits_free_subreq); 340 if (rdata->server) 341 add_credits_and_wake_if(rdata->server, &rdata->credits, 0); 342 else 343 rdata->credits.value = 0; 344 } 345 346 if (rdata->have_xid) 347 free_xid(rdata->xid); 348 } 349 350 const struct netfs_request_ops cifs_req_ops = { 351 .request_pool = &cifs_io_request_pool, 352 .subrequest_pool = &cifs_io_subrequest_pool, 353 .init_request = cifs_init_request, 354 .free_request = cifs_free_request, 355 .free_subrequest = cifs_free_subrequest, 356 .prepare_read = cifs_prepare_read, 357 .issue_read = cifs_issue_read, 358 .done = cifs_rreq_done, 359 .begin_writeback = cifs_begin_writeback, 360 .prepare_write = cifs_prepare_write, 361 .issue_write = cifs_issue_write, 362 .invalidate_cache = cifs_netfs_invalidate_cache, 363 }; 364 365 /* 366 * Mark as invalid, all open files on tree connections since they 367 * were closed when session to server was lost. 
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	/* Claim the invalidation so concurrent callers bail out above. */
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	/* Only advance if nobody changed the state while we were unlocked. */
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to evict_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

/*
 * Map POSIX open flags to an NT desired-access mask.  When
 * @rdwr_for_fscache is 1, a write-only open is widened to read+write so
 * fscache can fill in around partial writes.
 */
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ?
			(GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/* Map POSIX open flags to the SMB_O_* flags of the POSIX open extension. */
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/* Map POSIX create/truncate flags to an NT create disposition. */
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Open a file via the legacy SMB1 POSIX extensions.  On success returns
 * the netfid and oplock through @pnetfid/@poplock and, if @pinode is
 * non-NULL, instantiates or revalidates the inode from the returned
 * FILE_UNIX_BASIC_INFO.
 */
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Server did not return file info (Type == -1); caller fetches it. */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Open @full_path with an NT-style create call and refresh the inode
 * from the server.  On failure after a successful open, the handle is
 * closed again before returning.
 */
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		/*
		 * The widened (read+write) access for fscache was refused;
		 * retry once with the access the caller actually asked for.
		 */
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

/* Return true if any handle on this inode holds byte-range locks. */
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

/*
 * Acquire @sem for write by polling rather than blocking; used where a
 * plain down_write() could deadlock with the oplock-break workqueue
 * (NOTE(review): presumed rationale — confirm against lock_sem users).
 */
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

/*
 * Allocate and initialise a cifsFileInfo for a freshly-opened handle,
 * link it onto the tcon and inode open-file lists, and attach it to
 * @file->private_data.  Returns NULL on allocation failure.
 */
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	/* A lease break may have arrived while the open was in flight. */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	/* set_fid() may have flagged that cached pages must be discarded */
	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

/* Take an additional reference on an open file's private data. */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/*
 * Final teardown once the last reference is gone and any server close
 * has completed: drop lock records, the tlink, the dentry and the
 * superblock activity reference, then free the structure.
 */
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

/* Workqueue wrapper so the final put can run outside atomic context. */
static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

/*
 * Deferred server-side close, queued when the synchronous close in
 * _cifsFileInfo_put() failed with -EBUSY/-EAGAIN.  Retries the close a
 * few times, then hands off to the final put.
 */
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);

	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);

	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_for_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload: not offloaded on close and oplock breaks
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	/* Lock order: tcon->open_file_lock, then the inode's, then the file's. */
	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		/* not the last reference; nothing else to do */
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close  because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	/* Must not wait here if we came from the oplock-break handler itself. */
	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// if serverclose has been offloaded to wq (on failure), it will
	// handle offloading put as well. If serverclose not offloaded,
	// we need to handle offloading put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

/*
 * Flush dirty data for @inode to the server via @cfile, or via any
 * writable handle found for the inode when @cfile is NULL or not open
 * for write.  "nosfsync"-style mounts (CIFS_MOUNT_NOSSYNC) skip the
 * flush entirely.
 */
int cifs_file_flush(const unsigned int xid, struct inode *inode,
		    struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_tcon *tcon;
	int rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		return 0;

	if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
		tcon = tlink_tcon(cfile->tlink);
		return tcon->ses->server->ops->flush(xid, tcon,
						     &cfile->fid);
	}
	rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
	if (!rc) {
		tcon = tlink_tcon(cfile->tlink);
		rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
		cifsFileInfo_put(cfile);
	} else if (rc == -EBADF) {
		/* no writable handle at all; nothing to flush */
		rc = 0;
	}
	return rc;
}

/*
 * Truncate @dentry's file to zero length on the server and reflect the
 * new size locally.  Used by cifs_open() to honour O_TRUNC.
 */
static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
	struct inode *inode = d_inode(dentry);
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	int rc;

	/* Push dirty pages out first so nothing is written past the new EOF. */
	rc = filemap_write_and_wait(inode->i_mapping);
	if (is_interrupt_error(rc))
		return -ERESTARTSYS;
	mapping_set_error(inode->i_mapping, rc);

	cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
	rc = cifs_file_flush(xid, inode, cfile);
	if (!rc) {
		if (cfile) {
			tcon = tlink_tcon(cfile->tlink);
			server = tcon->ses->server;
			rc = server->ops->set_file_size(xid, tcon,
							cfile, 0, false);
		}
		if (!rc) {
			netfs_resize_file(&cinode->netfs, 0, true);
			cifs_setsize(inode, 0);
			inode->i_blocks = 0;
		}
	}
	if (cfile)
		cifsFileInfo_put(cfile);
	return rc;
}

int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32
oplock; 1021 struct cifs_sb_info *cifs_sb; 1022 struct TCP_Server_Info *server; 1023 struct cifs_tcon *tcon; 1024 struct tcon_link *tlink; 1025 struct cifsFileInfo *cfile = NULL; 1026 void *page; 1027 const char *full_path; 1028 bool posix_open_ok = false; 1029 struct cifs_fid fid = {}; 1030 struct cifs_pending_open open; 1031 struct cifs_open_info_data data = {}; 1032 1033 xid = get_xid(); 1034 1035 cifs_sb = CIFS_SB(inode->i_sb); 1036 if (unlikely(cifs_forced_shutdown(cifs_sb))) { 1037 free_xid(xid); 1038 return smb_EIO(smb_eio_trace_forced_shutdown); 1039 } 1040 1041 tlink = cifs_sb_tlink(cifs_sb); 1042 if (IS_ERR(tlink)) { 1043 free_xid(xid); 1044 return PTR_ERR(tlink); 1045 } 1046 tcon = tlink_tcon(tlink); 1047 server = tcon->ses->server; 1048 1049 page = alloc_dentry_path(); 1050 full_path = build_path_from_dentry(file_dentry(file), page); 1051 if (IS_ERR(full_path)) { 1052 rc = PTR_ERR(full_path); 1053 goto out; 1054 } 1055 1056 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n", 1057 inode, file->f_flags, full_path); 1058 1059 if (file->f_flags & O_DIRECT && 1060 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { 1061 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 1062 file->f_op = &cifs_file_direct_nobrl_ops; 1063 else 1064 file->f_op = &cifs_file_direct_ops; 1065 } 1066 1067 if (file->f_flags & O_TRUNC) { 1068 rc = cifs_do_truncate(xid, file_dentry(file)); 1069 if (rc) 1070 goto out; 1071 } 1072 1073 /* Get the cached handle as SMB2 close is deferred */ 1074 if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) { 1075 rc = cifs_get_writable_path(tcon, full_path, 1076 FIND_WR_FSUID_ONLY | 1077 FIND_WR_NO_PENDING_DELETE, 1078 &cfile); 1079 } else { 1080 rc = cifs_get_readable_path(tcon, full_path, &cfile); 1081 } 1082 if (rc == 0) { 1083 unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC); 1084 unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC); 1085 1086 if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) && 1087 
(oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) { 1088 file->private_data = cfile; 1089 spin_lock(&CIFS_I(inode)->deferred_lock); 1090 cifs_del_deferred_close(cfile); 1091 spin_unlock(&CIFS_I(inode)->deferred_lock); 1092 goto use_cache; 1093 } 1094 _cifsFileInfo_put(cfile, true, false); 1095 } else { 1096 /* hard link on the defeered close file */ 1097 rc = cifs_get_hardlink_path(tcon, inode, file); 1098 if (rc) 1099 cifs_close_deferred_file(CIFS_I(inode)); 1100 } 1101 1102 if (server->oplocks) 1103 oplock = REQ_OPLOCK; 1104 else 1105 oplock = 0; 1106 1107 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1108 if (!tcon->broken_posix_open && tcon->unix_ext && 1109 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & 1110 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 1111 /* can not refresh inode info since size could be stale */ 1112 rc = cifs_posix_open(full_path, &inode, inode->i_sb, 1113 cifs_sb->ctx->file_mode /* ignored */, 1114 file->f_flags, &oplock, &fid.netfid, xid); 1115 if (rc == 0) { 1116 cifs_dbg(FYI, "posix open succeeded\n"); 1117 posix_open_ok = true; 1118 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { 1119 if (tcon->ses->serverNOS) 1120 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n", 1121 tcon->ses->ip_addr, 1122 tcon->ses->serverNOS); 1123 tcon->broken_posix_open = true; 1124 } else if ((rc != -EIO) && (rc != -EREMOTE) && 1125 (rc != -EOPNOTSUPP)) /* path not found or net err */ 1126 goto out; 1127 /* 1128 * Else fallthrough to retry open the old way on network i/o 1129 * or DFS errors. 
1130 */ 1131 } 1132 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1133 1134 if (server->ops->get_lease_key) 1135 server->ops->get_lease_key(inode, &fid); 1136 1137 cifs_add_pending_open(&fid, tlink, &open); 1138 1139 if (!posix_open_ok) { 1140 if (server->ops->get_lease_key) 1141 server->ops->get_lease_key(inode, &fid); 1142 1143 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid, 1144 xid, &data); 1145 if (rc) { 1146 cifs_del_pending_open(&open); 1147 goto out; 1148 } 1149 } 1150 1151 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target); 1152 if (cfile == NULL) { 1153 if (server->ops->close) 1154 server->ops->close(xid, tcon, &fid); 1155 cifs_del_pending_open(&open); 1156 rc = -ENOMEM; 1157 goto out; 1158 } 1159 1160 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1161 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) { 1162 /* 1163 * Time to set mode which we can not set earlier due to 1164 * problems creating new read-only files. 
1165 */ 1166 struct cifs_unix_set_info_args args = { 1167 .mode = inode->i_mode, 1168 .uid = INVALID_UID, /* no change */ 1169 .gid = INVALID_GID, /* no change */ 1170 .ctime = NO_CHANGE_64, 1171 .atime = NO_CHANGE_64, 1172 .mtime = NO_CHANGE_64, 1173 .device = 0, 1174 }; 1175 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid, 1176 cfile->pid); 1177 } 1178 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1179 1180 use_cache: 1181 fscache_use_cookie(cifs_inode_cookie(file_inode(file)), 1182 file->f_mode & FMODE_WRITE); 1183 if (!(file->f_flags & O_DIRECT)) 1184 goto out; 1185 if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY) 1186 goto out; 1187 cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE); 1188 1189 out: 1190 free_dentry_path(page); 1191 free_xid(xid); 1192 cifs_put_tlink(tlink); 1193 cifs_free_open_info(&data); 1194 return rc; 1195 } 1196 1197 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1198 static int cifs_push_posix_locks(struct cifsFileInfo *cfile); 1199 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1200 1201 /* 1202 * Try to reacquire byte range locks that were released when session 1203 * to server was lost. 
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* posix-style push only when unix extensions allow fcntl locks */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Reopen an invalidated handle (e.g. after reconnect or durable handle
 * timeout).  Serialized against other users of the handle via fh_mutex.
 * If @can_flush, dirty data is written back and inode info refreshed from
 * the server once the handle is valid again.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* somebody else already reopened it for us */
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
			~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		/* the RDWR-for-cache upgrade was refused; retry as requested */
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	scoped_guard(spinlock, &cinode->open_file_lock)
		server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

/*
 * Work item that performs the actual (deferred) close of a handle once the
 * close timeout elapses: remove the tracking entry and drop the reference
 * held by the deferral.
 */
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

/*
 * A close may be deferred only when the mount has a close timeout, the
 * inode holds a read-caching lease (RH or RHW), a tracking structure was
 * allocated, and no lock forced close-on-unlock on the inode.
 */
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	unsigned int oplock = READ_ONCE(cinode->oplock);

	return cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
	       (oplock == CIFS_CACHE_RHW_FLG || oplock == CIFS_CACHE_RH_FLG) &&
	       !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags);
}

/*
 * VFS ->release for regular files.  Defers the real close when the lease
 * allows it, so a quickly re-opened file can reuse the server handle.
 */
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		/* allocation failure simply disables deferral (checked below) */
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, Increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						      &cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						   &cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				/* reference kept by the deferred work item */
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			/* close now; dclose (possibly NULL) is unused */
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

/*
 * After a reconnect, reopen every invalidated persistent handle on the tree
 * connection.  Handles are collected under the open_file_lock (with an extra
 * reference each) and reopened outside it; any failure re-arms
 * need_reopen_files so a later pass retries.
 */
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

/*
 * VFS ->release for directories: close any in-progress server-side search
 * handle, free the cached search buffer and the private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on?
 */
	free_xid(xid);
	return rc;
}

/*
 * Allocate and initialize a byte-range lock record for the current task.
 * Returns NULL on allocation failure.
 */
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

/*
 * Wake every waiter blocked on @lock's conflict list and unlink them so
 * they can re-attempt their lock.
 */
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* ranges that do not overlap cannot conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* shared locks coexist with the owner's own locks */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		      current->tgid == li->pid) || type == li->type))
			continue;
		/* OFD locks on the same fid do not conflict with each other */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

/*
 * Search every open fid of the inode for a lock conflicting with the given
 * range/type; on success the conflicting lock is returned via @conf_lock.
 * Caller must hold cinode->lock_sem.
 */
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock back through @flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Record @lock on this handle's lock list.  Takes lock_sem for write.
 */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* cache the lock locally; no server round trip needed */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* queue behind the conflicting lock and sleep until woken */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: unlink ourselves from the waiter list */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
1772 */ 1773 static int 1774 cifs_posix_lock_test(struct file *file, struct file_lock *flock) 1775 { 1776 int rc = 0; 1777 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); 1778 unsigned char saved_type = flock->c.flc_type; 1779 1780 if ((flock->c.flc_flags & FL_POSIX) == 0) 1781 return 1; 1782 1783 down_read(&cinode->lock_sem); 1784 posix_test_lock(file, flock); 1785 1786 if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) { 1787 flock->c.flc_type = saved_type; 1788 rc = 1; 1789 } 1790 1791 up_read(&cinode->lock_sem); 1792 return rc; 1793 } 1794 1795 /* 1796 * Set the byte-range lock (posix style). Returns: 1797 * 1) <0, if the error occurs while setting the lock; 1798 * 2) 0, if we set the lock and don't need to request to the server; 1799 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock; 1800 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server. 1801 */ 1802 static int 1803 cifs_posix_lock_set(struct file *file, struct file_lock *flock) 1804 { 1805 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); 1806 int rc = FILE_LOCK_DEFERRED + 1; 1807 1808 if ((flock->c.flc_flags & FL_POSIX) == 0) 1809 return rc; 1810 1811 cifs_down_write(&cinode->lock_sem); 1812 if (!cinode->can_cache_brlcks) { 1813 up_write(&cinode->lock_sem); 1814 return rc; 1815 } 1816 1817 rc = posix_lock_file(file, flock, NULL); 1818 up_write(&cinode->lock_sem); 1819 return rc; 1820 } 1821 1822 int 1823 cifs_push_mandatory_locks(struct cifsFileInfo *cfile) 1824 { 1825 unsigned int xid; 1826 int rc = 0, stored_rc; 1827 struct cifsLockInfo *li, *tmp; 1828 struct cifs_tcon *tcon; 1829 unsigned int num, max_num, max_buf; 1830 LOCKING_ANDX_RANGE *buf, *cur; 1831 static const int types[] = { 1832 LOCKING_ANDX_LARGE_FILES, 1833 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES 1834 }; 1835 int i; 1836 1837 xid = get_xid(); 1838 tcon = tlink_tcon(cfile->tlink); 1839 1840 /* 1841 * Accessing maxBuf is racy with cifs_reconnect - need to store value 1842 * and 
check it before using. 1843 */ 1844 max_buf = tcon->ses->server->maxBuf; 1845 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) { 1846 free_xid(xid); 1847 return -EINVAL; 1848 } 1849 1850 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > 1851 PAGE_SIZE); 1852 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), 1853 PAGE_SIZE); 1854 max_num = (max_buf - sizeof(struct smb_hdr)) / 1855 sizeof(LOCKING_ANDX_RANGE); 1856 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 1857 if (!buf) { 1858 free_xid(xid); 1859 return -ENOMEM; 1860 } 1861 1862 for (i = 0; i < 2; i++) { 1863 cur = buf; 1864 num = 0; 1865 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { 1866 if (li->type != types[i]) 1867 continue; 1868 cur->Pid = cpu_to_le16(li->pid); 1869 cur->LengthLow = cpu_to_le32((u32)li->length); 1870 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); 1871 cur->OffsetLow = cpu_to_le32((u32)li->offset); 1872 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); 1873 if (++num == max_num) { 1874 stored_rc = cifs_lockv(xid, tcon, 1875 cfile->fid.netfid, 1876 (__u8)li->type, 0, num, 1877 buf); 1878 if (stored_rc) 1879 rc = stored_rc; 1880 cur = buf; 1881 num = 0; 1882 } else 1883 cur++; 1884 } 1885 1886 if (num) { 1887 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, 1888 (__u8)types[i], 0, num, buf); 1889 if (stored_rc) 1890 rc = stored_rc; 1891 } 1892 } 1893 1894 kfree(buf); 1895 free_xid(xid); 1896 return rc; 1897 } 1898 1899 static __u32 1900 hash_lockowner(fl_owner_t owner) 1901 { 1902 return cifs_lock_secret ^ hash32_ptr((const void *)owner); 1903 } 1904 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1905 1906 struct lock_to_push { 1907 struct list_head llist; 1908 __u64 offset; 1909 __u64 length; 1910 __u32 pid; 1911 __u16 netfid; 1912 __u8 type; 1913 }; 1914 1915 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1916 static int 1917 cifs_push_posix_locks(struct cifsFileInfo *cfile) 1918 { 1919 struct 
inode *inode = d_inode(cfile->dentry); 1920 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1921 struct file_lock *flock; 1922 struct file_lock_context *flctx = locks_inode_context(inode); 1923 unsigned int count = 0, i; 1924 int rc = 0, xid, type; 1925 struct list_head locks_to_send, *el; 1926 struct lock_to_push *lck, *tmp; 1927 __u64 length; 1928 1929 xid = get_xid(); 1930 1931 if (!flctx) 1932 goto out; 1933 1934 spin_lock(&flctx->flc_lock); 1935 list_for_each(el, &flctx->flc_posix) { 1936 count++; 1937 } 1938 spin_unlock(&flctx->flc_lock); 1939 1940 INIT_LIST_HEAD(&locks_to_send); 1941 1942 /* 1943 * Allocating count locks is enough because no FL_POSIX locks can be 1944 * added to the list while we are holding cinode->lock_sem that 1945 * protects locking operations of this inode. 1946 */ 1947 for (i = 0; i < count; i++) { 1948 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); 1949 if (!lck) { 1950 rc = -ENOMEM; 1951 goto err_out; 1952 } 1953 list_add_tail(&lck->llist, &locks_to_send); 1954 } 1955 1956 el = locks_to_send.next; 1957 spin_lock(&flctx->flc_lock); 1958 for_each_file_lock(flock, &flctx->flc_posix) { 1959 unsigned char ftype = flock->c.flc_type; 1960 1961 if (el == &locks_to_send) { 1962 /* 1963 * The list ended. We don't have enough allocated 1964 * structures - something is really wrong. 
1965 */ 1966 cifs_dbg(VFS, "Can't push all brlocks!\n"); 1967 break; 1968 } 1969 length = cifs_flock_len(flock); 1970 if (ftype == F_RDLCK || ftype == F_SHLCK) 1971 type = CIFS_RDLCK; 1972 else 1973 type = CIFS_WRLCK; 1974 lck = list_entry(el, struct lock_to_push, llist); 1975 lck->pid = hash_lockowner(flock->c.flc_owner); 1976 lck->netfid = cfile->fid.netfid; 1977 lck->length = length; 1978 lck->type = type; 1979 lck->offset = flock->fl_start; 1980 } 1981 spin_unlock(&flctx->flc_lock); 1982 1983 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { 1984 int stored_rc; 1985 1986 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid, 1987 lck->offset, lck->length, NULL, 1988 lck->type, 0); 1989 if (stored_rc) 1990 rc = stored_rc; 1991 list_del(&lck->llist); 1992 kfree(lck); 1993 } 1994 1995 out: 1996 free_xid(xid); 1997 return rc; 1998 err_out: 1999 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { 2000 list_del(&lck->llist); 2001 kfree(lck); 2002 } 2003 goto out; 2004 } 2005 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2006 2007 static int 2008 cifs_push_locks(struct cifsFileInfo *cfile) 2009 { 2010 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 2011 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 2012 int rc = 0; 2013 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2014 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); 2015 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2016 2017 /* we are going to update can_cache_brlcks here - need a write access */ 2018 cifs_down_write(&cinode->lock_sem); 2019 if (!cinode->can_cache_brlcks) { 2020 up_write(&cinode->lock_sem); 2021 return rc; 2022 } 2023 2024 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2025 if (cap_unix(tcon->ses) && 2026 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2027 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2028 rc = cifs_push_posix_locks(cfile); 2029 else 2030 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2031 rc = 
tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Decode the VFS file_lock flags/type into the server's lock type bits and
 * set *lock / *unlock / *wait_flag accordingly.  Unrecognized flags and
 * types are only logged.
 */
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->c.flc_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->c.flc_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->c.flc_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->c.flc_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->c.flc_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->c.flc_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
			 flock->c.flc_flags);

	*type = server->vals->large_lock_type;
	if (lock_is_write(flock)) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (lock_is_unlock(flock)) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (lock_is_read(flock)) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}

/*
 * Handle F_GETLK: test whether the requested range is locked.  Falls back
 * to probing the server with a lock/unlock pair when the cached state can't
 * answer; on conflict the flock type is set to what would block us.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* probe succeeded - range is free; undo the probe lock */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->c.flc_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->c.flc_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed; try shared to distinguish read vs write */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type | server->vals->shared_lock_type, 0, 1, false);
		flock->c.flc_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->c.flc_type = F_WRLCK;

	return 0;
}

/*
 * Move every entry of @source onto the tail of @dest.
 */
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

/*
 * Return -EINVAL if some other open of this inode uses the same f_flags
 * (i.e. a hard link opened the same way already exists), 0 otherwise.
 */
int
cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
		       struct file *file)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cinode->open_file_lock);

	list_for_each_entry(open_file, &cinode->openFileList, flist) {
		if (file->f_flags == open_file->f_flags) {
			rc = -EINVAL;
			break;
		}
	}

	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);
	return rc;
}

/*
 * Free every lock on @llist, waking any waiters queued behind each entry
 * first so they can retry.
 */
void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = cifs_flock_len(flock);
	LIST_HEAD(tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
2221 */ 2222 max_buf = tcon->ses->server->maxBuf; 2223 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) 2224 return -EINVAL; 2225 2226 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > 2227 PAGE_SIZE); 2228 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), 2229 PAGE_SIZE); 2230 max_num = (max_buf - sizeof(struct smb_hdr)) / 2231 sizeof(LOCKING_ANDX_RANGE); 2232 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 2233 if (!buf) 2234 return -ENOMEM; 2235 2236 cifs_down_write(&cinode->lock_sem); 2237 for (i = 0; i < 2; i++) { 2238 cur = buf; 2239 num = 0; 2240 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { 2241 if (flock->fl_start > li->offset || 2242 (flock->fl_start + length) < 2243 (li->offset + li->length)) 2244 continue; 2245 if (current->tgid != li->pid) 2246 continue; 2247 if (types[i] != li->type) 2248 continue; 2249 if (cinode->can_cache_brlcks) { 2250 /* 2251 * We can cache brlock requests - simply remove 2252 * a lock from the file's list. 2253 */ 2254 list_del(&li->llist); 2255 cifs_del_lock_waiters(li); 2256 kfree(li); 2257 continue; 2258 } 2259 cur->Pid = cpu_to_le16(li->pid); 2260 cur->LengthLow = cpu_to_le32((u32)li->length); 2261 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); 2262 cur->OffsetLow = cpu_to_le32((u32)li->offset); 2263 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); 2264 /* 2265 * We need to save a lock here to let us add it again to 2266 * the file's list if the unlock range request fails on 2267 * the server. 2268 */ 2269 list_move(&li->llist, &tmp_llist); 2270 if (++num == max_num) { 2271 stored_rc = cifs_lockv(xid, tcon, 2272 cfile->fid.netfid, 2273 li->type, num, 0, buf); 2274 if (stored_rc) { 2275 /* 2276 * We failed on the unlock range 2277 * request - add all locks from the tmp 2278 * list to the head of the file's list. 
2279 */ 2280 cifs_move_llist(&tmp_llist, 2281 &cfile->llist->locks); 2282 rc = stored_rc; 2283 } else 2284 /* 2285 * The unlock range request succeed - 2286 * free the tmp list. 2287 */ 2288 cifs_free_llist(&tmp_llist); 2289 cur = buf; 2290 num = 0; 2291 } else 2292 cur++; 2293 } 2294 if (num) { 2295 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, 2296 types[i], num, 0, buf); 2297 if (stored_rc) { 2298 cifs_move_llist(&tmp_llist, 2299 &cfile->llist->locks); 2300 rc = stored_rc; 2301 } else 2302 cifs_free_llist(&tmp_llist); 2303 } 2304 } 2305 2306 up_write(&cinode->lock_sem); 2307 kfree(buf); 2308 return rc; 2309 } 2310 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2311 2312 static int 2313 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, 2314 bool wait_flag, bool posix_lck, int lock, int unlock, 2315 unsigned int xid) 2316 { 2317 int rc = 0; 2318 __u64 length = cifs_flock_len(flock); 2319 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; 2320 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 2321 struct TCP_Server_Info *server = tcon->ses->server; 2322 struct inode *inode = d_inode(cfile->dentry); 2323 2324 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2325 if (posix_lck) { 2326 int posix_lock_type; 2327 2328 rc = cifs_posix_lock_set(file, flock); 2329 if (rc <= FILE_LOCK_DEFERRED) 2330 return rc; 2331 2332 if (type & server->vals->shared_lock_type) 2333 posix_lock_type = CIFS_RDLCK; 2334 else 2335 posix_lock_type = CIFS_WRLCK; 2336 2337 if (unlock == 1) 2338 posix_lock_type = CIFS_UNLCK; 2339 2340 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid, 2341 hash_lockowner(flock->c.flc_owner), 2342 flock->fl_start, length, 2343 NULL, posix_lock_type, wait_flag); 2344 goto out; 2345 } 2346 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2347 if (lock) { 2348 struct cifsLockInfo *lock; 2349 2350 lock = cifs_lock_init(flock->fl_start, length, type, 2351 flock->c.flc_flags); 2352 if (!lock) 2353 return -ENOMEM; 2354 2355 rc = 
cifs_lock_add_if(cfile, lock, wait_flag); 2356 if (rc < 0) { 2357 kfree(lock); 2358 return rc; 2359 } 2360 if (!rc) 2361 goto out; 2362 2363 /* 2364 * Windows 7 server can delay breaking lease from read to None 2365 * if we set a byte-range lock on a file - break it explicitly 2366 * before sending the lock to the server to be sure the next 2367 * read won't conflict with non-overlapted locks due to 2368 * pagereading. 2369 */ 2370 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) && 2371 CIFS_CACHE_READ(CIFS_I(inode))) { 2372 cifs_zap_mapping(inode); 2373 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n", 2374 inode); 2375 cifs_reset_oplock(CIFS_I(inode)); 2376 } 2377 2378 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, 2379 type, 1, 0, wait_flag); 2380 if (rc) { 2381 kfree(lock); 2382 return rc; 2383 } 2384 2385 cifs_lock_add(cfile, lock); 2386 } else if (unlock) 2387 rc = server->ops->mand_unlock_range(cfile, flock, xid); 2388 2389 out: 2390 if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) { 2391 /* 2392 * If this is a request to remove all locks because we 2393 * are closing the file, it doesn't matter if the 2394 * unlocking failed as both cifs.ko and the SMB server 2395 * remove the lock on file close 2396 */ 2397 if (rc) { 2398 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc); 2399 if (!(flock->c.flc_flags & FL_CLOSE)) 2400 return rc; 2401 } 2402 rc = locks_lock_file_wait(file, flock); 2403 } 2404 return rc; 2405 } 2406 2407 int cifs_flock(struct file *file, int cmd, struct file_lock *fl) 2408 { 2409 int rc, xid; 2410 int lock = 0, unlock = 0; 2411 bool wait_flag = false; 2412 bool posix_lck = false; 2413 struct cifs_sb_info *cifs_sb; 2414 struct cifs_tcon *tcon; 2415 struct cifsFileInfo *cfile; 2416 __u32 type; 2417 2418 xid = get_xid(); 2419 2420 if (!(fl->c.flc_flags & FL_FLOCK)) { 2421 rc = -ENOLCK; 2422 free_xid(xid); 2423 return rc; 2424 } 2425 2426 cfile = (struct cifsFileInfo *)file->private_data; 2427 tcon = 
tlink_tcon(cfile->tlink); 2428 2429 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag, 2430 tcon->ses->server); 2431 cifs_sb = CIFS_FILE_SB(file); 2432 2433 if (cap_unix(tcon->ses) && 2434 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2435 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2436 posix_lck = true; 2437 2438 if (!lock && !unlock) { 2439 /* 2440 * if no lock or unlock then nothing to do since we do not 2441 * know what it is 2442 */ 2443 rc = -EOPNOTSUPP; 2444 free_xid(xid); 2445 return rc; 2446 } 2447 2448 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock, 2449 xid); 2450 free_xid(xid); 2451 return rc; 2452 2453 2454 } 2455 2456 int cifs_lock(struct file *file, int cmd, struct file_lock *flock) 2457 { 2458 int rc, xid; 2459 int lock = 0, unlock = 0; 2460 bool wait_flag = false; 2461 bool posix_lck = false; 2462 struct cifs_sb_info *cifs_sb; 2463 struct cifs_tcon *tcon; 2464 struct cifsFileInfo *cfile; 2465 __u32 type; 2466 2467 rc = -EACCES; 2468 xid = get_xid(); 2469 2470 cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd, 2471 flock->c.flc_flags, flock->c.flc_type, 2472 (long long)flock->fl_start, 2473 (long long)flock->fl_end); 2474 2475 cfile = (struct cifsFileInfo *)file->private_data; 2476 tcon = tlink_tcon(cfile->tlink); 2477 2478 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag, 2479 tcon->ses->server); 2480 cifs_sb = CIFS_FILE_SB(file); 2481 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags); 2482 2483 if (cap_unix(tcon->ses) && 2484 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2485 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2486 posix_lck = true; 2487 /* 2488 * BB add code here to normalize offset and length to account for 2489 * negative length which we can not accept over the wire. 
2490 */ 2491 if (IS_GETLK(cmd)) { 2492 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid); 2493 free_xid(xid); 2494 return rc; 2495 } 2496 2497 if (!lock && !unlock) { 2498 /* 2499 * if no lock or unlock then nothing to do since we do not 2500 * know what it is 2501 */ 2502 free_xid(xid); 2503 return -EOPNOTSUPP; 2504 } 2505 2506 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock, 2507 xid); 2508 free_xid(xid); 2509 return rc; 2510 } 2511 2512 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result) 2513 { 2514 struct netfs_io_request *wreq = wdata->rreq; 2515 struct netfs_inode *ictx = netfs_inode(wreq->inode); 2516 loff_t wrend; 2517 2518 if (result > 0) { 2519 wrend = wdata->subreq.start + wdata->subreq.transferred + result; 2520 2521 if (wrend > ictx->zero_point && 2522 (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE || 2523 wdata->rreq->origin == NETFS_DIO_WRITE)) 2524 ictx->zero_point = wrend; 2525 if (wrend > ictx->remote_i_size) 2526 netfs_resize_file(ictx, wrend, true); 2527 } 2528 2529 netfs_write_subrequest_terminated(&wdata->subreq, result); 2530 } 2531 2532 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 2533 bool fsuid_only) 2534 { 2535 struct cifsFileInfo *open_file = NULL; 2536 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); 2537 2538 /* only filter by fsuid on multiuser mounts */ 2539 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 2540 fsuid_only = false; 2541 2542 spin_lock(&cifs_inode->open_file_lock); 2543 /* we could simply get the first_list_entry since write-only entries 2544 are always at the end of the list but since the first entry might 2545 have a close pending, we go through the whole list */ 2546 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 2547 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) 2548 continue; 2549 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) { 2550 if 
((!open_file->invalidHandle)) { 2551 /* found a good file */ 2552 /* lock it so it will not be closed on us */ 2553 cifsFileInfo_get(open_file); 2554 spin_unlock(&cifs_inode->open_file_lock); 2555 return open_file; 2556 } /* else might as well continue, and look for 2557 another, or simply have the caller reopen it 2558 again rather than trying to fix this handle */ 2559 } else /* write only file */ 2560 break; /* write only files are last so must be done */ 2561 } 2562 spin_unlock(&cifs_inode->open_file_lock); 2563 return NULL; 2564 } 2565 2566 /* Return -EBADF if no handle is found and general rc otherwise */ 2567 int 2568 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, 2569 struct cifsFileInfo **ret_file) 2570 { 2571 struct cifsFileInfo *open_file, *inv_file = NULL; 2572 struct cifs_sb_info *cifs_sb; 2573 bool any_available = false; 2574 int rc = -EBADF; 2575 unsigned int refind = 0; 2576 bool fsuid_only = flags & FIND_WR_FSUID_ONLY; 2577 bool with_delete = flags & FIND_WR_WITH_DELETE; 2578 *ret_file = NULL; 2579 2580 /* 2581 * Having a null inode here (because mapping->host was set to zero by 2582 * the VFS or MM) should not happen but we had reports of on oops (due 2583 * to it being zero) during stress testcases so we need to check for it 2584 */ 2585 2586 if (cifs_inode == NULL) { 2587 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n"); 2588 dump_stack(); 2589 return rc; 2590 } 2591 2592 cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); 2593 2594 /* only filter by fsuid on multiuser mounts */ 2595 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 2596 fsuid_only = false; 2597 2598 spin_lock(&cifs_inode->open_file_lock); 2599 refind_writable: 2600 if (refind > MAX_REOPEN_ATT) { 2601 spin_unlock(&cifs_inode->open_file_lock); 2602 return rc; 2603 } 2604 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 2605 if (!any_available && open_file->pid != current->tgid) 2606 continue; 2607 if (fsuid_only && 
!uid_eq(open_file->uid, current_fsuid())) 2608 continue; 2609 if (with_delete && !(open_file->fid.access & DELETE)) 2610 continue; 2611 if ((flags & FIND_WR_NO_PENDING_DELETE) && 2612 open_file->status_file_deleted) 2613 continue; 2614 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { 2615 if (!open_file->invalidHandle) { 2616 /* found a good writable file */ 2617 cifsFileInfo_get(open_file); 2618 spin_unlock(&cifs_inode->open_file_lock); 2619 *ret_file = open_file; 2620 return 0; 2621 } else { 2622 if (!inv_file) 2623 inv_file = open_file; 2624 } 2625 } 2626 } 2627 /* couldn't find usable FH with same pid, try any available */ 2628 if (!any_available) { 2629 any_available = true; 2630 goto refind_writable; 2631 } 2632 2633 if (inv_file) { 2634 any_available = false; 2635 cifsFileInfo_get(inv_file); 2636 } 2637 2638 spin_unlock(&cifs_inode->open_file_lock); 2639 2640 if (inv_file) { 2641 rc = cifs_reopen_file(inv_file, false); 2642 if (!rc) { 2643 *ret_file = inv_file; 2644 return 0; 2645 } 2646 2647 spin_lock(&cifs_inode->open_file_lock); 2648 list_move_tail(&inv_file->flist, &cifs_inode->openFileList); 2649 spin_unlock(&cifs_inode->open_file_lock); 2650 cifsFileInfo_put(inv_file); 2651 ++refind; 2652 inv_file = NULL; 2653 spin_lock(&cifs_inode->open_file_lock); 2654 goto refind_writable; 2655 } 2656 2657 return rc; 2658 } 2659 2660 struct cifsFileInfo * 2661 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags) 2662 { 2663 struct cifsFileInfo *cfile; 2664 int rc; 2665 2666 rc = cifs_get_writable_file(cifs_inode, flags, &cfile); 2667 if (rc) 2668 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc); 2669 2670 return cfile; 2671 } 2672 2673 int 2674 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, 2675 int flags, 2676 struct cifsFileInfo **ret_file) 2677 { 2678 struct cifsFileInfo *cfile; 2679 void *page = alloc_dentry_path(); 2680 2681 *ret_file = NULL; 2682 2683 spin_lock(&tcon->open_file_lock); 2684 list_for_each_entry(cfile, 
&tcon->openFileList, tlist) { 2685 struct cifsInodeInfo *cinode; 2686 const char *full_path = build_path_from_dentry(cfile->dentry, page); 2687 if (IS_ERR(full_path)) { 2688 spin_unlock(&tcon->open_file_lock); 2689 free_dentry_path(page); 2690 return PTR_ERR(full_path); 2691 } 2692 if (strcmp(full_path, name)) 2693 continue; 2694 2695 cinode = CIFS_I(d_inode(cfile->dentry)); 2696 spin_unlock(&tcon->open_file_lock); 2697 free_dentry_path(page); 2698 return cifs_get_writable_file(cinode, flags, ret_file); 2699 } 2700 2701 spin_unlock(&tcon->open_file_lock); 2702 free_dentry_path(page); 2703 return -ENOENT; 2704 } 2705 2706 int 2707 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, 2708 struct cifsFileInfo **ret_file) 2709 { 2710 struct cifsFileInfo *cfile; 2711 void *page = alloc_dentry_path(); 2712 2713 *ret_file = NULL; 2714 2715 spin_lock(&tcon->open_file_lock); 2716 list_for_each_entry(cfile, &tcon->openFileList, tlist) { 2717 struct cifsInodeInfo *cinode; 2718 const char *full_path = build_path_from_dentry(cfile->dentry, page); 2719 if (IS_ERR(full_path)) { 2720 spin_unlock(&tcon->open_file_lock); 2721 free_dentry_path(page); 2722 return PTR_ERR(full_path); 2723 } 2724 if (strcmp(full_path, name)) 2725 continue; 2726 2727 cinode = CIFS_I(d_inode(cfile->dentry)); 2728 spin_unlock(&tcon->open_file_lock); 2729 free_dentry_path(page); 2730 *ret_file = find_readable_file(cinode, 0); 2731 if (*ret_file) { 2732 spin_lock(&cinode->open_file_lock); 2733 if ((*ret_file)->status_file_deleted) { 2734 spin_unlock(&cinode->open_file_lock); 2735 cifsFileInfo_put(*ret_file); 2736 *ret_file = NULL; 2737 } else { 2738 spin_unlock(&cinode->open_file_lock); 2739 } 2740 } 2741 return *ret_file ? 0 : -ENOENT; 2742 } 2743 2744 spin_unlock(&tcon->open_file_lock); 2745 free_dentry_path(page); 2746 return -ENOENT; 2747 } 2748 2749 /* 2750 * Flush data on a strict file. 
2751 */ 2752 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end, 2753 int datasync) 2754 { 2755 struct cifsFileInfo *smbfile = file->private_data; 2756 struct inode *inode = file_inode(file); 2757 unsigned int xid; 2758 int rc; 2759 2760 rc = file_write_and_wait_range(file, start, end); 2761 if (rc) { 2762 trace_cifs_fsync_err(inode->i_ino, rc); 2763 return rc; 2764 } 2765 2766 cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync); 2767 2768 if (!CIFS_CACHE_READ(CIFS_I(inode))) { 2769 rc = cifs_zap_mapping(inode); 2770 cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc); 2771 } 2772 2773 xid = get_xid(); 2774 rc = cifs_file_flush(xid, inode, smbfile); 2775 free_xid(xid); 2776 return rc; 2777 } 2778 2779 /* 2780 * Flush data on a non-strict data. 2781 */ 2782 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 2783 { 2784 unsigned int xid; 2785 int rc = 0; 2786 struct cifs_tcon *tcon; 2787 struct TCP_Server_Info *server; 2788 struct cifsFileInfo *smbfile = file->private_data; 2789 struct inode *inode = file_inode(file); 2790 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); 2791 2792 rc = file_write_and_wait_range(file, start, end); 2793 if (rc) { 2794 trace_cifs_fsync_err(file_inode(file)->i_ino, rc); 2795 return rc; 2796 } 2797 2798 xid = get_xid(); 2799 2800 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", 2801 file, datasync); 2802 2803 tcon = tlink_tcon(smbfile->tlink); 2804 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { 2805 server = tcon->ses->server; 2806 if (server->ops->flush == NULL) { 2807 rc = -ENOSYS; 2808 goto fsync_exit; 2809 } 2810 2811 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { 2812 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); 2813 if (smbfile) { 2814 rc = server->ops->flush(xid, tcon, &smbfile->fid); 2815 cifsFileInfo_put(smbfile); 2816 } else 2817 cifs_dbg(FYI, "ignore fsync for file not open for write\n"); 2818 } else 2819 rc = 
server->ops->flush(xid, tcon, &smbfile->fid); 2820 } 2821 2822 fsync_exit: 2823 free_xid(xid); 2824 return rc; 2825 } 2826 2827 /* 2828 * As file closes, flush all cached write data for this inode checking 2829 * for write behind errors. 2830 */ 2831 int cifs_flush(struct file *file, fl_owner_t id) 2832 { 2833 struct inode *inode = file_inode(file); 2834 int rc = 0; 2835 2836 if (file->f_mode & FMODE_WRITE) 2837 rc = filemap_write_and_wait(inode->i_mapping); 2838 2839 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc); 2840 if (rc) { 2841 /* get more nuanced writeback errors */ 2842 rc = filemap_check_wb_err(file->f_mapping, 0); 2843 trace_cifs_flush_err(inode->i_ino, rc); 2844 } 2845 return rc; 2846 } 2847 2848 static ssize_t 2849 cifs_writev(struct kiocb *iocb, struct iov_iter *from) 2850 { 2851 struct file *file = iocb->ki_filp; 2852 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; 2853 struct inode *inode = file->f_mapping->host; 2854 struct cifsInodeInfo *cinode = CIFS_I(inode); 2855 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; 2856 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2857 ssize_t rc; 2858 2859 rc = netfs_start_io_write(inode); 2860 if (rc < 0) 2861 return rc; 2862 2863 /* 2864 * We need to hold the sem to be sure nobody modifies lock list 2865 * with a brlock that prevents writing. 
2866 */ 2867 down_read(&cinode->lock_sem); 2868 2869 rc = generic_write_checks(iocb, from); 2870 if (rc <= 0) 2871 goto out; 2872 2873 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) && 2874 (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from), 2875 server->vals->exclusive_lock_type, 0, 2876 NULL, CIFS_WRITE_OP))) { 2877 rc = -EACCES; 2878 goto out; 2879 } 2880 2881 rc = netfs_buffered_write_iter_locked(iocb, from, NULL); 2882 2883 out: 2884 up_read(&cinode->lock_sem); 2885 netfs_end_io_write(inode); 2886 if (rc > 0) 2887 rc = generic_write_sync(iocb, rc); 2888 return rc; 2889 } 2890 2891 ssize_t 2892 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from) 2893 { 2894 struct inode *inode = file_inode(iocb->ki_filp); 2895 struct cifsInodeInfo *cinode = CIFS_I(inode); 2896 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2897 struct cifsFileInfo *cfile = (struct cifsFileInfo *) 2898 iocb->ki_filp->private_data; 2899 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 2900 ssize_t written; 2901 2902 written = cifs_get_writer(cinode); 2903 if (written) 2904 return written; 2905 2906 if (CIFS_CACHE_WRITE(cinode)) { 2907 if (cap_unix(tcon->ses) && 2908 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2909 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) { 2910 written = netfs_file_write_iter(iocb, from); 2911 goto out; 2912 } 2913 written = cifs_writev(iocb, from); 2914 goto out; 2915 } 2916 /* 2917 * For non-oplocked files in strict cache mode we need to write the data 2918 * to the server exactly from the pos to pos+len-1 rather than flush all 2919 * affected pages because it may cause a error with mandatory locks on 2920 * these pages but not on the region from pos to ppos+len-1. 2921 */ 2922 written = netfs_file_write_iter(iocb, from); 2923 if (CIFS_CACHE_READ(cinode)) { 2924 /* 2925 * We have read level caching and we have just sent a write 2926 * request to the server thus making data in the cache stale. 
2927 * Zap the cache and set oplock/lease level to NONE to avoid 2928 * reading stale data from the cache. All subsequent read 2929 * operations will read new data from the server. 2930 */ 2931 cifs_zap_mapping(inode); 2932 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n", 2933 inode); 2934 cifs_reset_oplock(cinode); 2935 } 2936 out: 2937 cifs_put_writer(cinode); 2938 return written; 2939 } 2940 2941 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter) 2942 { 2943 ssize_t rc; 2944 struct inode *inode = file_inode(iocb->ki_filp); 2945 2946 if (iocb->ki_flags & IOCB_DIRECT) 2947 return netfs_unbuffered_read_iter(iocb, iter); 2948 2949 rc = cifs_revalidate_mapping(inode); 2950 if (rc) 2951 return rc; 2952 2953 return netfs_file_read_iter(iocb, iter); 2954 } 2955 2956 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2957 { 2958 struct inode *inode = file_inode(iocb->ki_filp); 2959 struct cifsInodeInfo *cinode = CIFS_I(inode); 2960 ssize_t written; 2961 int rc; 2962 2963 if (iocb->ki_filp->f_flags & O_DIRECT) { 2964 written = netfs_unbuffered_write_iter(iocb, from); 2965 if (written > 0 && CIFS_CACHE_READ(cinode)) { 2966 cifs_zap_mapping(inode); 2967 cifs_dbg(FYI, 2968 "Set no oplock for inode=%p after a write operation\n", 2969 inode); 2970 cifs_reset_oplock(cinode); 2971 } 2972 return written; 2973 } 2974 2975 written = cifs_get_writer(cinode); 2976 if (written) 2977 return written; 2978 2979 written = netfs_file_write_iter(iocb, from); 2980 2981 if (!CIFS_CACHE_WRITE(CIFS_I(inode))) { 2982 rc = filemap_fdatawrite(inode->i_mapping); 2983 if (rc) 2984 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n", 2985 rc, inode); 2986 } 2987 2988 cifs_put_writer(cinode); 2989 return written; 2990 } 2991 2992 ssize_t 2993 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to) 2994 { 2995 struct inode *inode = file_inode(iocb->ki_filp); 2996 struct cifsInodeInfo *cinode = CIFS_I(inode); 2997 struct cifs_sb_info 
*cifs_sb = CIFS_SB(inode->i_sb); 2998 struct cifsFileInfo *cfile = (struct cifsFileInfo *) 2999 iocb->ki_filp->private_data; 3000 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 3001 int rc = -EACCES; 3002 3003 /* 3004 * In strict cache mode we need to read from the server all the time 3005 * if we don't have level II oplock because the server can delay mtime 3006 * change - so we can't make a decision about inode invalidating. 3007 * And we can also fail with pagereading if there are mandatory locks 3008 * on pages affected by this read but not on the region from pos to 3009 * pos+len-1. 3010 */ 3011 if (!CIFS_CACHE_READ(cinode)) 3012 return netfs_unbuffered_read_iter(iocb, to); 3013 3014 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) { 3015 if (iocb->ki_flags & IOCB_DIRECT) 3016 return netfs_unbuffered_read_iter(iocb, to); 3017 return netfs_buffered_read_iter(iocb, to); 3018 } 3019 3020 /* 3021 * We need to hold the sem to be sure nobody modifies lock list 3022 * with a brlock that prevents reading. 
3023 */ 3024 if (iocb->ki_flags & IOCB_DIRECT) { 3025 rc = netfs_start_io_direct(inode); 3026 if (rc < 0) 3027 goto out; 3028 rc = -EACCES; 3029 down_read(&cinode->lock_sem); 3030 if (!cifs_find_lock_conflict( 3031 cfile, iocb->ki_pos, iov_iter_count(to), 3032 tcon->ses->server->vals->shared_lock_type, 3033 0, NULL, CIFS_READ_OP)) 3034 rc = netfs_unbuffered_read_iter_locked(iocb, to); 3035 up_read(&cinode->lock_sem); 3036 netfs_end_io_direct(inode); 3037 } else { 3038 rc = netfs_start_io_read(inode); 3039 if (rc < 0) 3040 goto out; 3041 rc = -EACCES; 3042 down_read(&cinode->lock_sem); 3043 if (!cifs_find_lock_conflict( 3044 cfile, iocb->ki_pos, iov_iter_count(to), 3045 tcon->ses->server->vals->shared_lock_type, 3046 0, NULL, CIFS_READ_OP)) 3047 rc = filemap_read(iocb, to, 0); 3048 up_read(&cinode->lock_sem); 3049 netfs_end_io_read(inode); 3050 } 3051 out: 3052 return rc; 3053 } 3054 3055 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf) 3056 { 3057 return netfs_page_mkwrite(vmf, NULL); 3058 } 3059 3060 static const struct vm_operations_struct cifs_file_vm_ops = { 3061 .fault = filemap_fault, 3062 .map_pages = filemap_map_pages, 3063 .page_mkwrite = cifs_page_mkwrite, 3064 }; 3065 3066 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc) 3067 { 3068 int xid, rc = 0; 3069 struct inode *inode = file_inode(desc->file); 3070 3071 xid = get_xid(); 3072 3073 if (!CIFS_CACHE_READ(CIFS_I(inode))) 3074 rc = cifs_zap_mapping(inode); 3075 if (!rc) 3076 rc = generic_file_mmap_prepare(desc); 3077 if (!rc) 3078 desc->vm_ops = &cifs_file_vm_ops; 3079 3080 free_xid(xid); 3081 return rc; 3082 } 3083 3084 int cifs_file_mmap_prepare(struct vm_area_desc *desc) 3085 { 3086 int rc, xid; 3087 3088 xid = get_xid(); 3089 3090 rc = cifs_revalidate_file(desc->file); 3091 if (rc) 3092 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", 3093 rc); 3094 if (!rc) 3095 rc = generic_file_mmap_prepare(desc); 3096 if (!rc) 3097 desc->vm_ops = &cifs_file_vm_ops; 3098 3099 
free_xid(xid);
	return rc;
}

/*
 * Return 1 if any currently-open handle on @cifs_inode was opened with
 * FMODE_WRITE, 0 otherwise.  Walks openFileList under open_file_lock.
 */
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_inode->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
			    bool from_readdir)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode) ||
	    ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		/* Growing the file is safe; only shrinking/keeping races
		   with writeback extending it. */
		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

/*
 * Worker handling an oplock/lease break received from the server.
 *
 * Downgrades the cached oplock state for the inode, breaks any local lease,
 * flushes dirty pages (and invalidates the page cache when read caching is
 * lost or a purge was requested), pushes cached byte-range locks to the
 * server, closes deferred handles that can no longer be cached, and finally
 * sends the oplock break acknowledgment unless the break was cancelled or
 * all handles on the inode are already closed.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	bool cache_read, cache_write, cache_handle;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct tcon_link *tlink;
	unsigned int oplock;
	int rc = 0;
	bool purge_cache = false, oplock_break_cancelled;
	__u64 persistent_fid, volatile_fid;
	__u16 net_fid;

	/*
	 * Hold a reference to the superblock to prevent it and its inodes from
	 * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
	 * may release the last reference to the sb and trigger inode eviction.
	 */
	cifs_sb_active(sb);
	/* Let in-flight writers finish before we touch the cached state. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
		    TASK_UNINTERRUPTIBLE);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	/* Snapshot the downgraded caching state under open_file_lock. */
	scoped_guard(spinlock, &cinode->open_file_lock) {
		unsigned int sbflags = cifs_sb->mnt_cifs_flags;

		server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
					      cfile->oplock_epoch, &purge_cache);
		oplock = READ_ONCE(cinode->oplock);
		cache_read = (oplock & CIFS_CACHE_READ_FLG) ||
			     (sbflags & CIFS_MOUNT_RO_CACHE);
		cache_write = (oplock & CIFS_CACHE_WRITE_FLG) ||
			      (sbflags & CIFS_MOUNT_RW_CACHE);
		cache_handle = oplock & CIFS_CACHE_HANDLE_FLG;
	}

	if (!cache_write && cache_read && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cifs_reset_oplock(cinode);
		oplock = 0;
		cache_read = cache_write = cache_handle = false;
	}

	if (S_ISREG(inode->i_mode)) {
		if (cache_read)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!cache_read || purge_cache) {
			/* Read caching lost: wait for writeback and drop the
			   page cache so stale data is not served. */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (cache_write)
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When oplock break is received and there are no active
	 * file handles but cached, then schedule deferred close immediately.
	 * So, new open will not use cached handle.
	 */

	if (!cache_handle && !list_empty(&cinode->deferred_closes))
		cifs_close_deferred_file(cinode);

	/* Capture the fids before dropping our reference to cfile. */
	persistent_fid = cfile->fid.persistent_fid;
	volatile_fid = cfile->fid.volatile_fid;
	net_fid = cfile->fid.netfid;
	oplock_break_cancelled = cfile->oplock_break_cancelled;

	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	/*
	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
	 * an acknowledgment to be sent when the file has already been closed.
	 */
	spin_lock(&cinode->open_file_lock);
	/* check list empty since can race with kill_sb calling tree disconnect */
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid,
						  cinode, oplock);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	} else
		spin_unlock(&cinode->open_file_lock);

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
	cifs_sb_deactive(sb);
}

/*
 * Prepare @swap_file for use as swap space over SMB3: require a swap_rw
 * address-space op, reject files with holes (allocated blocks cover less
 * than i_size), mark the open handle as a swapfile and register a single
 * swap extent covering the whole file.
 */
static int cifs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	struct cifsFileInfo *cfile = swap_file->private_data;
	struct inode *inode = swap_file->f_mapping->host;
	unsigned long blocks;
	long long isize;

	cifs_dbg(FYI, "swap activate\n");

	if (!swap_file->f_mapping->a_ops->swap_rw)
		/* Cannot support swap */
		return -EINVAL;

	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	/* i_blocks is in 512-byte units; fewer than i_size means holes. */
	if (blocks*512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}
	*span = sis->pages;

	pr_warn_once("Swap support over SMB3 is experimental\n");

	/*
	 * TODO: consider adding ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
	 */

	/* TODO: add sk_set_memalloc(inet) or similar */

	if (cfile)
		cfile->swapfile = true;
	/*
	 * TODO: Since file already open, we can't open with DENY_ALL here
	 * but we could add call to grab a byte range lock to prevent others
	 * from reading or writing the file
	 */

	sis->flags |= SWP_FS_OPS;
	return add_swap_extent(sis, 0, sis->max, 0);
}

/* Undo cifs_swap_activate(): clear the swapfile flag on the open handle. */
static void cifs_swap_deactivate(struct file *file)
{
	struct cifsFileInfo *cfile = file->private_data;

	cifs_dbg(FYI, "swap deactivate\n");

	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */

	if (cfile)
		cfile->swapfile = false;

	/* do we need to unpin (or unlock) the file */
}

/**
 * cifs_swap_rw - SMB3 address space operation for swap I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * Perform IO to the swap-file.  This is much like direct IO.
 *
 * Return: 0 on success or a negative error code.
 */
static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;

	if (iov_iter_rw(iter) == READ)
		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
	else
		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
	if (ret < 0)
		return ret;
	return 0;
}

/* Address space operations for cifs regular files, backed by netfs. */
const struct address_space_operations cifs_addr_ops = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.writepages = netfs_writepages,
	.dirty_folio = netfs_dirty_folio,
	.release_folio = netfs_release_folio,
	.direct_IO = noop_direct_IO,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio = filemap_migrate_folio,
	/*
	 * TODO: investigate and if useful we could add an is_dirty_writeback
	 * helper if needed
	 */
	.swap_activate = cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
	.swap_rw = cifs_swap_rw,
};

/*
 * cifs_readahead requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readahead out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.read_folio = netfs_read_folio,
	.writepages = netfs_writepages,
	.dirty_folio = netfs_dirty_folio,
	.release_folio = netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio = filemap_migrate_folio,
};