// SPDX-License-Identifier: GPL-2.0
/*
 * SMB2 version specific operations
 *
 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
 */

#include <linux/pagemap.h>
#include <linux/vfs.h>
#include <linux/falloc.h>
#include <linux/scatterlist.h>
#include <linux/uuid.h>
#include <linux/sort.h>
#include <crypto/aead.h>
#include <linux/fiemap.h>
#include <linux/folio_queue.h>
#include <uapi/linux/magic.h>
#include "cifsfs.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "smb2pdu.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "../common/smb2status.h"
#include "smb2glob.h"
#include "cifs_ioctl.h"
#include "smbdirect.h"
#include "fscache.h"
#include "fs_context.h"
#include "cached_dir.h"
#include "reparse.h"

/* Change credits for different ops and return the total number of credits */
static int
change_conf(struct TCP_Server_Info *server)
{
	server->credits += server->echo_credits + server->oplock_credits;
	if (server->credits > server->max_credits)
		server->credits = server->max_credits;
	server->oplock_credits = server->echo_credits = 0;
	switch (server->credits) {
	case 0:
		return 0;
	case 1:
		server->echoes = false;
		server->oplocks = false;
		break;
	case 2:
		server->echoes = true;
		server->oplocks = false;
		server->echo_credits = 1;
		break;
	default:
		server->echoes = true;
		if (enable_oplocks) {
			server->oplocks = true;
			server->oplock_credits = 1;
		} else
			server->oplocks = false;

		server->echo_credits = 1;
	}
	server->credits -= server->echo_credits + server->oplock_credits;
	return server->credits + server->echo_credits + server->oplock_credits;
}

static void
smb2_add_credits(struct TCP_Server_Info *server,
		 struct cifs_credits *credits, const int optype)
{
	int *val, rc = -1;
	int scredits, in_flight;
	unsigned int add = credits->value;
	unsigned int instance = credits->instance;
	bool reconnect_detected = false;
	bool reconnect_with_invalid_credits = false;

	spin_lock(&server->req_lock);
	val = server->ops->get_credits_field(server, optype);

	/* e.g. we found a case where a write overlapping a reconnect messed up credits */
	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
		reconnect_with_invalid_credits = true;

	if ((instance == 0) || (instance == server->reconnect_instance))
		*val += add;
	else
		reconnect_detected = true;

	if (*val > 65000) {
		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
		pr_warn_once("server overflowed SMB3 credits\n");
		trace_smb3_overflow_credits(server->current_mid,
					    server->conn_id, server->hostname, *val,
					    add, server->in_flight);
	}
	if (credits->in_flight_check > 1) {
		pr_warn_once("rreq R=%08x[%x] Credits not in flight\n",
			     credits->rreq_debug_id, credits->rreq_debug_index);
	} else {
		credits->in_flight_check = 2;
	}
	if (WARN_ON_ONCE(server->in_flight == 0)) {
		pr_warn_once("rreq R=%08x[%x] Zero in_flight\n",
			     credits->rreq_debug_id, credits->rreq_debug_index);
		trace_smb3_rw_credits(credits->rreq_debug_id,
				      credits->rreq_debug_index,
				      credits->value,
				      server->credits, server->in_flight, 0,
				      cifs_trace_rw_credits_zero_in_flight);
	}
	server->in_flight--;
	if (server->in_flight == 0 &&
	    ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
	    ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
		rc = change_conf(server);
	/*
	 * Sometimes server returns 0 credits on oplock break ack - we need to
	 * rebalance credits in this case.
	 */
	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
		 server->oplocks) {
		if (server->credits > 1) {
			server->credits--;
			server->oplock_credits++;
		}
	} else if ((server->in_flight > 0) && (server->oplock_credits > 3) &&
		   ((optype & CIFS_OP_MASK) == CIFS_OBREAK_OP))
		/* if now have too many oplock credits, rebalance so don't starve normal ops */
		change_conf(server);

	scredits = *val;
	in_flight = server->in_flight;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	if (reconnect_detected) {
		trace_smb3_reconnect_detected(server->current_mid,
			server->conn_id, server->hostname, scredits, add, in_flight);

		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
			 add, instance);
	}

	if (reconnect_with_invalid_credits) {
		trace_smb3_reconnect_with_invalid_credits(server->current_mid,
			server->conn_id, server->hostname, scredits, add, in_flight);
		cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
			 optype, scredits, add);
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedReconnect
	    || server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return;
	}
	spin_unlock(&server->srv_lock);

	switch (rc) {
	case -1:
		/* change_conf hasn't been executed */
		break;
	case 0:
		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
		break;
	case 1:
		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
		break;
	case 2:
		cifs_dbg(FYI, "disabling oplocks\n");
		break;
	default:
		/* change_conf rebalanced credits for different types */
		break;
	}

	trace_smb3_add_credits(server->current_mid,
			server->conn_id, server->hostname, scredits, add, in_flight);
	cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
}

static void
smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
	int scredits, in_flight;

	spin_lock(&server->req_lock);
	server->credits = val;
	if (val == 1) {
		server->reconnect_instance++;
		/*
		 * ChannelSequence is tracked in the primary channel for all
		 * channels so that it stays consistent across SMB3 requests
		 * sent on any channel. See MS-SMB2 3.2.4.1 and 3.2.7.1
		 */
		if (SERVER_IS_CHAN(server))
			server->primary_server->channel_sequence_num++;
		else
			server->channel_sequence_num++;
	}
	scredits = server->credits;
	in_flight = server->in_flight;
	spin_unlock(&server->req_lock);

	trace_smb3_set_credits(server->current_mid,
			server->conn_id, server->hostname, scredits, val, in_flight);
	cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);

	/* don't log while holding the lock */
	if (val == 1)
		cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
}
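/*
 * Pick the credit counter charged for a given operation type: echo and
 * oplock break requests draw from their own small reserved pools so that
 * regular I/O cannot starve them.
 */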
static int *
smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
{
	switch (optype) {
	case CIFS_ECHO_OP:
		return &server->echo_credits;
	case CIFS_OBREAK_OP:
		return &server->oplock_credits;
	default:
		return &server->credits;
	}
}

static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
	return mid->credits_received;
}

static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
		      size_t *num, struct cifs_credits *credits)
{
	int rc = 0;
	unsigned int scredits, in_flight;

	spin_lock(&server->req_lock);
	while (1) {
		spin_unlock(&server->req_lock);

		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ENOENT;
		}
		spin_unlock(&server->srv_lock);

		spin_lock(&server->req_lock);
		if (server->credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
					has_credits(server, &server->credits, 1));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			scredits = server->credits;
			/* can deadlock with reopen */
			if (scredits <= 8) {
				*num = SMB2_MAX_BUFFER_SIZE;
				credits->value = 0;
				credits->instance = 0;
				break;
			}

			/* leave some credits for reopen and other ops */
			scredits -= 8;
			*num = min_t(unsigned int, size,
				     scredits * SMB2_MAX_BUFFER_SIZE);

			credits->value =
				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
			credits->instance = server->reconnect_instance;
			server->credits -= credits->value;
			server->in_flight++;
			if (server->in_flight > server->max_in_flight)
				server->max_in_flight = server->in_flight;
			break;
		}
	}
	scredits = server->credits;
	in_flight = server->in_flight;
	spin_unlock(&server->req_lock);

	trace_smb3_wait_credits(server->current_mid,
			server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
	cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
		 __func__, credits->value, scredits);

	return rc;
}

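/*
 * A read/write subrequest may end up needing fewer MTU credits than were
 * reserved for it (e.g. after a partial transfer); give the surplus back
 * to the server pool. Growing an in-flight request is not supported here.
 */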
static int
smb2_adjust_credits(struct TCP_Server_Info *server,
		    struct cifs_io_subrequest *subreq,
		    unsigned int /*enum smb3_rw_credits_trace*/ trace)
{
	struct cifs_credits *credits = &subreq->credits;
	int new_val = DIV_ROUND_UP(subreq->subreq.len - subreq->subreq.transferred,
				   SMB2_MAX_BUFFER_SIZE);
	int scredits, in_flight;

	if (!credits->value || credits->value == new_val)
		return 0;

	if (credits->value < new_val) {
		trace_smb3_rw_credits(subreq->rreq->debug_id,
				      subreq->subreq.debug_index,
				      credits->value,
				      server->credits, server->in_flight,
				      new_val - credits->value,
				      cifs_trace_rw_credits_no_adjust_up);
		trace_smb3_too_many_credits(server->current_mid,
				server->conn_id, server->hostname, 0, credits->value - new_val, 0);
		cifs_server_dbg(VFS, "R=%x[%x] request has less credits (%d) than required (%d)",
				subreq->rreq->debug_id, subreq->subreq.debug_index,
				credits->value, new_val);

		return -EOPNOTSUPP;
	}

	spin_lock(&server->req_lock);

	if (server->reconnect_instance != credits->instance) {
		scredits = server->credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_rw_credits(subreq->rreq->debug_id,
				      subreq->subreq.debug_index,
				      credits->value,
				      server->credits, server->in_flight,
				      new_val - credits->value,
				      cifs_trace_rw_credits_old_session);
		trace_smb3_reconnect_detected(server->current_mid,
			server->conn_id, server->hostname, scredits,
			credits->value - new_val, in_flight);
		cifs_server_dbg(VFS, "R=%x[%x] trying to return %d credits to old session\n",
				subreq->rreq->debug_id, subreq->subreq.debug_index,
				credits->value - new_val);
		return -EAGAIN;
	}

	trace_smb3_rw_credits(subreq->rreq->debug_id,
			      subreq->subreq.debug_index,
			      credits->value,
			      server->credits, server->in_flight,
			      new_val - credits->value, trace);
	server->credits += credits->value - new_val;
	scredits = server->credits;
	in_flight = server->in_flight;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);

	trace_smb3_adj_credits(server->current_mid,
			server->conn_id, server->hostname, scredits,
			credits->value - new_val, in_flight);
	cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
		 __func__, credits->value - new_val, scredits);

	credits->value = new_val;

	return 0;
}

static __u64
smb2_get_next_mid(struct TCP_Server_Info *server)
{
	__u64 mid;
	/* for SMB2 we need the current value */
	spin_lock(&server->mid_counter_lock);
	mid = server->current_mid++;
	spin_unlock(&server->mid_counter_lock);
	return mid;
}

static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
	spin_lock(&server->mid_counter_lock);
	if (server->current_mid >= val)
		server->current_mid -= val;
	spin_unlock(&server->mid_counter_lock);
}

static struct mid_q_entry *
__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
{
	struct mid_q_entry *mid;
	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
	__u64 wire_mid = le64_to_cpu(shdr->MessageId);

	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
		return NULL;
	}

	spin_lock(&server->mid_queue_lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if ((mid->mid == wire_mid) &&
		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
		    (mid->command == shdr->Command)) {
			smb_get_mid(mid);
			if (dequeue) {
				list_del_init(&mid->qhead);
				mid->deleted_from_q = true;
			}
			spin_unlock(&server->mid_queue_lock);
			return mid;
		}
	}
	spin_unlock(&server->mid_queue_lock);
	return NULL;
}

static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
	return __smb2_find_mid(server, buf, false);
}

static struct mid_q_entry *
smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
{
	return __smb2_find_mid(server, buf, true);
}

static void
smb2_dump_detail(void *buf, size_t buf_len, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;

	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
			shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
			shdr->Id.SyncId.ProcessId);
	if (!server->ops->check_message(buf, buf_len, server->total_read, server)) {
		cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
				server->ops->calc_smb_size(buf));
	}
#endif
}

static bool
smb2_need_neg(struct TCP_Server_Info *server)
{
	return server->max_read == 0;
}

static int
smb2_negotiate(const unsigned int xid,
	       struct cifs_ses *ses,
	       struct TCP_Server_Info *server)
{
	int rc;

	spin_lock(&server->mid_counter_lock);
	server->current_mid = 0;
	spin_unlock(&server->mid_counter_lock);
	rc = SMB2_negotiate(xid, ses, server);
	return rc;
}

static inline unsigned int
prevent_zero_iosize(unsigned int size, const char *type)
{
	if (size == 0) {
		cifs_dbg(VFS, "SMB: Zero %ssize calculated, using minimum value %u\n",
			 type, CIFS_MIN_DEFAULT_IOSIZE);
		return CIFS_MIN_DEFAULT_IOSIZE;
	}
	return size;
}

static unsigned int
smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = ctx->got_wsize ? ctx->vol_wsize : CIFS_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return prevent_zero_iosize(wsize, "w");
}

static unsigned int
smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	wsize = ctx->got_wsize ? ctx->vol_wsize : SMB3_DEFAULT_IOSIZE;
	wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		if (server->sign)
			/*
			 * Account for SMB2 data transfer packet header and
			 * possible encryption header
			 */
			wsize = min_t(unsigned int,
				      wsize,
				      sp->max_fragmented_send_size -
					SMB2_READWRITE_PDU_HEADER_SIZE -
					sizeof(struct smb2_transform_hdr));
		else
			wsize = min_t(unsigned int,
				      wsize, sp->max_read_write_size);
	}
#endif
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);

	return prevent_zero_iosize(wsize, "w");
}

static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = ctx->got_rsize ? ctx->vol_rsize : CIFS_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return prevent_zero_iosize(rsize, "r");
}

static unsigned int
smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* start with specified rsize, or default */
	rsize = ctx->got_rsize ? ctx->vol_rsize : SMB3_DEFAULT_IOSIZE;
	rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->rdma) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		if (server->sign)
			/*
			 * Account for SMB2 data transfer packet header and
			 * possible encryption header
			 */
			rsize = min_t(unsigned int,
				      rsize,
				      sp->max_fragmented_recv_size -
					SMB2_READWRITE_PDU_HEADER_SIZE -
					sizeof(struct smb2_transform_hdr));
		else
			rsize = min_t(unsigned int,
				      rsize, sp->max_read_write_size);
	}
#endif

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return prevent_zero_iosize(rsize, "r");
}

/*
 * compare two interfaces a and b
 * return 0 if everything matches.
 * return 1 if a is rdma capable, or rss capable, or has higher link speed
 * return -1 otherwise.
 */
static int
iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
{
	int cmp_ret = 0;

	WARN_ON(!a || !b);
	if (a->rdma_capable == b->rdma_capable) {
		if (a->rss_capable == b->rss_capable) {
			if (a->speed == b->speed) {
				cmp_ret = cifs_ipaddr_cmp((struct sockaddr *) &a->sockaddr,
							  (struct sockaddr *) &b->sockaddr);
				if (!cmp_ret)
					return 0;
				else if (cmp_ret > 0)
					return 1;
				else
					return -1;
			} else if (a->speed > b->speed)
				return 1;
			else
				return -1;
		} else if (a->rss_capable > b->rss_capable)
			return 1;
		else
			return -1;
	} else if (a->rdma_capable > b->rdma_capable)
		return 1;
	else
		return -1;
}

static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
			size_t buf_len, struct cifs_ses *ses, bool in_mount)
{
	struct network_interface_info_ioctl_rsp *p;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	struct smb_sockaddr_in *p4;
	struct smb_sockaddr_in6 *p6;
	struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
	struct cifs_server_iface tmp_iface;
	ssize_t bytes_left;
	size_t next = 0;
	int nb_iface = 0;
	int rc = 0, ret = 0;

	bytes_left = buf_len;
	p = buf;

	spin_lock(&ses->iface_lock);

	/*
	 * Go through iface_list and mark them as inactive
	 */
	list_for_each_entry_safe(iface, niface, &ses->iface_list,
				 iface_head)
		iface->is_active = 0;

	spin_unlock(&ses->iface_lock);

	/*
	 * Samba server e.g. can return an empty interface list in some cases,
	 * which would only be a problem if we were requesting multichannel
	 */
	if (bytes_left == 0) {
		/* avoid spamming logs every 10 minutes, so log only in mount */
		if ((ses->chan_max > 1) && in_mount)
			cifs_dbg(VFS,
				 "multichannel not available\n"
				 "Empty network interface list returned by server %s\n",
				 ses->server->hostname);
		rc = -EOPNOTSUPP;
		goto out;
	}

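	/*
	 * Each FSCTL_QUERY_NETWORK_INTERFACE_INFO record is at least
	 * sizeof(*p) bytes and is chained to the following record via its
	 * Next offset (0 terminates the list).
	 */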
	while (bytes_left >= (ssize_t)sizeof(*p)) {
		memset(&tmp_iface, 0, sizeof(tmp_iface));
		/* default to 1Gbps when link speed is unset */
		tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000;
		tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
		tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;

		switch (p->Family) {
		/*
		 * The kernel and wire socket structures have the same
		 * layout and use network byte order but make the
		 * conversion explicit in case either one changes.
		 */
		case INTERNETWORK:
			addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
			p4 = (struct smb_sockaddr_in *)p->Buffer;
			addr4->sin_family = AF_INET;
			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);

			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
			addr4->sin_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
				 &addr4->sin_addr);
			break;
		case INTERNETWORKV6:
			addr6 = (struct sockaddr_in6 *)&tmp_iface.sockaddr;
			p6 = (struct smb_sockaddr_in6 *)p->Buffer;
			addr6->sin6_family = AF_INET6;
			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);

			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
			addr6->sin6_flowinfo = 0;
			addr6->sin6_scope_id = 0;
			addr6->sin6_port = cpu_to_be16(CIFS_PORT);

			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
				 &addr6->sin6_addr);
			break;
		default:
			cifs_dbg(VFS,
				 "%s: skipping unsupported socket family\n",
				 __func__);
			goto next_iface;
		}

		/*
		 * The iface_list is assumed to be sorted by speed.
		 * Check if the new interface exists in that list.
		 * NEVER change iface. it could be in use.
		 * Add a new one instead
		 */
		spin_lock(&ses->iface_lock);
		list_for_each_entry_safe(iface, niface, &ses->iface_list,
					 iface_head) {
			ret = iface_cmp(iface, &tmp_iface);
			if (!ret) {
				iface->is_active = 1;
				spin_unlock(&ses->iface_lock);
				goto next_iface;
			} else if (ret < 0) {
				/* all remaining ifaces are slower */
				kref_get(&iface->refcount);
				break;
			}
		}
		spin_unlock(&ses->iface_lock);

		/* no match. insert the entry in the list */
		info = kmalloc(sizeof(struct cifs_server_iface),
			       GFP_KERNEL);
		if (!info) {
			rc = -ENOMEM;
			goto out;
		}
		memcpy(info, &tmp_iface, sizeof(tmp_iface));

		/* add this new entry to the list */
		kref_init(&info->refcount);
		info->is_active = 1;

		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count);
		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
			 le32_to_cpu(p->Capability));

		spin_lock(&ses->iface_lock);
		if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
			list_add_tail(&info->iface_head, &iface->iface_head);
			kref_put(&iface->refcount, release_iface);
		} else
			list_add_tail(&info->iface_head, &ses->iface_list);

		ses->iface_count++;
		spin_unlock(&ses->iface_lock);
next_iface:
		nb_iface++;
		next = le32_to_cpu(p->Next);
		if (!next) {
			bytes_left -= sizeof(*p);
			break;
		}
		/* Validate that Next doesn't point beyond the buffer */
		if (next > bytes_left) {
			cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n",
				 __func__, next, bytes_left);
			rc = -EINVAL;
			goto out;
		}
		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
		bytes_left -= next;
	}

	if (!nb_iface) {
		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	/* Azure rounds the buffer size up 8, to a 16 byte boundary */
	if ((bytes_left > 8) ||
	    (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next)
	     + sizeof(p->Next) && p->Next))
		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);

out:
	/*
	 * Go through the list again and put the inactive entries
	 */
	spin_lock(&ses->iface_lock);
	list_for_each_entry_safe(iface, niface, &ses->iface_list,
				 iface_head) {
		if (!iface->is_active) {
			list_del(&iface->iface_head);
			kref_put(&iface->refcount, release_iface);
			ses->iface_count--;
		}
	}
	spin_unlock(&ses->iface_lock);

	return rc;
}

int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount)
{
	int rc;
	unsigned int ret_data_len = 0;
	struct network_interface_info_ioctl_rsp *out_buf = NULL;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *pserver;

	/* do not query too frequently */
	spin_lock(&ses->iface_lock);
	if (ses->iface_last_update &&
	    time_before(jiffies, ses->iface_last_update +
			(SMB_INTERFACE_POLL_INTERVAL * HZ))) {
		spin_unlock(&ses->iface_lock);
		return 0;
	}

	ses->iface_last_update = jiffies;

	spin_unlock(&ses->iface_lock);

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_QUERY_NETWORK_INTERFACE_INFO,
			NULL /* no data input */, 0 /* no data input */,
			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
	if (rc == -EOPNOTSUPP) {
		cifs_dbg(FYI,
			 "server does not support query network interfaces\n");
		ret_data_len = 0;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
		goto out;
	}

	rc = parse_server_interfaces(out_buf, ret_data_len, ses, in_mount);
	if (rc)
		goto out;

	/* check if iface is still active */
	spin_lock(&ses->chan_lock);
	pserver = ses->chans[0].server;
	if (pserver && !cifs_chan_is_iface_active(ses, pserver)) {
		spin_unlock(&ses->chan_lock);
		cifs_chan_update_iface(ses, pserver);
		spin_lock(&ses->chan_lock);
	}
	spin_unlock(&ses->chan_lock);

out:
	kfree(out_buf);
	return rc;
}

static void
smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
	      struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct cached_fid *cfid = NULL;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = "",
		.desired_access = FILE_READ_ATTRIBUTES,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, 0),
		.fid = &fid,
	};

	rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
	if (rc == 0)
		memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid));
	else
		rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
			       NULL, NULL);
	if (rc)
		return;

	SMB3_request_interfaces(xid, tcon, true /* called during mount */);

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_VOLUME_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
	if (cfid == NULL)
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	else
		close_cached_dir(cfid);
}

static void
smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
	      struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 srch_path = 0; /* Null - open root of share */
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = "",
		.desired_access = FILE_READ_ATTRIBUTES,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, 0),
		.fid = &fid,
	};

	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
		       NULL, NULL);
	if (rc)
		return;

	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_ATTRIBUTE_INFORMATION);
	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
			FS_DEVICE_INFORMATION);
	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}

static int
smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
			struct cifs_sb_info *cifs_sb, const char *full_path)
{
	__le16 *utf16_path;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	int err_buftype = CIFS_NO_BUFFER;
	struct cifs_open_parms oparms;
	struct kvec err_iov = {};
	struct cifs_fid fid;
	struct cached_fid *cfid;
	bool islink;
	int rc, rc2;

	rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
	if (!rc) {
		close_cached_dir(cfid);
		return 0;
	}

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = full_path,
		.desired_access = FILE_READ_ATTRIBUTES,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, 0),
		.fid = &fid,
	};

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
		       &err_iov, &err_buftype);
	if (rc) {
		struct smb2_hdr *hdr = err_iov.iov_base;

		if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
			goto out;

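		/*
		 * STATUS_OBJECT_NAME_INVALID may really mean that the path
		 * is a DFS link; if it is, report -EREMOTE instead.
		 */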
		if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
							     full_path, &islink);
			if (rc2) {
				rc = rc2;
				goto out;
			}
			if (islink)
				rc = -EREMOTE;
		}
		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
			rc = -EOPNOTSUPP;
		goto out;
	}

	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);

out:
	free_rsp_buf(err_buftype, err_iov.iov_base);
	kfree(utf16_path);
	return rc;
}

static int smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
			     struct cifs_sb_info *cifs_sb, const char *full_path,
			     u64 *uniqueid, struct cifs_open_info_data *data)
{
	*uniqueid = le64_to_cpu(data->fi.IndexNumber);
	return 0;
}

static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
				struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
{
	struct cifs_fid *fid = &cfile->fid;

	if (cfile->symlink_target) {
		data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
		if (!data->symlink_target)
			return -ENOMEM;
	}
	data->contains_posix_file_info = false;
	return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi);
}

#ifdef CONFIG_CIFS_XATTR
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name_len = (size_t)src->ea_name_length;
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		if (name_len == 0)
			break;

		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = smb_EIO2(smb_eio_trace_ea_overrun,
				      src_size, 8 + name_len + 1 + value_len);
			goto out;
		}

		name = &src->ea_data[0];
		value = &src->ea_data[src->ea_name_length + 1];

		if (ea_name) {
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}

static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	rc = smb2_query_info_compound(xid, tcon, path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
				  le32_to_cpu(rsp->OutputBufferLength), ea_name);

qeas_exit:
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}

static int
smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    const char *path, const char *ea_name, const void *ea_value,
	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
	    struct cifs_sb_info *cifs_sb)
{
	struct smb2_compound_vars *vars;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	struct smb_rqst *rqst;
	struct kvec *rsp_iov;
	__le16 *utf16_path = NULL;
	int ea_name_len = strlen(ea_name);
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	int len;
	int resp_buftype[3];
	struct cifs_open_parms oparms;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	unsigned int size[1];
	void *data[1];
	struct smb2_file_full_ea_info *ea;
	struct smb2_query_info_rsp *rsp;
	int rc, used_len = 0;
	int retries = 0, cur_sleep = 0;

replay_again:
	/* reinitialize for possible replay */
	flags = CIFS_CP_CREATE_CLOSE_OP;
	oplock = SMB2_OPLOCK_LEVEL_NONE;
	server = cifs_pick_channel(ses);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	if (ea_name_len > 255)
		return -EINVAL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	ea = NULL;
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	vars = kzalloc(sizeof(*vars), GFP_KERNEL);
	if (!vars) {
		rc = -ENOMEM;
		goto out_free_path;
	}
	rqst = vars->rqst;
	rsp_iov = vars->rsp_iov;

	if (ses->server->ops->query_all_EAs) {
		if (!ea_value) {
			rc = ses->server->ops->query_all_EAs(xid, tcon, path,
							     ea_name, NULL, 0,
							     cifs_sb);
			if (rc == -ENODATA)
				goto sea_exit;
		} else {
			/* If we are adding an attribute we should first check
			 * if there will be enough space available to store
			 * the new EA. If not we should not add it since we
			 * would not be able to even read the EAs back.
			 */
			rc = smb2_query_info_compound(xid, tcon, path,
						      FILE_READ_EA,
						      FILE_FULL_EA_INFORMATION,
						      SMB2_O_INFO_FILE,
						      CIFSMaxBufSize -
						      MAX_SMB2_CREATE_RESPONSE_SIZE -
						      MAX_SMB2_CLOSE_RESPONSE_SIZE,
						      &rsp_iov[1], &resp_buftype[1], cifs_sb);
			if (rc == 0) {
				rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
				used_len = le32_to_cpu(rsp->OutputBufferLength);
			}
			free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
			resp_buftype[1] = CIFS_NO_BUFFER;
			memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
			rc = 0;

			/* Use a fudge factor of 256 bytes in case we collide
			 * with a different set_EAs command.
			 */
			if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
			    MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
			    used_len + ea_name_len + ea_value_len + 1) {
				rc = -ENOSPC;
				goto sea_exit;
			}
		}
	}

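	/*
	 * The EA is set with a single compound round trip:
	 * CREATE (FILE_WRITE_EA) + SET_INFO(FILE_FULL_EA_INFORMATION) + CLOSE.
	 */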
	/* Open */
	rqst[0].rq_iov = vars->open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.desired_access = FILE_WRITE_EA,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, 0),
		.fid = &fid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto sea_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* Set Info */
	rqst[1].rq_iov = vars->si_iov;
	rqst[1].rq_nvec = 1;

	len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
	ea = kzalloc(len, GFP_KERNEL);
	if (ea == NULL) {
		rc = -ENOMEM;
		goto sea_exit;
	}

	ea->ea_name_length = ea_name_len;
	ea->ea_value_length = cpu_to_le16(ea_value_len);
	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);

	size[0] = len;
	data[0] = ea;

	rc = SMB2_set_info_init(tcon, server,
				&rqst[1], COMPOUND_FID,
				COMPOUND_FID, current->tgid,
				FILE_FULL_EA_INFORMATION,
				SMB2_O_INFO_FILE, 0, data, size);
	if (rc)
		goto sea_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Close */
	rqst[2].rq_iov = &vars->close_iov;
	rqst[2].rq_nvec = 1;
	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto sea_exit;
	smb2_set_related(&rqst[2]);

	if (retries) {
		/* Back-off before retry */
		if (cur_sleep)
			msleep(cur_sleep);
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
		smb2_set_replay(server, &rqst[2]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);
	/* no need to bump num_remote_opens because handle immediately closed */

sea_exit:
	kfree(ea);
	SMB2_open_free(&rqst[0]);
	SMB2_set_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	kfree(vars);
out_free_path:
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}
#endif

static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}

static void
smb2_clear_stats(struct cifs_tcon *tcon)
{
	int i;

	for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
	}
}

static void
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_puts(m, "\n\tShare Capabilities:");
	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
		seq_puts(m, " DFS,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
		seq_puts(m, " CONTINUOUS AVAILABILITY,");
	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
		seq_puts(m, " SCALEOUT,");
	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
		seq_puts(m, " CLUSTER,");
	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
		seq_puts(m, " ASYMMETRIC,");
	if (tcon->capabilities == 0)
		seq_puts(m, " None");
	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		seq_puts(m, " Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		seq_puts(m, " Partition Aligned,");
	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		seq_puts(m, " SSD,");
	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
		seq_puts(m, " TRIM-support,");

	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
	if (tcon->perf_sector_size)
		seq_printf(m, "\tOptimal sector size: 0x%x",
			   tcon->perf_sector_size);
	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
}

static void
smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;

	/*
	 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
	 * totals (requests sent) since those SMBs are per-session not per tcon
	 */
	seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
		   (long long)(tcon->bytes_read),
		   (long long)(tcon->bytes_written));
	seq_printf(m, "\nOpen files: %d total (local), %d open on server",
		   atomic_read(&tcon->num_local_opens),
		   atomic_read(&tcon->num_remote_opens));
	seq_printf(m, "\nTreeConnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
	seq_printf(m, "\nTreeDisconnects: %d total %d failed",
		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
	seq_printf(m, "\nCreates: %d total %d failed",
		   atomic_read(&sent[SMB2_CREATE_HE]),
		   atomic_read(&failed[SMB2_CREATE_HE]));
	seq_printf(m, "\nCloses: %d total %d failed",
		   atomic_read(&sent[SMB2_CLOSE_HE]),
		   atomic_read(&failed[SMB2_CLOSE_HE]));
	seq_printf(m, "\nFlushes: %d total %d failed",
		   atomic_read(&sent[SMB2_FLUSH_HE]),
		   atomic_read(&failed[SMB2_FLUSH_HE]));
	seq_printf(m, "\nReads: %d total %d failed",
		   atomic_read(&sent[SMB2_READ_HE]),
		   atomic_read(&failed[SMB2_READ_HE]));
	seq_printf(m, "\nWrites: %d total %d failed",
		   atomic_read(&sent[SMB2_WRITE_HE]),
		   atomic_read(&failed[SMB2_WRITE_HE]));
	seq_printf(m, "\nLocks: %d total %d failed",
		   atomic_read(&sent[SMB2_LOCK_HE]),
		   atomic_read(&failed[SMB2_LOCK_HE]));
	seq_printf(m, "\nIOCTLs: %d total %d failed",
		   atomic_read(&sent[SMB2_IOCTL_HE]),
		   atomic_read(&failed[SMB2_IOCTL_HE]));
	seq_printf(m, "\nQueryDirectories: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
	seq_printf(m, "\nChangeNotifies: %d total %d failed",
		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
	seq_printf(m, "\nQueryInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
	seq_printf(m, "\nSetInfos: %d total %d failed",
		   atomic_read(&sent[SMB2_SET_INFO_HE]),
		   atomic_read(&failed[SMB2_SET_INFO_HE]));
	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
}

static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
	cfile->fid.access = fid->access;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}

static int
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}

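/*
 * Close a handle and use the network-open info returned in the SMB2 CLOSE
 * response to refresh the inode's timestamps and block count.
 */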
static int
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct smb2_file_network_open_info file_inf;
	struct inode *inode;
	int rc;

	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
			  cfile->fid.volatile_fid, &file_inf);
	if (rc)
		return rc;

	inode = d_inode(cfile->dentry);

	spin_lock(&inode->i_lock);
	CIFS_I(inode)->time = jiffies;

	/* Creation time should not need to be updated on close */
	if (file_inf.LastWriteTime)
		inode_set_mtime_to_ts(inode,
				      cifs_NTtimeToUnix(file_inf.LastWriteTime));
	if (file_inf.ChangeTime)
		inode_set_ctime_to_ts(inode,
				      cifs_NTtimeToUnix(file_inf.ChangeTime));
	if (file_inf.LastAccessTime)
		inode_set_atime_to_ts(inode,
				      cifs_NTtimeToUnix(file_inf.LastAccessTime));

	/*
	 * i_blocks is not derived from (i_size / i_blksize); it is counted
	 * in 512-byte (2**9) units.
	 */
	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
		inode->i_blocks =
			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;

	/* End of file and Attributes should not have to be updated on close */
	spin_unlock(&inode->i_lock);
	return rc;
}

static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid,
		     struct copychunk_ioctl_req *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_ioctl_rsp *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
			CIFSMaxBufSize, (char **)&res_key, &ret_data_len);

	if (rc == -EOPNOTSUPP) {
		pr_warn_once("Server share %s does not support copy range\n", tcon->tree_name);
		goto req_res_key_exit;
	} else if (rc) {
		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	if (ret_data_len < sizeof(struct resume_key_ioctl_rsp)) {
		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	kfree(res_key);
	return rc;
}

static int
smb2_ioctl_query_info(const unsigned int xid,
		      struct cifs_tcon *tcon,
		      struct cifs_sb_info *cifs_sb,
		      __le16 *path, int is_dir,
		      unsigned long p)
{
	struct smb2_compound_vars *vars;
	struct smb_rqst *rqst;
	struct kvec *rsp_iov;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	char __user *arg = (char __user *)p;
	struct smb_query_info qi;
	struct smb_query_info __user *pqi;
	int rc = 0;
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	struct smb2_ioctl_rsp *io_rsp = NULL;
	void *buffer = NULL;
	int resp_buftype[3];
	struct cifs_open_parms oparms;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	unsigned int size[2];
	void *data[2];
	int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
	void (*free_req1_func)(struct smb_rqst *r);
	int retries = 0, cur_sleep = 0;

replay_again:
	/* reinitialize for possible replay */
	flags = CIFS_CP_CREATE_CLOSE_OP;
	oplock = SMB2_OPLOCK_LEVEL_NONE;
	server = cifs_pick_channel(ses);

	vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
	if (vars == NULL)
		return -ENOMEM;
	rqst = &vars->rqst[0];
	rsp_iov = &vars->rsp_iov[0];

	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;

	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
		rc = -EFAULT;
		goto free_vars;
	}
	if (qi.output_buffer_length > 1024) {
		rc = -EINVAL;
		goto free_vars;
	}

	if (!ses || !server) {
		rc = smb_EIO(smb_eio_trace_null_pointers);
		goto free_vars;
	}

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	if (qi.output_buffer_length) {
		buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
		if (IS_ERR(buffer)) {
			rc = PTR_ERR(buffer);
			goto free_vars;
		}
	}

	/* Open */
	rqst[0].rq_iov = &vars->open_iov[0];
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.fid = &fid,
		.replay = !!(retries),
	};

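	/*
	 * Derive the CREATE desired_access from the FSCTL access bits (or
	 * from the passthrough operation type) so the open grants what the
	 * compounded request below will need.
	 */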
	if (qi.flags & PASSTHRU_FSCTL) {
		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
		case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
			oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
			oparms.desired_access = GENERIC_ALL;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
			oparms.desired_access = GENERIC_READ;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
			oparms.desired_access = GENERIC_WRITE;
			break;
		}
	} else if (qi.flags & PASSTHRU_SET_INFO) {
		oparms.desired_access = GENERIC_WRITE;
	} else {
		oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
	}

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, path);
	if (rc)
		goto free_output_buffer;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query */
	if (qi.flags & PASSTHRU_FSCTL) {
		/* Can eventually relax perm check since server enforces too */
		if (!capable(CAP_SYS_ADMIN)) {
			rc = -EPERM;
			goto free_open_req;
		}
		rqst[1].rq_iov = &vars->io_iov[0];
		rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

		rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
				     qi.info_type, buffer, qi.output_buffer_length,
				     CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
				     MAX_SMB2_CLOSE_RESPONSE_SIZE);
		free_req1_func = SMB2_ioctl_free;
	} else if (qi.flags == PASSTHRU_SET_INFO) {
		/* Can eventually relax perm check since server enforces too */
		if (!capable(CAP_SYS_ADMIN)) {
			rc = -EPERM;
			goto free_open_req;
		}
		if (qi.output_buffer_length < 8) {
			rc = -EINVAL;
			goto free_open_req;
		}
		rqst[1].rq_iov = vars->si_iov;
		rqst[1].rq_nvec = 1;

		/* MS-FSCC 2.4.13 FileEndOfFileInformation */
		size[0] = 8;
		data[0] = buffer;

		rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
					current->tgid, FILE_END_OF_FILE_INFORMATION,
					SMB2_O_INFO_FILE, 0, data, size);
		free_req1_func = SMB2_set_info_free;
	} else if (qi.flags == PASSTHRU_QUERY_INFO) {
		rqst[1].rq_iov = &vars->qi_iov;
		rqst[1].rq_nvec = 1;

		rc = SMB2_query_info_init(tcon, server,
					  &rqst[1], COMPOUND_FID,
					  COMPOUND_FID, qi.file_info_class,
					  qi.info_type, qi.additional_information,
					  qi.input_buffer_length,
					  qi.output_buffer_length, buffer);
		free_req1_func = SMB2_query_info_free;
	} else { /* unknown flags */
		cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
			      qi.flags);
		rc = -EINVAL;
	}

	if (rc)
		goto free_open_req;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Close */
	rqst[2].rq_iov = &vars->close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto free_req_1;
	smb2_set_related(&rqst[2]);

	if (retries) {
		/* Back-off before retry */
		if (cur_sleep)
			msleep(cur_sleep);
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
		smb2_set_replay(server, &rqst[2]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc)
		goto out;

	/* No need to bump num_remote_opens since handle immediately closed */
	if (qi.flags & PASSTHRU_FSCTL) {
		pqi = (struct smb_query_info __user *)arg;
		io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
		if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
			qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
		if (qi.input_buffer_length > 0 &&
		    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
		    > rsp_iov[1].iov_len) {
			rc = -EFAULT;
			goto out;
		}

		if (copy_to_user(&pqi->input_buffer_length,
				 &qi.input_buffer_length,
				 sizeof(qi.input_buffer_length))) {
			rc = -EFAULT;
			goto out;
		}

		if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
				 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
				 qi.input_buffer_length))
			rc = -EFAULT;
	} else {
		pqi = (struct smb_query_info __user *)arg;
		qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
		if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
			qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
		if (copy_to_user(&pqi->input_buffer_length,
				 &qi.input_buffer_length,
				 sizeof(qi.input_buffer_length))) {
			rc = -EFAULT;
			goto out;
		}

		if (copy_to_user(pqi + 1, qi_rsp->Buffer,
				 qi.input_buffer_length))
			rc = -EFAULT;
	}

out:
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	SMB2_close_free(&rqst[2]);
free_req_1:
	free_req1_func(&rqst[1]);
free_open_req:
	SMB2_open_free(&rqst[0]);
free_output_buffer:
	kfree(buffer);
free_vars:
	kfree(vars);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

/**
 * calc_chunk_count - calculates the number of chunks to be filled in the
 * Chunks[] array of struct copychunk_ioctl
 *
 * @tcon: destination file tcon
 * @bytes_left: how many bytes are left to copy
 *
 * Return: maximum number of chunks with which Chunks[] can be filled.
 */
static inline u32
calc_chunk_count(struct cifs_tcon *tcon, u64 bytes_left)
{
	u32 max_chunks = READ_ONCE(tcon->max_chunks);
	u32 max_bytes_copy = READ_ONCE(tcon->max_bytes_copy);
	u32 max_bytes_chunk = READ_ONCE(tcon->max_bytes_chunk);
	u64 need;
	u32 allowed;

	if (!max_bytes_chunk || !max_bytes_copy || !max_chunks)
		return 0;

	/* chunks needed for the remaining bytes */
	need = DIV_ROUND_UP_ULL(bytes_left, max_bytes_chunk);
	/* chunks allowed per cc request */
	allowed = DIV_ROUND_UP(max_bytes_copy, max_bytes_chunk);

	return (u32)umin(need, umin(max_chunks, allowed));
}

/**
 * smb2_copychunk_range - server-side copy of data range
 *
 * @xid: transaction id
 * @src_file: source file
 * @dst_file: destination file
 * @src_off: source file byte offset
 * @len: number of bytes to copy
 * @dst_off: destination file byte offset
 *
 * Obtains a resume key for @src_file and issues FSCTL_SRV_COPYCHUNK_WRITE
 * IOCTLs, splitting the request into chunks limited by tcon->max_*.
 *
 * Return: @len on success; negative errno on failure.
 */
static ssize_t
smb2_copychunk_range(const unsigned int xid,
			struct cifsFileInfo *src_file,
			struct cifsFileInfo *dst_file,
			u64 src_off,
			u64 len,
			u64 dst_off)
{
	int rc = 0;
	unsigned int ret_data_len = 0;
	struct copychunk_ioctl_req *cc_req = NULL;
	struct copychunk_ioctl_rsp *cc_rsp = NULL;
	struct cifs_tcon *tcon;
	struct srv_copychunk *chunk;
	u32 chunks, chunk_count, chunk_bytes;
	u32 copy_bytes, copy_bytes_left;
	u32 chunks_written, bytes_written;
	u64 total_bytes_left = len;
	u64 src_off_prev, dst_off_prev;
	u32 retries = 0;

	tcon = tlink_tcon(dst_file->tlink);

	trace_smb3_copychunk_enter(xid, src_file->fid.volatile_fid,
				   dst_file->fid.volatile_fid, tcon->tid,
				   tcon->ses->Suid, src_off, dst_off, len);

retry:
	chunk_count = calc_chunk_count(tcon, total_bytes_left);
	if (!chunk_count) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	cc_req = kzalloc(struct_size(cc_req, Chunks, chunk_count), GFP_KERNEL);
	if (!cc_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Request a key from the server to identify the source of the copy */
	rc = SMB2_request_res_key(xid,
				  tlink_tcon(src_file->tlink),
				  src_file->fid.persistent_fid,
				  src_file->fid.volatile_fid,
				  cc_req);

	/* Note: request_res_key sets res_key null only if rc != 0 */
	if (rc)
		goto out;

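	/*
	 * Each loop iteration sends one FSCTL_SRV_COPYCHUNK_WRITE with up to
	 * chunk_count chunks; on a partial write the offsets are rewound by
	 * the shortfall, and on -EINVAL the tcon copychunk limits are
	 * refreshed from the response and the request is retried.
	 */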
set chunk->Reserved = 0 */ 1930 1931 src_off += chunk_bytes; 1932 dst_off += chunk_bytes; 1933 1934 copy_bytes_left -= chunk_bytes; 1935 copy_bytes += chunk_bytes; 1936 } 1937 1938 cc_req->ChunkCount = cpu_to_le32(chunks); 1939 /* Buffer is zeroed, no need to set cc_req->Reserved = 0 */ 1940 1941 /* Request server copy to target from src identified by key */ 1942 kfree(cc_rsp); 1943 cc_rsp = NULL; 1944 rc = SMB2_ioctl(xid, tcon, dst_file->fid.persistent_fid, 1945 dst_file->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, 1946 (char *)cc_req, struct_size(cc_req, Chunks, chunks), 1947 CIFSMaxBufSize, (char **)&cc_rsp, &ret_data_len); 1948 1949 if (rc && rc != -EINVAL) 1950 goto out; 1951 1952 if (unlikely(ret_data_len != sizeof(*cc_rsp))) { 1953 cifs_tcon_dbg(VFS, "Copychunk invalid response: size %u/%zu\n", 1954 ret_data_len, sizeof(*cc_rsp)); 1955 rc = smb_EIO1(smb_eio_trace_copychunk_inv_rsp, ret_data_len); 1956 goto out; 1957 } 1958 1959 bytes_written = le32_to_cpu(cc_rsp->TotalBytesWritten); 1960 chunks_written = le32_to_cpu(cc_rsp->ChunksWritten); 1961 chunk_bytes = le32_to_cpu(cc_rsp->ChunkBytesWritten); 1962 1963 if (rc == 0) { 1964 /* Check if server claimed to write more than we asked */ 1965 if (unlikely(!bytes_written || bytes_written > copy_bytes)) { 1966 cifs_tcon_dbg(VFS, "Copychunk invalid response: bytes written %u/%u\n", 1967 bytes_written, copy_bytes); 1968 rc = smb_EIO2(smb_eio_trace_copychunk_overcopy_b, 1969 bytes_written, copy_bytes); 1970 goto out; 1971 } 1972 if (unlikely(!chunks_written || chunks_written > chunks)) { 1973 cifs_tcon_dbg(VFS, "Copychunk invalid response: chunks written %u/%u\n", 1974 chunks_written, chunks); 1975 rc = smb_EIO2(smb_eio_trace_copychunk_overcopy_c, 1976 chunks_written, chunks); 1977 goto out; 1978 } 1979 1980 /* Partial write: rewind */ 1981 if (bytes_written < copy_bytes) { 1982 u32 delta = copy_bytes - bytes_written; 1983 1984 src_off -= delta; 1985 dst_off -= delta; 1986 } 1987 1988 total_bytes_left -= bytes_written; 1989 continue; 1990 } 1991 1992 /* 1993 * Check if server is not asking us to reduce size. 1994 * 1995 * Note: As per MS-SMB2 2.2.32.1, the values returned 1996 * in cc_rsp are not strictly lower than what existed 1997 * before. 
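 *
 * Worked example (editor's illustration, the numbers are made up):
 * suppose we sent 16 chunks of 1MiB each and the server rejects the
 * request with rc == -EINVAL while reporting ChunksWritten = 8,
 * ChunkBytesWritten = 256KiB and TotalBytesWritten = 2MiB. The checks
 * below then shrink tcon->max_bytes_copy, tcon->max_chunks and
 * tcon->max_bytes_chunk to those values, the offsets are rewound to
 * src_off_prev/dst_off_prev, and the request is rebuilt (at most two
 * retries) with the smaller limits.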
1998 */ 1999 if (bytes_written < tcon->max_bytes_copy) { 2000 cifs_tcon_dbg(FYI, "Copychunk MaxBytesCopy updated: %u -> %u\n", 2001 tcon->max_bytes_copy, bytes_written); 2002 tcon->max_bytes_copy = bytes_written; 2003 } 2004 2005 if (chunks_written < tcon->max_chunks) { 2006 cifs_tcon_dbg(FYI, "Copychunk MaxChunks updated: %u -> %u\n", 2007 tcon->max_chunks, chunks_written); 2008 tcon->max_chunks = chunks_written; 2009 } 2010 2011 if (chunk_bytes < tcon->max_bytes_chunk) { 2012 cifs_tcon_dbg(FYI, "Copychunk MaxBytesChunk updated: %u -> %u\n", 2013 tcon->max_bytes_chunk, chunk_bytes); 2014 tcon->max_bytes_chunk = chunk_bytes; 2015 } 2016 2017 /* reset to last offsets */ 2018 if (retries++ < 2) { 2019 src_off = src_off_prev; 2020 dst_off = dst_off_prev; 2021 kfree(cc_req); 2022 cc_req = NULL; 2023 goto retry; 2024 } 2025 2026 break; 2027 } 2028 2029 out: 2030 kfree(cc_req); 2031 kfree(cc_rsp); 2032 if (rc) { 2033 trace_smb3_copychunk_err(xid, src_file->fid.volatile_fid, 2034 dst_file->fid.volatile_fid, tcon->tid, 2035 tcon->ses->Suid, src_off, dst_off, len, rc); 2036 return rc; 2037 } else { 2038 trace_smb3_copychunk_done(xid, src_file->fid.volatile_fid, 2039 dst_file->fid.volatile_fid, tcon->tid, 2040 tcon->ses->Suid, src_off, dst_off, len); 2041 return len; 2042 } 2043 } 2044 2045 static int 2046 smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon, 2047 struct cifs_fid *fid) 2048 { 2049 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid); 2050 } 2051 2052 static unsigned int 2053 smb2_read_data_offset(char *buf) 2054 { 2055 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf; 2056 2057 return rsp->DataOffset; 2058 } 2059 2060 static unsigned int 2061 smb2_read_data_length(char *buf, bool in_remaining) 2062 { 2063 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf; 2064 2065 if (in_remaining) 2066 return le32_to_cpu(rsp->DataRemaining); 2067 2068 return le32_to_cpu(rsp->DataLength); 2069 } 2070 2071 2072 static int 2073 smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid, 2074 struct cifs_io_parms *parms, unsigned int *bytes_read, 2075 char **buf, int *buf_type) 2076 { 2077 parms->persistent_fid = pfid->persistent_fid; 2078 parms->volatile_fid = pfid->volatile_fid; 2079 return SMB2_read(xid, parms, bytes_read, buf, buf_type); 2080 } 2081 2082 static int 2083 smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid, 2084 struct cifs_io_parms *parms, unsigned int *written, 2085 struct kvec *iov, unsigned long nr_segs) 2086 { 2087 2088 parms->persistent_fid = pfid->persistent_fid; 2089 parms->volatile_fid = pfid->volatile_fid; 2090 return SMB2_write(xid, parms, written, iov, nr_segs); 2091 } 2092 2093 /* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */ 2094 static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, 2095 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse) 2096 { 2097 struct cifsInodeInfo *cifsi; 2098 int rc; 2099 2100 cifsi = CIFS_I(inode); 2101 2102 /* if file already sparse don't bother setting sparse again */ 2103 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse) 2104 return true; /* already sparse */ 2105 2106 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse) 2107 return true; /* already not sparse */ 2108 2109 /* 2110 * Can't check for sparse support on share the usual way via the 2111 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share 2112 * since Samba server doesn't set the flag on the share, yet 2113 * supports the 
set sparse FSCTL and returns sparse correctly 2114 * in the file attributes. If we fail setting sparse though we 2115 * mark that server does not support sparse files for this share 2116 * to avoid repeatedly sending the unsupported fsctl to server 2117 * if the file is repeatedly extended. 2118 */ 2119 if (tcon->broken_sparse_sup) 2120 return false; 2121 2122 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 2123 cfile->fid.volatile_fid, FSCTL_SET_SPARSE, 2124 &setsparse, 1, CIFSMaxBufSize, NULL, NULL); 2125 if (rc) { 2126 tcon->broken_sparse_sup = true; 2127 cifs_dbg(FYI, "set sparse rc = %d\n", rc); 2128 return false; 2129 } 2130 2131 if (setsparse) 2132 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE; 2133 else 2134 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE); 2135 2136 return true; 2137 } 2138 2139 static int 2140 smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon, 2141 struct cifsFileInfo *cfile, __u64 size, bool set_alloc) 2142 { 2143 struct inode *inode; 2144 2145 /* 2146 * If extending file more than one page make sparse. Many Linux fs 2147 * make files sparse by default when extending via ftruncate 2148 */ 2149 inode = d_inode(cfile->dentry); 2150 2151 if (!set_alloc && (size > inode->i_size + 8192)) { 2152 __u8 set_sparse = 1; 2153 2154 /* whether set sparse succeeds or not, extend the file */ 2155 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse); 2156 } 2157 2158 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, 2159 cfile->fid.volatile_fid, cfile->pid, size); 2160 } 2161 2162 static int 2163 smb2_duplicate_extents(const unsigned int xid, 2164 struct cifsFileInfo *srcfile, 2165 struct cifsFileInfo *trgtfile, u64 src_off, 2166 u64 len, u64 dest_off) 2167 { 2168 int rc; 2169 unsigned int ret_data_len; 2170 struct inode *inode; 2171 struct duplicate_extents_to_file dup_ext_buf; 2172 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink); 2173 2174 /* server fileays advertise duplicate extent support with this flag */ 2175 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) & 2176 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0) 2177 return -EOPNOTSUPP; 2178 2179 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid; 2180 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid; 2181 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off); 2182 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off); 2183 dup_ext_buf.ByteCount = cpu_to_le64(len); 2184 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n", 2185 src_off, dest_off, len); 2186 trace_smb3_clone_enter(xid, srcfile->fid.volatile_fid, 2187 trgtfile->fid.volatile_fid, tcon->tid, 2188 tcon->ses->Suid, src_off, dest_off, len); 2189 inode = d_inode(trgtfile->dentry); 2190 if (inode->i_size < dest_off + len) { 2191 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false); 2192 if (rc) 2193 goto duplicate_extents_out; 2194 2195 /* 2196 * Although also could set plausible allocation size (i_blocks) 2197 * here in addition to setting the file size, in reflink 2198 * it is likely that the target file is sparse. 
Its allocation 2199 * size will be queried on next revalidate, but it is important 2200 * to make sure that file's cached size is updated immediately 2201 */ 2202 netfs_resize_file(netfs_inode(inode), dest_off + len, true); 2203 cifs_setsize(inode, dest_off + len); 2204 } 2205 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, 2206 trgtfile->fid.volatile_fid, 2207 FSCTL_DUPLICATE_EXTENTS_TO_FILE, 2208 (char *)&dup_ext_buf, 2209 sizeof(struct duplicate_extents_to_file), 2210 CIFSMaxBufSize, NULL, 2211 &ret_data_len); 2212 2213 if (ret_data_len > 0) 2214 cifs_dbg(FYI, "Non-zero response length in duplicate extents\n"); 2215 2216 duplicate_extents_out: 2217 if (rc) 2218 trace_smb3_clone_err(xid, srcfile->fid.volatile_fid, 2219 trgtfile->fid.volatile_fid, 2220 tcon->tid, tcon->ses->Suid, src_off, 2221 dest_off, len, rc); 2222 else 2223 trace_smb3_clone_done(xid, srcfile->fid.volatile_fid, 2224 trgtfile->fid.volatile_fid, tcon->tid, 2225 tcon->ses->Suid, src_off, dest_off, len); 2226 return rc; 2227 } 2228 2229 static int 2230 smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, 2231 struct cifsFileInfo *cfile) 2232 { 2233 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid, 2234 cfile->fid.volatile_fid); 2235 } 2236 2237 static int 2238 smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, 2239 struct cifsFileInfo *cfile) 2240 { 2241 struct fsctl_set_integrity_information_req integr_info; 2242 unsigned int ret_data_len; 2243 2244 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED); 2245 integr_info.Flags = 0; 2246 integr_info.Reserved = 0; 2247 2248 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 2249 cfile->fid.volatile_fid, 2250 FSCTL_SET_INTEGRITY_INFORMATION, 2251 (char *)&integr_info, 2252 sizeof(struct fsctl_set_integrity_information_req), 2253 CIFSMaxBufSize, NULL, 2254 &ret_data_len); 2255 2256 } 2257 2258 /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ 2259 #define GMT_TOKEN_SIZE 50 2260 2261 #define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */ 2262 2263 /* 2264 * Input buffer contains (empty) struct smb_snapshot array with size filled in 2265 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 2266 */ 2267 static int 2268 smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, 2269 struct cifsFileInfo *cfile, void __user *ioc_buf) 2270 { 2271 char *retbuf = NULL; 2272 unsigned int ret_data_len = 0; 2273 int rc; 2274 u32 max_response_size; 2275 struct smb_snapshot_array snapshot_in; 2276 2277 /* 2278 * On the first query to enumerate the list of snapshots available 2279 * for this volume the buffer begins with 0 (number of snapshots 2280 * which can be returned is zero since at that point we do not know 2281 * how big the buffer needs to be). On the second query, 2282 * it (ret_data_len) is set to number of snapshots so we can 2283 * know to set the maximum response size larger (see below). 2284 */ 2285 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf)) 2286 return -EFAULT; 2287 2288 /* 2289 * Note that for snapshot queries that servers like Azure expect that 2290 * the first query be minimal size (and just used to get the number/size 2291 * of previous versions) so response size must be specified as EXACTLY 2292 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple 2293 * of eight bytes. 
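 *
 * The resulting two-pass flow, roughly (editor's sketch):
 *  1) the caller passes in a zeroed snapshot header, so ret_data_len
 *     read above is 0 and the FSCTL below is sent with
 *     max_response_size == MIN_SNAPSHOT_ARRAY_SIZE (16 bytes), which
 *     returns only the SRV_SNAPSHOT_ARRAY counts and sizes;
 *  2) the caller reissues the ioctl with a buffer sized from pass one,
 *     ret_data_len is now non-zero, and CIFSMaxBufSize is used so the
 *     header plus the 50 byte (GMT_TOKEN_SIZE) @GMT tokens fit.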
2294 */ 2295 if (ret_data_len == 0) 2296 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE; 2297 else 2298 max_response_size = CIFSMaxBufSize; 2299 2300 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 2301 cfile->fid.volatile_fid, 2302 FSCTL_SRV_ENUMERATE_SNAPSHOTS, 2303 NULL, 0 /* no input data */, max_response_size, 2304 (char **)&retbuf, 2305 &ret_data_len); 2306 cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n", 2307 rc, ret_data_len); 2308 if (rc) 2309 return rc; 2310 2311 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) { 2312 /* Fixup buffer */ 2313 if (copy_from_user(&snapshot_in, ioc_buf, 2314 sizeof(struct smb_snapshot_array))) { 2315 rc = -EFAULT; 2316 kfree(retbuf); 2317 return rc; 2318 } 2319 2320 /* 2321 * Check for min size, ie not large enough to fit even one GMT 2322 * token (snapshot). On the first ioctl some users may pass in 2323 * smaller size (or zero) to simply get the size of the array 2324 * so the user space caller can allocate sufficient memory 2325 * and retry the ioctl again with larger array size sufficient 2326 * to hold all of the snapshot GMT tokens on the second try. 2327 */ 2328 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE) 2329 ret_data_len = sizeof(struct smb_snapshot_array); 2330 2331 /* 2332 * We return struct SRV_SNAPSHOT_ARRAY, followed by 2333 * the snapshot array (of 50 byte GMT tokens) each 2334 * representing an available previous version of the data 2335 */ 2336 if (ret_data_len > (snapshot_in.snapshot_array_size + 2337 sizeof(struct smb_snapshot_array))) 2338 ret_data_len = snapshot_in.snapshot_array_size + 2339 sizeof(struct smb_snapshot_array); 2340 2341 if (copy_to_user(ioc_buf, retbuf, ret_data_len)) 2342 rc = -EFAULT; 2343 } 2344 2345 kfree(retbuf); 2346 return rc; 2347 } 2348 2349 2350 2351 static int 2352 smb3_notify(const unsigned int xid, struct file *pfile, 2353 void __user *ioc_buf, bool return_changes) 2354 { 2355 struct smb3_notify_info notify; 2356 struct smb3_notify_info __user *pnotify_buf; 2357 struct dentry *dentry = pfile->f_path.dentry; 2358 struct inode *inode = file_inode(pfile); 2359 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2360 struct cifs_open_parms oparms; 2361 struct cifs_fid fid; 2362 struct cifs_tcon *tcon; 2363 const unsigned char *path; 2364 char *returned_ioctl_info = NULL; 2365 void *page = alloc_dentry_path(); 2366 __le16 *utf16_path = NULL; 2367 u8 oplock = SMB2_OPLOCK_LEVEL_NONE; 2368 int rc = 0; 2369 __u32 ret_len = 0; 2370 2371 path = build_path_from_dentry(dentry, page); 2372 if (IS_ERR(path)) { 2373 rc = PTR_ERR(path); 2374 goto notify_exit; 2375 } 2376 2377 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); 2378 if (utf16_path == NULL) { 2379 rc = -ENOMEM; 2380 goto notify_exit; 2381 } 2382 2383 if (return_changes) { 2384 if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify_info))) { 2385 rc = -EFAULT; 2386 goto notify_exit; 2387 } 2388 } else { 2389 if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) { 2390 rc = -EFAULT; 2391 goto notify_exit; 2392 } 2393 notify.data_len = 0; 2394 } 2395 2396 tcon = cifs_sb_master_tcon(cifs_sb); 2397 oparms = (struct cifs_open_parms) { 2398 .tcon = tcon, 2399 .path = path, 2400 .desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA, 2401 .disposition = FILE_OPEN, 2402 .create_options = cifs_create_options(cifs_sb, 0), 2403 .fid = &fid, 2404 }; 2405 2406 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL, 2407 NULL); 2408 if (rc) 2409 goto notify_exit; 2410 2411 rc =
SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid, 2412 notify.watch_tree, notify.completion_filter, 2413 notify.data_len, &returned_ioctl_info, &ret_len); 2414 2415 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 2416 2417 cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc); 2418 if (return_changes && (ret_len > 0) && (notify.data_len > 0)) { 2419 if (ret_len > notify.data_len) 2420 ret_len = notify.data_len; 2421 pnotify_buf = (struct smb3_notify_info __user *)ioc_buf; 2422 if (copy_to_user(pnotify_buf->notify_data, returned_ioctl_info, ret_len)) 2423 rc = -EFAULT; 2424 else if (copy_to_user(&pnotify_buf->data_len, &ret_len, sizeof(ret_len))) 2425 rc = -EFAULT; 2426 } 2427 kfree(returned_ioctl_info); 2428 notify_exit: 2429 free_dentry_path(page); 2430 kfree(utf16_path); 2431 return rc; 2432 } 2433 2434 static int 2435 smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, 2436 const char *path, struct cifs_sb_info *cifs_sb, 2437 struct cifs_fid *fid, __u16 search_flags, 2438 struct cifs_search_info *srch_inf) 2439 { 2440 __le16 *utf16_path; 2441 struct smb_rqst rqst[2]; 2442 struct kvec rsp_iov[2]; 2443 int resp_buftype[2]; 2444 struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; 2445 struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE]; 2446 int rc, flags = 0; 2447 u8 oplock = SMB2_OPLOCK_LEVEL_NONE; 2448 struct cifs_open_parms oparms; 2449 struct smb2_query_directory_rsp *qd_rsp = NULL; 2450 struct smb2_create_rsp *op_rsp = NULL; 2451 struct TCP_Server_Info *server; 2452 int retries = 0, cur_sleep = 0; 2453 2454 replay_again: 2455 /* reinitialize for possible replay */ 2456 flags = 0; 2457 oplock = SMB2_OPLOCK_LEVEL_NONE; 2458 server = cifs_pick_channel(tcon->ses); 2459 2460 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); 2461 if (!utf16_path) 2462 return -ENOMEM; 2463 2464 if (smb3_encryption_required(tcon)) 2465 flags |= CIFS_TRANSFORM_REQ; 2466 2467 memset(rqst, 0, sizeof(rqst)); 2468 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; 2469 memset(rsp_iov, 0, sizeof(rsp_iov)); 2470 2471 /* Open */ 2472 memset(&open_iov, 0, sizeof(open_iov)); 2473 rqst[0].rq_iov = open_iov; 2474 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; 2475 2476 oparms = (struct cifs_open_parms) { 2477 .tcon = tcon, 2478 .path = path, 2479 .desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA, 2480 .disposition = FILE_OPEN, 2481 .create_options = cifs_create_options(cifs_sb, 0), 2482 .fid = fid, 2483 .replay = !!(retries), 2484 }; 2485 2486 rc = SMB2_open_init(tcon, server, 2487 &rqst[0], &oplock, &oparms, utf16_path); 2488 if (rc) 2489 goto qdf_free; 2490 smb2_set_next_command(tcon, &rqst[0]); 2491 2492 /* Query directory */ 2493 srch_inf->entries_in_buffer = 0; 2494 srch_inf->index_of_last_entry = 2; 2495 2496 memset(&qd_iov, 0, sizeof(qd_iov)); 2497 rqst[1].rq_iov = qd_iov; 2498 rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE; 2499 2500 rc = SMB2_query_directory_init(xid, tcon, server, 2501 &rqst[1], 2502 COMPOUND_FID, COMPOUND_FID, 2503 0, srch_inf->info_level); 2504 if (rc) 2505 goto qdf_free; 2506 2507 smb2_set_related(&rqst[1]); 2508 2509 if (retries) { 2510 /* Back-off before retry */ 2511 if (cur_sleep) 2512 msleep(cur_sleep); 2513 smb2_set_replay(server, &rqst[0]); 2514 smb2_set_replay(server, &rqst[1]); 2515 } 2516 2517 rc = compound_send_recv(xid, tcon->ses, server, 2518 flags, 2, rqst, 2519 resp_buftype, rsp_iov); 2520 2521 /* If the open failed there is nothing to do */ 2522 op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; 2523 if (op_rsp == NULL 
|| op_rsp->hdr.Status != STATUS_SUCCESS) { 2524 cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc); 2525 goto qdf_free; 2526 } 2527 fid->persistent_fid = op_rsp->PersistentFileId; 2528 fid->volatile_fid = op_rsp->VolatileFileId; 2529 2530 /* Anything else than ENODATA means a genuine error */ 2531 if (rc && rc != -ENODATA) { 2532 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); 2533 cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc); 2534 trace_smb3_query_dir_err(xid, fid->persistent_fid, 2535 tcon->tid, tcon->ses->Suid, 0, 0, rc); 2536 goto qdf_free; 2537 } 2538 2539 atomic_inc(&tcon->num_remote_opens); 2540 2541 qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base; 2542 if (qd_rsp->hdr.Status == STATUS_NO_MORE_FILES) { 2543 trace_smb3_query_dir_done(xid, fid->persistent_fid, 2544 tcon->tid, tcon->ses->Suid, 0, 0); 2545 srch_inf->endOfSearch = true; 2546 rc = 0; 2547 goto qdf_free; 2548 } 2549 2550 rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1], 2551 srch_inf); 2552 if (rc) { 2553 trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid, 2554 tcon->ses->Suid, 0, 0, rc); 2555 goto qdf_free; 2556 } 2557 resp_buftype[1] = CIFS_NO_BUFFER; 2558 2559 trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid, 2560 tcon->ses->Suid, 0, srch_inf->entries_in_buffer); 2561 2562 qdf_free: 2563 kfree(utf16_path); 2564 SMB2_open_free(&rqst[0]); 2565 SMB2_query_directory_free(&rqst[1]); 2566 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); 2567 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); 2568 2569 if (is_replayable_error(rc) && 2570 smb2_should_replay(tcon, &retries, &cur_sleep)) 2571 goto replay_again; 2572 2573 return rc; 2574 } 2575 2576 static int 2577 smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon, 2578 struct cifs_fid *fid, __u16 search_flags, 2579 struct cifs_search_info *srch_inf) 2580 { 2581 return SMB2_query_directory(xid, tcon, fid->persistent_fid, 2582 fid->volatile_fid, 0, srch_inf); 2583 } 2584 2585 static int 2586 smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon, 2587 struct cifs_fid *fid) 2588 { 2589 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); 2590 } 2591 2592 /* 2593 * If we negotiate SMB2 protocol and get STATUS_PENDING - update 2594 * the number of credits and return true. Otherwise - return false. 
2595 */ 2596 static bool 2597 smb2_is_status_pending(char *buf, struct TCP_Server_Info *server) 2598 { 2599 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 2600 int scredits, in_flight; 2601 2602 if (shdr->Status != STATUS_PENDING) 2603 return false; 2604 2605 if (shdr->CreditRequest) { 2606 spin_lock(&server->req_lock); 2607 server->credits += le16_to_cpu(shdr->CreditRequest); 2608 scredits = server->credits; 2609 in_flight = server->in_flight; 2610 spin_unlock(&server->req_lock); 2611 wake_up(&server->request_q); 2612 2613 trace_smb3_pend_credits(server->current_mid, 2614 server->conn_id, server->hostname, scredits, 2615 le16_to_cpu(shdr->CreditRequest), in_flight); 2616 cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n", 2617 __func__, le16_to_cpu(shdr->CreditRequest), scredits); 2618 } 2619 2620 return true; 2621 } 2622 2623 static bool 2624 smb2_is_session_expired(char *buf) 2625 { 2626 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 2627 2628 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED && 2629 shdr->Status != STATUS_USER_SESSION_DELETED) 2630 return false; 2631 2632 trace_smb3_ses_expired(le32_to_cpu(shdr->Id.SyncId.TreeId), 2633 le64_to_cpu(shdr->SessionId), 2634 le16_to_cpu(shdr->Command), 2635 le64_to_cpu(shdr->MessageId)); 2636 cifs_dbg(FYI, "Session expired or deleted\n"); 2637 2638 return true; 2639 } 2640 2641 static bool 2642 smb2_is_status_io_timeout(char *buf) 2643 { 2644 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 2645 2646 if (shdr->Status == STATUS_IO_TIMEOUT) 2647 return true; 2648 else 2649 return false; 2650 } 2651 2652 static bool 2653 smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server) 2654 { 2655 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 2656 struct TCP_Server_Info *pserver; 2657 struct cifs_ses *ses; 2658 struct cifs_tcon *tcon; 2659 2660 if (shdr->Status != STATUS_NETWORK_NAME_DELETED) 2661 return false; 2662 2663 /* If server is a channel, select the primary channel */ 2664 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; 2665 2666 spin_lock(&cifs_tcp_ses_lock); 2667 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 2668 if (cifs_ses_exiting(ses)) 2669 continue; 2670 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 2671 if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) { 2672 spin_lock(&tcon->tc_lock); 2673 tcon->need_reconnect = true; 2674 spin_unlock(&tcon->tc_lock); 2675 spin_unlock(&cifs_tcp_ses_lock); 2676 pr_warn_once("Server share %s deleted.\n", 2677 tcon->tree_name); 2678 return true; 2679 } 2680 } 2681 } 2682 spin_unlock(&cifs_tcp_ses_lock); 2683 2684 return false; 2685 } 2686 2687 static int 2688 smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid, 2689 __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode) 2690 { 2691 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) 2692 return SMB2_lease_break(0, tcon, cinode->lease_key, 2693 smb2_get_lease_state(cinode)); 2694 2695 return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid, 2696 CIFS_CACHE_READ(cinode) ? 
1 : 0); 2697 } 2698 2699 void 2700 smb2_set_replay(struct TCP_Server_Info *server, struct smb_rqst *rqst) 2701 { 2702 struct smb2_hdr *shdr; 2703 2704 if (server->dialect < SMB30_PROT_ID) 2705 return; 2706 2707 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base); 2708 if (shdr == NULL) { 2709 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n"); 2710 return; 2711 } 2712 shdr->Flags |= SMB2_FLAGS_REPLAY_OPERATION; 2713 } 2714 2715 void 2716 smb2_set_related(struct smb_rqst *rqst) 2717 { 2718 struct smb2_hdr *shdr; 2719 2720 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base); 2721 if (shdr == NULL) { 2722 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n"); 2723 return; 2724 } 2725 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; 2726 } 2727 2728 char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0}; 2729 2730 void 2731 smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst) 2732 { 2733 struct smb2_hdr *shdr; 2734 struct cifs_ses *ses = tcon->ses; 2735 struct TCP_Server_Info *server = ses->server; 2736 unsigned long len = smb_rqst_len(server, rqst); 2737 int num_padding; 2738 2739 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base); 2740 if (shdr == NULL) { 2741 cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n"); 2742 return; 2743 } 2744 2745 /* SMB headers in a compound are 8 byte aligned. */ 2746 if (IS_ALIGNED(len, 8)) 2747 goto out; 2748 2749 num_padding = 8 - (len & 7); 2750 if (smb3_encryption_required(tcon)) { 2751 int i; 2752 2753 /* 2754 * Flatten request into a single buffer with required padding as 2755 * the encryption layer can't handle the padding iovs. 2756 */ 2757 for (i = 1; i < rqst->rq_nvec; i++) { 2758 memcpy(rqst->rq_iov[0].iov_base + 2759 rqst->rq_iov[0].iov_len, 2760 rqst->rq_iov[i].iov_base, 2761 rqst->rq_iov[i].iov_len); 2762 rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len; 2763 } 2764 memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len, 2765 0, num_padding); 2766 rqst->rq_iov[0].iov_len += num_padding; 2767 rqst->rq_nvec = 1; 2768 } else { 2769 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding; 2770 rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding; 2771 rqst->rq_nvec++; 2772 } 2773 len += num_padding; 2774 out: 2775 shdr->NextCommand = cpu_to_le32(len); 2776 } 2777 2778 /* 2779 * helper function for exponential backoff and check if replayable 2780 */ 2781 bool smb2_should_replay(struct cifs_tcon *tcon, 2782 int *pretries, 2783 int *pcur_sleep) 2784 { 2785 if (!pretries || !pcur_sleep) 2786 return false; 2787 2788 if (tcon->retry || (*pretries)++ < tcon->ses->server->retrans) { 2789 /* Update sleep time for exponential backoff */ 2790 if (!(*pcur_sleep)) 2791 (*pcur_sleep) = 1; 2792 else { 2793 (*pcur_sleep) = ((*pcur_sleep) << 1); 2794 if ((*pcur_sleep) > CIFS_MAX_SLEEP) 2795 (*pcur_sleep) = CIFS_MAX_SLEEP; 2796 } 2797 return true; 2798 } 2799 2800 return false; 2801 } 2802 2803 /* 2804 * Passes the query info response back to the caller on success. 2805 * Caller need to free this with free_rsp_buf(). 
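 *
 * Typical usage (editor's sketch; this simply mirrors smb2_queryfs()
 * further down):
 *
 *	struct kvec rsp_iov = {NULL, 0};
 *	int buftype = CIFS_NO_BUFFER;
 *
 *	rc = smb2_query_info_compound(xid, tcon, path, FILE_READ_ATTRIBUTES,
 *				      FS_FULL_SIZE_INFORMATION,
 *				      SMB2_O_INFO_FILESYSTEM,
 *				      sizeof(struct smb2_fs_full_size_info),
 *				      &rsp_iov, &buftype, cifs_sb);
 *	if (!rc)
 *		... parse the response at rsp_iov.iov_base ...
 *	free_rsp_buf(buftype, rsp_iov.iov_base);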
2806 */ 2807 int 2808 smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, 2809 const char *path, u32 desired_access, 2810 u32 class, u32 type, u32 output_len, 2811 struct kvec *rsp, int *buftype, 2812 struct cifs_sb_info *cifs_sb) 2813 { 2814 struct smb2_compound_vars *vars; 2815 struct cifs_ses *ses = tcon->ses; 2816 struct TCP_Server_Info *server; 2817 int flags = CIFS_CP_CREATE_CLOSE_OP; 2818 struct smb_rqst *rqst; 2819 int resp_buftype[3]; 2820 struct kvec *rsp_iov; 2821 u8 oplock = SMB2_OPLOCK_LEVEL_NONE; 2822 struct cifs_open_parms oparms; 2823 struct cifs_fid fid; 2824 int rc; 2825 __le16 *utf16_path; 2826 struct cached_fid *cfid; 2827 int retries = 0, cur_sleep = 0; 2828 2829 replay_again: 2830 /* reinitialize for possible replay */ 2831 cfid = NULL; 2832 flags = CIFS_CP_CREATE_CLOSE_OP; 2833 oplock = SMB2_OPLOCK_LEVEL_NONE; 2834 server = cifs_pick_channel(ses); 2835 2836 if (!path) 2837 path = ""; 2838 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); 2839 if (!utf16_path) 2840 return -ENOMEM; 2841 2842 if (smb3_encryption_required(tcon)) 2843 flags |= CIFS_TRANSFORM_REQ; 2844 2845 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; 2846 vars = kzalloc(sizeof(*vars), GFP_KERNEL); 2847 if (!vars) { 2848 rc = -ENOMEM; 2849 goto out_free_path; 2850 } 2851 rqst = vars->rqst; 2852 rsp_iov = vars->rsp_iov; 2853 2854 /* 2855 * We can only call this for things we know are directories. 2856 */ 2857 if (!strcmp(path, "")) 2858 open_cached_dir(xid, tcon, path, cifs_sb, false, 2859 &cfid); /* cfid null if open dir failed */ 2860 2861 rqst[0].rq_iov = vars->open_iov; 2862 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; 2863 2864 oparms = (struct cifs_open_parms) { 2865 .tcon = tcon, 2866 .path = path, 2867 .desired_access = desired_access, 2868 .disposition = FILE_OPEN, 2869 .create_options = cifs_create_options(cifs_sb, 0), 2870 .fid = &fid, 2871 .replay = !!(retries), 2872 }; 2873 2874 rc = SMB2_open_init(tcon, server, 2875 &rqst[0], &oplock, &oparms, utf16_path); 2876 if (rc) 2877 goto qic_exit; 2878 smb2_set_next_command(tcon, &rqst[0]); 2879 2880 rqst[1].rq_iov = &vars->qi_iov; 2881 rqst[1].rq_nvec = 1; 2882 2883 if (cfid) { 2884 rc = SMB2_query_info_init(tcon, server, 2885 &rqst[1], 2886 cfid->fid.persistent_fid, 2887 cfid->fid.volatile_fid, 2888 class, type, 0, 2889 output_len, 0, 2890 NULL); 2891 } else { 2892 rc = SMB2_query_info_init(tcon, server, 2893 &rqst[1], 2894 COMPOUND_FID, 2895 COMPOUND_FID, 2896 class, type, 0, 2897 output_len, 0, 2898 NULL); 2899 } 2900 if (rc) 2901 goto qic_exit; 2902 if (!cfid) { 2903 smb2_set_next_command(tcon, &rqst[1]); 2904 smb2_set_related(&rqst[1]); 2905 } 2906 2907 rqst[2].rq_iov = &vars->close_iov; 2908 rqst[2].rq_nvec = 1; 2909 2910 rc = SMB2_close_init(tcon, server, 2911 &rqst[2], COMPOUND_FID, COMPOUND_FID, false); 2912 if (rc) 2913 goto qic_exit; 2914 smb2_set_related(&rqst[2]); 2915 2916 if (retries) { 2917 /* Back-off before retry */ 2918 if (cur_sleep) 2919 msleep(cur_sleep); 2920 if (!cfid) { 2921 smb2_set_replay(server, &rqst[0]); 2922 smb2_set_replay(server, &rqst[2]); 2923 } 2924 smb2_set_replay(server, &rqst[1]); 2925 } 2926 2927 if (cfid) { 2928 rc = compound_send_recv(xid, ses, server, 2929 flags, 1, &rqst[1], 2930 &resp_buftype[1], &rsp_iov[1]); 2931 } else { 2932 rc = compound_send_recv(xid, ses, server, 2933 flags, 3, rqst, 2934 resp_buftype, rsp_iov); 2935 } 2936 if (rc) { 2937 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); 2938 if (rc == -EREMCHG) { 2939 tcon->need_reconnect = true; 2940 
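			/*
			 * -EREMCHG here indicates the share backing this
			 * tcon has gone away (see the warning below), so
			 * flag the tcon for reconnect before bailing out.
			 */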
pr_warn_once("server share %s deleted\n", 2941 tcon->tree_name); 2942 } 2943 goto qic_exit; 2944 } 2945 *rsp = rsp_iov[1]; 2946 *buftype = resp_buftype[1]; 2947 2948 qic_exit: 2949 SMB2_open_free(&rqst[0]); 2950 SMB2_query_info_free(&rqst[1]); 2951 SMB2_close_free(&rqst[2]); 2952 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); 2953 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); 2954 if (cfid) 2955 close_cached_dir(cfid); 2956 kfree(vars); 2957 out_free_path: 2958 kfree(utf16_path); 2959 2960 if (is_replayable_error(rc) && 2961 smb2_should_replay(tcon, &retries, &cur_sleep)) 2962 goto replay_again; 2963 2964 return rc; 2965 } 2966 2967 static int 2968 smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon, 2969 const char *path, struct cifs_sb_info *cifs_sb, struct kstatfs *buf) 2970 { 2971 struct smb2_query_info_rsp *rsp; 2972 struct smb2_fs_full_size_info *info = NULL; 2973 struct kvec rsp_iov = {NULL, 0}; 2974 int buftype = CIFS_NO_BUFFER; 2975 int rc; 2976 2977 2978 rc = smb2_query_info_compound(xid, tcon, path, 2979 FILE_READ_ATTRIBUTES, 2980 FS_FULL_SIZE_INFORMATION, 2981 SMB2_O_INFO_FILESYSTEM, 2982 sizeof(struct smb2_fs_full_size_info), 2983 &rsp_iov, &buftype, cifs_sb); 2984 if (rc) 2985 goto qfs_exit; 2986 2987 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; 2988 buf->f_type = SMB2_SUPER_MAGIC; 2989 info = (struct smb2_fs_full_size_info *)( 2990 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp); 2991 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), 2992 le32_to_cpu(rsp->OutputBufferLength), 2993 &rsp_iov, 2994 sizeof(struct smb2_fs_full_size_info)); 2995 if (!rc) 2996 smb2_copy_fs_info_to_kstatfs(info, buf); 2997 2998 qfs_exit: 2999 trace_smb3_qfs_done(xid, tcon->tid, tcon->ses->Suid, tcon->tree_name, rc); 3000 free_rsp_buf(buftype, rsp_iov.iov_base); 3001 return rc; 3002 } 3003 3004 static int 3005 smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon, 3006 const char *path, struct cifs_sb_info *cifs_sb, struct kstatfs *buf) 3007 { 3008 int rc; 3009 __le16 *utf16_path = NULL; 3010 u8 oplock = SMB2_OPLOCK_LEVEL_NONE; 3011 struct cifs_open_parms oparms; 3012 struct cifs_fid fid; 3013 3014 if (!tcon->posix_extensions) 3015 return smb2_queryfs(xid, tcon, path, cifs_sb, buf); 3016 3017 oparms = (struct cifs_open_parms) { 3018 .tcon = tcon, 3019 .path = path, 3020 .desired_access = FILE_READ_ATTRIBUTES, 3021 .disposition = FILE_OPEN, 3022 .create_options = cifs_create_options(cifs_sb, 0), 3023 .fid = &fid, 3024 }; 3025 3026 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); 3027 if (utf16_path == NULL) 3028 return -ENOMEM; 3029 3030 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, 3031 NULL, NULL); 3032 kfree(utf16_path); 3033 if (rc) 3034 return rc; 3035 3036 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid, 3037 fid.volatile_fid, buf); 3038 buf->f_type = SMB2_SUPER_MAGIC; 3039 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 3040 return rc; 3041 } 3042 3043 static bool 3044 smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2) 3045 { 3046 return ob1->fid.persistent_fid == ob2->fid.persistent_fid && 3047 ob1->fid.volatile_fid == ob2->fid.volatile_fid; 3048 } 3049 3050 static int 3051 smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset, 3052 __u64 length, __u32 type, int lock, int unlock, bool wait) 3053 { 3054 if (unlock && !lock) 3055 type = SMB2_LOCKFLAG_UNLOCK; 3056 return SMB2_lock(xid, tlink_tcon(cfile->tlink), 3057 cfile->fid.persistent_fid, 
cfile->fid.volatile_fid, 3058 current->tgid, length, offset, type, wait); 3059 } 3060 3061 static void 3062 smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid) 3063 { 3064 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE); 3065 } 3066 3067 static void 3068 smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid) 3069 { 3070 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE); 3071 } 3072 3073 static void 3074 smb2_new_lease_key(struct cifs_fid *fid) 3075 { 3076 generate_random_uuid(fid->lease_key); 3077 } 3078 3079 static int 3080 smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, 3081 const char *search_name, 3082 struct dfs_info3_param **target_nodes, 3083 unsigned int *num_of_nodes, 3084 const struct nls_table *nls_codepage, int remap) 3085 { 3086 int rc; 3087 __le16 *utf16_path = NULL; 3088 int utf16_path_len = 0; 3089 struct cifs_tcon *tcon; 3090 struct fsctl_get_dfs_referral_req *dfs_req = NULL; 3091 struct get_dfs_referral_rsp *dfs_rsp = NULL; 3092 u32 dfs_req_size = 0, dfs_rsp_size = 0; 3093 int retry_once = 0; 3094 3095 cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name); 3096 3097 /* 3098 * Try to use the IPC tcon, otherwise just use any 3099 */ 3100 tcon = ses->tcon_ipc; 3101 if (tcon == NULL) { 3102 spin_lock(&cifs_tcp_ses_lock); 3103 tcon = list_first_entry_or_null(&ses->tcon_list, 3104 struct cifs_tcon, 3105 tcon_list); 3106 if (tcon) { 3107 spin_lock(&tcon->tc_lock); 3108 tcon->tc_count++; 3109 spin_unlock(&tcon->tc_lock); 3110 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, 3111 netfs_trace_tcon_ref_get_dfs_refer); 3112 } 3113 spin_unlock(&cifs_tcp_ses_lock); 3114 } 3115 3116 if (tcon == NULL) { 3117 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n", 3118 ses); 3119 rc = -ENOTCONN; 3120 goto out; 3121 } 3122 3123 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX, 3124 &utf16_path_len, 3125 nls_codepage, remap); 3126 if (!utf16_path) { 3127 rc = -ENOMEM; 3128 goto out; 3129 } 3130 3131 dfs_req_size = sizeof(*dfs_req) + utf16_path_len; 3132 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL); 3133 if (!dfs_req) { 3134 rc = -ENOMEM; 3135 goto out; 3136 } 3137 3138 /* Highest DFS referral version understood */ 3139 dfs_req->MaxReferralLevel = DFS_VERSION; 3140 3141 /* Path to resolve in an UTF-16 null-terminated string */ 3142 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len); 3143 3144 for (;;) { 3145 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, 3146 FSCTL_DFS_GET_REFERRALS, 3147 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize, 3148 (char **)&dfs_rsp, &dfs_rsp_size); 3149 if (fatal_signal_pending(current)) { 3150 rc = -EINTR; 3151 break; 3152 } 3153 if (!is_retryable_error(rc) || retry_once++) 3154 break; 3155 usleep_range(512, 2048); 3156 } 3157 3158 if (!rc && !dfs_rsp) 3159 rc = smb_EIO(smb_eio_trace_dfsref_no_rsp); 3160 if (rc) { 3161 if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP) 3162 cifs_tcon_dbg(FYI, "%s: ioctl error: rc=%d\n", __func__, rc); 3163 goto out; 3164 } 3165 3166 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size, 3167 num_of_nodes, target_nodes, 3168 nls_codepage, remap, search_name, 3169 true /* is_unicode */); 3170 if (rc && rc != -ENOENT) { 3171 cifs_tcon_dbg(VFS, "%s: failed to parse DFS referral %s: %d\n", 3172 __func__, search_name, rc); 3173 } 3174 3175 out: 3176 if (tcon && !tcon->ipc) { 3177 /* ipc tcons are not refcounted */ 3178 cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_dfs_refer); 3179 
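		/*
		 * The put above releases the extra reference taken when the
		 * tcon was picked off ses->tcon_list earlier (IPC tcons are
		 * skipped as they are not refcounted); the trace below only
		 * records the drop for refcount debugging.
		 */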
trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, 3180 netfs_trace_tcon_ref_dec_dfs_refer); 3181 } 3182 kfree(utf16_path); 3183 kfree(dfs_req); 3184 kfree(dfs_rsp); 3185 return rc; 3186 } 3187 3188 static struct smb_ntsd * 3189 get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb, 3190 const struct cifs_fid *cifsfid, u32 *pacllen, u32 info) 3191 { 3192 struct smb_ntsd *pntsd = NULL; 3193 unsigned int xid; 3194 int rc = -EOPNOTSUPP; 3195 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 3196 3197 if (IS_ERR(tlink)) 3198 return ERR_CAST(tlink); 3199 3200 xid = get_xid(); 3201 cifs_dbg(FYI, "trying to get acl\n"); 3202 3203 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid, 3204 cifsfid->volatile_fid, (void **)&pntsd, pacllen, 3205 info); 3206 free_xid(xid); 3207 3208 cifs_put_tlink(tlink); 3209 3210 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen); 3211 if (rc) 3212 return ERR_PTR(rc); 3213 return pntsd; 3214 3215 } 3216 3217 static struct smb_ntsd * 3218 get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb, 3219 const char *path, u32 *pacllen, u32 info) 3220 { 3221 struct smb_ntsd *pntsd = NULL; 3222 u8 oplock = SMB2_OPLOCK_LEVEL_NONE; 3223 unsigned int xid; 3224 int rc; 3225 struct cifs_tcon *tcon; 3226 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 3227 struct cifs_fid fid; 3228 struct cifs_open_parms oparms; 3229 __le16 *utf16_path; 3230 3231 cifs_dbg(FYI, "get smb3 acl for path %s\n", path); 3232 if (IS_ERR(tlink)) 3233 return ERR_CAST(tlink); 3234 3235 tcon = tlink_tcon(tlink); 3236 xid = get_xid(); 3237 3238 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); 3239 if (!utf16_path) { 3240 rc = -ENOMEM; 3241 goto put_tlink; 3242 } 3243 3244 oparms = (struct cifs_open_parms) { 3245 .tcon = tcon, 3246 .path = path, 3247 .desired_access = READ_CONTROL, 3248 .disposition = FILE_OPEN, 3249 /* 3250 * When querying an ACL, even if the file is a symlink 3251 * we want to open the source not the target, and so 3252 * the protocol requires that the client specify this 3253 * flag when opening a reparse point 3254 */ 3255 .create_options = cifs_create_options(cifs_sb, 0) | 3256 OPEN_REPARSE_POINT, 3257 .fid = &fid, 3258 }; 3259 3260 if (info & SACL_SECINFO) 3261 oparms.desired_access |= SYSTEM_SECURITY; 3262 3263 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL, 3264 NULL); 3265 kfree(utf16_path); 3266 if (!rc) { 3267 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid, 3268 fid.volatile_fid, (void **)&pntsd, pacllen, 3269 info); 3270 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 3271 } 3272 3273 put_tlink: 3274 cifs_put_tlink(tlink); 3275 free_xid(xid); 3276 3277 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen); 3278 if (rc) 3279 return ERR_PTR(rc); 3280 return pntsd; 3281 } 3282 3283 static int 3284 set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen, 3285 struct inode *inode, const char *path, int aclflag) 3286 { 3287 u8 oplock = SMB2_OPLOCK_LEVEL_NONE; 3288 unsigned int xid; 3289 int rc, access_flags = 0; 3290 struct cifs_tcon *tcon; 3291 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 3292 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 3293 struct cifs_fid fid; 3294 struct cifs_open_parms oparms; 3295 __le16 *utf16_path; 3296 3297 cifs_dbg(FYI, "set smb3 acl for path %s\n", path); 3298 if (IS_ERR(tlink)) 3299 return PTR_ERR(tlink); 3300 3301 tcon = tlink_tcon(tlink); 3302 xid = get_xid(); 3303 3304 if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP) 3305 access_flags |= WRITE_OWNER; 
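	/*
	 * Each piece of the security descriptor needs a matching right on
	 * the open: WRITE_OWNER (above) for owner/group changes,
	 * SYSTEM_SECURITY for the SACL and WRITE_DAC for the DACL.
	 */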
3306 if (aclflag & CIFS_ACL_SACL) 3307 access_flags |= SYSTEM_SECURITY; 3308 if (aclflag & CIFS_ACL_DACL) 3309 access_flags |= WRITE_DAC; 3310 3311 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); 3312 if (!utf16_path) { 3313 rc = -ENOMEM; 3314 goto put_tlink; 3315 } 3316 3317 oparms = (struct cifs_open_parms) { 3318 .tcon = tcon, 3319 .desired_access = access_flags, 3320 .create_options = cifs_create_options(cifs_sb, 0), 3321 .disposition = FILE_OPEN, 3322 .path = path, 3323 .fid = &fid, 3324 }; 3325 3326 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, 3327 NULL, NULL); 3328 kfree(utf16_path); 3329 if (!rc) { 3330 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid, 3331 fid.volatile_fid, pnntsd, acllen, aclflag); 3332 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 3333 } 3334 3335 put_tlink: 3336 cifs_put_tlink(tlink); 3337 free_xid(xid); 3338 return rc; 3339 } 3340 3341 /* Retrieve an ACL from the server */ 3342 static struct smb_ntsd * 3343 get_smb2_acl(struct cifs_sb_info *cifs_sb, 3344 struct inode *inode, const char *path, 3345 u32 *pacllen, u32 info) 3346 { 3347 struct smb_ntsd *pntsd = NULL; 3348 struct cifsFileInfo *open_file = NULL; 3349 3350 if (inode && !(info & SACL_SECINFO)) 3351 open_file = find_readable_file(CIFS_I(inode), true); 3352 if (!open_file || (info & SACL_SECINFO)) 3353 return get_smb2_acl_by_path(cifs_sb, path, pacllen, info); 3354 3355 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info); 3356 cifsFileInfo_put(open_file); 3357 return pntsd; 3358 } 3359 3360 static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon, 3361 loff_t offset, loff_t len, unsigned int xid) 3362 { 3363 struct cifsFileInfo *cfile = file->private_data; 3364 struct file_zero_data_information fsctl_buf; 3365 3366 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); 3367 3368 fsctl_buf.FileOffset = cpu_to_le64(offset); 3369 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); 3370 3371 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 3372 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, 3373 (char *)&fsctl_buf, 3374 sizeof(struct file_zero_data_information), 3375 0, NULL, NULL); 3376 } 3377 3378 static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, 3379 unsigned long long offset, unsigned long long len, 3380 bool keep_size) 3381 { 3382 struct cifs_ses *ses = tcon->ses; 3383 struct inode *inode = file_inode(file); 3384 struct cifsInodeInfo *cifsi = CIFS_I(inode); 3385 struct cifsFileInfo *cfile = file->private_data; 3386 struct netfs_inode *ictx = netfs_inode(inode); 3387 unsigned long long i_size, new_size, remote_size; 3388 long rc; 3389 unsigned int xid; 3390 3391 xid = get_xid(); 3392 3393 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid, 3394 ses->Suid, offset, len); 3395 3396 filemap_invalidate_lock(inode->i_mapping); 3397 3398 i_size = i_size_read(inode); 3399 remote_size = ictx->remote_i_size; 3400 if (offset + len >= remote_size && offset < i_size) { 3401 unsigned long long top = umin(offset + len, i_size); 3402 3403 rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1); 3404 if (rc < 0) 3405 goto zero_range_exit; 3406 } 3407 3408 /* 3409 * We zero the range through ioctl, so we need remove the page caches 3410 * first, otherwise the data may be inconsistent with the server. 
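 * For example, zeroing offset 4096 with length 8192 sends
 * FileOffset 4096 and BeyondFinalZero 12288 to the server, while
 * truncate_pagecache_range() below drops cached pages covering bytes
 * 4096..12287 so a later read refetches the server's zeroes instead
 * of serving stale cached data.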
3411 */ 3412 truncate_pagecache_range(inode, offset, offset + len - 1); 3413 netfs_wait_for_outstanding_io(inode); 3414 3415 /* if file not oplocked can't be sure whether asking to extend size */ 3416 rc = -EOPNOTSUPP; 3417 if (keep_size == false && !CIFS_CACHE_READ(cifsi)) 3418 goto zero_range_exit; 3419 3420 rc = smb3_zero_data(file, tcon, offset, len, xid); 3421 if (rc < 0) 3422 goto zero_range_exit; 3423 3424 /* 3425 * do we also need to change the size of the file? 3426 */ 3427 new_size = offset + len; 3428 if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) { 3429 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, 3430 cfile->fid.volatile_fid, cfile->pid, new_size); 3431 if (rc >= 0) { 3432 truncate_setsize(inode, new_size); 3433 netfs_resize_file(&cifsi->netfs, new_size, true); 3434 if (offset < cifsi->netfs.zero_point) 3435 cifsi->netfs.zero_point = offset; 3436 fscache_resize_cookie(cifs_inode_cookie(inode), new_size); 3437 } 3438 } 3439 3440 zero_range_exit: 3441 filemap_invalidate_unlock(inode->i_mapping); 3442 free_xid(xid); 3443 if (rc) 3444 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid, 3445 ses->Suid, offset, len, rc); 3446 else 3447 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid, 3448 ses->Suid, offset, len); 3449 return rc; 3450 } 3451 3452 static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, 3453 loff_t offset, loff_t len) 3454 { 3455 struct inode *inode = file_inode(file); 3456 struct cifsFileInfo *cfile = file->private_data; 3457 struct file_zero_data_information fsctl_buf; 3458 unsigned long long end = offset + len, i_size, remote_i_size; 3459 long rc; 3460 unsigned int xid; 3461 __u8 set_sparse = 1; 3462 3463 xid = get_xid(); 3464 3465 /* Need to make file sparse, if not already, before freeing range. */ 3466 /* Consider adding equivalent for compressed since it could also work */ 3467 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) { 3468 rc = -EOPNOTSUPP; 3469 goto out; 3470 } 3471 3472 filemap_invalidate_lock(inode->i_mapping); 3473 /* 3474 * We implement the punch hole through ioctl, so we need remove the page 3475 * caches first, otherwise the data may be inconsistent with the server. 3476 */ 3477 truncate_pagecache_range(inode, offset, offset + len - 1); 3478 netfs_wait_for_outstanding_io(inode); 3479 3480 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); 3481 3482 fsctl_buf.FileOffset = cpu_to_le64(offset); 3483 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); 3484 3485 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 3486 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, 3487 (char *)&fsctl_buf, 3488 sizeof(struct file_zero_data_information), 3489 CIFSMaxBufSize, NULL, NULL); 3490 3491 if (rc) 3492 goto unlock; 3493 3494 /* If there's dirty data in the buffer that would extend the EOF if it 3495 * were written, then we need to move the EOF marker over to the lower 3496 * of the high end of the hole and the proposed EOF. The problem is 3497 * that we locally hole-punch the tail of the dirty data, the proposed 3498 * EOF update will end up in the wrong place. 
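 *
 * Worked example (illustrative numbers): local i_size is 100K because
 * of dirty data not yet written back, remote_i_size is 64K, and the
 * hole is punched at 90K for a length of 20K. Then end (110K) and
 * i_size both exceed remote_i_size, so SMB2_set_eof() below pushes
 * the server's EOF out to umin(end, i_size) = 100K and remote_i_size
 * is updated to match.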
3499 */ 3500 i_size = i_size_read(inode); 3501 remote_i_size = netfs_inode(inode)->remote_i_size; 3502 if (end > remote_i_size && i_size > remote_i_size) { 3503 unsigned long long extend_to = umin(end, i_size); 3504 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, 3505 cfile->fid.volatile_fid, cfile->pid, extend_to); 3506 if (rc >= 0) 3507 netfs_inode(inode)->remote_i_size = extend_to; 3508 } 3509 3510 unlock: 3511 filemap_invalidate_unlock(inode->i_mapping); 3512 out: 3513 free_xid(xid); 3514 return rc; 3515 } 3516 3517 static int smb3_simple_fallocate_write_range(unsigned int xid, 3518 struct cifs_tcon *tcon, 3519 struct cifsFileInfo *cfile, 3520 loff_t off, loff_t len, 3521 char *buf) 3522 { 3523 struct cifs_io_parms io_parms = {0}; 3524 int nbytes; 3525 int rc = 0; 3526 struct kvec iov[2]; 3527 3528 io_parms.netfid = cfile->fid.netfid; 3529 io_parms.pid = current->tgid; 3530 io_parms.tcon = tcon; 3531 io_parms.persistent_fid = cfile->fid.persistent_fid; 3532 io_parms.volatile_fid = cfile->fid.volatile_fid; 3533 3534 while (len) { 3535 io_parms.offset = off; 3536 io_parms.length = len; 3537 if (io_parms.length > SMB2_MAX_BUFFER_SIZE) 3538 io_parms.length = SMB2_MAX_BUFFER_SIZE; 3539 /* iov[0] is reserved for smb header */ 3540 iov[1].iov_base = buf; 3541 iov[1].iov_len = io_parms.length; 3542 rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1); 3543 if (rc) 3544 break; 3545 if (nbytes > len) 3546 return -EINVAL; 3547 buf += nbytes; 3548 off += nbytes; 3549 len -= nbytes; 3550 } 3551 return rc; 3552 } 3553 3554 static int smb3_simple_fallocate_range(unsigned int xid, 3555 struct cifs_tcon *tcon, 3556 struct cifsFileInfo *cfile, 3557 loff_t off, loff_t len) 3558 { 3559 struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data; 3560 u32 out_data_len; 3561 char *buf = NULL; 3562 loff_t l; 3563 int rc; 3564 3565 in_data.file_offset = cpu_to_le64(off); 3566 in_data.length = cpu_to_le64(len); 3567 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 3568 cfile->fid.volatile_fid, 3569 FSCTL_QUERY_ALLOCATED_RANGES, 3570 (char *)&in_data, sizeof(in_data), 3571 1024 * sizeof(struct file_allocated_range_buffer), 3572 (char **)&out_data, &out_data_len); 3573 if (rc) 3574 goto out; 3575 3576 buf = kzalloc(1024 * 1024, GFP_KERNEL); 3577 if (buf == NULL) { 3578 rc = -ENOMEM; 3579 goto out; 3580 } 3581 3582 tmp_data = out_data; 3583 while (len) { 3584 /* 3585 * The rest of the region is unmapped so write it all. 3586 */ 3587 if (out_data_len == 0) { 3588 rc = smb3_simple_fallocate_write_range(xid, tcon, 3589 cfile, off, len, buf); 3590 goto out; 3591 } 3592 3593 if (out_data_len < sizeof(struct file_allocated_range_buffer)) { 3594 rc = -EINVAL; 3595 goto out; 3596 } 3597 3598 if (off < le64_to_cpu(tmp_data->file_offset)) { 3599 /* 3600 * We are at a hole. Write until the end of the region 3601 * or until the next allocated data, 3602 * whichever comes next. 3603 */ 3604 l = le64_to_cpu(tmp_data->file_offset) - off; 3605 if (len < l) 3606 l = len; 3607 rc = smb3_simple_fallocate_write_range(xid, tcon, 3608 cfile, off, l, buf); 3609 if (rc) 3610 goto out; 3611 off = off + l; 3612 len = len - l; 3613 if (len == 0) 3614 goto out; 3615 } 3616 /* 3617 * We are at a section of allocated data, just skip forward 3618 * until the end of the data or the end of the region 3619 * we are supposed to fallocate, whichever comes first. 
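 *
 * Example walk (illustrative): fallocating bytes 0..1M of a sparse
 * file whose only allocated range is 256K..512K writes zeroes over
 * 0..256K (the hole before the data), skips 256K..512K here, and
 * then writes zeroes over 512K..1M via the out_data_len == 0 case at
 * the top of the loop once the returned range list is exhausted.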
3620 */ 3621 l = le64_to_cpu(tmp_data->length); 3622 if (len < l) 3623 l = len; 3624 off += l; 3625 len -= l; 3626 3627 tmp_data = &tmp_data[1]; 3628 out_data_len -= sizeof(struct file_allocated_range_buffer); 3629 } 3630 3631 out: 3632 kfree(out_data); 3633 kfree(buf); 3634 return rc; 3635 } 3636 3637 3638 static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, 3639 loff_t off, loff_t len, bool keep_size) 3640 { 3641 struct inode *inode; 3642 struct cifsInodeInfo *cifsi; 3643 struct cifsFileInfo *cfile = file->private_data; 3644 long rc = -EOPNOTSUPP; 3645 unsigned int xid; 3646 loff_t new_eof; 3647 3648 xid = get_xid(); 3649 3650 inode = d_inode(cfile->dentry); 3651 cifsi = CIFS_I(inode); 3652 3653 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid, 3654 tcon->ses->Suid, off, len); 3655 /* if file not oplocked can't be sure whether asking to extend size */ 3656 if (!CIFS_CACHE_READ(cifsi)) 3657 if (keep_size == false) { 3658 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, 3659 tcon->tid, tcon->ses->Suid, off, len, rc); 3660 free_xid(xid); 3661 return rc; 3662 } 3663 3664 /* 3665 * Extending the file 3666 */ 3667 if ((keep_size == false) && i_size_read(inode) < off + len) { 3668 rc = inode_newsize_ok(inode, off + len); 3669 if (rc) 3670 goto out; 3671 3672 if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) 3673 smb2_set_sparse(xid, tcon, cfile, inode, false); 3674 3675 new_eof = off + len; 3676 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, 3677 cfile->fid.volatile_fid, cfile->pid, new_eof); 3678 if (rc == 0) { 3679 netfs_resize_file(&cifsi->netfs, new_eof, true); 3680 cifs_setsize(inode, new_eof); 3681 } 3682 goto out; 3683 } 3684 3685 /* 3686 * Files are non-sparse by default so falloc may be a no-op 3687 * Must check if file sparse. If not sparse, and since we are not 3688 * extending then no need to do anything since file already allocated 3689 */ 3690 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) { 3691 rc = 0; 3692 goto out; 3693 } 3694 3695 if (keep_size == true) { 3696 /* 3697 * We can not preallocate pages beyond the end of the file 3698 * in SMB2 3699 */ 3700 if (off >= i_size_read(inode)) { 3701 rc = 0; 3702 goto out; 3703 } 3704 /* 3705 * For fallocates that are partially beyond the end of file, 3706 * clamp len so we only fallocate up to the end of file. 3707 */ 3708 if (off + len > i_size_read(inode)) { 3709 len = i_size_read(inode) - off; 3710 } 3711 } 3712 3713 if ((keep_size == true) || (i_size_read(inode) >= off + len)) { 3714 /* 3715 * At this point, we are trying to fallocate an internal 3716 * regions of a sparse file. Since smb2 does not have a 3717 * fallocate command we have two options on how to emulate this. 3718 * We can either turn the entire file to become non-sparse 3719 * which we only do if the fallocate is for virtually 3720 * the whole file, or we can overwrite the region with zeroes 3721 * using SMB2_write, which could be prohibitevly expensive 3722 * if len is large. 3723 */ 3724 /* 3725 * We are only trying to fallocate a small region so 3726 * just write it with zero. 3727 */ 3728 if (len <= 1024 * 1024) { 3729 rc = smb3_simple_fallocate_range(xid, tcon, cfile, 3730 off, len); 3731 goto out; 3732 } 3733 3734 /* 3735 * Check if falloc starts within first few pages of file 3736 * and ends within a few pages of the end of file to 3737 * ensure that most of file is being forced to be 3738 * fallocated now. 
If so then setting whole file sparse 3739 * ie potentially making a few extra pages at the beginning 3740 * or end of the file non-sparse via set_sparse is harmless. 3741 */ 3742 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) { 3743 rc = -EOPNOTSUPP; 3744 goto out; 3745 } 3746 } 3747 3748 smb2_set_sparse(xid, tcon, cfile, inode, false); 3749 rc = 0; 3750 3751 out: 3752 if (rc) 3753 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid, 3754 tcon->ses->Suid, off, len, rc); 3755 else 3756 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid, 3757 tcon->ses->Suid, off, len); 3758 3759 free_xid(xid); 3760 return rc; 3761 } 3762 3763 static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon, 3764 loff_t off, loff_t len) 3765 { 3766 int rc; 3767 unsigned int xid; 3768 struct inode *inode = file_inode(file); 3769 struct cifsInodeInfo *cifsi = CIFS_I(inode); 3770 struct cifsFileInfo *cfile = file->private_data; 3771 struct netfs_inode *ictx = &cifsi->netfs; 3772 loff_t old_eof, new_eof; 3773 3774 xid = get_xid(); 3775 3776 old_eof = i_size_read(inode); 3777 if ((off >= old_eof) || 3778 off + len >= old_eof) { 3779 rc = -EINVAL; 3780 goto out; 3781 } 3782 3783 filemap_invalidate_lock(inode->i_mapping); 3784 rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1); 3785 if (rc < 0) 3786 goto out_2; 3787 3788 truncate_pagecache_range(inode, off, old_eof); 3789 ictx->zero_point = old_eof; 3790 netfs_wait_for_outstanding_io(inode); 3791 3792 rc = smb2_copychunk_range(xid, cfile, cfile, off + len, 3793 old_eof - off - len, off); 3794 if (rc < 0) 3795 goto out_2; 3796 3797 new_eof = old_eof - len; 3798 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, 3799 cfile->fid.volatile_fid, cfile->pid, new_eof); 3800 if (rc < 0) 3801 goto out_2; 3802 3803 rc = 0; 3804 3805 truncate_setsize(inode, new_eof); 3806 netfs_resize_file(&cifsi->netfs, new_eof, true); 3807 ictx->zero_point = new_eof; 3808 fscache_resize_cookie(cifs_inode_cookie(inode), new_eof); 3809 out_2: 3810 filemap_invalidate_unlock(inode->i_mapping); 3811 out: 3812 free_xid(xid); 3813 return rc; 3814 } 3815 3816 static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon, 3817 loff_t off, loff_t len) 3818 { 3819 int rc; 3820 unsigned int xid; 3821 struct cifsFileInfo *cfile = file->private_data; 3822 struct inode *inode = file_inode(file); 3823 struct cifsInodeInfo *cifsi = CIFS_I(inode); 3824 __u64 count, old_eof, new_eof; 3825 3826 xid = get_xid(); 3827 3828 old_eof = i_size_read(inode); 3829 if (off >= old_eof) { 3830 rc = -EINVAL; 3831 goto out; 3832 } 3833 3834 count = old_eof - off; 3835 new_eof = old_eof + len; 3836 3837 filemap_invalidate_lock(inode->i_mapping); 3838 rc = filemap_write_and_wait_range(inode->i_mapping, off, new_eof - 1); 3839 if (rc < 0) 3840 goto out_2; 3841 truncate_pagecache_range(inode, off, old_eof); 3842 netfs_wait_for_outstanding_io(inode); 3843 3844 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, 3845 cfile->fid.volatile_fid, cfile->pid, new_eof); 3846 if (rc < 0) 3847 goto out_2; 3848 3849 truncate_setsize(inode, new_eof); 3850 netfs_resize_file(&cifsi->netfs, i_size_read(inode), true); 3851 fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode)); 3852 3853 rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len); 3854 if (rc < 0) 3855 goto out_2; 3856 cifsi->netfs.zero_point = new_eof; 3857 3858 rc = smb3_zero_data(file, tcon, off, len, xid); 3859 if (rc < 0) 3860 goto out_2; 3861 3862 rc = 0; 3863 out_2: 
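	/*
	 * Common exit for the emulation above (extend EOF, copychunk the
	 * old tail up to off + len, then zero the newly inserted range):
	 * drop the invalidate lock whether or not any step failed; rc
	 * holds the first error, if any.
	 */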
3864 filemap_invalidate_unlock(inode->i_mapping); 3865 out: 3866 free_xid(xid); 3867 return rc; 3868 } 3869 3870 static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence) 3871 { 3872 struct cifsFileInfo *wrcfile, *cfile = file->private_data; 3873 struct cifsInodeInfo *cifsi; 3874 struct inode *inode; 3875 int rc = 0; 3876 struct file_allocated_range_buffer in_data, *out_data = NULL; 3877 u32 out_data_len; 3878 unsigned int xid; 3879 3880 if (whence != SEEK_HOLE && whence != SEEK_DATA) 3881 return generic_file_llseek(file, offset, whence); 3882 3883 inode = d_inode(cfile->dentry); 3884 cifsi = CIFS_I(inode); 3885 3886 if (offset < 0 || offset >= i_size_read(inode)) 3887 return -ENXIO; 3888 3889 xid = get_xid(); 3890 /* 3891 * We need to be sure that all dirty pages are written as they 3892 * might fill holes on the server. 3893 * Note that we also MUST flush any written pages since at least 3894 * some servers (Windows2016) will not reflect recent writes in 3895 * QUERY_ALLOCATED_RANGES until SMB2_flush is called. 3896 */ 3897 wrcfile = find_writable_file(cifsi, FIND_WR_ANY); 3898 if (wrcfile) { 3899 filemap_write_and_wait(inode->i_mapping); 3900 smb2_flush_file(xid, tcon, &wrcfile->fid); 3901 cifsFileInfo_put(wrcfile); 3902 } 3903 3904 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) { 3905 if (whence == SEEK_HOLE) 3906 offset = i_size_read(inode); 3907 goto lseek_exit; 3908 } 3909 3910 in_data.file_offset = cpu_to_le64(offset); 3911 in_data.length = cpu_to_le64(i_size_read(inode)); 3912 3913 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 3914 cfile->fid.volatile_fid, 3915 FSCTL_QUERY_ALLOCATED_RANGES, 3916 (char *)&in_data, sizeof(in_data), 3917 sizeof(struct file_allocated_range_buffer), 3918 (char **)&out_data, &out_data_len); 3919 if (rc == -E2BIG) 3920 rc = 0; 3921 if (rc) 3922 goto lseek_exit; 3923 3924 if (whence == SEEK_HOLE && out_data_len == 0) 3925 goto lseek_exit; 3926 3927 if (whence == SEEK_DATA && out_data_len == 0) { 3928 rc = -ENXIO; 3929 goto lseek_exit; 3930 } 3931 3932 if (out_data_len < sizeof(struct file_allocated_range_buffer)) { 3933 rc = -EINVAL; 3934 goto lseek_exit; 3935 } 3936 if (whence == SEEK_DATA) { 3937 offset = le64_to_cpu(out_data->file_offset); 3938 goto lseek_exit; 3939 } 3940 if (offset < le64_to_cpu(out_data->file_offset)) 3941 goto lseek_exit; 3942 3943 offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length); 3944 3945 lseek_exit: 3946 free_xid(xid); 3947 kfree(out_data); 3948 if (!rc) 3949 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 3950 else 3951 return rc; 3952 } 3953 3954 static int smb3_fiemap(struct cifs_tcon *tcon, 3955 struct cifsFileInfo *cfile, 3956 struct fiemap_extent_info *fei, u64 start, u64 len) 3957 { 3958 unsigned int xid; 3959 struct file_allocated_range_buffer in_data, *out_data; 3960 u32 out_data_len; 3961 int i, num, rc, flags, last_blob; 3962 u64 next; 3963 3964 rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0); 3965 if (rc) 3966 return rc; 3967 3968 xid = get_xid(); 3969 again: 3970 in_data.file_offset = cpu_to_le64(start); 3971 in_data.length = cpu_to_le64(len); 3972 3973 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 3974 cfile->fid.volatile_fid, 3975 FSCTL_QUERY_ALLOCATED_RANGES, 3976 (char *)&in_data, sizeof(in_data), 3977 1024 * sizeof(struct file_allocated_range_buffer), 3978 (char **)&out_data, &out_data_len); 3979 if (rc == -E2BIG) { 3980 last_blob = 0; 3981 rc = 0; 3982 } else 3983 last_blob = 1; 3984 if (rc) 3985 
goto out; 3986 3987 if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) { 3988 rc = -EINVAL; 3989 goto out; 3990 } 3991 if (out_data_len % sizeof(struct file_allocated_range_buffer)) { 3992 rc = -EINVAL; 3993 goto out; 3994 } 3995 3996 num = out_data_len / sizeof(struct file_allocated_range_buffer); 3997 for (i = 0; i < num; i++) { 3998 flags = 0; 3999 if (i == num - 1 && last_blob) 4000 flags |= FIEMAP_EXTENT_LAST; 4001 4002 rc = fiemap_fill_next_extent(fei, 4003 le64_to_cpu(out_data[i].file_offset), 4004 le64_to_cpu(out_data[i].file_offset), 4005 le64_to_cpu(out_data[i].length), 4006 flags); 4007 if (rc < 0) 4008 goto out; 4009 if (rc == 1) { 4010 rc = 0; 4011 goto out; 4012 } 4013 } 4014 4015 if (!last_blob) { 4016 next = le64_to_cpu(out_data[num - 1].file_offset) + 4017 le64_to_cpu(out_data[num - 1].length); 4018 len = len - (next - start); 4019 start = next; 4020 goto again; 4021 } 4022 4023 out: 4024 free_xid(xid); 4025 kfree(out_data); 4026 return rc; 4027 } 4028 4029 static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode, 4030 loff_t off, loff_t len) 4031 { 4032 /* KEEP_SIZE already checked for by do_fallocate */ 4033 if (mode & FALLOC_FL_PUNCH_HOLE) 4034 return smb3_punch_hole(file, tcon, off, len); 4035 else if (mode & FALLOC_FL_ZERO_RANGE) { 4036 if (mode & FALLOC_FL_KEEP_SIZE) 4037 return smb3_zero_range(file, tcon, off, len, true); 4038 return smb3_zero_range(file, tcon, off, len, false); 4039 } else if (mode == FALLOC_FL_KEEP_SIZE) 4040 return smb3_simple_falloc(file, tcon, off, len, true); 4041 else if (mode == FALLOC_FL_COLLAPSE_RANGE) 4042 return smb3_collapse_range(file, tcon, off, len); 4043 else if (mode == FALLOC_FL_INSERT_RANGE) 4044 return smb3_insert_range(file, tcon, off, len); 4045 else if (mode == 0) 4046 return smb3_simple_falloc(file, tcon, off, len, false); 4047 4048 return -EOPNOTSUPP; 4049 } 4050 4051 static void 4052 smb2_downgrade_oplock(struct TCP_Server_Info *server, 4053 struct cifsInodeInfo *cinode, __u32 oplock, 4054 __u16 epoch, bool *purge_cache) 4055 { 4056 server->ops->set_oplock_level(cinode, oplock, 0, NULL); 4057 } 4058 4059 static void 4060 smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, 4061 __u16 epoch, bool *purge_cache); 4062 4063 static void 4064 smb3_downgrade_oplock(struct TCP_Server_Info *server, 4065 struct cifsInodeInfo *cinode, __u32 oplock, 4066 __u16 epoch, bool *purge_cache) 4067 { 4068 unsigned int old_state = cinode->oplock; 4069 __u16 old_epoch = cinode->epoch; 4070 unsigned int new_state; 4071 4072 if (epoch > old_epoch) { 4073 smb21_set_oplock_level(cinode, oplock, 0, NULL); 4074 cinode->epoch = epoch; 4075 } 4076 4077 new_state = cinode->oplock; 4078 *purge_cache = false; 4079 4080 if ((old_state & CIFS_CACHE_READ_FLG) != 0 && 4081 (new_state & CIFS_CACHE_READ_FLG) == 0) 4082 *purge_cache = true; 4083 else if (old_state == new_state && (epoch - old_epoch > 1)) 4084 *purge_cache = true; 4085 } 4086 4087 static void 4088 smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, 4089 __u16 epoch, bool *purge_cache) 4090 { 4091 oplock &= 0xFF; 4092 cinode->lease_granted = false; 4093 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE) 4094 return; 4095 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) { 4096 cinode->oplock = CIFS_CACHE_RHW_FLG; 4097 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n", 4098 &cinode->netfs.inode); 4099 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) { 4100 cinode->oplock = CIFS_CACHE_RW_FLG; 4101 cifs_dbg(FYI, "Exclusive Oplock granted on 
inode %p\n", 4102 &cinode->netfs.inode); 4103 } else if (oplock == SMB2_OPLOCK_LEVEL_II) { 4104 cinode->oplock = CIFS_CACHE_READ_FLG; 4105 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n", 4106 &cinode->netfs.inode); 4107 } else 4108 cinode->oplock = 0; 4109 } 4110 4111 static void 4112 smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, 4113 __u16 epoch, bool *purge_cache) 4114 { 4115 char message[5] = {0}; 4116 unsigned int new_oplock = 0; 4117 4118 oplock &= 0xFF; 4119 cinode->lease_granted = true; 4120 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE) 4121 return; 4122 4123 /* Check if the server granted an oplock rather than a lease */ 4124 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE) 4125 return smb2_set_oplock_level(cinode, oplock, epoch, 4126 purge_cache); 4127 4128 if (oplock & SMB2_LEASE_READ_CACHING_HE) { 4129 new_oplock |= CIFS_CACHE_READ_FLG; 4130 strcat(message, "R"); 4131 } 4132 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) { 4133 new_oplock |= CIFS_CACHE_HANDLE_FLG; 4134 strcat(message, "H"); 4135 } 4136 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) { 4137 new_oplock |= CIFS_CACHE_WRITE_FLG; 4138 strcat(message, "W"); 4139 } 4140 if (!new_oplock) 4141 strscpy(message, "None"); 4142 4143 cinode->oplock = new_oplock; 4144 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message, 4145 &cinode->netfs.inode); 4146 } 4147 4148 static void 4149 smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, 4150 __u16 epoch, bool *purge_cache) 4151 { 4152 unsigned int old_oplock = cinode->oplock; 4153 4154 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache); 4155 4156 if (purge_cache) { 4157 *purge_cache = false; 4158 if (old_oplock == CIFS_CACHE_READ_FLG) { 4159 if (cinode->oplock == CIFS_CACHE_READ_FLG && 4160 (epoch - cinode->epoch > 0)) 4161 *purge_cache = true; 4162 else if (cinode->oplock == CIFS_CACHE_RH_FLG && 4163 (epoch - cinode->epoch > 1)) 4164 *purge_cache = true; 4165 else if (cinode->oplock == CIFS_CACHE_RHW_FLG && 4166 (epoch - cinode->epoch > 1)) 4167 *purge_cache = true; 4168 else if (cinode->oplock == 0 && 4169 (epoch - cinode->epoch > 0)) 4170 *purge_cache = true; 4171 } else if (old_oplock == CIFS_CACHE_RH_FLG) { 4172 if (cinode->oplock == CIFS_CACHE_RH_FLG && 4173 (epoch - cinode->epoch > 0)) 4174 *purge_cache = true; 4175 else if (cinode->oplock == CIFS_CACHE_RHW_FLG && 4176 (epoch - cinode->epoch > 1)) 4177 *purge_cache = true; 4178 } 4179 cinode->epoch = epoch; 4180 } 4181 } 4182 4183 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 4184 static bool 4185 smb2_is_read_op(__u32 oplock) 4186 { 4187 return oplock == SMB2_OPLOCK_LEVEL_II; 4188 } 4189 #endif /* CIFS_ALLOW_INSECURE_LEGACY */ 4190 4191 static bool 4192 smb21_is_read_op(__u32 oplock) 4193 { 4194 return (oplock & SMB2_LEASE_READ_CACHING_HE) && 4195 !(oplock & SMB2_LEASE_WRITE_CACHING_HE); 4196 } 4197 4198 static __le32 4199 map_oplock_to_lease(u8 oplock) 4200 { 4201 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) 4202 return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE; 4203 else if (oplock == SMB2_OPLOCK_LEVEL_II) 4204 return SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE; 4205 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH) 4206 return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE | 4207 SMB2_LEASE_WRITE_CACHING_LE; 4208 return 0; 4209 } 4210 4211 static char * 4212 smb2_create_lease_buf(u8 *lease_key, u8 oplock, u8 *parent_lease_key, __le32 flags) 4213 { 4214 struct create_lease *buf; 4215 4216 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL); 4217 if (!buf) 4218 
return NULL; 4219 4220 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); 4221 buf->lcontext.LeaseState = map_oplock_to_lease(oplock); 4222 4223 buf->ccontext.DataOffset = cpu_to_le16(offsetof 4224 (struct create_lease, lcontext)); 4225 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context)); 4226 buf->ccontext.NameOffset = cpu_to_le16(offsetof 4227 (struct create_lease, Name)); 4228 buf->ccontext.NameLength = cpu_to_le16(4); 4229 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */ 4230 buf->Name[0] = 'R'; 4231 buf->Name[1] = 'q'; 4232 buf->Name[2] = 'L'; 4233 buf->Name[3] = 's'; 4234 return (char *)buf; 4235 } 4236 4237 static char * 4238 smb3_create_lease_buf(u8 *lease_key, u8 oplock, u8 *parent_lease_key, __le32 flags) 4239 { 4240 struct create_lease_v2 *buf; 4241 4242 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL); 4243 if (!buf) 4244 return NULL; 4245 4246 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); 4247 buf->lcontext.LeaseState = map_oplock_to_lease(oplock); 4248 buf->lcontext.LeaseFlags = flags; 4249 if (flags & SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE) 4250 memcpy(&buf->lcontext.ParentLeaseKey, parent_lease_key, SMB2_LEASE_KEY_SIZE); 4251 4252 buf->ccontext.DataOffset = cpu_to_le16(offsetof 4253 (struct create_lease_v2, lcontext)); 4254 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2)); 4255 buf->ccontext.NameOffset = cpu_to_le16(offsetof 4256 (struct create_lease_v2, Name)); 4257 buf->ccontext.NameLength = cpu_to_le16(4); 4258 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */ 4259 buf->Name[0] = 'R'; 4260 buf->Name[1] = 'q'; 4261 buf->Name[2] = 'L'; 4262 buf->Name[3] = 's'; 4263 return (char *)buf; 4264 } 4265 4266 static __u8 4267 smb2_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key) 4268 { 4269 struct create_lease *lc = (struct create_lease *)buf; 4270 4271 *epoch = 0; /* not used */ 4272 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE) 4273 return SMB2_OPLOCK_LEVEL_NOCHANGE; 4274 return le32_to_cpu(lc->lcontext.LeaseState); 4275 } 4276 4277 static __u8 4278 smb3_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key) 4279 { 4280 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf; 4281 4282 *epoch = le16_to_cpu(lc->lcontext.Epoch); 4283 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE) 4284 return SMB2_OPLOCK_LEVEL_NOCHANGE; 4285 if (lease_key) 4286 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); 4287 return le32_to_cpu(lc->lcontext.LeaseState); 4288 } 4289 4290 static unsigned int 4291 smb2_wp_retry_size(struct inode *inode) 4292 { 4293 return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize, 4294 SMB2_MAX_BUFFER_SIZE); 4295 } 4296 4297 static bool 4298 smb2_dir_needs_close(struct cifsFileInfo *cfile) 4299 { 4300 return !cfile->invalidHandle; 4301 } 4302 4303 static void 4304 fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len, 4305 struct smb_rqst *old_rq, __le16 cipher_type) 4306 { 4307 struct smb2_hdr *shdr = 4308 (struct smb2_hdr *)old_rq->rq_iov[0].iov_base; 4309 4310 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr)); 4311 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM; 4312 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len); 4313 tr_hdr->Flags = cpu_to_le16(0x01); 4314 if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) || 4315 (cipher_type == SMB2_ENCRYPTION_AES256_GCM)) 4316 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE); 4317 else 4318 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE); 4319 
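/* Only the first SMB3_AES_GCM_NONCE (GCM) or SMB3_AES_CCM_NONCE (CCM) bytes of the Nonce field are randomized; the rest of the field stays zero from the memset above. */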
memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8); 4320 } 4321 4322 static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst, 4323 int num_rqst, const u8 *sig, u8 **iv, 4324 struct aead_request **req, struct sg_table *sgt, 4325 unsigned int *num_sgs) 4326 { 4327 unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm); 4328 unsigned int iv_size = crypto_aead_ivsize(tfm); 4329 unsigned int len; 4330 u8 *p; 4331 4332 *num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig); 4333 if (IS_ERR_VALUE((long)(int)*num_sgs)) 4334 return ERR_PTR(*num_sgs); 4335 4336 len = iv_size; 4337 len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); 4338 len = ALIGN(len, crypto_tfm_ctx_alignment()); 4339 len += req_size; 4340 len = ALIGN(len, __alignof__(struct scatterlist)); 4341 len += array_size(*num_sgs, sizeof(struct scatterlist)); 4342 4343 p = kzalloc(len, GFP_NOFS); 4344 if (!p) 4345 return ERR_PTR(-ENOMEM); 4346 4347 *iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1); 4348 *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size, 4349 crypto_tfm_ctx_alignment()); 4350 sgt->sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size, 4351 __alignof__(struct scatterlist)); 4352 return p; 4353 } 4354 4355 static void *smb2_get_aead_req(struct crypto_aead *tfm, struct smb_rqst *rqst, 4356 int num_rqst, const u8 *sig, u8 **iv, 4357 struct aead_request **req, struct scatterlist **sgl) 4358 { 4359 struct sg_table sgtable = {}; 4360 unsigned int skip, num_sgs, i, j; 4361 ssize_t rc; 4362 void *p; 4363 4364 p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable, &num_sgs); 4365 if (IS_ERR(p)) 4366 return ERR_CAST(p); 4367 4368 sg_init_marker(sgtable.sgl, num_sgs); 4369 4370 /* 4371 * The first rqst has a transform header where the 4372 * first 20 bytes are not part of the encrypted blob. 4373 */ 4374 skip = 20; 4375 4376 for (i = 0; i < num_rqst; i++) { 4377 struct iov_iter *iter = &rqst[i].rq_iter; 4378 size_t count = iov_iter_count(iter); 4379 4380 for (j = 0; j < rqst[i].rq_nvec; j++) { 4381 cifs_sg_set_buf(&sgtable, 4382 rqst[i].rq_iov[j].iov_base + skip, 4383 rqst[i].rq_iov[j].iov_len - skip); 4384 4385 /* See the above comment on the 'skip' assignment */ 4386 skip = 0; 4387 } 4388 sgtable.orig_nents = sgtable.nents; 4389 4390 rc = extract_iter_to_sg(iter, count, &sgtable, 4391 num_sgs - sgtable.nents, 0); 4392 iov_iter_revert(iter, rc); 4393 sgtable.orig_nents = sgtable.nents; 4394 } 4395 4396 cifs_sg_set_buf(&sgtable, sig, SMB2_SIGNATURE_SIZE); 4397 sg_mark_end(&sgtable.sgl[sgtable.nents - 1]); 4398 *sgl = sgtable.sgl; 4399 return p; 4400 } 4401 4402 static int 4403 smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) 4404 { 4405 struct TCP_Server_Info *pserver; 4406 struct cifs_ses *ses; 4407 u8 *ses_enc_key; 4408 4409 /* If server is a channel, select the primary channel */ 4410 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; 4411 4412 spin_lock(&cifs_tcp_ses_lock); 4413 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 4414 if (ses->Suid == ses_id) { 4415 spin_lock(&ses->ses_lock); 4416 ses_enc_key = enc ? ses->smb3encryptionkey : 4417 ses->smb3decryptionkey; 4418 memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); 4419 spin_unlock(&ses->ses_lock); 4420 spin_unlock(&cifs_tcp_ses_lock); 4421 return 0; 4422 } 4423 } 4424 spin_unlock(&cifs_tcp_ses_lock); 4425 4426 trace_smb3_ses_not_found(ses_id); 4427 4428 return -EAGAIN; 4429 } 4430 /* 4431 * Encrypt or decrypt @rqst message. 
@rqst[0] has the following format: 4432 * iov[0] - transform header (associate data), 4433 * iov[1-N] - SMB2 header and pages - data to encrypt. 4434 * On success return encrypted data in iov[1-N] and pages, leave iov[0] 4435 * untouched. 4436 */ 4437 static int 4438 crypt_message(struct TCP_Server_Info *server, int num_rqst, 4439 struct smb_rqst *rqst, int enc, struct crypto_aead *tfm) 4440 { 4441 struct smb2_transform_hdr *tr_hdr = 4442 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base; 4443 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20; 4444 int rc = 0; 4445 struct scatterlist *sg; 4446 u8 sign[SMB2_SIGNATURE_SIZE] = {}; 4447 u8 key[SMB3_ENC_DEC_KEY_SIZE]; 4448 struct aead_request *req; 4449 u8 *iv; 4450 DECLARE_CRYPTO_WAIT(wait); 4451 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); 4452 void *creq; 4453 4454 rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key); 4455 if (rc) { 4456 cifs_server_dbg(FYI, "%s: Could not get %scryption key. sid: 0x%llx\n", __func__, 4457 enc ? "en" : "de", le64_to_cpu(tr_hdr->SessionId)); 4458 return rc; 4459 } 4460 4461 if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) || 4462 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) 4463 rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE); 4464 else 4465 rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE); 4466 4467 if (rc) { 4468 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc); 4469 return rc; 4470 } 4471 4472 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE); 4473 if (rc) { 4474 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc); 4475 return rc; 4476 } 4477 4478 creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg); 4479 if (IS_ERR(creq)) 4480 return PTR_ERR(creq); 4481 4482 if (!enc) { 4483 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE); 4484 crypt_len += SMB2_SIGNATURE_SIZE; 4485 } 4486 4487 if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) || 4488 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) 4489 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE); 4490 else { 4491 iv[0] = 3; 4492 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE); 4493 } 4494 4495 aead_request_set_tfm(req, tfm); 4496 aead_request_set_crypt(req, sg, sg, crypt_len, iv); 4497 aead_request_set_ad(req, assoc_data_len); 4498 4499 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 4500 crypto_req_done, &wait); 4501 4502 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req) 4503 : crypto_aead_decrypt(req), &wait); 4504 4505 if (!rc && enc) 4506 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); 4507 4508 kfree_sensitive(creq); 4509 return rc; 4510 } 4511 4512 /* 4513 * Copy data from an iterator to the folios in a folio queue buffer. 
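* Returns true on success, or false if the source iterator runs out of data before @size bytes have been copied.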
4514 */ 4515 static bool cifs_copy_iter_to_folioq(struct iov_iter *iter, size_t size, 4516 struct folio_queue *buffer) 4517 { 4518 for (; buffer; buffer = buffer->next) { 4519 for (int s = 0; s < folioq_count(buffer); s++) { 4520 struct folio *folio = folioq_folio(buffer, s); 4521 size_t part = folioq_folio_size(buffer, s); 4522 4523 part = umin(part, size); 4524 4525 if (copy_folio_from_iter(folio, 0, part, iter) != part) 4526 return false; 4527 size -= part; 4528 } 4529 } 4530 return true; 4531 } 4532 4533 void 4534 smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst) 4535 { 4536 for (int i = 0; i < num_rqst; i++) 4537 netfs_free_folioq_buffer(rqst[i].rq_buffer); 4538 } 4539 4540 /* 4541 * This function will initialize new_rq and encrypt the content. 4542 * The first entry, new_rq[0], only contains a single iov which contains 4543 * a smb2_transform_hdr and is pre-allocated by the caller. 4544 * This function then populates new_rq[1+] with the content from old_rq[0+]. 4545 * 4546 * The end result is an array of smb_rqst structures where the first structure 4547 * only contains a single iov for the transform header which we then can pass 4548 * to crypt_message(). 4549 * 4550 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller 4551 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests 4552 */ 4553 static int 4554 smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst, 4555 struct smb_rqst *new_rq, struct smb_rqst *old_rq) 4556 { 4557 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base; 4558 unsigned int orig_len = 0; 4559 int rc = -ENOMEM; 4560 4561 for (int i = 1; i < num_rqst; i++) { 4562 struct smb_rqst *old = &old_rq[i - 1]; 4563 struct smb_rqst *new = &new_rq[i]; 4564 struct folio_queue *buffer = NULL; 4565 size_t size = iov_iter_count(&old->rq_iter); 4566 4567 orig_len += smb_rqst_len(server, old); 4568 new->rq_iov = old->rq_iov; 4569 new->rq_nvec = old->rq_nvec; 4570 4571 if (size > 0) { 4572 size_t cur_size = 0; 4573 rc = netfs_alloc_folioq_buffer(NULL, &buffer, &cur_size, 4574 size, GFP_NOFS); 4575 if (rc < 0) 4576 goto err_free; 4577 4578 new->rq_buffer = buffer; 4579 iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE, 4580 buffer, 0, 0, size); 4581 4582 if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) { 4583 rc = smb_EIO1(smb_eio_trace_tx_copy_iter_to_buf, size); 4584 goto err_free; 4585 } 4586 } 4587 } 4588 4589 /* fill the 1st iov with a transform header */ 4590 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type); 4591 4592 rc = crypt_message(server, num_rqst, new_rq, 1, server->secmech.enc); 4593 cifs_dbg(FYI, "Encrypt message returned %d\n", rc); 4594 if (rc) 4595 goto err_free; 4596 4597 return rc; 4598 4599 err_free: 4600 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]); 4601 return rc; 4602 } 4603 4604 static int 4605 smb3_is_transform_hdr(void *buf) 4606 { 4607 struct smb2_transform_hdr *trhdr = buf; 4608 4609 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM; 4610 } 4611 4612 static int 4613 decrypt_raw_data(struct TCP_Server_Info *server, char *buf, 4614 unsigned int buf_data_size, struct iov_iter *iter, 4615 bool is_offloaded) 4616 { 4617 struct crypto_aead *tfm; 4618 struct smb_rqst rqst = {NULL}; 4619 struct kvec iov[2]; 4620 size_t iter_size = 0; 4621 int rc; 4622 4623 iov[0].iov_base = buf; 4624 iov[0].iov_len = sizeof(struct smb2_transform_hdr); 4625 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr); 4626 iov[1].iov_len = buf_data_size; 4627 4628 rqst.rq_iov = iov;
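/* This matches the layout crypt_message() expects: iov[0] is the transform header (the associated data) and iov[1] is the encrypted SMB2 PDU, which is decrypted in place. */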
4629 rqst.rq_nvec = 2; 4630 if (iter) { 4631 rqst.rq_iter = *iter; 4632 iter_size = iov_iter_count(iter); 4633 } 4634 4635 if (is_offloaded) { 4636 if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) || 4637 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) 4638 tfm = crypto_alloc_aead("gcm(aes)", 0, 0); 4639 else 4640 tfm = crypto_alloc_aead("ccm(aes)", 0, 0); 4641 if (IS_ERR(tfm)) { 4642 rc = PTR_ERR(tfm); 4643 cifs_server_dbg(VFS, "%s: Failed alloc decrypt TFM, rc=%d\n", __func__, rc); 4644 4645 return rc; 4646 } 4647 } else { 4648 rc = smb3_crypto_aead_allocate(server); 4649 if (unlikely(rc)) 4650 return rc; 4651 tfm = server->secmech.dec; 4652 } 4653 4654 rc = crypt_message(server, 1, &rqst, 0, tfm); 4655 cifs_dbg(FYI, "Decrypt message returned %d\n", rc); 4656 4657 if (is_offloaded) 4658 crypto_free_aead(tfm); 4659 4660 if (rc) 4661 return rc; 4662 4663 memmove(buf, iov[1].iov_base, buf_data_size); 4664 4665 if (!is_offloaded) 4666 server->total_read = buf_data_size + iter_size; 4667 4668 return rc; 4669 } 4670 4671 static int 4672 cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size, 4673 size_t skip, struct iov_iter *iter) 4674 { 4675 for (; folioq; folioq = folioq->next) { 4676 for (int s = 0; s < folioq_count(folioq); s++) { 4677 struct folio *folio = folioq_folio(folioq, s); 4678 size_t fsize = folio_size(folio); 4679 size_t n, len = umin(fsize - skip, data_size); 4680 4681 n = copy_folio_to_iter(folio, skip, len, iter); 4682 if (n != len) { 4683 cifs_dbg(VFS, "%s: something went wrong\n", __func__); 4684 return smb_EIO2(smb_eio_trace_rx_copy_to_iter, 4685 n, len); 4686 } 4687 data_size -= n; 4688 skip = 0; 4689 } 4690 } 4691 4692 return 0; 4693 } 4694 4695 static int 4696 handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, 4697 char *buf, unsigned int buf_len, struct folio_queue *buffer, 4698 unsigned int buffer_len, bool is_offloaded) 4699 { 4700 unsigned int data_offset; 4701 unsigned int data_len; 4702 unsigned int cur_off; 4703 unsigned int cur_page_idx; 4704 unsigned int pad_len; 4705 struct cifs_io_subrequest *rdata = mid->callback_data; 4706 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 4707 size_t copied; 4708 bool use_rdma_mr = false; 4709 4710 if (shdr->Command != SMB2_READ) { 4711 cifs_server_dbg(VFS, "only big read responses are supported\n"); 4712 return -EOPNOTSUPP; 4713 } 4714 4715 if (server->ops->is_session_expired && 4716 server->ops->is_session_expired(buf)) { 4717 if (!is_offloaded) 4718 cifs_reconnect(server, true); 4719 return -1; 4720 } 4721 4722 if (server->ops->is_status_pending && 4723 server->ops->is_status_pending(buf, server)) 4724 return -1; 4725 4726 /* set up first two iov to get credits */ 4727 rdata->iov[0].iov_base = buf; 4728 rdata->iov[0].iov_len = 0; 4729 rdata->iov[1].iov_base = buf; 4730 rdata->iov[1].iov_len = 4731 min_t(unsigned int, buf_len, server->vals->read_rsp_size); 4732 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", 4733 rdata->iov[0].iov_base, rdata->iov[0].iov_len); 4734 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", 4735 rdata->iov[1].iov_base, rdata->iov[1].iov_len); 4736 4737 rdata->result = server->ops->map_error(buf, true); 4738 if (rdata->result != 0) { 4739 cifs_dbg(FYI, "%s: server returned error %d\n", 4740 __func__, rdata->result); 4741 /* normal error on read response */ 4742 if (is_offloaded) 4743 mid->mid_state = MID_RESPONSE_RECEIVED; 4744 else 4745 dequeue_mid(server, mid, false); 4746 return 0; 4747 } 4748 4749 data_offset = server->ops->read_data_offset(buf); 4750 
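/* If this read used an SMB Direct memory registration, the payload may have been placed directly by RDMA rather than carried in the response buffer, so the data length is derived accordingly below. */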
#ifdef CONFIG_CIFS_SMB_DIRECT 4751 use_rdma_mr = rdata->mr; 4752 #endif 4753 data_len = server->ops->read_data_length(buf, use_rdma_mr); 4754 4755 if (data_offset < server->vals->read_rsp_size) { 4756 /* 4757 * win2k8 sometimes sends an offset of 0 when the read 4758 * is beyond the EOF. Treat it as if the data starts just after 4759 * the header. 4760 */ 4761 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", 4762 __func__, data_offset); 4763 data_offset = server->vals->read_rsp_size; 4764 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { 4765 /* data_offset is beyond the end of smallbuf */ 4766 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", 4767 __func__, data_offset); 4768 rdata->result = smb_EIO1(smb_eio_trace_rx_overlong, data_offset); 4769 if (is_offloaded) 4770 mid->mid_state = MID_RESPONSE_MALFORMED; 4771 else 4772 dequeue_mid(server, mid, rdata->result); 4773 return 0; 4774 } 4775 4776 pad_len = data_offset - server->vals->read_rsp_size; 4777 4778 if (buf_len <= data_offset) { 4779 /* read response payload is in pages */ 4780 cur_page_idx = pad_len / PAGE_SIZE; 4781 cur_off = pad_len % PAGE_SIZE; 4782 4783 if (cur_page_idx != 0) { 4784 /* data offset is beyond the 1st page of response */ 4785 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n", 4786 __func__, data_offset); 4787 rdata->result = smb_EIO1(smb_eio_trace_rx_overpage, data_offset); 4788 if (is_offloaded) 4789 mid->mid_state = MID_RESPONSE_MALFORMED; 4790 else 4791 dequeue_mid(server, mid, rdata->result); 4792 return 0; 4793 } 4794 4795 if (data_len > buffer_len - pad_len) { 4796 /* data_len is corrupt -- discard frame */ 4797 rdata->result = smb_EIO1(smb_eio_trace_rx_bad_datalen, data_len); 4798 if (is_offloaded) 4799 mid->mid_state = MID_RESPONSE_MALFORMED; 4800 else 4801 dequeue_mid(server, mid, rdata->result); 4802 return 0; 4803 } 4804 4805 /* Copy the data to the output I/O iterator. 
*/ 4806 rdata->result = cifs_copy_folioq_to_iter(buffer, buffer_len, 4807 cur_off, &rdata->subreq.io_iter); 4808 if (rdata->result != 0) { 4809 if (is_offloaded) 4810 mid->mid_state = MID_RESPONSE_MALFORMED; 4811 else 4812 dequeue_mid(server, mid, rdata->result); 4813 return 0; 4814 } 4815 rdata->got_bytes = buffer_len; 4816 4817 } else if (buf_len >= data_offset + data_len) { 4818 /* read response payload is in buf */ 4819 WARN_ONCE(buffer, "read data can be either in buf or in buffer"); 4820 copied = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter); 4821 if (copied == 0) 4822 return smb_EIO2(smb_eio_trace_rx_copy_to_iter, copied, data_len); 4823 rdata->got_bytes = copied; 4824 } else { 4825 /* read response payload cannot be in both buf and pages */ 4826 WARN_ONCE(1, "buf can not contain only a part of read data"); 4827 rdata->result = smb_EIO(smb_eio_trace_rx_both_buf); 4828 if (is_offloaded) 4829 mid->mid_state = MID_RESPONSE_MALFORMED; 4830 else 4831 dequeue_mid(server, mid, rdata->result); 4832 return 0; 4833 } 4834 4835 if (is_offloaded) 4836 mid->mid_state = MID_RESPONSE_RECEIVED; 4837 else 4838 dequeue_mid(server, mid, false); 4839 return 0; 4840 } 4841 4842 struct smb2_decrypt_work { 4843 struct work_struct decrypt; 4844 struct TCP_Server_Info *server; 4845 struct folio_queue *buffer; 4846 char *buf; 4847 unsigned int len; 4848 }; 4849 4850 4851 static void smb2_decrypt_offload(struct work_struct *work) 4852 { 4853 struct smb2_decrypt_work *dw = container_of(work, 4854 struct smb2_decrypt_work, decrypt); 4855 int rc; 4856 struct mid_q_entry *mid; 4857 struct iov_iter iter; 4858 4859 iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len); 4860 rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size, 4861 &iter, true); 4862 if (rc) { 4863 cifs_dbg(VFS, "error decrypting rc=%d\n", rc); 4864 goto free_pages; 4865 } 4866 4867 dw->server->lstrp = jiffies; 4868 mid = smb2_find_dequeue_mid(dw->server, dw->buf); 4869 if (mid == NULL) 4870 cifs_dbg(FYI, "mid not found\n"); 4871 else { 4872 mid->decrypted = true; 4873 rc = handle_read_data(dw->server, mid, dw->buf, 4874 dw->server->vals->read_rsp_size, 4875 dw->buffer, dw->len, 4876 true); 4877 if (rc >= 0) { 4878 #ifdef CONFIG_CIFS_STATS2 4879 mid->when_received = jiffies; 4880 #endif 4881 if (dw->server->ops->is_network_name_deleted) 4882 dw->server->ops->is_network_name_deleted(dw->buf, 4883 dw->server); 4884 4885 mid_execute_callback(dw->server, mid); 4886 } else { 4887 spin_lock(&dw->server->srv_lock); 4888 if (dw->server->tcpStatus == CifsNeedReconnect) { 4889 spin_lock(&dw->server->mid_queue_lock); 4890 mid->mid_state = MID_RETRY_NEEDED; 4891 spin_unlock(&dw->server->mid_queue_lock); 4892 spin_unlock(&dw->server->srv_lock); 4893 mid_execute_callback(dw->server, mid); 4894 } else { 4895 spin_lock(&dw->server->mid_queue_lock); 4896 mid->mid_state = MID_REQUEST_SUBMITTED; 4897 mid->deleted_from_q = false; 4898 list_add_tail(&mid->qhead, 4899 &dw->server->pending_mid_q); 4900 spin_unlock(&dw->server->mid_queue_lock); 4901 spin_unlock(&dw->server->srv_lock); 4902 } 4903 } 4904 release_mid(dw->server, mid); 4905 } 4906 4907 free_pages: 4908 netfs_free_folioq_buffer(dw->buffer); 4909 cifs_small_buf_release(dw->buf); 4910 kfree(dw); 4911 } 4912 4913 4914 static int 4915 receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid, 4916 int *num_mids) 4917 { 4918 char *buf = server->smallbuf; 4919 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf; 4920 struct 
iov_iter iter; 4921 unsigned int len; 4922 unsigned int buflen = server->pdu_size; 4923 int rc; 4924 struct smb2_decrypt_work *dw; 4925 4926 dw = kzalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL); 4927 if (!dw) 4928 return -ENOMEM; 4929 INIT_WORK(&dw->decrypt, smb2_decrypt_offload); 4930 dw->server = server; 4931 4932 *num_mids = 1; 4933 len = min_t(unsigned int, buflen, server->vals->read_rsp_size + 4934 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1; 4935 4936 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len); 4937 if (rc < 0) 4938 goto free_dw; 4939 server->total_read += rc; 4940 4941 len = le32_to_cpu(tr_hdr->OriginalMessageSize) - 4942 server->vals->read_rsp_size; 4943 dw->len = len; 4944 len = round_up(dw->len, PAGE_SIZE); 4945 4946 size_t cur_size = 0; 4947 rc = netfs_alloc_folioq_buffer(NULL, &dw->buffer, &cur_size, len, GFP_NOFS); 4948 if (rc < 0) 4949 goto discard_data; 4950 4951 iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len); 4952 4953 /* Read the data into the buffer and clear excess bufferage. */ 4954 rc = cifs_read_iter_from_socket(server, &iter, dw->len); 4955 if (rc < 0) 4956 goto discard_data; 4957 4958 server->total_read += rc; 4959 if (rc < len) { 4960 struct iov_iter tmp = iter; 4961 4962 iov_iter_advance(&tmp, rc); 4963 iov_iter_zero(len - rc, &tmp); 4964 } 4965 iov_iter_truncate(&iter, dw->len); 4966 4967 rc = cifs_discard_remaining_data(server); 4968 if (rc) 4969 goto free_pages; 4970 4971 /* 4972 * For large reads, offload to different thread for better performance, 4973 * use more cores decrypting which can be expensive 4974 */ 4975 4976 if ((server->min_offload) && (server->in_flight > 1) && 4977 (server->pdu_size >= server->min_offload)) { 4978 dw->buf = server->smallbuf; 4979 server->smallbuf = (char *)cifs_small_buf_get(); 4980 4981 queue_work(decrypt_wq, &dw->decrypt); 4982 *num_mids = 0; /* worker thread takes care of finding mid */ 4983 return -1; 4984 } 4985 4986 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size, 4987 &iter, false); 4988 if (rc) 4989 goto free_pages; 4990 4991 *mid = smb2_find_mid(server, buf); 4992 if (*mid == NULL) { 4993 cifs_dbg(FYI, "mid not found\n"); 4994 } else { 4995 cifs_dbg(FYI, "mid found\n"); 4996 (*mid)->decrypted = true; 4997 rc = handle_read_data(server, *mid, buf, 4998 server->vals->read_rsp_size, 4999 dw->buffer, dw->len, false); 5000 if (rc >= 0) { 5001 if (server->ops->is_network_name_deleted) { 5002 server->ops->is_network_name_deleted(buf, 5003 server); 5004 } 5005 } 5006 } 5007 5008 free_pages: 5009 netfs_free_folioq_buffer(dw->buffer); 5010 free_dw: 5011 kfree(dw); 5012 return rc; 5013 discard_data: 5014 cifs_discard_remaining_data(server); 5015 goto free_pages; 5016 } 5017 5018 static int 5019 receive_encrypted_standard(struct TCP_Server_Info *server, 5020 struct mid_q_entry **mids, char **bufs, 5021 int *num_mids) 5022 { 5023 int ret, length; 5024 char *buf = server->smallbuf; 5025 struct smb2_hdr *shdr; 5026 unsigned int pdu_length = server->pdu_size; 5027 unsigned int buf_size; 5028 unsigned int next_cmd; 5029 struct mid_q_entry *mid_entry; 5030 int next_is_large; 5031 char *next_buffer = NULL; 5032 5033 *num_mids = 0; 5034 5035 /* switch to large buffer if too big for a small one */ 5036 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) { 5037 server->large_buf = true; 5038 memcpy(server->bigbuf, buf, server->total_read); 5039 buf = server->bigbuf; 5040 } 5041 5042 /* now read the rest */ 5043 length = cifs_read_from_socket(server, buf + 
HEADER_SIZE(server) - 1, 5044 pdu_length - HEADER_SIZE(server) + 1); 5045 if (length < 0) 5046 return length; 5047 server->total_read += length; 5048 5049 buf_size = pdu_length - sizeof(struct smb2_transform_hdr); 5050 length = decrypt_raw_data(server, buf, buf_size, NULL, false); 5051 if (length) 5052 return length; 5053 5054 next_is_large = server->large_buf; 5055 one_more: 5056 shdr = (struct smb2_hdr *)buf; 5057 next_cmd = le32_to_cpu(shdr->NextCommand); 5058 if (next_cmd) { 5059 if (WARN_ON_ONCE(next_cmd > pdu_length)) 5060 return -1; 5061 if (next_is_large) 5062 next_buffer = (char *)cifs_buf_get(); 5063 else 5064 next_buffer = (char *)cifs_small_buf_get(); 5065 if (!next_buffer) { 5066 cifs_server_dbg(VFS, "No memory for (large) SMB response\n"); 5067 return -1; 5068 } 5069 memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd); 5070 } 5071 5072 mid_entry = smb2_find_mid(server, buf); 5073 if (mid_entry == NULL) 5074 cifs_dbg(FYI, "mid not found\n"); 5075 else { 5076 cifs_dbg(FYI, "mid found\n"); 5077 mid_entry->decrypted = true; 5078 mid_entry->resp_buf_size = server->pdu_size; 5079 } 5080 5081 if (*num_mids >= MAX_COMPOUND) { 5082 cifs_server_dbg(VFS, "too many PDUs in compound\n"); 5083 return -1; 5084 } 5085 bufs[*num_mids] = buf; 5086 mids[(*num_mids)++] = mid_entry; 5087 5088 if (mid_entry && mid_entry->handle) 5089 ret = mid_entry->handle(server, mid_entry); 5090 else 5091 ret = cifs_handle_standard(server, mid_entry); 5092 5093 if (ret == 0 && next_cmd) { 5094 pdu_length -= next_cmd; 5095 server->large_buf = next_is_large; 5096 if (next_is_large) 5097 server->bigbuf = buf = next_buffer; 5098 else 5099 server->smallbuf = buf = next_buffer; 5100 goto one_more; 5101 } else if (ret != 0) { 5102 /* 5103 * ret != 0 here means that we didn't get to handle_mid() thus 5104 * server->smallbuf and server->bigbuf are still valid. We need 5105 * to free next_buffer because it is not going to be used 5106 * anywhere. 5107 */ 5108 if (next_is_large) 5109 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer); 5110 else 5111 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer); 5112 } 5113 5114 return ret; 5115 } 5116 5117 static int 5118 smb3_receive_transform(struct TCP_Server_Info *server, 5119 struct mid_q_entry **mids, char **bufs, int *num_mids) 5120 { 5121 char *buf = server->smallbuf; 5122 unsigned int pdu_length = server->pdu_size; 5123 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf; 5124 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize); 5125 5126 if (pdu_length < sizeof(struct smb2_transform_hdr) + 5127 sizeof(struct smb2_hdr)) { 5128 cifs_server_dbg(VFS, "Transform message is too small (%u)\n", 5129 pdu_length); 5130 cifs_reconnect(server, true); 5131 return -ECONNABORTED; 5132 } 5133 5134 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) { 5135 cifs_server_dbg(VFS, "Transform message is broken\n"); 5136 cifs_reconnect(server, true); 5137 return -ECONNABORTED; 5138 } 5139 5140 /* TODO: add support for compounds containing READ. */ 5141 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) { 5142 return receive_encrypted_read(server, &mids[0], num_mids); 5143 } 5144 5145 return receive_encrypted_standard(server, mids, bufs, num_mids); 5146 } 5147 5148 int 5149 smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid) 5150 { 5151 char *buf = server->large_buf ? 
server->bigbuf : server->smallbuf; 5152 5153 return handle_read_data(server, mid, buf, server->pdu_size, 5154 NULL, 0, false); 5155 } 5156 5157 static int smb2_next_header(struct TCP_Server_Info *server, char *buf, 5158 unsigned int *noff) 5159 { 5160 struct smb2_hdr *hdr = (struct smb2_hdr *)buf; 5161 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf; 5162 5163 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) { 5164 *noff = le32_to_cpu(t_hdr->OriginalMessageSize); 5165 if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff))) 5166 return -EINVAL; 5167 } else { 5168 *noff = le32_to_cpu(hdr->NextCommand); 5169 } 5170 if (unlikely(*noff && *noff < MID_HEADER_SIZE(server))) 5171 return -EINVAL; 5172 return 0; 5173 } 5174 5175 int __cifs_sfu_make_node(unsigned int xid, struct inode *inode, 5176 struct dentry *dentry, struct cifs_tcon *tcon, 5177 const char *full_path, umode_t mode, dev_t dev, 5178 const char *symname) 5179 { 5180 struct TCP_Server_Info *server = tcon->ses->server; 5181 struct cifs_open_parms oparms; 5182 struct cifs_open_info_data idata; 5183 struct cifs_io_parms io_parms = {}; 5184 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 5185 struct cifs_fid fid; 5186 unsigned int bytes_written; 5187 u8 type[8]; 5188 int type_len = 0; 5189 struct { 5190 __le64 major; 5191 __le64 minor; 5192 } __packed pdev = {}; 5193 __le16 *symname_utf16 = NULL; 5194 u8 *data = NULL; 5195 int data_len = 0; 5196 struct kvec iov[3]; 5197 __u32 oplock = server->oplocks ? REQ_OPLOCK : 0; 5198 int rc; 5199 5200 switch (mode & S_IFMT) { 5201 case S_IFCHR: 5202 type_len = 8; 5203 memcpy(type, "IntxCHR\0", type_len); 5204 pdev.major = cpu_to_le64(MAJOR(dev)); 5205 pdev.minor = cpu_to_le64(MINOR(dev)); 5206 data = (u8 *)&pdev; 5207 data_len = sizeof(pdev); 5208 break; 5209 case S_IFBLK: 5210 type_len = 8; 5211 memcpy(type, "IntxBLK\0", type_len); 5212 pdev.major = cpu_to_le64(MAJOR(dev)); 5213 pdev.minor = cpu_to_le64(MINOR(dev)); 5214 data = (u8 *)&pdev; 5215 data_len = sizeof(pdev); 5216 break; 5217 case S_IFLNK: 5218 type_len = 8; 5219 memcpy(type, "IntxLNK\1", type_len); 5220 symname_utf16 = cifs_strndup_to_utf16(symname, strlen(symname), 5221 &data_len, cifs_sb->local_nls, 5222 NO_MAP_UNI_RSVD); 5223 if (!symname_utf16) { 5224 rc = -ENOMEM; 5225 goto out; 5226 } 5227 data_len -= 2; /* symlink is without trailing wide-nul */ 5228 data = (u8 *)symname_utf16; 5229 break; 5230 case S_IFSOCK: 5231 type_len = 8; 5232 strscpy(type, "LnxSOCK"); 5233 data = (u8 *)&pdev; 5234 data_len = sizeof(pdev); 5235 break; 5236 case S_IFIFO: 5237 type_len = 8; 5238 strscpy(type, "LnxFIFO"); 5239 data = (u8 *)&pdev; 5240 data_len = sizeof(pdev); 5241 break; 5242 default: 5243 rc = -EPERM; 5244 goto out; 5245 } 5246 5247 oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE, 5248 FILE_CREATE, CREATE_NOT_DIR | 5249 CREATE_OPTION_SPECIAL, ACL_NO_MODE); 5250 oparms.fid = &fid; 5251 idata.contains_posix_file_info = false; 5252 rc = server->ops->open(xid, &oparms, &oplock, &idata); 5253 if (rc) 5254 goto out; 5255 5256 /* 5257 * Check if the server honored ATTR_SYSTEM flag by CREATE_OPTION_SPECIAL 5258 * option. If not then server does not support ATTR_SYSTEM and newly 5259 * created file is not SFU compatible, which means that the call failed. 
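* (The SYSTEM attribute is also what allows SFU-aware clients to recognize the emulated special file, rather than treating the type/data blob written below as ordinary file contents.)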
5260 */ 5261 if (!(le32_to_cpu(idata.fi.Attributes) & ATTR_SYSTEM)) { 5262 rc = -EOPNOTSUPP; 5263 goto out_close; 5264 } 5265 5266 if (type_len + data_len > 0) { 5267 io_parms.pid = current->tgid; 5268 io_parms.tcon = tcon; 5269 io_parms.length = type_len + data_len; 5270 iov[1].iov_base = type; 5271 iov[1].iov_len = type_len; 5272 iov[2].iov_base = data; 5273 iov[2].iov_len = data_len; 5274 5275 rc = server->ops->sync_write(xid, &fid, &io_parms, 5276 &bytes_written, 5277 iov, ARRAY_SIZE(iov)-1); 5278 } 5279 5280 out_close: 5281 server->ops->close(xid, tcon, &fid); 5282 5283 /* 5284 * If CREATE was successful but either setting ATTR_SYSTEM failed or 5285 * writing type/data information failed then remove the intermediate 5286 * object created by CREATE. Otherwise the intermediate empty object stays 5287 * on the server. 5288 */ 5289 if (rc) 5290 server->ops->unlink(xid, tcon, full_path, cifs_sb, NULL); 5291 5292 out: 5293 kfree(symname_utf16); 5294 return rc; 5295 } 5296 5297 int cifs_sfu_make_node(unsigned int xid, struct inode *inode, 5298 struct dentry *dentry, struct cifs_tcon *tcon, 5299 const char *full_path, umode_t mode, dev_t dev) 5300 { 5301 struct inode *new = NULL; 5302 int rc; 5303 5304 rc = __cifs_sfu_make_node(xid, inode, dentry, tcon, 5305 full_path, mode, dev, NULL); 5306 if (rc) 5307 return rc; 5308 5309 if (tcon->posix_extensions) { 5310 rc = smb311_posix_get_inode_info(&new, full_path, NULL, 5311 inode->i_sb, xid); 5312 } else if (tcon->unix_ext) { 5313 rc = cifs_get_inode_info_unix(&new, full_path, 5314 inode->i_sb, xid); 5315 } else { 5316 rc = cifs_get_inode_info(&new, full_path, NULL, 5317 inode->i_sb, xid, NULL); 5318 } 5319 if (!rc) 5320 d_instantiate(dentry, new); 5321 return rc; 5322 } 5323 5324 static int smb2_make_node(unsigned int xid, struct inode *inode, 5325 struct dentry *dentry, struct cifs_tcon *tcon, 5326 const char *full_path, umode_t mode, dev_t dev) 5327 { 5328 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 5329 int rc = -EOPNOTSUPP; 5330 5331 /* 5332 * Check if mounted with the 'sfu' mount parm.
5333 * SFU emulation should work with all servers, but only 5334 * supports block and char device, socket & fifo, 5335 * and was used by default in earlier versions of Windows 5336 */ 5337 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { 5338 rc = cifs_sfu_make_node(xid, inode, dentry, tcon, 5339 full_path, mode, dev); 5340 } else if (CIFS_REPARSE_SUPPORT(tcon)) { 5341 rc = mknod_reparse(xid, inode, dentry, tcon, 5342 full_path, mode, dev); 5343 } 5344 return rc; 5345 } 5346 5347 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 5348 struct smb_version_operations smb20_operations = { 5349 .compare_fids = smb2_compare_fids, 5350 .setup_request = smb2_setup_request, 5351 .setup_async_request = smb2_setup_async_request, 5352 .check_receive = smb2_check_receive, 5353 .add_credits = smb2_add_credits, 5354 .set_credits = smb2_set_credits, 5355 .get_credits_field = smb2_get_credits_field, 5356 .get_credits = smb2_get_credits, 5357 .wait_mtu_credits = cifs_wait_mtu_credits, 5358 .get_next_mid = smb2_get_next_mid, 5359 .revert_current_mid = smb2_revert_current_mid, 5360 .read_data_offset = smb2_read_data_offset, 5361 .read_data_length = smb2_read_data_length, 5362 .map_error = map_smb2_to_linux_error, 5363 .find_mid = smb2_find_mid, 5364 .check_message = smb2_check_message, 5365 .dump_detail = smb2_dump_detail, 5366 .clear_stats = smb2_clear_stats, 5367 .print_stats = smb2_print_stats, 5368 .is_oplock_break = smb2_is_valid_oplock_break, 5369 .handle_cancelled_mid = smb2_handle_cancelled_mid, 5370 .downgrade_oplock = smb2_downgrade_oplock, 5371 .need_neg = smb2_need_neg, 5372 .negotiate = smb2_negotiate, 5373 .negotiate_wsize = smb2_negotiate_wsize, 5374 .negotiate_rsize = smb2_negotiate_rsize, 5375 .sess_setup = SMB2_sess_setup, 5376 .logoff = SMB2_logoff, 5377 .tree_connect = SMB2_tcon, 5378 .tree_disconnect = SMB2_tdis, 5379 .qfs_tcon = smb2_qfs_tcon, 5380 .is_path_accessible = smb2_is_path_accessible, 5381 .can_echo = smb2_can_echo, 5382 .echo = SMB2_echo, 5383 .query_path_info = smb2_query_path_info, 5384 .query_reparse_point = smb2_query_reparse_point, 5385 .get_srv_inum = smb2_get_srv_inum, 5386 .query_file_info = smb2_query_file_info, 5387 .set_path_size = smb2_set_path_size, 5388 .set_file_size = smb2_set_file_size, 5389 .set_file_info = smb2_set_file_info, 5390 .set_compression = smb2_set_compression, 5391 .mkdir = smb2_mkdir, 5392 .mkdir_setinfo = smb2_mkdir_setinfo, 5393 .rmdir = smb2_rmdir, 5394 .unlink = smb2_unlink, 5395 .rename = smb2_rename_path, 5396 .create_hardlink = smb2_create_hardlink, 5397 .get_reparse_point_buffer = smb2_get_reparse_point_buffer, 5398 .query_mf_symlink = smb3_query_mf_symlink, 5399 .create_mf_symlink = smb3_create_mf_symlink, 5400 .create_reparse_inode = smb2_create_reparse_inode, 5401 .open = smb2_open_file, 5402 .set_fid = smb2_set_fid, 5403 .close = smb2_close_file, 5404 .flush = smb2_flush_file, 5405 .async_readv = smb2_async_readv, 5406 .async_writev = smb2_async_writev, 5407 .sync_read = smb2_sync_read, 5408 .sync_write = smb2_sync_write, 5409 .query_dir_first = smb2_query_dir_first, 5410 .query_dir_next = smb2_query_dir_next, 5411 .close_dir = smb2_close_dir, 5412 .calc_smb_size = smb2_calc_size, 5413 .is_status_pending = smb2_is_status_pending, 5414 .is_session_expired = smb2_is_session_expired, 5415 .oplock_response = smb2_oplock_response, 5416 .queryfs = smb2_queryfs, 5417 .mand_lock = smb2_mand_lock, 5418 .mand_unlock_range = smb2_unlock_range, 5419 .push_mand_locks = smb2_push_mandatory_locks, 5420 .get_lease_key = smb2_get_lease_key, 5421 
.set_lease_key = smb2_set_lease_key, 5422 .new_lease_key = smb2_new_lease_key, 5423 .is_read_op = smb2_is_read_op, 5424 .set_oplock_level = smb2_set_oplock_level, 5425 .create_lease_buf = smb2_create_lease_buf, 5426 .parse_lease_buf = smb2_parse_lease_buf, 5427 .copychunk_range = smb2_copychunk_range, 5428 .wp_retry_size = smb2_wp_retry_size, 5429 .dir_needs_close = smb2_dir_needs_close, 5430 .get_dfs_refer = smb2_get_dfs_refer, 5431 .select_sectype = smb2_select_sectype, 5432 #ifdef CONFIG_CIFS_XATTR 5433 .query_all_EAs = smb2_query_eas, 5434 .set_EA = smb2_set_ea, 5435 #endif /* CIFS_XATTR */ 5436 .get_acl = get_smb2_acl, 5437 .get_acl_by_fid = get_smb2_acl_by_fid, 5438 .set_acl = set_smb2_acl, 5439 .next_header = smb2_next_header, 5440 .ioctl_query_info = smb2_ioctl_query_info, 5441 .make_node = smb2_make_node, 5442 .fiemap = smb3_fiemap, 5443 .llseek = smb3_llseek, 5444 .is_status_io_timeout = smb2_is_status_io_timeout, 5445 .is_network_name_deleted = smb2_is_network_name_deleted, 5446 .rename_pending_delete = smb2_rename_pending_delete, 5447 }; 5448 #endif /* CIFS_ALLOW_INSECURE_LEGACY */ 5449 5450 struct smb_version_operations smb21_operations = { 5451 .compare_fids = smb2_compare_fids, 5452 .setup_request = smb2_setup_request, 5453 .setup_async_request = smb2_setup_async_request, 5454 .check_receive = smb2_check_receive, 5455 .add_credits = smb2_add_credits, 5456 .set_credits = smb2_set_credits, 5457 .get_credits_field = smb2_get_credits_field, 5458 .get_credits = smb2_get_credits, 5459 .wait_mtu_credits = smb2_wait_mtu_credits, 5460 .adjust_credits = smb2_adjust_credits, 5461 .get_next_mid = smb2_get_next_mid, 5462 .revert_current_mid = smb2_revert_current_mid, 5463 .read_data_offset = smb2_read_data_offset, 5464 .read_data_length = smb2_read_data_length, 5465 .map_error = map_smb2_to_linux_error, 5466 .find_mid = smb2_find_mid, 5467 .check_message = smb2_check_message, 5468 .dump_detail = smb2_dump_detail, 5469 .clear_stats = smb2_clear_stats, 5470 .print_stats = smb2_print_stats, 5471 .is_oplock_break = smb2_is_valid_oplock_break, 5472 .handle_cancelled_mid = smb2_handle_cancelled_mid, 5473 .downgrade_oplock = smb2_downgrade_oplock, 5474 .need_neg = smb2_need_neg, 5475 .negotiate = smb2_negotiate, 5476 .negotiate_wsize = smb2_negotiate_wsize, 5477 .negotiate_rsize = smb2_negotiate_rsize, 5478 .sess_setup = SMB2_sess_setup, 5479 .logoff = SMB2_logoff, 5480 .tree_connect = SMB2_tcon, 5481 .tree_disconnect = SMB2_tdis, 5482 .qfs_tcon = smb2_qfs_tcon, 5483 .is_path_accessible = smb2_is_path_accessible, 5484 .can_echo = smb2_can_echo, 5485 .echo = SMB2_echo, 5486 .query_path_info = smb2_query_path_info, 5487 .query_reparse_point = smb2_query_reparse_point, 5488 .get_srv_inum = smb2_get_srv_inum, 5489 .query_file_info = smb2_query_file_info, 5490 .set_path_size = smb2_set_path_size, 5491 .set_file_size = smb2_set_file_size, 5492 .set_file_info = smb2_set_file_info, 5493 .set_compression = smb2_set_compression, 5494 .mkdir = smb2_mkdir, 5495 .mkdir_setinfo = smb2_mkdir_setinfo, 5496 .rmdir = smb2_rmdir, 5497 .unlink = smb2_unlink, 5498 .rename = smb2_rename_path, 5499 .create_hardlink = smb2_create_hardlink, 5500 .get_reparse_point_buffer = smb2_get_reparse_point_buffer, 5501 .query_mf_symlink = smb3_query_mf_symlink, 5502 .create_mf_symlink = smb3_create_mf_symlink, 5503 .create_reparse_inode = smb2_create_reparse_inode, 5504 .open = smb2_open_file, 5505 .set_fid = smb2_set_fid, 5506 .close = smb2_close_file, 5507 .flush = smb2_flush_file, 5508 .async_readv = smb2_async_readv, 5509 
.async_writev = smb2_async_writev, 5510 .sync_read = smb2_sync_read, 5511 .sync_write = smb2_sync_write, 5512 .query_dir_first = smb2_query_dir_first, 5513 .query_dir_next = smb2_query_dir_next, 5514 .close_dir = smb2_close_dir, 5515 .calc_smb_size = smb2_calc_size, 5516 .is_status_pending = smb2_is_status_pending, 5517 .is_session_expired = smb2_is_session_expired, 5518 .oplock_response = smb2_oplock_response, 5519 .queryfs = smb2_queryfs, 5520 .mand_lock = smb2_mand_lock, 5521 .mand_unlock_range = smb2_unlock_range, 5522 .push_mand_locks = smb2_push_mandatory_locks, 5523 .get_lease_key = smb2_get_lease_key, 5524 .set_lease_key = smb2_set_lease_key, 5525 .new_lease_key = smb2_new_lease_key, 5526 .is_read_op = smb21_is_read_op, 5527 .set_oplock_level = smb21_set_oplock_level, 5528 .create_lease_buf = smb2_create_lease_buf, 5529 .parse_lease_buf = smb2_parse_lease_buf, 5530 .copychunk_range = smb2_copychunk_range, 5531 .wp_retry_size = smb2_wp_retry_size, 5532 .dir_needs_close = smb2_dir_needs_close, 5533 .enum_snapshots = smb3_enum_snapshots, 5534 .notify = smb3_notify, 5535 .get_dfs_refer = smb2_get_dfs_refer, 5536 .select_sectype = smb2_select_sectype, 5537 #ifdef CONFIG_CIFS_XATTR 5538 .query_all_EAs = smb2_query_eas, 5539 .set_EA = smb2_set_ea, 5540 #endif /* CIFS_XATTR */ 5541 .get_acl = get_smb2_acl, 5542 .get_acl_by_fid = get_smb2_acl_by_fid, 5543 .set_acl = set_smb2_acl, 5544 .next_header = smb2_next_header, 5545 .ioctl_query_info = smb2_ioctl_query_info, 5546 .make_node = smb2_make_node, 5547 .fiemap = smb3_fiemap, 5548 .llseek = smb3_llseek, 5549 .is_status_io_timeout = smb2_is_status_io_timeout, 5550 .is_network_name_deleted = smb2_is_network_name_deleted, 5551 .rename_pending_delete = smb2_rename_pending_delete, 5552 }; 5553 5554 struct smb_version_operations smb30_operations = { 5555 .compare_fids = smb2_compare_fids, 5556 .setup_request = smb2_setup_request, 5557 .setup_async_request = smb2_setup_async_request, 5558 .check_receive = smb2_check_receive, 5559 .add_credits = smb2_add_credits, 5560 .set_credits = smb2_set_credits, 5561 .get_credits_field = smb2_get_credits_field, 5562 .get_credits = smb2_get_credits, 5563 .wait_mtu_credits = smb2_wait_mtu_credits, 5564 .adjust_credits = smb2_adjust_credits, 5565 .get_next_mid = smb2_get_next_mid, 5566 .revert_current_mid = smb2_revert_current_mid, 5567 .read_data_offset = smb2_read_data_offset, 5568 .read_data_length = smb2_read_data_length, 5569 .map_error = map_smb2_to_linux_error, 5570 .find_mid = smb2_find_mid, 5571 .check_message = smb2_check_message, 5572 .dump_detail = smb2_dump_detail, 5573 .clear_stats = smb2_clear_stats, 5574 .print_stats = smb2_print_stats, 5575 .dump_share_caps = smb2_dump_share_caps, 5576 .is_oplock_break = smb2_is_valid_oplock_break, 5577 .handle_cancelled_mid = smb2_handle_cancelled_mid, 5578 .downgrade_oplock = smb3_downgrade_oplock, 5579 .need_neg = smb2_need_neg, 5580 .negotiate = smb2_negotiate, 5581 .negotiate_wsize = smb3_negotiate_wsize, 5582 .negotiate_rsize = smb3_negotiate_rsize, 5583 .sess_setup = SMB2_sess_setup, 5584 .logoff = SMB2_logoff, 5585 .tree_connect = SMB2_tcon, 5586 .tree_disconnect = SMB2_tdis, 5587 .qfs_tcon = smb3_qfs_tcon, 5588 .query_server_interfaces = SMB3_request_interfaces, 5589 .is_path_accessible = smb2_is_path_accessible, 5590 .can_echo = smb2_can_echo, 5591 .echo = SMB2_echo, 5592 .query_path_info = smb2_query_path_info, 5593 /* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */ 5594 .query_reparse_point = smb2_query_reparse_point, 5595 
.get_srv_inum = smb2_get_srv_inum, 5596 .query_file_info = smb2_query_file_info, 5597 .set_path_size = smb2_set_path_size, 5598 .set_file_size = smb2_set_file_size, 5599 .set_file_info = smb2_set_file_info, 5600 .set_compression = smb2_set_compression, 5601 .mkdir = smb2_mkdir, 5602 .mkdir_setinfo = smb2_mkdir_setinfo, 5603 .rmdir = smb2_rmdir, 5604 .unlink = smb2_unlink, 5605 .rename = smb2_rename_path, 5606 .create_hardlink = smb2_create_hardlink, 5607 .get_reparse_point_buffer = smb2_get_reparse_point_buffer, 5608 .query_mf_symlink = smb3_query_mf_symlink, 5609 .create_mf_symlink = smb3_create_mf_symlink, 5610 .create_reparse_inode = smb2_create_reparse_inode, 5611 .open = smb2_open_file, 5612 .set_fid = smb2_set_fid, 5613 .close = smb2_close_file, 5614 .close_getattr = smb2_close_getattr, 5615 .flush = smb2_flush_file, 5616 .async_readv = smb2_async_readv, 5617 .async_writev = smb2_async_writev, 5618 .sync_read = smb2_sync_read, 5619 .sync_write = smb2_sync_write, 5620 .query_dir_first = smb2_query_dir_first, 5621 .query_dir_next = smb2_query_dir_next, 5622 .close_dir = smb2_close_dir, 5623 .calc_smb_size = smb2_calc_size, 5624 .is_status_pending = smb2_is_status_pending, 5625 .is_session_expired = smb2_is_session_expired, 5626 .oplock_response = smb2_oplock_response, 5627 .queryfs = smb2_queryfs, 5628 .mand_lock = smb2_mand_lock, 5629 .mand_unlock_range = smb2_unlock_range, 5630 .push_mand_locks = smb2_push_mandatory_locks, 5631 .get_lease_key = smb2_get_lease_key, 5632 .set_lease_key = smb2_set_lease_key, 5633 .new_lease_key = smb2_new_lease_key, 5634 .generate_signingkey = generate_smb30signingkey, 5635 .set_integrity = smb3_set_integrity, 5636 .is_read_op = smb21_is_read_op, 5637 .set_oplock_level = smb3_set_oplock_level, 5638 .create_lease_buf = smb3_create_lease_buf, 5639 .parse_lease_buf = smb3_parse_lease_buf, 5640 .copychunk_range = smb2_copychunk_range, 5641 .duplicate_extents = smb2_duplicate_extents, 5642 .validate_negotiate = smb3_validate_negotiate, 5643 .wp_retry_size = smb2_wp_retry_size, 5644 .dir_needs_close = smb2_dir_needs_close, 5645 .fallocate = smb3_fallocate, 5646 .enum_snapshots = smb3_enum_snapshots, 5647 .notify = smb3_notify, 5648 .init_transform_rq = smb3_init_transform_rq, 5649 .is_transform_hdr = smb3_is_transform_hdr, 5650 .receive_transform = smb3_receive_transform, 5651 .get_dfs_refer = smb2_get_dfs_refer, 5652 .select_sectype = smb2_select_sectype, 5653 #ifdef CONFIG_CIFS_XATTR 5654 .query_all_EAs = smb2_query_eas, 5655 .set_EA = smb2_set_ea, 5656 #endif /* CIFS_XATTR */ 5657 .get_acl = get_smb2_acl, 5658 .get_acl_by_fid = get_smb2_acl_by_fid, 5659 .set_acl = set_smb2_acl, 5660 .next_header = smb2_next_header, 5661 .ioctl_query_info = smb2_ioctl_query_info, 5662 .make_node = smb2_make_node, 5663 .fiemap = smb3_fiemap, 5664 .llseek = smb3_llseek, 5665 .is_status_io_timeout = smb2_is_status_io_timeout, 5666 .is_network_name_deleted = smb2_is_network_name_deleted, 5667 .rename_pending_delete = smb2_rename_pending_delete, 5668 }; 5669 5670 struct smb_version_operations smb311_operations = { 5671 .compare_fids = smb2_compare_fids, 5672 .setup_request = smb2_setup_request, 5673 .setup_async_request = smb2_setup_async_request, 5674 .check_receive = smb2_check_receive, 5675 .add_credits = smb2_add_credits, 5676 .set_credits = smb2_set_credits, 5677 .get_credits_field = smb2_get_credits_field, 5678 .get_credits = smb2_get_credits, 5679 .wait_mtu_credits = smb2_wait_mtu_credits, 5680 .adjust_credits = smb2_adjust_credits, 5681 .get_next_mid = smb2_get_next_mid, 
struct smb_version_operations smb311_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb3_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.query_server_interfaces = SMB3_request_interfaces,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.query_reparse_point = smb2_query_reparse_point,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.posix_mkdir = smb311_posix_mkdir,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.get_reparse_point_buffer = smb2_get_reparse_point_buffer,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.create_reparse_inode = smb2_create_reparse_inode,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb311_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb311signingkey,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
	/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
	.rename_pending_delete = smb2_rename_pending_delete,
};

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
struct smb_version_values smb20_values = {
	.version_string = SMB20_VERSION_STRING,
	.protocol_id = SMB20_PROT_ID,
	.req_capabilities = 0, /* MBZ */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};
#endif /* ALLOW_INSECURE_LEGACY */

struct smb_version_values smb21_values = {
	.version_string = SMB21_VERSION_STRING,
	.protocol_id = SMB21_PROT_ID,
	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};

struct smb_version_values smb3any_values = {
	.version_string = SMB3ANY_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING |
		SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES |
		SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

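/*
 * Values for the default dialect choice: like smb3any_values above, an
 * array of dialects is sent in the negotiate request, so protocol_id here
 * is only a placeholder.
 */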
struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING |
		SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES |
		SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING |
		SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES |
		SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING |
		SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES |
		SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING |
		SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES |
		SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};