// SPDX-License-Identifier: LGPL-2.1
/*
 *
 * Copyright (C) International Business Machines Corp., 2009, 2013
 *              Etersoft, 2012
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 * Contains the routines for constructing the SMB2 PDUs themselves
 *
 */

/* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
/* Note that there are handle based routines which must be */
/* treated slightly differently for reconnection purposes since we never */
/* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif
#include "cached_dir.h"

/*
 * The following table defines the expected "StructureSize" of SMB2 requests
 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

int smb3_encryption_required(const struct cifs_tcon *tcon)
{
	if (!tcon || !tcon->ses)
		return 0;
	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
		return 1;
	if (tcon->seal &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		return 1;
	return 0;
}

static void
smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
		  const struct cifs_tcon *tcon,
		  struct TCP_Server_Info *server)
{
	struct smb3_hdr_req *smb3_hdr;

	shdr->ProtocolId = SMB2_PROTO_NUMBER;
	shdr->StructureSize = cpu_to_le16(64);
	shdr->Command = smb2_cmd;

	if (server) {
		/* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
		if (server->dialect >= SMB30_PROT_ID) {
			smb3_hdr = (struct smb3_hdr_req *)shdr;
			/*
			 * if primary channel is not set yet, use default
			 * channel for chan sequence num
			 */
			if (SERVER_IS_CHAN(server))
				smb3_hdr->ChannelSequence =
					cpu_to_le16(server->primary_server->channel_sequence_num);
			else
				smb3_hdr->ChannelSequence =
					cpu_to_le16(server->channel_sequence_num);
		}
		spin_lock(&server->req_lock);
		/* Request up to 10 credits but don't go over the limit. */
		if (server->credits >= server->max_credits)
			shdr->CreditRequest = cpu_to_le16(0);
		else
			shdr->CreditRequest = cpu_to_le16(
				min_t(int, server->max_credits -
					   server->credits, 10));
		spin_unlock(&server->req_lock);
	} else {
		shdr->CreditRequest = cpu_to_le16(2);
	}
	shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
	if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		shdr->CreditCharge = cpu_to_le16(1);
	/* else CreditCharge MBZ */

	shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
	/* Uid is not converted */
	if (tcon->ses)
		shdr->SessionId = cpu_to_le64(tcon->ses->Suid);

	/*
	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
	 * to pass the path on the Open SMB prefixed by \\server\share.
	 * Not sure when we would need to do the augmented path (if ever) and
	 * setting this flag breaks the SMB2 open operation since it is
	 * illegal to send an empty path name (without \\server\share prefix)
	 * when the DFS flag is set in the SMB open header. We could
	 * consider setting the flag on all operations other than open
	 * but it is safer to not set it for now.
	 */
/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */

	if (server && server->sign && !smb3_encryption_required(tcon))
		shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	return;
}

/* helper function for code reuse */
static int
cifs_chan_skip_or_disable(struct cifs_ses *ses,
			  struct TCP_Server_Info *server,
			  bool from_reconnect)
{
	struct TCP_Server_Info *pserver;
	unsigned int chan_index;

	if (SERVER_IS_CHAN(server)) {
		cifs_dbg(VFS,
			 "server %s does not support multichannel anymore. Skip secondary channel\n",
			 ses->server->hostname);

		spin_lock(&ses->chan_lock);
		chan_index = cifs_ses_get_chan_index(ses, server);
		if (chan_index == CIFS_INVAL_CHAN_INDEX) {
			spin_unlock(&ses->chan_lock);
			goto skip_terminate;
		}

		ses->chans[chan_index].server = NULL;
		server->terminate = true;
		spin_unlock(&ses->chan_lock);

		/*
		 * the above reference of server by channel
		 * needs to be dropped without holding chan_lock
		 * as cifs_put_tcp_session takes a higher lock
		 * i.e. cifs_tcp_ses_lock
		 */
		cifs_put_tcp_session(server, from_reconnect);

		cifs_signal_cifsd_for_reconnect(server, false);

		/* mark primary server as needing reconnect */
		pserver = server->primary_server;
		cifs_signal_cifsd_for_reconnect(pserver, false);
skip_terminate:
		return -EHOSTDOWN;
	}

	cifs_server_dbg(VFS,
		"server does not support multichannel anymore. Disable all other channels\n");
	cifs_disable_secondary_channels(ses);


	return 0;
}

static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server, bool from_reconnect)
{
	int rc = 0;
	struct nls_table *nls_codepage = NULL;
	struct cifs_ses *ses;
	int xid;

	/*
	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
	 * check for tcp and smb session status done differently
	 * for those three - in the calling routine.
	 */
	if (tcon == NULL)
		return 0;

	/*
	 * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
	 * cifs_tree_connect().
	 */
	if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
		return 0;

	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_EXITING) {
		/*
		 * only tree disconnect allowed when disconnecting ...
		 */
		if (smb2_command != SMB2_TREE_DISCONNECT) {
			spin_unlock(&tcon->tc_lock);
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	spin_unlock(&tcon->tc_lock);

	ses = tcon->ses;
	if (!ses)
		return -EIO;
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_EXITING) {
		spin_unlock(&ses->ses_lock);
		return -EIO;
	}
	spin_unlock(&ses->ses_lock);
	if (!ses->server || !server)
		return -EIO;

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			spin_unlock(&server->srv_lock);
			return -EAGAIN;
		}
	}

	/* if server is marked for termination, cifsd will cleanup */
	if (server->terminate) {
		spin_unlock(&server->srv_lock);
		return -EHOSTDOWN;
	}
	spin_unlock(&server->srv_lock);

again:
	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
	if (rc)
		return rc;

	spin_lock(&ses->chan_lock);
	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
		spin_unlock(&ses->chan_lock);
		return 0;
	}
	spin_unlock(&ses->chan_lock);
	cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
		 tcon->ses->chans_need_reconnect,
		 tcon->need_reconnect);

	mutex_lock(&ses->session_mutex);
	/*
	 * if this is called by delayed work, and the channel has been disabled
	 * in parallel, the delayed work can continue to execute in parallel;
	 * there's a chance that this channel may not exist anymore
	 */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		mutex_unlock(&ses->session_mutex);
		rc = -EHOSTDOWN;
		goto out;
	}

	/*
	 * Recheck after acquire mutex. If another thread is negotiating
	 * and the server never sends an answer the socket will be closed
	 * and tcpStatus set to reconnect.
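	 * In that case, if tcon->retry is set, we go back and wait for the
	 * reconnect once more (see the "again" label above).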
	 */
	if (server->tcpStatus == CifsNeedReconnect) {
		spin_unlock(&server->srv_lock);
		mutex_unlock(&ses->session_mutex);

		if (tcon->retry)
			goto again;

		rc = -EHOSTDOWN;
		goto out;
	}
	spin_unlock(&server->srv_lock);

	nls_codepage = ses->local_nls;

	/*
	 * need to prevent multiple threads trying to simultaneously
	 * reconnect the same SMB session
	 */
	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	if (!cifs_chan_needs_reconnect(ses, server) &&
	    ses->ses_status == SES_GOOD) {
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
		/* this means that we only need to tree connect */
		if (tcon->need_reconnect)
			goto skip_sess_setup;

		mutex_unlock(&ses->session_mutex);
		goto out;
	}
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);

	rc = cifs_negotiate_protocol(0, ses, server);
	if (!rc) {
		/*
		 * if server stopped supporting multichannel
		 * and the first channel reconnected, disable all the others.
		 */
		if (ses->chan_count > 1 &&
		    !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
			rc = cifs_chan_skip_or_disable(ses, server,
						       from_reconnect);
			if (rc) {
				mutex_unlock(&ses->session_mutex);
				goto out;
			}
		}

		rc = cifs_setup_session(0, ses, server, nls_codepage);
		if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
			/*
			 * Try alternate password for next reconnect (key rotation
			 * could be enabled on the server e.g.) if an alternate
			 * password is available and the current password is expired,
			 * but do not swap on non pwd related errors like host down
			 */
			if (ses->password2)
				swap(ses->password2, ses->password);
		}

		if ((rc == -EACCES) && !tcon->retry) {
			mutex_unlock(&ses->session_mutex);
			rc = -EHOSTDOWN;
			goto failed;
		} else if (rc) {
			mutex_unlock(&ses->session_mutex);
			goto out;
		}
	} else {
		mutex_unlock(&ses->session_mutex);
		goto out;
	}

skip_sess_setup:
	if (!tcon->need_reconnect) {
		mutex_unlock(&ses->session_mutex);
		goto out;
	}
	cifs_mark_open_files_invalid(tcon);
	if (tcon->use_persistent)
		tcon->need_reopen_files = true;

	rc = cifs_tree_connect(0, tcon, nls_codepage);

	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc) {
		/* If sess reconnected but tcon didn't, something strange ... */
		mutex_unlock(&ses->session_mutex);
		cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
		goto out;
	}

	spin_lock(&ses->ses_lock);
	if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
		spin_unlock(&ses->ses_lock);
		mutex_unlock(&ses->session_mutex);
		goto skip_add_channels;
	}
	ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
	spin_unlock(&ses->ses_lock);

	if (!rc &&
	    (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
	    server->ops->query_server_interfaces) {
		mutex_unlock(&ses->session_mutex);

		/*
		 * query server network interfaces, in case they change
		 */
		xid = get_xid();
		rc = server->ops->query_server_interfaces(xid, tcon, false);
		free_xid(xid);

		if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
			/*
			 * some servers like Azure SMB server do not advertise
			 * that multichannel has been disabled with server
			 * capabilities, rather return STATUS_NOT_IMPLEMENTED.
			 * treat this as server not supporting multichannel
			 */

			rc = cifs_chan_skip_or_disable(ses, server,
						       from_reconnect);
			goto skip_add_channels;
		} else if (rc)
			cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
				 __func__, rc);

		if (ses->chan_max > ses->chan_count &&
		    ses->iface_count &&
		    !SERVER_IS_CHAN(server)) {
			if (ses->chan_count == 1) {
				cifs_server_dbg(VFS, "supports multichannel now\n");
				queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
						   (SMB_INTERFACE_POLL_INTERVAL * HZ));
			}

			cifs_try_adding_channels(ses);
		}
	} else {
		mutex_unlock(&ses->session_mutex);
	}

skip_add_channels:
	spin_lock(&ses->ses_lock);
	ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
	spin_unlock(&ses->ses_lock);

	if (smb2_command != SMB2_INTERNAL_CMD)
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);

	atomic_inc(&tconInfoReconnectCount);
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		rc = -EAGAIN;
	}
failed:
	return rc;
}

static void
fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server,
	       void *buf,
	       unsigned int *total_len)
{
	struct smb2_pdu *spdu = buf;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(buf, 0, 256);

	smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
	spdu->StructureSize2 = cpu_to_le16(parmsize);

	*total_len = parmsize + sizeof(struct smb2_hdr);
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
				 struct TCP_Server_Info *server,
				 void **request_buf, unsigned int *total_len)
{
	/* BB eventually switch this to SMB2 specific small buf size */
	switch (smb2_command) {
	case SMB2_SET_INFO:
	case SMB2_QUERY_INFO:
		*request_buf = cifs_buf_get();
		break;
	default:
		*request_buf = cifs_small_buf_get();
		break;
	}
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	fill_small_buf(smb2_command, tcon, server,
		       (struct smb2_hdr *)(*request_buf),
		       total_len);

	if (tcon != NULL) {
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return 0;
}

static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	int rc;

	rc = smb2_reconnect(smb2_command, tcon, server, false);
	if (rc)
		return rc;

	return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
				     total_len);
}

static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
					     request_buf, total_len);
	}
	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
				   request_buf, total_len);
}

/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */

static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(38);
	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}

static void
build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
	pneg_ctxt->DataLength =
		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
			    - sizeof(struct smb2_neg_context));
	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
	pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
	pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
	pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
}

static unsigned int
build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
{
	unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
	unsigned short num_algs = 1; /* number of signing algorithms sent */

	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
	/*
	 * Context Data length must be rounded to multiple of 8 for some servers
	 */
	pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
					    sizeof(struct smb2_neg_context) +
					    (num_algs * sizeof(u16)), 8));
	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
	pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);

	ctxt_len += sizeof(__le16) * num_algs;
	ctxt_len = ALIGN(ctxt_len, 8);
	return ctxt_len;
	/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
}

static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
	if (require_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
		pneg_ctxt->CipherCount = cpu_to_le16(1);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
	} else if (enable_gcm_256) {
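		/* advertise AES-128-GCM first, then AES-256-GCM and AES-128-CCM */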
		pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(3);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
		pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
	} else {
		pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(2);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
	}
}

static unsigned int
build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
{
	struct nls_table *cp = load_nls_default();

	pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;

	/* copy up to max of first 100 bytes of server name to NetName field */
	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
	/* context size is DataLength + minimal smb2_neg_context */
	return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
}

static void
build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	pneg_ctxt->Name[0] = 0x93;
	pneg_ctxt->Name[1] = 0xAD;
	pneg_ctxt->Name[2] = 0x25;
	pneg_ctxt->Name[3] = 0x50;
	pneg_ctxt->Name[4] = 0x9C;
	pneg_ctxt->Name[5] = 0xB4;
	pneg_ctxt->Name[6] = 0x11;
	pneg_ctxt->Name[7] = 0xE7;
	pneg_ctxt->Name[8] = 0xB4;
	pneg_ctxt->Name[9] = 0x23;
	pneg_ctxt->Name[10] = 0x83;
	pneg_ctxt->Name[11] = 0xDE;
	pneg_ctxt->Name[12] = 0x96;
	pneg_ctxt->Name[13] = 0x8B;
	pneg_ctxt->Name[14] = 0xCD;
	pneg_ctxt->Name[15] = 0x7C;
}

static void
assemble_neg_contexts(struct smb2_negotiate_req *req,
		      struct TCP_Server_Info *server, unsigned int *total_len)
{
	unsigned int ctxt_len, neg_context_count;
	struct TCP_Server_Info *pserver;
	char *pneg_ctxt;
	char *hostname;

	if (*total_len > 200) {
		/* In case length corrupted don't want to overrun smb buffer */
		cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
		return;
	}

	/*
	 * round up total_len of fixed part of SMB3 negotiate request to 8
	 * byte boundary before adding negotiate contexts
	 */
	*total_len = ALIGN(*total_len, 8);

	pneg_ctxt = (*total_len) + (char *)req;
	req->NegotiateContextOffset = cpu_to_le32(*total_len);

	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
	ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
	ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	/*
	 * secondary channels don't have the hostname field populated
	 * use the hostname field in the primary channel instead
	 */
	pserver = SERVER_IS_CHAN(server) ?
		server->primary_server : server;
	cifs_server_lock(pserver);
	hostname = pserver->hostname;
	if (hostname && (hostname[0] != 0)) {
		ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
					      hostname);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count = 3;
	} else
		neg_context_count = 2;
	cifs_server_unlock(pserver);

	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
	*total_len += sizeof(struct smb2_posix_neg_context);
	pneg_ctxt += sizeof(struct smb2_posix_neg_context);
	neg_context_count++;

	if (server->compression.requested) {
		build_compression_ctxt((struct smb2_compression_capabilities_context *)
				       pneg_ctxt);
		ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	if (enable_negotiate_signing) {
		ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
					      pneg_ctxt);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	/* check for and add transport_capabilities and signing capabilities */
	req->NegotiateContextCount = cpu_to_le16(neg_context_count);

}

/* If invalid preauth context warn but use what we requested, SHA-512 */
static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one HashAlgorithms member is accounted for.
	 */
	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad preauth context\n");
		return;
	} else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
		pr_warn_once("server sent invalid SaltLength\n");
		return;
	}
	if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
		pr_warn_once("Invalid SMB3 hash algorithm count\n");
	if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
		pr_warn_once("unknown SMB3 hash algorithm\n");
}

static void decode_compress_ctx(struct TCP_Server_Info *server,
				struct smb2_compression_capabilities_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);
	__le16 alg;

	server->compression.enabled = false;

	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one CompressionAlgorithms member is accounted
	 * for.
	 */
	if (len < 10) {
		pr_warn_once("server sent bad compression cntxt\n");
		return;
	}

	if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
		pr_warn_once("invalid SMB3 compress algorithm count\n");
		return;
	}

	alg = ctxt->CompressionAlgorithms[0];

	/* 'NONE' (0) compressor type is never negotiated */
	if (alg == 0 || le16_to_cpu(alg) > 3) {
		pr_warn_once("invalid compression algorithm '%u'\n", alg);
		return;
	}

	server->compression.alg = alg;
	server->compression.enabled = true;
}

static int decode_encrypt_ctx(struct TCP_Server_Info *server,
			      struct smb2_encryption_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one Cipher flexible array member is accounted
	 * for.
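	 * (i.e. DataLength must cover CipherCount plus at least one cipher id).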
	 */
	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad crypto ctxt len\n");
		return -EINVAL;
	}

	if (le16_to_cpu(ctxt->CipherCount) != 1) {
		pr_warn_once("Invalid SMB3.11 cipher count\n");
		return -EINVAL;
	}
	cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
	if (require_gcm_256) {
		if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
			cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
			return -EOPNOTSUPP;
		}
	} else if (ctxt->Ciphers[0] == 0) {
		/*
		 * e.g. if server only supported AES256_CCM (very unlikely)
		 * or server supported no encryption types or had all disabled.
		 * Since GLOBAL_CAP_ENCRYPTION will not be set, in the case
		 * in which mount requested encryption ("seal") checks later
		 * on during tree connection will return proper rc, but if
		 * seal not requested by client, since server is allowed to
		 * return 0 to indicate no supported cipher, we can't fail here
		 */
		server->cipher_type = 0;
		server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
		pr_warn_once("Server does not support requested encryption types\n");
		return 0;
	} else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
		/* server returned a cipher we didn't ask for */
		pr_warn_once("Invalid SMB3.11 cipher returned\n");
		return -EINVAL;
	}
	server->cipher_type = ctxt->Ciphers[0];
	server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
	return 0;
}

static void decode_signing_ctx(struct TCP_Server_Info *server,
			       struct smb2_signing_capabilities *pctxt)
{
	unsigned int len = le16_to_cpu(pctxt->DataLength);

	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one SigningAlgorithms flexible array member is
	 * accounted for.
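	 * That is why len below must be at least 4: a 2-byte count plus one
	 * 2-byte algorithm id.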
	 */
	if ((len < 4) || (len > 16)) {
		pr_warn_once("server sent bad signing negcontext\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
		pr_warn_once("Invalid signing algorithm count\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
		pr_warn_once("unknown signing algorithm\n");
		return;
	}

	server->signing_negotiated = true;
	server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
	cifs_dbg(FYI, "signing algorithm %d chosen\n",
		 server->signing_algorithm);
}


static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
				     struct TCP_Server_Info *server,
				     unsigned int len_of_smb)
{
	struct smb2_neg_context *pctx;
	unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
	unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
	unsigned int len_of_ctxts, i;
	int rc = 0;

	cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
	if (len_of_smb <= offset) {
		cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
		return -EINVAL;
	}

	len_of_ctxts = len_of_smb - offset;

	for (i = 0; i < ctxt_cnt; i++) {
		int clen;
		/* check that offset is not beyond end of SMB */
		if (len_of_ctxts < sizeof(struct smb2_neg_context))
			break;

		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
		clen = sizeof(struct smb2_neg_context)
			+ le16_to_cpu(pctx->DataLength);
		/*
		 * 2.2.4 SMB2 NEGOTIATE Response
		 * Subsequent negotiate contexts MUST appear at the first 8-byte
		 * aligned offset following the previous negotiate context.
		 */
		if (i + 1 != ctxt_cnt)
			clen = ALIGN(clen, 8);
		if (clen > len_of_ctxts)
			break;

		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
			decode_preauth_context(
				(struct smb2_preauth_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
			rc = decode_encrypt_ctx(server,
				(struct smb2_encryption_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
			decode_compress_ctx(server,
				(struct smb2_compression_capabilities_context *)pctx);
		else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
			server->posix_ext_supported = true;
		else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
			decode_signing_ctx(server,
				(struct smb2_signing_capabilities *)pctx);
		else
			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
					le16_to_cpu(pctx->ContextType));
		if (rc)
			break;

		offset += clen;
		len_of_ctxts -= clen;
	}
	return rc;
}

static struct create_posix *
create_posix_buf(umode_t mode)
{
	struct create_posix *buf;

	buf = kzalloc(sizeof(struct create_posix),
		      GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset =
		cpu_to_le16(offsetof(struct create_posix, Mode));
	buf->ccontext.DataLength = cpu_to_le32(4);
	buf->ccontext.NameOffset =
		cpu_to_le16(offsetof(struct create_posix, Name));
	buf->ccontext.NameLength = cpu_to_le16(16);

	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	buf->Name[0] = 0x93;
	buf->Name[1] = 0xAD;
	buf->Name[2] = 0x25;
	buf->Name[3] = 0x50;
	buf->Name[4] = 0x9C;
	buf->Name[5] = 0xB4;
	buf->Name[6] = 0x11;
	buf->Name[7] = 0xE7;
	buf->Name[8] = 0xB4;
	buf->Name[9] = 0x23;
	buf->Name[10] = 0x83;
	buf->Name[11] = 0xDE;
	buf->Name[12] = 0x96;
	buf->Name[13] = 0x8B;
	buf->Name[14] = 0xCD;
	buf->Name[15] = 0x7C;
	buf->Mode = cpu_to_le32(mode);
	cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
	return buf;
}

static int
add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
{
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_posix_buf(mode);
	if (mode == ACL_NO_MODE)
		cifs_dbg(FYI, "%s: no mode\n", __func__);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_posix);
	*num_iovec = num + 1;
	return 0;
}


/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */

int
SMB2_negotiate(const unsigned int xid,
	       struct cifs_ses *ses,
	       struct TCP_Server_Info *server)
{
	struct smb_rqst rqst;
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int rc;
	int resp_buftype;
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;
	unsigned int total_len;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->hdr.SessionId = 0;

	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(3);
		total_len += 6;
	} else if (strcmp(server->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(4);
		total_len += 8;
	} else {
		/* otherwise send specific dialect */
		req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
		req->DialectCount = cpu_to_le16(1);
		total_len += 2;
	}

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
	if (ses->chan_max > 1)
		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);

	/* ClientGUID must be zero for SMB2.02 dialect */
	if (server->vals->protocol_id == SMB20_PROT_ID)
		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
	else {
		memcpy(req->ClientGUID, server->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
		if ((server->vals->protocol_id == SMB311_PROT_ID) ||
		    (strcmp(server->vals->version_string,
			    SMB3ANY_VERSION_STRING) == 0) ||
		    (strcmp(server->vals->version_string,
			    SMBDEFAULT_VERSION_STRING) == 0))
			assemble_neg_contexts(req, server, &total_len);
	}
	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(req);
	rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc == -EOPNOTSUPP) {
		cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
		goto neg_exit;
	} else if (rc != 0)
		goto neg_exit;

	rc = -EIO;
	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2.1 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			/* ops set to 3.0 by default so update if 3.1.1 negotiated */
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (strcmp(server->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			/* ops set to 3.0 by default so update if 2.1 negotiated */
			server->ops = &smb21_operations;
			server->vals = &smb21_values;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (le16_to_cpu(rsp->DialectRevision) !=
				server->vals->protocol_id) {
		/* if requested single dialect ensure returned dialect matched */
		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
				le16_to_cpu(rsp->DialectRevision));
		goto neg_exit;
	}

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
	else {
		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
				le16_to_cpu(rsp->DialectRevision));
		goto neg_exit;
	}

	rc = 0;
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/*
	 * Keep a copy of the hash after negprot. This hash will be
	 * the starting hash value for all sessions made from this
	 * server.
	 */
	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
	       SMB2_PREAUTH_HASH_SIZE);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	/* set it to the maximum buffer size value we can send with 1 credit */
	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
			       SMB2_MAX_BUFFER_SIZE);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
			 server->sec_mode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	/*
	 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context
	 * Set the cipher type manually.
	 */
	if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       (struct smb2_hdr *)rsp);
	/*
	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
	 * for us will be
	 *	ses->sectype = RawNTLMSSP;
	 * but for time being this is our only auth choice so doesn't matter.
	 * We just found a server which sets blob length to zero expecting raw.
	 */
	if (blob_length == 0) {
		cifs_dbg(FYI, "missing security blob on negprot\n");
		server->sec_ntlmssp = true;
	}

	rc = cifs_enable_signing(server, ses->sign);
	if (rc)
		goto neg_exit;
	if (blob_length) {
		rc = decode_negTokenInit(security_blob, blob_length, server);
		if (rc == 1)
			rc = 0;
		else if (rc == 0)
			rc = -EIO;
	}

	if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
		if (rsp->NegotiateContextCount)
			rc = smb311_decode_neg_context(rsp, server,
						       rsp_iov.iov_len);
		else
			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
	}
neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	struct validate_negotiate_info_req *pneg_inbuf;
	struct validate_negotiate_info_rsp *pneg_rsp = NULL;
	u32 rsplen;
	u32 inbuflen; /* max of 4 dialects */
	struct TCP_Server_Info *server = tcon->ses->server;

	cifs_dbg(FYI, "validate negotiate\n");

	/* In SMB3.11 preauth integrity supersedes validate negotiate */
	if (server->dialect == SMB311_PROT_ID)
		return 0;

	/*
	 * validation ioctl must be signed, so no point sending this if we
	 * can not sign it (ie are not known user). Even if signing is not
	 * required (enabled but not negotiated), in those cases we selectively
	 * sign just this, the first and only signed request on a connection.
	 * Having validation of negotiate info helps reduce attack vectors.
	 */
	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
		return 0; /* validation requires signing */

	if (tcon->ses->user_name == NULL) {
		cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
		return 0; /* validation requires signing */
	}

	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
		cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");

	pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
	if (!pneg_inbuf)
		return -ENOMEM;

	pneg_inbuf->Capabilities =
			cpu_to_le32(server->vals->req_capabilities);
	if (tcon->ses->chan_max > 1)
		pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);

	memcpy(pneg_inbuf->Guid, server->client_guid,
	       SMB2_CLIENT_GUID_SIZE);

	if (tcon->ses->sign)
		pneg_inbuf->SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		pneg_inbuf->SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		pneg_inbuf->SecurityMode = 0;


	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
		pneg_inbuf->DialectCount = cpu_to_le16(3);
		/* SMB 2.1 not included so subtract one dialect from len */
		inbuflen = sizeof(*pneg_inbuf) -
				(sizeof(pneg_inbuf->Dialects[0]));
	} else if (strcmp(server->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
		pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
		pneg_inbuf->DialectCount = cpu_to_le16(4);
		/* structure is big enough for 4 dialects */
		inbuflen = sizeof(*pneg_inbuf);
	} else {
		/* otherwise specific dialect was requested */
		pneg_inbuf->Dialects[0] =
			cpu_to_le16(server->vals->protocol_id);
		pneg_inbuf->DialectCount = cpu_to_le16(1);
		/* structure is big enough for 4 dialects, sending only 1 */
		inbuflen = sizeof(*pneg_inbuf) -
				sizeof(pneg_inbuf->Dialects[0]) * 3;
	}

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
		FSCTL_VALIDATE_NEGOTIATE_INFO,
		(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
		(char **)&pneg_rsp, &rsplen);
	if (rc == -EOPNOTSUPP) {
		/*
		 * Old Windows versions or Netapp SMB server can return
		 * not supported error. Client should accept it.
		 */
		cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
		rc = 0;
		goto out_free_inbuf;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
			      rc);
		rc = -EIO;
		goto out_free_inbuf;
	}

	rc = -EIO;
	if (rsplen != sizeof(*pneg_rsp)) {
		cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
			      rsplen);

		/* relax check since Mac returns max bufsize allowed on ioctl */
		if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
			goto out_free_rsp;
	}

	/* check validate negotiate info response matches what we got earlier */
	if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
		goto vneg_out;

	if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
		goto vneg_out;

	/* do not validate server guid because not saved at negprot time yet */

	if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
	     SMB2_LARGE_FILES) != server->capabilities)
		goto vneg_out;

	/* validate negotiate successful */
	rc = 0;
	cifs_dbg(FYI, "validate negotiate info successful\n");
	goto out_free_rsp;

vneg_out:
	cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
out_free_rsp:
	kfree(pneg_rsp);
out_free_inbuf:
	kfree(pneg_inbuf);
	return rc;
}

enum securityEnum
smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
{
	switch (requested) {
	case Kerberos:
	case RawNTLMSSP:
		return requested;
	case NTLMv2:
		return RawNTLMSSP;
	case Unspecified:
		if (server->sec_ntlmssp &&
		    (global_secflags & CIFSSEC_MAY_NTLMSSP))
			return RawNTLMSSP;
		if ((server->sec_kerberos || server->sec_mskerberos) &&
		    (global_secflags & CIFSSEC_MAY_KRB5))
			return Kerberos;
		fallthrough;
	default:
		return Unspecified;
	}
}

struct SMB2_sess_data {
	unsigned int xid;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct nls_table *nls_cp;
	void (*func)(struct SMB2_sess_data *);
	int result;
	u64 previous_session;

	/* we will send the SMB in three pieces:
	 * a fixed length beginning part, an optional
	 * SPNEGO blob (which can be zero length), and a
	 * last part which will include the strings
	 * and rest of bcc area. This allows us to avoid
	 * a large buffer 17K allocation
	 */
	int buf0_type;
	struct kvec iov[2];
};

static int
SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct smb2_sess_setup_req *req;
	unsigned int total_len;
	bool is_binding = false;

	rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
				 (void **) &req,
				 &total_len);
	if (rc)
		return rc;

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
	spin_unlock(&ses->ses_lock);

	if (is_binding) {
		req->hdr.SessionId = cpu_to_le64(ses->Suid);
		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
		req->PreviousSessionId = 0;
		req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
		cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
	} else {
		/* First session, not a reauthenticate */
		req->hdr.SessionId = 0;
		/*
		 * if reconnect, we need to send previous sess id
		 * otherwise it is 0
		 */
		req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
		req->Flags = 0; /* MBZ */
		cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
			 sess_data->previous_session);
	}

	/* enough to enable echos and oplocks and one max size write */
	if (server->credits >= server->max_credits)
		req->hdr.CreditRequest = cpu_to_le16(0);
	else
		req->hdr.CreditRequest = cpu_to_le16(
			min_t(int, server->max_credits -
			      server->credits, 130));

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (server->sign)
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
	else
		req->SecurityMode = 0;

#ifdef CONFIG_CIFS_DFS_UPCALL
	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
#else
	req->Capabilities = 0;
#endif /* DFS_UPCALL */

	req->Channel = 0; /* MBZ */

	sess_data->iov[0].iov_base = (char *)req;
	/* 1 for pad */
	sess_data->iov[0].iov_len = total_len - 1;
	/*
	 * This variable will be used to clear the buffer
	 * allocated above in case of any error in the calling function.
	 */
	sess_data->buf0_type = CIFS_SMALL_BUFFER;

	return 0;
}

static void
SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
{
	struct kvec *iov = sess_data->iov;

	/* iov[1] is already freed by caller */
	if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
		memzero_explicit(iov[0].iov_base, iov[0].iov_len);

	free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
	sess_data->buf0_type = CIFS_NO_BUFFER;
}

static int
SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct smb_rqst rqst;
	struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
	struct kvec rsp_iov = { NULL, 0 };

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->SecurityBufferOffset =
		cpu_to_le16(sizeof(struct smb2_sess_setup_req));
	req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = sess_data->iov;
	rqst.rq_nvec = 2;

	/* BB add code to build os and lm fields */
	rc = cifs_send_recv(sess_data->xid, sess_data->ses,
			    sess_data->server,
			    &rqst,
			    &sess_data->buf0_type,
			    CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
	cifs_small_buf_release(sess_data->iov[0].iov_base);
	if (rc == 0)
		sess_data->ses->expired_pwd = false;
	else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED))
		sess_data->ses->expired_pwd = true;

	memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));

	return rc;
}

static int
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
{
	int rc = 0;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;

	cifs_server_lock(server);
	if (server->ops->generate_signingkey) {
		rc = server->ops->generate_signingkey(ses, server);
		if (rc) {
			cifs_dbg(FYI,
				 "SMB3 session key generation failed\n");
			cifs_server_unlock(server);
			return rc;
		}
	}
	if (!server->session_estab) {
		server->sequence_number = 0x2;
		server->session_estab = true;
	}
	cifs_server_unlock(server);

	cifs_dbg(FYI, "SMB2/3 session established successfully\n");
	return rc;
}

#ifdef CONFIG_CIFS_UPCALL
static void
SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct cifs_spnego_msg *msg;
	struct key *spnego_key = NULL;
	struct smb2_sess_setup_rsp *rsp = NULL;
	bool is_binding = false;

	rc = SMB2_sess_alloc_buffer(sess_data);
	if (rc)
		goto out;

	spnego_key = cifs_get_spnego_key(ses, server);
	if (IS_ERR(spnego_key)) {
		rc = PTR_ERR(spnego_key);
		if (rc == -ENOKEY)
			cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
		spnego_key = NULL;
		goto out;
	}

	msg = spnego_key->payload.data[0];
	/*
	 * check version field to make sure that cifs.upcall is
	 * sending us a response in an expected form
	 */
	if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
		cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
			 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
		rc = -EKEYREJECTED;
		goto out_put_spnego_key;
	}

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
	spin_unlock(&ses->ses_lock);

	/* keep session key if binding */
	if (!is_binding) {
		kfree_sensitive(ses->auth_key.response);
		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
						 GFP_KERNEL);
		if (!ses->auth_key.response) {
			cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
				 msg->sesskey_len);
			rc = -ENOMEM;
			goto out_put_spnego_key;
		}
		ses->auth_key.len = msg->sesskey_len;
	}

	sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
	sess_data->iov[1].iov_len = msg->secblob_len;

	rc = SMB2_sess_sendreceive(sess_data);
	if (rc)
		goto out_put_spnego_key;

	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
	/* keep session id and flags if binding */
	if (!is_binding) {
		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
	}

	rc = SMB2_sess_establish_session(sess_data);
out_put_spnego_key:
	key_invalidate(spnego_key);
	key_put(spnego_key);
	if (rc) {
		kfree_sensitive(ses->auth_key.response);
		ses->auth_key.response = NULL;
		ses->auth_key.len = 0;
	}
out:
	sess_data->result = rc;
	sess_data->func = NULL;
	SMB2_sess_free_buffer(sess_data);
}
#else
static void
SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{
	cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
	sess_data->result = -EOPNOTSUPP;
	sess_data->func = NULL;
}
#endif

static void
SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);

static void
SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct smb2_sess_setup_rsp *rsp = NULL;
	unsigned char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */
	u16 blob_length = 0;
	bool is_binding = false;

	/*
	 * If memory allocation is successful, caller of this function
	 * frees it.
	 */
	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
	if (!ses->ntlmssp) {
		rc = -ENOMEM;
		goto out_err;
	}
	ses->ntlmssp->sesskey_per_smbsess = true;

	rc = SMB2_sess_alloc_buffer(sess_data);
	if (rc)
		goto out_err;

	rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
					       &blob_length, ses, server,
					       sess_data->nls_cp);
	if (rc)
		goto out;

	if (use_spnego) {
		/* BB eventually need to add this */
		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
		rc = -EOPNOTSUPP;
		goto out;
	}
	sess_data->iov[1].iov_base = ntlmssp_blob;
	sess_data->iov[1].iov_len = blob_length;

	rc = SMB2_sess_sendreceive(sess_data);
	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;

	/* If true, rc here is expected and not an error */
	if (sess_data->buf0_type != CIFS_NO_BUFFER &&
	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
		rc = 0;

	if (rc)
		goto out;

	if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
	    le16_to_cpu(rsp->SecurityBufferOffset)) {
		cifs_dbg(VFS, "Invalid security buffer offset %d\n",
			 le16_to_cpu(rsp->SecurityBufferOffset));
		rc = -EIO;
		goto out;
	}
	rc = decode_ntlmssp_challenge(rsp->Buffer,
				      le16_to_cpu(rsp->SecurityBufferLength), ses);
	if (rc)
		goto out;

	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
	spin_unlock(&ses->ses_lock);

	/* keep existing ses id and flags if binding */
	if (!is_binding) {
		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
	}

out:
	kfree_sensitive(ntlmssp_blob);
	SMB2_sess_free_buffer(sess_data);
	if (!rc) {
		sess_data->result = 0;
		sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
		return;
	}
out_err:
	kfree_sensitive(ses->ntlmssp);
	ses->ntlmssp = NULL;
	sess_data->result = rc;
	sess_data->func = NULL;
}

static void
SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct smb2_sess_setup_req *req;
	struct smb2_sess_setup_rsp *rsp = NULL;
	unsigned char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */
	u16 blob_length = 0;
	bool is_binding = false;

	rc = SMB2_sess_alloc_buffer(sess_data);
	if (rc)
		goto out;

	req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
	req->hdr.SessionId = cpu_to_le64(ses->Suid);

	rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
				     ses, server,
				     sess_data->nls_cp);
	if (rc) {
		cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
		goto out;
	}

	if (use_spnego) {
		/* BB eventually need to add this */
		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
		rc = -EOPNOTSUPP;
		goto out;
	}
	sess_data->iov[1].iov_base = ntlmssp_blob;
	sess_data->iov[1].iov_len = blob_length;

	rc = SMB2_sess_sendreceive(sess_data);
	if (rc)
		goto out;

	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
spin_unlock(&ses->ses_lock); 1832 1833 /* keep existing ses id and flags if binding */ 1834 if (!is_binding) { 1835 ses->Suid = le64_to_cpu(rsp->hdr.SessionId); 1836 ses->session_flags = le16_to_cpu(rsp->SessionFlags); 1837 } 1838 1839 rc = SMB2_sess_establish_session(sess_data); 1840 #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS 1841 if (ses->server->dialect < SMB30_PROT_ID) { 1842 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__); 1843 /* 1844 * The session id is opaque in terms of endianness, so we can't 1845 * print it as a long long. we dump it as we got it on the wire 1846 */ 1847 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid), 1848 &ses->Suid); 1849 cifs_dbg(VFS, "Session Key %*ph\n", 1850 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response); 1851 cifs_dbg(VFS, "Signing Key %*ph\n", 1852 SMB3_SIGN_KEY_SIZE, ses->auth_key.response); 1853 } 1854 #endif 1855 out: 1856 kfree_sensitive(ntlmssp_blob); 1857 SMB2_sess_free_buffer(sess_data); 1858 kfree_sensitive(ses->ntlmssp); 1859 ses->ntlmssp = NULL; 1860 sess_data->result = rc; 1861 sess_data->func = NULL; 1862 } 1863 1864 static int 1865 SMB2_select_sec(struct SMB2_sess_data *sess_data) 1866 { 1867 int type; 1868 struct cifs_ses *ses = sess_data->ses; 1869 struct TCP_Server_Info *server = sess_data->server; 1870 1871 type = smb2_select_sectype(server, ses->sectype); 1872 cifs_dbg(FYI, "sess setup type %d\n", type); 1873 if (type == Unspecified) { 1874 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n"); 1875 return -EINVAL; 1876 } 1877 1878 switch (type) { 1879 case Kerberos: 1880 sess_data->func = SMB2_auth_kerberos; 1881 break; 1882 case RawNTLMSSP: 1883 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate; 1884 break; 1885 default: 1886 cifs_dbg(VFS, "secType %d not supported!\n", type); 1887 return -EOPNOTSUPP; 1888 } 1889 1890 return 0; 1891 } 1892 1893 int 1894 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, 1895 struct TCP_Server_Info *server, 1896 const struct nls_table *nls_cp) 1897 { 1898 int rc = 0; 1899 struct SMB2_sess_data *sess_data; 1900 1901 cifs_dbg(FYI, "Session Setup\n"); 1902 1903 if (!server) { 1904 WARN(1, "%s: server is NULL!\n", __func__); 1905 return -EIO; 1906 } 1907 1908 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL); 1909 if (!sess_data) 1910 return -ENOMEM; 1911 1912 sess_data->xid = xid; 1913 sess_data->ses = ses; 1914 sess_data->server = server; 1915 sess_data->buf0_type = CIFS_NO_BUFFER; 1916 sess_data->nls_cp = (struct nls_table *) nls_cp; 1917 sess_data->previous_session = ses->Suid; 1918 1919 rc = SMB2_select_sec(sess_data); 1920 if (rc) 1921 goto out; 1922 1923 /* 1924 * Initialize the session hash with the server one. 
1925 */ 1926 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash, 1927 SMB2_PREAUTH_HASH_SIZE); 1928 1929 while (sess_data->func) 1930 sess_data->func(sess_data); 1931 1932 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign)) 1933 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n"); 1934 rc = sess_data->result; 1935 out: 1936 kfree_sensitive(sess_data); 1937 return rc; 1938 } 1939 1940 int 1941 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) 1942 { 1943 struct smb_rqst rqst; 1944 struct smb2_logoff_req *req; /* response is also trivial struct */ 1945 int rc = 0; 1946 struct TCP_Server_Info *server; 1947 int flags = 0; 1948 unsigned int total_len; 1949 struct kvec iov[1]; 1950 struct kvec rsp_iov; 1951 int resp_buf_type; 1952 1953 cifs_dbg(FYI, "disconnect session %p\n", ses); 1954 1955 if (ses && (ses->server)) 1956 server = ses->server; 1957 else 1958 return -EIO; 1959 1960 /* no need to send SMB logoff if uid already closed due to reconnect */ 1961 spin_lock(&ses->chan_lock); 1962 if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) { 1963 spin_unlock(&ses->chan_lock); 1964 goto smb2_session_already_dead; 1965 } 1966 spin_unlock(&ses->chan_lock); 1967 1968 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server, 1969 (void **) &req, &total_len); 1970 if (rc) 1971 return rc; 1972 1973 /* since no tcon, smb2_init can not do this, so do here */ 1974 req->hdr.SessionId = cpu_to_le64(ses->Suid); 1975 1976 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) 1977 flags |= CIFS_TRANSFORM_REQ; 1978 else if (server->sign) 1979 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 1980 1981 flags |= CIFS_NO_RSP_BUF; 1982 1983 iov[0].iov_base = (char *)req; 1984 iov[0].iov_len = total_len; 1985 1986 memset(&rqst, 0, sizeof(struct smb_rqst)); 1987 rqst.rq_iov = iov; 1988 rqst.rq_nvec = 1; 1989 1990 rc = cifs_send_recv(xid, ses, ses->server, 1991 &rqst, &resp_buf_type, flags, &rsp_iov); 1992 cifs_small_buf_release(req); 1993 /* 1994 * No tcon so can't do 1995 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); 1996 */ 1997 1998 smb2_session_already_dead: 1999 return rc; 2000 } 2001 2002 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code) 2003 { 2004 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]); 2005 } 2006 2007 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */) 2008 2009 /* These are similar values to what Windows uses */ 2010 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon) 2011 { 2012 tcon->max_chunks = 256; 2013 tcon->max_bytes_chunk = 1048576; 2014 tcon->max_bytes_copy = 16777216; 2015 } 2016 2017 int 2018 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, 2019 struct cifs_tcon *tcon, const struct nls_table *cp) 2020 { 2021 struct smb_rqst rqst; 2022 struct smb2_tree_connect_req *req; 2023 struct smb2_tree_connect_rsp *rsp = NULL; 2024 struct kvec iov[2]; 2025 struct kvec rsp_iov = { NULL, 0 }; 2026 int rc = 0; 2027 int resp_buftype; 2028 int unc_path_len; 2029 __le16 *unc_path = NULL; 2030 int flags = 0; 2031 unsigned int total_len; 2032 struct TCP_Server_Info *server = cifs_pick_channel(ses); 2033 2034 cifs_dbg(FYI, "TCON\n"); 2035 2036 if (!server || !tree) 2037 return -EIO; 2038 2039 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); 2040 if (unc_path == NULL) 2041 return -ENOMEM; 2042 2043 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp); 2044 if (unc_path_len <= 0) { 2045 kfree(unc_path); 2046 return -EINVAL; 2047 } 
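/*
 * cifs_strtoUTF16() returns a length in 16-bit code units, so it is
 * doubled below to get the byte count placed in PathLength and iov[1].
 * The tree string is the full UNC name of the share, e.g.
 * "\\server\share" (illustrative value only).
 */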
2048 unc_path_len *= 2; 2049 2050 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ 2051 tcon->tid = 0; 2052 atomic_set(&tcon->num_remote_opens, 0); 2053 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server, 2054 (void **) &req, &total_len); 2055 if (rc) { 2056 kfree(unc_path); 2057 return rc; 2058 } 2059 2060 if (smb3_encryption_required(tcon)) 2061 flags |= CIFS_TRANSFORM_REQ; 2062 2063 iov[0].iov_base = (char *)req; 2064 /* 1 for pad */ 2065 iov[0].iov_len = total_len - 1; 2066 2067 /* Testing shows that buffer offset must be at location of Buffer[0] */ 2068 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)); 2069 req->PathLength = cpu_to_le16(unc_path_len); 2070 iov[1].iov_base = unc_path; 2071 iov[1].iov_len = unc_path_len; 2072 2073 /* 2074 * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 2075 * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1 2076 * (Samba servers don't always set the flag so also check if null user) 2077 */ 2078 if ((server->dialect == SMB311_PROT_ID) && 2079 !smb3_encryption_required(tcon) && 2080 !(ses->session_flags & 2081 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) && 2082 ((ses->user_name != NULL) || (ses->sectype == Kerberos))) 2083 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 2084 2085 memset(&rqst, 0, sizeof(struct smb_rqst)); 2086 rqst.rq_iov = iov; 2087 rqst.rq_nvec = 2; 2088 2089 /* Need 64 for max size write so ask for more in case not there yet */ 2090 if (server->credits >= server->max_credits) 2091 req->hdr.CreditRequest = cpu_to_le16(0); 2092 else 2093 req->hdr.CreditRequest = cpu_to_le16( 2094 min_t(int, server->max_credits - 2095 server->credits, 64)); 2096 2097 rc = cifs_send_recv(xid, ses, server, 2098 &rqst, &resp_buftype, flags, &rsp_iov); 2099 cifs_small_buf_release(req); 2100 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; 2101 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc); 2102 if ((rc != 0) || (rsp == NULL)) { 2103 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE); 2104 tcon->need_reconnect = true; 2105 goto tcon_error_exit; 2106 } 2107 2108 switch (rsp->ShareType) { 2109 case SMB2_SHARE_TYPE_DISK: 2110 cifs_dbg(FYI, "connection to disk share\n"); 2111 break; 2112 case SMB2_SHARE_TYPE_PIPE: 2113 tcon->pipe = true; 2114 cifs_dbg(FYI, "connection to pipe share\n"); 2115 break; 2116 case SMB2_SHARE_TYPE_PRINT: 2117 tcon->print = true; 2118 cifs_dbg(FYI, "connection to printer\n"); 2119 break; 2120 default: 2121 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType); 2122 rc = -EOPNOTSUPP; 2123 goto tcon_error_exit; 2124 } 2125 2126 tcon->share_flags = le32_to_cpu(rsp->ShareFlags); 2127 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ 2128 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); 2129 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId); 2130 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name)); 2131 2132 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && 2133 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) 2134 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n"); 2135 2136 if (tcon->seal && 2137 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 2138 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n"); 2139 2140 init_copy_chunk_defaults(tcon); 2141 if (server->ops->validate_negotiate) 2142 rc = server->ops->validate_negotiate(xid, tcon); 2143 if (rc == 0) /* See MS-SMB2 2.2.10 and 3.2.5.5 */ 2144 if (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT) 2145 server->nosharesock = true; 
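/*
 * SMB2_SHAREFLAG_ISOLATED_TRANSPORT (tested just above; see MS-SMB2
 * 2.2.10 and 3.2.5.5) means the server wants this share reached over
 * its own transport, so nosharesock keeps this connection from being
 * shared with other mounts.
 */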
2146 tcon_exit: 2147 2148 free_rsp_buf(resp_buftype, rsp); 2149 kfree(unc_path); 2150 return rc; 2151 2152 tcon_error_exit: 2153 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) 2154 cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); 2155 goto tcon_exit; 2156 } 2157 2158 int 2159 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) 2160 { 2161 struct smb_rqst rqst; 2162 struct smb2_tree_disconnect_req *req; /* response is trivial */ 2163 int rc = 0; 2164 struct cifs_ses *ses = tcon->ses; 2165 struct TCP_Server_Info *server = cifs_pick_channel(ses); 2166 int flags = 0; 2167 unsigned int total_len; 2168 struct kvec iov[1]; 2169 struct kvec rsp_iov; 2170 int resp_buf_type; 2171 2172 cifs_dbg(FYI, "Tree Disconnect\n"); 2173 2174 if (!ses || !(ses->server)) 2175 return -EIO; 2176 2177 trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name); 2178 spin_lock(&ses->chan_lock); 2179 if ((tcon->need_reconnect) || 2180 (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) { 2181 spin_unlock(&ses->chan_lock); 2182 return 0; 2183 } 2184 spin_unlock(&ses->chan_lock); 2185 2186 invalidate_all_cached_dirs(tcon); 2187 2188 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server, 2189 (void **) &req, 2190 &total_len); 2191 if (rc) 2192 return rc; 2193 2194 if (smb3_encryption_required(tcon)) 2195 flags |= CIFS_TRANSFORM_REQ; 2196 2197 flags |= CIFS_NO_RSP_BUF; 2198 2199 iov[0].iov_base = (char *)req; 2200 iov[0].iov_len = total_len; 2201 2202 memset(&rqst, 0, sizeof(struct smb_rqst)); 2203 rqst.rq_iov = iov; 2204 rqst.rq_nvec = 1; 2205 2206 rc = cifs_send_recv(xid, ses, server, 2207 &rqst, &resp_buf_type, flags, &rsp_iov); 2208 cifs_small_buf_release(req); 2209 if (rc) { 2210 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); 2211 trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc); 2212 } 2213 trace_smb3_tdis_done(xid, tcon->tid, ses->Suid); 2214 2215 return rc; 2216 } 2217 2218 2219 static struct create_durable * 2220 create_durable_buf(void) 2221 { 2222 struct create_durable *buf; 2223 2224 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); 2225 if (!buf) 2226 return NULL; 2227 2228 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2229 (struct create_durable, Data)); 2230 buf->ccontext.DataLength = cpu_to_le32(16); 2231 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2232 (struct create_durable, Name)); 2233 buf->ccontext.NameLength = cpu_to_le16(4); 2234 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */ 2235 buf->Name[0] = 'D'; 2236 buf->Name[1] = 'H'; 2237 buf->Name[2] = 'n'; 2238 buf->Name[3] = 'Q'; 2239 return buf; 2240 } 2241 2242 static struct create_durable * 2243 create_reconnect_durable_buf(struct cifs_fid *fid) 2244 { 2245 struct create_durable *buf; 2246 2247 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); 2248 if (!buf) 2249 return NULL; 2250 2251 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2252 (struct create_durable, Data)); 2253 buf->ccontext.DataLength = cpu_to_le32(16); 2254 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2255 (struct create_durable, Name)); 2256 buf->ccontext.NameLength = cpu_to_le16(4); 2257 buf->Data.Fid.PersistentFileId = fid->persistent_fid; 2258 buf->Data.Fid.VolatileFileId = fid->volatile_fid; 2259 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */ 2260 buf->Name[0] = 'D'; 2261 buf->Name[1] = 'H'; 2262 buf->Name[2] = 'n'; 2263 buf->Name[3] = 'C'; 2264 return buf; 2265 } 2266 2267 static void 2268 parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf) 2269 { 2270 struct create_disk_id_rsp *pdisk_id = 
(struct create_disk_id_rsp *)cc; 2271 2272 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n", 2273 pdisk_id->DiskFileId, pdisk_id->VolumeId); 2274 buf->IndexNumber = pdisk_id->DiskFileId; 2275 } 2276 2277 static void 2278 parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info, 2279 struct create_posix_rsp *posix) 2280 { 2281 int sid_len; 2282 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset); 2283 u8 *end = beg + le32_to_cpu(cc->DataLength); 2284 u8 *sid; 2285 2286 memset(posix, 0, sizeof(*posix)); 2287 2288 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0)); 2289 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4)); 2290 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8)); 2291 2292 sid = beg + 12; 2293 sid_len = posix_info_sid_size(sid, end); 2294 if (sid_len < 0) { 2295 cifs_dbg(VFS, "bad owner sid in posix create response\n"); 2296 return; 2297 } 2298 memcpy(&posix->owner, sid, sid_len); 2299 2300 sid = sid + sid_len; 2301 sid_len = posix_info_sid_size(sid, end); 2302 if (sid_len < 0) { 2303 cifs_dbg(VFS, "bad group sid in posix create response\n"); 2304 return; 2305 } 2306 memcpy(&posix->group, sid, sid_len); 2307 2308 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n", 2309 posix->nlink, posix->mode, posix->reparse_tag); 2310 } 2311 2312 int smb2_parse_contexts(struct TCP_Server_Info *server, 2313 struct kvec *rsp_iov, 2314 unsigned int *epoch, 2315 char *lease_key, __u8 *oplock, 2316 struct smb2_file_all_info *buf, 2317 struct create_posix_rsp *posix) 2318 { 2319 struct smb2_create_rsp *rsp = rsp_iov->iov_base; 2320 struct create_context *cc; 2321 size_t rem, off, len; 2322 size_t doff, dlen; 2323 size_t noff, nlen; 2324 char *name; 2325 static const char smb3_create_tag_posix[] = { 2326 0x93, 0xAD, 0x25, 0x50, 0x9C, 2327 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83, 2328 0xDE, 0x96, 0x8B, 0xCD, 0x7C 2329 }; 2330 2331 *oplock = 0; 2332 2333 off = le32_to_cpu(rsp->CreateContextsOffset); 2334 rem = le32_to_cpu(rsp->CreateContextsLength); 2335 if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len) 2336 return -EINVAL; 2337 cc = (struct create_context *)((u8 *)rsp + off); 2338 2339 /* Initialize inode number to 0 in case no valid data in qfid context */ 2340 if (buf) 2341 buf->IndexNumber = 0; 2342 2343 while (rem >= sizeof(*cc)) { 2344 doff = le16_to_cpu(cc->DataOffset); 2345 dlen = le32_to_cpu(cc->DataLength); 2346 if (check_add_overflow(doff, dlen, &len) || len > rem) 2347 return -EINVAL; 2348 2349 noff = le16_to_cpu(cc->NameOffset); 2350 nlen = le16_to_cpu(cc->NameLength); 2351 if (noff + nlen > doff) 2352 return -EINVAL; 2353 2354 name = (char *)cc + noff; 2355 switch (nlen) { 2356 case 4: 2357 if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) { 2358 *oplock = server->ops->parse_lease_buf(cc, epoch, 2359 lease_key); 2360 } else if (buf && 2361 !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) { 2362 parse_query_id_ctxt(cc, buf); 2363 } 2364 break; 2365 case 16: 2366 if (posix && !memcmp(name, smb3_create_tag_posix, 16)) 2367 parse_posix_ctxt(cc, buf, posix); 2368 break; 2369 default: 2370 cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n", 2371 __func__, nlen, dlen); 2372 if (IS_ENABLED(CONFIG_CIFS_DEBUG2)) 2373 cifs_dump_mem("context data: ", cc, dlen); 2374 break; 2375 } 2376 2377 off = le32_to_cpu(cc->Next); 2378 if (!off) 2379 break; 2380 if (check_sub_overflow(rem, off, &rem)) 2381 return -EINVAL; 2382 cc = (struct create_context *)((u8 *)cc + off); 2383 } 2384 2385 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) 2386 *oplock = rsp->OplockLevel; 
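/*
 * The assignment just above only applies when the server did not grant
 * a lease: if it did, *oplock was already filled in by the server's
 * parse_lease_buf op while walking the create contexts.
 */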
2387 2388 return 0; 2389 } 2390 2391 static int 2392 add_lease_context(struct TCP_Server_Info *server, 2393 struct smb2_create_req *req, 2394 struct kvec *iov, 2395 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock) 2396 { 2397 unsigned int num = *num_iovec; 2398 2399 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock); 2400 if (iov[num].iov_base == NULL) 2401 return -ENOMEM; 2402 iov[num].iov_len = server->vals->create_lease_size; 2403 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; 2404 *num_iovec = num + 1; 2405 return 0; 2406 } 2407 2408 static struct create_durable_v2 * 2409 create_durable_v2_buf(struct cifs_open_parms *oparms) 2410 { 2411 struct cifs_fid *pfid = oparms->fid; 2412 struct create_durable_v2 *buf; 2413 2414 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); 2415 if (!buf) 2416 return NULL; 2417 2418 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2419 (struct create_durable_v2, dcontext)); 2420 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2)); 2421 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2422 (struct create_durable_v2, Name)); 2423 buf->ccontext.NameLength = cpu_to_le16(4); 2424 2425 /* 2426 * NB: Handle timeout defaults to 0, which allows server to choose 2427 * (most servers default to 120 seconds) and most clients default to 0. 2428 * This can be overridden at mount ("handletimeout=") if the user wants 2429 * a different persistent (or resilient) handle timeout for all opens 2430 * on a particular SMB3 mount. 2431 */ 2432 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); 2433 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 2434 2435 /* for replay, we should not overwrite the existing create guid */ 2436 if (!oparms->replay) { 2437 generate_random_uuid(buf->dcontext.CreateGuid); 2438 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); 2439 } else 2440 memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16); 2441 2442 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */ 2443 buf->Name[0] = 'D'; 2444 buf->Name[1] = 'H'; 2445 buf->Name[2] = '2'; 2446 buf->Name[3] = 'Q'; 2447 return buf; 2448 } 2449 2450 static struct create_durable_handle_reconnect_v2 * 2451 create_reconnect_durable_v2_buf(struct cifs_fid *fid) 2452 { 2453 struct create_durable_handle_reconnect_v2 *buf; 2454 2455 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2), 2456 GFP_KERNEL); 2457 if (!buf) 2458 return NULL; 2459 2460 buf->ccontext.DataOffset = 2461 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, 2462 dcontext)); 2463 buf->ccontext.DataLength = 2464 cpu_to_le32(sizeof(struct durable_reconnect_context_v2)); 2465 buf->ccontext.NameOffset = 2466 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, 2467 Name)); 2468 buf->ccontext.NameLength = cpu_to_le16(4); 2469 2470 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid; 2471 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid; 2472 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 2473 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16); 2474 2475 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */ 2476 buf->Name[0] = 'D'; 2477 buf->Name[1] = 'H'; 2478 buf->Name[2] = '2'; 2479 buf->Name[3] = 'C'; 2480 return buf; 2481 } 2482 2483 static int 2484 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, 2485 struct cifs_open_parms *oparms) 2486 { 2487 unsigned int num = *num_iovec; 2488 2489 iov[num].iov_base = create_durable_v2_buf(oparms); 2490 if (iov[num].iov_base == 
NULL) 2491 return -ENOMEM; 2492 iov[num].iov_len = sizeof(struct create_durable_v2); 2493 *num_iovec = num + 1; 2494 return 0; 2495 } 2496 2497 static int 2498 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec, 2499 struct cifs_open_parms *oparms) 2500 { 2501 unsigned int num = *num_iovec; 2502 2503 /* indicate that we don't need to relock the file */ 2504 oparms->reconnect = false; 2505 2506 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid); 2507 if (iov[num].iov_base == NULL) 2508 return -ENOMEM; 2509 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2); 2510 *num_iovec = num + 1; 2511 return 0; 2512 } 2513 2514 static int 2515 add_durable_context(struct kvec *iov, unsigned int *num_iovec, 2516 struct cifs_open_parms *oparms, bool use_persistent) 2517 { 2518 unsigned int num = *num_iovec; 2519 2520 if (use_persistent) { 2521 if (oparms->reconnect) 2522 return add_durable_reconnect_v2_context(iov, num_iovec, 2523 oparms); 2524 else 2525 return add_durable_v2_context(iov, num_iovec, oparms); 2526 } 2527 2528 if (oparms->reconnect) { 2529 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid); 2530 /* indicate that we don't need to relock the file */ 2531 oparms->reconnect = false; 2532 } else 2533 iov[num].iov_base = create_durable_buf(); 2534 if (iov[num].iov_base == NULL) 2535 return -ENOMEM; 2536 iov[num].iov_len = sizeof(struct create_durable); 2537 *num_iovec = num + 1; 2538 return 0; 2539 } 2540 2541 /* See MS-SMB2 2.2.13.2.7 */ 2542 static struct crt_twarp_ctxt * 2543 create_twarp_buf(__u64 timewarp) 2544 { 2545 struct crt_twarp_ctxt *buf; 2546 2547 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL); 2548 if (!buf) 2549 return NULL; 2550 2551 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2552 (struct crt_twarp_ctxt, Timestamp)); 2553 buf->ccontext.DataLength = cpu_to_le32(8); 2554 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2555 (struct crt_twarp_ctxt, Name)); 2556 buf->ccontext.NameLength = cpu_to_le16(4); 2557 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */ 2558 buf->Name[0] = 'T'; 2559 buf->Name[1] = 'W'; 2560 buf->Name[2] = 'r'; 2561 buf->Name[3] = 'p'; 2562 buf->Timestamp = cpu_to_le64(timewarp); 2563 return buf; 2564 } 2565 2566 /* See MS-SMB2 2.2.13.2.7 */ 2567 static int 2568 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp) 2569 { 2570 unsigned int num = *num_iovec; 2571 2572 iov[num].iov_base = create_twarp_buf(timewarp); 2573 if (iov[num].iov_base == NULL) 2574 return -ENOMEM; 2575 iov[num].iov_len = sizeof(struct crt_twarp_ctxt); 2576 *num_iovec = num + 1; 2577 return 0; 2578 } 2579 2580 /* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ 2581 static void setup_owner_group_sids(char *buf) 2582 { 2583 struct owner_group_sids *sids = (struct owner_group_sids *)buf; 2584 2585 /* Populate the user ownership fields S-1-5-88-1 */ 2586 sids->owner.Revision = 1; 2587 sids->owner.NumAuth = 3; 2588 sids->owner.Authority[5] = 5; 2589 sids->owner.SubAuthorities[0] = cpu_to_le32(88); 2590 sids->owner.SubAuthorities[1] = cpu_to_le32(1); 2591 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val); 2592 2593 /* Populate the group ownership fields S-1-5-88-2 */ 2594 sids->group.Revision = 1; 2595 sids->group.NumAuth = 3; 2596 sids->group.Authority[5] = 5; 2597 sids->group.SubAuthorities[0] = cpu_to_le32(88); 2598 sids->group.SubAuthorities[1] = cpu_to_le32(2); 2599 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val); 2600 2601 cifs_dbg(FYI, 
"owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val); 2602 } 2603 2604 /* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */ 2605 static struct crt_sd_ctxt * 2606 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) 2607 { 2608 struct crt_sd_ctxt *buf; 2609 __u8 *ptr, *aclptr; 2610 unsigned int acelen, acl_size, ace_count; 2611 unsigned int owner_offset = 0; 2612 unsigned int group_offset = 0; 2613 struct smb3_acl acl = {}; 2614 2615 *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8); 2616 2617 if (set_owner) { 2618 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */ 2619 *len += sizeof(struct owner_group_sids); 2620 } 2621 2622 buf = kzalloc(*len, GFP_KERNEL); 2623 if (buf == NULL) 2624 return buf; 2625 2626 ptr = (__u8 *)&buf[1]; 2627 if (set_owner) { 2628 /* offset fields are from beginning of security descriptor not of create context */ 2629 owner_offset = ptr - (__u8 *)&buf->sd; 2630 buf->sd.OffsetOwner = cpu_to_le32(owner_offset); 2631 group_offset = owner_offset + offsetof(struct owner_group_sids, group); 2632 buf->sd.OffsetGroup = cpu_to_le32(group_offset); 2633 2634 setup_owner_group_sids(ptr); 2635 ptr += sizeof(struct owner_group_sids); 2636 } else { 2637 buf->sd.OffsetOwner = 0; 2638 buf->sd.OffsetGroup = 0; 2639 } 2640 2641 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd)); 2642 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name)); 2643 buf->ccontext.NameLength = cpu_to_le16(4); 2644 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */ 2645 buf->Name[0] = 'S'; 2646 buf->Name[1] = 'e'; 2647 buf->Name[2] = 'c'; 2648 buf->Name[3] = 'D'; 2649 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */ 2650 2651 /* 2652 * ACL is "self relative" ie ACL is stored in contiguous block of memory 2653 * and "DP" ie the DACL is present 2654 */ 2655 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP); 2656 2657 /* offset owner, group and Sbz1 and SACL are all zero */ 2658 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2659 /* Ship the ACL for now. we will copy it into buf later. */ 2660 aclptr = ptr; 2661 ptr += sizeof(struct smb3_acl); 2662 2663 /* create one ACE to hold the mode embedded in reserved special SID */ 2664 acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode); 2665 ptr += acelen; 2666 acl_size = acelen + sizeof(struct smb3_acl); 2667 ace_count = 1; 2668 2669 if (set_owner) { 2670 /* we do not need to reallocate buffer to add the two more ACEs. 
plenty of space */ 2671 acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr); 2672 ptr += acelen; 2673 acl_size += acelen; 2674 ace_count += 1; 2675 } 2676 2677 /* and one more ACE to allow access for authenticated users */ 2678 acelen = setup_authusers_ACE((struct cifs_ace *)ptr); 2679 ptr += acelen; 2680 acl_size += acelen; 2681 ace_count += 1; 2682 2683 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ 2684 acl.AclSize = cpu_to_le16(acl_size); 2685 acl.AceCount = cpu_to_le16(ace_count); 2686 /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */ 2687 memcpy(aclptr, &acl, sizeof(struct smb3_acl)); 2688 2689 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2690 *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8); 2691 2692 return buf; 2693 } 2694 2695 static int 2696 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner) 2697 { 2698 unsigned int num = *num_iovec; 2699 unsigned int len = 0; 2700 2701 iov[num].iov_base = create_sd_buf(mode, set_owner, &len); 2702 if (iov[num].iov_base == NULL) 2703 return -ENOMEM; 2704 iov[num].iov_len = len; 2705 *num_iovec = num + 1; 2706 return 0; 2707 } 2708 2709 static struct crt_query_id_ctxt * 2710 create_query_id_buf(void) 2711 { 2712 struct crt_query_id_ctxt *buf; 2713 2714 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL); 2715 if (!buf) 2716 return NULL; 2717 2718 buf->ccontext.DataOffset = cpu_to_le16(0); 2719 buf->ccontext.DataLength = cpu_to_le32(0); 2720 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2721 (struct crt_query_id_ctxt, Name)); 2722 buf->ccontext.NameLength = cpu_to_le16(4); 2723 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */ 2724 buf->Name[0] = 'Q'; 2725 buf->Name[1] = 'F'; 2726 buf->Name[2] = 'i'; 2727 buf->Name[3] = 'd'; 2728 return buf; 2729 } 2730 2731 /* See MS-SMB2 2.2.13.2.9 */ 2732 static int 2733 add_query_id_context(struct kvec *iov, unsigned int *num_iovec) 2734 { 2735 unsigned int num = *num_iovec; 2736 2737 iov[num].iov_base = create_query_id_buf(); 2738 if (iov[num].iov_base == NULL) 2739 return -ENOMEM; 2740 iov[num].iov_len = sizeof(struct crt_query_id_ctxt); 2741 *num_iovec = num + 1; 2742 return 0; 2743 } 2744 2745 static void add_ea_context(struct cifs_open_parms *oparms, 2746 struct kvec *rq_iov, unsigned int *num_iovs) 2747 { 2748 struct kvec *iov = oparms->ea_cctx; 2749 2750 if (iov && iov->iov_base && iov->iov_len) { 2751 rq_iov[(*num_iovs)++] = *iov; 2752 memset(iov, 0, sizeof(*iov)); 2753 } 2754 } 2755 2756 static int 2757 alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len, 2758 const char *treename, const __le16 *path) 2759 { 2760 int treename_len, path_len; 2761 struct nls_table *cp; 2762 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)}; 2763 2764 /* 2765 * skip leading "\\" 2766 */ 2767 treename_len = strlen(treename); 2768 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\')) 2769 return -EINVAL; 2770 2771 treename += 2; 2772 treename_len -= 2; 2773 2774 path_len = UniStrnlen((wchar_t *)path, PATH_MAX); 2775 2776 /* make room for one path separator only if @path isn't empty */ 2777 *out_len = treename_len + (path[0] ? 1 : 0) + path_len; 2778 2779 /* 2780 * final path needs to be 8-byte aligned as specified in 2781 * MS-SMB2 2.2.13 SMB2 CREATE Request. 
2782 */ 2783 *out_size = round_up(*out_len * sizeof(__le16), 8); 2784 *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL); 2785 if (!*out_path) 2786 return -ENOMEM; 2787 2788 cp = load_nls_default(); 2789 cifs_strtoUTF16(*out_path, treename, treename_len, cp); 2790 2791 /* Do not append the separator if the path is empty */ 2792 if (path[0] != cpu_to_le16(0x0000)) { 2793 UniStrcat((wchar_t *)*out_path, (wchar_t *)sep); 2794 UniStrcat((wchar_t *)*out_path, (wchar_t *)path); 2795 } 2796 2797 unload_nls(cp); 2798 2799 return 0; 2800 } 2801 2802 int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, 2803 umode_t mode, struct cifs_tcon *tcon, 2804 const char *full_path, 2805 struct cifs_sb_info *cifs_sb) 2806 { 2807 struct smb_rqst rqst; 2808 struct smb2_create_req *req; 2809 struct smb2_create_rsp *rsp = NULL; 2810 struct cifs_ses *ses = tcon->ses; 2811 struct kvec iov[3]; /* make sure at least one for each open context */ 2812 struct kvec rsp_iov = {NULL, 0}; 2813 int resp_buftype; 2814 int uni_path_len; 2815 __le16 *copy_path = NULL; 2816 int copy_size; 2817 int rc = 0; 2818 unsigned int n_iov = 2; 2819 __u32 file_attributes = 0; 2820 char *pc_buf = NULL; 2821 int flags = 0; 2822 unsigned int total_len; 2823 __le16 *utf16_path = NULL; 2824 struct TCP_Server_Info *server; 2825 int retries = 0, cur_sleep = 1; 2826 2827 replay_again: 2828 /* reinitialize for possible replay */ 2829 flags = 0; 2830 n_iov = 2; 2831 server = cifs_pick_channel(ses); 2832 2833 cifs_dbg(FYI, "mkdir\n"); 2834 2835 /* resource #1: path allocation */ 2836 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); 2837 if (!utf16_path) 2838 return -ENOMEM; 2839 2840 if (!ses || !server) { 2841 rc = -EIO; 2842 goto err_free_path; 2843 } 2844 2845 /* resource #2: request */ 2846 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server, 2847 (void **) &req, &total_len); 2848 if (rc) 2849 goto err_free_path; 2850 2851 2852 if (smb3_encryption_required(tcon)) 2853 flags |= CIFS_TRANSFORM_REQ; 2854 2855 req->ImpersonationLevel = IL_IMPERSONATION; 2856 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES); 2857 /* File attributes ignored on open (used in create though) */ 2858 req->FileAttributes = cpu_to_le32(file_attributes); 2859 req->ShareAccess = FILE_SHARE_ALL_LE; 2860 req->CreateDisposition = cpu_to_le32(FILE_CREATE); 2861 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE); 2862 2863 iov[0].iov_base = (char *)req; 2864 /* -1 since last byte is buf[0] which is sent below (path) */ 2865 iov[0].iov_len = total_len - 1; 2866 2867 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); 2868 2869 /* [MS-SMB2] 2.2.13 NameOffset: 2870 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of 2871 * the SMB2 header, the file name includes a prefix that will 2872 * be processed during DFS name normalization as specified in 2873 * section 3.3.5.9. Otherwise, the file name is relative to 2874 * the share that is identified by the TreeId in the SMB2 2875 * header. 
2876 */
2877 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2878 int name_len;
2879
2880 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
2881 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2882 &name_len,
2883 tcon->tree_name, utf16_path);
2884 if (rc)
2885 goto err_free_req;
2886
2887 req->NameLength = cpu_to_le16(name_len * 2);
2888 uni_path_len = copy_size;
2889 /* free before overwriting resource */
2890 kfree(utf16_path);
2891 utf16_path = copy_path;
2892 } else {
2893 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
2894 /* MUST set path len (NameLength) to 0 when opening root of share */
2895 req->NameLength = cpu_to_le16(uni_path_len - 2);
2896 if (uni_path_len % 8 != 0) {
2897 copy_size = roundup(uni_path_len, 8);
2898 copy_path = kzalloc(copy_size, GFP_KERNEL);
2899 if (!copy_path) {
2900 rc = -ENOMEM;
2901 goto err_free_req;
2902 }
2903 memcpy((char *)copy_path, (const char *)utf16_path,
2904 uni_path_len);
2905 uni_path_len = copy_size;
2906 /* free before overwriting resource */
2907 kfree(utf16_path);
2908 utf16_path = copy_path;
2909 }
2910 }
2911
2912 iov[1].iov_len = uni_path_len;
2913 iov[1].iov_base = utf16_path;
2914 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
2915
2916 if (tcon->posix_extensions) {
2917 /* resource #3: posix buf */
2918 rc = add_posix_context(iov, &n_iov, mode);
2919 if (rc)
2920 goto err_free_req;
2921 req->CreateContextsOffset = cpu_to_le32(
2922 sizeof(struct smb2_create_req) +
2923 iov[1].iov_len);
2924 pc_buf = iov[n_iov-1].iov_base;
2925 }
2926
2927
2928 memset(&rqst, 0, sizeof(struct smb_rqst));
2929 rqst.rq_iov = iov;
2930 rqst.rq_nvec = n_iov;
2931
2932 /* no need to inc num_remote_opens because we close it just below */
2933 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
2934 FILE_WRITE_ATTRIBUTES);
2935
2936 if (retries)
2937 smb2_set_replay(server, &rqst);
2938
2939 /* resource #4: response buffer */
2940 rc = cifs_send_recv(xid, ses, server,
2941 &rqst, &resp_buftype, flags, &rsp_iov);
2942 if (rc) {
2943 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
2944 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
2945 CREATE_NOT_FILE,
2946 FILE_WRITE_ATTRIBUTES, rc);
2947 goto err_free_rsp_buf;
2948 }
2949
2950 /*
2951 * Although it is unlikely for rsp to be null when rc is not set,
2952 * the check below is slightly safer long term (and quiets a
2953 * Coverity warning)
2954 */
2955 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
2956 if (rsp == NULL) {
2957 rc = -EIO;
2958 kfree(pc_buf);
2959 goto err_free_req;
2960 }
2961
2962 trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
2963 CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
2964
2965 SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
2966
2967 /* Eventually save off posix-specific response info and timestamps */
2968
2969 err_free_rsp_buf:
2970 free_rsp_buf(resp_buftype, rsp);
2971 kfree(pc_buf);
2972 err_free_req:
2973 cifs_small_buf_release(req);
2974 err_free_path:
2975 kfree(utf16_path);
2976
2977 if (is_replayable_error(rc) &&
2978 smb2_should_replay(tcon, &retries, &cur_sleep))
2979 goto replay_again;
2980
2981 return rc;
2982 }
2983
2984 int
2985 SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
2986 struct smb_rqst *rqst, __u8 *oplock,
2987 struct cifs_open_parms *oparms, __le16 *path)
2988 {
2989 struct smb2_create_req *req;
2990 unsigned int n_iov = 2;
2991 __u32 file_attributes = 0;
2992 int copy_size;
2993 int uni_path_len;
2994 unsigned int total_len;
2995 struct kvec *iov = rqst->rq_iov;
2996 __le16 *copy_path;
2997 int rc;
2998
2999 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
3000 (void **) &req, &total_len);
3001 if (rc)
3002 return rc;
3003
3004 iov[0].iov_base = (char *)req;
3005 /* -1 since last byte is buf[0] which is sent below (path) */
3006 iov[0].iov_len = total_len - 1;
3007
3008 if (oparms->create_options & CREATE_OPTION_READONLY)
3009 file_attributes |= ATTR_READONLY;
3010 if (oparms->create_options & CREATE_OPTION_SPECIAL)
3011 file_attributes |= ATTR_SYSTEM;
3012
3013 req->ImpersonationLevel = IL_IMPERSONATION;
3014 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
3015 /* File attributes ignored on open (used in create though) */
3016 req->FileAttributes = cpu_to_le32(file_attributes);
3017 req->ShareAccess = FILE_SHARE_ALL_LE;
3018
3019 req->CreateDisposition = cpu_to_le32(oparms->disposition);
3020 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
3021 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
3022
3023 /* [MS-SMB2] 2.2.13 NameOffset:
3024 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
3025 * the SMB2 header, the file name includes a prefix that will
3026 * be processed during DFS name normalization as specified in
3027 * section 3.3.5.9. Otherwise, the file name is relative to
3028 * the share that is identified by the TreeId in the SMB2
3029 * header.
3030 */
3031 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
3032 int name_len;
3033
3034 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
3035 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
3036 &name_len,
3037 tcon->tree_name, path);
3038 if (rc)
3039 return rc;
3040 req->NameLength = cpu_to_le16(name_len * 2);
3041 uni_path_len = copy_size;
3042 path = copy_path;
3043 } else {
3044 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
3045 /* MUST set path len (NameLength) to 0 when opening root of share */
3046 req->NameLength = cpu_to_le16(uni_path_len - 2);
3047 copy_size = round_up(uni_path_len, 8);
3048 copy_path = kzalloc(copy_size, GFP_KERNEL);
3049 if (!copy_path)
3050 return -ENOMEM;
3051 memcpy((char *)copy_path, (const char *)path,
3052 uni_path_len);
3053 uni_path_len = copy_size;
3054 path = copy_path;
3055 }
3056
3057 iov[1].iov_len = uni_path_len;
3058 iov[1].iov_base = path;
3059
3060 if ((!server->oplocks) || (tcon->no_lease))
3061 *oplock = SMB2_OPLOCK_LEVEL_NONE;
3062
/* Ask for a lease only when the server supports leasing (and directory
 * leasing for directory opens); otherwise fall back to sending the
 * requested oplock level as-is.
 */
3063 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
3064 *oplock == SMB2_OPLOCK_LEVEL_NONE)
3065 req->RequestedOplockLevel = *oplock;
3066 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
3067 (oparms->create_options & CREATE_NOT_FILE))
3068 req->RequestedOplockLevel = *oplock; /* no srv lease support */
3069 else {
3070 rc = add_lease_context(server, req, iov, &n_iov,
3071 oparms->fid->lease_key, oplock);
3072 if (rc)
3073 return rc;
3074 }
3075
3076 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
3077 rc = add_durable_context(iov, &n_iov, oparms,
3078 tcon->use_persistent);
3079 if (rc)
3080 return rc;
3081 }
3082
3083 if (tcon->posix_extensions) {
3084 rc = add_posix_context(iov, &n_iov, oparms->mode);
3085 if (rc)
3086 return rc;
3087 }
3088
3089 if (tcon->snapshot_time) {
3090 cifs_dbg(FYI, "adding snapshot context\n");
3091 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
3092 if (rc)
3093 return rc;
3094 }
3095
3096 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
3097 bool set_mode;
3098 bool set_owner;
3099
3100 if ((oparms->cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MODE_FROM_SID) && 3101 (oparms->mode != ACL_NO_MODE)) 3102 set_mode = true; 3103 else { 3104 set_mode = false; 3105 oparms->mode = ACL_NO_MODE; 3106 } 3107 3108 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) 3109 set_owner = true; 3110 else 3111 set_owner = false; 3112 3113 if (set_owner | set_mode) { 3114 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode); 3115 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner); 3116 if (rc) 3117 return rc; 3118 } 3119 } 3120 3121 add_query_id_context(iov, &n_iov); 3122 add_ea_context(oparms, iov, &n_iov); 3123 3124 if (n_iov > 2) { 3125 /* 3126 * We have create contexts behind iov[1] (the file 3127 * name), point at them from the main create request 3128 */ 3129 req->CreateContextsOffset = cpu_to_le32( 3130 sizeof(struct smb2_create_req) + 3131 iov[1].iov_len); 3132 req->CreateContextsLength = 0; 3133 3134 for (unsigned int i = 2; i < (n_iov-1); i++) { 3135 struct kvec *v = &iov[i]; 3136 size_t len = v->iov_len; 3137 struct create_context *cctx = 3138 (struct create_context *)v->iov_base; 3139 3140 cctx->Next = cpu_to_le32(len); 3141 le32_add_cpu(&req->CreateContextsLength, len); 3142 } 3143 le32_add_cpu(&req->CreateContextsLength, 3144 iov[n_iov-1].iov_len); 3145 } 3146 3147 rqst->rq_nvec = n_iov; 3148 return 0; 3149 } 3150 3151 /* rq_iov[0] is the request and is released by cifs_small_buf_release(). 3152 * All other vectors are freed by kfree(). 3153 */ 3154 void 3155 SMB2_open_free(struct smb_rqst *rqst) 3156 { 3157 int i; 3158 3159 if (rqst && rqst->rq_iov) { 3160 cifs_small_buf_release(rqst->rq_iov[0].iov_base); 3161 for (i = 1; i < rqst->rq_nvec; i++) 3162 if (rqst->rq_iov[i].iov_base != smb2_padding) 3163 kfree(rqst->rq_iov[i].iov_base); 3164 } 3165 } 3166 3167 int 3168 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, 3169 __u8 *oplock, struct smb2_file_all_info *buf, 3170 struct create_posix_rsp *posix, 3171 struct kvec *err_iov, int *buftype) 3172 { 3173 struct smb_rqst rqst; 3174 struct smb2_create_rsp *rsp = NULL; 3175 struct cifs_tcon *tcon = oparms->tcon; 3176 struct cifs_ses *ses = tcon->ses; 3177 struct TCP_Server_Info *server; 3178 struct kvec iov[SMB2_CREATE_IOV_SIZE]; 3179 struct kvec rsp_iov = {NULL, 0}; 3180 int resp_buftype = CIFS_NO_BUFFER; 3181 int rc = 0; 3182 int flags = 0; 3183 int retries = 0, cur_sleep = 1; 3184 3185 replay_again: 3186 /* reinitialize for possible replay */ 3187 flags = 0; 3188 server = cifs_pick_channel(ses); 3189 oparms->replay = !!(retries); 3190 3191 cifs_dbg(FYI, "create/open\n"); 3192 if (!ses || !server) 3193 return -EIO; 3194 3195 if (smb3_encryption_required(tcon)) 3196 flags |= CIFS_TRANSFORM_REQ; 3197 3198 memset(&rqst, 0, sizeof(struct smb_rqst)); 3199 memset(&iov, 0, sizeof(iov)); 3200 rqst.rq_iov = iov; 3201 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE; 3202 3203 rc = SMB2_open_init(tcon, server, 3204 &rqst, oplock, oparms, path); 3205 if (rc) 3206 goto creat_exit; 3207 3208 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path, 3209 oparms->create_options, oparms->desired_access); 3210 3211 if (retries) 3212 smb2_set_replay(server, &rqst); 3213 3214 rc = cifs_send_recv(xid, ses, server, 3215 &rqst, &resp_buftype, flags, 3216 &rsp_iov); 3217 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; 3218 3219 if (rc != 0) { 3220 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); 3221 if (err_iov && rsp) { 3222 *err_iov = rsp_iov; 3223 *buftype = resp_buftype; 3224 resp_buftype = CIFS_NO_BUFFER; 3225 rsp = NULL; 3226 } 3227 
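/*
 * On error the create response, if any, has been handed to the caller
 * through err_iov above, and resp_buftype was reset to CIFS_NO_BUFFER
 * so it is not freed at creat_exit; the caller owns (and must free)
 * that buffer after inspecting the error payload.
 */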
trace_smb3_open_err(xid, tcon->tid, ses->Suid, 3228 oparms->create_options, oparms->desired_access, rc); 3229 if (rc == -EREMCHG) { 3230 pr_warn_once("server share %s deleted\n", 3231 tcon->tree_name); 3232 tcon->need_reconnect = true; 3233 } 3234 goto creat_exit; 3235 } else if (rsp == NULL) /* unlikely to happen, but safer to check */ 3236 goto creat_exit; 3237 else 3238 trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid, 3239 oparms->create_options, oparms->desired_access); 3240 3241 atomic_inc(&tcon->num_remote_opens); 3242 oparms->fid->persistent_fid = rsp->PersistentFileId; 3243 oparms->fid->volatile_fid = rsp->VolatileFileId; 3244 oparms->fid->access = oparms->desired_access; 3245 #ifdef CONFIG_CIFS_DEBUG2 3246 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId); 3247 #endif /* CIFS_DEBUG2 */ 3248 3249 if (buf) { 3250 buf->CreationTime = rsp->CreationTime; 3251 buf->LastAccessTime = rsp->LastAccessTime; 3252 buf->LastWriteTime = rsp->LastWriteTime; 3253 buf->ChangeTime = rsp->ChangeTime; 3254 buf->AllocationSize = rsp->AllocationSize; 3255 buf->EndOfFile = rsp->EndofFile; 3256 buf->Attributes = rsp->FileAttributes; 3257 buf->NumberOfLinks = cpu_to_le32(1); 3258 buf->DeletePending = 0; 3259 } 3260 3261 3262 rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch, 3263 oparms->fid->lease_key, oplock, buf, posix); 3264 creat_exit: 3265 SMB2_open_free(&rqst); 3266 free_rsp_buf(resp_buftype, rsp); 3267 3268 if (is_replayable_error(rc) && 3269 smb2_should_replay(tcon, &retries, &cur_sleep)) 3270 goto replay_again; 3271 3272 return rc; 3273 } 3274 3275 int 3276 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3277 struct smb_rqst *rqst, 3278 u64 persistent_fid, u64 volatile_fid, u32 opcode, 3279 char *in_data, u32 indatalen, 3280 __u32 max_response_size) 3281 { 3282 struct smb2_ioctl_req *req; 3283 struct kvec *iov = rqst->rq_iov; 3284 unsigned int total_len; 3285 int rc; 3286 char *in_data_buf; 3287 3288 rc = smb2_ioctl_req_init(opcode, tcon, server, 3289 (void **) &req, &total_len); 3290 if (rc) 3291 return rc; 3292 3293 if (indatalen) { 3294 /* 3295 * indatalen is usually small at a couple of bytes max, so 3296 * just allocate through generic pool 3297 */ 3298 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS); 3299 if (!in_data_buf) { 3300 cifs_small_buf_release(req); 3301 return -ENOMEM; 3302 } 3303 } 3304 3305 req->CtlCode = cpu_to_le32(opcode); 3306 req->PersistentFileId = persistent_fid; 3307 req->VolatileFileId = volatile_fid; 3308 3309 iov[0].iov_base = (char *)req; 3310 /* 3311 * If no input data, the size of ioctl struct in 3312 * protocol spec still includes a 1 byte data buffer, 3313 * but if input data passed to ioctl, we do not 3314 * want to double count this, so we do not send 3315 * the dummy one byte of data in iovec[0] if sending 3316 * input data (in iovec[1]). 3317 */ 3318 if (indatalen) { 3319 req->InputCount = cpu_to_le32(indatalen); 3320 /* do not set InputOffset if no input data */ 3321 req->InputOffset = 3322 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer)); 3323 rqst->rq_nvec = 2; 3324 iov[0].iov_len = total_len - 1; 3325 iov[1].iov_base = in_data_buf; 3326 iov[1].iov_len = indatalen; 3327 } else { 3328 rqst->rq_nvec = 1; 3329 iov[0].iov_len = total_len; 3330 } 3331 3332 req->OutputOffset = 0; 3333 req->OutputCount = 0; /* MBZ */ 3334 3335 /* 3336 * In most cases max_response_size is set to 16K (CIFSMaxBufSize) 3337 * We Could increase default MaxOutputResponse, but that could require 3338 * more credits. 
Windows typically sets this smaller, but for some 3339 * ioctls it may be useful to allow server to send more. No point 3340 * limiting what the server can send as long as fits in one credit 3341 * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want 3342 * to increase this limit up in the future. 3343 * Note that for snapshot queries that servers like Azure expect that 3344 * the first query be minimal size (and just used to get the number/size 3345 * of previous versions) so response size must be specified as EXACTLY 3346 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple 3347 * of eight bytes. Currently that is the only case where we set max 3348 * response size smaller. 3349 */ 3350 req->MaxOutputResponse = cpu_to_le32(max_response_size); 3351 req->hdr.CreditCharge = 3352 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), 3353 SMB2_MAX_BUFFER_SIZE)); 3354 /* always an FSCTL (for now) */ 3355 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); 3356 3357 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ 3358 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) 3359 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 3360 3361 return 0; 3362 } 3363 3364 void 3365 SMB2_ioctl_free(struct smb_rqst *rqst) 3366 { 3367 int i; 3368 3369 if (rqst && rqst->rq_iov) { 3370 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3371 for (i = 1; i < rqst->rq_nvec; i++) 3372 if (rqst->rq_iov[i].iov_base != smb2_padding) 3373 kfree(rqst->rq_iov[i].iov_base); 3374 } 3375 } 3376 3377 3378 /* 3379 * SMB2 IOCTL is used for both IOCTLs and FSCTLs 3380 */ 3381 int 3382 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 3383 u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, 3384 u32 max_out_data_len, char **out_data, 3385 u32 *plen /* returned data len */) 3386 { 3387 struct smb_rqst rqst; 3388 struct smb2_ioctl_rsp *rsp = NULL; 3389 struct cifs_ses *ses; 3390 struct TCP_Server_Info *server; 3391 struct kvec iov[SMB2_IOCTL_IOV_SIZE]; 3392 struct kvec rsp_iov = {NULL, 0}; 3393 int resp_buftype = CIFS_NO_BUFFER; 3394 int rc = 0; 3395 int flags = 0; 3396 int retries = 0, cur_sleep = 1; 3397 3398 if (!tcon) 3399 return -EIO; 3400 3401 ses = tcon->ses; 3402 if (!ses) 3403 return -EIO; 3404 3405 replay_again: 3406 /* reinitialize for possible replay */ 3407 flags = 0; 3408 server = cifs_pick_channel(ses); 3409 3410 if (!server) 3411 return -EIO; 3412 3413 cifs_dbg(FYI, "SMB2 IOCTL\n"); 3414 3415 if (out_data != NULL) 3416 *out_data = NULL; 3417 3418 /* zero out returned data len, in case of error */ 3419 if (plen) 3420 *plen = 0; 3421 3422 if (smb3_encryption_required(tcon)) 3423 flags |= CIFS_TRANSFORM_REQ; 3424 3425 memset(&rqst, 0, sizeof(struct smb_rqst)); 3426 memset(&iov, 0, sizeof(iov)); 3427 rqst.rq_iov = iov; 3428 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; 3429 3430 rc = SMB2_ioctl_init(tcon, server, 3431 &rqst, persistent_fid, volatile_fid, opcode, 3432 in_data, indatalen, max_out_data_len); 3433 if (rc) 3434 goto ioctl_exit; 3435 3436 if (retries) 3437 smb2_set_replay(server, &rqst); 3438 3439 rc = cifs_send_recv(xid, ses, server, 3440 &rqst, &resp_buftype, flags, 3441 &rsp_iov); 3442 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; 3443 3444 if (rc != 0) 3445 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid, 3446 ses->Suid, 0, opcode, rc); 3447 3448 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) { 3449 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3450 goto ioctl_exit; 3451 } else if (rc == -EINVAL) { 3452 if ((opcode != 
FSCTL_SRV_COPYCHUNK_WRITE) && 3453 (opcode != FSCTL_SRV_COPYCHUNK)) { 3454 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3455 goto ioctl_exit; 3456 } 3457 } else if (rc == -E2BIG) { 3458 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) { 3459 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3460 goto ioctl_exit; 3461 } 3462 } 3463 3464 /* check if caller wants to look at return data or just return rc */ 3465 if ((plen == NULL) || (out_data == NULL)) 3466 goto ioctl_exit; 3467 3468 /* 3469 * Although unlikely to be possible for rsp to be null and rc not set, 3470 * adding check below is slightly safer long term (and quiets Coverity 3471 * warning) 3472 */ 3473 if (rsp == NULL) { 3474 rc = -EIO; 3475 goto ioctl_exit; 3476 } 3477 3478 *plen = le32_to_cpu(rsp->OutputCount); 3479 3480 /* We check for obvious errors in the output buffer length and offset */ 3481 if (*plen == 0) 3482 goto ioctl_exit; /* server returned no data */ 3483 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) { 3484 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); 3485 *plen = 0; 3486 rc = -EIO; 3487 goto ioctl_exit; 3488 } 3489 3490 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) { 3491 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen, 3492 le32_to_cpu(rsp->OutputOffset)); 3493 *plen = 0; 3494 rc = -EIO; 3495 goto ioctl_exit; 3496 } 3497 3498 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset), 3499 *plen, GFP_KERNEL); 3500 if (*out_data == NULL) { 3501 rc = -ENOMEM; 3502 goto ioctl_exit; 3503 } 3504 3505 ioctl_exit: 3506 SMB2_ioctl_free(&rqst); 3507 free_rsp_buf(resp_buftype, rsp); 3508 3509 if (is_replayable_error(rc) && 3510 smb2_should_replay(tcon, &retries, &cur_sleep)) 3511 goto replay_again; 3512 3513 return rc; 3514 } 3515 3516 /* 3517 * Individual callers to ioctl worker function follow 3518 */ 3519 3520 int 3521 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, 3522 u64 persistent_fid, u64 volatile_fid) 3523 { 3524 int rc; 3525 struct compress_ioctl fsctl_input; 3526 char *ret_data = NULL; 3527 3528 fsctl_input.CompressionState = 3529 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); 3530 3531 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, 3532 FSCTL_SET_COMPRESSION, 3533 (char *)&fsctl_input /* data input */, 3534 2 /* in data len */, CIFSMaxBufSize /* max out data */, 3535 &ret_data /* out data */, NULL); 3536 3537 cifs_dbg(FYI, "set compression rc %d\n", rc); 3538 3539 return rc; 3540 } 3541 3542 int 3543 SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3544 struct smb_rqst *rqst, 3545 u64 persistent_fid, u64 volatile_fid, bool query_attrs) 3546 { 3547 struct smb2_close_req *req; 3548 struct kvec *iov = rqst->rq_iov; 3549 unsigned int total_len; 3550 int rc; 3551 3552 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server, 3553 (void **) &req, &total_len); 3554 if (rc) 3555 return rc; 3556 3557 req->PersistentFileId = persistent_fid; 3558 req->VolatileFileId = volatile_fid; 3559 if (query_attrs) 3560 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB; 3561 else 3562 req->Flags = 0; 3563 iov[0].iov_base = (char *)req; 3564 iov[0].iov_len = total_len; 3565 3566 return 0; 3567 } 3568 3569 void 3570 SMB2_close_free(struct smb_rqst *rqst) 3571 { 3572 if (rqst && rqst->rq_iov) 3573 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3574 } 3575 3576 int 3577 __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 3578 u64 persistent_fid, u64 volatile_fid, 3579 struct smb2_file_network_open_info *pbuf) 3580 { 
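/*
 * __SMB2_close() sends the SMB2 CLOSE for the given file ids.  When
 * pbuf is non-NULL, SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB is set so the
 * server returns the final attributes, which are copied into pbuf.
 * Callers that do not need them use the SMB2_close() wrapper below,
 * e.g. (sketch): SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
 */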
3581 struct smb_rqst rqst; 3582 struct smb2_close_rsp *rsp = NULL; 3583 struct cifs_ses *ses = tcon->ses; 3584 struct TCP_Server_Info *server; 3585 struct kvec iov[1]; 3586 struct kvec rsp_iov; 3587 int resp_buftype = CIFS_NO_BUFFER; 3588 int rc = 0; 3589 int flags = 0; 3590 bool query_attrs = false; 3591 int retries = 0, cur_sleep = 1; 3592 3593 replay_again: 3594 /* reinitialize for possible replay */ 3595 flags = 0; 3596 query_attrs = false; 3597 server = cifs_pick_channel(ses); 3598 3599 cifs_dbg(FYI, "Close\n"); 3600 3601 if (!ses || !server) 3602 return -EIO; 3603 3604 if (smb3_encryption_required(tcon)) 3605 flags |= CIFS_TRANSFORM_REQ; 3606 3607 memset(&rqst, 0, sizeof(struct smb_rqst)); 3608 memset(&iov, 0, sizeof(iov)); 3609 rqst.rq_iov = iov; 3610 rqst.rq_nvec = 1; 3611 3612 /* check if need to ask server to return timestamps in close response */ 3613 if (pbuf) 3614 query_attrs = true; 3615 3616 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid); 3617 rc = SMB2_close_init(tcon, server, 3618 &rqst, persistent_fid, volatile_fid, 3619 query_attrs); 3620 if (rc) 3621 goto close_exit; 3622 3623 if (retries) 3624 smb2_set_replay(server, &rqst); 3625 3626 rc = cifs_send_recv(xid, ses, server, 3627 &rqst, &resp_buftype, flags, &rsp_iov); 3628 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; 3629 3630 if (rc != 0) { 3631 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); 3632 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid, 3633 rc); 3634 goto close_exit; 3635 } else { 3636 trace_smb3_close_done(xid, persistent_fid, tcon->tid, 3637 ses->Suid); 3638 if (pbuf) 3639 memcpy(&pbuf->network_open_info, 3640 &rsp->network_open_info, 3641 sizeof(pbuf->network_open_info)); 3642 atomic_dec(&tcon->num_remote_opens); 3643 } 3644 3645 close_exit: 3646 SMB2_close_free(&rqst); 3647 free_rsp_buf(resp_buftype, rsp); 3648 3649 /* retry close in a worker thread if this one is interrupted */ 3650 if (is_interrupt_error(rc)) { 3651 int tmp_rc; 3652 3653 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid, 3654 volatile_fid); 3655 if (tmp_rc) 3656 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n", 3657 persistent_fid, tmp_rc); 3658 } 3659 3660 if (is_replayable_error(rc) && 3661 smb2_should_replay(tcon, &retries, &cur_sleep)) 3662 goto replay_again; 3663 3664 return rc; 3665 } 3666 3667 int 3668 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 3669 u64 persistent_fid, u64 volatile_fid) 3670 { 3671 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL); 3672 } 3673 3674 int 3675 smb2_validate_iov(unsigned int offset, unsigned int buffer_length, 3676 struct kvec *iov, unsigned int min_buf_size) 3677 { 3678 unsigned int smb_len = iov->iov_len; 3679 char *end_of_smb = smb_len + (char *)iov->iov_base; 3680 char *begin_of_buf = offset + (char *)iov->iov_base; 3681 char *end_of_buf = begin_of_buf + buffer_length; 3682 3683 3684 if (buffer_length < min_buf_size) { 3685 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n", 3686 buffer_length, min_buf_size); 3687 return -EINVAL; 3688 } 3689 3690 /* check if beyond RFC1001 maximum length */ 3691 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) { 3692 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n", 3693 buffer_length, smb_len); 3694 return -EINVAL; 3695 } 3696 3697 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) { 3698 cifs_dbg(VFS, "Invalid server response, bad offset to data\n"); 3699 return -EINVAL; 3700 } 3701 3702 return 0; 3703 } 3704 3705 /* 3706 * If 
SMB buffer fields are valid, copy into temporary buffer to hold result. 3707 * Caller must free buffer. 3708 */ 3709 int 3710 smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length, 3711 struct kvec *iov, unsigned int minbufsize, 3712 char *data) 3713 { 3714 char *begin_of_buf = offset + (char *)iov->iov_base; 3715 int rc; 3716 3717 if (!data) 3718 return -EINVAL; 3719 3720 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize); 3721 if (rc) 3722 return rc; 3723 3724 memcpy(data, begin_of_buf, minbufsize); 3725 3726 return 0; 3727 } 3728 3729 int 3730 SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3731 struct smb_rqst *rqst, 3732 u64 persistent_fid, u64 volatile_fid, 3733 u8 info_class, u8 info_type, u32 additional_info, 3734 size_t output_len, size_t input_len, void *input) 3735 { 3736 struct smb2_query_info_req *req; 3737 struct kvec *iov = rqst->rq_iov; 3738 unsigned int total_len; 3739 size_t len; 3740 int rc; 3741 3742 if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) || 3743 len > CIFSMaxBufSize)) 3744 return -EINVAL; 3745 3746 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server, 3747 (void **) &req, &total_len); 3748 if (rc) 3749 return rc; 3750 3751 req->InfoType = info_type; 3752 req->FileInfoClass = info_class; 3753 req->PersistentFileId = persistent_fid; 3754 req->VolatileFileId = volatile_fid; 3755 req->AdditionalInformation = cpu_to_le32(additional_info); 3756 3757 req->OutputBufferLength = cpu_to_le32(output_len); 3758 if (input_len) { 3759 req->InputBufferLength = cpu_to_le32(input_len); 3760 /* total_len for smb query request never close to le16 max */ 3761 req->InputBufferOffset = cpu_to_le16(total_len - 1); 3762 memcpy(req->Buffer, input, input_len); 3763 } 3764 3765 iov[0].iov_base = (char *)req; 3766 /* 1 for Buffer */ 3767 iov[0].iov_len = len; 3768 return 0; 3769 } 3770 3771 void 3772 SMB2_query_info_free(struct smb_rqst *rqst) 3773 { 3774 if (rqst && rqst->rq_iov) 3775 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3776 } 3777 3778 static int 3779 query_info(const unsigned int xid, struct cifs_tcon *tcon, 3780 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type, 3781 u32 additional_info, size_t output_len, size_t min_len, void **data, 3782 u32 *dlen) 3783 { 3784 struct smb_rqst rqst; 3785 struct smb2_query_info_rsp *rsp = NULL; 3786 struct kvec iov[1]; 3787 struct kvec rsp_iov; 3788 int rc = 0; 3789 int resp_buftype = CIFS_NO_BUFFER; 3790 struct cifs_ses *ses = tcon->ses; 3791 struct TCP_Server_Info *server; 3792 int flags = 0; 3793 bool allocated = false; 3794 int retries = 0, cur_sleep = 1; 3795 3796 cifs_dbg(FYI, "Query Info\n"); 3797 3798 if (!ses) 3799 return -EIO; 3800 3801 replay_again: 3802 /* reinitialize for possible replay */ 3803 flags = 0; 3804 allocated = false; 3805 server = cifs_pick_channel(ses); 3806 3807 if (!server) 3808 return -EIO; 3809 3810 if (smb3_encryption_required(tcon)) 3811 flags |= CIFS_TRANSFORM_REQ; 3812 3813 memset(&rqst, 0, sizeof(struct smb_rqst)); 3814 memset(&iov, 0, sizeof(iov)); 3815 rqst.rq_iov = iov; 3816 rqst.rq_nvec = 1; 3817 3818 rc = SMB2_query_info_init(tcon, server, 3819 &rqst, persistent_fid, volatile_fid, 3820 info_class, info_type, additional_info, 3821 output_len, 0, NULL); 3822 if (rc) 3823 goto qinf_exit; 3824 3825 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid, 3826 ses->Suid, info_class, (__u32)info_type); 3827 3828 if (retries) 3829 smb2_set_replay(server, &rqst); 3830 3831 rc = cifs_send_recv(xid, 
ses, server, 3832 &rqst, &resp_buftype, flags, &rsp_iov); 3833 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; 3834 3835 if (rc) { 3836 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); 3837 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid, 3838 ses->Suid, info_class, (__u32)info_type, rc); 3839 goto qinf_exit; 3840 } 3841 3842 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid, 3843 ses->Suid, info_class, (__u32)info_type); 3844 3845 if (dlen) { 3846 *dlen = le32_to_cpu(rsp->OutputBufferLength); 3847 if (!*data) { 3848 *data = kmalloc(*dlen, GFP_KERNEL); 3849 if (!*data) { 3850 cifs_tcon_dbg(VFS, 3851 "Error %d allocating memory for acl\n", 3852 rc); 3853 *dlen = 0; 3854 rc = -ENOMEM; 3855 goto qinf_exit; 3856 } 3857 allocated = true; 3858 } 3859 } 3860 3861 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), 3862 le32_to_cpu(rsp->OutputBufferLength), 3863 &rsp_iov, dlen ? *dlen : min_len, *data); 3864 if (rc && allocated) { 3865 kfree(*data); 3866 *data = NULL; 3867 *dlen = 0; 3868 } 3869 3870 qinf_exit: 3871 SMB2_query_info_free(&rqst); 3872 free_rsp_buf(resp_buftype, rsp); 3873 3874 if (is_replayable_error(rc) && 3875 smb2_should_replay(tcon, &retries, &cur_sleep)) 3876 goto replay_again; 3877 3878 return rc; 3879 } 3880 3881 int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, 3882 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data) 3883 { 3884 return query_info(xid, tcon, persistent_fid, volatile_fid, 3885 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0, 3886 sizeof(struct smb2_file_all_info) + PATH_MAX * 2, 3887 sizeof(struct smb2_file_all_info), (void **)&data, 3888 NULL); 3889 } 3890 3891 #if 0 3892 /* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */ 3893 int 3894 SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon, 3895 u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen) 3896 { 3897 size_t output_len = sizeof(struct smb311_posix_qinfo *) + 3898 (sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2); 3899 *plen = 0; 3900 3901 return query_info(xid, tcon, persistent_fid, volatile_fid, 3902 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0, 3903 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen); 3904 /* Note caller must free "data" (passed in above). 
It may be allocated in query_info call */ 3905 } 3906 #endif 3907 3908 int 3909 SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon, 3910 u64 persistent_fid, u64 volatile_fid, 3911 void **data, u32 *plen, u32 extra_info) 3912 { 3913 __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO | 3914 extra_info; 3915 *plen = 0; 3916 3917 return query_info(xid, tcon, persistent_fid, volatile_fid, 3918 0, SMB2_O_INFO_SECURITY, additional_info, 3919 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen); 3920 } 3921 3922 int 3923 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon, 3924 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid) 3925 { 3926 return query_info(xid, tcon, persistent_fid, volatile_fid, 3927 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0, 3928 sizeof(struct smb2_file_internal_info), 3929 sizeof(struct smb2_file_internal_info), 3930 (void **)&uniqueid, NULL); 3931 } 3932 3933 /* 3934 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory 3935 * See MS-SMB2 2.2.35 and 2.2.36 3936 */ 3937 3938 static int 3939 SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst, 3940 struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3941 u64 persistent_fid, u64 volatile_fid, 3942 u32 completion_filter, bool watch_tree) 3943 { 3944 struct smb2_change_notify_req *req; 3945 struct kvec *iov = rqst->rq_iov; 3946 unsigned int total_len; 3947 int rc; 3948 3949 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server, 3950 (void **) &req, &total_len); 3951 if (rc) 3952 return rc; 3953 3954 req->PersistentFileId = persistent_fid; 3955 req->VolatileFileId = volatile_fid; 3956 /* See note 354 of MS-SMB2, 64K max */ 3957 req->OutputBufferLength = 3958 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE); 3959 req->CompletionFilter = cpu_to_le32(completion_filter); 3960 if (watch_tree) 3961 req->Flags = cpu_to_le16(SMB2_WATCH_TREE); 3962 else 3963 req->Flags = 0; 3964 3965 iov[0].iov_base = (char *)req; 3966 iov[0].iov_len = total_len; 3967 3968 return 0; 3969 } 3970 3971 int 3972 SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, 3973 u64 persistent_fid, u64 volatile_fid, bool watch_tree, 3974 u32 completion_filter, u32 max_out_data_len, char **out_data, 3975 u32 *plen /* returned data len */) 3976 { 3977 struct cifs_ses *ses = tcon->ses; 3978 struct TCP_Server_Info *server; 3979 struct smb_rqst rqst; 3980 struct smb2_change_notify_rsp *smb_rsp; 3981 struct kvec iov[1]; 3982 struct kvec rsp_iov = {NULL, 0}; 3983 int resp_buftype = CIFS_NO_BUFFER; 3984 int flags = 0; 3985 int rc = 0; 3986 int retries = 0, cur_sleep = 1; 3987 3988 replay_again: 3989 /* reinitialize for possible replay */ 3990 flags = 0; 3991 server = cifs_pick_channel(ses); 3992 3993 cifs_dbg(FYI, "change notify\n"); 3994 if (!ses || !server) 3995 return -EIO; 3996 3997 if (smb3_encryption_required(tcon)) 3998 flags |= CIFS_TRANSFORM_REQ; 3999 4000 memset(&rqst, 0, sizeof(struct smb_rqst)); 4001 memset(&iov, 0, sizeof(iov)); 4002 if (plen) 4003 *plen = 0; 4004 4005 rqst.rq_iov = iov; 4006 rqst.rq_nvec = 1; 4007 4008 rc = SMB2_notify_init(xid, &rqst, tcon, server, 4009 persistent_fid, volatile_fid, 4010 completion_filter, watch_tree); 4011 if (rc) 4012 goto cnotify_exit; 4013 4014 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid, 4015 (u8)watch_tree, completion_filter); 4016 4017 if (retries) 4018 smb2_set_replay(server, &rqst); 4019 4020 rc = cifs_send_recv(xid, ses, server, 4021 &rqst, &resp_buftype, flags, &rsp_iov); 4022 4023 if 
(rc != 0) { 4024 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE); 4025 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid, 4026 (u8)watch_tree, completion_filter, rc); 4027 } else { 4028 trace_smb3_notify_done(xid, persistent_fid, tcon->tid, 4029 ses->Suid, (u8)watch_tree, completion_filter); 4030 /* validate that notify information is plausible */ 4031 if ((rsp_iov.iov_base == NULL) || 4032 (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1)) 4033 goto cnotify_exit; 4034 4035 smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base; 4036 4037 smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset), 4038 le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov, 4039 sizeof(struct file_notify_information)); 4040 4041 *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset), 4042 le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL); 4043 if (*out_data == NULL) { 4044 rc = -ENOMEM; 4045 goto cnotify_exit; 4046 } else if (plen) 4047 *plen = le32_to_cpu(smb_rsp->OutputBufferLength); 4048 } 4049 4050 cnotify_exit: 4051 if (rqst.rq_iov) 4052 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */ 4053 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4054 4055 if (is_replayable_error(rc) && 4056 smb2_should_replay(tcon, &retries, &cur_sleep)) 4057 goto replay_again; 4058 4059 return rc; 4060 } 4061 4062 4063 4064 /* 4065 * This is a no-op for now. We're not really interested in the reply, but 4066 * rather in the fact that the server sent one and that server->lstrp 4067 * gets updated. 4068 * 4069 * FIXME: maybe we should consider checking that the reply matches request? 4070 */ 4071 static void 4072 smb2_echo_callback(struct mid_q_entry *mid) 4073 { 4074 struct TCP_Server_Info *server = mid->callback_data; 4075 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; 4076 struct cifs_credits credits = { .value = 0, .instance = 0 }; 4077 4078 if (mid->mid_state == MID_RESPONSE_RECEIVED 4079 || mid->mid_state == MID_RESPONSE_MALFORMED) { 4080 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4081 credits.instance = server->reconnect_instance; 4082 } 4083 4084 release_mid(mid); 4085 add_credits(server, &credits, CIFS_ECHO_OP); 4086 } 4087 4088 void smb2_reconnect_server(struct work_struct *work) 4089 { 4090 struct TCP_Server_Info *server = container_of(work, 4091 struct TCP_Server_Info, reconnect.work); 4092 struct TCP_Server_Info *pserver; 4093 struct cifs_ses *ses, *ses2; 4094 struct cifs_tcon *tcon, *tcon2; 4095 struct list_head tmp_list, tmp_ses_list; 4096 bool ses_exist = false; 4097 bool tcon_selected = false; 4098 int rc; 4099 bool resched = false; 4100 4101 /* first check if ref count has reached 0, if not inc ref count */ 4102 spin_lock(&cifs_tcp_ses_lock); 4103 if (!server->srv_count) { 4104 spin_unlock(&cifs_tcp_ses_lock); 4105 return; 4106 } 4107 server->srv_count++; 4108 spin_unlock(&cifs_tcp_ses_lock); 4109 4110 /* If server is a channel, select the primary channel */ 4111 pserver = SERVER_IS_CHAN(server) ? 
server->primary_server : server; 4112 4113 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ 4114 mutex_lock(&pserver->reconnect_mutex); 4115 4116 /* if the server is marked for termination, drop the ref count here */ 4117 if (server->terminate) { 4118 cifs_put_tcp_session(server, true); 4119 mutex_unlock(&pserver->reconnect_mutex); 4120 return; 4121 } 4122 4123 INIT_LIST_HEAD(&tmp_list); 4124 INIT_LIST_HEAD(&tmp_ses_list); 4125 cifs_dbg(FYI, "Reconnecting tcons and channels\n"); 4126 4127 spin_lock(&cifs_tcp_ses_lock); 4128 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 4129 spin_lock(&ses->ses_lock); 4130 if (ses->ses_status == SES_EXITING) { 4131 spin_unlock(&ses->ses_lock); 4132 continue; 4133 } 4134 spin_unlock(&ses->ses_lock); 4135 4136 tcon_selected = false; 4137 4138 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 4139 if (tcon->need_reconnect || tcon->need_reopen_files) { 4140 tcon->tc_count++; 4141 list_add_tail(&tcon->rlist, &tmp_list); 4142 tcon_selected = true; 4143 } 4144 } 4145 /* 4146 * IPC has the same lifetime as its session and uses its 4147 * refcount. 4148 */ 4149 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) { 4150 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list); 4151 tcon_selected = true; 4152 cifs_smb_ses_inc_refcount(ses); 4153 } 4154 /* 4155 * handle the case where channel needs to reconnect 4156 * binding session, but tcon is healthy (some other channel 4157 * is active) 4158 */ 4159 spin_lock(&ses->chan_lock); 4160 if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) { 4161 list_add_tail(&ses->rlist, &tmp_ses_list); 4162 ses_exist = true; 4163 cifs_smb_ses_inc_refcount(ses); 4164 } 4165 spin_unlock(&ses->chan_lock); 4166 } 4167 spin_unlock(&cifs_tcp_ses_lock); 4168 4169 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { 4170 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true); 4171 if (!rc) 4172 cifs_reopen_persistent_handles(tcon); 4173 else 4174 resched = true; 4175 list_del_init(&tcon->rlist); 4176 if (tcon->ipc) 4177 cifs_put_smb_ses(tcon->ses); 4178 else 4179 cifs_put_tcon(tcon); 4180 } 4181 4182 if (!ses_exist) 4183 goto done; 4184 4185 /* allocate a dummy tcon struct used for reconnect */ 4186 tcon = tcon_info_alloc(false); 4187 if (!tcon) { 4188 resched = true; 4189 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { 4190 list_del_init(&ses->rlist); 4191 cifs_put_smb_ses(ses); 4192 } 4193 goto done; 4194 } 4195 4196 tcon->status = TID_GOOD; 4197 tcon->retry = false; 4198 tcon->need_reconnect = false; 4199 4200 /* now reconnect sessions for necessary channels */ 4201 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { 4202 tcon->ses = ses; 4203 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true); 4204 if (rc) 4205 resched = true; 4206 list_del_init(&ses->rlist); 4207 cifs_put_smb_ses(ses); 4208 } 4209 tconInfoFree(tcon); 4210 4211 done: 4212 cifs_dbg(FYI, "Reconnecting tcons and channels finished\n"); 4213 if (resched) 4214 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ); 4215 mutex_unlock(&pserver->reconnect_mutex); 4216 4217 /* now we can safely release srv struct */ 4218 cifs_put_tcp_session(server, true); 4219 } 4220 4221 int 4222 SMB2_echo(struct TCP_Server_Info *server) 4223 { 4224 struct smb2_echo_req *req; 4225 int rc = 0; 4226 struct kvec iov[1]; 4227 struct smb_rqst rqst = { .rq_iov = iov, 4228 .rq_nvec = 1 }; 4229 unsigned int total_len; 4230 4231 cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id); 4232 4233 
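/*
 * If the connection still needs to negotiate (e.g. it was just reset),
 * sending an echo is pointless; kick the reconnect worker instead.
 */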
spin_lock(&server->srv_lock); 4234 if (server->ops->need_neg && 4235 server->ops->need_neg(server)) { 4236 spin_unlock(&server->srv_lock); 4237 /* No need to send echo on newly established connections */ 4238 mod_delayed_work(cifsiod_wq, &server->reconnect, 0); 4239 return rc; 4240 } 4241 spin_unlock(&server->srv_lock); 4242 4243 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, 4244 (void **)&req, &total_len); 4245 if (rc) 4246 return rc; 4247 4248 req->hdr.CreditRequest = cpu_to_le16(1); 4249 4250 iov[0].iov_len = total_len; 4251 iov[0].iov_base = (char *)req; 4252 4253 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL, 4254 server, CIFS_ECHO_OP, NULL); 4255 if (rc) 4256 cifs_dbg(FYI, "Echo request failed: %d\n", rc); 4257 4258 cifs_small_buf_release(req); 4259 return rc; 4260 } 4261 4262 void 4263 SMB2_flush_free(struct smb_rqst *rqst) 4264 { 4265 if (rqst && rqst->rq_iov) 4266 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 4267 } 4268 4269 int 4270 SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst, 4271 struct cifs_tcon *tcon, struct TCP_Server_Info *server, 4272 u64 persistent_fid, u64 volatile_fid) 4273 { 4274 struct smb2_flush_req *req; 4275 struct kvec *iov = rqst->rq_iov; 4276 unsigned int total_len; 4277 int rc; 4278 4279 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server, 4280 (void **) &req, &total_len); 4281 if (rc) 4282 return rc; 4283 4284 req->PersistentFileId = persistent_fid; 4285 req->VolatileFileId = volatile_fid; 4286 4287 iov[0].iov_base = (char *)req; 4288 iov[0].iov_len = total_len; 4289 4290 return 0; 4291 } 4292 4293 int 4294 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 4295 u64 volatile_fid) 4296 { 4297 struct cifs_ses *ses = tcon->ses; 4298 struct smb_rqst rqst; 4299 struct kvec iov[1]; 4300 struct kvec rsp_iov = {NULL, 0}; 4301 struct TCP_Server_Info *server; 4302 int resp_buftype = CIFS_NO_BUFFER; 4303 int flags = 0; 4304 int rc = 0; 4305 int retries = 0, cur_sleep = 1; 4306 4307 replay_again: 4308 /* reinitialize for possible replay */ 4309 flags = 0; 4310 server = cifs_pick_channel(ses); 4311 4312 cifs_dbg(FYI, "flush\n"); 4313 if (!ses || !(ses->server)) 4314 return -EIO; 4315 4316 if (smb3_encryption_required(tcon)) 4317 flags |= CIFS_TRANSFORM_REQ; 4318 4319 memset(&rqst, 0, sizeof(struct smb_rqst)); 4320 memset(&iov, 0, sizeof(iov)); 4321 rqst.rq_iov = iov; 4322 rqst.rq_nvec = 1; 4323 4324 rc = SMB2_flush_init(xid, &rqst, tcon, server, 4325 persistent_fid, volatile_fid); 4326 if (rc) 4327 goto flush_exit; 4328 4329 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid); 4330 4331 if (retries) 4332 smb2_set_replay(server, &rqst); 4333 4334 rc = cifs_send_recv(xid, ses, server, 4335 &rqst, &resp_buftype, flags, &rsp_iov); 4336 4337 if (rc != 0) { 4338 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); 4339 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid, 4340 rc); 4341 } else 4342 trace_smb3_flush_done(xid, persistent_fid, tcon->tid, 4343 ses->Suid); 4344 4345 flush_exit: 4346 SMB2_flush_free(&rqst); 4347 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4348 4349 if (is_replayable_error(rc) && 4350 smb2_should_replay(tcon, &retries, &cur_sleep)) 4351 goto replay_again; 4352 4353 return rc; 4354 } 4355 4356 #ifdef CONFIG_CIFS_SMB_DIRECT 4357 static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms) 4358 { 4359 struct TCP_Server_Info *server = io_parms->server; 4360 struct cifs_tcon *tcon = io_parms->tcon; 4361 4362 /* we can only offload if we're 
connected */ 4363 if (!server || !tcon) 4364 return false; 4365 4366 /* we can only offload on an rdma connection */ 4367 if (!server->rdma || !server->smbd_conn) 4368 return false; 4369 4370 /* we don't support signed offload yet */ 4371 if (server->sign) 4372 return false; 4373 4374 /* we don't support encrypted offload yet */ 4375 if (smb3_encryption_required(tcon)) 4376 return false; 4377 4378 /* offload also has its overhead, so only do it if desired */ 4379 if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold) 4380 return false; 4381 4382 return true; 4383 } 4384 #endif /* CONFIG_CIFS_SMB_DIRECT */ 4385 4386 /* 4387 * To form a chain of read requests, any read requests after the first should 4388 * have the end_of_chain boolean set to true. 4389 */ 4390 static int 4391 smb2_new_read_req(void **buf, unsigned int *total_len, 4392 struct cifs_io_parms *io_parms, struct cifs_readdata *rdata, 4393 unsigned int remaining_bytes, int request_type) 4394 { 4395 int rc = -EACCES; 4396 struct smb2_read_req *req = NULL; 4397 struct smb2_hdr *shdr; 4398 struct TCP_Server_Info *server = io_parms->server; 4399 4400 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server, 4401 (void **) &req, total_len); 4402 if (rc) 4403 return rc; 4404 4405 if (server == NULL) 4406 return -ECONNABORTED; 4407 4408 shdr = &req->hdr; 4409 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 4410 4411 req->PersistentFileId = io_parms->persistent_fid; 4412 req->VolatileFileId = io_parms->volatile_fid; 4413 req->ReadChannelInfoOffset = 0; /* reserved */ 4414 req->ReadChannelInfoLength = 0; /* reserved */ 4415 req->Channel = 0; /* reserved */ 4416 req->MinimumCount = 0; 4417 req->Length = cpu_to_le32(io_parms->length); 4418 req->Offset = cpu_to_le64(io_parms->offset); 4419 4420 trace_smb3_read_enter(0 /* xid */, 4421 io_parms->persistent_fid, 4422 io_parms->tcon->tid, io_parms->tcon->ses->Suid, 4423 io_parms->offset, io_parms->length); 4424 #ifdef CONFIG_CIFS_SMB_DIRECT 4425 /* 4426 * If we want to do a RDMA write, fill in and append 4427 * smbd_buffer_descriptor_v1 to the end of read request 4428 */ 4429 if (smb3_use_rdma_offload(io_parms)) { 4430 struct smbd_buffer_descriptor_v1 *v1; 4431 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4432 4433 rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->iter, 4434 true, need_invalidate); 4435 if (!rdata->mr) 4436 return -EAGAIN; 4437 4438 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 4439 if (need_invalidate) 4440 req->Channel = SMB2_CHANNEL_RDMA_V1; 4441 req->ReadChannelInfoOffset = 4442 cpu_to_le16(offsetof(struct smb2_read_req, Buffer)); 4443 req->ReadChannelInfoLength = 4444 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1)); 4445 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0]; 4446 v1->offset = cpu_to_le64(rdata->mr->mr->iova); 4447 v1->token = cpu_to_le32(rdata->mr->mr->rkey); 4448 v1->length = cpu_to_le32(rdata->mr->mr->length); 4449 4450 *total_len += sizeof(*v1) - 1; 4451 } 4452 #endif 4453 if (request_type & CHAINED_REQUEST) { 4454 if (!(request_type & END_OF_CHAIN)) { 4455 /* next 8-byte aligned request */ 4456 *total_len = ALIGN(*total_len, 8); 4457 shdr->NextCommand = cpu_to_le32(*total_len); 4458 } else /* END_OF_CHAIN */ 4459 shdr->NextCommand = 0; 4460 if (request_type & RELATED_REQUEST) { 4461 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; 4462 /* 4463 * Related requests use info from previous read request 4464 * in chain. 
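 * The all-ones SessionId, TreeId and file IDs set below tell the
 * server to substitute the values from the previous request in the
 * compound chain.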
4465 */ 4466 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF); 4467 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF); 4468 req->PersistentFileId = (u64)-1; 4469 req->VolatileFileId = (u64)-1; 4470 } 4471 } 4472 if (remaining_bytes > io_parms->length) 4473 req->RemainingBytes = cpu_to_le32(remaining_bytes); 4474 else 4475 req->RemainingBytes = 0; 4476 4477 *buf = req; 4478 return rc; 4479 } 4480 4481 static void 4482 smb2_readv_callback(struct mid_q_entry *mid) 4483 { 4484 struct cifs_readdata *rdata = mid->callback_data; 4485 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); 4486 struct TCP_Server_Info *server = rdata->server; 4487 struct smb2_hdr *shdr = 4488 (struct smb2_hdr *)rdata->iov[0].iov_base; 4489 struct cifs_credits credits = { .value = 0, .instance = 0 }; 4490 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], .rq_nvec = 1 }; 4491 4492 if (rdata->got_bytes) { 4493 rqst.rq_iter = rdata->iter; 4494 rqst.rq_iter_size = iov_iter_count(&rdata->iter); 4495 } 4496 4497 WARN_ONCE(rdata->server != mid->server, 4498 "rdata server %p != mid server %p", 4499 rdata->server, mid->server); 4500 4501 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n", 4502 __func__, mid->mid, mid->mid_state, rdata->result, 4503 rdata->bytes); 4504 4505 switch (mid->mid_state) { 4506 case MID_RESPONSE_RECEIVED: 4507 credits.value = le16_to_cpu(shdr->CreditRequest); 4508 credits.instance = server->reconnect_instance; 4509 /* result already set, check signature */ 4510 if (server->sign && !mid->decrypted) { 4511 int rc; 4512 4513 iov_iter_revert(&rqst.rq_iter, rdata->got_bytes); 4514 iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes); 4515 rc = smb2_verify_signature(&rqst, server); 4516 if (rc) 4517 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n", 4518 rc); 4519 } 4520 /* FIXME: should this be counted toward the initiating task? */ 4521 task_io_account_read(rdata->got_bytes); 4522 cifs_stats_bytes_read(tcon, rdata->got_bytes); 4523 break; 4524 case MID_REQUEST_SUBMITTED: 4525 case MID_RETRY_NEEDED: 4526 rdata->result = -EAGAIN; 4527 if (server->sign && rdata->got_bytes) 4528 /* reset bytes number since we can not check a sign */ 4529 rdata->got_bytes = 0; 4530 /* FIXME: should this be counted toward the initiating task? 
*/
4531 task_io_account_read(rdata->got_bytes);
4532 cifs_stats_bytes_read(tcon, rdata->got_bytes);
4533 break;
4534 case MID_RESPONSE_MALFORMED:
4535 credits.value = le16_to_cpu(shdr->CreditRequest);
4536 credits.instance = server->reconnect_instance;
4537 fallthrough;
4538 default:
4539 rdata->result = -EIO;
4540 }
4541 #ifdef CONFIG_CIFS_SMB_DIRECT
4542 /*
4543 * If this rdata has a memory region registered, the MR can be freed.
4544 * MRs need to be freed as soon as the I/O finishes to prevent deadlock,
4545 * because they are limited in number and are reused for future I/Os.
4546 */
4547 if (rdata->mr) {
4548 smbd_deregister_mr(rdata->mr);
4549 rdata->mr = NULL;
4550 }
4551 #endif
4552 if (rdata->result && rdata->result != -ENODATA) {
4553 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
4554 trace_smb3_read_err(0 /* xid */,
4555 rdata->cfile->fid.persistent_fid,
4556 tcon->tid, tcon->ses->Suid, rdata->offset,
4557 rdata->bytes, rdata->result);
4558 } else
4559 trace_smb3_read_done(0 /* xid */,
4560 rdata->cfile->fid.persistent_fid,
4561 tcon->tid, tcon->ses->Suid,
4562 rdata->offset, rdata->got_bytes);
4563
4564 queue_work(cifsiod_wq, &rdata->work);
4565 release_mid(mid);
4566 add_credits(server, &credits, 0);
4567 }
4568
4569 /* smb2_async_readv - send an async read, and set up mid to handle result */
4570 int
4571 smb2_async_readv(struct cifs_readdata *rdata)
4572 {
4573 int rc, flags = 0;
4574 char *buf;
4575 struct smb2_hdr *shdr;
4576 struct cifs_io_parms io_parms;
4577 struct smb_rqst rqst = { .rq_iov = rdata->iov,
4578 .rq_nvec = 1 };
4579 struct TCP_Server_Info *server;
4580 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
4581 unsigned int total_len;
4582 int credit_request;
4583
4584 cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
4585 __func__, rdata->offset, rdata->bytes);
4586
4587 if (!rdata->server)
4588 rdata->server = cifs_pick_channel(tcon->ses);
4589
4590 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
4591 io_parms.server = server = rdata->server;
4592 io_parms.offset = rdata->offset;
4593 io_parms.length = rdata->bytes;
4594 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
4595 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
4596 io_parms.pid = rdata->pid;
4597
4598 rc = smb2_new_read_req(
4599 (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
4600 if (rc)
4601 return rc;
4602
4603 if (smb3_encryption_required(io_parms.tcon))
4604 flags |= CIFS_TRANSFORM_REQ;
4605
4606 rdata->iov[0].iov_base = buf;
4607 rdata->iov[0].iov_len = total_len;
4608
4609 shdr = (struct smb2_hdr *)buf;
4610
4611 if (rdata->credits.value > 0) {
4612 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
4613 SMB2_MAX_BUFFER_SIZE));
4614 credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
4615 if (server->credits >= server->max_credits)
4616 shdr->CreditRequest = cpu_to_le16(0);
4617 else
4618 shdr->CreditRequest = cpu_to_le16(
4619 min_t(int, server->max_credits -
4620 server->credits, credit_request));
4621
4622 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4623 if (rc)
4624 goto async_readv_out;
4625
4626 flags |= CIFS_HAS_CREDITS;
4627 }
4628
4629 kref_get(&rdata->refcount);
4630 rc = cifs_call_async(server, &rqst,
4631 cifs_readv_receive, smb2_readv_callback,
4632 smb3_handle_read_data, rdata, flags,
4633 &rdata->credits);
4634 if (rc) {
4635 kref_put(&rdata->refcount, cifs_readdata_release);
4636 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
4637 trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
4638 io_parms.tcon->tid,
4639
io_parms.tcon->ses->Suid, 4640 io_parms.offset, io_parms.length, rc); 4641 } 4642 4643 async_readv_out: 4644 cifs_small_buf_release(buf); 4645 return rc; 4646 } 4647 4648 int 4649 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, 4650 unsigned int *nbytes, char **buf, int *buf_type) 4651 { 4652 struct smb_rqst rqst; 4653 int resp_buftype, rc; 4654 struct smb2_read_req *req = NULL; 4655 struct smb2_read_rsp *rsp = NULL; 4656 struct kvec iov[1]; 4657 struct kvec rsp_iov; 4658 unsigned int total_len; 4659 int flags = CIFS_LOG_ERROR; 4660 struct cifs_ses *ses = io_parms->tcon->ses; 4661 4662 if (!io_parms->server) 4663 io_parms->server = cifs_pick_channel(io_parms->tcon->ses); 4664 4665 *nbytes = 0; 4666 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0); 4667 if (rc) 4668 return rc; 4669 4670 if (smb3_encryption_required(io_parms->tcon)) 4671 flags |= CIFS_TRANSFORM_REQ; 4672 4673 iov[0].iov_base = (char *)req; 4674 iov[0].iov_len = total_len; 4675 4676 memset(&rqst, 0, sizeof(struct smb_rqst)); 4677 rqst.rq_iov = iov; 4678 rqst.rq_nvec = 1; 4679 4680 rc = cifs_send_recv(xid, ses, io_parms->server, 4681 &rqst, &resp_buftype, flags, &rsp_iov); 4682 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; 4683 4684 if (rc) { 4685 if (rc != -ENODATA) { 4686 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 4687 cifs_dbg(VFS, "Send error in read = %d\n", rc); 4688 trace_smb3_read_err(xid, 4689 req->PersistentFileId, 4690 io_parms->tcon->tid, ses->Suid, 4691 io_parms->offset, io_parms->length, 4692 rc); 4693 } else 4694 trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid, 4695 ses->Suid, io_parms->offset, 0); 4696 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4697 cifs_small_buf_release(req); 4698 return rc == -ENODATA ? 0 : rc; 4699 } else 4700 trace_smb3_read_done(xid, 4701 req->PersistentFileId, 4702 io_parms->tcon->tid, ses->Suid, 4703 io_parms->offset, io_parms->length); 4704 4705 cifs_small_buf_release(req); 4706 4707 *nbytes = le32_to_cpu(rsp->DataLength); 4708 if ((*nbytes > CIFS_MAX_MSGSIZE) || 4709 (*nbytes > io_parms->length)) { 4710 cifs_dbg(FYI, "bad length %d for count %d\n", 4711 *nbytes, io_parms->length); 4712 rc = -EIO; 4713 *nbytes = 0; 4714 } 4715 4716 if (*buf) { 4717 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes); 4718 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4719 } else if (resp_buftype != CIFS_NO_BUFFER) { 4720 *buf = rsp_iov.iov_base; 4721 if (resp_buftype == CIFS_SMALL_BUFFER) 4722 *buf_type = CIFS_SMALL_BUFFER; 4723 else if (resp_buftype == CIFS_LARGE_BUFFER) 4724 *buf_type = CIFS_LARGE_BUFFER; 4725 } 4726 return rc; 4727 } 4728 4729 /* 4730 * Check the mid_state and signature on received buffer (if any), and queue the 4731 * workqueue completion task. 
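 * Credits granted by the server in the response are returned to the
 * credit pool via add_credits() after the mid is released.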
*/
4733 static void
4734 smb2_writev_callback(struct mid_q_entry *mid)
4735 {
4736 struct cifs_writedata *wdata = mid->callback_data;
4737 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
4738 struct TCP_Server_Info *server = wdata->server;
4739 unsigned int written;
4740 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
4741 struct cifs_credits credits = { .value = 0, .instance = 0 };
4742
4743 WARN_ONCE(wdata->server != mid->server,
4744 "wdata server %p != mid server %p",
4745 wdata->server, mid->server);
4746
4747 switch (mid->mid_state) {
4748 case MID_RESPONSE_RECEIVED:
4749 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4750 credits.instance = server->reconnect_instance;
4751 wdata->result = smb2_check_receive(mid, server, 0);
4752 if (wdata->result != 0)
4753 break;
4754
4755 written = le32_to_cpu(rsp->DataLength);
4756 /*
4757 * Mask off high 16 bits when bytes written as returned
4758 * by the server is greater than bytes requested by the
4759 * client. OS/2 servers are known to set incorrect
4760 * CountHigh values.
4761 */
4762 if (written > wdata->bytes)
4763 written &= 0xFFFF;
4764
4765 if (written < wdata->bytes)
4766 wdata->result = -ENOSPC;
4767 else
4768 wdata->bytes = written;
4769 break;
4770 case MID_REQUEST_SUBMITTED:
4771 case MID_RETRY_NEEDED:
4772 wdata->result = -EAGAIN;
4773 break;
4774 case MID_RESPONSE_MALFORMED:
4775 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4776 credits.instance = server->reconnect_instance;
4777 fallthrough;
4778 default:
4779 wdata->result = -EIO;
4780 break;
4781 }
4782 #ifdef CONFIG_CIFS_SMB_DIRECT
4783 /*
4784 * If this wdata has a memory region registered, the MR can be freed.
4785 * The number of MRs available is limited, so it is important to recover
4786 * a used MR as soon as the I/O finishes. Holding an MR into the later
4787 * stages of I/O can result in deadlock due to a lack of MRs for
4788 * sending requests on I/O retry.
4789 */
4790 if (wdata->mr) {
4791 smbd_deregister_mr(wdata->mr);
4792 wdata->mr = NULL;
4793 }
4794 #endif
4795 if (wdata->result) {
4796 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
4797 trace_smb3_write_err(0 /* no xid */,
4798 wdata->cfile->fid.persistent_fid,
4799 tcon->tid, tcon->ses->Suid, wdata->offset,
4800 wdata->bytes, wdata->result);
4801 if (wdata->result == -ENOSPC)
4802 pr_warn_once("Out of space writing to %s\n",
4803 tcon->tree_name);
4804 } else
4805 trace_smb3_write_done(0 /* no xid */,
4806 wdata->cfile->fid.persistent_fid,
4807 tcon->tid, tcon->ses->Suid,
4808 wdata->offset, wdata->bytes);
4809
4810 queue_work(cifsiod_wq, &wdata->work);
4811 release_mid(mid);
4812 add_credits(server, &credits, 0);
4813 }
4814
4815 /* smb2_async_writev - send an async write, and set up mid to handle result */
4816 int
4817 smb2_async_writev(struct cifs_writedata *wdata,
4818 void (*release)(struct kref *kref))
4819 {
4820 int rc = -EACCES, flags = 0;
4821 struct smb2_write_req *req = NULL;
4822 struct smb2_hdr *shdr;
4823 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
4824 struct TCP_Server_Info *server = wdata->server;
4825 struct kvec iov[1];
4826 struct smb_rqst rqst = { };
4827 unsigned int total_len;
4828 struct cifs_io_parms _io_parms;
4829 struct cifs_io_parms *io_parms = NULL;
4830 int credit_request;
4831
4832 if (!wdata->server || wdata->replay)
4833 server = wdata->server = cifs_pick_channel(tcon->ses);
4834
4835 /*
4836 * In the future we may get cifs_io_parms passed in from the caller,
4837 * but for now we construct it here.
4838 */ 4839 _io_parms = (struct cifs_io_parms) { 4840 .tcon = tcon, 4841 .server = server, 4842 .offset = wdata->offset, 4843 .length = wdata->bytes, 4844 .persistent_fid = wdata->cfile->fid.persistent_fid, 4845 .volatile_fid = wdata->cfile->fid.volatile_fid, 4846 .pid = wdata->pid, 4847 }; 4848 io_parms = &_io_parms; 4849 4850 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server, 4851 (void **) &req, &total_len); 4852 if (rc) 4853 return rc; 4854 4855 if (smb3_encryption_required(tcon)) 4856 flags |= CIFS_TRANSFORM_REQ; 4857 4858 shdr = (struct smb2_hdr *)req; 4859 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 4860 4861 req->PersistentFileId = io_parms->persistent_fid; 4862 req->VolatileFileId = io_parms->volatile_fid; 4863 req->WriteChannelInfoOffset = 0; 4864 req->WriteChannelInfoLength = 0; 4865 req->Channel = SMB2_CHANNEL_NONE; 4866 req->Offset = cpu_to_le64(io_parms->offset); 4867 req->DataOffset = cpu_to_le16( 4868 offsetof(struct smb2_write_req, Buffer)); 4869 req->RemainingBytes = 0; 4870 4871 trace_smb3_write_enter(0 /* xid */, 4872 io_parms->persistent_fid, 4873 io_parms->tcon->tid, 4874 io_parms->tcon->ses->Suid, 4875 io_parms->offset, 4876 io_parms->length); 4877 4878 #ifdef CONFIG_CIFS_SMB_DIRECT 4879 /* 4880 * If we want to do a server RDMA read, fill in and append 4881 * smbd_buffer_descriptor_v1 to the end of write request 4882 */ 4883 if (smb3_use_rdma_offload(io_parms)) { 4884 struct smbd_buffer_descriptor_v1 *v1; 4885 size_t data_size = iov_iter_count(&wdata->iter); 4886 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4887 4888 wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->iter, 4889 false, need_invalidate); 4890 if (!wdata->mr) { 4891 rc = -EAGAIN; 4892 goto async_writev_out; 4893 } 4894 req->Length = 0; 4895 req->DataOffset = 0; 4896 req->RemainingBytes = cpu_to_le32(data_size); 4897 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 4898 if (need_invalidate) 4899 req->Channel = SMB2_CHANNEL_RDMA_V1; 4900 req->WriteChannelInfoOffset = 4901 cpu_to_le16(offsetof(struct smb2_write_req, Buffer)); 4902 req->WriteChannelInfoLength = 4903 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1)); 4904 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0]; 4905 v1->offset = cpu_to_le64(wdata->mr->mr->iova); 4906 v1->token = cpu_to_le32(wdata->mr->mr->rkey); 4907 v1->length = cpu_to_le32(wdata->mr->mr->length); 4908 } 4909 #endif 4910 iov[0].iov_len = total_len - 1; 4911 iov[0].iov_base = (char *)req; 4912 4913 rqst.rq_iov = iov; 4914 rqst.rq_nvec = 1; 4915 rqst.rq_iter = wdata->iter; 4916 rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter); 4917 if (wdata->replay) 4918 smb2_set_replay(server, &rqst); 4919 #ifdef CONFIG_CIFS_SMB_DIRECT 4920 if (wdata->mr) 4921 iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1); 4922 #endif 4923 cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n", 4924 io_parms->offset, io_parms->length, iov_iter_count(&rqst.rq_iter)); 4925 4926 #ifdef CONFIG_CIFS_SMB_DIRECT 4927 /* For RDMA read, I/O size is in RemainingBytes not in Length */ 4928 if (!wdata->mr) 4929 req->Length = cpu_to_le32(io_parms->length); 4930 #else 4931 req->Length = cpu_to_le32(io_parms->length); 4932 #endif 4933 4934 if (wdata->credits.value > 0) { 4935 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, 4936 SMB2_MAX_BUFFER_SIZE)); 4937 credit_request = le16_to_cpu(shdr->CreditCharge) + 8; 4938 if (server->credits >= server->max_credits) 4939 shdr->CreditRequest = cpu_to_le16(0); 4940 else 4941 shdr->CreditRequest = cpu_to_le16( 4942 
min_t(int, server->max_credits -
4943 server->credits, credit_request));
4944
4945 rc = adjust_credits(server, &wdata->credits, io_parms->length);
4946 if (rc)
4947 goto async_writev_out;
4948
4949 flags |= CIFS_HAS_CREDITS;
4950 }
4951
4952 kref_get(&wdata->refcount);
4953 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
4954 wdata, flags, &wdata->credits);
4955
4956 if (rc) {
4957 trace_smb3_write_err(0 /* no xid */,
4958 io_parms->persistent_fid,
4959 io_parms->tcon->tid,
4960 io_parms->tcon->ses->Suid,
4961 io_parms->offset,
4962 io_parms->length,
4963 rc);
4964 kref_put(&wdata->refcount, release);
4965 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
4966 }
4967
4968 async_writev_out:
4969 cifs_small_buf_release(req);
4970 return rc;
4971 }
4972
4973 /*
4974 * SMB2_write gets an iov pointer to a kvec array with n_vec as its length.
4975 * n_vec must be at least 1; the elements carrying data to write begin at
4976 * position 1 in the iov array (position 0 is reserved for the request
4977 * itself). The total number of bytes to write is given by io_parms->length.
4978 */
4979 int
4980 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
4981 unsigned int *nbytes, struct kvec *iov, int n_vec)
4982 {
4983 struct smb_rqst rqst;
4984 int rc = 0;
4985 struct smb2_write_req *req = NULL;
4986 struct smb2_write_rsp *rsp = NULL;
4987 int resp_buftype;
4988 struct kvec rsp_iov;
4989 int flags = 0;
4990 unsigned int total_len;
4991 struct TCP_Server_Info *server;
4992 int retries = 0, cur_sleep = 1;
4993
4994 replay_again:
4995 /* reinitialize for possible replay */
4996 flags = 0;
4997 *nbytes = 0;
4998 if (!io_parms->server)
4999 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
5000 server = io_parms->server;
5001 if (server == NULL)
5002 return -ECONNABORTED;
5003
5004 if (n_vec < 1)
5005 return rc;
5006
5007 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
5008 (void **) &req, &total_len);
5009 if (rc)
5010 return rc;
5011
5012 if (smb3_encryption_required(io_parms->tcon))
5013 flags |= CIFS_TRANSFORM_REQ;
5014
5015 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
5016
5017 req->PersistentFileId = io_parms->persistent_fid;
5018 req->VolatileFileId = io_parms->volatile_fid;
5019 req->WriteChannelInfoOffset = 0;
5020 req->WriteChannelInfoLength = 0;
5021 req->Channel = 0;
5022 req->Length = cpu_to_le32(io_parms->length);
5023 req->Offset = cpu_to_le64(io_parms->offset);
5024 req->DataOffset = cpu_to_le16(
5025 offsetof(struct smb2_write_req, Buffer));
5026 req->RemainingBytes = 0;
5027
5028 trace_smb3_write_enter(xid, io_parms->persistent_fid,
5029 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
5030 io_parms->offset, io_parms->length);
5031
5032 iov[0].iov_base = (char *)req;
5033 /* 1 for Buffer */
5034 iov[0].iov_len = total_len - 1;
5035
5036 memset(&rqst, 0, sizeof(struct smb_rqst));
5037 rqst.rq_iov = iov;
5038 rqst.rq_nvec = n_vec + 1;
5039
5040 if (retries)
5041 smb2_set_replay(server, &rqst);
5042
5043 rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
5044 &rqst,
5045 &resp_buftype, flags, &rsp_iov);
5046 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
5047
5048 if (rc) {
5049 trace_smb3_write_err(xid,
5050 req->PersistentFileId,
5051 io_parms->tcon->tid,
5052 io_parms->tcon->ses->Suid,
5053 io_parms->offset, io_parms->length, rc);
5054 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
5055 cifs_dbg(VFS, "Send error in write = %d\n", rc);
5056 } else {
5057 *nbytes = le32_to_cpu(rsp->DataLength);
5058 trace_smb3_write_done(xid,
5059
req->PersistentFileId, 5060 io_parms->tcon->tid, 5061 io_parms->tcon->ses->Suid, 5062 io_parms->offset, *nbytes); 5063 } 5064 5065 cifs_small_buf_release(req); 5066 free_rsp_buf(resp_buftype, rsp); 5067 5068 if (is_replayable_error(rc) && 5069 smb2_should_replay(io_parms->tcon, &retries, &cur_sleep)) 5070 goto replay_again; 5071 5072 return rc; 5073 } 5074 5075 int posix_info_sid_size(const void *beg, const void *end) 5076 { 5077 size_t subauth; 5078 int total; 5079 5080 if (beg + 1 > end) 5081 return -1; 5082 5083 subauth = *(u8 *)(beg+1); 5084 if (subauth < 1 || subauth > 15) 5085 return -1; 5086 5087 total = 1 + 1 + 6 + 4*subauth; 5088 if (beg + total > end) 5089 return -1; 5090 5091 return total; 5092 } 5093 5094 int posix_info_parse(const void *beg, const void *end, 5095 struct smb2_posix_info_parsed *out) 5096 5097 { 5098 int total_len = 0; 5099 int owner_len, group_len; 5100 int name_len; 5101 const void *owner_sid; 5102 const void *group_sid; 5103 const void *name; 5104 5105 /* if no end bound given, assume payload to be correct */ 5106 if (!end) { 5107 const struct smb2_posix_info *p = beg; 5108 5109 end = beg + le32_to_cpu(p->NextEntryOffset); 5110 /* last element will have a 0 offset, pick a sensible bound */ 5111 if (end == beg) 5112 end += 0xFFFF; 5113 } 5114 5115 /* check base buf */ 5116 if (beg + sizeof(struct smb2_posix_info) > end) 5117 return -1; 5118 total_len = sizeof(struct smb2_posix_info); 5119 5120 /* check owner sid */ 5121 owner_sid = beg + total_len; 5122 owner_len = posix_info_sid_size(owner_sid, end); 5123 if (owner_len < 0) 5124 return -1; 5125 total_len += owner_len; 5126 5127 /* check group sid */ 5128 group_sid = beg + total_len; 5129 group_len = posix_info_sid_size(group_sid, end); 5130 if (group_len < 0) 5131 return -1; 5132 total_len += group_len; 5133 5134 /* check name len */ 5135 if (beg + total_len + 4 > end) 5136 return -1; 5137 name_len = le32_to_cpu(*(__le32 *)(beg + total_len)); 5138 if (name_len < 1 || name_len > 0xFFFF) 5139 return -1; 5140 total_len += 4; 5141 5142 /* check name */ 5143 name = beg + total_len; 5144 if (name + name_len > end) 5145 return -1; 5146 total_len += name_len; 5147 5148 if (out) { 5149 out->base = beg; 5150 out->size = total_len; 5151 out->name_len = name_len; 5152 out->name = name; 5153 memcpy(&out->owner, owner_sid, owner_len); 5154 memcpy(&out->group, group_sid, group_len); 5155 } 5156 return total_len; 5157 } 5158 5159 static int posix_info_extra_size(const void *beg, const void *end) 5160 { 5161 int len = posix_info_parse(beg, end, NULL); 5162 5163 if (len < 0) 5164 return -1; 5165 return len - sizeof(struct smb2_posix_info); 5166 } 5167 5168 static unsigned int 5169 num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry, 5170 size_t size) 5171 { 5172 int len; 5173 unsigned int entrycount = 0; 5174 unsigned int next_offset = 0; 5175 char *entryptr; 5176 FILE_DIRECTORY_INFO *dir_info; 5177 5178 if (bufstart == NULL) 5179 return 0; 5180 5181 entryptr = bufstart; 5182 5183 while (1) { 5184 if (entryptr + next_offset < entryptr || 5185 entryptr + next_offset > end_of_buf || 5186 entryptr + next_offset + size > end_of_buf) { 5187 cifs_dbg(VFS, "malformed search entry would overflow\n"); 5188 break; 5189 } 5190 5191 entryptr = entryptr + next_offset; 5192 dir_info = (FILE_DIRECTORY_INFO *)entryptr; 5193 5194 if (infotype == SMB_FIND_FILE_POSIX_INFO) 5195 len = posix_info_extra_size(entryptr, end_of_buf); 5196 else 5197 len = le32_to_cpu(dir_info->FileNameLength); 5198 5199 if (len < 0 || 5200 
entryptr + len < entryptr || 5201 entryptr + len > end_of_buf || 5202 entryptr + len + size > end_of_buf) { 5203 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", 5204 end_of_buf); 5205 break; 5206 } 5207 5208 *lastentry = entryptr; 5209 entrycount++; 5210 5211 next_offset = le32_to_cpu(dir_info->NextEntryOffset); 5212 if (!next_offset) 5213 break; 5214 } 5215 5216 return entrycount; 5217 } 5218 5219 /* 5220 * Readdir/FindFirst 5221 */ 5222 int SMB2_query_directory_init(const unsigned int xid, 5223 struct cifs_tcon *tcon, 5224 struct TCP_Server_Info *server, 5225 struct smb_rqst *rqst, 5226 u64 persistent_fid, u64 volatile_fid, 5227 int index, int info_level) 5228 { 5229 struct smb2_query_directory_req *req; 5230 unsigned char *bufptr; 5231 __le16 asteriks = cpu_to_le16('*'); 5232 unsigned int output_size = CIFSMaxBufSize - 5233 MAX_SMB2_CREATE_RESPONSE_SIZE - 5234 MAX_SMB2_CLOSE_RESPONSE_SIZE; 5235 unsigned int total_len; 5236 struct kvec *iov = rqst->rq_iov; 5237 int len, rc; 5238 5239 rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server, 5240 (void **) &req, &total_len); 5241 if (rc) 5242 return rc; 5243 5244 switch (info_level) { 5245 case SMB_FIND_FILE_DIRECTORY_INFO: 5246 req->FileInformationClass = FILE_DIRECTORY_INFORMATION; 5247 break; 5248 case SMB_FIND_FILE_ID_FULL_DIR_INFO: 5249 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION; 5250 break; 5251 case SMB_FIND_FILE_POSIX_INFO: 5252 req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO; 5253 break; 5254 case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5255 req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION; 5256 break; 5257 default: 5258 cifs_tcon_dbg(VFS, "info level %u isn't supported\n", 5259 info_level); 5260 return -EINVAL; 5261 } 5262 5263 req->FileIndex = cpu_to_le32(index); 5264 req->PersistentFileId = persistent_fid; 5265 req->VolatileFileId = volatile_fid; 5266 5267 len = 0x2; 5268 bufptr = req->Buffer; 5269 memcpy(bufptr, &asteriks, len); 5270 5271 req->FileNameOffset = 5272 cpu_to_le16(sizeof(struct smb2_query_directory_req)); 5273 req->FileNameLength = cpu_to_le16(len); 5274 /* 5275 * BB could be 30 bytes or so longer if we used SMB2 specific 5276 * buffer lengths, but this is safe and close enough. 
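 * The requested output size is additionally capped below to the
 * server's negotiated maxBuf and to 64KB.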
5277 */ 5278 output_size = min_t(unsigned int, output_size, server->maxBuf); 5279 output_size = min_t(unsigned int, output_size, 2 << 15); 5280 req->OutputBufferLength = cpu_to_le32(output_size); 5281 5282 iov[0].iov_base = (char *)req; 5283 /* 1 for Buffer */ 5284 iov[0].iov_len = total_len - 1; 5285 5286 iov[1].iov_base = (char *)(req->Buffer); 5287 iov[1].iov_len = len; 5288 5289 trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid, 5290 tcon->ses->Suid, index, output_size); 5291 5292 return 0; 5293 } 5294 5295 void SMB2_query_directory_free(struct smb_rqst *rqst) 5296 { 5297 if (rqst && rqst->rq_iov) { 5298 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 5299 } 5300 } 5301 5302 int 5303 smb2_parse_query_directory(struct cifs_tcon *tcon, 5304 struct kvec *rsp_iov, 5305 int resp_buftype, 5306 struct cifs_search_info *srch_inf) 5307 { 5308 struct smb2_query_directory_rsp *rsp; 5309 size_t info_buf_size; 5310 char *end_of_smb; 5311 int rc; 5312 5313 rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base; 5314 5315 switch (srch_inf->info_level) { 5316 case SMB_FIND_FILE_DIRECTORY_INFO: 5317 info_buf_size = sizeof(FILE_DIRECTORY_INFO); 5318 break; 5319 case SMB_FIND_FILE_ID_FULL_DIR_INFO: 5320 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO); 5321 break; 5322 case SMB_FIND_FILE_POSIX_INFO: 5323 /* note that posix payload are variable size */ 5324 info_buf_size = sizeof(struct smb2_posix_info); 5325 break; 5326 case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5327 info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO); 5328 break; 5329 default: 5330 cifs_tcon_dbg(VFS, "info level %u isn't supported\n", 5331 srch_inf->info_level); 5332 return -EINVAL; 5333 } 5334 5335 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), 5336 le32_to_cpu(rsp->OutputBufferLength), rsp_iov, 5337 info_buf_size); 5338 if (rc) { 5339 cifs_tcon_dbg(VFS, "bad info payload"); 5340 return rc; 5341 } 5342 5343 srch_inf->unicode = true; 5344 5345 if (srch_inf->ntwrk_buf_start) { 5346 if (srch_inf->smallBuf) 5347 cifs_small_buf_release(srch_inf->ntwrk_buf_start); 5348 else 5349 cifs_buf_release(srch_inf->ntwrk_buf_start); 5350 } 5351 srch_inf->ntwrk_buf_start = (char *)rsp; 5352 srch_inf->srch_entries_start = srch_inf->last_entry = 5353 (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset); 5354 end_of_smb = rsp_iov->iov_len + (char *)rsp; 5355 5356 srch_inf->entries_in_buffer = num_entries( 5357 srch_inf->info_level, 5358 srch_inf->srch_entries_start, 5359 end_of_smb, 5360 &srch_inf->last_entry, 5361 info_buf_size); 5362 5363 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer; 5364 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n", 5365 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry, 5366 srch_inf->srch_entries_start, srch_inf->last_entry); 5367 if (resp_buftype == CIFS_LARGE_BUFFER) 5368 srch_inf->smallBuf = false; 5369 else if (resp_buftype == CIFS_SMALL_BUFFER) 5370 srch_inf->smallBuf = true; 5371 else 5372 cifs_tcon_dbg(VFS, "Invalid search buffer type\n"); 5373 5374 return 0; 5375 } 5376 5377 int 5378 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, 5379 u64 persistent_fid, u64 volatile_fid, int index, 5380 struct cifs_search_info *srch_inf) 5381 { 5382 struct smb_rqst rqst; 5383 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE]; 5384 struct smb2_query_directory_rsp *rsp = NULL; 5385 int resp_buftype = CIFS_NO_BUFFER; 5386 struct kvec rsp_iov; 5387 int rc = 0; 5388 struct cifs_ses *ses = tcon->ses; 5389 struct TCP_Server_Info *server; 5390 
int flags = 0; 5391 int retries = 0, cur_sleep = 1; 5392 5393 replay_again: 5394 /* reinitialize for possible replay */ 5395 flags = 0; 5396 server = cifs_pick_channel(ses); 5397 5398 if (!ses || !(ses->server)) 5399 return -EIO; 5400 5401 if (smb3_encryption_required(tcon)) 5402 flags |= CIFS_TRANSFORM_REQ; 5403 5404 memset(&rqst, 0, sizeof(struct smb_rqst)); 5405 memset(&iov, 0, sizeof(iov)); 5406 rqst.rq_iov = iov; 5407 rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE; 5408 5409 rc = SMB2_query_directory_init(xid, tcon, server, 5410 &rqst, persistent_fid, 5411 volatile_fid, index, 5412 srch_inf->info_level); 5413 if (rc) 5414 goto qdir_exit; 5415 5416 if (retries) 5417 smb2_set_replay(server, &rqst); 5418 5419 rc = cifs_send_recv(xid, ses, server, 5420 &rqst, &resp_buftype, flags, &rsp_iov); 5421 rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; 5422 5423 if (rc) { 5424 if (rc == -ENODATA && 5425 rsp->hdr.Status == STATUS_NO_MORE_FILES) { 5426 trace_smb3_query_dir_done(xid, persistent_fid, 5427 tcon->tid, tcon->ses->Suid, index, 0); 5428 srch_inf->endOfSearch = true; 5429 rc = 0; 5430 } else { 5431 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, 5432 tcon->ses->Suid, index, 0, rc); 5433 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); 5434 } 5435 goto qdir_exit; 5436 } 5437 5438 rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype, 5439 srch_inf); 5440 if (rc) { 5441 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, 5442 tcon->ses->Suid, index, 0, rc); 5443 goto qdir_exit; 5444 } 5445 resp_buftype = CIFS_NO_BUFFER; 5446 5447 trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid, 5448 tcon->ses->Suid, index, srch_inf->entries_in_buffer); 5449 5450 qdir_exit: 5451 SMB2_query_directory_free(&rqst); 5452 free_rsp_buf(resp_buftype, rsp); 5453 5454 if (is_replayable_error(rc) && 5455 smb2_should_replay(tcon, &retries, &cur_sleep)) 5456 goto replay_again; 5457 5458 return rc; 5459 } 5460 5461 int 5462 SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 5463 struct smb_rqst *rqst, 5464 u64 persistent_fid, u64 volatile_fid, u32 pid, 5465 u8 info_class, u8 info_type, u32 additional_info, 5466 void **data, unsigned int *size) 5467 { 5468 struct smb2_set_info_req *req; 5469 struct kvec *iov = rqst->rq_iov; 5470 unsigned int i, total_len; 5471 int rc; 5472 5473 rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server, 5474 (void **) &req, &total_len); 5475 if (rc) 5476 return rc; 5477 5478 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid); 5479 req->InfoType = info_type; 5480 req->FileInfoClass = info_class; 5481 req->PersistentFileId = persistent_fid; 5482 req->VolatileFileId = volatile_fid; 5483 req->AdditionalInformation = cpu_to_le32(additional_info); 5484 5485 req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req)); 5486 req->BufferLength = cpu_to_le32(*size); 5487 5488 memcpy(req->Buffer, *data, *size); 5489 total_len += *size; 5490 5491 iov[0].iov_base = (char *)req; 5492 /* 1 for Buffer */ 5493 iov[0].iov_len = total_len - 1; 5494 5495 for (i = 1; i < rqst->rq_nvec; i++) { 5496 le32_add_cpu(&req->BufferLength, size[i]); 5497 iov[i].iov_base = (char *)data[i]; 5498 iov[i].iov_len = size[i]; 5499 } 5500 5501 return 0; 5502 } 5503 5504 void 5505 SMB2_set_info_free(struct smb_rqst *rqst) 5506 { 5507 if (rqst && rqst->rq_iov) 5508 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ 5509 } 5510 5511 static int 5512 send_set_info(const unsigned int xid, struct cifs_tcon *tcon, 5513 u64 persistent_fid, u64 volatile_fid, u32 pid, u8 
info_class, 5514 u8 info_type, u32 additional_info, unsigned int num, 5515 void **data, unsigned int *size) 5516 { 5517 struct smb_rqst rqst; 5518 struct smb2_set_info_rsp *rsp = NULL; 5519 struct kvec *iov; 5520 struct kvec rsp_iov; 5521 int rc = 0; 5522 int resp_buftype; 5523 struct cifs_ses *ses = tcon->ses; 5524 struct TCP_Server_Info *server; 5525 int flags = 0; 5526 int retries = 0, cur_sleep = 1; 5527 5528 replay_again: 5529 /* reinitialize for possible replay */ 5530 flags = 0; 5531 server = cifs_pick_channel(ses); 5532 5533 if (!ses || !server) 5534 return -EIO; 5535 5536 if (!num) 5537 return -EINVAL; 5538 5539 if (smb3_encryption_required(tcon)) 5540 flags |= CIFS_TRANSFORM_REQ; 5541 5542 iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL); 5543 if (!iov) 5544 return -ENOMEM; 5545 5546 memset(&rqst, 0, sizeof(struct smb_rqst)); 5547 rqst.rq_iov = iov; 5548 rqst.rq_nvec = num; 5549 5550 rc = SMB2_set_info_init(tcon, server, 5551 &rqst, persistent_fid, volatile_fid, pid, 5552 info_class, info_type, additional_info, 5553 data, size); 5554 if (rc) { 5555 kfree(iov); 5556 return rc; 5557 } 5558 5559 if (retries) 5560 smb2_set_replay(server, &rqst); 5561 5562 rc = cifs_send_recv(xid, ses, server, 5563 &rqst, &resp_buftype, flags, 5564 &rsp_iov); 5565 SMB2_set_info_free(&rqst); 5566 rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; 5567 5568 if (rc != 0) { 5569 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); 5570 trace_smb3_set_info_err(xid, persistent_fid, tcon->tid, 5571 ses->Suid, info_class, (__u32)info_type, rc); 5572 } 5573 5574 free_rsp_buf(resp_buftype, rsp); 5575 kfree(iov); 5576 5577 if (is_replayable_error(rc) && 5578 smb2_should_replay(tcon, &retries, &cur_sleep)) 5579 goto replay_again; 5580 5581 return rc; 5582 } 5583 5584 int 5585 SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 5586 u64 volatile_fid, u32 pid, loff_t new_eof) 5587 { 5588 struct smb2_file_eof_info info; 5589 void *data; 5590 unsigned int size; 5591 5592 info.EndOfFile = cpu_to_le64(new_eof); 5593 5594 data = &info; 5595 size = sizeof(struct smb2_file_eof_info); 5596 5597 trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, new_eof); 5598 5599 return send_set_info(xid, tcon, persistent_fid, volatile_fid, 5600 pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE, 5601 0, 1, &data, &size); 5602 } 5603 5604 int 5605 SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon, 5606 u64 persistent_fid, u64 volatile_fid, 5607 struct cifs_ntsd *pnntsd, int pacllen, int aclflag) 5608 { 5609 return send_set_info(xid, tcon, persistent_fid, volatile_fid, 5610 current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag, 5611 1, (void **)&pnntsd, &pacllen); 5612 } 5613 5614 int 5615 SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, 5616 u64 persistent_fid, u64 volatile_fid, 5617 struct smb2_file_full_ea_info *buf, int len) 5618 { 5619 return send_set_info(xid, tcon, persistent_fid, volatile_fid, 5620 current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 5621 0, 1, (void **)&buf, &len); 5622 } 5623 5624 int 5625 SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, 5626 const u64 persistent_fid, const u64 volatile_fid, 5627 __u8 oplock_level) 5628 { 5629 struct smb_rqst rqst; 5630 int rc; 5631 struct smb2_oplock_break *req = NULL; 5632 struct cifs_ses *ses = tcon->ses; 5633 struct TCP_Server_Info *server; 5634 int flags = CIFS_OBREAK_OP; 5635 unsigned int total_len; 5636 struct kvec iov[1]; 5637 struct kvec rsp_iov; 5638 int resp_buf_type; 5639 int 
int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_oplock_break *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = CIFS_OBREAK_OP;
	server = cifs_pick_channel(ses);

	cifs_dbg(FYI, "SMB2_oplock_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->hdr.CreditRequest = cpu_to_le16(1);

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
	}

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

void
smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			     struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree = kst->f_bavail =
			le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}

static void
copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
			      struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(response_data->BlockSize);
	kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
	kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
	if (response_data->UserBlocksAvail == cpu_to_le64(-1))
		kst->f_bavail = kst->f_bfree;
	else
		kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
	if (response_data->TotalFileNodes != cpu_to_le64(-1))
		kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
	if (response_data->FreeFileNodes != cpu_to_le64(-1))
		kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);

	return;
}

static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
		   struct TCP_Server_Info *server,
		   int level, int outbuf_len, u64 persistent_fid,
		   u64 volatile_fid)
{
	int rc;
	struct smb2_query_info_req *req;
	unsigned int total_len;

	cifs_dbg(FYI, "Query FSInfo level %d\n", level);

	if ((tcon->ses == NULL) || server == NULL)
		return -EIO;

	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 1 for pad */
	req->InputBufferOffset =
		cpu_to_le16(sizeof(struct smb2_query_info_req));
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp));

	iov->iov_base = (char *)req;
	iov->iov_len = total_len;
	return 0;
}

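/*
 * build_qfs_info_req()/free_qfs_info_req() bracket every filesystem-level
 * SMB2_QUERY_INFO below.  The callers (SMB311_posix_qfs_info, SMB2_QFS_info,
 * SMB2_QFS_attr) all follow the same build/send/validate/copy shape, roughly
 * (illustrative only, error handling omitted):
 *
 *	rc = build_qfs_info_req(&iov, tcon, server, level, max_len,
 *				persistent_fid, volatile_fid);
 *	rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype,
 *			    flags, &rsp_iov);
 *	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
 *	...copy the validated buffer into the caller's structure...
 */
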
static inline void free_qfs_info_req(struct kvec *iov)
{
	cifs_buf_release(iov->iov_base);
}

int
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	FILE_SYSTEM_POSIX_INFO *info = NULL;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	rc = build_qfs_info_req(&iov, tcon, server,
				FS_POSIX_INFORMATION,
				sizeof(FILE_SYSTEM_POSIX_INFO),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	free_qfs_info_req(&iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto posix_qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (FILE_SYSTEM_POSIX_INFO *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(FILE_SYSTEM_POSIX_INFO));
	if (!rc)
		copy_posix_fs_info_to_kstatfs(info, fsdata);

posix_qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

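/*
 * SMB2_QFS_info() reports sizes in allocation units, which
 * smb2_copy_fs_info_to_kstatfs() above converts for statfs.  For example
 * (illustrative numbers only): 512-byte sectors with 8 sectors per
 * allocation unit give f_bsize = 512 * 8 = 4096, and TotalAllocationUnits
 * is then the volume size expressed in 4KB blocks.
 */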
int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	struct smb2_fs_full_size_info *info = NULL;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	rc = build_qfs_info_req(&iov, tcon, server,
				FS_FULL_SIZE_INFORMATION,
				sizeof(struct smb2_fs_full_size_info),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	free_qfs_info_req(&iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, fsdata);

qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, int level)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype, max_len, min_len;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	unsigned int rsp_len, offset;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	if (level == FS_DEVICE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
	} else if (level == FS_ATTRIBUTE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
		min_len = MIN_FS_ATTR_INFO_SIZE;
	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
		max_len = sizeof(struct smb3_fs_ss_info);
		min_len = sizeof(struct smb3_fs_ss_info);
	} else if (level == FS_VOLUME_INFORMATION) {
		max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
		min_len = sizeof(struct smb3_fs_vol_info);
	} else {
		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
		return -EINVAL;
	}

	rc = build_qfs_info_req(&iov, tcon, server,
				level, max_len,
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	free_qfs_info_req(&iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsattr_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
	offset = le16_to_cpu(rsp->OutputBufferOffset);
	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
	if (rc)
		goto qfsattr_exit;

	if (level == FS_ATTRIBUTE_INFORMATION)
		memcpy(&tcon->fsAttrInfo, offset
			+ (char *)rsp, min_t(unsigned int,
			rsp_len, max_len));
	else if (level == FS_DEVICE_INFORMATION)
		memcpy(&tcon->fsDevInfo, offset
			+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
	else if (level == FS_SECTOR_SIZE_INFORMATION) {
		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
			(offset + (char *)rsp);
		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
		tcon->perf_sector_size =
			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
	} else if (level == FS_VOLUME_INFORMATION) {
		struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
			(offset + (char *)rsp);
		tcon->vol_serial_number = vol_info->VolumeSerialNumber;
		tcon->vol_create_time = vol_info->VolumeCreationTime;
	}

qfsattr_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

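/*
 * smb2_lockv() below sends an array of smb2_lock_element structures in a
 * single SMB2_LOCK request; SMB2_lock() is its single-element wrapper.
 * Illustrative sketch (not compiled; the exclusive-lock flag name from
 * smb2pdu.h is assumed here to be SMB2_LOCKFLAG_EXCLUSIVE_LOCK): an
 * exclusive byte-range lock on bytes [0, 4096) that should fail rather
 * than block would be sent as
 *
 *	rc = SMB2_lock(xid, tcon, persist_fid, volatile_fid, current->tgid,
 *		       4096, 0, SMB2_LOCKFLAG_EXCLUSIVE_LOCK, false);
 *
 * SMB2_lock() ORs in SMB2_LOCKFLAG_FAIL_IMMEDIATELY because wait == false
 * and the request is not an unlock.
 */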
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	struct smb_rqst rqst;
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	struct kvec rsp_iov;
	int resp_buf_type;
	unsigned int count;
	int flags = CIFS_NO_RSP_BUF;
	unsigned int total_len;
	struct TCP_Server_Info *server;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = CIFS_NO_RSP_BUF;
	server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);

	rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, tcon->ses, server,
			    &rqst, &resp_buf_type, flags,
			    &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
		trace_smb3_lock_err(xid, persist_fid, tcon->tid,
				    tcon->ses->Suid, rc);
	}

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}

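/*
 * SMB2_lease_break() reuses the SMB2_OPLOCK_BREAK plumbing but sends the
 * 36-byte lease break acknowledgment body, which is 12 bytes larger than
 * the 24-byte oplock break request that smb2_plain_req_init() sizes for;
 * hence the StructureSize override and the "total_len += 12" adjustment
 * below.
 */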
int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_lease_ack *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;
	__u64 *please_key_high;
	__u64 *please_key_low;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "SMB2_lease_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	total_len += 12;

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);

	please_key_low = (__u64 *)lease_key;
	please_key_high = (__u64 *)(lease_key+8);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
				     ses->Suid, *please_key_low, *please_key_high, rc);
		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
	} else
		trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
				      ses->Suid, *please_key_low, *please_key_high);

	return rc;
}

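/*
 * Illustrative only (assumed caller context, not compiled): a lease break
 * worker acknowledging the server after downgrading its cached state might
 * do something like
 *
 *	__le32 lease_state;	(remaining lease state chosen by the caller)
 *	rc = SMB2_lease_break(xid, tcon, lease_key, lease_state);
 *
 * where lease_key is the 16-byte key copied from the server's lease break
 * notification.
 */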