// SPDX-License-Identifier: LGPL-2.1
/*
 *
 * Copyright (C) International Business Machines Corp., 2009, 2013
 *               Etersoft, 2012
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 * Contains the routines for constructing the SMB2 PDUs themselves
 *
 */

/* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
/* Note that there are handle based routines which must be                   */
/* treated slightly differently for reconnection purposes since we never     */
/* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif
#include "cached_dir.h"

/*
 * The following table defines the expected "StructureSize" of SMB2 requests
 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

int smb3_encryption_required(const struct cifs_tcon *tcon)
{
	if (!tcon || !tcon->ses)
		return 0;
	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
		return 1;
	if (tcon->seal &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		return 1;
	return 0;
}
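
/*
 * smb2_hdr_assemble - fill in the fixed 64-byte SMB2 header of a request:
 * protocol id, structure size, command, credit charge and credit request,
 * process id, tree id and session id, plus the channel sequence number on
 * SMB3+ and the signed flag when the connection signs but is not encrypting.
 */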
static void
smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
		  const struct cifs_tcon *tcon,
		  struct TCP_Server_Info *server)
{
	struct smb3_hdr_req *smb3_hdr;

	shdr->ProtocolId = SMB2_PROTO_NUMBER;
	shdr->StructureSize = cpu_to_le16(64);
	shdr->Command = smb2_cmd;

	if (server) {
		/* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
		if (server->dialect >= SMB30_PROT_ID) {
			smb3_hdr = (struct smb3_hdr_req *)shdr;
			/*
			 * if primary channel is not set yet, use default
			 * channel for chan sequence num
			 */
			if (SERVER_IS_CHAN(server))
				smb3_hdr->ChannelSequence =
					cpu_to_le16(server->primary_server->channel_sequence_num);
			else
				smb3_hdr->ChannelSequence =
					cpu_to_le16(server->channel_sequence_num);
		}
		spin_lock(&server->req_lock);
		/* Request up to 10 credits but don't go over the limit. */
		if (server->credits >= server->max_credits)
			shdr->CreditRequest = cpu_to_le16(0);
		else
			shdr->CreditRequest = cpu_to_le16(
				min_t(int, server->max_credits -
						server->credits, 10));
		spin_unlock(&server->req_lock);
	} else {
		shdr->CreditRequest = cpu_to_le16(2);
	}
	shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
	if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		shdr->CreditCharge = cpu_to_le16(1);
	/* else CreditCharge MBZ */

	shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
	/* Uid is not converted */
	if (tcon->ses)
		shdr->SessionId = cpu_to_le64(tcon->ses->Suid);

	/*
	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
	 * to pass the path on the Open SMB prefixed by \\server\share.
	 * Not sure when we would need to do the augmented path (if ever) and
	 * setting this flag breaks the SMB2 open operation since it is
	 * illegal to send an empty path name (without \\server\share prefix)
	 * when the DFS flag is set in the SMB open header. We could
	 * consider setting the flag on all operations other than open
	 * but it is safer to not set it for now.
	 */
/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */

	if (server && server->sign && !smb3_encryption_required(tcon))
		shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	return;
}

/* helper function for code reuse */
static int
cifs_chan_skip_or_disable(struct cifs_ses *ses,
			  struct TCP_Server_Info *server,
			  bool from_reconnect)
{
	struct TCP_Server_Info *pserver;
	unsigned int chan_index;

	if (SERVER_IS_CHAN(server)) {
		cifs_dbg(VFS,
			 "server %s does not support multichannel anymore. Skip secondary channel\n",
			 ses->server->hostname);

		spin_lock(&ses->chan_lock);
		chan_index = cifs_ses_get_chan_index(ses, server);
		if (chan_index == CIFS_INVAL_CHAN_INDEX) {
			spin_unlock(&ses->chan_lock);
			goto skip_terminate;
		}

		ses->chans[chan_index].server = NULL;
		server->terminate = true;
		spin_unlock(&ses->chan_lock);

		/*
		 * the above reference of server by channel
		 * needs to be dropped without holding chan_lock
		 * as cifs_put_tcp_session takes a higher lock
		 * i.e. cifs_tcp_ses_lock
		 */
		cifs_put_tcp_session(server, from_reconnect);

		cifs_signal_cifsd_for_reconnect(server, false);

		/* mark primary server as needing reconnect */
		pserver = server->primary_server;
		cifs_signal_cifsd_for_reconnect(pserver, false);
skip_terminate:
		return -EHOSTDOWN;
	}

	cifs_server_dbg(VFS,
		"server does not support multichannel anymore. Disable all other channels\n");
	cifs_disable_secondary_channels(ses);

	return 0;
}
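
/*
 * smb2_reconnect - called before building most SMB2 requests; makes sure the
 * transport, session and tree connection behind @tcon are still usable,
 * renegotiating, redoing session setup and tree connect as needed. Returns 0
 * when the request can be built, -EAGAIN for handle based commands after a
 * reconnect (so the caller can reopen its file handle first), or a fatal
 * error when reconnecting is not possible.
 */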
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server, bool from_reconnect)
{
	int rc = 0;
	struct nls_table *nls_codepage = NULL;
	struct cifs_ses *ses;
	int xid;

	/*
	 * SMB2's NegProt, SessSetup and Logoff do not have a tcon yet, so the
	 * tcp and smb session status checks are done differently for those
	 * three - in the calling routine.
	 */
	if (tcon == NULL)
		return 0;

	/*
	 * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
	 * cifs_tree_connect().
	 */
	if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
		return 0;

	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_EXITING) {
		/*
		 * only tree disconnect allowed when disconnecting ...
		 */
		if (smb2_command != SMB2_TREE_DISCONNECT) {
			spin_unlock(&tcon->tc_lock);
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	spin_unlock(&tcon->tc_lock);

	ses = tcon->ses;
	if (!ses)
		return -EIO;
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_EXITING) {
		spin_unlock(&ses->ses_lock);
		return -EIO;
	}
	spin_unlock(&ses->ses_lock);
	if (!ses->server || !server)
		return -EIO;

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			spin_unlock(&server->srv_lock);
			return -EAGAIN;
		}
	}

	/* if server is marked for termination, cifsd will cleanup */
	if (server->terminate) {
		spin_unlock(&server->srv_lock);
		return -EHOSTDOWN;
	}
	spin_unlock(&server->srv_lock);
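
	/*
	 * Wait for any reconnect already in progress on this connection to
	 * finish before proceeding; if the connection is found to need a
	 * reconnect again after the session mutex is taken below, we come
	 * back here (only when tcon->retry is set).
	 */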
again:
	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
	if (rc)
		return rc;

	spin_lock(&ses->chan_lock);
	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
		spin_unlock(&ses->chan_lock);
		return 0;
	}
	spin_unlock(&ses->chan_lock);
	cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
		 tcon->ses->chans_need_reconnect,
		 tcon->need_reconnect);

	mutex_lock(&ses->session_mutex);
	/*
	 * if this is called by delayed work, and the channel has been disabled
	 * in parallel, the delayed work can continue to execute in parallel;
	 * there's a chance that this channel may not exist anymore
	 */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		mutex_unlock(&ses->session_mutex);
		rc = -EHOSTDOWN;
		goto out;
	}

	/*
	 * Recheck after acquiring the mutex. If another thread is negotiating
	 * and the server never sends an answer the socket will be closed
	 * and tcpStatus set to reconnect.
	 */
	if (server->tcpStatus == CifsNeedReconnect) {
		spin_unlock(&server->srv_lock);
		mutex_unlock(&ses->session_mutex);

		if (tcon->retry)
			goto again;

		rc = -EHOSTDOWN;
		goto out;
	}
	spin_unlock(&server->srv_lock);

	nls_codepage = ses->local_nls;

	/*
	 * need to prevent multiple threads trying to simultaneously
	 * reconnect the same SMB session
	 */
	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	if (!cifs_chan_needs_reconnect(ses, server) &&
	    ses->ses_status == SES_GOOD) {
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
		/* this means that we only need to tree connect */
		if (tcon->need_reconnect)
			goto skip_sess_setup;

		mutex_unlock(&ses->session_mutex);
		goto out;
	}
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);

	rc = cifs_negotiate_protocol(0, ses, server);
	if (!rc) {
		/*
		 * if server stopped supporting multichannel
		 * and the first channel reconnected, disable all the others.
		 */
		if (ses->chan_count > 1 &&
		    !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
			rc = cifs_chan_skip_or_disable(ses, server,
						       from_reconnect);
			if (rc) {
				mutex_unlock(&ses->session_mutex);
				goto out;
			}
		}

		rc = cifs_setup_session(0, ses, server, nls_codepage);
		if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
			/*
			 * Try the alternate password for the next reconnect (key
			 * rotation could be enabled on the server, e.g.) if an
			 * alternate password is available and the current password
			 * is expired, but do not swap on non password-related
			 * errors like host down.
			 */
			if (ses->password2)
				swap(ses->password2, ses->password);
		}

		if ((rc == -EACCES) && !tcon->retry) {
			mutex_unlock(&ses->session_mutex);
			rc = -EHOSTDOWN;
			goto failed;
		} else if (rc) {
			mutex_unlock(&ses->session_mutex);
			goto out;
		}
	} else {
		mutex_unlock(&ses->session_mutex);
		goto out;
	}
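
	/*
	 * At this point the session itself is valid; only the tree connection
	 * (and, with persistent handles, the open files) still needs to be
	 * re-established.
	 */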
skip_sess_setup:
	if (!tcon->need_reconnect) {
		mutex_unlock(&ses->session_mutex);
		goto out;
	}
	cifs_mark_open_files_invalid(tcon);
	if (tcon->use_persistent)
		tcon->need_reopen_files = true;

	rc = cifs_tree_connect(0, tcon, nls_codepage);

	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc) {
		/* If sess reconnected but tcon didn't, something strange ... */
		mutex_unlock(&ses->session_mutex);
		cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
		goto out;
	}

	spin_lock(&ses->ses_lock);
	if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
		spin_unlock(&ses->ses_lock);
		mutex_unlock(&ses->session_mutex);
		goto skip_add_channels;
	}
	ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
	spin_unlock(&ses->ses_lock);

	if (!rc &&
	    (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
	    server->ops->query_server_interfaces) {
		mutex_unlock(&ses->session_mutex);

		/*
		 * query server network interfaces, in case they change
		 */
		xid = get_xid();
		rc = server->ops->query_server_interfaces(xid, tcon, false);
		free_xid(xid);

		if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
			/*
			 * some servers like Azure SMB server do not advertise
			 * that multichannel has been disabled with server
			 * capabilities, rather return STATUS_NOT_IMPLEMENTED.
			 * treat this as server not supporting multichannel
			 */

			rc = cifs_chan_skip_or_disable(ses, server,
						       from_reconnect);
			goto skip_add_channels;
		} else if (rc)
			cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
				 __func__, rc);

		if (ses->chan_max > ses->chan_count &&
		    ses->iface_count &&
		    !SERVER_IS_CHAN(server)) {
			if (ses->chan_count == 1) {
				cifs_server_dbg(VFS, "supports multichannel now\n");
				queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
						   (SMB_INTERFACE_POLL_INTERVAL * HZ));
			}

			cifs_try_adding_channels(ses);
		}
	} else {
		mutex_unlock(&ses->session_mutex);
	}

skip_add_channels:
	spin_lock(&ses->ses_lock);
	ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
	spin_unlock(&ses->ses_lock);

	if (smb2_command != SMB2_INTERNAL_CMD)
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);

	atomic_inc(&tconInfoReconnectCount);
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		rc = -EAGAIN;
	}
failed:
	return rc;
}
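
/*
 * fill_small_buf - zero the fixed area of the request buffer, assemble the
 * SMB2 header via smb2_hdr_assemble() and set StructureSize2 from the
 * per-command table above; *total_len is set to the length of the fixed
 * part of the request.
 */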
static void
fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server,
	       void *buf,
	       unsigned int *total_len)
{
	struct smb2_pdu *spdu = buf;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(buf, 0, 256);

	smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
	spdu->StructureSize2 = cpu_to_le16(parmsize);

	*total_len = parmsize + sizeof(struct smb2_hdr);
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
				 struct TCP_Server_Info *server,
				 void **request_buf, unsigned int *total_len)
{
	/* BB eventually switch this to SMB2 specific small buf size */
	switch (smb2_command) {
	case SMB2_SET_INFO:
	case SMB2_QUERY_INFO:
		*request_buf = cifs_buf_get();
		break;
	default:
		*request_buf = cifs_small_buf_get();
		break;
	}
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	fill_small_buf(smb2_command, tcon, server,
		       (struct smb2_hdr *)(*request_buf),
		       total_len);

	if (tcon != NULL) {
		uint16_t com_code = le16_to_cpu(smb2_command);

		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return 0;
}

static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	int rc;

	rc = smb2_reconnect(smb2_command, tcon, server, false);
	if (rc)
		return rc;

	return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
				     total_len);
}

static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
					     request_buf, total_len);
	}
	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
				   request_buf, total_len);
}

/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
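
/*
 * For reference, each negotiate context begins with the common header below
 * (MS-SMB2 2.2.3.1), followed by a context specific payload, and each context
 * is placed at the next 8-byte aligned offset:
 *
 *	__le16 ContextType;
 *	__le16 DataLength;	- length of the payload, excluding this header
 *	__le32 Reserved;
 *	__u8   Data[];		- context specific payload
 *
 * The build_*_ctxt() helpers below each fill in one such context.
 */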
static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(38);
	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}

static void
build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
	pneg_ctxt->DataLength =
		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
			    - sizeof(struct smb2_neg_context));
	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
	pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
	pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
	pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
}

static unsigned int
build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
{
	unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
	unsigned short num_algs = 1; /* number of signing algorithms sent */

	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
	/*
	 * Context Data length must be rounded to multiple of 8 for some servers
	 */
	pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
					    sizeof(struct smb2_neg_context) +
					    (num_algs * sizeof(u16)), 8));
	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
	pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);

	ctxt_len += sizeof(__le16) * num_algs;
	ctxt_len = ALIGN(ctxt_len, 8);
	return ctxt_len;
	/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
}

static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
	if (require_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
		pneg_ctxt->CipherCount = cpu_to_le16(1);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
	} else if (enable_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(3);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
		pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
	} else {
		pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(2);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
	}
}

static unsigned int
build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
{
	struct nls_table *cp = load_nls_default();

	pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;

	/* copy up to max of first 100 bytes of server name to NetName field */
	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
	/* context size is DataLength + minimal smb2_neg_context */
	return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
}

static void
build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	pneg_ctxt->Name[0] = 0x93;
	pneg_ctxt->Name[1] = 0xAD;
	pneg_ctxt->Name[2] = 0x25;
	pneg_ctxt->Name[3] = 0x50;
	pneg_ctxt->Name[4] = 0x9C;
	pneg_ctxt->Name[5] = 0xB4;
	pneg_ctxt->Name[6] = 0x11;
	pneg_ctxt->Name[7] = 0xE7;
	pneg_ctxt->Name[8] = 0xB4;
	pneg_ctxt->Name[9] = 0x23;
	pneg_ctxt->Name[10] = 0x83;
	pneg_ctxt->Name[11] = 0xDE;
	pneg_ctxt->Name[12] = 0x96;
	pneg_ctxt->Name[13] = 0x8B;
	pneg_ctxt->Name[14] = 0xCD;
	pneg_ctxt->Name[15] = 0x7C;
}
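
/*
 * assemble_neg_contexts - append the SMB3.1.1 negotiate contexts (preauth
 * integrity, encryption, netname when the hostname is known, POSIX
 * extensions, and optionally compression and signing capabilities) after the
 * 8-byte aligned fixed part of the NEGOTIATE request, updating *total_len
 * and the NegotiateContextOffset/NegotiateContextCount fields.
 */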
static void
assemble_neg_contexts(struct smb2_negotiate_req *req,
		      struct TCP_Server_Info *server, unsigned int *total_len)
{
	unsigned int ctxt_len, neg_context_count;
	struct TCP_Server_Info *pserver;
	char *pneg_ctxt;
	char *hostname;

	if (*total_len > 200) {
		/* In case length corrupted don't want to overrun smb buffer */
		cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
		return;
	}

	/*
	 * round up total_len of fixed part of SMB3 negotiate request to 8
	 * byte boundary before adding negotiate contexts
	 */
	*total_len = ALIGN(*total_len, 8);

	pneg_ctxt = (*total_len) + (char *)req;
	req->NegotiateContextOffset = cpu_to_le32(*total_len);

	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
	ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
	ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	/*
	 * secondary channels don't have the hostname field populated;
	 * use the hostname field in the primary channel instead
	 */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
	cifs_server_lock(pserver);
	hostname = pserver->hostname;
	if (hostname && (hostname[0] != 0)) {
		ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
					      hostname);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count = 3;
	} else
		neg_context_count = 2;
	cifs_server_unlock(pserver);

	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
	*total_len += sizeof(struct smb2_posix_neg_context);
	pneg_ctxt += sizeof(struct smb2_posix_neg_context);
	neg_context_count++;

	if (server->compression.requested) {
		build_compression_ctxt((struct smb2_compression_capabilities_context *)
				       pneg_ctxt);
		ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	if (enable_negotiate_signing) {
		ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
					      pneg_ctxt);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	/* check for and add transport_capabilities and signing capabilities */
	req->NegotiateContextCount = cpu_to_le16(neg_context_count);
}

/* If invalid preauth context warn but use what we requested, SHA-512 */
static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one HashAlgorithms member is accounted for.
	 */
	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad preauth context\n");
		return;
	} else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
		pr_warn_once("server sent invalid SaltLength\n");
		return;
	}
	if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
		pr_warn_once("Invalid SMB3 hash algorithm count\n");
	if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
		pr_warn_once("unknown SMB3 hash algorithm\n");
}

static void decode_compress_ctx(struct TCP_Server_Info *server,
				struct smb2_compression_capabilities_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);
	__le16 alg;

	server->compression.enabled = false;

	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one CompressionAlgorithms member is accounted
	 * for.
	 */
	if (len < 10) {
		pr_warn_once("server sent bad compression cntxt\n");
		return;
	}

	if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
		pr_warn_once("invalid SMB3 compress algorithm count\n");
		return;
	}

	alg = ctxt->CompressionAlgorithms[0];

	/* 'NONE' (0) compressor type is never negotiated */
	if (alg == 0 || le16_to_cpu(alg) > 3) {
		pr_warn_once("invalid compression algorithm '%u'\n", alg);
		return;
	}

	server->compression.alg = alg;
	server->compression.enabled = true;
}

static int decode_encrypt_ctx(struct TCP_Server_Info *server,
			      struct smb2_encryption_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one Cipher flexible array member is accounted
	 * for.
	 */
	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad crypto ctxt len\n");
		return -EINVAL;
	}

	if (le16_to_cpu(ctxt->CipherCount) != 1) {
		pr_warn_once("Invalid SMB3.11 cipher count\n");
		return -EINVAL;
	}
	cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
	if (require_gcm_256) {
		if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
			cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
			return -EOPNOTSUPP;
		}
	} else if (ctxt->Ciphers[0] == 0) {
		/*
		 * e.g. if the server only supported AES256_CCM (very unlikely),
		 * or supported no encryption types, or had them all disabled.
		 * Since GLOBAL_CAP_ENCRYPTION will not be set, if the mount
		 * requested encryption ("seal") the checks later on during
		 * tree connection will return the proper rc, but if seal was
		 * not requested by the client, since the server is allowed to
		 * return 0 to indicate no supported cipher, we can't fail here.
		 */
		server->cipher_type = 0;
		server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
		pr_warn_once("Server does not support requested encryption types\n");
		return 0;
	} else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
		/* server returned a cipher we didn't ask for */
		pr_warn_once("Invalid SMB3.11 cipher returned\n");
		return -EINVAL;
	}
	server->cipher_type = ctxt->Ciphers[0];
	server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
	return 0;
}

static void decode_signing_ctx(struct TCP_Server_Info *server,
			       struct smb2_signing_capabilities *pctxt)
{
	unsigned int len = le16_to_cpu(pctxt->DataLength);

	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one SigningAlgorithms flexible array member is
	 * accounted for.
	 */
	if ((len < 4) || (len > 16)) {
		pr_warn_once("server sent bad signing negcontext\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
		pr_warn_once("Invalid signing algorithm count\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
		pr_warn_once("unknown signing algorithm\n");
		return;
	}

	server->signing_negotiated = true;
	server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
	cifs_dbg(FYI, "signing algorithm %d chosen\n",
		 server->signing_algorithm);
}
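
/*
 * smb311_decode_neg_context - walk the negotiate contexts returned in an
 * SMB3.1.1 NEGOTIATE response (each at the next 8-byte aligned offset, see
 * MS-SMB2 2.2.4) and dispatch each to the matching decode helper above,
 * stopping on the first decode error or if a context would run past the end
 * of the SMB.
 */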
static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
				     struct TCP_Server_Info *server,
				     unsigned int len_of_smb)
{
	struct smb2_neg_context *pctx;
	unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
	unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
	unsigned int len_of_ctxts, i;
	int rc = 0;

	cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
	if (len_of_smb <= offset) {
		cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
		return -EINVAL;
	}

	len_of_ctxts = len_of_smb - offset;

	for (i = 0; i < ctxt_cnt; i++) {
		int clen;
		/* check that offset is not beyond end of SMB */
		if (len_of_ctxts < sizeof(struct smb2_neg_context))
			break;

		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
		clen = sizeof(struct smb2_neg_context)
			+ le16_to_cpu(pctx->DataLength);
		/*
		 * 2.2.4 SMB2 NEGOTIATE Response
		 * Subsequent negotiate contexts MUST appear at the first 8-byte
		 * aligned offset following the previous negotiate context.
		 */
		if (i + 1 != ctxt_cnt)
			clen = ALIGN(clen, 8);
		if (clen > len_of_ctxts)
			break;

		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
			decode_preauth_context(
				(struct smb2_preauth_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
			rc = decode_encrypt_ctx(server,
				(struct smb2_encryption_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
			decode_compress_ctx(server,
				(struct smb2_compression_capabilities_context *)pctx);
		else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
			server->posix_ext_supported = true;
		else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
			decode_signing_ctx(server,
				(struct smb2_signing_capabilities *)pctx);
		else
			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
					le16_to_cpu(pctx->ContextType));
		if (rc)
			break;

		offset += clen;
		len_of_ctxts -= clen;
	}
	return rc;
}

static struct create_posix *
create_posix_buf(umode_t mode)
{
	struct create_posix *buf;

	buf = kzalloc(sizeof(struct create_posix),
		      GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset =
		cpu_to_le16(offsetof(struct create_posix, Mode));
	buf->ccontext.DataLength = cpu_to_le32(4);
	buf->ccontext.NameOffset =
		cpu_to_le16(offsetof(struct create_posix, Name));
	buf->ccontext.NameLength = cpu_to_le16(16);

	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	buf->Name[0] = 0x93;
	buf->Name[1] = 0xAD;
	buf->Name[2] = 0x25;
	buf->Name[3] = 0x50;
	buf->Name[4] = 0x9C;
	buf->Name[5] = 0xB4;
	buf->Name[6] = 0x11;
	buf->Name[7] = 0xE7;
	buf->Name[8] = 0xB4;
	buf->Name[9] = 0x23;
	buf->Name[10] = 0x83;
	buf->Name[11] = 0xDE;
	buf->Name[12] = 0x96;
	buf->Name[13] = 0x8B;
	buf->Name[14] = 0xCD;
	buf->Name[15] = 0x7C;
	buf->Mode = cpu_to_le32(mode);
	cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
	return buf;
}

static int
add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
{
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_posix_buf(mode);
	if (mode == ACL_NO_MODE)
		cifs_dbg(FYI, "%s: no mode\n", __func__);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_posix);
	*num_iovec = num + 1;
	return 0;
}

/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */
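
/*
 * SMB2_negotiate - send the NEGOTIATE request for this connection, check that
 * the dialect the server chose is one that was requested, and record the
 * negotiated dialect, buffer sizes, security mode, capabilities and (for
 * SMB3.1.1) negotiate contexts in the TCP_Server_Info.
 */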
int
SMB2_negotiate(const unsigned int xid,
	       struct cifs_ses *ses,
	       struct TCP_Server_Info *server)
{
	struct smb_rqst rqst;
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int rc;
	int resp_buftype;
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;
	unsigned int total_len;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->hdr.SessionId = 0;

	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(3);
		total_len += 6;
	} else if (strcmp(server->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(4);
		total_len += 8;
	} else {
		/* otherwise send specific dialect */
		req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
		req->DialectCount = cpu_to_le16(1);
		total_len += 2;
	}

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
	if (ses->chan_max > 1)
		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);

	/* ClientGUID must be zero for SMB2.02 dialect */
	if (server->vals->protocol_id == SMB20_PROT_ID)
		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
	else {
		memcpy(req->ClientGUID, server->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
		if ((server->vals->protocol_id == SMB311_PROT_ID) ||
		    (strcmp(server->vals->version_string,
			    SMB3ANY_VERSION_STRING) == 0) ||
		    (strcmp(server->vals->version_string,
			    SMBDEFAULT_VERSION_STRING) == 0))
			assemble_neg_contexts(req, server, &total_len);
	}
	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(req);
	rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc == -EOPNOTSUPP) {
		cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
		goto neg_exit;
	} else if (rc != 0)
		goto neg_exit;

	rc = -EIO;
	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
					"SMB2 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			cifs_server_dbg(VFS,
					"SMB2.1 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			/* ops were set to 3.0 by default, so update them */
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (strcmp(server->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
					"SMB2 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			/* ops were set to 3.0 by default, so update them */
			server->ops = &smb21_operations;
			server->vals = &smb21_values;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (le16_to_cpu(rsp->DialectRevision) !=
		   server->vals->protocol_id) {
		/* if requested single dialect ensure returned dialect matched */
		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
				le16_to_cpu(rsp->DialectRevision));
		goto neg_exit;
	}

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
	else {
		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
				le16_to_cpu(rsp->DialectRevision));
		goto neg_exit;
	}

	rc = 0;
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/*
	 * Keep a copy of the hash after negprot. This hash will be
	 * the starting hash value for all sessions made from this
	 * server.
	 */
	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
	       SMB2_PREAUTH_HASH_SIZE);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	/* set it to the maximum buffer size value we can send with 1 credit */
	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
			       SMB2_MAX_BUFFER_SIZE);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
			 server->sec_mode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	/*
	 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context.
	 * Set the cipher type manually.
	 */
	if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       (struct smb2_hdr *)rsp);
	/*
	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
	 * for us will be
	 *	ses->sectype = RawNTLMSSP;
	 * but for the time being this is our only auth choice so doesn't matter.
	 * We just found a server which sets blob length to zero expecting raw.
	 */
	if (blob_length == 0) {
		cifs_dbg(FYI, "missing security blob on negprot\n");
		server->sec_ntlmssp = true;
	}

	rc = cifs_enable_signing(server, ses->sign);
	if (rc)
		goto neg_exit;
	if (blob_length) {
		rc = decode_negTokenInit(security_blob, blob_length, server);
		if (rc == 1)
			rc = 0;
		else if (rc == 0)
			rc = -EIO;
	}

	if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
		if (rsp->NegotiateContextCount)
			rc = smb311_decode_neg_context(rsp, server,
						       rsp_iov.iov_len);
		else
			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
	}
neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}
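
/*
 * smb3_validate_negotiate - send the signed FSCTL_VALIDATE_NEGOTIATE_INFO
 * ioctl and verify that the dialect, security mode and capabilities the
 * server reports match what was seen in the original negotiate exchange, to
 * detect tampering/downgrade. Skipped on SMB3.1.1, where preauth integrity
 * supersedes it, and when the request could not be signed.
 */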
int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	struct validate_negotiate_info_req *pneg_inbuf;
	struct validate_negotiate_info_rsp *pneg_rsp = NULL;
	u32 rsplen;
	u32 inbuflen; /* max of 4 dialects */
	struct TCP_Server_Info *server = tcon->ses->server;

	cifs_dbg(FYI, "validate negotiate\n");

	/* In SMB3.11 preauth integrity supersedes validate negotiate */
	if (server->dialect == SMB311_PROT_ID)
		return 0;

	/*
	 * validation ioctl must be signed, so no point sending this if we
	 * can not sign it (ie are not known user). Even if signing is not
	 * required (enabled but not negotiated), in those cases we selectively
	 * sign just this, the first and only signed request on a connection.
	 * Having validation of negotiate info helps reduce attack vectors.
	 */
	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
		return 0; /* validation requires signing */

	if (tcon->ses->user_name == NULL) {
		cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
		return 0; /* validation requires signing */
	}

	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
		cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");

	pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
	if (!pneg_inbuf)
		return -ENOMEM;

	pneg_inbuf->Capabilities =
			cpu_to_le32(server->vals->req_capabilities);
	if (tcon->ses->chan_max > 1)
		pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);

	memcpy(pneg_inbuf->Guid, server->client_guid,
	       SMB2_CLIENT_GUID_SIZE);

	if (tcon->ses->sign)
		pneg_inbuf->SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		pneg_inbuf->SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		pneg_inbuf->SecurityMode = 0;

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
		pneg_inbuf->DialectCount = cpu_to_le16(3);
		/* SMB 2.1 not included so subtract one dialect from len */
		inbuflen = sizeof(*pneg_inbuf) -
				(sizeof(pneg_inbuf->Dialects[0]));
	} else if (strcmp(server->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
		pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
		pneg_inbuf->DialectCount = cpu_to_le16(4);
		/* structure is big enough for 4 dialects */
		inbuflen = sizeof(*pneg_inbuf);
	} else {
		/* otherwise specific dialect was requested */
		pneg_inbuf->Dialects[0] =
			cpu_to_le16(server->vals->protocol_id);
		pneg_inbuf->DialectCount = cpu_to_le16(1);
		/* structure is big enough for 4 dialects, sending only 1 */
		inbuflen = sizeof(*pneg_inbuf) -
				sizeof(pneg_inbuf->Dialects[0]) * 3;
	}

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
			FSCTL_VALIDATE_NEGOTIATE_INFO,
			(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
			(char **)&pneg_rsp, &rsplen);
	if (rc == -EOPNOTSUPP) {
		/*
		 * Old Windows versions or Netapp SMB server can return
		 * not supported error. Client should accept it.
		 */
		cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
		rc = 0;
		goto out_free_inbuf;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
			      rc);
		rc = -EIO;
		goto out_free_inbuf;
	}

	rc = -EIO;
	if (rsplen != sizeof(*pneg_rsp)) {
		cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
			      rsplen);

		/* relax check since Mac returns max bufsize allowed on ioctl */
		if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
			goto out_free_rsp;
	}

	/* check validate negotiate info response matches what we got earlier */
	if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
		goto vneg_out;

	if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
		goto vneg_out;

	/* do not validate server guid because not saved at negprot time yet */

	if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
	     SMB2_LARGE_FILES) != server->capabilities)
		goto vneg_out;

	/* validate negotiate successful */
	rc = 0;
	cifs_dbg(FYI, "validate negotiate info successful\n");
	goto out_free_rsp;

vneg_out:
	cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
out_free_rsp:
	kfree(pneg_rsp);
out_free_inbuf:
	kfree(pneg_inbuf);
	return rc;
}

enum securityEnum
smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
{
	switch (requested) {
	case Kerberos:
	case RawNTLMSSP:
		return requested;
	case NTLMv2:
		return RawNTLMSSP;
	case Unspecified:
		if (server->sec_ntlmssp &&
		    (global_secflags & CIFSSEC_MAY_NTLMSSP))
			return RawNTLMSSP;
		if ((server->sec_kerberos || server->sec_mskerberos) &&
		    (global_secflags & CIFSSEC_MAY_KRB5))
			return Kerberos;
		fallthrough;
	default:
		return Unspecified;
	}
}
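
/*
 * State carried across the session setup exchange. SMB2_sess_setup() keeps
 * calling ->func until it is cleared, so each authentication step installs
 * the next step (or NULL when finished) along with its result.
 */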
struct SMB2_sess_data {
	unsigned int xid;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct nls_table *nls_cp;
	void (*func)(struct SMB2_sess_data *);
	int result;
	u64 previous_session;

	/* we will send the SMB in three pieces:
	 * a fixed length beginning part, an optional
	 * SPNEGO blob (which can be zero length), and a
	 * last part which will include the strings
	 * and rest of bcc area. This allows us to avoid
	 * a large 17K buffer allocation
	 */
	int buf0_type;
	struct kvec iov[2];
};

static int
SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct smb2_sess_setup_req *req;
	unsigned int total_len;
	bool is_binding = false;

	rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
				 (void **) &req,
				 &total_len);
	if (rc)
		return rc;

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
	spin_unlock(&ses->ses_lock);

	if (is_binding) {
		req->hdr.SessionId = cpu_to_le64(ses->Suid);
		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
		req->PreviousSessionId = 0;
		req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
		cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
	} else {
		/* First session, not a reauthenticate */
		req->hdr.SessionId = 0;
		/*
		 * if reconnect, we need to send previous sess id
		 * otherwise it is 0
		 */
		req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
		req->Flags = 0; /* MBZ */
		cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
			 sess_data->previous_session);
	}

	/* enough to enable echos and oplocks and one max size write */
	if (server->credits >= server->max_credits)
		req->hdr.CreditRequest = cpu_to_le16(0);
	else
		req->hdr.CreditRequest = cpu_to_le16(
			min_t(int, server->max_credits -
			      server->credits, 130));

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (server->sign)
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
	else
		req->SecurityMode = 0;

#ifdef CONFIG_CIFS_DFS_UPCALL
	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
#else
	req->Capabilities = 0;
#endif /* DFS_UPCALL */

	req->Channel = 0; /* MBZ */

	sess_data->iov[0].iov_base = (char *)req;
	/* 1 for pad */
	sess_data->iov[0].iov_len = total_len - 1;
	/*
	 * This variable will be used to clear the buffer
	 * allocated above in case of any error in the calling function.
	 */
	sess_data->buf0_type = CIFS_SMALL_BUFFER;

	return 0;
}

static void
SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
{
	struct kvec *iov = sess_data->iov;

	/* iov[1] is already freed by caller */
	if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
		memzero_explicit(iov[0].iov_base, iov[0].iov_len);

	free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
	sess_data->buf0_type = CIFS_NO_BUFFER;
}

static int
SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct smb_rqst rqst;
	struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
	struct kvec rsp_iov = { NULL, 0 };

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->SecurityBufferOffset =
		cpu_to_le16(sizeof(struct smb2_sess_setup_req));
	req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = sess_data->iov;
	rqst.rq_nvec = 2;

	/* BB add code to build os and lm fields */
	rc = cifs_send_recv(sess_data->xid, sess_data->ses,
			    sess_data->server,
			    &rqst,
			    &sess_data->buf0_type,
			    CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
	cifs_small_buf_release(sess_data->iov[0].iov_base);
	if (rc == 0)
		sess_data->ses->expired_pwd = false;
	else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED))
		sess_data->ses->expired_pwd = true;

	memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));

	return rc;
}

static int
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
{
	int rc = 0;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;

	cifs_server_lock(server);
	if (server->ops->generate_signingkey) {
		rc = server->ops->generate_signingkey(ses, server);
		if (rc) {
			cifs_dbg(FYI,
				 "SMB3 session key generation failed\n");
			cifs_server_unlock(server);
			return rc;
		}
	}
	if (!server->session_estab) {
		server->sequence_number = 0x2;
		server->session_estab = true;
	}
	cifs_server_unlock(server);

	cifs_dbg(FYI, "SMB2/3 session established successfully\n");
	return rc;
}

#ifdef CONFIG_CIFS_UPCALL
static void
SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct cifs_spnego_msg *msg;
	struct key *spnego_key = NULL;
	struct smb2_sess_setup_rsp *rsp = NULL;
	bool is_binding = false;

	rc = SMB2_sess_alloc_buffer(sess_data);
	if (rc)
		goto out;

	spnego_key = cifs_get_spnego_key(ses, server);
	if (IS_ERR(spnego_key)) {
		rc = PTR_ERR(spnego_key);
		if (rc == -ENOKEY)
			cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
		spnego_key = NULL;
		goto out;
	}

	msg = spnego_key->payload.data[0];
	/*
	 * check version field to make sure that cifs.upcall is
	 * sending us a response in an expected form
	 */
	if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
		cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
			 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
		rc = -EKEYREJECTED;
		goto out_put_spnego_key;
	}

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
	spin_unlock(&ses->ses_lock);

	/* keep session key if binding */
	if (!is_binding) {
		kfree_sensitive(ses->auth_key.response);
		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
						 GFP_KERNEL);
		if (!ses->auth_key.response) {
			cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
				 msg->sesskey_len);
			rc = -ENOMEM;
			goto out_put_spnego_key;
		}
		ses->auth_key.len = msg->sesskey_len;
	}

	sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
	sess_data->iov[1].iov_len = msg->secblob_len;

	rc = SMB2_sess_sendreceive(sess_data);
	if (rc)
		goto out_put_spnego_key;

	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
	/* keep session id and flags if binding */
	if (!is_binding) {
		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
	}

	rc = SMB2_sess_establish_session(sess_data);
out_put_spnego_key:
	key_invalidate(spnego_key);
	key_put(spnego_key);
	if (rc) {
		kfree_sensitive(ses->auth_key.response);
		ses->auth_key.response = NULL;
		ses->auth_key.len = 0;
	}
out:
	sess_data->result = rc;
	sess_data->func = NULL;
	SMB2_sess_free_buffer(sess_data);
}
#else
static void
SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{
	cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
	sess_data->result = -EOPNOTSUPP;
	sess_data->func = NULL;
}
#endif

static void
SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
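
/*
 * Raw NTLMSSP is a two-leg exchange: the negotiate leg below normally gets
 * STATUS_MORE_PROCESSING_REQUIRED back along with the server's challenge,
 * which is decoded here and then answered by the authenticate leg that
 * follows.
 */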
static void
SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct smb2_sess_setup_rsp *rsp = NULL;
	unsigned char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */
	u16 blob_length = 0;
	bool is_binding = false;

	/*
	 * If memory allocation is successful, caller of this function
	 * frees it.
	 */
	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
	if (!ses->ntlmssp) {
		rc = -ENOMEM;
		goto out_err;
	}
	ses->ntlmssp->sesskey_per_smbsess = true;

	rc = SMB2_sess_alloc_buffer(sess_data);
	if (rc)
		goto out_err;

	rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
					       &blob_length, ses, server,
					       sess_data->nls_cp);
	if (rc)
		goto out;

	if (use_spnego) {
		/* BB eventually need to add this */
		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
		rc = -EOPNOTSUPP;
		goto out;
	}
	sess_data->iov[1].iov_base = ntlmssp_blob;
	sess_data->iov[1].iov_len = blob_length;

	rc = SMB2_sess_sendreceive(sess_data);
	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;

	/* If true, rc here is expected and not an error */
	if (sess_data->buf0_type != CIFS_NO_BUFFER &&
	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
		rc = 0;

	if (rc)
		goto out;

	if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
	    le16_to_cpu(rsp->SecurityBufferOffset)) {
		cifs_dbg(VFS, "Invalid security buffer offset %d\n",
			 le16_to_cpu(rsp->SecurityBufferOffset));
		rc = -EIO;
		goto out;
	}
	rc = decode_ntlmssp_challenge(rsp->Buffer,
				      le16_to_cpu(rsp->SecurityBufferLength), ses);
	if (rc)
		goto out;

	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
	spin_unlock(&ses->ses_lock);

	/* keep existing ses id and flags if binding */
	if (!is_binding) {
		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
	}

out:
	kfree_sensitive(ntlmssp_blob);
	SMB2_sess_free_buffer(sess_data);
	if (!rc) {
		sess_data->result = 0;
		sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
		return;
	}
out_err:
	kfree_sensitive(ses->ntlmssp);
	ses->ntlmssp = NULL;
	sess_data->result = rc;
	sess_data->func = NULL;
}
spin_unlock(&ses->ses_lock); 1834 1835 /* keep existing ses id and flags if binding */ 1836 if (!is_binding) { 1837 ses->Suid = le64_to_cpu(rsp->hdr.SessionId); 1838 ses->session_flags = le16_to_cpu(rsp->SessionFlags); 1839 } 1840 1841 rc = SMB2_sess_establish_session(sess_data); 1842 #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS 1843 if (ses->server->dialect < SMB30_PROT_ID) { 1844 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__); 1845 /* 1846 * The session id is opaque in terms of endianness, so we can't 1847 * print it as a long long. we dump it as we got it on the wire 1848 */ 1849 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid), 1850 &ses->Suid); 1851 cifs_dbg(VFS, "Session Key %*ph\n", 1852 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response); 1853 cifs_dbg(VFS, "Signing Key %*ph\n", 1854 SMB3_SIGN_KEY_SIZE, ses->auth_key.response); 1855 } 1856 #endif 1857 out: 1858 kfree_sensitive(ntlmssp_blob); 1859 SMB2_sess_free_buffer(sess_data); 1860 kfree_sensitive(ses->ntlmssp); 1861 ses->ntlmssp = NULL; 1862 sess_data->result = rc; 1863 sess_data->func = NULL; 1864 } 1865 1866 static int 1867 SMB2_select_sec(struct SMB2_sess_data *sess_data) 1868 { 1869 int type; 1870 struct cifs_ses *ses = sess_data->ses; 1871 struct TCP_Server_Info *server = sess_data->server; 1872 1873 type = smb2_select_sectype(server, ses->sectype); 1874 cifs_dbg(FYI, "sess setup type %d\n", type); 1875 if (type == Unspecified) { 1876 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n"); 1877 return -EINVAL; 1878 } 1879 1880 switch (type) { 1881 case Kerberos: 1882 sess_data->func = SMB2_auth_kerberos; 1883 break; 1884 case RawNTLMSSP: 1885 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate; 1886 break; 1887 default: 1888 cifs_dbg(VFS, "secType %d not supported!\n", type); 1889 return -EOPNOTSUPP; 1890 } 1891 1892 return 0; 1893 } 1894 1895 int 1896 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, 1897 struct TCP_Server_Info *server, 1898 const struct nls_table *nls_cp) 1899 { 1900 int rc = 0; 1901 struct SMB2_sess_data *sess_data; 1902 1903 cifs_dbg(FYI, "Session Setup\n"); 1904 1905 if (!server) { 1906 WARN(1, "%s: server is NULL!\n", __func__); 1907 return -EIO; 1908 } 1909 1910 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL); 1911 if (!sess_data) 1912 return -ENOMEM; 1913 1914 sess_data->xid = xid; 1915 sess_data->ses = ses; 1916 sess_data->server = server; 1917 sess_data->buf0_type = CIFS_NO_BUFFER; 1918 sess_data->nls_cp = (struct nls_table *) nls_cp; 1919 sess_data->previous_session = ses->Suid; 1920 1921 rc = SMB2_select_sec(sess_data); 1922 if (rc) 1923 goto out; 1924 1925 /* 1926 * Initialize the session hash with the server one. 
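 * (For SMB 3.1.1 this is the preauth integrity hash: the session copy starts from the connection's negotiate-stage hash and is updated with each SessionSetup exchange before the signing and encryption keys are derived.)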
1927 */ 1928 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash, 1929 SMB2_PREAUTH_HASH_SIZE); 1930 1931 while (sess_data->func) 1932 sess_data->func(sess_data); 1933 1934 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign)) 1935 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n"); 1936 rc = sess_data->result; 1937 out: 1938 kfree_sensitive(sess_data); 1939 return rc; 1940 } 1941 1942 int 1943 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) 1944 { 1945 struct smb_rqst rqst; 1946 struct smb2_logoff_req *req; /* response is also trivial struct */ 1947 int rc = 0; 1948 struct TCP_Server_Info *server; 1949 int flags = 0; 1950 unsigned int total_len; 1951 struct kvec iov[1]; 1952 struct kvec rsp_iov; 1953 int resp_buf_type; 1954 1955 cifs_dbg(FYI, "disconnect session %p\n", ses); 1956 1957 if (ses && (ses->server)) 1958 server = ses->server; 1959 else 1960 return -EIO; 1961 1962 /* no need to send SMB logoff if uid already closed due to reconnect */ 1963 spin_lock(&ses->chan_lock); 1964 if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) { 1965 spin_unlock(&ses->chan_lock); 1966 goto smb2_session_already_dead; 1967 } 1968 spin_unlock(&ses->chan_lock); 1969 1970 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server, 1971 (void **) &req, &total_len); 1972 if (rc) 1973 return rc; 1974 1975 /* since no tcon, smb2_init can not do this, so do here */ 1976 req->hdr.SessionId = cpu_to_le64(ses->Suid); 1977 1978 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) 1979 flags |= CIFS_TRANSFORM_REQ; 1980 else if (server->sign) 1981 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 1982 1983 flags |= CIFS_NO_RSP_BUF; 1984 1985 iov[0].iov_base = (char *)req; 1986 iov[0].iov_len = total_len; 1987 1988 memset(&rqst, 0, sizeof(struct smb_rqst)); 1989 rqst.rq_iov = iov; 1990 rqst.rq_nvec = 1; 1991 1992 rc = cifs_send_recv(xid, ses, ses->server, 1993 &rqst, &resp_buf_type, flags, &rsp_iov); 1994 cifs_small_buf_release(req); 1995 /* 1996 * No tcon so can't do 1997 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); 1998 */ 1999 2000 smb2_session_already_dead: 2001 return rc; 2002 } 2003 2004 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code) 2005 { 2006 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]); 2007 } 2008 2009 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */) 2010 2011 /* These are similar values to what Windows uses */ 2012 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon) 2013 { 2014 tcon->max_chunks = 256; 2015 tcon->max_bytes_chunk = 1048576; 2016 tcon->max_bytes_copy = 16777216; 2017 } 2018 2019 int 2020 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, 2021 struct cifs_tcon *tcon, const struct nls_table *cp) 2022 { 2023 struct smb_rqst rqst; 2024 struct smb2_tree_connect_req *req; 2025 struct smb2_tree_connect_rsp *rsp = NULL; 2026 struct kvec iov[2]; 2027 struct kvec rsp_iov = { NULL, 0 }; 2028 int rc = 0; 2029 int resp_buftype; 2030 int unc_path_len; 2031 __le16 *unc_path = NULL; 2032 int flags = 0; 2033 unsigned int total_len; 2034 struct TCP_Server_Info *server = cifs_pick_channel(ses); 2035 2036 cifs_dbg(FYI, "TCON\n"); 2037 2038 if (!server || !tree) 2039 return -EIO; 2040 2041 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); 2042 if (unc_path == NULL) 2043 return -ENOMEM; 2044 2045 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp); 2046 if (unc_path_len <= 0) { 2047 kfree(unc_path); 2048 return -EINVAL; 2049 } 
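/* cifs_strtoUTF16() returned the length in 16-bit code units; convert to bytes */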
2050 unc_path_len *= 2; 2051 2052 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ 2053 tcon->tid = 0; 2054 atomic_set(&tcon->num_remote_opens, 0); 2055 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server, 2056 (void **) &req, &total_len); 2057 if (rc) { 2058 kfree(unc_path); 2059 return rc; 2060 } 2061 2062 if (smb3_encryption_required(tcon)) 2063 flags |= CIFS_TRANSFORM_REQ; 2064 2065 iov[0].iov_base = (char *)req; 2066 /* 1 for pad */ 2067 iov[0].iov_len = total_len - 1; 2068 2069 /* Testing shows that buffer offset must be at location of Buffer[0] */ 2070 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)); 2071 req->PathLength = cpu_to_le16(unc_path_len); 2072 iov[1].iov_base = unc_path; 2073 iov[1].iov_len = unc_path_len; 2074 2075 /* 2076 * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 2077 * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1 2078 * (Samba servers don't always set the flag so also check if null user) 2079 */ 2080 if ((server->dialect == SMB311_PROT_ID) && 2081 !smb3_encryption_required(tcon) && 2082 !(ses->session_flags & 2083 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) && 2084 ((ses->user_name != NULL) || (ses->sectype == Kerberos))) 2085 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 2086 2087 memset(&rqst, 0, sizeof(struct smb_rqst)); 2088 rqst.rq_iov = iov; 2089 rqst.rq_nvec = 2; 2090 2091 /* Need 64 for max size write so ask for more in case not there yet */ 2092 if (server->credits >= server->max_credits) 2093 req->hdr.CreditRequest = cpu_to_le16(0); 2094 else 2095 req->hdr.CreditRequest = cpu_to_le16( 2096 min_t(int, server->max_credits - 2097 server->credits, 64)); 2098 2099 rc = cifs_send_recv(xid, ses, server, 2100 &rqst, &resp_buftype, flags, &rsp_iov); 2101 cifs_small_buf_release(req); 2102 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; 2103 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc); 2104 if ((rc != 0) || (rsp == NULL)) { 2105 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE); 2106 tcon->need_reconnect = true; 2107 goto tcon_error_exit; 2108 } 2109 2110 switch (rsp->ShareType) { 2111 case SMB2_SHARE_TYPE_DISK: 2112 cifs_dbg(FYI, "connection to disk share\n"); 2113 break; 2114 case SMB2_SHARE_TYPE_PIPE: 2115 tcon->pipe = true; 2116 cifs_dbg(FYI, "connection to pipe share\n"); 2117 break; 2118 case SMB2_SHARE_TYPE_PRINT: 2119 tcon->print = true; 2120 cifs_dbg(FYI, "connection to printer\n"); 2121 break; 2122 default: 2123 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType); 2124 rc = -EOPNOTSUPP; 2125 goto tcon_error_exit; 2126 } 2127 2128 tcon->share_flags = le32_to_cpu(rsp->ShareFlags); 2129 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ 2130 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); 2131 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId); 2132 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name)); 2133 2134 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && 2135 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) 2136 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n"); 2137 2138 if (tcon->seal && 2139 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 2140 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n"); 2141 2142 init_copy_chunk_defaults(tcon); 2143 if (server->ops->validate_negotiate) 2144 rc = server->ops->validate_negotiate(xid, tcon); 2145 if (rc == 0) /* See MS-SMB2 2.2.10 and 3.2.5.5 */ 2146 if (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT) 2147 server->nosharesock = true; 
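/* SMB2_SHAREFLAG_ISOLATED_TRANSPORT (handled above) indicates the server wants this share carried on its own connection, so do not share this socket with other mounts. */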
2148 tcon_exit: 2149 2150 free_rsp_buf(resp_buftype, rsp); 2151 kfree(unc_path); 2152 return rc; 2153 2154 tcon_error_exit: 2155 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) 2156 cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); 2157 goto tcon_exit; 2158 } 2159 2160 int 2161 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) 2162 { 2163 struct smb_rqst rqst; 2164 struct smb2_tree_disconnect_req *req; /* response is trivial */ 2165 int rc = 0; 2166 struct cifs_ses *ses = tcon->ses; 2167 struct TCP_Server_Info *server = cifs_pick_channel(ses); 2168 int flags = 0; 2169 unsigned int total_len; 2170 struct kvec iov[1]; 2171 struct kvec rsp_iov; 2172 int resp_buf_type; 2173 2174 cifs_dbg(FYI, "Tree Disconnect\n"); 2175 2176 if (!ses || !(ses->server)) 2177 return -EIO; 2178 2179 trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name); 2180 spin_lock(&ses->chan_lock); 2181 if ((tcon->need_reconnect) || 2182 (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) { 2183 spin_unlock(&ses->chan_lock); 2184 return 0; 2185 } 2186 spin_unlock(&ses->chan_lock); 2187 2188 invalidate_all_cached_dirs(tcon); 2189 2190 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server, 2191 (void **) &req, 2192 &total_len); 2193 if (rc) 2194 return rc; 2195 2196 if (smb3_encryption_required(tcon)) 2197 flags |= CIFS_TRANSFORM_REQ; 2198 2199 flags |= CIFS_NO_RSP_BUF; 2200 2201 iov[0].iov_base = (char *)req; 2202 iov[0].iov_len = total_len; 2203 2204 memset(&rqst, 0, sizeof(struct smb_rqst)); 2205 rqst.rq_iov = iov; 2206 rqst.rq_nvec = 1; 2207 2208 rc = cifs_send_recv(xid, ses, server, 2209 &rqst, &resp_buf_type, flags, &rsp_iov); 2210 cifs_small_buf_release(req); 2211 if (rc) { 2212 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); 2213 trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc); 2214 } 2215 trace_smb3_tdis_done(xid, tcon->tid, ses->Suid); 2216 2217 return rc; 2218 } 2219 2220 2221 static struct create_durable * 2222 create_durable_buf(void) 2223 { 2224 struct create_durable *buf; 2225 2226 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); 2227 if (!buf) 2228 return NULL; 2229 2230 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2231 (struct create_durable, Data)); 2232 buf->ccontext.DataLength = cpu_to_le32(16); 2233 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2234 (struct create_durable, Name)); 2235 buf->ccontext.NameLength = cpu_to_le16(4); 2236 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */ 2237 buf->Name[0] = 'D'; 2238 buf->Name[1] = 'H'; 2239 buf->Name[2] = 'n'; 2240 buf->Name[3] = 'Q'; 2241 return buf; 2242 } 2243 2244 static struct create_durable * 2245 create_reconnect_durable_buf(struct cifs_fid *fid) 2246 { 2247 struct create_durable *buf; 2248 2249 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); 2250 if (!buf) 2251 return NULL; 2252 2253 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2254 (struct create_durable, Data)); 2255 buf->ccontext.DataLength = cpu_to_le32(16); 2256 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2257 (struct create_durable, Name)); 2258 buf->ccontext.NameLength = cpu_to_le16(4); 2259 buf->Data.Fid.PersistentFileId = fid->persistent_fid; 2260 buf->Data.Fid.VolatileFileId = fid->volatile_fid; 2261 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */ 2262 buf->Name[0] = 'D'; 2263 buf->Name[1] = 'H'; 2264 buf->Name[2] = 'n'; 2265 buf->Name[3] = 'C'; 2266 return buf; 2267 } 2268 2269 static void 2270 parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf) 2271 { 2272 struct create_disk_id_rsp *pdisk_id = 
(struct create_disk_id_rsp *)cc; 2273 2274 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n", 2275 pdisk_id->DiskFileId, pdisk_id->VolumeId); 2276 buf->IndexNumber = pdisk_id->DiskFileId; 2277 } 2278 2279 static void 2280 parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info, 2281 struct create_posix_rsp *posix) 2282 { 2283 int sid_len; 2284 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset); 2285 u8 *end = beg + le32_to_cpu(cc->DataLength); 2286 u8 *sid; 2287 2288 memset(posix, 0, sizeof(*posix)); 2289 2290 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0)); 2291 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4)); 2292 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8)); 2293 2294 sid = beg + 12; 2295 sid_len = posix_info_sid_size(sid, end); 2296 if (sid_len < 0) { 2297 cifs_dbg(VFS, "bad owner sid in posix create response\n"); 2298 return; 2299 } 2300 memcpy(&posix->owner, sid, sid_len); 2301 2302 sid = sid + sid_len; 2303 sid_len = posix_info_sid_size(sid, end); 2304 if (sid_len < 0) { 2305 cifs_dbg(VFS, "bad group sid in posix create response\n"); 2306 return; 2307 } 2308 memcpy(&posix->group, sid, sid_len); 2309 2310 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n", 2311 posix->nlink, posix->mode, posix->reparse_tag); 2312 } 2313 2314 int smb2_parse_contexts(struct TCP_Server_Info *server, 2315 struct kvec *rsp_iov, 2316 unsigned int *epoch, 2317 char *lease_key, __u8 *oplock, 2318 struct smb2_file_all_info *buf, 2319 struct create_posix_rsp *posix) 2320 { 2321 struct smb2_create_rsp *rsp = rsp_iov->iov_base; 2322 struct create_context *cc; 2323 size_t rem, off, len; 2324 size_t doff, dlen; 2325 size_t noff, nlen; 2326 char *name; 2327 static const char smb3_create_tag_posix[] = { 2328 0x93, 0xAD, 0x25, 0x50, 0x9C, 2329 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83, 2330 0xDE, 0x96, 0x8B, 0xCD, 0x7C 2331 }; 2332 2333 *oplock = 0; 2334 2335 off = le32_to_cpu(rsp->CreateContextsOffset); 2336 rem = le32_to_cpu(rsp->CreateContextsLength); 2337 if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len) 2338 return -EINVAL; 2339 cc = (struct create_context *)((u8 *)rsp + off); 2340 2341 /* Initialize inode number to 0 in case no valid data in qfid context */ 2342 if (buf) 2343 buf->IndexNumber = 0; 2344 2345 while (rem >= sizeof(*cc)) { 2346 doff = le16_to_cpu(cc->DataOffset); 2347 dlen = le32_to_cpu(cc->DataLength); 2348 if (check_add_overflow(doff, dlen, &len) || len > rem) 2349 return -EINVAL; 2350 2351 noff = le16_to_cpu(cc->NameOffset); 2352 nlen = le16_to_cpu(cc->NameLength); 2353 if (noff + nlen > doff) 2354 return -EINVAL; 2355 2356 name = (char *)cc + noff; 2357 switch (nlen) { 2358 case 4: 2359 if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) { 2360 *oplock = server->ops->parse_lease_buf(cc, epoch, 2361 lease_key); 2362 } else if (buf && 2363 !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) { 2364 parse_query_id_ctxt(cc, buf); 2365 } 2366 break; 2367 case 16: 2368 if (posix && !memcmp(name, smb3_create_tag_posix, 16)) 2369 parse_posix_ctxt(cc, buf, posix); 2370 break; 2371 default: 2372 cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n", 2373 __func__, nlen, dlen); 2374 if (IS_ENABLED(CONFIG_CIFS_DEBUG2)) 2375 cifs_dump_mem("context data: ", cc, dlen); 2376 break; 2377 } 2378 2379 off = le32_to_cpu(cc->Next); 2380 if (!off) 2381 break; 2382 if (check_sub_overflow(rem, off, &rem)) 2383 return -EINVAL; 2384 cc = (struct create_context *)((u8 *)cc + off); 2385 } 2386 2387 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) 2388 *oplock = rsp->OplockLevel; 
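/* At this point *oplock holds either the lease state filled in by the lease context handler above or, if no lease was granted, the plain oplock level from the response. */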
2389 2390 return 0; 2391 } 2392 2393 static int 2394 add_lease_context(struct TCP_Server_Info *server, 2395 struct smb2_create_req *req, 2396 struct kvec *iov, 2397 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock) 2398 { 2399 unsigned int num = *num_iovec; 2400 2401 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock); 2402 if (iov[num].iov_base == NULL) 2403 return -ENOMEM; 2404 iov[num].iov_len = server->vals->create_lease_size; 2405 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; 2406 *num_iovec = num + 1; 2407 return 0; 2408 } 2409 2410 static struct create_durable_v2 * 2411 create_durable_v2_buf(struct cifs_open_parms *oparms) 2412 { 2413 struct cifs_fid *pfid = oparms->fid; 2414 struct create_durable_v2 *buf; 2415 2416 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); 2417 if (!buf) 2418 return NULL; 2419 2420 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2421 (struct create_durable_v2, dcontext)); 2422 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2)); 2423 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2424 (struct create_durable_v2, Name)); 2425 buf->ccontext.NameLength = cpu_to_le16(4); 2426 2427 /* 2428 * NB: Handle timeout defaults to 0, which allows server to choose 2429 * (most servers default to 120 seconds) and most clients default to 0. 2430 * This can be overridden at mount ("handletimeout=") if the user wants 2431 * a different persistent (or resilient) handle timeout for all opens 2432 * on a particular SMB3 mount. 2433 */ 2434 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); 2435 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 2436 2437 /* for replay, we should not overwrite the existing create guid */ 2438 if (!oparms->replay) { 2439 generate_random_uuid(buf->dcontext.CreateGuid); 2440 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); 2441 } else 2442 memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16); 2443 2444 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */ 2445 buf->Name[0] = 'D'; 2446 buf->Name[1] = 'H'; 2447 buf->Name[2] = '2'; 2448 buf->Name[3] = 'Q'; 2449 return buf; 2450 } 2451 2452 static struct create_durable_handle_reconnect_v2 * 2453 create_reconnect_durable_v2_buf(struct cifs_fid *fid) 2454 { 2455 struct create_durable_handle_reconnect_v2 *buf; 2456 2457 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2), 2458 GFP_KERNEL); 2459 if (!buf) 2460 return NULL; 2461 2462 buf->ccontext.DataOffset = 2463 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, 2464 dcontext)); 2465 buf->ccontext.DataLength = 2466 cpu_to_le32(sizeof(struct durable_reconnect_context_v2)); 2467 buf->ccontext.NameOffset = 2468 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, 2469 Name)); 2470 buf->ccontext.NameLength = cpu_to_le16(4); 2471 2472 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid; 2473 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid; 2474 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 2475 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16); 2476 2477 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */ 2478 buf->Name[0] = 'D'; 2479 buf->Name[1] = 'H'; 2480 buf->Name[2] = '2'; 2481 buf->Name[3] = 'C'; 2482 return buf; 2483 } 2484 2485 static int 2486 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, 2487 struct cifs_open_parms *oparms) 2488 { 2489 unsigned int num = *num_iovec; 2490 2491 iov[num].iov_base = create_durable_v2_buf(oparms); 2492 if (iov[num].iov_base == 
NULL) 2493 return -ENOMEM; 2494 iov[num].iov_len = sizeof(struct create_durable_v2); 2495 *num_iovec = num + 1; 2496 return 0; 2497 } 2498 2499 static int 2500 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec, 2501 struct cifs_open_parms *oparms) 2502 { 2503 unsigned int num = *num_iovec; 2504 2505 /* indicate that we don't need to relock the file */ 2506 oparms->reconnect = false; 2507 2508 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid); 2509 if (iov[num].iov_base == NULL) 2510 return -ENOMEM; 2511 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2); 2512 *num_iovec = num + 1; 2513 return 0; 2514 } 2515 2516 static int 2517 add_durable_context(struct kvec *iov, unsigned int *num_iovec, 2518 struct cifs_open_parms *oparms, bool use_persistent) 2519 { 2520 unsigned int num = *num_iovec; 2521 2522 if (use_persistent) { 2523 if (oparms->reconnect) 2524 return add_durable_reconnect_v2_context(iov, num_iovec, 2525 oparms); 2526 else 2527 return add_durable_v2_context(iov, num_iovec, oparms); 2528 } 2529 2530 if (oparms->reconnect) { 2531 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid); 2532 /* indicate that we don't need to relock the file */ 2533 oparms->reconnect = false; 2534 } else 2535 iov[num].iov_base = create_durable_buf(); 2536 if (iov[num].iov_base == NULL) 2537 return -ENOMEM; 2538 iov[num].iov_len = sizeof(struct create_durable); 2539 *num_iovec = num + 1; 2540 return 0; 2541 } 2542 2543 /* See MS-SMB2 2.2.13.2.7 */ 2544 static struct crt_twarp_ctxt * 2545 create_twarp_buf(__u64 timewarp) 2546 { 2547 struct crt_twarp_ctxt *buf; 2548 2549 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL); 2550 if (!buf) 2551 return NULL; 2552 2553 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2554 (struct crt_twarp_ctxt, Timestamp)); 2555 buf->ccontext.DataLength = cpu_to_le32(8); 2556 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2557 (struct crt_twarp_ctxt, Name)); 2558 buf->ccontext.NameLength = cpu_to_le16(4); 2559 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */ 2560 buf->Name[0] = 'T'; 2561 buf->Name[1] = 'W'; 2562 buf->Name[2] = 'r'; 2563 buf->Name[3] = 'p'; 2564 buf->Timestamp = cpu_to_le64(timewarp); 2565 return buf; 2566 } 2567 2568 /* See MS-SMB2 2.2.13.2.7 */ 2569 static int 2570 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp) 2571 { 2572 unsigned int num = *num_iovec; 2573 2574 iov[num].iov_base = create_twarp_buf(timewarp); 2575 if (iov[num].iov_base == NULL) 2576 return -ENOMEM; 2577 iov[num].iov_len = sizeof(struct crt_twarp_ctxt); 2578 *num_iovec = num + 1; 2579 return 0; 2580 } 2581 2582 /* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ 2583 static void setup_owner_group_sids(char *buf) 2584 { 2585 struct owner_group_sids *sids = (struct owner_group_sids *)buf; 2586 2587 /* Populate the user ownership fields S-1-5-88-1 */ 2588 sids->owner.Revision = 1; 2589 sids->owner.NumAuth = 3; 2590 sids->owner.Authority[5] = 5; 2591 sids->owner.SubAuthorities[0] = cpu_to_le32(88); 2592 sids->owner.SubAuthorities[1] = cpu_to_le32(1); 2593 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val); 2594 2595 /* Populate the group ownership fields S-1-5-88-2 */ 2596 sids->group.Revision = 1; 2597 sids->group.NumAuth = 3; 2598 sids->group.Authority[5] = 5; 2599 sids->group.SubAuthorities[0] = cpu_to_le32(88); 2600 sids->group.SubAuthorities[1] = cpu_to_le32(2); 2601 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val); 2602 2603 cifs_dbg(FYI, 
"owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val); 2604 } 2605 2606 /* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */ 2607 static struct crt_sd_ctxt * 2608 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) 2609 { 2610 struct crt_sd_ctxt *buf; 2611 __u8 *ptr, *aclptr; 2612 unsigned int acelen, acl_size, ace_count; 2613 unsigned int owner_offset = 0; 2614 unsigned int group_offset = 0; 2615 struct smb3_acl acl = {}; 2616 2617 *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8); 2618 2619 if (set_owner) { 2620 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */ 2621 *len += sizeof(struct owner_group_sids); 2622 } 2623 2624 buf = kzalloc(*len, GFP_KERNEL); 2625 if (buf == NULL) 2626 return buf; 2627 2628 ptr = (__u8 *)&buf[1]; 2629 if (set_owner) { 2630 /* offset fields are from beginning of security descriptor not of create context */ 2631 owner_offset = ptr - (__u8 *)&buf->sd; 2632 buf->sd.OffsetOwner = cpu_to_le32(owner_offset); 2633 group_offset = owner_offset + offsetof(struct owner_group_sids, group); 2634 buf->sd.OffsetGroup = cpu_to_le32(group_offset); 2635 2636 setup_owner_group_sids(ptr); 2637 ptr += sizeof(struct owner_group_sids); 2638 } else { 2639 buf->sd.OffsetOwner = 0; 2640 buf->sd.OffsetGroup = 0; 2641 } 2642 2643 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd)); 2644 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name)); 2645 buf->ccontext.NameLength = cpu_to_le16(4); 2646 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */ 2647 buf->Name[0] = 'S'; 2648 buf->Name[1] = 'e'; 2649 buf->Name[2] = 'c'; 2650 buf->Name[3] = 'D'; 2651 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */ 2652 2653 /* 2654 * ACL is "self relative" ie ACL is stored in contiguous block of memory 2655 * and "DP" ie the DACL is present 2656 */ 2657 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP); 2658 2659 /* offset owner, group and Sbz1 and SACL are all zero */ 2660 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2661 /* Ship the ACL for now. we will copy it into buf later. */ 2662 aclptr = ptr; 2663 ptr += sizeof(struct smb3_acl); 2664 2665 /* create one ACE to hold the mode embedded in reserved special SID */ 2666 acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode); 2667 ptr += acelen; 2668 acl_size = acelen + sizeof(struct smb3_acl); 2669 ace_count = 1; 2670 2671 if (set_owner) { 2672 /* we do not need to reallocate buffer to add the two more ACEs. 
plenty of space */ 2673 acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr); 2674 ptr += acelen; 2675 acl_size += acelen; 2676 ace_count += 1; 2677 } 2678 2679 /* and one more ACE to allow access for authenticated users */ 2680 acelen = setup_authusers_ACE((struct cifs_ace *)ptr); 2681 ptr += acelen; 2682 acl_size += acelen; 2683 ace_count += 1; 2684 2685 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ 2686 acl.AclSize = cpu_to_le16(acl_size); 2687 acl.AceCount = cpu_to_le16(ace_count); 2688 /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */ 2689 memcpy(aclptr, &acl, sizeof(struct smb3_acl)); 2690 2691 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2692 *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8); 2693 2694 return buf; 2695 } 2696 2697 static int 2698 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner) 2699 { 2700 unsigned int num = *num_iovec; 2701 unsigned int len = 0; 2702 2703 iov[num].iov_base = create_sd_buf(mode, set_owner, &len); 2704 if (iov[num].iov_base == NULL) 2705 return -ENOMEM; 2706 iov[num].iov_len = len; 2707 *num_iovec = num + 1; 2708 return 0; 2709 } 2710 2711 static struct crt_query_id_ctxt * 2712 create_query_id_buf(void) 2713 { 2714 struct crt_query_id_ctxt *buf; 2715 2716 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL); 2717 if (!buf) 2718 return NULL; 2719 2720 buf->ccontext.DataOffset = cpu_to_le16(0); 2721 buf->ccontext.DataLength = cpu_to_le32(0); 2722 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2723 (struct crt_query_id_ctxt, Name)); 2724 buf->ccontext.NameLength = cpu_to_le16(4); 2725 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */ 2726 buf->Name[0] = 'Q'; 2727 buf->Name[1] = 'F'; 2728 buf->Name[2] = 'i'; 2729 buf->Name[3] = 'd'; 2730 return buf; 2731 } 2732 2733 /* See MS-SMB2 2.2.13.2.9 */ 2734 static int 2735 add_query_id_context(struct kvec *iov, unsigned int *num_iovec) 2736 { 2737 unsigned int num = *num_iovec; 2738 2739 iov[num].iov_base = create_query_id_buf(); 2740 if (iov[num].iov_base == NULL) 2741 return -ENOMEM; 2742 iov[num].iov_len = sizeof(struct crt_query_id_ctxt); 2743 *num_iovec = num + 1; 2744 return 0; 2745 } 2746 2747 static void add_ea_context(struct cifs_open_parms *oparms, 2748 struct kvec *rq_iov, unsigned int *num_iovs) 2749 { 2750 struct kvec *iov = oparms->ea_cctx; 2751 2752 if (iov && iov->iov_base && iov->iov_len) { 2753 rq_iov[(*num_iovs)++] = *iov; 2754 memset(iov, 0, sizeof(*iov)); 2755 } 2756 } 2757 2758 static int 2759 alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len, 2760 const char *treename, const __le16 *path) 2761 { 2762 int treename_len, path_len; 2763 struct nls_table *cp; 2764 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)}; 2765 2766 /* 2767 * skip leading "\\" 2768 */ 2769 treename_len = strlen(treename); 2770 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\')) 2771 return -EINVAL; 2772 2773 treename += 2; 2774 treename_len -= 2; 2775 2776 path_len = UniStrnlen((wchar_t *)path, PATH_MAX); 2777 2778 /* make room for one path separator only if @path isn't empty */ 2779 *out_len = treename_len + (path[0] ? 1 : 0) + path_len; 2780 2781 /* 2782 * final path needs to be 8-byte aligned as specified in 2783 * MS-SMB2 2.2.13 SMB2 CREATE Request. 
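 * For example, tree name "\\server\share" combined with path "dir\file" becomes "server\share\dir\file" (as UTF-16), then padded up to an 8-byte multiple.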
2784 */ 2785 *out_size = round_up(*out_len * sizeof(__le16), 8); 2786 *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL); 2787 if (!*out_path) 2788 return -ENOMEM; 2789 2790 cp = load_nls_default(); 2791 cifs_strtoUTF16(*out_path, treename, treename_len, cp); 2792 2793 /* Do not append the separator if the path is empty */ 2794 if (path[0] != cpu_to_le16(0x0000)) { 2795 UniStrcat((wchar_t *)*out_path, (wchar_t *)sep); 2796 UniStrcat((wchar_t *)*out_path, (wchar_t *)path); 2797 } 2798 2799 unload_nls(cp); 2800 2801 return 0; 2802 } 2803 2804 int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, 2805 umode_t mode, struct cifs_tcon *tcon, 2806 const char *full_path, 2807 struct cifs_sb_info *cifs_sb) 2808 { 2809 struct smb_rqst rqst; 2810 struct smb2_create_req *req; 2811 struct smb2_create_rsp *rsp = NULL; 2812 struct cifs_ses *ses = tcon->ses; 2813 struct kvec iov[3]; /* make sure at least one for each open context */ 2814 struct kvec rsp_iov = {NULL, 0}; 2815 int resp_buftype; 2816 int uni_path_len; 2817 __le16 *copy_path = NULL; 2818 int copy_size; 2819 int rc = 0; 2820 unsigned int n_iov = 2; 2821 __u32 file_attributes = 0; 2822 char *pc_buf = NULL; 2823 int flags = 0; 2824 unsigned int total_len; 2825 __le16 *utf16_path = NULL; 2826 struct TCP_Server_Info *server; 2827 int retries = 0, cur_sleep = 1; 2828 2829 replay_again: 2830 /* reinitialize for possible replay */ 2831 flags = 0; 2832 n_iov = 2; 2833 server = cifs_pick_channel(ses); 2834 2835 cifs_dbg(FYI, "mkdir\n"); 2836 2837 /* resource #1: path allocation */ 2838 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); 2839 if (!utf16_path) 2840 return -ENOMEM; 2841 2842 if (!ses || !server) { 2843 rc = -EIO; 2844 goto err_free_path; 2845 } 2846 2847 /* resource #2: request */ 2848 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server, 2849 (void **) &req, &total_len); 2850 if (rc) 2851 goto err_free_path; 2852 2853 2854 if (smb3_encryption_required(tcon)) 2855 flags |= CIFS_TRANSFORM_REQ; 2856 2857 req->ImpersonationLevel = IL_IMPERSONATION; 2858 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES); 2859 /* File attributes ignored on open (used in create though) */ 2860 req->FileAttributes = cpu_to_le32(file_attributes); 2861 req->ShareAccess = FILE_SHARE_ALL_LE; 2862 req->CreateDisposition = cpu_to_le32(FILE_CREATE); 2863 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE); 2864 2865 iov[0].iov_base = (char *)req; 2866 /* -1 since last byte is buf[0] which is sent below (path) */ 2867 iov[0].iov_len = total_len - 1; 2868 2869 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); 2870 2871 /* [MS-SMB2] 2.2.13 NameOffset: 2872 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of 2873 * the SMB2 header, the file name includes a prefix that will 2874 * be processed during DFS name normalization as specified in 2875 * section 3.3.5.9. Otherwise, the file name is relative to 2876 * the share that is identified by the TreeId in the SMB2 2877 * header. 
2878 */ 2879 if (tcon->share_flags & SHI1005_FLAGS_DFS) { 2880 int name_len; 2881 2882 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS; 2883 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size, 2884 &name_len, 2885 tcon->tree_name, utf16_path); 2886 if (rc) 2887 goto err_free_req; 2888 2889 req->NameLength = cpu_to_le16(name_len * 2); 2890 uni_path_len = copy_size; 2891 /* free before overwriting resource */ 2892 kfree(utf16_path); 2893 utf16_path = copy_path; 2894 } else { 2895 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2; 2896 /* MUST set path len (NameLength) to 0 opening root of share */ 2897 req->NameLength = cpu_to_le16(uni_path_len - 2); 2898 if (uni_path_len % 8 != 0) { 2899 copy_size = roundup(uni_path_len, 8); 2900 copy_path = kzalloc(copy_size, GFP_KERNEL); 2901 if (!copy_path) { 2902 rc = -ENOMEM; 2903 goto err_free_req; 2904 } 2905 memcpy((char *)copy_path, (const char *)utf16_path, 2906 uni_path_len); 2907 uni_path_len = copy_size; 2908 /* free before overwriting resource */ 2909 kfree(utf16_path); 2910 utf16_path = copy_path; 2911 } 2912 } 2913 2914 iov[1].iov_len = uni_path_len; 2915 iov[1].iov_base = utf16_path; 2916 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE; 2917 2918 if (tcon->posix_extensions) { 2919 /* resource #3: posix buf */ 2920 rc = add_posix_context(iov, &n_iov, mode); 2921 if (rc) 2922 goto err_free_req; 2923 req->CreateContextsOffset = cpu_to_le32( 2924 sizeof(struct smb2_create_req) + 2925 iov[1].iov_len); 2926 pc_buf = iov[n_iov-1].iov_base; 2927 } 2928 2929 2930 memset(&rqst, 0, sizeof(struct smb_rqst)); 2931 rqst.rq_iov = iov; 2932 rqst.rq_nvec = n_iov; 2933 2934 /* no need to inc num_remote_opens because we close it just below */ 2935 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE, 2936 FILE_WRITE_ATTRIBUTES); 2937 2938 if (retries) 2939 smb2_set_replay(server, &rqst); 2940 2941 /* resource #4: response buffer */ 2942 rc = cifs_send_recv(xid, ses, server, 2943 &rqst, &resp_buftype, flags, &rsp_iov); 2944 if (rc) { 2945 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); 2946 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid, 2947 CREATE_NOT_FILE, 2948 FILE_WRITE_ATTRIBUTES, rc); 2949 goto err_free_rsp_buf; 2950 } 2951 2952 /* 2953 * Although unlikely to be possible for rsp to be null and rc not set, 2954 * adding check below is slightly safer long term (and quiets Coverity 2955 * warning) 2956 */ 2957 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; 2958 if (rsp == NULL) { 2959 rc = -EIO; 2960 kfree(pc_buf); 2961 goto err_free_req; 2962 } 2963 2964 trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid, 2965 CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES); 2966 2967 SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId); 2968 2969 /* Eventually save off posix specific response info and timestamps */ 2970 2971 err_free_rsp_buf: 2972 free_rsp_buf(resp_buftype, rsp); 2973 kfree(pc_buf); 2974 err_free_req: 2975 cifs_small_buf_release(req); 2976 err_free_path: 2977 kfree(utf16_path); 2978 2979 if (is_replayable_error(rc) && 2980 smb2_should_replay(tcon, &retries, &cur_sleep)) 2981 goto replay_again; 2982 2983 return rc; 2984 } 2985 2986 int 2987 SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 2988 struct smb_rqst *rqst, __u8 *oplock, 2989 struct cifs_open_parms *oparms, __le16 *path) 2990 { 2991 struct smb2_create_req *req; 2992 unsigned int n_iov = 2; 2993 __u32 file_attributes = 0; 2994 int copy_size; 2995 int uni_path_len; 2996 unsigned int total_len; 2997
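/* rq_iov layout built below: [0] fixed-size create request, [1] UTF-16 name, [2..] optional create contexts (lease, durable, posix, timewarp, security descriptor, query id, EA) */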
struct kvec *iov = rqst->rq_iov; 2998 __le16 *copy_path; 2999 int rc; 3000 3001 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server, 3002 (void **) &req, &total_len); 3003 if (rc) 3004 return rc; 3005 3006 iov[0].iov_base = (char *)req; 3007 /* -1 since last byte is buf[0] which is sent below (path) */ 3008 iov[0].iov_len = total_len - 1; 3009 3010 if (oparms->create_options & CREATE_OPTION_READONLY) 3011 file_attributes |= ATTR_READONLY; 3012 if (oparms->create_options & CREATE_OPTION_SPECIAL) 3013 file_attributes |= ATTR_SYSTEM; 3014 3015 req->ImpersonationLevel = IL_IMPERSONATION; 3016 req->DesiredAccess = cpu_to_le32(oparms->desired_access); 3017 /* File attributes ignored on open (used in create though) */ 3018 req->FileAttributes = cpu_to_le32(file_attributes); 3019 req->ShareAccess = FILE_SHARE_ALL_LE; 3020 3021 req->CreateDisposition = cpu_to_le32(oparms->disposition); 3022 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); 3023 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); 3024 3025 /* [MS-SMB2] 2.2.13 NameOffset: 3026 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of 3027 * the SMB2 header, the file name includes a prefix that will 3028 * be processed during DFS name normalization as specified in 3029 * section 3.3.5.9. Otherwise, the file name is relative to 3030 * the share that is identified by the TreeId in the SMB2 3031 * header. 3032 */ 3033 if (tcon->share_flags & SHI1005_FLAGS_DFS) { 3034 int name_len; 3035 3036 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS; 3037 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size, 3038 &name_len, 3039 tcon->tree_name, path); 3040 if (rc) 3041 return rc; 3042 req->NameLength = cpu_to_le16(name_len * 2); 3043 uni_path_len = copy_size; 3044 path = copy_path; 3045 } else { 3046 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; 3047 /* MUST set path len (NameLength) to 0 opening root of share */ 3048 req->NameLength = cpu_to_le16(uni_path_len - 2); 3049 copy_size = round_up(uni_path_len, 8); 3050 copy_path = kzalloc(copy_size, GFP_KERNEL); 3051 if (!copy_path) 3052 return -ENOMEM; 3053 memcpy((char *)copy_path, (const char *)path, 3054 uni_path_len); 3055 uni_path_len = copy_size; 3056 path = copy_path; 3057 } 3058 3059 iov[1].iov_len = uni_path_len; 3060 iov[1].iov_base = path; 3061 3062 if ((!server->oplocks) || (tcon->no_lease)) 3063 *oplock = SMB2_OPLOCK_LEVEL_NONE; 3064 3065 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || 3066 *oplock == SMB2_OPLOCK_LEVEL_NONE) 3067 req->RequestedOplockLevel = *oplock; 3068 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) && 3069 (oparms->create_options & CREATE_NOT_FILE)) 3070 req->RequestedOplockLevel = *oplock; /* no srv lease support */ 3071 else { 3072 rc = add_lease_context(server, req, iov, &n_iov, 3073 oparms->fid->lease_key, oplock); 3074 if (rc) 3075 return rc; 3076 } 3077 3078 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) { 3079 rc = add_durable_context(iov, &n_iov, oparms, 3080 tcon->use_persistent); 3081 if (rc) 3082 return rc; 3083 } 3084 3085 if (tcon->posix_extensions) { 3086 rc = add_posix_context(iov, &n_iov, oparms->mode); 3087 if (rc) 3088 return rc; 3089 } 3090 3091 if (tcon->snapshot_time) { 3092 cifs_dbg(FYI, "adding snapshot context\n"); 3093 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time); 3094 if (rc) 3095 return rc; 3096 } 3097 3098 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) { 3099 bool set_mode; 3100 bool set_owner; 3101 3102 if ((oparms->cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MODE_FROM_SID) && 3103 (oparms->mode != ACL_NO_MODE)) 3104 set_mode = true; 3105 else { 3106 set_mode = false; 3107 oparms->mode = ACL_NO_MODE; 3108 } 3109 3110 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) 3111 set_owner = true; 3112 else 3113 set_owner = false; 3114 3115 if (set_owner | set_mode) { 3116 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode); 3117 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner); 3118 if (rc) 3119 return rc; 3120 } 3121 } 3122 3123 add_query_id_context(iov, &n_iov); 3124 add_ea_context(oparms, iov, &n_iov); 3125 3126 if (n_iov > 2) { 3127 /* 3128 * We have create contexts behind iov[1] (the file 3129 * name), point at them from the main create request 3130 */ 3131 req->CreateContextsOffset = cpu_to_le32( 3132 sizeof(struct smb2_create_req) + 3133 iov[1].iov_len); 3134 req->CreateContextsLength = 0; 3135 3136 for (unsigned int i = 2; i < (n_iov-1); i++) { 3137 struct kvec *v = &iov[i]; 3138 size_t len = v->iov_len; 3139 struct create_context *cctx = 3140 (struct create_context *)v->iov_base; 3141 3142 cctx->Next = cpu_to_le32(len); 3143 le32_add_cpu(&req->CreateContextsLength, len); 3144 } 3145 le32_add_cpu(&req->CreateContextsLength, 3146 iov[n_iov-1].iov_len); 3147 } 3148 3149 rqst->rq_nvec = n_iov; 3150 return 0; 3151 } 3152 3153 /* rq_iov[0] is the request and is released by cifs_small_buf_release(). 3154 * All other vectors are freed by kfree(). 3155 */ 3156 void 3157 SMB2_open_free(struct smb_rqst *rqst) 3158 { 3159 int i; 3160 3161 if (rqst && rqst->rq_iov) { 3162 cifs_small_buf_release(rqst->rq_iov[0].iov_base); 3163 for (i = 1; i < rqst->rq_nvec; i++) 3164 if (rqst->rq_iov[i].iov_base != smb2_padding) 3165 kfree(rqst->rq_iov[i].iov_base); 3166 } 3167 } 3168 3169 int 3170 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, 3171 __u8 *oplock, struct smb2_file_all_info *buf, 3172 struct create_posix_rsp *posix, 3173 struct kvec *err_iov, int *buftype) 3174 { 3175 struct smb_rqst rqst; 3176 struct smb2_create_rsp *rsp = NULL; 3177 struct cifs_tcon *tcon = oparms->tcon; 3178 struct cifs_ses *ses = tcon->ses; 3179 struct TCP_Server_Info *server; 3180 struct kvec iov[SMB2_CREATE_IOV_SIZE]; 3181 struct kvec rsp_iov = {NULL, 0}; 3182 int resp_buftype = CIFS_NO_BUFFER; 3183 int rc = 0; 3184 int flags = 0; 3185 int retries = 0, cur_sleep = 1; 3186 3187 replay_again: 3188 /* reinitialize for possible replay */ 3189 flags = 0; 3190 server = cifs_pick_channel(ses); 3191 oparms->replay = !!(retries); 3192 3193 cifs_dbg(FYI, "create/open\n"); 3194 if (!ses || !server) 3195 return -EIO; 3196 3197 if (smb3_encryption_required(tcon)) 3198 flags |= CIFS_TRANSFORM_REQ; 3199 3200 memset(&rqst, 0, sizeof(struct smb_rqst)); 3201 memset(&iov, 0, sizeof(iov)); 3202 rqst.rq_iov = iov; 3203 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE; 3204 3205 rc = SMB2_open_init(tcon, server, 3206 &rqst, oplock, oparms, path); 3207 if (rc) 3208 goto creat_exit; 3209 3210 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path, 3211 oparms->create_options, oparms->desired_access); 3212 3213 if (retries) 3214 smb2_set_replay(server, &rqst); 3215 3216 rc = cifs_send_recv(xid, ses, server, 3217 &rqst, &resp_buftype, flags, 3218 &rsp_iov); 3219 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; 3220 3221 if (rc != 0) { 3222 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); 3223 if (err_iov && rsp) { 3224 *err_iov = rsp_iov; 3225 *buftype = resp_buftype; 3226 resp_buftype = CIFS_NO_BUFFER; 3227 rsp = NULL; 3228 } 3229 
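/* If err_iov was filled in above, the caller now owns the error response (e.g. to parse symlink or other error contexts) and must free it; resp_buftype was cleared so it is not released at creat_exit. */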
trace_smb3_open_err(xid, tcon->tid, ses->Suid, 3230 oparms->create_options, oparms->desired_access, rc); 3231 if (rc == -EREMCHG) { 3232 pr_warn_once("server share %s deleted\n", 3233 tcon->tree_name); 3234 tcon->need_reconnect = true; 3235 } 3236 goto creat_exit; 3237 } else if (rsp == NULL) /* unlikely to happen, but safer to check */ 3238 goto creat_exit; 3239 else 3240 trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid, 3241 oparms->create_options, oparms->desired_access); 3242 3243 atomic_inc(&tcon->num_remote_opens); 3244 oparms->fid->persistent_fid = rsp->PersistentFileId; 3245 oparms->fid->volatile_fid = rsp->VolatileFileId; 3246 oparms->fid->access = oparms->desired_access; 3247 #ifdef CONFIG_CIFS_DEBUG2 3248 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId); 3249 #endif /* CIFS_DEBUG2 */ 3250 3251 if (buf) { 3252 buf->CreationTime = rsp->CreationTime; 3253 buf->LastAccessTime = rsp->LastAccessTime; 3254 buf->LastWriteTime = rsp->LastWriteTime; 3255 buf->ChangeTime = rsp->ChangeTime; 3256 buf->AllocationSize = rsp->AllocationSize; 3257 buf->EndOfFile = rsp->EndofFile; 3258 buf->Attributes = rsp->FileAttributes; 3259 buf->NumberOfLinks = cpu_to_le32(1); 3260 buf->DeletePending = 0; 3261 } 3262 3263 3264 rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch, 3265 oparms->fid->lease_key, oplock, buf, posix); 3266 creat_exit: 3267 SMB2_open_free(&rqst); 3268 free_rsp_buf(resp_buftype, rsp); 3269 3270 if (is_replayable_error(rc) && 3271 smb2_should_replay(tcon, &retries, &cur_sleep)) 3272 goto replay_again; 3273 3274 return rc; 3275 } 3276 3277 int 3278 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3279 struct smb_rqst *rqst, 3280 u64 persistent_fid, u64 volatile_fid, u32 opcode, 3281 char *in_data, u32 indatalen, 3282 __u32 max_response_size) 3283 { 3284 struct smb2_ioctl_req *req; 3285 struct kvec *iov = rqst->rq_iov; 3286 unsigned int total_len; 3287 int rc; 3288 char *in_data_buf; 3289 3290 rc = smb2_ioctl_req_init(opcode, tcon, server, 3291 (void **) &req, &total_len); 3292 if (rc) 3293 return rc; 3294 3295 if (indatalen) { 3296 /* 3297 * indatalen is usually small at a couple of bytes max, so 3298 * just allocate through generic pool 3299 */ 3300 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS); 3301 if (!in_data_buf) { 3302 cifs_small_buf_release(req); 3303 return -ENOMEM; 3304 } 3305 } 3306 3307 req->CtlCode = cpu_to_le32(opcode); 3308 req->PersistentFileId = persistent_fid; 3309 req->VolatileFileId = volatile_fid; 3310 3311 iov[0].iov_base = (char *)req; 3312 /* 3313 * If no input data, the size of ioctl struct in 3314 * protocol spec still includes a 1 byte data buffer, 3315 * but if input data passed to ioctl, we do not 3316 * want to double count this, so we do not send 3317 * the dummy one byte of data in iovec[0] if sending 3318 * input data (in iovec[1]). 3319 */ 3320 if (indatalen) { 3321 req->InputCount = cpu_to_le32(indatalen); 3322 /* do not set InputOffset if no input data */ 3323 req->InputOffset = 3324 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer)); 3325 rqst->rq_nvec = 2; 3326 iov[0].iov_len = total_len - 1; 3327 iov[1].iov_base = in_data_buf; 3328 iov[1].iov_len = indatalen; 3329 } else { 3330 rqst->rq_nvec = 1; 3331 iov[0].iov_len = total_len; 3332 } 3333 3334 req->OutputOffset = 0; 3335 req->OutputCount = 0; /* MBZ */ 3336 3337 /* 3338 * In most cases max_response_size is set to 16K (CIFSMaxBufSize) 3339 * We Could increase default MaxOutputResponse, but that could require 3340 * more credits. 
Windows typically sets this smaller, but for some 3341 * ioctls it may be useful to allow server to send more. No point 3342 * limiting what the server can send as long as fits in one credit 3343 * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want 3344 * to increase this limit up in the future. 3345 * Note that for snapshot queries that servers like Azure expect that 3346 * the first query be minimal size (and just used to get the number/size 3347 * of previous versions) so response size must be specified as EXACTLY 3348 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple 3349 * of eight bytes. Currently that is the only case where we set max 3350 * response size smaller. 3351 */ 3352 req->MaxOutputResponse = cpu_to_le32(max_response_size); 3353 req->hdr.CreditCharge = 3354 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), 3355 SMB2_MAX_BUFFER_SIZE)); 3356 /* always an FSCTL (for now) */ 3357 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); 3358 3359 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ 3360 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) 3361 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 3362 3363 return 0; 3364 } 3365 3366 void 3367 SMB2_ioctl_free(struct smb_rqst *rqst) 3368 { 3369 int i; 3370 3371 if (rqst && rqst->rq_iov) { 3372 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3373 for (i = 1; i < rqst->rq_nvec; i++) 3374 if (rqst->rq_iov[i].iov_base != smb2_padding) 3375 kfree(rqst->rq_iov[i].iov_base); 3376 } 3377 } 3378 3379 3380 /* 3381 * SMB2 IOCTL is used for both IOCTLs and FSCTLs 3382 */ 3383 int 3384 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 3385 u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, 3386 u32 max_out_data_len, char **out_data, 3387 u32 *plen /* returned data len */) 3388 { 3389 struct smb_rqst rqst; 3390 struct smb2_ioctl_rsp *rsp = NULL; 3391 struct cifs_ses *ses; 3392 struct TCP_Server_Info *server; 3393 struct kvec iov[SMB2_IOCTL_IOV_SIZE]; 3394 struct kvec rsp_iov = {NULL, 0}; 3395 int resp_buftype = CIFS_NO_BUFFER; 3396 int rc = 0; 3397 int flags = 0; 3398 int retries = 0, cur_sleep = 1; 3399 3400 if (!tcon) 3401 return -EIO; 3402 3403 ses = tcon->ses; 3404 if (!ses) 3405 return -EIO; 3406 3407 replay_again: 3408 /* reinitialize for possible replay */ 3409 flags = 0; 3410 server = cifs_pick_channel(ses); 3411 3412 if (!server) 3413 return -EIO; 3414 3415 cifs_dbg(FYI, "SMB2 IOCTL\n"); 3416 3417 if (out_data != NULL) 3418 *out_data = NULL; 3419 3420 /* zero out returned data len, in case of error */ 3421 if (plen) 3422 *plen = 0; 3423 3424 if (smb3_encryption_required(tcon)) 3425 flags |= CIFS_TRANSFORM_REQ; 3426 3427 memset(&rqst, 0, sizeof(struct smb_rqst)); 3428 memset(&iov, 0, sizeof(iov)); 3429 rqst.rq_iov = iov; 3430 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; 3431 3432 rc = SMB2_ioctl_init(tcon, server, 3433 &rqst, persistent_fid, volatile_fid, opcode, 3434 in_data, indatalen, max_out_data_len); 3435 if (rc) 3436 goto ioctl_exit; 3437 3438 if (retries) 3439 smb2_set_replay(server, &rqst); 3440 3441 rc = cifs_send_recv(xid, ses, server, 3442 &rqst, &resp_buftype, flags, 3443 &rsp_iov); 3444 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; 3445 3446 if (rc != 0) 3447 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid, 3448 ses->Suid, 0, opcode, rc); 3449 3450 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) { 3451 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3452 goto ioctl_exit; 3453 } else if (rc == -EINVAL) { 3454 if ((opcode != 
FSCTL_SRV_COPYCHUNK_WRITE) && 3455 (opcode != FSCTL_SRV_COPYCHUNK)) { 3456 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3457 goto ioctl_exit; 3458 } 3459 } else if (rc == -E2BIG) { 3460 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) { 3461 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3462 goto ioctl_exit; 3463 } 3464 } 3465 3466 /* check if caller wants to look at return data or just return rc */ 3467 if ((plen == NULL) || (out_data == NULL)) 3468 goto ioctl_exit; 3469 3470 /* 3471 * Although unlikely to be possible for rsp to be null and rc not set, 3472 * adding check below is slightly safer long term (and quiets Coverity 3473 * warning) 3474 */ 3475 if (rsp == NULL) { 3476 rc = -EIO; 3477 goto ioctl_exit; 3478 } 3479 3480 *plen = le32_to_cpu(rsp->OutputCount); 3481 3482 /* We check for obvious errors in the output buffer length and offset */ 3483 if (*plen == 0) 3484 goto ioctl_exit; /* server returned no data */ 3485 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) { 3486 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); 3487 *plen = 0; 3488 rc = -EIO; 3489 goto ioctl_exit; 3490 } 3491 3492 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) { 3493 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen, 3494 le32_to_cpu(rsp->OutputOffset)); 3495 *plen = 0; 3496 rc = -EIO; 3497 goto ioctl_exit; 3498 } 3499 3500 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset), 3501 *plen, GFP_KERNEL); 3502 if (*out_data == NULL) { 3503 rc = -ENOMEM; 3504 goto ioctl_exit; 3505 } 3506 3507 ioctl_exit: 3508 SMB2_ioctl_free(&rqst); 3509 free_rsp_buf(resp_buftype, rsp); 3510 3511 if (is_replayable_error(rc) && 3512 smb2_should_replay(tcon, &retries, &cur_sleep)) 3513 goto replay_again; 3514 3515 return rc; 3516 } 3517 3518 /* 3519 * Individual callers to ioctl worker function follow 3520 */ 3521 3522 int 3523 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, 3524 u64 persistent_fid, u64 volatile_fid) 3525 { 3526 int rc; 3527 struct compress_ioctl fsctl_input; 3528 char *ret_data = NULL; 3529 3530 fsctl_input.CompressionState = 3531 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); 3532 3533 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, 3534 FSCTL_SET_COMPRESSION, 3535 (char *)&fsctl_input /* data input */, 3536 2 /* in data len */, CIFSMaxBufSize /* max out data */, 3537 &ret_data /* out data */, NULL); 3538 3539 cifs_dbg(FYI, "set compression rc %d\n", rc); 3540 3541 return rc; 3542 } 3543 3544 int 3545 SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3546 struct smb_rqst *rqst, 3547 u64 persistent_fid, u64 volatile_fid, bool query_attrs) 3548 { 3549 struct smb2_close_req *req; 3550 struct kvec *iov = rqst->rq_iov; 3551 unsigned int total_len; 3552 int rc; 3553 3554 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server, 3555 (void **) &req, &total_len); 3556 if (rc) 3557 return rc; 3558 3559 req->PersistentFileId = persistent_fid; 3560 req->VolatileFileId = volatile_fid; 3561 if (query_attrs) 3562 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB; 3563 else 3564 req->Flags = 0; 3565 iov[0].iov_base = (char *)req; 3566 iov[0].iov_len = total_len; 3567 3568 return 0; 3569 } 3570 3571 void 3572 SMB2_close_free(struct smb_rqst *rqst) 3573 { 3574 if (rqst && rqst->rq_iov) 3575 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3576 } 3577 3578 int 3579 __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 3580 u64 persistent_fid, u64 volatile_fid, 3581 struct smb2_file_network_open_info *pbuf) 3582 { 
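/* If @pbuf is non-NULL, ask the server to return attributes with the close response (SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB) and copy them back to the caller. */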
3583 struct smb_rqst rqst; 3584 struct smb2_close_rsp *rsp = NULL; 3585 struct cifs_ses *ses = tcon->ses; 3586 struct TCP_Server_Info *server; 3587 struct kvec iov[1]; 3588 struct kvec rsp_iov; 3589 int resp_buftype = CIFS_NO_BUFFER; 3590 int rc = 0; 3591 int flags = 0; 3592 bool query_attrs = false; 3593 int retries = 0, cur_sleep = 1; 3594 3595 replay_again: 3596 /* reinitialize for possible replay */ 3597 flags = 0; 3598 query_attrs = false; 3599 server = cifs_pick_channel(ses); 3600 3601 cifs_dbg(FYI, "Close\n"); 3602 3603 if (!ses || !server) 3604 return -EIO; 3605 3606 if (smb3_encryption_required(tcon)) 3607 flags |= CIFS_TRANSFORM_REQ; 3608 3609 memset(&rqst, 0, sizeof(struct smb_rqst)); 3610 memset(&iov, 0, sizeof(iov)); 3611 rqst.rq_iov = iov; 3612 rqst.rq_nvec = 1; 3613 3614 /* check if need to ask server to return timestamps in close response */ 3615 if (pbuf) 3616 query_attrs = true; 3617 3618 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid); 3619 rc = SMB2_close_init(tcon, server, 3620 &rqst, persistent_fid, volatile_fid, 3621 query_attrs); 3622 if (rc) 3623 goto close_exit; 3624 3625 if (retries) 3626 smb2_set_replay(server, &rqst); 3627 3628 rc = cifs_send_recv(xid, ses, server, 3629 &rqst, &resp_buftype, flags, &rsp_iov); 3630 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; 3631 3632 if (rc != 0) { 3633 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); 3634 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid, 3635 rc); 3636 goto close_exit; 3637 } else { 3638 trace_smb3_close_done(xid, persistent_fid, tcon->tid, 3639 ses->Suid); 3640 if (pbuf) 3641 memcpy(&pbuf->network_open_info, 3642 &rsp->network_open_info, 3643 sizeof(pbuf->network_open_info)); 3644 atomic_dec(&tcon->num_remote_opens); 3645 } 3646 3647 close_exit: 3648 SMB2_close_free(&rqst); 3649 free_rsp_buf(resp_buftype, rsp); 3650 3651 /* retry close in a worker thread if this one is interrupted */ 3652 if (is_interrupt_error(rc)) { 3653 int tmp_rc; 3654 3655 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid, 3656 volatile_fid); 3657 if (tmp_rc) 3658 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n", 3659 persistent_fid, tmp_rc); 3660 } 3661 3662 if (is_replayable_error(rc) && 3663 smb2_should_replay(tcon, &retries, &cur_sleep)) 3664 goto replay_again; 3665 3666 return rc; 3667 } 3668 3669 int 3670 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 3671 u64 persistent_fid, u64 volatile_fid) 3672 { 3673 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL); 3674 } 3675 3676 int 3677 smb2_validate_iov(unsigned int offset, unsigned int buffer_length, 3678 struct kvec *iov, unsigned int min_buf_size) 3679 { 3680 unsigned int smb_len = iov->iov_len; 3681 char *end_of_smb = smb_len + (char *)iov->iov_base; 3682 char *begin_of_buf = offset + (char *)iov->iov_base; 3683 char *end_of_buf = begin_of_buf + buffer_length; 3684 3685 3686 if (buffer_length < min_buf_size) { 3687 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n", 3688 buffer_length, min_buf_size); 3689 return -EINVAL; 3690 } 3691 3692 /* check if beyond RFC1001 maximum length */ 3693 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) { 3694 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n", 3695 buffer_length, smb_len); 3696 return -EINVAL; 3697 } 3698 3699 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) { 3700 cifs_dbg(VFS, "Invalid server response, bad offset to data\n"); 3701 return -EINVAL; 3702 } 3703 3704 return 0; 3705 } 3706 3707 /* 3708 * If 
SMB buffer fields are valid, copy into temporary buffer to hold result. 3709 * Caller must free buffer. 3710 */ 3711 int 3712 smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length, 3713 struct kvec *iov, unsigned int minbufsize, 3714 char *data) 3715 { 3716 char *begin_of_buf = offset + (char *)iov->iov_base; 3717 int rc; 3718 3719 if (!data) 3720 return -EINVAL; 3721 3722 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize); 3723 if (rc) 3724 return rc; 3725 3726 memcpy(data, begin_of_buf, minbufsize); 3727 3728 return 0; 3729 } 3730 3731 int 3732 SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3733 struct smb_rqst *rqst, 3734 u64 persistent_fid, u64 volatile_fid, 3735 u8 info_class, u8 info_type, u32 additional_info, 3736 size_t output_len, size_t input_len, void *input) 3737 { 3738 struct smb2_query_info_req *req; 3739 struct kvec *iov = rqst->rq_iov; 3740 unsigned int total_len; 3741 size_t len; 3742 int rc; 3743 3744 if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) || 3745 len > CIFSMaxBufSize)) 3746 return -EINVAL; 3747 3748 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server, 3749 (void **) &req, &total_len); 3750 if (rc) 3751 return rc; 3752 3753 req->InfoType = info_type; 3754 req->FileInfoClass = info_class; 3755 req->PersistentFileId = persistent_fid; 3756 req->VolatileFileId = volatile_fid; 3757 req->AdditionalInformation = cpu_to_le32(additional_info); 3758 3759 req->OutputBufferLength = cpu_to_le32(output_len); 3760 if (input_len) { 3761 req->InputBufferLength = cpu_to_le32(input_len); 3762 /* total_len for smb query request never close to le16 max */ 3763 req->InputBufferOffset = cpu_to_le16(total_len - 1); 3764 memcpy(req->Buffer, input, input_len); 3765 } 3766 3767 iov[0].iov_base = (char *)req; 3768 /* 1 for Buffer */ 3769 iov[0].iov_len = len; 3770 return 0; 3771 } 3772 3773 void 3774 SMB2_query_info_free(struct smb_rqst *rqst) 3775 { 3776 if (rqst && rqst->rq_iov) 3777 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3778 } 3779 3780 static int 3781 query_info(const unsigned int xid, struct cifs_tcon *tcon, 3782 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type, 3783 u32 additional_info, size_t output_len, size_t min_len, void **data, 3784 u32 *dlen) 3785 { 3786 struct smb_rqst rqst; 3787 struct smb2_query_info_rsp *rsp = NULL; 3788 struct kvec iov[1]; 3789 struct kvec rsp_iov; 3790 int rc = 0; 3791 int resp_buftype = CIFS_NO_BUFFER; 3792 struct cifs_ses *ses = tcon->ses; 3793 struct TCP_Server_Info *server; 3794 int flags = 0; 3795 bool allocated = false; 3796 int retries = 0, cur_sleep = 1; 3797 3798 cifs_dbg(FYI, "Query Info\n"); 3799 3800 if (!ses) 3801 return -EIO; 3802 3803 replay_again: 3804 /* reinitialize for possible replay */ 3805 flags = 0; 3806 allocated = false; 3807 server = cifs_pick_channel(ses); 3808 3809 if (!server) 3810 return -EIO; 3811 3812 if (smb3_encryption_required(tcon)) 3813 flags |= CIFS_TRANSFORM_REQ; 3814 3815 memset(&rqst, 0, sizeof(struct smb_rqst)); 3816 memset(&iov, 0, sizeof(iov)); 3817 rqst.rq_iov = iov; 3818 rqst.rq_nvec = 1; 3819 3820 rc = SMB2_query_info_init(tcon, server, 3821 &rqst, persistent_fid, volatile_fid, 3822 info_class, info_type, additional_info, 3823 output_len, 0, NULL); 3824 if (rc) 3825 goto qinf_exit; 3826 3827 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid, 3828 ses->Suid, info_class, (__u32)info_type); 3829 3830 if (retries) 3831 smb2_set_replay(server, &rqst); 3832 3833 rc = cifs_send_recv(xid, 
ses, server, 3834 &rqst, &resp_buftype, flags, &rsp_iov); 3835 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; 3836 3837 if (rc) { 3838 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); 3839 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid, 3840 ses->Suid, info_class, (__u32)info_type, rc); 3841 goto qinf_exit; 3842 } 3843 3844 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid, 3845 ses->Suid, info_class, (__u32)info_type); 3846 3847 if (dlen) { 3848 *dlen = le32_to_cpu(rsp->OutputBufferLength); 3849 if (!*data) { 3850 *data = kmalloc(*dlen, GFP_KERNEL); 3851 if (!*data) { 3852 cifs_tcon_dbg(VFS, 3853 "Error %d allocating memory for acl\n", 3854 rc); 3855 *dlen = 0; 3856 rc = -ENOMEM; 3857 goto qinf_exit; 3858 } 3859 allocated = true; 3860 } 3861 } 3862 3863 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), 3864 le32_to_cpu(rsp->OutputBufferLength), 3865 &rsp_iov, dlen ? *dlen : min_len, *data); 3866 if (rc && allocated) { 3867 kfree(*data); 3868 *data = NULL; 3869 *dlen = 0; 3870 } 3871 3872 qinf_exit: 3873 SMB2_query_info_free(&rqst); 3874 free_rsp_buf(resp_buftype, rsp); 3875 3876 if (is_replayable_error(rc) && 3877 smb2_should_replay(tcon, &retries, &cur_sleep)) 3878 goto replay_again; 3879 3880 return rc; 3881 } 3882 3883 int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, 3884 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data) 3885 { 3886 return query_info(xid, tcon, persistent_fid, volatile_fid, 3887 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0, 3888 sizeof(struct smb2_file_all_info) + PATH_MAX * 2, 3889 sizeof(struct smb2_file_all_info), (void **)&data, 3890 NULL); 3891 } 3892 3893 #if 0 3894 /* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */ 3895 int 3896 SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon, 3897 u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen) 3898 { 3899 size_t output_len = sizeof(struct smb311_posix_qinfo *) + 3900 (sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2); 3901 *plen = 0; 3902 3903 return query_info(xid, tcon, persistent_fid, volatile_fid, 3904 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0, 3905 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen); 3906 /* Note caller must free "data" (passed in above). 
It may be allocated in query_info call */ 3907 } 3908 #endif 3909 3910 int 3911 SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon, 3912 u64 persistent_fid, u64 volatile_fid, 3913 void **data, u32 *plen, u32 extra_info) 3914 { 3915 __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO | 3916 extra_info; 3917 *plen = 0; 3918 3919 return query_info(xid, tcon, persistent_fid, volatile_fid, 3920 0, SMB2_O_INFO_SECURITY, additional_info, 3921 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen); 3922 } 3923 3924 int 3925 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon, 3926 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid) 3927 { 3928 return query_info(xid, tcon, persistent_fid, volatile_fid, 3929 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0, 3930 sizeof(struct smb2_file_internal_info), 3931 sizeof(struct smb2_file_internal_info), 3932 (void **)&uniqueid, NULL); 3933 } 3934 3935 /* 3936 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory 3937 * See MS-SMB2 2.2.35 and 2.2.36 3938 */ 3939 3940 static int 3941 SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst, 3942 struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3943 u64 persistent_fid, u64 volatile_fid, 3944 u32 completion_filter, bool watch_tree) 3945 { 3946 struct smb2_change_notify_req *req; 3947 struct kvec *iov = rqst->rq_iov; 3948 unsigned int total_len; 3949 int rc; 3950 3951 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server, 3952 (void **) &req, &total_len); 3953 if (rc) 3954 return rc; 3955 3956 req->PersistentFileId = persistent_fid; 3957 req->VolatileFileId = volatile_fid; 3958 /* See note 354 of MS-SMB2, 64K max */ 3959 req->OutputBufferLength = 3960 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE); 3961 req->CompletionFilter = cpu_to_le32(completion_filter); 3962 if (watch_tree) 3963 req->Flags = cpu_to_le16(SMB2_WATCH_TREE); 3964 else 3965 req->Flags = 0; 3966 3967 iov[0].iov_base = (char *)req; 3968 iov[0].iov_len = total_len; 3969 3970 return 0; 3971 } 3972 3973 int 3974 SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, 3975 u64 persistent_fid, u64 volatile_fid, bool watch_tree, 3976 u32 completion_filter, u32 max_out_data_len, char **out_data, 3977 u32 *plen /* returned data len */) 3978 { 3979 struct cifs_ses *ses = tcon->ses; 3980 struct TCP_Server_Info *server; 3981 struct smb_rqst rqst; 3982 struct smb2_change_notify_rsp *smb_rsp; 3983 struct kvec iov[1]; 3984 struct kvec rsp_iov = {NULL, 0}; 3985 int resp_buftype = CIFS_NO_BUFFER; 3986 int flags = 0; 3987 int rc = 0; 3988 int retries = 0, cur_sleep = 1; 3989 3990 replay_again: 3991 /* reinitialize for possible replay */ 3992 flags = 0; 3993 server = cifs_pick_channel(ses); 3994 3995 cifs_dbg(FYI, "change notify\n"); 3996 if (!ses || !server) 3997 return -EIO; 3998 3999 if (smb3_encryption_required(tcon)) 4000 flags |= CIFS_TRANSFORM_REQ; 4001 4002 memset(&rqst, 0, sizeof(struct smb_rqst)); 4003 memset(&iov, 0, sizeof(iov)); 4004 if (plen) 4005 *plen = 0; 4006 4007 rqst.rq_iov = iov; 4008 rqst.rq_nvec = 1; 4009 4010 rc = SMB2_notify_init(xid, &rqst, tcon, server, 4011 persistent_fid, volatile_fid, 4012 completion_filter, watch_tree); 4013 if (rc) 4014 goto cnotify_exit; 4015 4016 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid, 4017 (u8)watch_tree, completion_filter); 4018 4019 if (retries) 4020 smb2_set_replay(server, &rqst); 4021 4022 rc = cifs_send_recv(xid, ses, server, 4023 &rqst, &resp_buftype, flags, &rsp_iov); 4024 4025 if 
(rc != 0) { 4026 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE); 4027 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid, 4028 (u8)watch_tree, completion_filter, rc); 4029 } else { 4030 trace_smb3_notify_done(xid, persistent_fid, tcon->tid, 4031 ses->Suid, (u8)watch_tree, completion_filter); 4032 /* validate that notify information is plausible */ 4033 if ((rsp_iov.iov_base == NULL) || 4034 (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1)) 4035 goto cnotify_exit; 4036 4037 smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base; 4038 4039 smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset), 4040 le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov, 4041 sizeof(struct file_notify_information)); 4042 4043 *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset), 4044 le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL); 4045 if (*out_data == NULL) { 4046 rc = -ENOMEM; 4047 goto cnotify_exit; 4048 } else if (plen) 4049 *plen = le32_to_cpu(smb_rsp->OutputBufferLength); 4050 } 4051 4052 cnotify_exit: 4053 if (rqst.rq_iov) 4054 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */ 4055 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4056 4057 if (is_replayable_error(rc) && 4058 smb2_should_replay(tcon, &retries, &cur_sleep)) 4059 goto replay_again; 4060 4061 return rc; 4062 } 4063 4064 4065 4066 /* 4067 * This is a no-op for now. We're not really interested in the reply, but 4068 * rather in the fact that the server sent one and that server->lstrp 4069 * gets updated. 4070 * 4071 * FIXME: maybe we should consider checking that the reply matches request? 4072 */ 4073 static void 4074 smb2_echo_callback(struct mid_q_entry *mid) 4075 { 4076 struct TCP_Server_Info *server = mid->callback_data; 4077 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; 4078 struct cifs_credits credits = { .value = 0, .instance = 0 }; 4079 4080 if (mid->mid_state == MID_RESPONSE_RECEIVED 4081 || mid->mid_state == MID_RESPONSE_MALFORMED) { 4082 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4083 credits.instance = server->reconnect_instance; 4084 } 4085 4086 release_mid(mid); 4087 add_credits(server, &credits, CIFS_ECHO_OP); 4088 } 4089 4090 void smb2_reconnect_server(struct work_struct *work) 4091 { 4092 struct TCP_Server_Info *server = container_of(work, 4093 struct TCP_Server_Info, reconnect.work); 4094 struct TCP_Server_Info *pserver; 4095 struct cifs_ses *ses, *ses2; 4096 struct cifs_tcon *tcon, *tcon2; 4097 struct list_head tmp_list, tmp_ses_list; 4098 bool ses_exist = false; 4099 bool tcon_selected = false; 4100 int rc; 4101 bool resched = false; 4102 4103 /* first check if ref count has reached 0, if not inc ref count */ 4104 spin_lock(&cifs_tcp_ses_lock); 4105 if (!server->srv_count) { 4106 spin_unlock(&cifs_tcp_ses_lock); 4107 return; 4108 } 4109 server->srv_count++; 4110 spin_unlock(&cifs_tcp_ses_lock); 4111 4112 /* If server is a channel, select the primary channel */ 4113 pserver = SERVER_IS_CHAN(server) ? 
server->primary_server : server; 4114 4115 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ 4116 mutex_lock(&pserver->reconnect_mutex); 4117 4118 /* if the server is marked for termination, drop the ref count here */ 4119 if (server->terminate) { 4120 cifs_put_tcp_session(server, true); 4121 mutex_unlock(&pserver->reconnect_mutex); 4122 return; 4123 } 4124 4125 INIT_LIST_HEAD(&tmp_list); 4126 INIT_LIST_HEAD(&tmp_ses_list); 4127 cifs_dbg(FYI, "Reconnecting tcons and channels\n"); 4128 4129 spin_lock(&cifs_tcp_ses_lock); 4130 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 4131 spin_lock(&ses->ses_lock); 4132 if (ses->ses_status == SES_EXITING) { 4133 spin_unlock(&ses->ses_lock); 4134 continue; 4135 } 4136 spin_unlock(&ses->ses_lock); 4137 4138 tcon_selected = false; 4139 4140 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 4141 if (tcon->need_reconnect || tcon->need_reopen_files) { 4142 tcon->tc_count++; 4143 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, 4144 netfs_trace_tcon_ref_get_reconnect_server); 4145 list_add_tail(&tcon->rlist, &tmp_list); 4146 tcon_selected = true; 4147 } 4148 } 4149 /* 4150 * IPC has the same lifetime as its session and uses its 4151 * refcount. 4152 */ 4153 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) { 4154 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list); 4155 tcon_selected = true; 4156 cifs_smb_ses_inc_refcount(ses); 4157 } 4158 /* 4159 * handle the case where channel needs to reconnect 4160 * binding session, but tcon is healthy (some other channel 4161 * is active) 4162 */ 4163 spin_lock(&ses->chan_lock); 4164 if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) { 4165 list_add_tail(&ses->rlist, &tmp_ses_list); 4166 ses_exist = true; 4167 cifs_smb_ses_inc_refcount(ses); 4168 } 4169 spin_unlock(&ses->chan_lock); 4170 } 4171 spin_unlock(&cifs_tcp_ses_lock); 4172 4173 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { 4174 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true); 4175 if (!rc) 4176 cifs_reopen_persistent_handles(tcon); 4177 else 4178 resched = true; 4179 list_del_init(&tcon->rlist); 4180 if (tcon->ipc) 4181 cifs_put_smb_ses(tcon->ses); 4182 else 4183 cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server); 4184 } 4185 4186 if (!ses_exist) 4187 goto done; 4188 4189 /* allocate a dummy tcon struct used for reconnect */ 4190 tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server); 4191 if (!tcon) { 4192 resched = true; 4193 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { 4194 list_del_init(&ses->rlist); 4195 cifs_put_smb_ses(ses); 4196 } 4197 goto done; 4198 } 4199 4200 tcon->status = TID_GOOD; 4201 tcon->retry = false; 4202 tcon->need_reconnect = false; 4203 4204 /* now reconnect sessions for necessary channels */ 4205 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { 4206 tcon->ses = ses; 4207 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true); 4208 if (rc) 4209 resched = true; 4210 list_del_init(&ses->rlist); 4211 cifs_put_smb_ses(ses); 4212 } 4213 tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server); 4214 4215 done: 4216 cifs_dbg(FYI, "Reconnecting tcons and channels finished\n"); 4217 if (resched) 4218 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ); 4219 mutex_unlock(&pserver->reconnect_mutex); 4220 4221 /* now we can safely release srv struct */ 4222 cifs_put_tcp_session(server, true); 4223 } 4224 4225 int 4226 SMB2_echo(struct TCP_Server_Info *server) 4227 { 4228 struct smb2_echo_req *req; 
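/* Keepalive only: the ECHO carries no payload. If the connection still needs to negotiate, sending an echo is pointless, so the reconnect work is kicked instead; otherwise a single-credit ECHO is sent asynchronously and smb2_echo_callback() merely refreshes credits (and, by virtue of the response arriving, server->lstrp). */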
4229 int rc = 0; 4230 struct kvec iov[1]; 4231 struct smb_rqst rqst = { .rq_iov = iov, 4232 .rq_nvec = 1 }; 4233 unsigned int total_len; 4234 4235 cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id); 4236 4237 spin_lock(&server->srv_lock); 4238 if (server->ops->need_neg && 4239 server->ops->need_neg(server)) { 4240 spin_unlock(&server->srv_lock); 4241 /* No need to send echo on newly established connections */ 4242 mod_delayed_work(cifsiod_wq, &server->reconnect, 0); 4243 return rc; 4244 } 4245 spin_unlock(&server->srv_lock); 4246 4247 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, 4248 (void **)&req, &total_len); 4249 if (rc) 4250 return rc; 4251 4252 req->hdr.CreditRequest = cpu_to_le16(1); 4253 4254 iov[0].iov_len = total_len; 4255 iov[0].iov_base = (char *)req; 4256 4257 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL, 4258 server, CIFS_ECHO_OP, NULL); 4259 if (rc) 4260 cifs_dbg(FYI, "Echo request failed: %d\n", rc); 4261 4262 cifs_small_buf_release(req); 4263 return rc; 4264 } 4265 4266 void 4267 SMB2_flush_free(struct smb_rqst *rqst) 4268 { 4269 if (rqst && rqst->rq_iov) 4270 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 4271 } 4272 4273 int 4274 SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst, 4275 struct cifs_tcon *tcon, struct TCP_Server_Info *server, 4276 u64 persistent_fid, u64 volatile_fid) 4277 { 4278 struct smb2_flush_req *req; 4279 struct kvec *iov = rqst->rq_iov; 4280 unsigned int total_len; 4281 int rc; 4282 4283 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server, 4284 (void **) &req, &total_len); 4285 if (rc) 4286 return rc; 4287 4288 req->PersistentFileId = persistent_fid; 4289 req->VolatileFileId = volatile_fid; 4290 4291 iov[0].iov_base = (char *)req; 4292 iov[0].iov_len = total_len; 4293 4294 return 0; 4295 } 4296 4297 int 4298 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 4299 u64 volatile_fid) 4300 { 4301 struct cifs_ses *ses = tcon->ses; 4302 struct smb_rqst rqst; 4303 struct kvec iov[1]; 4304 struct kvec rsp_iov = {NULL, 0}; 4305 struct TCP_Server_Info *server; 4306 int resp_buftype = CIFS_NO_BUFFER; 4307 int flags = 0; 4308 int rc = 0; 4309 int retries = 0, cur_sleep = 1; 4310 4311 replay_again: 4312 /* reinitialize for possible replay */ 4313 flags = 0; 4314 server = cifs_pick_channel(ses); 4315 4316 cifs_dbg(FYI, "flush\n"); 4317 if (!ses || !(ses->server)) 4318 return -EIO; 4319 4320 if (smb3_encryption_required(tcon)) 4321 flags |= CIFS_TRANSFORM_REQ; 4322 4323 memset(&rqst, 0, sizeof(struct smb_rqst)); 4324 memset(&iov, 0, sizeof(iov)); 4325 rqst.rq_iov = iov; 4326 rqst.rq_nvec = 1; 4327 4328 rc = SMB2_flush_init(xid, &rqst, tcon, server, 4329 persistent_fid, volatile_fid); 4330 if (rc) 4331 goto flush_exit; 4332 4333 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid); 4334 4335 if (retries) 4336 smb2_set_replay(server, &rqst); 4337 4338 rc = cifs_send_recv(xid, ses, server, 4339 &rqst, &resp_buftype, flags, &rsp_iov); 4340 4341 if (rc != 0) { 4342 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); 4343 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid, 4344 rc); 4345 } else 4346 trace_smb3_flush_done(xid, persistent_fid, tcon->tid, 4347 ses->Suid); 4348 4349 flush_exit: 4350 SMB2_flush_free(&rqst); 4351 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4352 4353 if (is_replayable_error(rc) && 4354 smb2_should_replay(tcon, &retries, &cur_sleep)) 4355 goto replay_again; 4356 4357 return rc; 4358 } 4359 4360 #ifdef CONFIG_CIFS_SMB_DIRECT 4361 
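/* Gate for SMB Direct (RDMA) offload, used by both the read and write paths below: offload is attempted only on a connected RDMA transport, with neither signing nor encryption in use, and only for I/Os at least as large as the configured rdma_readwrite_threshold, since registering a memory region has its own cost. */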
static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms) 4362 { 4363 struct TCP_Server_Info *server = io_parms->server; 4364 struct cifs_tcon *tcon = io_parms->tcon; 4365 4366 /* we can only offload if we're connected */ 4367 if (!server || !tcon) 4368 return false; 4369 4370 /* we can only offload on an rdma connection */ 4371 if (!server->rdma || !server->smbd_conn) 4372 return false; 4373 4374 /* we don't support signed offload yet */ 4375 if (server->sign) 4376 return false; 4377 4378 /* we don't support encrypted offload yet */ 4379 if (smb3_encryption_required(tcon)) 4380 return false; 4381 4382 /* offload also has its overhead, so only do it if desired */ 4383 if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold) 4384 return false; 4385 4386 return true; 4387 } 4388 #endif /* CONFIG_CIFS_SMB_DIRECT */ 4389 4390 /* 4391 * To form a chain of read requests, any read requests after the first should 4392 * have the end_of_chain boolean set to true. 4393 */ 4394 static int 4395 smb2_new_read_req(void **buf, unsigned int *total_len, 4396 struct cifs_io_parms *io_parms, struct cifs_io_subrequest *rdata, 4397 unsigned int remaining_bytes, int request_type) 4398 { 4399 int rc = -EACCES; 4400 struct smb2_read_req *req = NULL; 4401 struct smb2_hdr *shdr; 4402 struct TCP_Server_Info *server = io_parms->server; 4403 4404 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server, 4405 (void **) &req, total_len); 4406 if (rc) 4407 return rc; 4408 4409 if (server == NULL) 4410 return -ECONNABORTED; 4411 4412 shdr = &req->hdr; 4413 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 4414 4415 req->PersistentFileId = io_parms->persistent_fid; 4416 req->VolatileFileId = io_parms->volatile_fid; 4417 req->ReadChannelInfoOffset = 0; /* reserved */ 4418 req->ReadChannelInfoLength = 0; /* reserved */ 4419 req->Channel = 0; /* reserved */ 4420 req->MinimumCount = 0; 4421 req->Length = cpu_to_le32(io_parms->length); 4422 req->Offset = cpu_to_le64(io_parms->offset); 4423 4424 trace_smb3_read_enter(rdata ? rdata->rreq->debug_id : 0, 4425 rdata ? rdata->subreq.debug_index : 0, 4426 rdata ? 
rdata->xid : 0, 4427 io_parms->persistent_fid, 4428 io_parms->tcon->tid, io_parms->tcon->ses->Suid, 4429 io_parms->offset, io_parms->length); 4430 #ifdef CONFIG_CIFS_SMB_DIRECT 4431 /* 4432 * If we want to do a RDMA write, fill in and append 4433 * smbd_buffer_descriptor_v1 to the end of read request 4434 */ 4435 if (smb3_use_rdma_offload(io_parms)) { 4436 struct smbd_buffer_descriptor_v1 *v1; 4437 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4438 4439 rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter, 4440 true, need_invalidate); 4441 if (!rdata->mr) 4442 return -EAGAIN; 4443 4444 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 4445 if (need_invalidate) 4446 req->Channel = SMB2_CHANNEL_RDMA_V1; 4447 req->ReadChannelInfoOffset = 4448 cpu_to_le16(offsetof(struct smb2_read_req, Buffer)); 4449 req->ReadChannelInfoLength = 4450 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1)); 4451 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0]; 4452 v1->offset = cpu_to_le64(rdata->mr->mr->iova); 4453 v1->token = cpu_to_le32(rdata->mr->mr->rkey); 4454 v1->length = cpu_to_le32(rdata->mr->mr->length); 4455 4456 *total_len += sizeof(*v1) - 1; 4457 } 4458 #endif 4459 if (request_type & CHAINED_REQUEST) { 4460 if (!(request_type & END_OF_CHAIN)) { 4461 /* next 8-byte aligned request */ 4462 *total_len = ALIGN(*total_len, 8); 4463 shdr->NextCommand = cpu_to_le32(*total_len); 4464 } else /* END_OF_CHAIN */ 4465 shdr->NextCommand = 0; 4466 if (request_type & RELATED_REQUEST) { 4467 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; 4468 /* 4469 * Related requests use info from previous read request 4470 * in chain. 4471 */ 4472 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF); 4473 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF); 4474 req->PersistentFileId = (u64)-1; 4475 req->VolatileFileId = (u64)-1; 4476 } 4477 } 4478 if (remaining_bytes > io_parms->length) 4479 req->RemainingBytes = cpu_to_le32(remaining_bytes); 4480 else 4481 req->RemainingBytes = 0; 4482 4483 *buf = req; 4484 return rc; 4485 } 4486 4487 static void 4488 smb2_readv_callback(struct mid_q_entry *mid) 4489 { 4490 struct cifs_io_subrequest *rdata = mid->callback_data; 4491 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 4492 struct TCP_Server_Info *server = rdata->server; 4493 struct smb2_hdr *shdr = 4494 (struct smb2_hdr *)rdata->iov[0].iov_base; 4495 struct cifs_credits credits = { .value = 0, .instance = 0 }; 4496 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], .rq_nvec = 1 }; 4497 4498 if (rdata->got_bytes) { 4499 rqst.rq_iter = rdata->subreq.io_iter; 4500 rqst.rq_iter_size = iov_iter_count(&rdata->subreq.io_iter); 4501 } 4502 4503 WARN_ONCE(rdata->server != mid->server, 4504 "rdata server %p != mid server %p", 4505 rdata->server, mid->server); 4506 4507 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n", 4508 __func__, mid->mid, mid->mid_state, rdata->result, 4509 rdata->subreq.len); 4510 4511 switch (mid->mid_state) { 4512 case MID_RESPONSE_RECEIVED: 4513 credits.value = le16_to_cpu(shdr->CreditRequest); 4514 credits.instance = server->reconnect_instance; 4515 /* result already set, check signature */ 4516 if (server->sign && !mid->decrypted) { 4517 int rc; 4518 4519 iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes); 4520 rc = smb2_verify_signature(&rqst, server); 4521 if (rc) 4522 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n", 4523 rc); 4524 } 4525 /* FIXME: should this be counted toward the initiating task? 
*/ 4526 task_io_account_read(rdata->got_bytes); 4527 cifs_stats_bytes_read(tcon, rdata->got_bytes); 4528 break; 4529 case MID_REQUEST_SUBMITTED: 4530 case MID_RETRY_NEEDED: 4531 rdata->result = -EAGAIN; 4532 if (server->sign && rdata->got_bytes) 4533 /* reset bytes number since we cannot verify the signature */ 4534 rdata->got_bytes = 0; 4535 /* FIXME: should this be counted toward the initiating task? */ 4536 task_io_account_read(rdata->got_bytes); 4537 cifs_stats_bytes_read(tcon, rdata->got_bytes); 4538 break; 4539 case MID_RESPONSE_MALFORMED: 4540 credits.value = le16_to_cpu(shdr->CreditRequest); 4541 credits.instance = server->reconnect_instance; 4542 fallthrough; 4543 default: 4544 rdata->result = -EIO; 4545 } 4546 #ifdef CONFIG_CIFS_SMB_DIRECT 4547 /* 4548 * If this rdata has a memory registered, the MR can be freed. 4549 * The MR needs to be freed as soon as I/O finishes to prevent deadlock, 4550 * because MRs are limited in number and are needed for future I/Os. 4551 */ 4552 if (rdata->mr) { 4553 smbd_deregister_mr(rdata->mr); 4554 rdata->mr = NULL; 4555 } 4556 #endif 4557 if (rdata->result && rdata->result != -ENODATA) { 4558 cifs_stats_fail_inc(tcon, SMB2_READ_HE); 4559 trace_smb3_read_err(rdata->rreq->debug_id, 4560 rdata->subreq.debug_index, 4561 rdata->xid, 4562 rdata->req->cfile->fid.persistent_fid, 4563 tcon->tid, tcon->ses->Suid, rdata->subreq.start, 4564 rdata->subreq.len, rdata->result); 4565 } else 4566 trace_smb3_read_done(rdata->rreq->debug_id, 4567 rdata->subreq.debug_index, 4568 rdata->xid, 4569 rdata->req->cfile->fid.persistent_fid, 4570 tcon->tid, tcon->ses->Suid, 4571 rdata->subreq.start, rdata->got_bytes); 4572 4573 if (rdata->result == -ENODATA) { 4574 /* We may have got an EOF error because fallocate 4575 * failed to enlarge the file. 4576 */ 4577 if (rdata->subreq.start < rdata->subreq.rreq->i_size) 4578 rdata->result = 0; 4579 } 4580 rdata->credits.value = 0; 4581 netfs_subreq_terminated(&rdata->subreq, 4582 (rdata->result == 0 || rdata->result == -EAGAIN) ? 
4583 rdata->got_bytes : rdata->result, true); 4584 release_mid(mid); 4585 add_credits(server, &credits, 0); 4586 } 4587 4588 /* smb2_async_readv - send an async read, and set up mid to handle result */ 4589 int 4590 smb2_async_readv(struct cifs_io_subrequest *rdata) 4591 { 4592 int rc, flags = 0; 4593 char *buf; 4594 struct smb2_hdr *shdr; 4595 struct cifs_io_parms io_parms; 4596 struct smb_rqst rqst = { .rq_iov = rdata->iov, 4597 .rq_nvec = 1 }; 4598 struct TCP_Server_Info *server; 4599 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 4600 unsigned int total_len; 4601 int credit_request; 4602 4603 cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n", 4604 __func__, rdata->subreq.start, rdata->subreq.len); 4605 4606 if (!rdata->server) 4607 rdata->server = cifs_pick_channel(tcon->ses); 4608 4609 io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink); 4610 io_parms.server = server = rdata->server; 4611 io_parms.offset = rdata->subreq.start; 4612 io_parms.length = rdata->subreq.len; 4613 io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid; 4614 io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid; 4615 io_parms.pid = rdata->pid; 4616 4617 rc = smb2_new_read_req( 4618 (void **) &buf, &total_len, &io_parms, rdata, 0, 0); 4619 if (rc) 4620 return rc; 4621 4622 if (smb3_encryption_required(io_parms.tcon)) 4623 flags |= CIFS_TRANSFORM_REQ; 4624 4625 rdata->iov[0].iov_base = buf; 4626 rdata->iov[0].iov_len = total_len; 4627 4628 shdr = (struct smb2_hdr *)buf; 4629 4630 if (rdata->credits.value > 0) { 4631 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->subreq.len, 4632 SMB2_MAX_BUFFER_SIZE)); 4633 credit_request = le16_to_cpu(shdr->CreditCharge) + 8; 4634 if (server->credits >= server->max_credits) 4635 shdr->CreditRequest = cpu_to_le16(0); 4636 else 4637 shdr->CreditRequest = cpu_to_le16( 4638 min_t(int, server->max_credits - 4639 server->credits, credit_request)); 4640 4641 rc = adjust_credits(server, &rdata->credits, rdata->subreq.len); 4642 if (rc) 4643 goto async_readv_out; 4644 4645 flags |= CIFS_HAS_CREDITS; 4646 } 4647 4648 rc = cifs_call_async(server, &rqst, 4649 cifs_readv_receive, smb2_readv_callback, 4650 smb3_handle_read_data, rdata, flags, 4651 &rdata->credits); 4652 if (rc) { 4653 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); 4654 trace_smb3_read_err(rdata->rreq->debug_id, 4655 rdata->subreq.debug_index, 4656 rdata->xid, io_parms.persistent_fid, 4657 io_parms.tcon->tid, 4658 io_parms.tcon->ses->Suid, 4659 io_parms.offset, io_parms.length, rc); 4660 } 4661 4662 async_readv_out: 4663 cifs_small_buf_release(buf); 4664 return rc; 4665 } 4666 4667 int 4668 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, 4669 unsigned int *nbytes, char **buf, int *buf_type) 4670 { 4671 struct smb_rqst rqst; 4672 int resp_buftype, rc; 4673 struct smb2_read_req *req = NULL; 4674 struct smb2_read_rsp *rsp = NULL; 4675 struct kvec iov[1]; 4676 struct kvec rsp_iov; 4677 unsigned int total_len; 4678 int flags = CIFS_LOG_ERROR; 4679 struct cifs_ses *ses = io_parms->tcon->ses; 4680 4681 if (!io_parms->server) 4682 io_parms->server = cifs_pick_channel(io_parms->tcon->ses); 4683 4684 *nbytes = 0; 4685 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0); 4686 if (rc) 4687 return rc; 4688 4689 if (smb3_encryption_required(io_parms->tcon)) 4690 flags |= CIFS_TRANSFORM_REQ; 4691 4692 iov[0].iov_base = (char *)req; 4693 iov[0].iov_len = total_len; 4694 4695 memset(&rqst, 0, sizeof(struct smb_rqst)); 4696 rqst.rq_iov = iov; 4697 rqst.rq_nvec = 1; 4698 
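/* Synchronous send/receive follows: -ENODATA from the server is treated as end of file and mapped to a zero-byte successful read, and the DataLength reported by the server is sanity-checked against both CIFS_MAX_MSGSIZE and the requested length before *nbytes is set. */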
4699 rc = cifs_send_recv(xid, ses, io_parms->server, 4700 &rqst, &resp_buftype, flags, &rsp_iov); 4701 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; 4702 4703 if (rc) { 4704 if (rc != -ENODATA) { 4705 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 4706 cifs_dbg(VFS, "Send error in read = %d\n", rc); 4707 trace_smb3_read_err(0, 0, xid, 4708 req->PersistentFileId, 4709 io_parms->tcon->tid, ses->Suid, 4710 io_parms->offset, io_parms->length, 4711 rc); 4712 } else 4713 trace_smb3_read_done(0, 0, xid, 4714 req->PersistentFileId, io_parms->tcon->tid, 4715 ses->Suid, io_parms->offset, 0); 4716 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4717 cifs_small_buf_release(req); 4718 return rc == -ENODATA ? 0 : rc; 4719 } else 4720 trace_smb3_read_done(0, 0, xid, 4721 req->PersistentFileId, 4722 io_parms->tcon->tid, ses->Suid, 4723 io_parms->offset, io_parms->length); 4724 4725 cifs_small_buf_release(req); 4726 4727 *nbytes = le32_to_cpu(rsp->DataLength); 4728 if ((*nbytes > CIFS_MAX_MSGSIZE) || 4729 (*nbytes > io_parms->length)) { 4730 cifs_dbg(FYI, "bad length %d for count %d\n", 4731 *nbytes, io_parms->length); 4732 rc = -EIO; 4733 *nbytes = 0; 4734 } 4735 4736 if (*buf) { 4737 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes); 4738 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4739 } else if (resp_buftype != CIFS_NO_BUFFER) { 4740 *buf = rsp_iov.iov_base; 4741 if (resp_buftype == CIFS_SMALL_BUFFER) 4742 *buf_type = CIFS_SMALL_BUFFER; 4743 else if (resp_buftype == CIFS_LARGE_BUFFER) 4744 *buf_type = CIFS_LARGE_BUFFER; 4745 } 4746 return rc; 4747 } 4748 4749 /* 4750 * Check the mid_state and signature on received buffer (if any), and queue the 4751 * workqueue completion task. 4752 */ 4753 static void 4754 smb2_writev_callback(struct mid_q_entry *mid) 4755 { 4756 struct cifs_io_subrequest *wdata = mid->callback_data; 4757 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 4758 struct TCP_Server_Info *server = wdata->server; 4759 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; 4760 struct cifs_credits credits = { .value = 0, .instance = 0 }; 4761 ssize_t result = 0; 4762 size_t written; 4763 4764 WARN_ONCE(wdata->server != mid->server, 4765 "wdata server %p != mid server %p", 4766 wdata->server, mid->server); 4767 4768 switch (mid->mid_state) { 4769 case MID_RESPONSE_RECEIVED: 4770 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4771 credits.instance = server->reconnect_instance; 4772 result = smb2_check_receive(mid, server, 0); 4773 if (result != 0) 4774 break; 4775 4776 written = le32_to_cpu(rsp->DataLength); 4777 /* 4778 * Mask off high 16 bits when bytes written as returned 4779 * by the server is greater than bytes requested by the 4780 * client. OS/2 servers are known to set incorrect 4781 * CountHigh values. 4782 */ 4783 if (written > wdata->subreq.len) 4784 written &= 0xFFFF; 4785 4786 if (written < wdata->subreq.len) 4787 wdata->result = -ENOSPC; 4788 else 4789 wdata->subreq.len = written; 4790 break; 4791 case MID_REQUEST_SUBMITTED: 4792 case MID_RETRY_NEEDED: 4793 result = -EAGAIN; 4794 break; 4795 case MID_RESPONSE_MALFORMED: 4796 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4797 credits.instance = server->reconnect_instance; 4798 fallthrough; 4799 default: 4800 result = -EIO; 4801 break; 4802 } 4803 #ifdef CONFIG_CIFS_SMB_DIRECT 4804 /* 4805 * If this wdata has a memory registered, the MR can be freed 4806 * The number of MRs available is limited, it's important to recover 4807 * used MR as soon as I/O is finished. 
Hold MR longer in the later 4808 * I/O process can possibly result in I/O deadlock due to lack of MR 4809 * to send request on I/O retry 4810 */ 4811 if (wdata->mr) { 4812 smbd_deregister_mr(wdata->mr); 4813 wdata->mr = NULL; 4814 } 4815 #endif 4816 if (result) { 4817 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 4818 trace_smb3_write_err(wdata->xid, 4819 wdata->req->cfile->fid.persistent_fid, 4820 tcon->tid, tcon->ses->Suid, wdata->subreq.start, 4821 wdata->subreq.len, wdata->result); 4822 if (wdata->result == -ENOSPC) 4823 pr_warn_once("Out of space writing to %s\n", 4824 tcon->tree_name); 4825 } else 4826 trace_smb3_write_done(0 /* no xid */, 4827 wdata->req->cfile->fid.persistent_fid, 4828 tcon->tid, tcon->ses->Suid, 4829 wdata->subreq.start, wdata->subreq.len); 4830 4831 wdata->credits.value = 0; 4832 cifs_write_subrequest_terminated(wdata, result ?: written, true); 4833 release_mid(mid); 4834 add_credits(server, &credits, 0); 4835 } 4836 4837 /* smb2_async_writev - send an async write, and set up mid to handle result */ 4838 void 4839 smb2_async_writev(struct cifs_io_subrequest *wdata) 4840 { 4841 int rc = -EACCES, flags = 0; 4842 struct smb2_write_req *req = NULL; 4843 struct smb2_hdr *shdr; 4844 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 4845 struct TCP_Server_Info *server = wdata->server; 4846 struct kvec iov[1]; 4847 struct smb_rqst rqst = { }; 4848 unsigned int total_len, xid = wdata->xid; 4849 struct cifs_io_parms _io_parms; 4850 struct cifs_io_parms *io_parms = NULL; 4851 int credit_request; 4852 4853 if (!wdata->server || test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags)) 4854 server = wdata->server = cifs_pick_channel(tcon->ses); 4855 4856 /* 4857 * in future we may get cifs_io_parms passed in from the caller, 4858 * but for now we construct it here... 
4859 */ 4860 _io_parms = (struct cifs_io_parms) { 4861 .tcon = tcon, 4862 .server = server, 4863 .offset = wdata->subreq.start, 4864 .length = wdata->subreq.len, 4865 .persistent_fid = wdata->req->cfile->fid.persistent_fid, 4866 .volatile_fid = wdata->req->cfile->fid.volatile_fid, 4867 .pid = wdata->pid, 4868 }; 4869 io_parms = &_io_parms; 4870 4871 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server, 4872 (void **) &req, &total_len); 4873 if (rc) 4874 goto out; 4875 4876 if (smb3_encryption_required(tcon)) 4877 flags |= CIFS_TRANSFORM_REQ; 4878 4879 shdr = (struct smb2_hdr *)req; 4880 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 4881 4882 req->PersistentFileId = io_parms->persistent_fid; 4883 req->VolatileFileId = io_parms->volatile_fid; 4884 req->WriteChannelInfoOffset = 0; 4885 req->WriteChannelInfoLength = 0; 4886 req->Channel = SMB2_CHANNEL_NONE; 4887 req->Offset = cpu_to_le64(io_parms->offset); 4888 req->DataOffset = cpu_to_le16( 4889 offsetof(struct smb2_write_req, Buffer)); 4890 req->RemainingBytes = 0; 4891 4892 trace_smb3_write_enter(wdata->xid, 4893 io_parms->persistent_fid, 4894 io_parms->tcon->tid, 4895 io_parms->tcon->ses->Suid, 4896 io_parms->offset, 4897 io_parms->length); 4898 4899 #ifdef CONFIG_CIFS_SMB_DIRECT 4900 /* 4901 * If we want to do a server RDMA read, fill in and append 4902 * smbd_buffer_descriptor_v1 to the end of write request 4903 */ 4904 if (smb3_use_rdma_offload(io_parms)) { 4905 struct smbd_buffer_descriptor_v1 *v1; 4906 size_t data_size = iov_iter_count(&wdata->subreq.io_iter); 4907 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4908 4909 wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter, 4910 false, need_invalidate); 4911 if (!wdata->mr) { 4912 rc = -EAGAIN; 4913 goto async_writev_out; 4914 } 4915 req->Length = 0; 4916 req->DataOffset = 0; 4917 req->RemainingBytes = cpu_to_le32(data_size); 4918 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 4919 if (need_invalidate) 4920 req->Channel = SMB2_CHANNEL_RDMA_V1; 4921 req->WriteChannelInfoOffset = 4922 cpu_to_le16(offsetof(struct smb2_write_req, Buffer)); 4923 req->WriteChannelInfoLength = 4924 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1)); 4925 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0]; 4926 v1->offset = cpu_to_le64(wdata->mr->mr->iova); 4927 v1->token = cpu_to_le32(wdata->mr->mr->rkey); 4928 v1->length = cpu_to_le32(wdata->mr->mr->length); 4929 } 4930 #endif 4931 iov[0].iov_len = total_len - 1; 4932 iov[0].iov_base = (char *)req; 4933 4934 rqst.rq_iov = iov; 4935 rqst.rq_nvec = 1; 4936 rqst.rq_iter = wdata->subreq.io_iter; 4937 rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter); 4938 if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags)) 4939 smb2_set_replay(server, &rqst); 4940 #ifdef CONFIG_CIFS_SMB_DIRECT 4941 if (wdata->mr) 4942 iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1); 4943 #endif 4944 cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n", 4945 io_parms->offset, io_parms->length, iov_iter_count(&rqst.rq_iter)); 4946 4947 #ifdef CONFIG_CIFS_SMB_DIRECT 4948 /* For RDMA read, I/O size is in RemainingBytes not in Length */ 4949 if (!wdata->mr) 4950 req->Length = cpu_to_le32(io_parms->length); 4951 #else 4952 req->Length = cpu_to_le32(io_parms->length); 4953 #endif 4954 4955 if (wdata->credits.value > 0) { 4956 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len, 4957 SMB2_MAX_BUFFER_SIZE)); 4958 credit_request = le16_to_cpu(shdr->CreditCharge) + 8; 4959 if (server->credits >= server->max_credits) 4960 
shdr->CreditRequest = cpu_to_le16(0); 4961 else 4962 shdr->CreditRequest = cpu_to_le16( 4963 min_t(int, server->max_credits - 4964 server->credits, credit_request)); 4965 4966 rc = adjust_credits(server, &wdata->credits, io_parms->length); 4967 if (rc) 4968 goto async_writev_out; 4969 4970 flags |= CIFS_HAS_CREDITS; 4971 } 4972 4973 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL, 4974 wdata, flags, &wdata->credits); 4975 /* Can't touch wdata if rc == 0 */ 4976 if (rc) { 4977 trace_smb3_write_err(xid, 4978 io_parms->persistent_fid, 4979 io_parms->tcon->tid, 4980 io_parms->tcon->ses->Suid, 4981 io_parms->offset, 4982 io_parms->length, 4983 rc); 4984 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 4985 } 4986 4987 async_writev_out: 4988 cifs_small_buf_release(req); 4989 out: 4990 if (rc) { 4991 add_credits_and_wake_if(wdata->server, &wdata->credits, 0); 4992 cifs_write_subrequest_terminated(wdata, rc, true); 4993 } 4994 } 4995 4996 /* 4997 * SMB2_write function gets iov pointer to kvec array with n_vec as a length. 4998 * The length field from io_parms must be at least 1 and indicates a number of 4999 * elements with data to write that begins with position 1 in iov array. All 5000 * data length is specified by count. 5001 */ 5002 int 5003 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, 5004 unsigned int *nbytes, struct kvec *iov, int n_vec) 5005 { 5006 struct smb_rqst rqst; 5007 int rc = 0; 5008 struct smb2_write_req *req = NULL; 5009 struct smb2_write_rsp *rsp = NULL; 5010 int resp_buftype; 5011 struct kvec rsp_iov; 5012 int flags = 0; 5013 unsigned int total_len; 5014 struct TCP_Server_Info *server; 5015 int retries = 0, cur_sleep = 1; 5016 5017 replay_again: 5018 /* reinitialize for possible replay */ 5019 flags = 0; 5020 *nbytes = 0; 5021 if (!io_parms->server) 5022 io_parms->server = cifs_pick_channel(io_parms->tcon->ses); 5023 server = io_parms->server; 5024 if (server == NULL) 5025 return -ECONNABORTED; 5026 5027 if (n_vec < 1) 5028 return rc; 5029 5030 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server, 5031 (void **) &req, &total_len); 5032 if (rc) 5033 return rc; 5034 5035 if (smb3_encryption_required(io_parms->tcon)) 5036 flags |= CIFS_TRANSFORM_REQ; 5037 5038 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 5039 5040 req->PersistentFileId = io_parms->persistent_fid; 5041 req->VolatileFileId = io_parms->volatile_fid; 5042 req->WriteChannelInfoOffset = 0; 5043 req->WriteChannelInfoLength = 0; 5044 req->Channel = 0; 5045 req->Length = cpu_to_le32(io_parms->length); 5046 req->Offset = cpu_to_le64(io_parms->offset); 5047 req->DataOffset = cpu_to_le16( 5048 offsetof(struct smb2_write_req, Buffer)); 5049 req->RemainingBytes = 0; 5050 5051 trace_smb3_write_enter(xid, io_parms->persistent_fid, 5052 io_parms->tcon->tid, io_parms->tcon->ses->Suid, 5053 io_parms->offset, io_parms->length); 5054 5055 iov[0].iov_base = (char *)req; 5056 /* 1 for Buffer */ 5057 iov[0].iov_len = total_len - 1; 5058 5059 memset(&rqst, 0, sizeof(struct smb_rqst)); 5060 rqst.rq_iov = iov; 5061 rqst.rq_nvec = n_vec + 1; 5062 5063 if (retries) 5064 smb2_set_replay(server, &rqst); 5065 5066 rc = cifs_send_recv(xid, io_parms->tcon->ses, server, 5067 &rqst, 5068 &resp_buftype, flags, &rsp_iov); 5069 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; 5070 5071 if (rc) { 5072 trace_smb3_write_err(xid, 5073 req->PersistentFileId, 5074 io_parms->tcon->tid, 5075 io_parms->tcon->ses->Suid, 5076 io_parms->offset, io_parms->length, rc); 5077 cifs_stats_fail_inc(io_parms->tcon, 
SMB2_WRITE_HE); 5078 cifs_dbg(VFS, "Send error in write = %d\n", rc); 5079 } else { 5080 *nbytes = le32_to_cpu(rsp->DataLength); 5081 trace_smb3_write_done(xid, 5082 req->PersistentFileId, 5083 io_parms->tcon->tid, 5084 io_parms->tcon->ses->Suid, 5085 io_parms->offset, *nbytes); 5086 } 5087 5088 cifs_small_buf_release(req); 5089 free_rsp_buf(resp_buftype, rsp); 5090 5091 if (is_replayable_error(rc) && 5092 smb2_should_replay(io_parms->tcon, &retries, &cur_sleep)) 5093 goto replay_again; 5094 5095 return rc; 5096 } 5097 5098 int posix_info_sid_size(const void *beg, const void *end) 5099 { 5100 size_t subauth; 5101 int total; 5102 5103 if (beg + 1 > end) 5104 return -1; 5105 5106 subauth = *(u8 *)(beg+1); 5107 if (subauth < 1 || subauth > 15) 5108 return -1; 5109 5110 total = 1 + 1 + 6 + 4*subauth; 5111 if (beg + total > end) 5112 return -1; 5113 5114 return total; 5115 } 5116 5117 int posix_info_parse(const void *beg, const void *end, 5118 struct smb2_posix_info_parsed *out) 5119 5120 { 5121 int total_len = 0; 5122 int owner_len, group_len; 5123 int name_len; 5124 const void *owner_sid; 5125 const void *group_sid; 5126 const void *name; 5127 5128 /* if no end bound given, assume payload to be correct */ 5129 if (!end) { 5130 const struct smb2_posix_info *p = beg; 5131 5132 end = beg + le32_to_cpu(p->NextEntryOffset); 5133 /* last element will have a 0 offset, pick a sensible bound */ 5134 if (end == beg) 5135 end += 0xFFFF; 5136 } 5137 5138 /* check base buf */ 5139 if (beg + sizeof(struct smb2_posix_info) > end) 5140 return -1; 5141 total_len = sizeof(struct smb2_posix_info); 5142 5143 /* check owner sid */ 5144 owner_sid = beg + total_len; 5145 owner_len = posix_info_sid_size(owner_sid, end); 5146 if (owner_len < 0) 5147 return -1; 5148 total_len += owner_len; 5149 5150 /* check group sid */ 5151 group_sid = beg + total_len; 5152 group_len = posix_info_sid_size(group_sid, end); 5153 if (group_len < 0) 5154 return -1; 5155 total_len += group_len; 5156 5157 /* check name len */ 5158 if (beg + total_len + 4 > end) 5159 return -1; 5160 name_len = le32_to_cpu(*(__le32 *)(beg + total_len)); 5161 if (name_len < 1 || name_len > 0xFFFF) 5162 return -1; 5163 total_len += 4; 5164 5165 /* check name */ 5166 name = beg + total_len; 5167 if (name + name_len > end) 5168 return -1; 5169 total_len += name_len; 5170 5171 if (out) { 5172 out->base = beg; 5173 out->size = total_len; 5174 out->name_len = name_len; 5175 out->name = name; 5176 memcpy(&out->owner, owner_sid, owner_len); 5177 memcpy(&out->group, group_sid, group_len); 5178 } 5179 return total_len; 5180 } 5181 5182 static int posix_info_extra_size(const void *beg, const void *end) 5183 { 5184 int len = posix_info_parse(beg, end, NULL); 5185 5186 if (len < 0) 5187 return -1; 5188 return len - sizeof(struct smb2_posix_info); 5189 } 5190 5191 static unsigned int 5192 num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry, 5193 size_t size) 5194 { 5195 int len; 5196 unsigned int entrycount = 0; 5197 unsigned int next_offset = 0; 5198 char *entryptr; 5199 FILE_DIRECTORY_INFO *dir_info; 5200 5201 if (bufstart == NULL) 5202 return 0; 5203 5204 entryptr = bufstart; 5205 5206 while (1) { 5207 if (entryptr + next_offset < entryptr || 5208 entryptr + next_offset > end_of_buf || 5209 entryptr + next_offset + size > end_of_buf) { 5210 cifs_dbg(VFS, "malformed search entry would overflow\n"); 5211 break; 5212 } 5213 5214 entryptr = entryptr + next_offset; 5215 dir_info = (FILE_DIRECTORY_INFO *)entryptr; 5216 5217 if (infotype == 
SMB_FIND_FILE_POSIX_INFO) 5218 len = posix_info_extra_size(entryptr, end_of_buf); 5219 else 5220 len = le32_to_cpu(dir_info->FileNameLength); 5221 5222 if (len < 0 || 5223 entryptr + len < entryptr || 5224 entryptr + len > end_of_buf || 5225 entryptr + len + size > end_of_buf) { 5226 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", 5227 end_of_buf); 5228 break; 5229 } 5230 5231 *lastentry = entryptr; 5232 entrycount++; 5233 5234 next_offset = le32_to_cpu(dir_info->NextEntryOffset); 5235 if (!next_offset) 5236 break; 5237 } 5238 5239 return entrycount; 5240 } 5241 5242 /* 5243 * Readdir/FindFirst 5244 */ 5245 int SMB2_query_directory_init(const unsigned int xid, 5246 struct cifs_tcon *tcon, 5247 struct TCP_Server_Info *server, 5248 struct smb_rqst *rqst, 5249 u64 persistent_fid, u64 volatile_fid, 5250 int index, int info_level) 5251 { 5252 struct smb2_query_directory_req *req; 5253 unsigned char *bufptr; 5254 __le16 asteriks = cpu_to_le16('*'); 5255 unsigned int output_size = CIFSMaxBufSize - 5256 MAX_SMB2_CREATE_RESPONSE_SIZE - 5257 MAX_SMB2_CLOSE_RESPONSE_SIZE; 5258 unsigned int total_len; 5259 struct kvec *iov = rqst->rq_iov; 5260 int len, rc; 5261 5262 rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server, 5263 (void **) &req, &total_len); 5264 if (rc) 5265 return rc; 5266 5267 switch (info_level) { 5268 case SMB_FIND_FILE_DIRECTORY_INFO: 5269 req->FileInformationClass = FILE_DIRECTORY_INFORMATION; 5270 break; 5271 case SMB_FIND_FILE_ID_FULL_DIR_INFO: 5272 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION; 5273 break; 5274 case SMB_FIND_FILE_POSIX_INFO: 5275 req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO; 5276 break; 5277 case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5278 req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION; 5279 break; 5280 default: 5281 cifs_tcon_dbg(VFS, "info level %u isn't supported\n", 5282 info_level); 5283 return -EINVAL; 5284 } 5285 5286 req->FileIndex = cpu_to_le32(index); 5287 req->PersistentFileId = persistent_fid; 5288 req->VolatileFileId = volatile_fid; 5289 5290 len = 0x2; 5291 bufptr = req->Buffer; 5292 memcpy(bufptr, &asteriks, len); 5293 5294 req->FileNameOffset = 5295 cpu_to_le16(sizeof(struct smb2_query_directory_req)); 5296 req->FileNameLength = cpu_to_le16(len); 5297 /* 5298 * BB could be 30 bytes or so longer if we used SMB2 specific 5299 * buffer lengths, but this is safe and close enough. 
5300 */ 5301 output_size = min_t(unsigned int, output_size, server->maxBuf); 5302 output_size = min_t(unsigned int, output_size, 2 << 15); 5303 req->OutputBufferLength = cpu_to_le32(output_size); 5304 5305 iov[0].iov_base = (char *)req; 5306 /* 1 for Buffer */ 5307 iov[0].iov_len = total_len - 1; 5308 5309 iov[1].iov_base = (char *)(req->Buffer); 5310 iov[1].iov_len = len; 5311 5312 trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid, 5313 tcon->ses->Suid, index, output_size); 5314 5315 return 0; 5316 } 5317 5318 void SMB2_query_directory_free(struct smb_rqst *rqst) 5319 { 5320 if (rqst && rqst->rq_iov) { 5321 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 5322 } 5323 } 5324 5325 int 5326 smb2_parse_query_directory(struct cifs_tcon *tcon, 5327 struct kvec *rsp_iov, 5328 int resp_buftype, 5329 struct cifs_search_info *srch_inf) 5330 { 5331 struct smb2_query_directory_rsp *rsp; 5332 size_t info_buf_size; 5333 char *end_of_smb; 5334 int rc; 5335 5336 rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base; 5337 5338 switch (srch_inf->info_level) { 5339 case SMB_FIND_FILE_DIRECTORY_INFO: 5340 info_buf_size = sizeof(FILE_DIRECTORY_INFO); 5341 break; 5342 case SMB_FIND_FILE_ID_FULL_DIR_INFO: 5343 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO); 5344 break; 5345 case SMB_FIND_FILE_POSIX_INFO: 5346 /* note that posix payload are variable size */ 5347 info_buf_size = sizeof(struct smb2_posix_info); 5348 break; 5349 case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5350 info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO); 5351 break; 5352 default: 5353 cifs_tcon_dbg(VFS, "info level %u isn't supported\n", 5354 srch_inf->info_level); 5355 return -EINVAL; 5356 } 5357 5358 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), 5359 le32_to_cpu(rsp->OutputBufferLength), rsp_iov, 5360 info_buf_size); 5361 if (rc) { 5362 cifs_tcon_dbg(VFS, "bad info payload"); 5363 return rc; 5364 } 5365 5366 srch_inf->unicode = true; 5367 5368 if (srch_inf->ntwrk_buf_start) { 5369 if (srch_inf->smallBuf) 5370 cifs_small_buf_release(srch_inf->ntwrk_buf_start); 5371 else 5372 cifs_buf_release(srch_inf->ntwrk_buf_start); 5373 } 5374 srch_inf->ntwrk_buf_start = (char *)rsp; 5375 srch_inf->srch_entries_start = srch_inf->last_entry = 5376 (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset); 5377 end_of_smb = rsp_iov->iov_len + (char *)rsp; 5378 5379 srch_inf->entries_in_buffer = num_entries( 5380 srch_inf->info_level, 5381 srch_inf->srch_entries_start, 5382 end_of_smb, 5383 &srch_inf->last_entry, 5384 info_buf_size); 5385 5386 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer; 5387 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n", 5388 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry, 5389 srch_inf->srch_entries_start, srch_inf->last_entry); 5390 if (resp_buftype == CIFS_LARGE_BUFFER) 5391 srch_inf->smallBuf = false; 5392 else if (resp_buftype == CIFS_SMALL_BUFFER) 5393 srch_inf->smallBuf = true; 5394 else 5395 cifs_tcon_dbg(VFS, "Invalid search buffer type\n"); 5396 5397 return 0; 5398 } 5399 5400 int 5401 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, 5402 u64 persistent_fid, u64 volatile_fid, int index, 5403 struct cifs_search_info *srch_inf) 5404 { 5405 struct smb_rqst rqst; 5406 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE]; 5407 struct smb2_query_directory_rsp *rsp = NULL; 5408 int resp_buftype = CIFS_NO_BUFFER; 5409 struct kvec rsp_iov; 5410 int rc = 0; 5411 struct cifs_ses *ses = tcon->ses; 5412 struct TCP_Server_Info *server; 5413 
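/* Like the other synchronous requests in this file, the exchange below uses the replay pattern: pick a channel, build and send the request, and on a replayable error rebuild and resend it, marking the retry via smb2_set_replay(); smb2_should_replay() tracks the retry count and backoff (cur_sleep) and decides when to give up. */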
int flags = 0; 5414 int retries = 0, cur_sleep = 1; 5415 5416 replay_again: 5417 /* reinitialize for possible replay */ 5418 flags = 0; 5419 server = cifs_pick_channel(ses); 5420 5421 if (!ses || !(ses->server)) 5422 return -EIO; 5423 5424 if (smb3_encryption_required(tcon)) 5425 flags |= CIFS_TRANSFORM_REQ; 5426 5427 memset(&rqst, 0, sizeof(struct smb_rqst)); 5428 memset(&iov, 0, sizeof(iov)); 5429 rqst.rq_iov = iov; 5430 rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE; 5431 5432 rc = SMB2_query_directory_init(xid, tcon, server, 5433 &rqst, persistent_fid, 5434 volatile_fid, index, 5435 srch_inf->info_level); 5436 if (rc) 5437 goto qdir_exit; 5438 5439 if (retries) 5440 smb2_set_replay(server, &rqst); 5441 5442 rc = cifs_send_recv(xid, ses, server, 5443 &rqst, &resp_buftype, flags, &rsp_iov); 5444 rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; 5445 5446 if (rc) { 5447 if (rc == -ENODATA && 5448 rsp->hdr.Status == STATUS_NO_MORE_FILES) { 5449 trace_smb3_query_dir_done(xid, persistent_fid, 5450 tcon->tid, tcon->ses->Suid, index, 0); 5451 srch_inf->endOfSearch = true; 5452 rc = 0; 5453 } else { 5454 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, 5455 tcon->ses->Suid, index, 0, rc); 5456 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); 5457 } 5458 goto qdir_exit; 5459 } 5460 5461 rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype, 5462 srch_inf); 5463 if (rc) { 5464 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, 5465 tcon->ses->Suid, index, 0, rc); 5466 goto qdir_exit; 5467 } 5468 resp_buftype = CIFS_NO_BUFFER; 5469 5470 trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid, 5471 tcon->ses->Suid, index, srch_inf->entries_in_buffer); 5472 5473 qdir_exit: 5474 SMB2_query_directory_free(&rqst); 5475 free_rsp_buf(resp_buftype, rsp); 5476 5477 if (is_replayable_error(rc) && 5478 smb2_should_replay(tcon, &retries, &cur_sleep)) 5479 goto replay_again; 5480 5481 return rc; 5482 } 5483 5484 int 5485 SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 5486 struct smb_rqst *rqst, 5487 u64 persistent_fid, u64 volatile_fid, u32 pid, 5488 u8 info_class, u8 info_type, u32 additional_info, 5489 void **data, unsigned int *size) 5490 { 5491 struct smb2_set_info_req *req; 5492 struct kvec *iov = rqst->rq_iov; 5493 unsigned int i, total_len; 5494 int rc; 5495 5496 rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server, 5497 (void **) &req, &total_len); 5498 if (rc) 5499 return rc; 5500 5501 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid); 5502 req->InfoType = info_type; 5503 req->FileInfoClass = info_class; 5504 req->PersistentFileId = persistent_fid; 5505 req->VolatileFileId = volatile_fid; 5506 req->AdditionalInformation = cpu_to_le32(additional_info); 5507 5508 req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req)); 5509 req->BufferLength = cpu_to_le32(*size); 5510 5511 memcpy(req->Buffer, *data, *size); 5512 total_len += *size; 5513 5514 iov[0].iov_base = (char *)req; 5515 /* 1 for Buffer */ 5516 iov[0].iov_len = total_len - 1; 5517 5518 for (i = 1; i < rqst->rq_nvec; i++) { 5519 le32_add_cpu(&req->BufferLength, size[i]); 5520 iov[i].iov_base = (char *)data[i]; 5521 iov[i].iov_len = size[i]; 5522 } 5523 5524 return 0; 5525 } 5526 5527 void 5528 SMB2_set_info_free(struct smb_rqst *rqst) 5529 { 5530 if (rqst && rqst->rq_iov) 5531 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ 5532 } 5533 5534 static int 5535 send_set_info(const unsigned int xid, struct cifs_tcon *tcon, 5536 u64 persistent_fid, u64 volatile_fid, u32 pid, u8 
info_class, 5537 u8 info_type, u32 additional_info, unsigned int num, 5538 void **data, unsigned int *size) 5539 { 5540 struct smb_rqst rqst; 5541 struct smb2_set_info_rsp *rsp = NULL; 5542 struct kvec *iov; 5543 struct kvec rsp_iov; 5544 int rc = 0; 5545 int resp_buftype; 5546 struct cifs_ses *ses = tcon->ses; 5547 struct TCP_Server_Info *server; 5548 int flags = 0; 5549 int retries = 0, cur_sleep = 1; 5550 5551 replay_again: 5552 /* reinitialize for possible replay */ 5553 flags = 0; 5554 server = cifs_pick_channel(ses); 5555 5556 if (!ses || !server) 5557 return -EIO; 5558 5559 if (!num) 5560 return -EINVAL; 5561 5562 if (smb3_encryption_required(tcon)) 5563 flags |= CIFS_TRANSFORM_REQ; 5564 5565 iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL); 5566 if (!iov) 5567 return -ENOMEM; 5568 5569 memset(&rqst, 0, sizeof(struct smb_rqst)); 5570 rqst.rq_iov = iov; 5571 rqst.rq_nvec = num; 5572 5573 rc = SMB2_set_info_init(tcon, server, 5574 &rqst, persistent_fid, volatile_fid, pid, 5575 info_class, info_type, additional_info, 5576 data, size); 5577 if (rc) { 5578 kfree(iov); 5579 return rc; 5580 } 5581 5582 if (retries) 5583 smb2_set_replay(server, &rqst); 5584 5585 rc = cifs_send_recv(xid, ses, server, 5586 &rqst, &resp_buftype, flags, 5587 &rsp_iov); 5588 SMB2_set_info_free(&rqst); 5589 rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; 5590 5591 if (rc != 0) { 5592 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); 5593 trace_smb3_set_info_err(xid, persistent_fid, tcon->tid, 5594 ses->Suid, info_class, (__u32)info_type, rc); 5595 } 5596 5597 free_rsp_buf(resp_buftype, rsp); 5598 kfree(iov); 5599 5600 if (is_replayable_error(rc) && 5601 smb2_should_replay(tcon, &retries, &cur_sleep)) 5602 goto replay_again; 5603 5604 return rc; 5605 } 5606 5607 int 5608 SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 5609 u64 volatile_fid, u32 pid, loff_t new_eof) 5610 { 5611 struct smb2_file_eof_info info; 5612 void *data; 5613 unsigned int size; 5614 5615 info.EndOfFile = cpu_to_le64(new_eof); 5616 5617 data = &info; 5618 size = sizeof(struct smb2_file_eof_info); 5619 5620 trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, new_eof); 5621 5622 return send_set_info(xid, tcon, persistent_fid, volatile_fid, 5623 pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE, 5624 0, 1, &data, &size); 5625 } 5626 5627 int 5628 SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon, 5629 u64 persistent_fid, u64 volatile_fid, 5630 struct cifs_ntsd *pnntsd, int pacllen, int aclflag) 5631 { 5632 return send_set_info(xid, tcon, persistent_fid, volatile_fid, 5633 current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag, 5634 1, (void **)&pnntsd, &pacllen); 5635 } 5636 5637 int 5638 SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, 5639 u64 persistent_fid, u64 volatile_fid, 5640 struct smb2_file_full_ea_info *buf, int len) 5641 { 5642 return send_set_info(xid, tcon, persistent_fid, volatile_fid, 5643 current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 5644 0, 1, (void **)&buf, &len); 5645 } 5646 5647 int 5648 SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, 5649 const u64 persistent_fid, const u64 volatile_fid, 5650 __u8 oplock_level) 5651 { 5652 struct smb_rqst rqst; 5653 int rc; 5654 struct smb2_oplock_break *req = NULL; 5655 struct cifs_ses *ses = tcon->ses; 5656 struct TCP_Server_Info *server; 5657 int flags = CIFS_OBREAK_OP; 5658 unsigned int total_len; 5659 struct kvec iov[1]; 5660 struct kvec rsp_iov; 5661 int resp_buf_type; 5662 int 
int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_oplock_break *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = CIFS_OBREAK_OP;
	server = cifs_pick_channel(ses);

	cifs_dbg(FYI, "SMB2_oplock_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->hdr.CreditRequest = cpu_to_le16(1);

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
	}

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

void
smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			     struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree  = kst->f_bavail =
			le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}

static void
copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
			      struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(response_data->BlockSize);
	kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
	kst->f_bfree =  le64_to_cpu(response_data->BlocksAvail);
	if (response_data->UserBlocksAvail == cpu_to_le64(-1))
		kst->f_bavail = kst->f_bfree;
	else
		kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
	if (response_data->TotalFileNodes != cpu_to_le64(-1))
		kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
	if (response_data->FreeFileNodes != cpu_to_le64(-1))
		kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);

	return;
}

static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
		   struct TCP_Server_Info *server,
		   int level, int outbuf_len, u64 persistent_fid,
		   u64 volatile_fid)
{
	int rc;
	struct smb2_query_info_req *req;
	unsigned int total_len;

	cifs_dbg(FYI, "Query FSInfo level %d\n", level);

	if ((tcon->ses == NULL) || server == NULL)
		return -EIO;

	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 1 for pad */
	req->InputBufferOffset =
			cpu_to_le16(sizeof(struct smb2_query_info_req));
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp));

	iov->iov_base = (char *)req;
	iov->iov_len = total_len;
	return 0;
}

static inline void free_qfs_info_req(struct kvec *iov)
{
	cifs_buf_release(iov->iov_base);
}
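/*
 * SMB3.1.1 POSIX extensions: query FS_POSIX_INFORMATION on the share and
 * translate the response into struct kstatfs.
 */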
int
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	FILE_SYSTEM_POSIX_INFO *info = NULL;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	rc = build_qfs_info_req(&iov, tcon, server,
				FS_POSIX_INFORMATION,
				sizeof(FILE_SYSTEM_POSIX_INFO),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	free_qfs_info_req(&iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto posix_qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (FILE_SYSTEM_POSIX_INFO *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(FILE_SYSTEM_POSIX_INFO));
	if (!rc)
		copy_posix_fs_info_to_kstatfs(info, fsdata);

posix_qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	struct smb2_fs_full_size_info *info = NULL;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	rc = build_qfs_info_req(&iov, tcon, server,
				FS_FULL_SIZE_INFORMATION,
				sizeof(struct smb2_fs_full_size_info),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	free_qfs_info_req(&iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, fsdata);

qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}
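/*
 * Query filesystem attribute, device, sector size or volume information
 * for the share and cache the result in the tcon.
 */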
int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, int level)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype, max_len, min_len;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	unsigned int rsp_len, offset;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	if (level == FS_DEVICE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
	} else if (level == FS_ATTRIBUTE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
		min_len = MIN_FS_ATTR_INFO_SIZE;
	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
		max_len = sizeof(struct smb3_fs_ss_info);
		min_len = sizeof(struct smb3_fs_ss_info);
	} else if (level == FS_VOLUME_INFORMATION) {
		max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
		min_len = sizeof(struct smb3_fs_vol_info);
	} else {
		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
		return -EINVAL;
	}

	rc = build_qfs_info_req(&iov, tcon, server,
				level, max_len,
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	free_qfs_info_req(&iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsattr_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
	offset = le16_to_cpu(rsp->OutputBufferOffset);
	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
	if (rc)
		goto qfsattr_exit;

	if (level == FS_ATTRIBUTE_INFORMATION)
		memcpy(&tcon->fsAttrInfo, offset
			+ (char *)rsp, min_t(unsigned int,
			rsp_len, max_len));
	else if (level == FS_DEVICE_INFORMATION)
		memcpy(&tcon->fsDevInfo, offset
			+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
	else if (level == FS_SECTOR_SIZE_INFORMATION) {
		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
			(offset + (char *)rsp);
		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
		tcon->perf_sector_size =
			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
	} else if (level == FS_VOLUME_INFORMATION) {
		struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
			(offset + (char *)rsp);
		tcon->vol_serial_number = vol_info->VolumeSerialNumber;
		tcon->vol_create_time = vol_info->VolumeCreationTime;
	}

qfsattr_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}
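/*
 * Send an SMB2_LOCK request containing num_lock lock/unlock elements for
 * the given file handle on behalf of process pid.
 */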
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	struct smb_rqst rqst;
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	struct kvec rsp_iov;
	int resp_buf_type;
	unsigned int count;
	int flags = CIFS_NO_RSP_BUF;
	unsigned int total_len;
	struct TCP_Server_Info *server;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = CIFS_NO_RSP_BUF;
	server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);

	rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, tcon->ses, server,
			    &rqst, &resp_buf_type, flags,
			    &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
		trace_smb3_lock_err(xid, persist_fid, tcon->tid,
				    tcon->ses->Suid, rc);
	}

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}
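/*
 * Acknowledge a lease break: send the lease ack variant of
 * SMB2_OPLOCK_BREAK (StructureSize 36) carrying the lease key and the
 * new lease state.
 */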
int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_lease_ack *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;
	__u64 *please_key_high;
	__u64 *please_key_low;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "SMB2_lease_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	total_len += 12;

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);

	please_key_low = (__u64 *)lease_key;
	please_key_high = (__u64 *)(lease_key+8);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
				     ses->Suid, *please_key_low, *please_key_high, rc);
		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
	} else
		trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
				      ses->Suid, *please_key_low, *please_key_high);

	return rc;
}