// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines Corp., 2009, 2013
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   Contains the routines for constructing the SMB2 PDUs themselves
 *
 */

/* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
/* Note that there are handle based routines which must be                   */
/* treated slightly differently for reconnection purposes since we never     */
/* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "../common/smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
#include "../common/smbdirect/smbdirect.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif
#include "cached_dir.h"
#include "compress.h"
#include "fs_context.h"

/*
 *  The following table defines the expected "StructureSize" of SMB2 requests
 *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

int smb3_encryption_required(const struct cifs_tcon *tcon)
{
	if (!tcon || !tcon->ses)
		return 0;
	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
		return 1;
	if (tcon->seal &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		return 1;
	if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		return 1;
	return 0;
}

static void
smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
		  const struct cifs_tcon *tcon,
		  struct TCP_Server_Info *server)
{
	struct smb3_hdr_req *smb3_hdr;

	shdr->ProtocolId = SMB2_PROTO_NUMBER;
	shdr->StructureSize = cpu_to_le16(64);
	shdr->Command = smb2_cmd;

	if (server) {
		/* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
		if (server->dialect >= SMB30_PROT_ID) {
			smb3_hdr = (struct smb3_hdr_req *)shdr;
			/*
			 * if primary channel is not set yet, use default
			 * channel for chan sequence num
			 */
			if (SERVER_IS_CHAN(server))
				smb3_hdr->ChannelSequence =
					cpu_to_le16(server->primary_server->channel_sequence_num);
			else
				smb3_hdr->ChannelSequence =
					cpu_to_le16(server->channel_sequence_num);
		}
		spin_lock(&server->req_lock);
		/* Request up to 10 credits but don't go over the limit. */
		if (server->credits >= server->max_credits)
			shdr->CreditRequest = cpu_to_le16(0);
		else
			shdr->CreditRequest = cpu_to_le16(
				min_t(int, server->max_credits -
						server->credits, 10));
		spin_unlock(&server->req_lock);
	} else {
		shdr->CreditRequest = cpu_to_le16(2);
	}
	shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
	if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		shdr->CreditCharge = cpu_to_le16(1);
	/* else CreditCharge MBZ */

	shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
	/* Uid is not converted */
	if (tcon->ses)
		shdr->SessionId = cpu_to_le64(tcon->ses->Suid);

	/*
	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
	 * to pass the path on the Open SMB prefixed by \\server\share.
	 * Not sure when we would need to do the augmented path (if ever) and
	 * setting this flag breaks the SMB2 open operation since it is
	 * illegal to send an empty path name (without \\server\share prefix)
	 * when the DFS flag is set in the SMB open header. We could
	 * consider setting the flag on all operations other than open
	 * but it is safer to not set it for now.
	 */
/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */

	if (server && server->sign && !smb3_encryption_required(tcon))
		shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	return;
}

/* helper function for code reuse */
static int
cifs_chan_skip_or_disable(struct cifs_ses *ses,
			  struct TCP_Server_Info *server,
			  bool from_reconnect)
{
	struct TCP_Server_Info *pserver;
	unsigned int chan_index;

	if (SERVER_IS_CHAN(server)) {
		cifs_dbg(VFS,
			 "server %s does not support multichannel anymore. Skip secondary channel\n",
			 ses->server->hostname);

		spin_lock(&ses->chan_lock);
		chan_index = cifs_ses_get_chan_index(ses, server);
		if (chan_index == CIFS_INVAL_CHAN_INDEX) {
			spin_unlock(&ses->chan_lock);
			goto skip_terminate;
		}

		ses->chans[chan_index].server = NULL;
		server->terminate = true;
		spin_unlock(&ses->chan_lock);

		/*
		 * the above reference of server by channel
		 * needs to be dropped without holding chan_lock
		 * as cifs_put_tcp_session takes a higher lock
		 * i.e. cifs_tcp_ses_lock
		 */
		cifs_put_tcp_session(server, from_reconnect);

		cifs_signal_cifsd_for_reconnect(server, false);

		/* mark primary server as needing reconnect */
		pserver = server->primary_server;
		cifs_signal_cifsd_for_reconnect(pserver, false);
skip_terminate:
		return -EHOSTDOWN;
	}

	cifs_server_dbg(VFS,
		"server does not support multichannel anymore. Disable all other channels\n");
	cifs_disable_secondary_channels(ses);

	return 0;
}

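/*
 * Reconnect helper called before building most SMB2 requests: if the
 * transport or session went down, wait for cifsd to reconnect the socket,
 * then redo negotiate, session setup and tree connect as needed.  Handle
 * based commands (read, write, lock, ...) still get -EAGAIN so the caller
 * can reopen the file handle first.
 */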
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server, bool from_reconnect)
{
	struct cifs_ses *ses;
	int xid;
	int rc = 0;

	/*
	 * SMB2's NegProt, SessSetup, Logoff do not have a tcon yet, so checks
	 * for tcp and smb session status are done differently for those
	 * three - in the calling routine.
	 */
	if (tcon == NULL)
		return 0;

	if (smb2_command == SMB2_TREE_CONNECT)
		return 0;

	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_EXITING) {
		/*
		 * only tree disconnect allowed when disconnecting ...
		 */
		if (smb2_command != SMB2_TREE_DISCONNECT) {
			spin_unlock(&tcon->tc_lock);
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	spin_unlock(&tcon->tc_lock);

	ses = tcon->ses;
	if (!ses)
		return -EIO;
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_EXITING) {
		spin_unlock(&ses->ses_lock);
		return -EIO;
	}
	spin_unlock(&ses->ses_lock);
	if (!ses->server || !server)
		return -EIO;

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			spin_unlock(&server->srv_lock);
			return -EAGAIN;
		}
	}

	/* if server is marked for termination, cifsd will cleanup */
	if (server->terminate) {
		spin_unlock(&server->srv_lock);
		return -EHOSTDOWN;
	}
	spin_unlock(&server->srv_lock);

again:
	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
	if (rc)
		return rc;

	spin_lock(&ses->chan_lock);
	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
		spin_unlock(&ses->chan_lock);
		return 0;
	}
	spin_unlock(&ses->chan_lock);
	cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
		 tcon->ses->chans_need_reconnect,
		 tcon->need_reconnect);

	mutex_lock(&ses->session_mutex);
	/*
	 * Handle the case where a concurrent thread failed to negotiate or
	 * killed a channel.
	 */
	spin_lock(&server->srv_lock);
	switch (server->tcpStatus) {
	case CifsExiting:
		spin_unlock(&server->srv_lock);
		mutex_unlock(&ses->session_mutex);
		return -EHOSTDOWN;
	case CifsNeedReconnect:
		spin_unlock(&server->srv_lock);
		mutex_unlock(&ses->session_mutex);
		if (!tcon->retry)
			return -EHOSTDOWN;
		goto again;
	default:
		break;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * need to prevent multiple threads trying to simultaneously
	 * reconnect the same SMB session
	 */
	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	if (!cifs_chan_needs_reconnect(ses, server) &&
	    ses->ses_status == SES_GOOD) {
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
		/* this means that we only need to tree connect */
		if (tcon->need_reconnect)
			goto skip_sess_setup;

		mutex_unlock(&ses->session_mutex);
		goto out;
	}
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);

	rc = cifs_negotiate_protocol(0, ses, server);
	if (rc) {
		mutex_unlock(&ses->session_mutex);
		if (!tcon->retry)
			return -EHOSTDOWN;
		goto again;
	}
	/*
	 * if server stopped supporting multichannel
	 * and the first channel reconnected, disable all the others.
	 */
	if (ses->chan_count > 1 &&
	    !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
		rc = cifs_chan_skip_or_disable(ses, server,
					       from_reconnect);
		if (rc) {
			mutex_unlock(&ses->session_mutex);
			goto out;
		}
	}

	rc = cifs_setup_session(0, ses, server, ses->local_nls);
	if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
		/*
		 * Try alternate password for next reconnect (e.g. key rotation
		 * could be enabled on the server) if an alternate password is
		 * available and the current password is expired, but do not
		 * swap on non pwd related errors like host down
		 */
		if (ses->password2)
			swap(ses->password2, ses->password);
	}
	if (rc) {
		mutex_unlock(&ses->session_mutex);
		if (rc == -EACCES && !tcon->retry)
			return -EHOSTDOWN;
		goto out;
	}

skip_sess_setup:
	if (!tcon->need_reconnect) {
		mutex_unlock(&ses->session_mutex);
		goto out;
	}
	cifs_mark_open_files_invalid(tcon);
	if (tcon->use_persistent)
		tcon->need_reopen_files = true;

	rc = cifs_tree_connect(0, tcon);

	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc) {
		/* If sess reconnected but tcon didn't, something strange ... */
		mutex_unlock(&ses->session_mutex);
		cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
		goto out;
	}

	spin_lock(&ses->ses_lock);
	if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
		spin_unlock(&ses->ses_lock);
		mutex_unlock(&ses->session_mutex);
		goto skip_add_channels;
	}
	ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
	spin_unlock(&ses->ses_lock);

	if (!rc &&
	    (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
	    server->ops->query_server_interfaces) {
		/*
		 * query server network interfaces, in case they change.
		 * Also mark the session as pending this update while the query
		 * is in progress. This will be used to avoid calling
		 * smb2_reconnect recursively.
		 */
		ses->flags |= CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
		xid = get_xid();
		rc = server->ops->query_server_interfaces(xid, tcon, false);
		free_xid(xid);
		ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;

		if (!tcon->ipc && !tcon->dummy)
			queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
					   (SMB_INTERFACE_POLL_INTERVAL * HZ));

		mutex_unlock(&ses->session_mutex);

		if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
			/*
			 * some servers like Azure SMB server do not advertise
			 * that multichannel has been disabled with server
			 * capabilities, rather return STATUS_NOT_IMPLEMENTED.
			 * treat this as server not supporting multichannel
			 */

			rc = cifs_chan_skip_or_disable(ses, server,
						       from_reconnect);
			goto skip_add_channels;
		} else if (rc)
			cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
				 __func__, rc);

		if (ses->chan_max > ses->chan_count &&
		    ses->iface_count &&
		    !SERVER_IS_CHAN(server)) {
			if (ses->chan_count == 1)
				cifs_server_dbg(VFS, "supports multichannel now\n");

			cifs_try_adding_channels(ses);
		}
	} else {
		mutex_unlock(&ses->session_mutex);
	}

skip_add_channels:
	spin_lock(&ses->ses_lock);
	ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
	spin_unlock(&ses->ses_lock);

	if (smb2_command != SMB2_INTERNAL_CMD)
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);

	atomic_inc(&tconInfoReconnectCount);
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
	case SMB2_IOCTL:
		rc = -EAGAIN;
	}
	return rc;
}

static void
fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
	       struct TCP_Server_Info *server,
	       void *buf,
	       unsigned int *total_len)
{
	struct smb2_pdu *spdu = buf;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(buf, 0, 256);

	smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
	spdu->StructureSize2 = cpu_to_le16(parmsize);

	*total_len = parmsize + sizeof(struct smb2_hdr);
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
				 struct TCP_Server_Info *server,
				 void **request_buf, unsigned int *total_len)
{
	/* BB eventually switch this to SMB2 specific small buf size */
	switch (smb2_command) {
	case SMB2_SET_INFO:
	case SMB2_QUERY_INFO:
		*request_buf = cifs_buf_get();
		break;
	default:
		*request_buf = cifs_small_buf_get();
		break;
	}
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	fill_small_buf(smb2_command, tcon, server,
		       (struct smb2_hdr *)(*request_buf),
		       total_len);

	if (tcon != NULL) {
		uint16_t com_code = le16_to_cpu(smb2_command);

		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return 0;
}

static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	int rc;

	rc = smb2_reconnect(smb2_command, tcon, server, false);
	if (rc)
		return rc;

	return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
				     total_len);
}

static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
			       struct TCP_Server_Info *server,
			       void **request_buf, unsigned int *total_len)
{
	/*
	 * Skip reconnect in one of the following cases:
	 * 1. For FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs
	 * 2. For FSCTL_QUERY_NETWORK_INTERFACE_INFO IOCTL when called from
	 * smb2_reconnect (indicated by the CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES
	 * ses flag)
	 */
	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO ||
	    (opcode == FSCTL_QUERY_NETWORK_INTERFACE_INFO &&
	     (tcon->ses->flags & CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES)))
		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
					     request_buf, total_len);

	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
				   request_buf, total_len);
}

/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */

static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(38);
	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}

static void
build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
	pneg_ctxt->DataLength =
		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
			    - sizeof(struct smb2_neg_context));
	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
	pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
	pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
	pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
}

static unsigned int
build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
{
	unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
	unsigned short num_algs = 1; /* number of signing algorithms sent */

	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
	/*
	 * Context Data length must be rounded to multiple of 8 for some servers
	 */
	pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
					    sizeof(struct smb2_neg_context) +
					    (num_algs * sizeof(u16)), 8));
	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
	pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);

	ctxt_len += sizeof(__le16) * num_algs;
	ctxt_len = ALIGN(ctxt_len, 8);
	return ctxt_len;
	/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
}

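/*
 * Advertise the ciphers we are willing to use: AES-256-GCM only when
 * require_gcm_256 is set, otherwise GCM is listed ahead of CCM (with
 * AES-256-GCM also offered when enable_gcm_256 is set).
 */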
static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
	if (require_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
		pneg_ctxt->CipherCount = cpu_to_le16(1);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
	} else if (enable_gcm_256) {
		pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(3);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
		pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
	} else {
		pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
		pneg_ctxt->CipherCount = cpu_to_le16(2);
		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
	}
}

static unsigned int
build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
{
	struct nls_table *cp = load_nls_default();

	pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;

	/* copy up to max of first 100 bytes of server name to NetName field */
	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
	/* context size is DataLength + minimal smb2_neg_context */
	return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
}

static void
build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	pneg_ctxt->Name[0] = 0x93;
	pneg_ctxt->Name[1] = 0xAD;
	pneg_ctxt->Name[2] = 0x25;
	pneg_ctxt->Name[3] = 0x50;
	pneg_ctxt->Name[4] = 0x9C;
	pneg_ctxt->Name[5] = 0xB4;
	pneg_ctxt->Name[6] = 0x11;
	pneg_ctxt->Name[7] = 0xE7;
	pneg_ctxt->Name[8] = 0xB4;
	pneg_ctxt->Name[9] = 0x23;
	pneg_ctxt->Name[10] = 0x83;
	pneg_ctxt->Name[11] = 0xDE;
	pneg_ctxt->Name[12] = 0x96;
	pneg_ctxt->Name[13] = 0x8B;
	pneg_ctxt->Name[14] = 0xCD;
	pneg_ctxt->Name[15] = 0x7C;
}

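/*
 * Append the SMB3.1.1 negotiate contexts (preauth integrity, encryption and
 * POSIX extensions, plus netname, compression and signing capabilities when
 * applicable) after the fixed part of the NEGOTIATE request, keeping each
 * context 8-byte aligned and updating total_len accordingly.
 */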
static void
assemble_neg_contexts(struct smb2_negotiate_req *req,
		      struct TCP_Server_Info *server, unsigned int *total_len)
{
	unsigned int ctxt_len, neg_context_count;
	struct TCP_Server_Info *pserver;
	char *pneg_ctxt;
	char *hostname;

	if (*total_len > 200) {
		/* In case length corrupted don't want to overrun smb buffer */
		cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
		return;
	}

	/*
	 * round up total_len of fixed part of SMB3 negotiate request to 8
	 * byte boundary before adding negotiate contexts
	 */
	*total_len = ALIGN(*total_len, 8);

	pneg_ctxt = (*total_len) + (char *)req;
	req->NegotiateContextOffset = cpu_to_le32(*total_len);

	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
	ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
	ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
	*total_len += ctxt_len;
	pneg_ctxt += ctxt_len;

	/*
	 * secondary channels don't have the hostname field populated
	 * use the hostname field in the primary channel instead
	 */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
	cifs_server_lock(pserver);
	hostname = pserver->hostname;
	if (hostname && (hostname[0] != 0)) {
		ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
					      hostname);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count = 3;
	} else
		neg_context_count = 2;
	cifs_server_unlock(pserver);

	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
	*total_len += sizeof(struct smb2_posix_neg_context);
	pneg_ctxt += sizeof(struct smb2_posix_neg_context);
	neg_context_count++;

	if (server->compression.requested) {
		build_compression_ctxt((struct smb2_compression_capabilities_context *)
				pneg_ctxt);
		ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	if (enable_negotiate_signing) {
		ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
				pneg_ctxt);
		*total_len += ctxt_len;
		pneg_ctxt += ctxt_len;
		neg_context_count++;
	}

	/* check for and add transport_capabilities and signing capabilities */
	req->NegotiateContextCount = cpu_to_le16(neg_context_count);
}

/* If invalid preauth context warn but use what we requested, SHA-512 */
static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one HashAlgorithms member is accounted for.
	 */
	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad preauth context\n");
		return;
	} else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
		pr_warn_once("server sent invalid SaltLength\n");
		return;
	}
	if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
		pr_warn_once("Invalid SMB3 hash algorithm count\n");
	if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
		pr_warn_once("unknown SMB3 hash algorithm\n");
}

static void decode_compress_ctx(struct TCP_Server_Info *server,
				struct smb2_compression_capabilities_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);
	__le16 alg;

	server->compression.enabled = false;

	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one CompressionAlgorithms member is accounted
	 * for.
	 */
	if (len < 10) {
		pr_warn_once("server sent bad compression cntxt\n");
		return;
	}

	if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
		pr_warn_once("invalid SMB3 compress algorithm count\n");
		return;
	}

	alg = ctxt->CompressionAlgorithms[0];

	/* 'NONE' (0) compressor type is never negotiated */
	if (alg == 0 || le16_to_cpu(alg) > 3) {
		pr_warn_once("invalid compression algorithm '%u'\n", alg);
		return;
	}

	server->compression.alg = alg;
	server->compression.enabled = true;
}

static int decode_encrypt_ctx(struct TCP_Server_Info *server,
			      struct smb2_encryption_neg_context *ctxt)
{
	unsigned int len = le16_to_cpu(ctxt->DataLength);

	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one Cipher flexible array member is accounted
	 * for.
	 */
	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
		pr_warn_once("server sent bad crypto ctxt len\n");
		return -EINVAL;
	}

	if (le16_to_cpu(ctxt->CipherCount) != 1) {
		pr_warn_once("Invalid SMB3.11 cipher count\n");
		return -EINVAL;
	}
	cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
	if (require_gcm_256) {
		if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
			cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
			return -EOPNOTSUPP;
		}
	} else if (ctxt->Ciphers[0] == 0) {
		/*
		 * e.g. if server only supported AES256_CCM (very unlikely)
		 * or server supported no encryption types or had all disabled.
		 * Since GLOBAL_CAP_ENCRYPTION will be not set, in the case
		 * in which mount requested encryption ("seal") checks later
		 * on during tree connection will return proper rc, but if
		 * seal not requested by client, since server is allowed to
		 * return 0 to indicate no supported cipher, we can't fail here
		 */
		server->cipher_type = 0;
		server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
		pr_warn_once("Server does not support requested encryption types\n");
		return 0;
	} else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
		/* server returned a cipher we didn't ask for */
		pr_warn_once("Invalid SMB3.11 cipher returned\n");
		return -EINVAL;
	}
	server->cipher_type = ctxt->Ciphers[0];
	server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
	return 0;
}

static void decode_signing_ctx(struct TCP_Server_Info *server,
			       struct smb2_signing_capabilities *pctxt)
{
	unsigned int len = le16_to_cpu(pctxt->DataLength);

	/*
	 * Caller checked that DataLength remains within SMB boundary. We still
	 * need to confirm that one SigningAlgorithms flexible array member is
	 * accounted for.
	 */
	if ((len < 4) || (len > 16)) {
		pr_warn_once("server sent bad signing negcontext\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
		pr_warn_once("Invalid signing algorithm count\n");
		return;
	}
	if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
		pr_warn_once("unknown signing algorithm\n");
		return;
	}

	server->signing_negotiated = true;
	server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
	cifs_dbg(FYI, "signing algorithm %d chosen\n",
		 server->signing_algorithm);
}

static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
				     struct TCP_Server_Info *server,
				     unsigned int len_of_smb)
{
	struct smb2_neg_context *pctx;
	unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
	unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
	unsigned int len_of_ctxts, i;
	int rc = 0;

	cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
	if (len_of_smb <= offset) {
		cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
		return -EINVAL;
	}

	len_of_ctxts = len_of_smb - offset;

	for (i = 0; i < ctxt_cnt; i++) {
		int clen;
		/* check that offset is not beyond end of SMB */
		if (len_of_ctxts < sizeof(struct smb2_neg_context))
			break;

		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
		clen = sizeof(struct smb2_neg_context)
			+ le16_to_cpu(pctx->DataLength);
		/*
		 * 2.2.4 SMB2 NEGOTIATE Response
		 * Subsequent negotiate contexts MUST appear at the first 8-byte
		 * aligned offset following the previous negotiate context.
		 */
		if (i + 1 != ctxt_cnt)
			clen = ALIGN(clen, 8);
		if (clen > len_of_ctxts)
			break;

		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
			decode_preauth_context(
				(struct smb2_preauth_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
			rc = decode_encrypt_ctx(server,
				(struct smb2_encryption_neg_context *)pctx);
		else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
			decode_compress_ctx(server,
				(struct smb2_compression_capabilities_context *)pctx);
		else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
			server->posix_ext_supported = true;
		else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
			decode_signing_ctx(server,
				(struct smb2_signing_capabilities *)pctx);
		else
			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
					le16_to_cpu(pctx->ContextType));
		if (rc)
			break;

		offset += clen;
		len_of_ctxts -= clen;
	}
	return rc;
}

static struct create_posix *
create_posix_buf(umode_t mode)
{
	struct create_posix *buf;

	buf = kzalloc(sizeof(struct create_posix),
		      GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset =
		cpu_to_le16(offsetof(struct create_posix, Mode));
	buf->ccontext.DataLength = cpu_to_le32(4);
	buf->ccontext.NameOffset =
		cpu_to_le16(offsetof(struct create_posix, Name));
	buf->ccontext.NameLength = cpu_to_le16(16);

	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	buf->Name[0] = 0x93;
	buf->Name[1] = 0xAD;
	buf->Name[2] = 0x25;
	buf->Name[3] = 0x50;
	buf->Name[4] = 0x9C;
	buf->Name[5] = 0xB4;
	buf->Name[6] = 0x11;
	buf->Name[7] = 0xE7;
	buf->Name[8] = 0xB4;
	buf->Name[9] = 0x23;
	buf->Name[10] = 0x83;
	buf->Name[11] = 0xDE;
	buf->Name[12] = 0x96;
	buf->Name[13] = 0x8B;
	buf->Name[14] = 0xCD;
	buf->Name[15] = 0x7C;
	buf->Mode = cpu_to_le32(mode);
	cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
	return buf;
}

static int
add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
{
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_posix_buf(mode);
	if (mode == ACL_NO_MODE)
		cifs_dbg(FYI, "%s: no mode\n", __func__);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_posix);
	*num_iovec = num + 1;
	return 0;
}

/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */

int
SMB2_negotiate(const unsigned int xid,
	       struct cifs_ses *ses,
	       struct TCP_Server_Info *server)
{
	struct smb_rqst rqst;
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int rc;
	int resp_buftype;
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;
	unsigned int total_len;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->hdr.SessionId = 0;

	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(3);
		total_len += 6;
	} else if (strcmp(server->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
		req->DialectCount = cpu_to_le16(4);
		total_len += 8;
	} else {
		/* otherwise send specific dialect */
		req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
		req->DialectCount = cpu_to_le16(1);
		total_len += 2;
	}

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
	if (ses->chan_max > 1)
		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);

	/* ClientGUID must be zero for SMB2.02 dialect */
	if (server->vals->protocol_id == SMB20_PROT_ID)
		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
	else {
		memcpy(req->ClientGUID, server->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
		if ((server->vals->protocol_id == SMB311_PROT_ID) ||
		    (strcmp(server->vals->version_string,
		     SMB3ANY_VERSION_STRING) == 0) ||
		    (strcmp(server->vals->version_string,
		     SMBDEFAULT_VERSION_STRING) == 0))
			assemble_neg_contexts(req, server, &total_len);
	}
	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(req);
	rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc == -EOPNOTSUPP) {
		cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
		goto neg_exit;
	} else if (rc != 0)
		goto neg_exit;

	rc = -EIO;
	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2.1 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			/* ops default to SMB3.0, so update for SMB3.1.1 */
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (strcmp(server->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
			cifs_server_dbg(VFS,
				"SMB2 dialect returned but not requested\n");
			goto neg_exit;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
			/* ops default to SMB3.0, so update for SMB2.1 */
			server->ops = &smb21_operations;
			server->vals = &smb21_values;
		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
			server->ops = &smb311_operations;
			server->vals = &smb311_values;
		}
	} else if (le16_to_cpu(rsp->DialectRevision) !=
		   server->vals->protocol_id) {
		/* if requested single dialect ensure returned dialect matched */
		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
				le16_to_cpu(rsp->DialectRevision));
		goto neg_exit;
	}

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
	else {
		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
				le16_to_cpu(rsp->DialectRevision));
		goto neg_exit;
	}

	rc = 0;
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/*
	 * Keep a copy of the hash after negprot. This hash will be
	 * the starting hash value for all sessions made from this
	 * server.
	 */
	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
	       SMB2_PREAUTH_HASH_SIZE);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	/* set it to the maximum buffer size value we can send with 1 credit */
	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
			       SMB2_MAX_BUFFER_SIZE);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
			 server->sec_mode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	/*
	 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context.
	 * Set the cipher type manually.
	 */
	if ((server->dialect == SMB30_PROT_ID ||
	     server->dialect == SMB302_PROT_ID) &&
	    (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       (struct smb2_hdr *)rsp);
	/*
	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
	 * for us will be
	 *	ses->sectype = RawNTLMSSP;
	 * but for time being this is our only auth choice so doesn't matter.
	 * We just found a server which sets blob length to zero expecting raw.
	 */
	if (blob_length == 0) {
		cifs_dbg(FYI, "missing security blob on negprot\n");
		server->sec_ntlmssp = true;
	}

	rc = cifs_enable_signing(server, ses->sign);
	if (rc)
		goto neg_exit;
	if (blob_length) {
		rc = decode_negTokenInit(security_blob, blob_length, server);
		if (rc == 1)
			rc = 0;
		else if (rc == 0)
			rc = -EIO;
	}

	if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
		if (rsp->NegotiateContextCount)
			rc = smb311_decode_neg_context(rsp, server,
						       rsp_iov.iov_len);
		else
			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
	}

	if (server->cipher_type && !rc)
		rc = smb3_crypto_aead_allocate(server);
neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

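/*
 * Issue FSCTL_VALIDATE_NEGOTIATE_INFO on the tree connection and check that
 * the dialect, security mode and capabilities in the response match what was
 * originally negotiated, to detect downgrade or tampering of the negotiate
 * exchange.  Skipped on SMB3.1.1 where preauth integrity supersedes it.
 */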
int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc;
	struct validate_negotiate_info_req *pneg_inbuf;
	struct validate_negotiate_info_rsp *pneg_rsp = NULL;
	u32 rsplen;
	u32 inbuflen; /* max of 4 dialects */
	struct TCP_Server_Info *server = tcon->ses->server;

	cifs_dbg(FYI, "validate negotiate\n");

	/* In SMB3.11 preauth integrity supersedes validate negotiate */
	if (server->dialect == SMB311_PROT_ID)
		return 0;

	/*
	 * validation ioctl must be signed, so no point sending this if we
	 * can not sign it (ie are not known user). Even if signing is not
	 * required (enabled but not negotiated), in those cases we selectively
	 * sign just this, the first and only signed request on a connection.
	 * Having validation of negotiate info helps reduce attack vectors.
	 */
	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
		return 0; /* validation requires signing */

	if (tcon->ses->user_name == NULL) {
		cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
		return 0; /* validation requires signing */
	}

	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
		cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");

	pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
	if (!pneg_inbuf)
		return -ENOMEM;

	pneg_inbuf->Capabilities =
			cpu_to_le32(server->vals->req_capabilities);
	if (tcon->ses->chan_max > 1)
		pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);

	memcpy(pneg_inbuf->Guid, server->client_guid,
	       SMB2_CLIENT_GUID_SIZE);

	if (tcon->ses->sign)
		pneg_inbuf->SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		pneg_inbuf->SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		pneg_inbuf->SecurityMode = 0;

	if (strcmp(server->vals->version_string,
		   SMB3ANY_VERSION_STRING) == 0) {
		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
		pneg_inbuf->DialectCount = cpu_to_le16(3);
		/* SMB 2.1 not included so subtract one dialect from len */
		inbuflen = sizeof(*pneg_inbuf) -
				(sizeof(pneg_inbuf->Dialects[0]));
	} else if (strcmp(server->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
		pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
		pneg_inbuf->DialectCount = cpu_to_le16(4);
		/* structure is big enough for 4 dialects */
		inbuflen = sizeof(*pneg_inbuf);
	} else {
		/* otherwise specific dialect was requested */
		pneg_inbuf->Dialects[0] =
			cpu_to_le16(server->vals->protocol_id);
		pneg_inbuf->DialectCount = cpu_to_le16(1);
		/* structure is big enough for 4 dialects, sending only 1 */
		inbuflen = sizeof(*pneg_inbuf) -
				sizeof(pneg_inbuf->Dialects[0]) * 3;
	}

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
		FSCTL_VALIDATE_NEGOTIATE_INFO,
		(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
		(char **)&pneg_rsp, &rsplen);
	if (rc == -EOPNOTSUPP) {
		/*
		 * Old Windows versions or Netapp SMB server can return
		 * not supported error. Client should accept it.
		 */
		cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
		rc = 0;
		goto out_free_inbuf;
	} else if (rc != 0) {
		cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
			      rc);
		rc = -EIO;
		goto out_free_inbuf;
	}

	rc = -EIO;
	if (rsplen != sizeof(*pneg_rsp)) {
		cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
			      rsplen);

		/* relax check since Mac returns max bufsize allowed on ioctl */
		if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
			goto out_free_rsp;
	}

	/* check validate negotiate info response matches what we got earlier */
	if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
		goto vneg_out;

	if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
		goto vneg_out;

	/* do not validate server guid because not saved at negprot time yet */

	if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
	     SMB2_LARGE_FILES) != server->capabilities)
		goto vneg_out;

	/* validate negotiate successful */
	rc = 0;
	cifs_dbg(FYI, "validate negotiate info successful\n");
	goto out_free_rsp;

vneg_out:
	cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
out_free_rsp:
	kfree(pneg_rsp);
out_free_inbuf:
	kfree(pneg_inbuf);
	return rc;
}

enum securityEnum
smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
{
	switch (requested) {
	case Kerberos:
	case RawNTLMSSP:
		return requested;
	case NTLMv2:
		return RawNTLMSSP;
	case Unspecified:
		if (server->sec_ntlmssp &&
		    (global_secflags & CIFSSEC_MAY_NTLMSSP))
			return RawNTLMSSP;
		if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
		    (global_secflags & CIFSSEC_MAY_KRB5))
			return Kerberos;
		fallthrough;
	default:
		return Unspecified;
	}
}

struct SMB2_sess_data {
	unsigned int xid;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct nls_table *nls_cp;
	void (*func)(struct SMB2_sess_data *);
	int result;
	u64 previous_session;

	/* we will send the SMB in three pieces:
	 * a fixed length beginning part, an optional
	 * SPNEGO blob (which can be zero length), and a
	 * last part which will include the strings
	 * and rest of bcc area. This allows us to avoid
	 * a large buffer 17K allocation
	 */
	int buf0_type;
	struct kvec iov[2];
};

static int
SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct smb2_sess_setup_req *req;
	unsigned int total_len;
	bool is_binding = false;

	rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
				 (void **) &req,
				 &total_len);
	if (rc)
		return rc;

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
	spin_unlock(&ses->ses_lock);

	if (is_binding) {
		req->hdr.SessionId = cpu_to_le64(ses->Suid);
		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
		req->PreviousSessionId = 0;
		req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
		cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
	} else {
		/* First session, not a reauthenticate */
		req->hdr.SessionId = 0;
		/*
		 * if reconnect, we need to send previous sess id
		 * otherwise it is 0
		 */
		req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
		req->Flags = 0; /* MBZ */
		cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
			 sess_data->previous_session);
	}

	/* enough to enable echos and oplocks and one max size write */
	if (server->credits >= server->max_credits)
		req->hdr.CreditRequest = cpu_to_le16(0);
	else
		req->hdr.CreditRequest = cpu_to_le16(
			min_t(int, server->max_credits -
			      server->credits, 130));

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (server->sign)
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
	else
		req->SecurityMode = 0;

#ifdef CONFIG_CIFS_DFS_UPCALL
	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
#else
	req->Capabilities = 0;
#endif /* DFS_UPCALL */

	req->Channel = 0; /* MBZ */

	sess_data->iov[0].iov_base = (char *)req;
	/* 1 for pad */
	sess_data->iov[0].iov_len = total_len - 1;
	/*
	 * This variable will be used to clear the buffer
	 * allocated above in case of any error in the calling function.
	 */
	sess_data->buf0_type = CIFS_SMALL_BUFFER;

	return 0;
}

static void
SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
{
	struct kvec *iov = sess_data->iov;

	/* iov[1] is already freed by caller */
	if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
		memzero_explicit(iov[0].iov_base, iov[0].iov_len);

	free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
	sess_data->buf0_type = CIFS_NO_BUFFER;
}

static int
SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct smb_rqst rqst;
	struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
	struct kvec rsp_iov = { NULL, 0 };

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->SecurityBufferOffset =
		cpu_to_le16(sizeof(struct smb2_sess_setup_req));
	req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = sess_data->iov;
	rqst.rq_nvec = 2;

	/* BB add code to build os and lm fields */
	rc = cifs_send_recv(sess_data->xid, sess_data->ses,
			    sess_data->server,
			    &rqst,
			    &sess_data->buf0_type,
			    CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
	cifs_small_buf_release(sess_data->iov[0].iov_base);
	if (rc == 0)
		sess_data->ses->expired_pwd = false;
	else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
		if (sess_data->ses->expired_pwd == false)
			trace_smb3_key_expired(sess_data->server->hostname,
					       sess_data->ses->user_name,
					       sess_data->server->conn_id,
					       &sess_data->server->dstaddr, rc);
		sess_data->ses->expired_pwd = true;
	}

	memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));

	return rc;
}

static int
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
{
	int rc = 0;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;

	cifs_server_lock(server);
	if (server->ops->generate_signingkey) {
		rc = server->ops->generate_signingkey(ses, server);
		if (rc) {
			cifs_dbg(FYI,
				 "SMB3 session key generation failed\n");
			cifs_server_unlock(server);
			return rc;
		}
	}
	if (!server->session_estab) {
		server->sequence_number = 0x2;
		server->session_estab = true;
	}
	cifs_server_unlock(server);

	cifs_dbg(FYI, "SMB2/3 session established successfully\n");
	return rc;
}

#ifdef CONFIG_CIFS_UPCALL
static void
SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct cifs_spnego_msg *msg;
	struct key *spnego_key = NULL;
	struct smb2_sess_setup_rsp *rsp = NULL;
	bool is_binding = false;

	rc = SMB2_sess_alloc_buffer(sess_data);
	if (rc)
		goto out;

	spnego_key = cifs_get_spnego_key(ses, server);
	if (IS_ERR(spnego_key)) {
		rc = PTR_ERR(spnego_key);
		if (rc == -ENOKEY)
			cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
		spnego_key = NULL;
		goto out;
	}

	msg = spnego_key->payload.data[0];
	/*
	 * check version field to make sure that cifs.upcall is
	 * sending us a response in an expected form
	 */
	if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
		cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
			 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
		rc = -EKEYREJECTED;
		goto out_put_spnego_key;
	}

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
	spin_unlock(&ses->ses_lock);

	/* keep session key if binding */
	if (!is_binding) {
		kfree_sensitive(ses->auth_key.response);
		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
						 GFP_KERNEL);
		if (!ses->auth_key.response) {
			cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
				 msg->sesskey_len);
			rc = -ENOMEM;
			goto out_put_spnego_key;
		}
		ses->auth_key.len = msg->sesskey_len;
	}

	sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
	sess_data->iov[1].iov_len = msg->secblob_len;

	rc = SMB2_sess_sendreceive(sess_data);
	if (rc)
		goto out_put_spnego_key;

	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
	/* keep session id and flags if binding */
	if (!is_binding) {
		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
	}

	rc = SMB2_sess_establish_session(sess_data);
out_put_spnego_key:
	key_invalidate(spnego_key);
	key_put(spnego_key);
	if (rc) {
		kfree_sensitive(ses->auth_key.response);
		ses->auth_key.response = NULL;
		ses->auth_key.len = 0;
	}
out:
	sess_data->result = rc;
	sess_data->func = NULL;
	SMB2_sess_free_buffer(sess_data);
}
#else
static void
SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{
	cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
	sess_data->result = -EOPNOTSUPP;
	sess_data->func = NULL;
}
#endif

static void
SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);

static void
SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct smb2_sess_setup_rsp *rsp = NULL;
	unsigned char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */
	u16 blob_length = 0;
	bool is_binding = false;

	/*
	 * If memory allocation is successful, caller of this function
	 * frees it.
	 */
	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
	if (!ses->ntlmssp) {
		rc = -ENOMEM;
		goto out_err;
	}
	ses->ntlmssp->sesskey_per_smbsess = true;

	rc = SMB2_sess_alloc_buffer(sess_data);
	if (rc)
		goto out_err;

	rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
					       &blob_length, ses, server,
					       sess_data->nls_cp);
	if (rc)
		goto out;

	if (use_spnego) {
		/* BB eventually need to add this */
		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
		rc = -EOPNOTSUPP;
		goto out;
	}
	sess_data->iov[1].iov_base = ntlmssp_blob;
	sess_data->iov[1].iov_len = blob_length;

	rc = SMB2_sess_sendreceive(sess_data);
	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;

	/* If true, rc here is expected and not an error */
	if (sess_data->buf0_type != CIFS_NO_BUFFER &&
	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
		rc = 0;

	if (rc)
		goto out;

	if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
	    le16_to_cpu(rsp->SecurityBufferOffset)) {
		cifs_dbg(VFS, "Invalid security buffer offset %d\n",
			 le16_to_cpu(rsp->SecurityBufferOffset));
		rc = -EIO;
		goto out;
	}
	rc = decode_ntlmssp_challenge(rsp->Buffer,
				      le16_to_cpu(rsp->SecurityBufferLength), ses);
	if (rc)
		goto out;

	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
	spin_unlock(&ses->ses_lock);

	/* keep existing ses id and flags if binding */
	if (!is_binding) {
		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
	}

out:
	kfree_sensitive(ntlmssp_blob);
	SMB2_sess_free_buffer(sess_data);
	if (!rc) {
		sess_data->result = 0;
		sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
		return;
	}
out_err:
	kfree_sensitive(ses->ntlmssp);
	ses->ntlmssp = NULL;
	sess_data->result = rc;
	sess_data->func = NULL;
}

static void
SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
{
	int rc;
	struct cifs_ses *ses = sess_data->ses;
	struct TCP_Server_Info *server = sess_data->server;
	struct smb2_sess_setup_req *req;
	struct smb2_sess_setup_rsp *rsp = NULL;
	unsigned char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */
	u16 blob_length = 0;
	bool is_binding = false;

	rc = SMB2_sess_alloc_buffer(sess_data);
	if (rc)
		goto out;

	req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
	req->hdr.SessionId = cpu_to_le64(ses->Suid);

	rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
				     ses, server,
				     sess_data->nls_cp);
	if (rc) {
		cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
		goto out;
	}

	if (use_spnego) {
		/* BB eventually need to add this */
		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
		rc = -EOPNOTSUPP;
		goto out;
	}
	sess_data->iov[1].iov_base = ntlmssp_blob;
	sess_data->iov[1].iov_len = blob_length;

	rc = SMB2_sess_sendreceive(sess_data);
	if (rc)
		goto out;

	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;

	spin_lock(&ses->ses_lock);
	is_binding = (ses->ses_status == SES_GOOD);
spin_unlock(&ses->ses_lock); 1846 1847 /* keep existing ses id and flags if binding */ 1848 if (!is_binding) { 1849 ses->Suid = le64_to_cpu(rsp->hdr.SessionId); 1850 ses->session_flags = le16_to_cpu(rsp->SessionFlags); 1851 } 1852 1853 rc = SMB2_sess_establish_session(sess_data); 1854 #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS 1855 if (ses->server->dialect < SMB30_PROT_ID) { 1856 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__); 1857 /* 1858 * The session id is opaque in terms of endianness, so we can't 1859 * print it as a long long. we dump it as we got it on the wire 1860 */ 1861 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid), 1862 &ses->Suid); 1863 cifs_dbg(VFS, "Session Key %*ph\n", 1864 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response); 1865 cifs_dbg(VFS, "Signing Key %*ph\n", 1866 SMB3_SIGN_KEY_SIZE, ses->auth_key.response); 1867 } 1868 #endif 1869 out: 1870 kfree_sensitive(ntlmssp_blob); 1871 SMB2_sess_free_buffer(sess_data); 1872 kfree_sensitive(ses->ntlmssp); 1873 ses->ntlmssp = NULL; 1874 sess_data->result = rc; 1875 sess_data->func = NULL; 1876 } 1877 1878 static int 1879 SMB2_select_sec(struct SMB2_sess_data *sess_data) 1880 { 1881 int type; 1882 struct cifs_ses *ses = sess_data->ses; 1883 struct TCP_Server_Info *server = sess_data->server; 1884 1885 type = smb2_select_sectype(server, ses->sectype); 1886 cifs_dbg(FYI, "sess setup type %d\n", type); 1887 if (type == Unspecified) { 1888 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n"); 1889 return -EINVAL; 1890 } 1891 1892 switch (type) { 1893 case Kerberos: 1894 sess_data->func = SMB2_auth_kerberos; 1895 break; 1896 case RawNTLMSSP: 1897 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate; 1898 break; 1899 default: 1900 cifs_dbg(VFS, "secType %d not supported!\n", type); 1901 return -EOPNOTSUPP; 1902 } 1903 1904 return 0; 1905 } 1906 1907 int 1908 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, 1909 struct TCP_Server_Info *server, 1910 const struct nls_table *nls_cp) 1911 { 1912 int rc = 0; 1913 struct SMB2_sess_data *sess_data; 1914 1915 cifs_dbg(FYI, "Session Setup\n"); 1916 1917 if (!server) { 1918 WARN(1, "%s: server is NULL!\n", __func__); 1919 return -EIO; 1920 } 1921 1922 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL); 1923 if (!sess_data) 1924 return -ENOMEM; 1925 1926 sess_data->xid = xid; 1927 sess_data->ses = ses; 1928 sess_data->server = server; 1929 sess_data->buf0_type = CIFS_NO_BUFFER; 1930 sess_data->nls_cp = (struct nls_table *) nls_cp; 1931 sess_data->previous_session = ses->Suid; 1932 1933 rc = SMB2_select_sec(sess_data); 1934 if (rc) 1935 goto out; 1936 1937 /* 1938 * Initialize the session hash with the server one. 
1939 */ 1940 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash, 1941 SMB2_PREAUTH_HASH_SIZE); 1942 1943 while (sess_data->func) 1944 sess_data->func(sess_data); 1945 1946 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign)) 1947 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n"); 1948 rc = sess_data->result; 1949 out: 1950 kfree_sensitive(sess_data); 1951 return rc; 1952 } 1953 1954 int 1955 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) 1956 { 1957 struct smb_rqst rqst; 1958 struct smb2_logoff_req *req; /* response is also trivial struct */ 1959 int rc = 0; 1960 struct TCP_Server_Info *server; 1961 int flags = 0; 1962 unsigned int total_len; 1963 struct kvec iov[1]; 1964 struct kvec rsp_iov; 1965 int resp_buf_type; 1966 1967 cifs_dbg(FYI, "disconnect session %p\n", ses); 1968 1969 if (ses && (ses->server)) 1970 server = ses->server; 1971 else 1972 return -EIO; 1973 1974 /* no need to send SMB logoff if uid already closed due to reconnect */ 1975 spin_lock(&ses->chan_lock); 1976 if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) { 1977 spin_unlock(&ses->chan_lock); 1978 goto smb2_session_already_dead; 1979 } 1980 spin_unlock(&ses->chan_lock); 1981 1982 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server, 1983 (void **) &req, &total_len); 1984 if (rc) 1985 return rc; 1986 1987 /* since no tcon, smb2_init can not do this, so do here */ 1988 req->hdr.SessionId = cpu_to_le64(ses->Suid); 1989 1990 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) 1991 flags |= CIFS_TRANSFORM_REQ; 1992 else if (server->sign) 1993 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 1994 1995 flags |= CIFS_NO_RSP_BUF; 1996 1997 iov[0].iov_base = (char *)req; 1998 iov[0].iov_len = total_len; 1999 2000 memset(&rqst, 0, sizeof(struct smb_rqst)); 2001 rqst.rq_iov = iov; 2002 rqst.rq_nvec = 1; 2003 2004 rc = cifs_send_recv(xid, ses, ses->server, 2005 &rqst, &resp_buf_type, flags, &rsp_iov); 2006 cifs_small_buf_release(req); 2007 /* 2008 * No tcon so can't do 2009 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); 2010 */ 2011 2012 smb2_session_already_dead: 2013 return rc; 2014 } 2015 2016 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code) 2017 { 2018 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]); 2019 } 2020 2021 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */) 2022 2023 /* These are similar values to what Windows uses */ 2024 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon) 2025 { 2026 tcon->max_chunks = 256; 2027 tcon->max_bytes_chunk = 1048576; 2028 tcon->max_bytes_copy = 16777216; 2029 } 2030 2031 int 2032 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, 2033 struct cifs_tcon *tcon, const struct nls_table *cp) 2034 { 2035 struct smb_rqst rqst; 2036 struct smb2_tree_connect_req *req; 2037 struct smb2_tree_connect_rsp *rsp = NULL; 2038 struct kvec iov[2]; 2039 struct kvec rsp_iov = { NULL, 0 }; 2040 int rc = 0; 2041 int resp_buftype; 2042 int unc_path_len; 2043 __le16 *unc_path = NULL; 2044 int flags = 0; 2045 unsigned int total_len; 2046 struct TCP_Server_Info *server = cifs_pick_channel(ses); 2047 2048 cifs_dbg(FYI, "TCON\n"); 2049 2050 if (!server || !tree) 2051 return -EIO; 2052 2053 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); 2054 if (unc_path == NULL) 2055 return -ENOMEM; 2056 2057 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp); 2058 if (unc_path_len <= 0) { 2059 kfree(unc_path); 2060 return -EINVAL; 2061 } 
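/* cifs_strtoUTF16() returns a length in UTF-16 code units; PathLength and the path iov below are byte counts, so double it */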
2062 unc_path_len *= 2; 2063 2064 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ 2065 tcon->tid = 0; 2066 atomic_set(&tcon->num_remote_opens, 0); 2067 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server, 2068 (void **) &req, &total_len); 2069 if (rc) { 2070 kfree(unc_path); 2071 return rc; 2072 } 2073 2074 if (smb3_encryption_required(tcon)) 2075 flags |= CIFS_TRANSFORM_REQ; 2076 2077 iov[0].iov_base = (char *)req; 2078 /* 1 for pad */ 2079 iov[0].iov_len = total_len - 1; 2080 2081 /* Testing shows that buffer offset must be at location of Buffer[0] */ 2082 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)); 2083 req->PathLength = cpu_to_le16(unc_path_len); 2084 iov[1].iov_base = unc_path; 2085 iov[1].iov_len = unc_path_len; 2086 2087 /* 2088 * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 2089 * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1 2090 * (Samba servers don't always set the flag so also check if null user) 2091 */ 2092 if ((server->dialect == SMB311_PROT_ID) && 2093 !smb3_encryption_required(tcon) && 2094 !(ses->session_flags & 2095 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) && 2096 ((ses->user_name != NULL) || (ses->sectype == Kerberos))) 2097 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 2098 2099 memset(&rqst, 0, sizeof(struct smb_rqst)); 2100 rqst.rq_iov = iov; 2101 rqst.rq_nvec = 2; 2102 2103 /* Need 64 for max size write so ask for more in case not there yet */ 2104 if (server->credits >= server->max_credits) 2105 req->hdr.CreditRequest = cpu_to_le16(0); 2106 else 2107 req->hdr.CreditRequest = cpu_to_le16( 2108 min_t(int, server->max_credits - 2109 server->credits, 64)); 2110 2111 rc = cifs_send_recv(xid, ses, server, 2112 &rqst, &resp_buftype, flags, &rsp_iov); 2113 cifs_small_buf_release(req); 2114 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; 2115 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc); 2116 if ((rc != 0) || (rsp == NULL)) { 2117 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE); 2118 tcon->need_reconnect = true; 2119 goto tcon_error_exit; 2120 } 2121 2122 switch (rsp->ShareType) { 2123 case SMB2_SHARE_TYPE_DISK: 2124 cifs_dbg(FYI, "connection to disk share\n"); 2125 break; 2126 case SMB2_SHARE_TYPE_PIPE: 2127 tcon->pipe = true; 2128 cifs_dbg(FYI, "connection to pipe share\n"); 2129 break; 2130 case SMB2_SHARE_TYPE_PRINT: 2131 tcon->print = true; 2132 cifs_dbg(FYI, "connection to printer\n"); 2133 break; 2134 default: 2135 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType); 2136 rc = -EOPNOTSUPP; 2137 goto tcon_error_exit; 2138 } 2139 2140 tcon->share_flags = le32_to_cpu(rsp->ShareFlags); 2141 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ 2142 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); 2143 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId); 2144 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name)); 2145 2146 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && 2147 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) 2148 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n"); 2149 2150 if (tcon->seal && 2151 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 2152 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n"); 2153 2154 init_copy_chunk_defaults(tcon); 2155 if (server->ops->validate_negotiate) 2156 rc = server->ops->validate_negotiate(xid, tcon); 2157 if (rc == 0) /* See MS-SMB2 2.2.10 and 3.2.5.5 */ 2158 if (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT) 2159 server->nosharesock = true; 
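/* ISOLATED_TRANSPORT shares must not share their TCP connection with other mounts, hence nosharesock set above */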
2160 tcon_exit: 2161 2162 free_rsp_buf(resp_buftype, rsp); 2163 kfree(unc_path); 2164 return rc; 2165 2166 tcon_error_exit: 2167 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) 2168 cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree); 2169 goto tcon_exit; 2170 } 2171 2172 int 2173 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) 2174 { 2175 struct smb_rqst rqst; 2176 struct smb2_tree_disconnect_req *req; /* response is trivial */ 2177 int rc = 0; 2178 struct cifs_ses *ses = tcon->ses; 2179 struct TCP_Server_Info *server = cifs_pick_channel(ses); 2180 int flags = 0; 2181 unsigned int total_len; 2182 struct kvec iov[1]; 2183 struct kvec rsp_iov; 2184 int resp_buf_type; 2185 2186 cifs_dbg(FYI, "Tree Disconnect\n"); 2187 2188 if (!ses || !(ses->server)) 2189 return -EIO; 2190 2191 trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name); 2192 spin_lock(&ses->chan_lock); 2193 if ((tcon->need_reconnect) || 2194 (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) { 2195 spin_unlock(&ses->chan_lock); 2196 return 0; 2197 } 2198 spin_unlock(&ses->chan_lock); 2199 2200 invalidate_all_cached_dirs(tcon); 2201 2202 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server, 2203 (void **) &req, 2204 &total_len); 2205 if (rc) 2206 return rc; 2207 2208 if (smb3_encryption_required(tcon)) 2209 flags |= CIFS_TRANSFORM_REQ; 2210 2211 flags |= CIFS_NO_RSP_BUF; 2212 2213 iov[0].iov_base = (char *)req; 2214 iov[0].iov_len = total_len; 2215 2216 memset(&rqst, 0, sizeof(struct smb_rqst)); 2217 rqst.rq_iov = iov; 2218 rqst.rq_nvec = 1; 2219 2220 rc = cifs_send_recv(xid, ses, server, 2221 &rqst, &resp_buf_type, flags, &rsp_iov); 2222 cifs_small_buf_release(req); 2223 if (rc) { 2224 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); 2225 trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc); 2226 } 2227 trace_smb3_tdis_done(xid, tcon->tid, ses->Suid); 2228 2229 return rc; 2230 } 2231 2232 2233 static struct create_durable * 2234 create_durable_buf(void) 2235 { 2236 struct create_durable *buf; 2237 2238 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); 2239 if (!buf) 2240 return NULL; 2241 2242 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2243 (struct create_durable, Data)); 2244 buf->ccontext.DataLength = cpu_to_le32(16); 2245 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2246 (struct create_durable, Name)); 2247 buf->ccontext.NameLength = cpu_to_le16(4); 2248 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */ 2249 buf->Name[0] = 'D'; 2250 buf->Name[1] = 'H'; 2251 buf->Name[2] = 'n'; 2252 buf->Name[3] = 'Q'; 2253 return buf; 2254 } 2255 2256 static struct create_durable * 2257 create_reconnect_durable_buf(struct cifs_fid *fid) 2258 { 2259 struct create_durable *buf; 2260 2261 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); 2262 if (!buf) 2263 return NULL; 2264 2265 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2266 (struct create_durable, Data)); 2267 buf->ccontext.DataLength = cpu_to_le32(16); 2268 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2269 (struct create_durable, Name)); 2270 buf->ccontext.NameLength = cpu_to_le16(4); 2271 buf->Data.Fid.PersistentFileId = fid->persistent_fid; 2272 buf->Data.Fid.VolatileFileId = fid->volatile_fid; 2273 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */ 2274 buf->Name[0] = 'D'; 2275 buf->Name[1] = 'H'; 2276 buf->Name[2] = 'n'; 2277 buf->Name[3] = 'C'; 2278 return buf; 2279 } 2280 2281 static void 2282 parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf) 2283 { 2284 struct create_disk_id_rsp *pdisk_id = 
(struct create_disk_id_rsp *)cc; 2285 2286 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n", 2287 pdisk_id->DiskFileId, pdisk_id->VolumeId); 2288 buf->IndexNumber = pdisk_id->DiskFileId; 2289 } 2290 2291 static void 2292 parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info, 2293 struct create_posix_rsp *posix) 2294 { 2295 int sid_len; 2296 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset); 2297 u8 *end = beg + le32_to_cpu(cc->DataLength); 2298 u8 *sid; 2299 2300 memset(posix, 0, sizeof(*posix)); 2301 2302 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0)); 2303 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4)); 2304 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8)); 2305 2306 sid = beg + 12; 2307 sid_len = posix_info_sid_size(sid, end); 2308 if (sid_len < 0) { 2309 cifs_dbg(VFS, "bad owner sid in posix create response\n"); 2310 return; 2311 } 2312 memcpy(&posix->owner, sid, sid_len); 2313 2314 sid = sid + sid_len; 2315 sid_len = posix_info_sid_size(sid, end); 2316 if (sid_len < 0) { 2317 cifs_dbg(VFS, "bad group sid in posix create response\n"); 2318 return; 2319 } 2320 memcpy(&posix->group, sid, sid_len); 2321 2322 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n", 2323 posix->nlink, posix->mode, posix->reparse_tag); 2324 } 2325 2326 int smb2_parse_contexts(struct TCP_Server_Info *server, 2327 struct kvec *rsp_iov, 2328 __u16 *epoch, 2329 char *lease_key, __u8 *oplock, 2330 struct smb2_file_all_info *buf, 2331 struct create_posix_rsp *posix) 2332 { 2333 struct smb2_create_rsp *rsp = rsp_iov->iov_base; 2334 struct create_context *cc; 2335 size_t rem, off, len; 2336 size_t doff, dlen; 2337 size_t noff, nlen; 2338 char *name; 2339 static const char smb3_create_tag_posix[] = { 2340 0x93, 0xAD, 0x25, 0x50, 0x9C, 2341 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83, 2342 0xDE, 0x96, 0x8B, 0xCD, 0x7C 2343 }; 2344 2345 *oplock = 0; 2346 2347 off = le32_to_cpu(rsp->CreateContextsOffset); 2348 rem = le32_to_cpu(rsp->CreateContextsLength); 2349 if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len) 2350 return -EINVAL; 2351 cc = (struct create_context *)((u8 *)rsp + off); 2352 2353 /* Initialize inode number to 0 in case no valid data in qfid context */ 2354 if (buf) 2355 buf->IndexNumber = 0; 2356 2357 while (rem >= sizeof(*cc)) { 2358 doff = le16_to_cpu(cc->DataOffset); 2359 dlen = le32_to_cpu(cc->DataLength); 2360 if (check_add_overflow(doff, dlen, &len) || len > rem) 2361 return -EINVAL; 2362 2363 noff = le16_to_cpu(cc->NameOffset); 2364 nlen = le16_to_cpu(cc->NameLength); 2365 if (noff + nlen > doff) 2366 return -EINVAL; 2367 2368 name = (char *)cc + noff; 2369 switch (nlen) { 2370 case 4: 2371 if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) { 2372 *oplock = server->ops->parse_lease_buf(cc, epoch, 2373 lease_key); 2374 } else if (buf && 2375 !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) { 2376 parse_query_id_ctxt(cc, buf); 2377 } 2378 break; 2379 case 16: 2380 if (posix && !memcmp(name, smb3_create_tag_posix, 16)) 2381 parse_posix_ctxt(cc, buf, posix); 2382 break; 2383 default: 2384 cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n", 2385 __func__, nlen, dlen); 2386 if (IS_ENABLED(CONFIG_CIFS_DEBUG2)) 2387 cifs_dump_mem("context data: ", cc, dlen); 2388 break; 2389 } 2390 2391 off = le32_to_cpu(cc->Next); 2392 if (!off) 2393 break; 2394 if (check_sub_overflow(rem, off, &rem)) 2395 return -EINVAL; 2396 cc = (struct create_context *)((u8 *)cc + off); 2397 } 2398 2399 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) 2400 *oplock = rsp->OplockLevel; 2401 
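/* if the server granted a lease, *oplock was filled in from the lease context in the loop above; otherwise report the plain oplock level from the response */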
2402 return 0; 2403 } 2404 2405 static int 2406 add_lease_context(struct TCP_Server_Info *server, 2407 struct smb2_create_req *req, 2408 struct kvec *iov, 2409 unsigned int *num_iovec, 2410 u8 *lease_key, 2411 __u8 *oplock, 2412 u8 *parent_lease_key, 2413 __le32 flags) 2414 { 2415 unsigned int num = *num_iovec; 2416 2417 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock, 2418 parent_lease_key, flags); 2419 if (iov[num].iov_base == NULL) 2420 return -ENOMEM; 2421 iov[num].iov_len = server->vals->create_lease_size; 2422 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; 2423 *num_iovec = num + 1; 2424 return 0; 2425 } 2426 2427 static struct create_durable_v2 * 2428 create_durable_v2_buf(struct cifs_open_parms *oparms) 2429 { 2430 struct cifs_fid *pfid = oparms->fid; 2431 struct create_durable_v2 *buf; 2432 2433 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); 2434 if (!buf) 2435 return NULL; 2436 2437 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2438 (struct create_durable_v2, dcontext)); 2439 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2)); 2440 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2441 (struct create_durable_v2, Name)); 2442 buf->ccontext.NameLength = cpu_to_le16(4); 2443 2444 /* 2445 * NB: Handle timeout defaults to 0, which allows server to choose 2446 * (most servers default to 120 seconds) and most clients default to 0. 2447 * This can be overridden at mount ("handletimeout=") if the user wants 2448 * a different persistent (or resilient) handle timeout for all opens 2449 * on a particular SMB3 mount. 2450 */ 2451 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); 2452 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 2453 2454 /* for replay, we should not overwrite the existing create guid */ 2455 if (!oparms->replay) { 2456 generate_random_uuid(buf->dcontext.CreateGuid); 2457 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); 2458 } else 2459 memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16); 2460 2461 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */ 2462 buf->Name[0] = 'D'; 2463 buf->Name[1] = 'H'; 2464 buf->Name[2] = '2'; 2465 buf->Name[3] = 'Q'; 2466 return buf; 2467 } 2468 2469 static struct create_durable_handle_reconnect_v2 * 2470 create_reconnect_durable_v2_buf(struct cifs_fid *fid) 2471 { 2472 struct create_durable_handle_reconnect_v2 *buf; 2473 2474 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2), 2475 GFP_KERNEL); 2476 if (!buf) 2477 return NULL; 2478 2479 buf->ccontext.DataOffset = 2480 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, 2481 dcontext)); 2482 buf->ccontext.DataLength = 2483 cpu_to_le32(sizeof(struct durable_reconnect_context_v2)); 2484 buf->ccontext.NameOffset = 2485 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, 2486 Name)); 2487 buf->ccontext.NameLength = cpu_to_le16(4); 2488 2489 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid; 2490 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid; 2491 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 2492 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16); 2493 2494 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */ 2495 buf->Name[0] = 'D'; 2496 buf->Name[1] = 'H'; 2497 buf->Name[2] = '2'; 2498 buf->Name[3] = 'C'; 2499 return buf; 2500 } 2501 2502 static int 2503 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, 2504 struct cifs_open_parms *oparms) 2505 { 2506 unsigned int num = *num_iovec; 2507 2508 
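/* append a DH2Q create context asking for a durable (or persistent) v2 handle; layout built by create_durable_v2_buf() above */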
iov[num].iov_base = create_durable_v2_buf(oparms); 2509 if (iov[num].iov_base == NULL) 2510 return -ENOMEM; 2511 iov[num].iov_len = sizeof(struct create_durable_v2); 2512 *num_iovec = num + 1; 2513 return 0; 2514 } 2515 2516 static int 2517 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec, 2518 struct cifs_open_parms *oparms) 2519 { 2520 unsigned int num = *num_iovec; 2521 2522 /* indicate that we don't need to relock the file */ 2523 oparms->reconnect = false; 2524 2525 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid); 2526 if (iov[num].iov_base == NULL) 2527 return -ENOMEM; 2528 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2); 2529 *num_iovec = num + 1; 2530 return 0; 2531 } 2532 2533 static int 2534 add_durable_context(struct kvec *iov, unsigned int *num_iovec, 2535 struct cifs_open_parms *oparms, bool use_persistent) 2536 { 2537 unsigned int num = *num_iovec; 2538 2539 if (use_persistent) { 2540 if (oparms->reconnect) 2541 return add_durable_reconnect_v2_context(iov, num_iovec, 2542 oparms); 2543 else 2544 return add_durable_v2_context(iov, num_iovec, oparms); 2545 } 2546 2547 if (oparms->reconnect) { 2548 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid); 2549 /* indicate that we don't need to relock the file */ 2550 oparms->reconnect = false; 2551 } else 2552 iov[num].iov_base = create_durable_buf(); 2553 if (iov[num].iov_base == NULL) 2554 return -ENOMEM; 2555 iov[num].iov_len = sizeof(struct create_durable); 2556 *num_iovec = num + 1; 2557 return 0; 2558 } 2559 2560 /* See MS-SMB2 2.2.13.2.7 */ 2561 static struct crt_twarp_ctxt * 2562 create_twarp_buf(__u64 timewarp) 2563 { 2564 struct crt_twarp_ctxt *buf; 2565 2566 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL); 2567 if (!buf) 2568 return NULL; 2569 2570 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2571 (struct crt_twarp_ctxt, Timestamp)); 2572 buf->ccontext.DataLength = cpu_to_le32(8); 2573 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2574 (struct crt_twarp_ctxt, Name)); 2575 buf->ccontext.NameLength = cpu_to_le16(4); 2576 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */ 2577 buf->Name[0] = 'T'; 2578 buf->Name[1] = 'W'; 2579 buf->Name[2] = 'r'; 2580 buf->Name[3] = 'p'; 2581 buf->Timestamp = cpu_to_le64(timewarp); 2582 return buf; 2583 } 2584 2585 /* See MS-SMB2 2.2.13.2.7 */ 2586 static int 2587 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp) 2588 { 2589 unsigned int num = *num_iovec; 2590 2591 iov[num].iov_base = create_twarp_buf(timewarp); 2592 if (iov[num].iov_base == NULL) 2593 return -ENOMEM; 2594 iov[num].iov_len = sizeof(struct crt_twarp_ctxt); 2595 *num_iovec = num + 1; 2596 return 0; 2597 } 2598 2599 /* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ 2600 static void setup_owner_group_sids(char *buf) 2601 { 2602 struct owner_group_sids *sids = (struct owner_group_sids *)buf; 2603 2604 /* Populate the user ownership fields S-1-5-88-1 */ 2605 sids->owner.Revision = 1; 2606 sids->owner.NumAuth = 3; 2607 sids->owner.Authority[5] = 5; 2608 sids->owner.SubAuthorities[0] = cpu_to_le32(88); 2609 sids->owner.SubAuthorities[1] = cpu_to_le32(1); 2610 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val); 2611 2612 /* Populate the group ownership fields S-1-5-88-2 */ 2613 sids->group.Revision = 1; 2614 sids->group.NumAuth = 3; 2615 sids->group.Authority[5] = 5; 2616 sids->group.SubAuthorities[0] = cpu_to_le32(88); 2617 sids->group.SubAuthorities[1] = cpu_to_le32(2); 2618 
sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val); 2619 2620 cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val); 2621 } 2622 2623 /* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */ 2624 static struct crt_sd_ctxt * 2625 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) 2626 { 2627 struct crt_sd_ctxt *buf; 2628 __u8 *ptr, *aclptr; 2629 unsigned int acelen, acl_size, ace_count; 2630 unsigned int owner_offset = 0; 2631 unsigned int group_offset = 0; 2632 struct smb3_acl acl = {}; 2633 2634 *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct smb_ace) * 4), 8); 2635 2636 if (set_owner) { 2637 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */ 2638 *len += sizeof(struct owner_group_sids); 2639 } 2640 2641 buf = kzalloc(*len, GFP_KERNEL); 2642 if (buf == NULL) 2643 return buf; 2644 2645 ptr = (__u8 *)&buf[1]; 2646 if (set_owner) { 2647 /* offset fields are from beginning of security descriptor not of create context */ 2648 owner_offset = ptr - (__u8 *)&buf->sd; 2649 buf->sd.OffsetOwner = cpu_to_le32(owner_offset); 2650 group_offset = owner_offset + offsetof(struct owner_group_sids, group); 2651 buf->sd.OffsetGroup = cpu_to_le32(group_offset); 2652 2653 setup_owner_group_sids(ptr); 2654 ptr += sizeof(struct owner_group_sids); 2655 } else { 2656 buf->sd.OffsetOwner = 0; 2657 buf->sd.OffsetGroup = 0; 2658 } 2659 2660 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd)); 2661 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name)); 2662 buf->ccontext.NameLength = cpu_to_le16(4); 2663 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */ 2664 buf->Name[0] = 'S'; 2665 buf->Name[1] = 'e'; 2666 buf->Name[2] = 'c'; 2667 buf->Name[3] = 'D'; 2668 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */ 2669 2670 /* 2671 * ACL is "self relative" ie ACL is stored in contiguous block of memory 2672 * and "DP" ie the DACL is present 2673 */ 2674 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP); 2675 2676 /* offset owner, group and Sbz1 and SACL are all zero */ 2677 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2678 /* Ship the ACL for now. we will copy it into buf later. */ 2679 aclptr = ptr; 2680 ptr += sizeof(struct smb3_acl); 2681 2682 /* create one ACE to hold the mode embedded in reserved special SID */ 2683 acelen = setup_special_mode_ACE((struct smb_ace *)ptr, false, (__u64)mode); 2684 ptr += acelen; 2685 acl_size = acelen + sizeof(struct smb3_acl); 2686 ace_count = 1; 2687 2688 if (set_owner) { 2689 /* we do not need to reallocate buffer to add the two more ACEs. 
plenty of space */ 2690 acelen = setup_special_user_owner_ACE((struct smb_ace *)ptr); 2691 ptr += acelen; 2692 acl_size += acelen; 2693 ace_count += 1; 2694 } 2695 2696 /* and one more ACE to allow access for authenticated users */ 2697 acelen = setup_authusers_ACE((struct smb_ace *)ptr); 2698 ptr += acelen; 2699 acl_size += acelen; 2700 ace_count += 1; 2701 2702 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ 2703 acl.AclSize = cpu_to_le16(acl_size); 2704 acl.AceCount = cpu_to_le16(ace_count); 2705 /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */ 2706 memcpy(aclptr, &acl, sizeof(struct smb3_acl)); 2707 2708 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2709 *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8); 2710 2711 return buf; 2712 } 2713 2714 static int 2715 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner) 2716 { 2717 unsigned int num = *num_iovec; 2718 unsigned int len = 0; 2719 2720 iov[num].iov_base = create_sd_buf(mode, set_owner, &len); 2721 if (iov[num].iov_base == NULL) 2722 return -ENOMEM; 2723 iov[num].iov_len = len; 2724 *num_iovec = num + 1; 2725 return 0; 2726 } 2727 2728 static struct crt_query_id_ctxt * 2729 create_query_id_buf(void) 2730 { 2731 struct crt_query_id_ctxt *buf; 2732 2733 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL); 2734 if (!buf) 2735 return NULL; 2736 2737 buf->ccontext.DataOffset = cpu_to_le16(0); 2738 buf->ccontext.DataLength = cpu_to_le32(0); 2739 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2740 (struct crt_query_id_ctxt, Name)); 2741 buf->ccontext.NameLength = cpu_to_le16(4); 2742 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */ 2743 buf->Name[0] = 'Q'; 2744 buf->Name[1] = 'F'; 2745 buf->Name[2] = 'i'; 2746 buf->Name[3] = 'd'; 2747 return buf; 2748 } 2749 2750 /* See MS-SMB2 2.2.13.2.9 */ 2751 static int 2752 add_query_id_context(struct kvec *iov, unsigned int *num_iovec) 2753 { 2754 unsigned int num = *num_iovec; 2755 2756 iov[num].iov_base = create_query_id_buf(); 2757 if (iov[num].iov_base == NULL) 2758 return -ENOMEM; 2759 iov[num].iov_len = sizeof(struct crt_query_id_ctxt); 2760 *num_iovec = num + 1; 2761 return 0; 2762 } 2763 2764 static void add_ea_context(struct cifs_open_parms *oparms, 2765 struct kvec *rq_iov, unsigned int *num_iovs) 2766 { 2767 struct kvec *iov = oparms->ea_cctx; 2768 2769 if (iov && iov->iov_base && iov->iov_len) { 2770 rq_iov[(*num_iovs)++] = *iov; 2771 memset(iov, 0, sizeof(*iov)); 2772 } 2773 } 2774 2775 static int 2776 alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len, 2777 const char *treename, const __le16 *path) 2778 { 2779 int treename_len, path_len; 2780 struct nls_table *cp; 2781 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)}; 2782 2783 /* 2784 * skip leading "\\" 2785 */ 2786 treename_len = strlen(treename); 2787 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\')) 2788 return -EINVAL; 2789 2790 treename += 2; 2791 treename_len -= 2; 2792 2793 path_len = UniStrnlen((wchar_t *)path, PATH_MAX); 2794 2795 /* make room for one path separator only if @path isn't empty */ 2796 *out_len = treename_len + (path[0] ? 1 : 0) + path_len; 2797 2798 /* 2799 * final path needs to be 8-byte aligned as specified in 2800 * MS-SMB2 2.2.13 SMB2 CREATE Request. 
2801 */ 2802 *out_size = round_up(*out_len * sizeof(__le16), 8); 2803 *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL); 2804 if (!*out_path) 2805 return -ENOMEM; 2806 2807 cp = load_nls_default(); 2808 cifs_strtoUTF16(*out_path, treename, treename_len, cp); 2809 2810 /* Do not append the separator if the path is empty */ 2811 if (path[0] != cpu_to_le16(0x0000)) { 2812 UniStrcat((wchar_t *)*out_path, (wchar_t *)sep); 2813 UniStrcat((wchar_t *)*out_path, (wchar_t *)path); 2814 } 2815 2816 unload_nls(cp); 2817 2818 return 0; 2819 } 2820 2821 int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, 2822 umode_t mode, struct cifs_tcon *tcon, 2823 const char *full_path, 2824 struct cifs_sb_info *cifs_sb) 2825 { 2826 struct smb_rqst rqst; 2827 struct smb2_create_req *req; 2828 struct smb2_create_rsp *rsp = NULL; 2829 struct cifs_ses *ses = tcon->ses; 2830 struct kvec iov[3]; /* make sure at least one for each open context */ 2831 struct kvec rsp_iov = {NULL, 0}; 2832 int resp_buftype; 2833 int uni_path_len; 2834 __le16 *copy_path = NULL; 2835 int copy_size; 2836 int rc = 0; 2837 unsigned int n_iov = 2; 2838 __u32 file_attributes = 0; 2839 char *pc_buf = NULL; 2840 int flags = 0; 2841 unsigned int total_len; 2842 __le16 *utf16_path = NULL; 2843 struct TCP_Server_Info *server; 2844 int retries = 0, cur_sleep = 1; 2845 2846 replay_again: 2847 /* reinitialize for possible replay */ 2848 flags = 0; 2849 n_iov = 2; 2850 server = cifs_pick_channel(ses); 2851 2852 cifs_dbg(FYI, "mkdir\n"); 2853 2854 /* resource #1: path allocation */ 2855 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); 2856 if (!utf16_path) 2857 return -ENOMEM; 2858 2859 if (!ses || !server) { 2860 rc = -EIO; 2861 goto err_free_path; 2862 } 2863 2864 /* resource #2: request */ 2865 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server, 2866 (void **) &req, &total_len); 2867 if (rc) 2868 goto err_free_path; 2869 2870 2871 if (smb3_encryption_required(tcon)) 2872 flags |= CIFS_TRANSFORM_REQ; 2873 2874 req->ImpersonationLevel = IL_IMPERSONATION; 2875 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES); 2876 /* File attributes ignored on open (used in create though) */ 2877 req->FileAttributes = cpu_to_le32(file_attributes); 2878 req->ShareAccess = FILE_SHARE_ALL_LE; 2879 req->CreateDisposition = cpu_to_le32(FILE_CREATE); 2880 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE); 2881 2882 iov[0].iov_base = (char *)req; 2883 /* -1 since last byte is buf[0] which is sent below (path) */ 2884 iov[0].iov_len = total_len - 1; 2885 2886 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); 2887 2888 /* [MS-SMB2] 2.2.13 NameOffset: 2889 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of 2890 * the SMB2 header, the file name includes a prefix that will 2891 * be processed during DFS name normalization as specified in 2892 * section 3.3.5.9. Otherwise, the file name is relative to 2893 * the share that is identified by the TreeId in the SMB2 2894 * header. 
 */
	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
		int name_len;

		req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
						 &name_len,
						 tcon->tree_name, utf16_path);
		if (rc)
			goto err_free_req;

		req->NameLength = cpu_to_le16(name_len * 2);
		uni_path_len = copy_size;
		/* free before overwriting resource */
		kfree(utf16_path);
		utf16_path = copy_path;
	} else {
		uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
		/* MUST set path len (NameLength) to 0 opening root of share */
		req->NameLength = cpu_to_le16(uni_path_len - 2);
		if (uni_path_len % 8 != 0) {
			copy_size = roundup(uni_path_len, 8);
			copy_path = kzalloc(copy_size, GFP_KERNEL);
			if (!copy_path) {
				rc = -ENOMEM;
				goto err_free_req;
			}
			memcpy((char *)copy_path, (const char *)utf16_path,
			       uni_path_len);
			uni_path_len = copy_size;
			/* free before overwriting resource */
			kfree(utf16_path);
			utf16_path = copy_path;
		}
	}

	iov[1].iov_len = uni_path_len;
	iov[1].iov_base = utf16_path;
	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;

	if (tcon->posix_extensions) {
		/* resource #3: posix buf */
		rc = add_posix_context(iov, &n_iov, mode);
		if (rc)
			goto err_free_req;
		req->CreateContextsOffset = cpu_to_le32(
			sizeof(struct smb2_create_req) +
			iov[1].iov_len);
		le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len);
		pc_buf = iov[n_iov-1].iov_base;
	}

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = n_iov;

	/* no need to inc num_remote_opens because we close it just below */
	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
				     FILE_WRITE_ATTRIBUTES);

	if (retries)
		smb2_set_replay(server, &rqst);

	/* resource #4: response buffer */
	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
					   CREATE_NOT_FILE,
					   FILE_WRITE_ATTRIBUTES, rc);
		goto err_free_rsp_buf;
	}

	/*
	 * Although unlikely to be possible for rsp to be null and rc not set,
	 * adding check below is slightly safer long term (and quiets Coverity
	 * warning)
	 */
	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
	if (rsp == NULL) {
		rc = -EIO;
		kfree(pc_buf);
		goto err_free_req;
	}

	trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
				    CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);

	SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);

	/* Eventually save off posix specific response info and timestamps */

err_free_rsp_buf:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	kfree(pc_buf);
err_free_req:
	cifs_small_buf_release(req);
err_free_path:
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int
SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, __u8 *oplock,
	       struct cifs_open_parms *oparms, __le16 *path)
{
	struct smb2_create_req *req;
	unsigned int n_iov = 2;
	__u32 file_attributes
= 0;
	int copy_size;
	int uni_path_len;
	unsigned int total_len;
	struct kvec *iov = rqst->rq_iov;
	__le16 *copy_path;
	int rc;

	rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	iov[0].iov_base = (char *)req;
	/* -1 since last byte is buf[0] which is sent below (path) */
	iov[0].iov_len = total_len - 1;

	if (oparms->create_options & CREATE_OPTION_READONLY)
		file_attributes |= ATTR_READONLY;
	if (oparms->create_options & CREATE_OPTION_SPECIAL)
		file_attributes |= ATTR_SYSTEM;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;

	req->CreateDisposition = cpu_to_le32(oparms->disposition);
	req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));

	/* [MS-SMB2] 2.2.13 NameOffset:
	 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
	 * the SMB2 header, the file name includes a prefix that will
	 * be processed during DFS name normalization as specified in
	 * section 3.3.5.9. Otherwise, the file name is relative to
	 * the share that is identified by the TreeId in the SMB2
	 * header.
	 */
	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
		int name_len;

		req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
						 &name_len,
						 tcon->tree_name, path);
		if (rc)
			return rc;
		req->NameLength = cpu_to_le16(name_len * 2);
		uni_path_len = copy_size;
		path = copy_path;
	} else {
		uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
		/* MUST set path len (NameLength) to 0 opening root of share */
		req->NameLength = cpu_to_le16(uni_path_len - 2);
		copy_size = round_up(uni_path_len, 8);
		copy_path = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_path)
			return -ENOMEM;
		memcpy((char *)copy_path, (const char *)path,
		       uni_path_len);
		uni_path_len = copy_size;
		path = copy_path;
	}

	iov[1].iov_len = uni_path_len;
	iov[1].iov_base = path;

	if ((!server->oplocks) || (tcon->no_lease))
		*oplock = SMB2_OPLOCK_LEVEL_NONE;

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
	else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
		 (oparms->create_options & CREATE_NOT_FILE))
		req->RequestedOplockLevel = *oplock; /* no srv lease support */
	else {
		rc = add_lease_context(server, req, iov, &n_iov,
				       oparms->fid->lease_key, oplock,
				       oparms->fid->parent_lease_key,
				       oparms->lease_flags);
		if (rc)
			return rc;
	}

	if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
		rc = add_durable_context(iov, &n_iov, oparms,
					 tcon->use_persistent);
		if (rc)
			return rc;
	}

	if (tcon->posix_extensions) {
		rc = add_posix_context(iov, &n_iov, oparms->mode);
		if (rc)
			return rc;
	}

	if (tcon->snapshot_time) {
		cifs_dbg(FYI, "adding snapshot context\n");
		rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
		if (rc)
			return rc;
	}

	if
((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) { 3119 bool set_mode; 3120 bool set_owner; 3121 3122 if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) && 3123 (oparms->mode != ACL_NO_MODE)) 3124 set_mode = true; 3125 else { 3126 set_mode = false; 3127 oparms->mode = ACL_NO_MODE; 3128 } 3129 3130 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) 3131 set_owner = true; 3132 else 3133 set_owner = false; 3134 3135 if (set_owner | set_mode) { 3136 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode); 3137 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner); 3138 if (rc) 3139 return rc; 3140 } 3141 } 3142 3143 add_query_id_context(iov, &n_iov); 3144 add_ea_context(oparms, iov, &n_iov); 3145 3146 if (n_iov > 2) { 3147 /* 3148 * We have create contexts behind iov[1] (the file 3149 * name), point at them from the main create request 3150 */ 3151 req->CreateContextsOffset = cpu_to_le32( 3152 sizeof(struct smb2_create_req) + 3153 iov[1].iov_len); 3154 req->CreateContextsLength = 0; 3155 3156 for (unsigned int i = 2; i < (n_iov-1); i++) { 3157 struct kvec *v = &iov[i]; 3158 size_t len = v->iov_len; 3159 struct create_context *cctx = 3160 (struct create_context *)v->iov_base; 3161 3162 cctx->Next = cpu_to_le32(len); 3163 le32_add_cpu(&req->CreateContextsLength, len); 3164 } 3165 le32_add_cpu(&req->CreateContextsLength, 3166 iov[n_iov-1].iov_len); 3167 } 3168 3169 rqst->rq_nvec = n_iov; 3170 return 0; 3171 } 3172 3173 /* rq_iov[0] is the request and is released by cifs_small_buf_release(). 3174 * All other vectors are freed by kfree(). 3175 */ 3176 void 3177 SMB2_open_free(struct smb_rqst *rqst) 3178 { 3179 int i; 3180 3181 if (rqst && rqst->rq_iov) { 3182 cifs_small_buf_release(rqst->rq_iov[0].iov_base); 3183 for (i = 1; i < rqst->rq_nvec; i++) 3184 if (rqst->rq_iov[i].iov_base != smb2_padding) 3185 kfree(rqst->rq_iov[i].iov_base); 3186 } 3187 } 3188 3189 int 3190 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, 3191 __u8 *oplock, struct smb2_file_all_info *buf, 3192 struct create_posix_rsp *posix, 3193 struct kvec *err_iov, int *buftype) 3194 { 3195 struct smb_rqst rqst; 3196 struct smb2_create_rsp *rsp = NULL; 3197 struct cifs_tcon *tcon = oparms->tcon; 3198 struct cifs_ses *ses = tcon->ses; 3199 struct TCP_Server_Info *server; 3200 struct kvec iov[SMB2_CREATE_IOV_SIZE]; 3201 struct kvec rsp_iov = {NULL, 0}; 3202 int resp_buftype = CIFS_NO_BUFFER; 3203 int rc = 0; 3204 int flags = 0; 3205 int retries = 0, cur_sleep = 1; 3206 3207 replay_again: 3208 /* reinitialize for possible replay */ 3209 flags = 0; 3210 server = cifs_pick_channel(ses); 3211 oparms->replay = !!(retries); 3212 3213 cifs_dbg(FYI, "create/open\n"); 3214 if (!ses || !server) 3215 return -EIO; 3216 3217 if (smb3_encryption_required(tcon)) 3218 flags |= CIFS_TRANSFORM_REQ; 3219 3220 memset(&rqst, 0, sizeof(struct smb_rqst)); 3221 memset(&iov, 0, sizeof(iov)); 3222 rqst.rq_iov = iov; 3223 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE; 3224 3225 rc = SMB2_open_init(tcon, server, 3226 &rqst, oplock, oparms, path); 3227 if (rc) 3228 goto creat_exit; 3229 3230 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path, 3231 oparms->create_options, oparms->desired_access); 3232 3233 if (retries) 3234 smb2_set_replay(server, &rqst); 3235 3236 rc = cifs_send_recv(xid, ses, server, 3237 &rqst, &resp_buftype, flags, 3238 &rsp_iov); 3239 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; 3240 3241 if (rc != 0) { 3242 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); 3243 if 
(err_iov && rsp) { 3244 *err_iov = rsp_iov; 3245 *buftype = resp_buftype; 3246 resp_buftype = CIFS_NO_BUFFER; 3247 rsp = NULL; 3248 } 3249 trace_smb3_open_err(xid, tcon->tid, ses->Suid, 3250 oparms->create_options, oparms->desired_access, rc); 3251 if (rc == -EREMCHG) { 3252 pr_warn_once("server share %s deleted\n", 3253 tcon->tree_name); 3254 tcon->need_reconnect = true; 3255 } 3256 goto creat_exit; 3257 } else if (rsp == NULL) /* unlikely to happen, but safer to check */ 3258 goto creat_exit; 3259 else 3260 trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid, 3261 oparms->create_options, oparms->desired_access); 3262 3263 atomic_inc(&tcon->num_remote_opens); 3264 oparms->fid->persistent_fid = rsp->PersistentFileId; 3265 oparms->fid->volatile_fid = rsp->VolatileFileId; 3266 oparms->fid->access = oparms->desired_access; 3267 #ifdef CONFIG_CIFS_DEBUG2 3268 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId); 3269 #endif /* CIFS_DEBUG2 */ 3270 3271 if (buf) { 3272 buf->CreationTime = rsp->CreationTime; 3273 buf->LastAccessTime = rsp->LastAccessTime; 3274 buf->LastWriteTime = rsp->LastWriteTime; 3275 buf->ChangeTime = rsp->ChangeTime; 3276 buf->AllocationSize = rsp->AllocationSize; 3277 buf->EndOfFile = rsp->EndofFile; 3278 buf->Attributes = rsp->FileAttributes; 3279 buf->NumberOfLinks = cpu_to_le32(1); 3280 buf->DeletePending = 0; 3281 } 3282 3283 3284 rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch, 3285 oparms->fid->lease_key, oplock, buf, posix); 3286 creat_exit: 3287 SMB2_open_free(&rqst); 3288 free_rsp_buf(resp_buftype, rsp); 3289 3290 if (is_replayable_error(rc) && 3291 smb2_should_replay(tcon, &retries, &cur_sleep)) 3292 goto replay_again; 3293 3294 return rc; 3295 } 3296 3297 int 3298 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3299 struct smb_rqst *rqst, 3300 u64 persistent_fid, u64 volatile_fid, u32 opcode, 3301 char *in_data, u32 indatalen, 3302 __u32 max_response_size) 3303 { 3304 struct smb2_ioctl_req *req; 3305 struct kvec *iov = rqst->rq_iov; 3306 unsigned int total_len; 3307 int rc; 3308 char *in_data_buf; 3309 3310 rc = smb2_ioctl_req_init(opcode, tcon, server, 3311 (void **) &req, &total_len); 3312 if (rc) 3313 return rc; 3314 3315 if (indatalen) { 3316 /* 3317 * indatalen is usually small at a couple of bytes max, so 3318 * just allocate through generic pool 3319 */ 3320 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS); 3321 if (!in_data_buf) { 3322 cifs_small_buf_release(req); 3323 return -ENOMEM; 3324 } 3325 } 3326 3327 req->CtlCode = cpu_to_le32(opcode); 3328 req->PersistentFileId = persistent_fid; 3329 req->VolatileFileId = volatile_fid; 3330 3331 iov[0].iov_base = (char *)req; 3332 /* 3333 * If no input data, the size of ioctl struct in 3334 * protocol spec still includes a 1 byte data buffer, 3335 * but if input data passed to ioctl, we do not 3336 * want to double count this, so we do not send 3337 * the dummy one byte of data in iovec[0] if sending 3338 * input data (in iovec[1]). 
3339 */ 3340 if (indatalen) { 3341 req->InputCount = cpu_to_le32(indatalen); 3342 /* do not set InputOffset if no input data */ 3343 req->InputOffset = 3344 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer)); 3345 rqst->rq_nvec = 2; 3346 iov[0].iov_len = total_len - 1; 3347 iov[1].iov_base = in_data_buf; 3348 iov[1].iov_len = indatalen; 3349 } else { 3350 rqst->rq_nvec = 1; 3351 iov[0].iov_len = total_len; 3352 } 3353 3354 req->OutputOffset = 0; 3355 req->OutputCount = 0; /* MBZ */ 3356 3357 /* 3358 * In most cases max_response_size is set to 16K (CIFSMaxBufSize) 3359 * We Could increase default MaxOutputResponse, but that could require 3360 * more credits. Windows typically sets this smaller, but for some 3361 * ioctls it may be useful to allow server to send more. No point 3362 * limiting what the server can send as long as fits in one credit 3363 * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want 3364 * to increase this limit up in the future. 3365 * Note that for snapshot queries that servers like Azure expect that 3366 * the first query be minimal size (and just used to get the number/size 3367 * of previous versions) so response size must be specified as EXACTLY 3368 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple 3369 * of eight bytes. Currently that is the only case where we set max 3370 * response size smaller. 3371 */ 3372 req->MaxOutputResponse = cpu_to_le32(max_response_size); 3373 req->hdr.CreditCharge = 3374 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), 3375 SMB2_MAX_BUFFER_SIZE)); 3376 /* always an FSCTL (for now) */ 3377 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); 3378 3379 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ 3380 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) 3381 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 3382 3383 return 0; 3384 } 3385 3386 void 3387 SMB2_ioctl_free(struct smb_rqst *rqst) 3388 { 3389 int i; 3390 3391 if (rqst && rqst->rq_iov) { 3392 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3393 for (i = 1; i < rqst->rq_nvec; i++) 3394 if (rqst->rq_iov[i].iov_base != smb2_padding) 3395 kfree(rqst->rq_iov[i].iov_base); 3396 } 3397 } 3398 3399 3400 /* 3401 * SMB2 IOCTL is used for both IOCTLs and FSCTLs 3402 */ 3403 int 3404 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 3405 u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, 3406 u32 max_out_data_len, char **out_data, 3407 u32 *plen /* returned data len */) 3408 { 3409 struct smb_rqst rqst; 3410 struct smb2_ioctl_rsp *rsp = NULL; 3411 struct cifs_ses *ses; 3412 struct TCP_Server_Info *server; 3413 struct kvec iov[SMB2_IOCTL_IOV_SIZE]; 3414 struct kvec rsp_iov = {NULL, 0}; 3415 int resp_buftype = CIFS_NO_BUFFER; 3416 int rc = 0; 3417 int flags = 0; 3418 int retries = 0, cur_sleep = 1; 3419 3420 if (!tcon) 3421 return -EIO; 3422 3423 ses = tcon->ses; 3424 if (!ses) 3425 return -EIO; 3426 3427 replay_again: 3428 /* reinitialize for possible replay */ 3429 flags = 0; 3430 server = cifs_pick_channel(ses); 3431 3432 if (!server) 3433 return -EIO; 3434 3435 cifs_dbg(FYI, "SMB2 IOCTL\n"); 3436 3437 if (out_data != NULL) 3438 *out_data = NULL; 3439 3440 /* zero out returned data len, in case of error */ 3441 if (plen) 3442 *plen = 0; 3443 3444 if (smb3_encryption_required(tcon)) 3445 flags |= CIFS_TRANSFORM_REQ; 3446 3447 memset(&rqst, 0, sizeof(struct smb_rqst)); 3448 memset(&iov, 0, sizeof(iov)); 3449 rqst.rq_iov = iov; 3450 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; 3451 3452 rc = 
SMB2_ioctl_init(tcon, server, 3453 &rqst, persistent_fid, volatile_fid, opcode, 3454 in_data, indatalen, max_out_data_len); 3455 if (rc) 3456 goto ioctl_exit; 3457 3458 if (retries) 3459 smb2_set_replay(server, &rqst); 3460 3461 rc = cifs_send_recv(xid, ses, server, 3462 &rqst, &resp_buftype, flags, 3463 &rsp_iov); 3464 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; 3465 3466 if (rc != 0) 3467 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid, 3468 ses->Suid, 0, opcode, rc); 3469 3470 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) { 3471 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3472 goto ioctl_exit; 3473 } else if (rc == -EINVAL) { 3474 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && 3475 (opcode != FSCTL_SRV_COPYCHUNK)) { 3476 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3477 goto ioctl_exit; 3478 } 3479 } else if (rc == -E2BIG) { 3480 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) { 3481 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3482 goto ioctl_exit; 3483 } 3484 } 3485 3486 /* check if caller wants to look at return data or just return rc */ 3487 if ((plen == NULL) || (out_data == NULL)) 3488 goto ioctl_exit; 3489 3490 /* 3491 * Although unlikely to be possible for rsp to be null and rc not set, 3492 * adding check below is slightly safer long term (and quiets Coverity 3493 * warning) 3494 */ 3495 if (rsp == NULL) { 3496 rc = -EIO; 3497 goto ioctl_exit; 3498 } 3499 3500 *plen = le32_to_cpu(rsp->OutputCount); 3501 3502 /* We check for obvious errors in the output buffer length and offset */ 3503 if (*plen == 0) 3504 goto ioctl_exit; /* server returned no data */ 3505 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) { 3506 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); 3507 *plen = 0; 3508 rc = -EIO; 3509 goto ioctl_exit; 3510 } 3511 3512 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) { 3513 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen, 3514 le32_to_cpu(rsp->OutputOffset)); 3515 *plen = 0; 3516 rc = -EIO; 3517 goto ioctl_exit; 3518 } 3519 3520 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset), 3521 *plen, GFP_KERNEL); 3522 if (*out_data == NULL) { 3523 rc = -ENOMEM; 3524 goto ioctl_exit; 3525 } 3526 3527 ioctl_exit: 3528 SMB2_ioctl_free(&rqst); 3529 free_rsp_buf(resp_buftype, rsp); 3530 3531 if (is_replayable_error(rc) && 3532 smb2_should_replay(tcon, &retries, &cur_sleep)) 3533 goto replay_again; 3534 3535 return rc; 3536 } 3537 3538 /* 3539 * Individual callers to ioctl worker function follow 3540 */ 3541 3542 int 3543 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, 3544 u64 persistent_fid, u64 volatile_fid) 3545 { 3546 int rc; 3547 struct compress_ioctl fsctl_input; 3548 char *ret_data = NULL; 3549 3550 fsctl_input.CompressionState = 3551 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); 3552 3553 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, 3554 FSCTL_SET_COMPRESSION, 3555 (char *)&fsctl_input /* data input */, 3556 2 /* in data len */, CIFSMaxBufSize /* max out data */, 3557 &ret_data /* out data */, NULL); 3558 3559 cifs_dbg(FYI, "set compression rc %d\n", rc); 3560 3561 return rc; 3562 } 3563 3564 int 3565 SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3566 struct smb_rqst *rqst, 3567 u64 persistent_fid, u64 volatile_fid, bool query_attrs) 3568 { 3569 struct smb2_close_req *req; 3570 struct kvec *iov = rqst->rq_iov; 3571 unsigned int total_len; 3572 int rc; 3573 3574 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server, 3575 (void **) &req, &total_len); 3576 
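/* smb2_plain_req_init() allocated the request and assembled the SMB2 header; only the close-specific fields below still need to be filled in */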
if (rc) 3577 return rc; 3578 3579 req->PersistentFileId = persistent_fid; 3580 req->VolatileFileId = volatile_fid; 3581 if (query_attrs) 3582 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB; 3583 else 3584 req->Flags = 0; 3585 iov[0].iov_base = (char *)req; 3586 iov[0].iov_len = total_len; 3587 3588 return 0; 3589 } 3590 3591 void 3592 SMB2_close_free(struct smb_rqst *rqst) 3593 { 3594 if (rqst && rqst->rq_iov) 3595 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3596 } 3597 3598 int 3599 __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 3600 u64 persistent_fid, u64 volatile_fid, 3601 struct smb2_file_network_open_info *pbuf) 3602 { 3603 struct smb_rqst rqst; 3604 struct smb2_close_rsp *rsp = NULL; 3605 struct cifs_ses *ses = tcon->ses; 3606 struct TCP_Server_Info *server; 3607 struct kvec iov[1]; 3608 struct kvec rsp_iov; 3609 int resp_buftype = CIFS_NO_BUFFER; 3610 int rc = 0; 3611 int flags = 0; 3612 bool query_attrs = false; 3613 int retries = 0, cur_sleep = 1; 3614 3615 replay_again: 3616 /* reinitialize for possible replay */ 3617 flags = 0; 3618 query_attrs = false; 3619 server = cifs_pick_channel(ses); 3620 3621 cifs_dbg(FYI, "Close\n"); 3622 3623 if (!ses || !server) 3624 return -EIO; 3625 3626 if (smb3_encryption_required(tcon)) 3627 flags |= CIFS_TRANSFORM_REQ; 3628 3629 memset(&rqst, 0, sizeof(struct smb_rqst)); 3630 memset(&iov, 0, sizeof(iov)); 3631 rqst.rq_iov = iov; 3632 rqst.rq_nvec = 1; 3633 3634 /* check if need to ask server to return timestamps in close response */ 3635 if (pbuf) 3636 query_attrs = true; 3637 3638 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid); 3639 rc = SMB2_close_init(tcon, server, 3640 &rqst, persistent_fid, volatile_fid, 3641 query_attrs); 3642 if (rc) 3643 goto close_exit; 3644 3645 if (retries) 3646 smb2_set_replay(server, &rqst); 3647 3648 rc = cifs_send_recv(xid, ses, server, 3649 &rqst, &resp_buftype, flags, &rsp_iov); 3650 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; 3651 3652 if (rc != 0) { 3653 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); 3654 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid, 3655 rc); 3656 goto close_exit; 3657 } else { 3658 trace_smb3_close_done(xid, persistent_fid, tcon->tid, 3659 ses->Suid); 3660 if (pbuf) 3661 memcpy(&pbuf->network_open_info, 3662 &rsp->network_open_info, 3663 sizeof(pbuf->network_open_info)); 3664 atomic_dec(&tcon->num_remote_opens); 3665 } 3666 3667 close_exit: 3668 SMB2_close_free(&rqst); 3669 free_rsp_buf(resp_buftype, rsp); 3670 3671 /* retry close in a worker thread if this one is interrupted */ 3672 if (is_interrupt_error(rc)) { 3673 int tmp_rc; 3674 3675 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid, 3676 volatile_fid); 3677 if (tmp_rc) 3678 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n", 3679 persistent_fid, tmp_rc); 3680 } 3681 3682 if (is_replayable_error(rc) && 3683 smb2_should_replay(tcon, &retries, &cur_sleep)) 3684 goto replay_again; 3685 3686 return rc; 3687 } 3688 3689 int 3690 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 3691 u64 persistent_fid, u64 volatile_fid) 3692 { 3693 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL); 3694 } 3695 3696 int 3697 smb2_validate_iov(unsigned int offset, unsigned int buffer_length, 3698 struct kvec *iov, unsigned int min_buf_size) 3699 { 3700 unsigned int smb_len = iov->iov_len; 3701 char *end_of_smb = smb_len + (char *)iov->iov_base; 3702 char *begin_of_buf = offset + (char *)iov->iov_base; 3703 char *end_of_buf = begin_of_buf 
+ buffer_length; 3704 3705 3706 if (buffer_length < min_buf_size) { 3707 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n", 3708 buffer_length, min_buf_size); 3709 return -EINVAL; 3710 } 3711 3712 /* check if beyond RFC1001 maximum length */ 3713 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) { 3714 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n", 3715 buffer_length, smb_len); 3716 return -EINVAL; 3717 } 3718 3719 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) { 3720 cifs_dbg(VFS, "Invalid server response, bad offset to data\n"); 3721 return -EINVAL; 3722 } 3723 3724 return 0; 3725 } 3726 3727 /* 3728 * If SMB buffer fields are valid, copy into temporary buffer to hold result. 3729 * Caller must free buffer. 3730 */ 3731 int 3732 smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length, 3733 struct kvec *iov, unsigned int minbufsize, 3734 char *data) 3735 { 3736 char *begin_of_buf = offset + (char *)iov->iov_base; 3737 int rc; 3738 3739 if (!data) 3740 return -EINVAL; 3741 3742 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize); 3743 if (rc) 3744 return rc; 3745 3746 memcpy(data, begin_of_buf, minbufsize); 3747 3748 return 0; 3749 } 3750 3751 int 3752 SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3753 struct smb_rqst *rqst, 3754 u64 persistent_fid, u64 volatile_fid, 3755 u8 info_class, u8 info_type, u32 additional_info, 3756 size_t output_len, size_t input_len, void *input) 3757 { 3758 struct smb2_query_info_req *req; 3759 struct kvec *iov = rqst->rq_iov; 3760 unsigned int total_len; 3761 size_t len; 3762 int rc; 3763 3764 if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) || 3765 len > CIFSMaxBufSize)) 3766 return -EINVAL; 3767 3768 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server, 3769 (void **) &req, &total_len); 3770 if (rc) 3771 return rc; 3772 3773 req->InfoType = info_type; 3774 req->FileInfoClass = info_class; 3775 req->PersistentFileId = persistent_fid; 3776 req->VolatileFileId = volatile_fid; 3777 req->AdditionalInformation = cpu_to_le32(additional_info); 3778 3779 req->OutputBufferLength = cpu_to_le32(output_len); 3780 if (input_len) { 3781 req->InputBufferLength = cpu_to_le32(input_len); 3782 /* total_len for smb query request never close to le16 max */ 3783 req->InputBufferOffset = cpu_to_le16(total_len - 1); 3784 memcpy(req->Buffer, input, input_len); 3785 } 3786 3787 iov[0].iov_base = (char *)req; 3788 /* 1 for Buffer */ 3789 iov[0].iov_len = len; 3790 return 0; 3791 } 3792 3793 void 3794 SMB2_query_info_free(struct smb_rqst *rqst) 3795 { 3796 if (rqst && rqst->rq_iov) 3797 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3798 } 3799 3800 static int 3801 query_info(const unsigned int xid, struct cifs_tcon *tcon, 3802 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type, 3803 u32 additional_info, size_t output_len, size_t min_len, void **data, 3804 u32 *dlen) 3805 { 3806 struct smb_rqst rqst; 3807 struct smb2_query_info_rsp *rsp = NULL; 3808 struct kvec iov[1]; 3809 struct kvec rsp_iov; 3810 int rc = 0; 3811 int resp_buftype = CIFS_NO_BUFFER; 3812 struct cifs_ses *ses = tcon->ses; 3813 struct TCP_Server_Info *server; 3814 int flags = 0; 3815 bool allocated = false; 3816 int retries = 0, cur_sleep = 1; 3817 3818 cifs_dbg(FYI, "Query Info\n"); 3819 3820 if (!ses) 3821 return -EIO; 3822 3823 replay_again: 3824 /* reinitialize for possible replay */ 3825 flags = 0; 3826 allocated = false; 3827 server = cifs_pick_channel(ses); 
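	/*
	 * Each pass through replay_again starts from a clean slate: flags and
	 * the allocation marker are reset and cifs_pick_channel() is called
	 * again, so a replayed request (see smb2_should_replay() at the end of
	 * this function) may go out on a different, healthy channel of the
	 * same session.
	 */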
3828 3829 if (!server) 3830 return -EIO; 3831 3832 if (smb3_encryption_required(tcon)) 3833 flags |= CIFS_TRANSFORM_REQ; 3834 3835 memset(&rqst, 0, sizeof(struct smb_rqst)); 3836 memset(&iov, 0, sizeof(iov)); 3837 rqst.rq_iov = iov; 3838 rqst.rq_nvec = 1; 3839 3840 rc = SMB2_query_info_init(tcon, server, 3841 &rqst, persistent_fid, volatile_fid, 3842 info_class, info_type, additional_info, 3843 output_len, 0, NULL); 3844 if (rc) 3845 goto qinf_exit; 3846 3847 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid, 3848 ses->Suid, info_class, (__u32)info_type); 3849 3850 if (retries) 3851 smb2_set_replay(server, &rqst); 3852 3853 rc = cifs_send_recv(xid, ses, server, 3854 &rqst, &resp_buftype, flags, &rsp_iov); 3855 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; 3856 3857 if (rc) { 3858 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); 3859 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid, 3860 ses->Suid, info_class, (__u32)info_type, rc); 3861 goto qinf_exit; 3862 } 3863 3864 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid, 3865 ses->Suid, info_class, (__u32)info_type); 3866 3867 if (dlen) { 3868 *dlen = le32_to_cpu(rsp->OutputBufferLength); 3869 if (!*data) { 3870 *data = kmalloc(*dlen, GFP_KERNEL); 3871 if (!*data) { 3872 cifs_tcon_dbg(VFS, 3873 "Error %d allocating memory for acl\n", 3874 rc); 3875 *dlen = 0; 3876 rc = -ENOMEM; 3877 goto qinf_exit; 3878 } 3879 allocated = true; 3880 } 3881 } 3882 3883 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), 3884 le32_to_cpu(rsp->OutputBufferLength), 3885 &rsp_iov, dlen ? *dlen : min_len, *data); 3886 if (rc && allocated) { 3887 kfree(*data); 3888 *data = NULL; 3889 *dlen = 0; 3890 } 3891 3892 qinf_exit: 3893 SMB2_query_info_free(&rqst); 3894 free_rsp_buf(resp_buftype, rsp); 3895 3896 if (is_replayable_error(rc) && 3897 smb2_should_replay(tcon, &retries, &cur_sleep)) 3898 goto replay_again; 3899 3900 return rc; 3901 } 3902 3903 int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, 3904 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data) 3905 { 3906 return query_info(xid, tcon, persistent_fid, volatile_fid, 3907 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0, 3908 sizeof(struct smb2_file_all_info) + PATH_MAX * 2, 3909 sizeof(struct smb2_file_all_info), (void **)&data, 3910 NULL); 3911 } 3912 3913 #if 0 3914 /* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */ 3915 int 3916 SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon, 3917 u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen) 3918 { 3919 size_t output_len = sizeof(struct smb311_posix_qinfo *) + 3920 (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2); 3921 *plen = 0; 3922 3923 return query_info(xid, tcon, persistent_fid, volatile_fid, 3924 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0, 3925 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen); 3926 /* Note caller must free "data" (passed in above). 
It may be allocated in query_info call */ 3927 } 3928 #endif 3929 3930 int 3931 SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon, 3932 u64 persistent_fid, u64 volatile_fid, 3933 void **data, u32 *plen, u32 extra_info) 3934 { 3935 *plen = 0; 3936 3937 return query_info(xid, tcon, persistent_fid, volatile_fid, 3938 0, SMB2_O_INFO_SECURITY, extra_info, 3939 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen); 3940 } 3941 3942 int 3943 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon, 3944 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid) 3945 { 3946 return query_info(xid, tcon, persistent_fid, volatile_fid, 3947 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0, 3948 sizeof(struct smb2_file_internal_info), 3949 sizeof(struct smb2_file_internal_info), 3950 (void **)&uniqueid, NULL); 3951 } 3952 3953 /* 3954 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory 3955 * See MS-SMB2 2.2.35 and 2.2.36 3956 */ 3957 3958 static int 3959 SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst, 3960 struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3961 u64 persistent_fid, u64 volatile_fid, 3962 u32 completion_filter, bool watch_tree) 3963 { 3964 struct smb2_change_notify_req *req; 3965 struct kvec *iov = rqst->rq_iov; 3966 unsigned int total_len; 3967 int rc; 3968 3969 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server, 3970 (void **) &req, &total_len); 3971 if (rc) 3972 return rc; 3973 3974 req->PersistentFileId = persistent_fid; 3975 req->VolatileFileId = volatile_fid; 3976 /* See note 354 of MS-SMB2, 64K max */ 3977 req->OutputBufferLength = 3978 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE); 3979 req->CompletionFilter = cpu_to_le32(completion_filter); 3980 if (watch_tree) 3981 req->Flags = cpu_to_le16(SMB2_WATCH_TREE); 3982 else 3983 req->Flags = 0; 3984 3985 iov[0].iov_base = (char *)req; 3986 iov[0].iov_len = total_len; 3987 3988 return 0; 3989 } 3990 3991 int 3992 SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, 3993 u64 persistent_fid, u64 volatile_fid, bool watch_tree, 3994 u32 completion_filter, u32 max_out_data_len, char **out_data, 3995 u32 *plen /* returned data len */) 3996 { 3997 struct cifs_ses *ses = tcon->ses; 3998 struct TCP_Server_Info *server; 3999 struct smb_rqst rqst; 4000 struct smb2_change_notify_rsp *smb_rsp; 4001 struct kvec iov[1]; 4002 struct kvec rsp_iov = {NULL, 0}; 4003 int resp_buftype = CIFS_NO_BUFFER; 4004 int flags = 0; 4005 int rc = 0; 4006 int retries = 0, cur_sleep = 1; 4007 4008 replay_again: 4009 /* reinitialize for possible replay */ 4010 flags = 0; 4011 server = cifs_pick_channel(ses); 4012 4013 cifs_dbg(FYI, "change notify\n"); 4014 if (!ses || !server) 4015 return -EIO; 4016 4017 if (smb3_encryption_required(tcon)) 4018 flags |= CIFS_TRANSFORM_REQ; 4019 4020 memset(&rqst, 0, sizeof(struct smb_rqst)); 4021 memset(&iov, 0, sizeof(iov)); 4022 if (plen) 4023 *plen = 0; 4024 4025 rqst.rq_iov = iov; 4026 rqst.rq_nvec = 1; 4027 4028 rc = SMB2_notify_init(xid, &rqst, tcon, server, 4029 persistent_fid, volatile_fid, 4030 completion_filter, watch_tree); 4031 if (rc) 4032 goto cnotify_exit; 4033 4034 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid, 4035 (u8)watch_tree, completion_filter); 4036 4037 if (retries) 4038 smb2_set_replay(server, &rqst); 4039 4040 rc = cifs_send_recv(xid, ses, server, 4041 &rqst, &resp_buftype, flags, &rsp_iov); 4042 4043 if (rc != 0) { 4044 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE); 4045 trace_smb3_notify_err(xid, 
persistent_fid, tcon->tid, ses->Suid, 4046 (u8)watch_tree, completion_filter, rc); 4047 } else { 4048 trace_smb3_notify_done(xid, persistent_fid, tcon->tid, 4049 ses->Suid, (u8)watch_tree, completion_filter); 4050 /* validate that notify information is plausible */ 4051 if ((rsp_iov.iov_base == NULL) || 4052 (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1)) 4053 goto cnotify_exit; 4054 4055 smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base; 4056 4057 smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset), 4058 le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov, 4059 sizeof(struct file_notify_information)); 4060 4061 *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset), 4062 le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL); 4063 if (*out_data == NULL) { 4064 rc = -ENOMEM; 4065 goto cnotify_exit; 4066 } else if (plen) 4067 *plen = le32_to_cpu(smb_rsp->OutputBufferLength); 4068 } 4069 4070 cnotify_exit: 4071 if (rqst.rq_iov) 4072 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */ 4073 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4074 4075 if (is_replayable_error(rc) && 4076 smb2_should_replay(tcon, &retries, &cur_sleep)) 4077 goto replay_again; 4078 4079 return rc; 4080 } 4081 4082 4083 4084 /* 4085 * This is a no-op for now. We're not really interested in the reply, but 4086 * rather in the fact that the server sent one and that server->lstrp 4087 * gets updated. 4088 * 4089 * FIXME: maybe we should consider checking that the reply matches request? 4090 */ 4091 static void 4092 smb2_echo_callback(struct mid_q_entry *mid) 4093 { 4094 struct TCP_Server_Info *server = mid->callback_data; 4095 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; 4096 struct cifs_credits credits = { .value = 0, .instance = 0 }; 4097 4098 if (mid->mid_state == MID_RESPONSE_RECEIVED 4099 || mid->mid_state == MID_RESPONSE_MALFORMED) { 4100 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4101 credits.instance = server->reconnect_instance; 4102 } 4103 4104 release_mid(mid); 4105 add_credits(server, &credits, CIFS_ECHO_OP); 4106 } 4107 4108 static void cifs_renegotiate_iosize(struct TCP_Server_Info *server, 4109 struct cifs_tcon *tcon) 4110 { 4111 struct cifs_sb_info *cifs_sb; 4112 4113 if (server == NULL || tcon == NULL) 4114 return; 4115 4116 spin_lock(&tcon->sb_list_lock); 4117 list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) 4118 cifs_negotiate_iosize(server, cifs_sb->ctx, tcon); 4119 spin_unlock(&tcon->sb_list_lock); 4120 } 4121 4122 void smb2_reconnect_server(struct work_struct *work) 4123 { 4124 struct TCP_Server_Info *server = container_of(work, 4125 struct TCP_Server_Info, reconnect.work); 4126 struct TCP_Server_Info *pserver; 4127 struct cifs_ses *ses, *ses2; 4128 struct cifs_tcon *tcon, *tcon2; 4129 struct list_head tmp_list, tmp_ses_list; 4130 bool ses_exist = false; 4131 bool tcon_selected = false; 4132 int rc; 4133 bool resched = false; 4134 4135 /* first check if ref count has reached 0, if not inc ref count */ 4136 spin_lock(&cifs_tcp_ses_lock); 4137 if (!server->srv_count) { 4138 spin_unlock(&cifs_tcp_ses_lock); 4139 return; 4140 } 4141 server->srv_count++; 4142 spin_unlock(&cifs_tcp_ses_lock); 4143 4144 /* If server is a channel, select the primary channel */ 4145 pserver = SERVER_IS_CHAN(server) ? 
server->primary_server : server; 4146 4147 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ 4148 mutex_lock(&pserver->reconnect_mutex); 4149 4150 /* if the server is marked for termination, drop the ref count here */ 4151 if (server->terminate) { 4152 cifs_put_tcp_session(server, true); 4153 mutex_unlock(&pserver->reconnect_mutex); 4154 return; 4155 } 4156 4157 INIT_LIST_HEAD(&tmp_list); 4158 INIT_LIST_HEAD(&tmp_ses_list); 4159 cifs_dbg(FYI, "Reconnecting tcons and channels\n"); 4160 4161 spin_lock(&cifs_tcp_ses_lock); 4162 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 4163 spin_lock(&ses->ses_lock); 4164 if (ses->ses_status == SES_EXITING) { 4165 spin_unlock(&ses->ses_lock); 4166 continue; 4167 } 4168 spin_unlock(&ses->ses_lock); 4169 4170 tcon_selected = false; 4171 4172 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 4173 if (tcon->need_reconnect || tcon->need_reopen_files) { 4174 tcon->tc_count++; 4175 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, 4176 netfs_trace_tcon_ref_get_reconnect_server); 4177 list_add_tail(&tcon->rlist, &tmp_list); 4178 tcon_selected = true; 4179 } 4180 } 4181 /* 4182 * IPC has the same lifetime as its session and uses its 4183 * refcount. 4184 */ 4185 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) { 4186 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list); 4187 tcon_selected = true; 4188 cifs_smb_ses_inc_refcount(ses); 4189 } 4190 /* 4191 * handle the case where channel needs to reconnect 4192 * binding session, but tcon is healthy (some other channel 4193 * is active) 4194 */ 4195 spin_lock(&ses->chan_lock); 4196 if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) { 4197 list_add_tail(&ses->rlist, &tmp_ses_list); 4198 ses_exist = true; 4199 cifs_smb_ses_inc_refcount(ses); 4200 } 4201 spin_unlock(&ses->chan_lock); 4202 } 4203 spin_unlock(&cifs_tcp_ses_lock); 4204 4205 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { 4206 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true); 4207 if (!rc) { 4208 cifs_renegotiate_iosize(server, tcon); 4209 cifs_reopen_persistent_handles(tcon); 4210 } else 4211 resched = true; 4212 list_del_init(&tcon->rlist); 4213 if (tcon->ipc) 4214 cifs_put_smb_ses(tcon->ses); 4215 else 4216 cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server); 4217 } 4218 4219 if (!ses_exist) 4220 goto done; 4221 4222 /* allocate a dummy tcon struct used for reconnect */ 4223 tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server); 4224 if (!tcon) { 4225 resched = true; 4226 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { 4227 list_del_init(&ses->rlist); 4228 cifs_put_smb_ses(ses); 4229 } 4230 goto done; 4231 } 4232 tcon->status = TID_GOOD; 4233 tcon->dummy = true; 4234 4235 /* now reconnect sessions for necessary channels */ 4236 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { 4237 tcon->ses = ses; 4238 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true); 4239 if (rc) 4240 resched = true; 4241 list_del_init(&ses->rlist); 4242 cifs_put_smb_ses(ses); 4243 } 4244 tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server); 4245 4246 done: 4247 cifs_dbg(FYI, "Reconnecting tcons and channels finished\n"); 4248 if (resched) 4249 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ); 4250 mutex_unlock(&pserver->reconnect_mutex); 4251 4252 /* now we can safely release srv struct */ 4253 cifs_put_tcp_session(server, true); 4254 } 4255 4256 int 4257 SMB2_echo(struct TCP_Server_Info *server) 4258 { 4259 struct smb2_echo_req 
*req; 4260 int rc = 0; 4261 struct kvec iov[1]; 4262 struct smb_rqst rqst = { .rq_iov = iov, 4263 .rq_nvec = 1 }; 4264 unsigned int total_len; 4265 4266 cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id); 4267 4268 spin_lock(&server->srv_lock); 4269 if (server->ops->need_neg && 4270 server->ops->need_neg(server)) { 4271 spin_unlock(&server->srv_lock); 4272 /* No need to send echo on newly established connections */ 4273 mod_delayed_work(cifsiod_wq, &server->reconnect, 0); 4274 return rc; 4275 } 4276 spin_unlock(&server->srv_lock); 4277 4278 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, 4279 (void **)&req, &total_len); 4280 if (rc) 4281 return rc; 4282 4283 req->hdr.CreditRequest = cpu_to_le16(1); 4284 4285 iov[0].iov_len = total_len; 4286 iov[0].iov_base = (char *)req; 4287 4288 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL, 4289 server, CIFS_ECHO_OP, NULL); 4290 if (rc) 4291 cifs_dbg(FYI, "Echo request failed: %d\n", rc); 4292 4293 cifs_small_buf_release(req); 4294 return rc; 4295 } 4296 4297 void 4298 SMB2_flush_free(struct smb_rqst *rqst) 4299 { 4300 if (rqst && rqst->rq_iov) 4301 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 4302 } 4303 4304 int 4305 SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst, 4306 struct cifs_tcon *tcon, struct TCP_Server_Info *server, 4307 u64 persistent_fid, u64 volatile_fid) 4308 { 4309 struct smb2_flush_req *req; 4310 struct kvec *iov = rqst->rq_iov; 4311 unsigned int total_len; 4312 int rc; 4313 4314 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server, 4315 (void **) &req, &total_len); 4316 if (rc) 4317 return rc; 4318 4319 req->PersistentFileId = persistent_fid; 4320 req->VolatileFileId = volatile_fid; 4321 4322 iov[0].iov_base = (char *)req; 4323 iov[0].iov_len = total_len; 4324 4325 return 0; 4326 } 4327 4328 int 4329 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 4330 u64 volatile_fid) 4331 { 4332 struct cifs_ses *ses = tcon->ses; 4333 struct smb_rqst rqst; 4334 struct kvec iov[1]; 4335 struct kvec rsp_iov = {NULL, 0}; 4336 struct TCP_Server_Info *server; 4337 int resp_buftype = CIFS_NO_BUFFER; 4338 int flags = 0; 4339 int rc = 0; 4340 int retries = 0, cur_sleep = 1; 4341 4342 replay_again: 4343 /* reinitialize for possible replay */ 4344 flags = 0; 4345 server = cifs_pick_channel(ses); 4346 4347 cifs_dbg(FYI, "flush\n"); 4348 if (!ses || !(ses->server)) 4349 return -EIO; 4350 4351 if (smb3_encryption_required(tcon)) 4352 flags |= CIFS_TRANSFORM_REQ; 4353 4354 memset(&rqst, 0, sizeof(struct smb_rqst)); 4355 memset(&iov, 0, sizeof(iov)); 4356 rqst.rq_iov = iov; 4357 rqst.rq_nvec = 1; 4358 4359 rc = SMB2_flush_init(xid, &rqst, tcon, server, 4360 persistent_fid, volatile_fid); 4361 if (rc) 4362 goto flush_exit; 4363 4364 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid); 4365 4366 if (retries) 4367 smb2_set_replay(server, &rqst); 4368 4369 rc = cifs_send_recv(xid, ses, server, 4370 &rqst, &resp_buftype, flags, &rsp_iov); 4371 4372 if (rc != 0) { 4373 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); 4374 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid, 4375 rc); 4376 } else 4377 trace_smb3_flush_done(xid, persistent_fid, tcon->tid, 4378 ses->Suid); 4379 4380 flush_exit: 4381 SMB2_flush_free(&rqst); 4382 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4383 4384 if (is_replayable_error(rc) && 4385 smb2_should_replay(tcon, &retries, &cur_sleep)) 4386 goto replay_again; 4387 4388 return rc; 4389 } 4390 4391 #ifdef CONFIG_CIFS_SMB_DIRECT 
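/*
 * Decide whether a read/write payload should be carried directly over RDMA
 * (SMB Direct channel I/O) instead of inline in the SMB2 packet.  Offload is
 * only attempted when every check below passes: an established smbdirect
 * connection, no signing and no encryption (either would force the CPU to
 * touch the payload anyway), and an I/O large enough to amortize the cost of
 * registering a memory region.  For example, a multi-megabyte read on an
 * unsigned, unencrypted rdma mount is offloaded, while a small read below
 * rdma_readwrite_threshold is sent inline.
 */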
4392 static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms) 4393 { 4394 struct TCP_Server_Info *server = io_parms->server; 4395 struct cifs_tcon *tcon = io_parms->tcon; 4396 4397 /* we can only offload if we're connected */ 4398 if (!server || !tcon) 4399 return false; 4400 4401 /* we can only offload on an rdma connection */ 4402 if (!server->rdma || !server->smbd_conn) 4403 return false; 4404 4405 /* we don't support signed offload yet */ 4406 if (server->sign) 4407 return false; 4408 4409 /* we don't support encrypted offload yet */ 4410 if (smb3_encryption_required(tcon)) 4411 return false; 4412 4413 /* offload also has its overhead, so only do it if desired */ 4414 if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold) 4415 return false; 4416 4417 return true; 4418 } 4419 #endif /* CONFIG_CIFS_SMB_DIRECT */ 4420 4421 /* 4422 * To form a chain of read requests, any read requests after the first should 4423 * have the end_of_chain boolean set to true. 4424 */ 4425 static int 4426 smb2_new_read_req(void **buf, unsigned int *total_len, 4427 struct cifs_io_parms *io_parms, struct cifs_io_subrequest *rdata, 4428 unsigned int remaining_bytes, int request_type) 4429 { 4430 int rc = -EACCES; 4431 struct smb2_read_req *req = NULL; 4432 struct smb2_hdr *shdr; 4433 struct TCP_Server_Info *server = io_parms->server; 4434 4435 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server, 4436 (void **) &req, total_len); 4437 if (rc) 4438 return rc; 4439 4440 if (server == NULL) 4441 return -ECONNABORTED; 4442 4443 shdr = &req->hdr; 4444 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 4445 4446 req->PersistentFileId = io_parms->persistent_fid; 4447 req->VolatileFileId = io_parms->volatile_fid; 4448 req->ReadChannelInfoOffset = 0; /* reserved */ 4449 req->ReadChannelInfoLength = 0; /* reserved */ 4450 req->Channel = 0; /* reserved */ 4451 req->MinimumCount = 0; 4452 req->Length = cpu_to_le32(io_parms->length); 4453 req->Offset = cpu_to_le64(io_parms->offset); 4454 4455 trace_smb3_read_enter(rdata ? rdata->rreq->debug_id : 0, 4456 rdata ? rdata->subreq.debug_index : 0, 4457 rdata ? 
rdata->xid : 0, 4458 io_parms->persistent_fid, 4459 io_parms->tcon->tid, io_parms->tcon->ses->Suid, 4460 io_parms->offset, io_parms->length); 4461 #ifdef CONFIG_CIFS_SMB_DIRECT 4462 /* 4463 * If we want to do a RDMA write, fill in and append 4464 * smbdirect_buffer_descriptor_v1 to the end of read request 4465 */ 4466 if (rdata && smb3_use_rdma_offload(io_parms)) { 4467 struct smbdirect_buffer_descriptor_v1 *v1; 4468 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4469 4470 rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter, 4471 true, need_invalidate); 4472 if (!rdata->mr) 4473 return -EAGAIN; 4474 4475 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 4476 if (need_invalidate) 4477 req->Channel = SMB2_CHANNEL_RDMA_V1; 4478 req->ReadChannelInfoOffset = 4479 cpu_to_le16(offsetof(struct smb2_read_req, Buffer)); 4480 req->ReadChannelInfoLength = 4481 cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1)); 4482 v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0]; 4483 v1->offset = cpu_to_le64(rdata->mr->mr->iova); 4484 v1->token = cpu_to_le32(rdata->mr->mr->rkey); 4485 v1->length = cpu_to_le32(rdata->mr->mr->length); 4486 4487 *total_len += sizeof(*v1) - 1; 4488 } 4489 #endif 4490 if (request_type & CHAINED_REQUEST) { 4491 if (!(request_type & END_OF_CHAIN)) { 4492 /* next 8-byte aligned request */ 4493 *total_len = ALIGN(*total_len, 8); 4494 shdr->NextCommand = cpu_to_le32(*total_len); 4495 } else /* END_OF_CHAIN */ 4496 shdr->NextCommand = 0; 4497 if (request_type & RELATED_REQUEST) { 4498 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; 4499 /* 4500 * Related requests use info from previous read request 4501 * in chain. 4502 */ 4503 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF); 4504 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF); 4505 req->PersistentFileId = (u64)-1; 4506 req->VolatileFileId = (u64)-1; 4507 } 4508 } 4509 if (remaining_bytes > io_parms->length) 4510 req->RemainingBytes = cpu_to_le32(remaining_bytes); 4511 else 4512 req->RemainingBytes = 0; 4513 4514 *buf = req; 4515 return rc; 4516 } 4517 4518 static void 4519 smb2_readv_callback(struct mid_q_entry *mid) 4520 { 4521 struct cifs_io_subrequest *rdata = mid->callback_data; 4522 struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode); 4523 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 4524 struct TCP_Server_Info *server = rdata->server; 4525 struct smb2_hdr *shdr = 4526 (struct smb2_hdr *)rdata->iov[0].iov_base; 4527 struct cifs_credits credits = { 4528 .value = 0, 4529 .instance = 0, 4530 .rreq_debug_id = rdata->rreq->debug_id, 4531 .rreq_debug_index = rdata->subreq.debug_index, 4532 }; 4533 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], .rq_nvec = 1 }; 4534 unsigned int rreq_debug_id = rdata->rreq->debug_id; 4535 unsigned int subreq_debug_index = rdata->subreq.debug_index; 4536 4537 if (rdata->got_bytes) { 4538 rqst.rq_iter = rdata->subreq.io_iter; 4539 } 4540 4541 WARN_ONCE(rdata->server != mid->server, 4542 "rdata server %p != mid server %p", 4543 rdata->server, mid->server); 4544 4545 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n", 4546 __func__, mid->mid, mid->mid_state, rdata->result, 4547 rdata->got_bytes, rdata->subreq.len - rdata->subreq.transferred); 4548 4549 switch (mid->mid_state) { 4550 case MID_RESPONSE_RECEIVED: 4551 credits.value = le16_to_cpu(shdr->CreditRequest); 4552 credits.instance = server->reconnect_instance; 4553 /* result already set, check signature */ 4554 if (server->sign && !mid->decrypted) { 4555 int rc; 4556 
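			/*
			 * The payload was read straight into
			 * rdata->subreq.io_iter, so truncate the iterator to
			 * the bytes actually received and verify the SMB2
			 * signature over header and payload before trusting
			 * the data.  A bad signature is only logged here; the
			 * read itself is not failed at this point.
			 */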
4557 iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes); 4558 rc = smb2_verify_signature(&rqst, server); 4559 if (rc) 4560 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n", 4561 rc); 4562 } 4563 /* FIXME: should this be counted toward the initiating task? */ 4564 task_io_account_read(rdata->got_bytes); 4565 cifs_stats_bytes_read(tcon, rdata->got_bytes); 4566 break; 4567 case MID_REQUEST_SUBMITTED: 4568 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_req_submitted); 4569 goto do_retry; 4570 case MID_RETRY_NEEDED: 4571 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed); 4572 do_retry: 4573 __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags); 4574 rdata->result = -EAGAIN; 4575 if (server->sign && rdata->got_bytes) 4576 /* reset bytes number since we can not check a sign */ 4577 rdata->got_bytes = 0; 4578 /* FIXME: should this be counted toward the initiating task? */ 4579 task_io_account_read(rdata->got_bytes); 4580 cifs_stats_bytes_read(tcon, rdata->got_bytes); 4581 break; 4582 case MID_RESPONSE_MALFORMED: 4583 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_malformed); 4584 credits.value = le16_to_cpu(shdr->CreditRequest); 4585 credits.instance = server->reconnect_instance; 4586 rdata->result = -EIO; 4587 break; 4588 default: 4589 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_unknown); 4590 rdata->result = -EIO; 4591 break; 4592 } 4593 #ifdef CONFIG_CIFS_SMB_DIRECT 4594 /* 4595 * If this rdata has a memory registered, the MR can be freed 4596 * MR needs to be freed as soon as I/O finishes to prevent deadlock 4597 * because they have limited number and are used for future I/Os 4598 */ 4599 if (rdata->mr) { 4600 smbd_deregister_mr(rdata->mr); 4601 rdata->mr = NULL; 4602 } 4603 #endif 4604 if (rdata->result && rdata->result != -ENODATA) { 4605 cifs_stats_fail_inc(tcon, SMB2_READ_HE); 4606 trace_smb3_read_err(rdata->rreq->debug_id, 4607 rdata->subreq.debug_index, 4608 rdata->xid, 4609 rdata->req->cfile->fid.persistent_fid, 4610 tcon->tid, tcon->ses->Suid, 4611 rdata->subreq.start + rdata->subreq.transferred, 4612 rdata->subreq.len - rdata->subreq.transferred, 4613 rdata->result); 4614 } else 4615 trace_smb3_read_done(rdata->rreq->debug_id, 4616 rdata->subreq.debug_index, 4617 rdata->xid, 4618 rdata->req->cfile->fid.persistent_fid, 4619 tcon->tid, tcon->ses->Suid, 4620 rdata->subreq.start + rdata->subreq.transferred, 4621 rdata->got_bytes); 4622 4623 if (rdata->result == -ENODATA) { 4624 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags); 4625 rdata->result = 0; 4626 } else { 4627 size_t trans = rdata->subreq.transferred + rdata->got_bytes; 4628 if (trans < rdata->subreq.len && 4629 rdata->subreq.start + trans == ictx->remote_i_size) { 4630 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags); 4631 rdata->result = 0; 4632 } 4633 if (rdata->got_bytes) 4634 __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags); 4635 } 4636 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value, 4637 server->credits, server->in_flight, 4638 0, cifs_trace_rw_credits_read_response_clear); 4639 rdata->credits.value = 0; 4640 rdata->subreq.error = rdata->result; 4641 rdata->subreq.transferred += rdata->got_bytes; 4642 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress); 4643 netfs_read_subreq_terminated(&rdata->subreq); 4644 release_mid(mid); 4645 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0, 4646 server->credits, server->in_flight, 4647 credits.value, cifs_trace_rw_credits_read_response_add); 4648 
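	/*
	 * Return the credits granted by the server (the CreditRequest field of
	 * a response header carries the grant) to the connection-wide pool.
	 * This happens only after the netfs subrequest has been terminated and
	 * the mid released, and is the counterpart of the credit charge taken
	 * when the read was sent.
	 */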
add_credits(server, &credits, 0); 4649 } 4650 4651 /* smb2_async_readv - send an async read, and set up mid to handle result */ 4652 int 4653 smb2_async_readv(struct cifs_io_subrequest *rdata) 4654 { 4655 int rc, flags = 0; 4656 char *buf; 4657 struct netfs_io_subrequest *subreq = &rdata->subreq; 4658 struct smb2_hdr *shdr; 4659 struct cifs_io_parms io_parms; 4660 struct smb_rqst rqst = { .rq_iov = rdata->iov, 4661 .rq_nvec = 1 }; 4662 struct TCP_Server_Info *server; 4663 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 4664 unsigned int total_len; 4665 int credit_request; 4666 4667 cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n", 4668 __func__, subreq->start, subreq->len); 4669 4670 if (!rdata->server) 4671 rdata->server = cifs_pick_channel(tcon->ses); 4672 4673 io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink); 4674 io_parms.server = server = rdata->server; 4675 io_parms.offset = subreq->start + subreq->transferred; 4676 io_parms.length = subreq->len - subreq->transferred; 4677 io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid; 4678 io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid; 4679 io_parms.pid = rdata->req->pid; 4680 4681 rc = smb2_new_read_req( 4682 (void **) &buf, &total_len, &io_parms, rdata, 0, 0); 4683 if (rc) 4684 return rc; 4685 4686 if (smb3_encryption_required(io_parms.tcon)) 4687 flags |= CIFS_TRANSFORM_REQ; 4688 4689 rdata->iov[0].iov_base = buf; 4690 rdata->iov[0].iov_len = total_len; 4691 rdata->got_bytes = 0; 4692 rdata->result = 0; 4693 4694 shdr = (struct smb2_hdr *)buf; 4695 4696 if (rdata->credits.value > 0) { 4697 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length, 4698 SMB2_MAX_BUFFER_SIZE)); 4699 credit_request = le16_to_cpu(shdr->CreditCharge) + 8; 4700 if (server->credits >= server->max_credits) 4701 shdr->CreditRequest = cpu_to_le16(0); 4702 else 4703 shdr->CreditRequest = cpu_to_le16( 4704 min_t(int, server->max_credits - 4705 server->credits, credit_request)); 4706 4707 rc = adjust_credits(server, rdata, cifs_trace_rw_credits_call_readv_adjust); 4708 if (rc) 4709 goto async_readv_out; 4710 4711 flags |= CIFS_HAS_CREDITS; 4712 } 4713 4714 rc = cifs_call_async(server, &rqst, 4715 cifs_readv_receive, smb2_readv_callback, 4716 smb3_handle_read_data, rdata, flags, 4717 &rdata->credits); 4718 if (rc) { 4719 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); 4720 trace_smb3_read_err(rdata->rreq->debug_id, 4721 subreq->debug_index, 4722 rdata->xid, io_parms.persistent_fid, 4723 io_parms.tcon->tid, 4724 io_parms.tcon->ses->Suid, 4725 io_parms.offset, 4726 subreq->len - subreq->transferred, rc); 4727 } 4728 4729 async_readv_out: 4730 cifs_small_buf_release(buf); 4731 return rc; 4732 } 4733 4734 int 4735 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, 4736 unsigned int *nbytes, char **buf, int *buf_type) 4737 { 4738 struct smb_rqst rqst; 4739 int resp_buftype, rc; 4740 struct smb2_read_req *req = NULL; 4741 struct smb2_read_rsp *rsp = NULL; 4742 struct kvec iov[1]; 4743 struct kvec rsp_iov; 4744 unsigned int total_len; 4745 int flags = CIFS_LOG_ERROR; 4746 struct cifs_ses *ses = io_parms->tcon->ses; 4747 4748 if (!io_parms->server) 4749 io_parms->server = cifs_pick_channel(io_parms->tcon->ses); 4750 4751 *nbytes = 0; 4752 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0); 4753 if (rc) 4754 return rc; 4755 4756 if (smb3_encryption_required(io_parms->tcon)) 4757 flags |= CIFS_TRANSFORM_REQ; 4758 4759 iov[0].iov_base = (char *)req; 4760 iov[0].iov_len = total_len; 4761 4762 
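	/*
	 * This is the synchronous read path: smb2_new_read_req() was called
	 * with rdata == NULL, so no RDMA channel descriptor is appended and
	 * the data comes back inline in the response, to be copied out (or the
	 * response buffer handed to the caller) below.
	 */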
memset(&rqst, 0, sizeof(struct smb_rqst)); 4763 rqst.rq_iov = iov; 4764 rqst.rq_nvec = 1; 4765 4766 rc = cifs_send_recv(xid, ses, io_parms->server, 4767 &rqst, &resp_buftype, flags, &rsp_iov); 4768 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; 4769 4770 if (rc) { 4771 if (rc != -ENODATA) { 4772 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 4773 cifs_dbg(VFS, "Send error in read = %d\n", rc); 4774 trace_smb3_read_err(0, 0, xid, 4775 req->PersistentFileId, 4776 io_parms->tcon->tid, ses->Suid, 4777 io_parms->offset, io_parms->length, 4778 rc); 4779 } else 4780 trace_smb3_read_done(0, 0, xid, 4781 req->PersistentFileId, io_parms->tcon->tid, 4782 ses->Suid, io_parms->offset, 0); 4783 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4784 cifs_small_buf_release(req); 4785 return rc == -ENODATA ? 0 : rc; 4786 } else 4787 trace_smb3_read_done(0, 0, xid, 4788 req->PersistentFileId, 4789 io_parms->tcon->tid, ses->Suid, 4790 io_parms->offset, io_parms->length); 4791 4792 cifs_small_buf_release(req); 4793 4794 *nbytes = le32_to_cpu(rsp->DataLength); 4795 if ((*nbytes > CIFS_MAX_MSGSIZE) || 4796 (*nbytes > io_parms->length)) { 4797 cifs_dbg(FYI, "bad length %d for count %d\n", 4798 *nbytes, io_parms->length); 4799 rc = -EIO; 4800 *nbytes = 0; 4801 } 4802 4803 if (*buf) { 4804 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes); 4805 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4806 } else if (resp_buftype != CIFS_NO_BUFFER) { 4807 *buf = rsp_iov.iov_base; 4808 if (resp_buftype == CIFS_SMALL_BUFFER) 4809 *buf_type = CIFS_SMALL_BUFFER; 4810 else if (resp_buftype == CIFS_LARGE_BUFFER) 4811 *buf_type = CIFS_LARGE_BUFFER; 4812 } 4813 return rc; 4814 } 4815 4816 /* 4817 * Check the mid_state and signature on received buffer (if any), and queue the 4818 * workqueue completion task. 4819 */ 4820 static void 4821 smb2_writev_callback(struct mid_q_entry *mid) 4822 { 4823 struct cifs_io_subrequest *wdata = mid->callback_data; 4824 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 4825 struct TCP_Server_Info *server = wdata->server; 4826 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; 4827 struct cifs_credits credits = { 4828 .value = 0, 4829 .instance = 0, 4830 .rreq_debug_id = wdata->rreq->debug_id, 4831 .rreq_debug_index = wdata->subreq.debug_index, 4832 }; 4833 unsigned int rreq_debug_id = wdata->rreq->debug_id; 4834 unsigned int subreq_debug_index = wdata->subreq.debug_index; 4835 ssize_t result = 0; 4836 size_t written; 4837 4838 WARN_ONCE(wdata->server != mid->server, 4839 "wdata server %p != mid server %p", 4840 wdata->server, mid->server); 4841 4842 switch (mid->mid_state) { 4843 case MID_RESPONSE_RECEIVED: 4844 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress); 4845 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4846 credits.instance = server->reconnect_instance; 4847 result = smb2_check_receive(mid, server, 0); 4848 if (result != 0) { 4849 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_bad); 4850 break; 4851 } 4852 4853 written = le32_to_cpu(rsp->DataLength); 4854 /* 4855 * Mask off high 16 bits when bytes written as returned 4856 * by the server is greater than bytes requested by the 4857 * client. OS/2 servers are known to set incorrect 4858 * CountHigh values. 
4859 */ 4860 if (written > wdata->subreq.len) 4861 written &= 0xFFFF; 4862 4863 cifs_stats_bytes_written(tcon, written); 4864 4865 if (written < wdata->subreq.len) { 4866 wdata->result = -ENOSPC; 4867 } else if (written > 0) { 4868 wdata->subreq.len = written; 4869 __set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags); 4870 } 4871 break; 4872 case MID_REQUEST_SUBMITTED: 4873 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_req_submitted); 4874 __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags); 4875 result = -EAGAIN; 4876 break; 4877 case MID_RETRY_NEEDED: 4878 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed); 4879 __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags); 4880 result = -EAGAIN; 4881 break; 4882 case MID_RESPONSE_MALFORMED: 4883 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_malformed); 4884 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4885 credits.instance = server->reconnect_instance; 4886 result = -EIO; 4887 break; 4888 default: 4889 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_unknown); 4890 result = -EIO; 4891 break; 4892 } 4893 #ifdef CONFIG_CIFS_SMB_DIRECT 4894 /* 4895 * If this wdata has a memory registered, the MR can be freed 4896 * The number of MRs available is limited, it's important to recover 4897 * used MR as soon as I/O is finished. Hold MR longer in the later 4898 * I/O process can possibly result in I/O deadlock due to lack of MR 4899 * to send request on I/O retry 4900 */ 4901 if (wdata->mr) { 4902 smbd_deregister_mr(wdata->mr); 4903 wdata->mr = NULL; 4904 } 4905 #endif 4906 if (result) { 4907 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 4908 trace_smb3_write_err(wdata->rreq->debug_id, 4909 wdata->subreq.debug_index, 4910 wdata->xid, 4911 wdata->req->cfile->fid.persistent_fid, 4912 tcon->tid, tcon->ses->Suid, wdata->subreq.start, 4913 wdata->subreq.len, wdata->result); 4914 if (wdata->result == -ENOSPC) 4915 pr_warn_once("Out of space writing to %s\n", 4916 tcon->tree_name); 4917 } else 4918 trace_smb3_write_done(wdata->rreq->debug_id, 4919 wdata->subreq.debug_index, 4920 wdata->xid, 4921 wdata->req->cfile->fid.persistent_fid, 4922 tcon->tid, tcon->ses->Suid, 4923 wdata->subreq.start, wdata->subreq.len); 4924 4925 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, wdata->credits.value, 4926 server->credits, server->in_flight, 4927 0, cifs_trace_rw_credits_write_response_clear); 4928 wdata->credits.value = 0; 4929 cifs_write_subrequest_terminated(wdata, result ?: written); 4930 release_mid(mid); 4931 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0, 4932 server->credits, server->in_flight, 4933 credits.value, cifs_trace_rw_credits_write_response_add); 4934 add_credits(server, &credits, 0); 4935 } 4936 4937 /* smb2_async_writev - send an async write, and set up mid to handle result */ 4938 void 4939 smb2_async_writev(struct cifs_io_subrequest *wdata) 4940 { 4941 int rc = -EACCES, flags = 0; 4942 struct smb2_write_req *req = NULL; 4943 struct smb2_hdr *shdr; 4944 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 4945 struct TCP_Server_Info *server = wdata->server; 4946 struct kvec iov[1]; 4947 struct smb_rqst rqst = { }; 4948 unsigned int total_len, xid = wdata->xid; 4949 struct cifs_io_parms _io_parms; 4950 struct cifs_io_parms *io_parms = NULL; 4951 int credit_request; 4952 4953 /* 4954 * in future we may get cifs_io_parms passed in from the caller, 4955 * but for now we construct it here... 
4956 */ 4957 _io_parms = (struct cifs_io_parms) { 4958 .tcon = tcon, 4959 .server = server, 4960 .offset = wdata->subreq.start, 4961 .length = wdata->subreq.len, 4962 .persistent_fid = wdata->req->cfile->fid.persistent_fid, 4963 .volatile_fid = wdata->req->cfile->fid.volatile_fid, 4964 .pid = wdata->req->pid, 4965 }; 4966 io_parms = &_io_parms; 4967 4968 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server, 4969 (void **) &req, &total_len); 4970 if (rc) 4971 goto out; 4972 4973 rqst.rq_iov = iov; 4974 rqst.rq_iter = wdata->subreq.io_iter; 4975 4976 rqst.rq_iov[0].iov_len = total_len - 1; 4977 rqst.rq_iov[0].iov_base = (char *)req; 4978 rqst.rq_nvec += 1; 4979 4980 if (smb3_encryption_required(tcon)) 4981 flags |= CIFS_TRANSFORM_REQ; 4982 4983 shdr = (struct smb2_hdr *)req; 4984 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 4985 4986 req->PersistentFileId = io_parms->persistent_fid; 4987 req->VolatileFileId = io_parms->volatile_fid; 4988 req->WriteChannelInfoOffset = 0; 4989 req->WriteChannelInfoLength = 0; 4990 req->Channel = SMB2_CHANNEL_NONE; 4991 req->Length = cpu_to_le32(io_parms->length); 4992 req->Offset = cpu_to_le64(io_parms->offset); 4993 req->DataOffset = cpu_to_le16( 4994 offsetof(struct smb2_write_req, Buffer)); 4995 req->RemainingBytes = 0; 4996 4997 trace_smb3_write_enter(wdata->rreq->debug_id, 4998 wdata->subreq.debug_index, 4999 wdata->xid, 5000 io_parms->persistent_fid, 5001 io_parms->tcon->tid, 5002 io_parms->tcon->ses->Suid, 5003 io_parms->offset, 5004 io_parms->length); 5005 5006 #ifdef CONFIG_CIFS_SMB_DIRECT 5007 /* 5008 * If we want to do a server RDMA read, fill in and append 5009 * smbdirect_buffer_descriptor_v1 to the end of write request 5010 */ 5011 if (smb3_use_rdma_offload(io_parms)) { 5012 struct smbdirect_buffer_descriptor_v1 *v1; 5013 bool need_invalidate = server->dialect == SMB30_PROT_ID; 5014 5015 wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter, 5016 false, need_invalidate); 5017 if (!wdata->mr) { 5018 rc = -EAGAIN; 5019 goto async_writev_out; 5020 } 5021 /* For RDMA read, I/O size is in RemainingBytes not in Length */ 5022 req->RemainingBytes = req->Length; 5023 req->Length = 0; 5024 req->DataOffset = 0; 5025 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 5026 if (need_invalidate) 5027 req->Channel = SMB2_CHANNEL_RDMA_V1; 5028 req->WriteChannelInfoOffset = 5029 cpu_to_le16(offsetof(struct smb2_write_req, Buffer)); 5030 req->WriteChannelInfoLength = 5031 cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1)); 5032 v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0]; 5033 v1->offset = cpu_to_le64(wdata->mr->mr->iova); 5034 v1->token = cpu_to_le32(wdata->mr->mr->rkey); 5035 v1->length = cpu_to_le32(wdata->mr->mr->length); 5036 5037 rqst.rq_iov[0].iov_len += sizeof(*v1); 5038 5039 /* 5040 * We keep wdata->subreq.io_iter, 5041 * but we have to truncate rqst.rq_iter 5042 */ 5043 iov_iter_truncate(&rqst.rq_iter, 0); 5044 } 5045 #endif 5046 5047 if (wdata->subreq.retry_count > 0) 5048 smb2_set_replay(server, &rqst); 5049 5050 cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n", 5051 io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter)); 5052 5053 if (wdata->credits.value > 0) { 5054 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len, 5055 SMB2_MAX_BUFFER_SIZE)); 5056 credit_request = le16_to_cpu(shdr->CreditCharge) + 8; 5057 if (server->credits >= server->max_credits) 5058 shdr->CreditRequest = cpu_to_le16(0); 5059 else 5060 shdr->CreditRequest = cpu_to_le16( 5061 min_t(int, 
server->max_credits - 5062 server->credits, credit_request)); 5063 5064 rc = adjust_credits(server, wdata, cifs_trace_rw_credits_call_writev_adjust); 5065 if (rc) 5066 goto async_writev_out; 5067 5068 flags |= CIFS_HAS_CREDITS; 5069 } 5070 5071 /* XXX: compression + encryption is unsupported for now */ 5072 if (((flags & CIFS_TRANSFORM_REQ) != CIFS_TRANSFORM_REQ) && should_compress(tcon, &rqst)) 5073 flags |= CIFS_COMPRESS_REQ; 5074 5075 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL, 5076 wdata, flags, &wdata->credits); 5077 /* Can't touch wdata if rc == 0 */ 5078 if (rc) { 5079 trace_smb3_write_err(wdata->rreq->debug_id, 5080 wdata->subreq.debug_index, 5081 xid, 5082 io_parms->persistent_fid, 5083 io_parms->tcon->tid, 5084 io_parms->tcon->ses->Suid, 5085 io_parms->offset, 5086 io_parms->length, 5087 rc); 5088 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 5089 } 5090 5091 async_writev_out: 5092 cifs_small_buf_release(req); 5093 out: 5094 if (rc) { 5095 trace_smb3_rw_credits(wdata->rreq->debug_id, 5096 wdata->subreq.debug_index, 5097 wdata->credits.value, 5098 server->credits, server->in_flight, 5099 -(int)wdata->credits.value, 5100 cifs_trace_rw_credits_write_response_clear); 5101 add_credits_and_wake_if(wdata->server, &wdata->credits, 0); 5102 cifs_write_subrequest_terminated(wdata, rc); 5103 } 5104 } 5105 5106 /* 5107 * SMB2_write function gets iov pointer to kvec array with n_vec as a length. 5108 * The length field from io_parms must be at least 1 and indicates a number of 5109 * elements with data to write that begins with position 1 in iov array. All 5110 * data length is specified by count. 5111 */ 5112 int 5113 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, 5114 unsigned int *nbytes, struct kvec *iov, int n_vec) 5115 { 5116 struct smb_rqst rqst; 5117 int rc = 0; 5118 struct smb2_write_req *req = NULL; 5119 struct smb2_write_rsp *rsp = NULL; 5120 int resp_buftype; 5121 struct kvec rsp_iov; 5122 int flags = 0; 5123 unsigned int total_len; 5124 struct TCP_Server_Info *server; 5125 int retries = 0, cur_sleep = 1; 5126 5127 replay_again: 5128 /* reinitialize for possible replay */ 5129 flags = 0; 5130 *nbytes = 0; 5131 if (!io_parms->server) 5132 io_parms->server = cifs_pick_channel(io_parms->tcon->ses); 5133 server = io_parms->server; 5134 if (server == NULL) 5135 return -ECONNABORTED; 5136 5137 if (n_vec < 1) 5138 return rc; 5139 5140 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server, 5141 (void **) &req, &total_len); 5142 if (rc) 5143 return rc; 5144 5145 if (smb3_encryption_required(io_parms->tcon)) 5146 flags |= CIFS_TRANSFORM_REQ; 5147 5148 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 5149 5150 req->PersistentFileId = io_parms->persistent_fid; 5151 req->VolatileFileId = io_parms->volatile_fid; 5152 req->WriteChannelInfoOffset = 0; 5153 req->WriteChannelInfoLength = 0; 5154 req->Channel = 0; 5155 req->Length = cpu_to_le32(io_parms->length); 5156 req->Offset = cpu_to_le64(io_parms->offset); 5157 req->DataOffset = cpu_to_le16( 5158 offsetof(struct smb2_write_req, Buffer)); 5159 req->RemainingBytes = 0; 5160 5161 trace_smb3_write_enter(0, 0, xid, io_parms->persistent_fid, 5162 io_parms->tcon->tid, io_parms->tcon->ses->Suid, 5163 io_parms->offset, io_parms->length); 5164 5165 iov[0].iov_base = (char *)req; 5166 /* 1 for Buffer */ 5167 iov[0].iov_len = total_len - 1; 5168 5169 memset(&rqst, 0, sizeof(struct smb_rqst)); 5170 rqst.rq_iov = iov; 5171 rqst.rq_nvec = n_vec + 1; 5172 5173 if (retries) 5174 smb2_set_replay(server, 
&rqst); 5175 5176 rc = cifs_send_recv(xid, io_parms->tcon->ses, server, 5177 &rqst, 5178 &resp_buftype, flags, &rsp_iov); 5179 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; 5180 5181 if (rc) { 5182 trace_smb3_write_err(0, 0, xid, 5183 req->PersistentFileId, 5184 io_parms->tcon->tid, 5185 io_parms->tcon->ses->Suid, 5186 io_parms->offset, io_parms->length, rc); 5187 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE); 5188 cifs_dbg(VFS, "Send error in write = %d\n", rc); 5189 } else { 5190 *nbytes = le32_to_cpu(rsp->DataLength); 5191 cifs_stats_bytes_written(io_parms->tcon, *nbytes); 5192 trace_smb3_write_done(0, 0, xid, 5193 req->PersistentFileId, 5194 io_parms->tcon->tid, 5195 io_parms->tcon->ses->Suid, 5196 io_parms->offset, *nbytes); 5197 } 5198 5199 cifs_small_buf_release(req); 5200 free_rsp_buf(resp_buftype, rsp); 5201 5202 if (is_replayable_error(rc) && 5203 smb2_should_replay(io_parms->tcon, &retries, &cur_sleep)) 5204 goto replay_again; 5205 5206 return rc; 5207 } 5208 5209 int posix_info_sid_size(const void *beg, const void *end) 5210 { 5211 size_t subauth; 5212 int total; 5213 5214 if (beg + 1 > end) 5215 return -1; 5216 5217 subauth = *(u8 *)(beg+1); 5218 if (subauth < 1 || subauth > 15) 5219 return -1; 5220 5221 total = 1 + 1 + 6 + 4*subauth; 5222 if (beg + total > end) 5223 return -1; 5224 5225 return total; 5226 } 5227 5228 int posix_info_parse(const void *beg, const void *end, 5229 struct smb2_posix_info_parsed *out) 5230 5231 { 5232 int total_len = 0; 5233 int owner_len, group_len; 5234 int name_len; 5235 const void *owner_sid; 5236 const void *group_sid; 5237 const void *name; 5238 5239 /* if no end bound given, assume payload to be correct */ 5240 if (!end) { 5241 const struct smb2_posix_info *p = beg; 5242 5243 end = beg + le32_to_cpu(p->NextEntryOffset); 5244 /* last element will have a 0 offset, pick a sensible bound */ 5245 if (end == beg) 5246 end += 0xFFFF; 5247 } 5248 5249 /* check base buf */ 5250 if (beg + sizeof(struct smb2_posix_info) > end) 5251 return -1; 5252 total_len = sizeof(struct smb2_posix_info); 5253 5254 /* check owner sid */ 5255 owner_sid = beg + total_len; 5256 owner_len = posix_info_sid_size(owner_sid, end); 5257 if (owner_len < 0) 5258 return -1; 5259 total_len += owner_len; 5260 5261 /* check group sid */ 5262 group_sid = beg + total_len; 5263 group_len = posix_info_sid_size(group_sid, end); 5264 if (group_len < 0) 5265 return -1; 5266 total_len += group_len; 5267 5268 /* check name len */ 5269 if (beg + total_len + 4 > end) 5270 return -1; 5271 name_len = le32_to_cpu(*(__le32 *)(beg + total_len)); 5272 if (name_len < 1 || name_len > 0xFFFF) 5273 return -1; 5274 total_len += 4; 5275 5276 /* check name */ 5277 name = beg + total_len; 5278 if (name + name_len > end) 5279 return -1; 5280 total_len += name_len; 5281 5282 if (out) { 5283 out->base = beg; 5284 out->size = total_len; 5285 out->name_len = name_len; 5286 out->name = name; 5287 memcpy(&out->owner, owner_sid, owner_len); 5288 memcpy(&out->group, group_sid, group_len); 5289 } 5290 return total_len; 5291 } 5292 5293 static int posix_info_extra_size(const void *beg, const void *end) 5294 { 5295 int len = posix_info_parse(beg, end, NULL); 5296 5297 if (len < 0) 5298 return -1; 5299 return len - sizeof(struct smb2_posix_info); 5300 } 5301 5302 static unsigned int 5303 num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry, 5304 size_t size) 5305 { 5306 int len; 5307 unsigned int entrycount = 0; 5308 unsigned int next_offset = 0; 5309 char *entryptr; 5310 
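	/*
	 * Walk the directory entries returned by the server exactly once,
	 * checking at every step that NextEntryOffset and the per-entry length
	 * stay inside [bufstart, end_of_buf), so a malformed reply cannot
	 * drive the parser past the end of the response frame.  POSIX info
	 * entries are variable sized (SIDs plus name), which is why
	 * posix_info_extra_size() is used instead of FileNameLength for that
	 * info level.
	 */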
FILE_DIRECTORY_INFO *dir_info; 5311 5312 if (bufstart == NULL) 5313 return 0; 5314 5315 entryptr = bufstart; 5316 5317 while (1) { 5318 if (entryptr + next_offset < entryptr || 5319 entryptr + next_offset > end_of_buf || 5320 entryptr + next_offset + size > end_of_buf) { 5321 cifs_dbg(VFS, "malformed search entry would overflow\n"); 5322 break; 5323 } 5324 5325 entryptr = entryptr + next_offset; 5326 dir_info = (FILE_DIRECTORY_INFO *)entryptr; 5327 5328 if (infotype == SMB_FIND_FILE_POSIX_INFO) 5329 len = posix_info_extra_size(entryptr, end_of_buf); 5330 else 5331 len = le32_to_cpu(dir_info->FileNameLength); 5332 5333 if (len < 0 || 5334 entryptr + len < entryptr || 5335 entryptr + len > end_of_buf || 5336 entryptr + len + size > end_of_buf) { 5337 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", 5338 end_of_buf); 5339 break; 5340 } 5341 5342 *lastentry = entryptr; 5343 entrycount++; 5344 5345 next_offset = le32_to_cpu(dir_info->NextEntryOffset); 5346 if (!next_offset) 5347 break; 5348 } 5349 5350 return entrycount; 5351 } 5352 5353 /* 5354 * Readdir/FindFirst 5355 */ 5356 int SMB2_query_directory_init(const unsigned int xid, 5357 struct cifs_tcon *tcon, 5358 struct TCP_Server_Info *server, 5359 struct smb_rqst *rqst, 5360 u64 persistent_fid, u64 volatile_fid, 5361 int index, int info_level) 5362 { 5363 struct smb2_query_directory_req *req; 5364 unsigned char *bufptr; 5365 __le16 asteriks = cpu_to_le16('*'); 5366 unsigned int output_size = CIFSMaxBufSize - 5367 MAX_SMB2_CREATE_RESPONSE_SIZE - 5368 MAX_SMB2_CLOSE_RESPONSE_SIZE; 5369 unsigned int total_len; 5370 struct kvec *iov = rqst->rq_iov; 5371 int len, rc; 5372 5373 rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server, 5374 (void **) &req, &total_len); 5375 if (rc) 5376 return rc; 5377 5378 switch (info_level) { 5379 case SMB_FIND_FILE_DIRECTORY_INFO: 5380 req->FileInformationClass = FILE_DIRECTORY_INFORMATION; 5381 break; 5382 case SMB_FIND_FILE_ID_FULL_DIR_INFO: 5383 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION; 5384 break; 5385 case SMB_FIND_FILE_POSIX_INFO: 5386 req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO; 5387 break; 5388 case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5389 req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION; 5390 break; 5391 default: 5392 cifs_tcon_dbg(VFS, "info level %u isn't supported\n", 5393 info_level); 5394 return -EINVAL; 5395 } 5396 5397 req->FileIndex = cpu_to_le32(index); 5398 req->PersistentFileId = persistent_fid; 5399 req->VolatileFileId = volatile_fid; 5400 5401 len = 0x2; 5402 bufptr = req->Buffer; 5403 memcpy(bufptr, &asteriks, len); 5404 5405 req->FileNameOffset = 5406 cpu_to_le16(sizeof(struct smb2_query_directory_req)); 5407 req->FileNameLength = cpu_to_le16(len); 5408 /* 5409 * BB could be 30 bytes or so longer if we used SMB2 specific 5410 * buffer lengths, but this is safe and close enough. 
5411 */ 5412 output_size = min_t(unsigned int, output_size, server->maxBuf); 5413 output_size = min_t(unsigned int, output_size, 2 << 15); 5414 req->OutputBufferLength = cpu_to_le32(output_size); 5415 5416 iov[0].iov_base = (char *)req; 5417 /* 1 for Buffer */ 5418 iov[0].iov_len = total_len - 1; 5419 5420 iov[1].iov_base = (char *)(req->Buffer); 5421 iov[1].iov_len = len; 5422 5423 trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid, 5424 tcon->ses->Suid, index, output_size); 5425 5426 return 0; 5427 } 5428 5429 void SMB2_query_directory_free(struct smb_rqst *rqst) 5430 { 5431 if (rqst && rqst->rq_iov) { 5432 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 5433 } 5434 } 5435 5436 int 5437 smb2_parse_query_directory(struct cifs_tcon *tcon, 5438 struct kvec *rsp_iov, 5439 int resp_buftype, 5440 struct cifs_search_info *srch_inf) 5441 { 5442 struct smb2_query_directory_rsp *rsp; 5443 size_t info_buf_size; 5444 char *end_of_smb; 5445 int rc; 5446 5447 rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base; 5448 5449 switch (srch_inf->info_level) { 5450 case SMB_FIND_FILE_DIRECTORY_INFO: 5451 info_buf_size = sizeof(FILE_DIRECTORY_INFO); 5452 break; 5453 case SMB_FIND_FILE_ID_FULL_DIR_INFO: 5454 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO); 5455 break; 5456 case SMB_FIND_FILE_POSIX_INFO: 5457 /* note that posix payload are variable size */ 5458 info_buf_size = sizeof(struct smb2_posix_info); 5459 break; 5460 case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5461 info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO); 5462 break; 5463 default: 5464 cifs_tcon_dbg(VFS, "info level %u isn't supported\n", 5465 srch_inf->info_level); 5466 return -EINVAL; 5467 } 5468 5469 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), 5470 le32_to_cpu(rsp->OutputBufferLength), rsp_iov, 5471 info_buf_size); 5472 if (rc) { 5473 cifs_tcon_dbg(VFS, "bad info payload"); 5474 return rc; 5475 } 5476 5477 srch_inf->unicode = true; 5478 5479 if (srch_inf->ntwrk_buf_start) { 5480 if (srch_inf->smallBuf) 5481 cifs_small_buf_release(srch_inf->ntwrk_buf_start); 5482 else 5483 cifs_buf_release(srch_inf->ntwrk_buf_start); 5484 } 5485 srch_inf->ntwrk_buf_start = (char *)rsp; 5486 srch_inf->srch_entries_start = srch_inf->last_entry = 5487 (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset); 5488 end_of_smb = rsp_iov->iov_len + (char *)rsp; 5489 5490 srch_inf->entries_in_buffer = num_entries( 5491 srch_inf->info_level, 5492 srch_inf->srch_entries_start, 5493 end_of_smb, 5494 &srch_inf->last_entry, 5495 info_buf_size); 5496 5497 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer; 5498 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n", 5499 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry, 5500 srch_inf->srch_entries_start, srch_inf->last_entry); 5501 if (resp_buftype == CIFS_LARGE_BUFFER) 5502 srch_inf->smallBuf = false; 5503 else if (resp_buftype == CIFS_SMALL_BUFFER) 5504 srch_inf->smallBuf = true; 5505 else 5506 cifs_tcon_dbg(VFS, "Invalid search buffer type\n"); 5507 5508 return 0; 5509 } 5510 5511 int 5512 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, 5513 u64 persistent_fid, u64 volatile_fid, int index, 5514 struct cifs_search_info *srch_inf) 5515 { 5516 struct smb_rqst rqst; 5517 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE]; 5518 struct smb2_query_directory_rsp *rsp = NULL; 5519 int resp_buftype = CIFS_NO_BUFFER; 5520 struct kvec rsp_iov; 5521 int rc = 0; 5522 struct cifs_ses *ses = tcon->ses; 5523 struct TCP_Server_Info *server; 5524 
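	/*
	 * Buffer ownership: on success smb2_parse_query_directory() keeps the
	 * response buffer as srch_inf->ntwrk_buf_start and resp_buftype is
	 * reset to CIFS_NO_BUFFER below, so qdir_exit does not free it; the
	 * previous search buffer, if any, is released inside the parse helper.
	 * STATUS_NO_MORE_FILES is not treated as an error, it simply marks
	 * endOfSearch.
	 */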
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid, int index,
		     struct cifs_search_info *srch_inf)
{
	struct smb_rqst rqst;
	struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
	struct smb2_query_directory_rsp *rsp = NULL;
	int resp_buftype = CIFS_NO_BUFFER;
	struct kvec rsp_iov;
	int rc = 0;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	memset(&iov, 0, sizeof(iov));
	rqst.rq_iov = iov;
	rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;

	rc = SMB2_query_directory_init(xid, tcon, server,
				       &rqst, persistent_fid,
				       volatile_fid, index,
				       srch_inf->info_level);
	if (rc)
		goto qdir_exit;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;

	if (rc) {
		if (rc == -ENODATA &&
		    rsp->hdr.Status == STATUS_NO_MORE_FILES) {
			trace_smb3_query_dir_done(xid, persistent_fid,
				tcon->tid, tcon->ses->Suid, index, 0);
			srch_inf->endOfSearch = true;
			rc = 0;
		} else {
			trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
				tcon->ses->Suid, index, 0, rc);
			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
		}
		goto qdir_exit;
	}

	rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
					srch_inf);
	if (rc) {
		trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
			tcon->ses->Suid, index, 0, rc);
		goto qdir_exit;
	}
	resp_buftype = CIFS_NO_BUFFER;

	trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
			tcon->ses->Suid, index, srch_inf->entries_in_buffer);

qdir_exit:
	SMB2_query_directory_free(&rqst);
	free_rsp_buf(resp_buftype, rsp);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int
SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
		   struct smb_rqst *rqst,
		   u64 persistent_fid, u64 volatile_fid, u32 pid,
		   u8 info_class, u8 info_type, u32 additional_info,
		   void **data, unsigned int *size)
{
	struct smb2_set_info_req *req;
	struct kvec *iov = rqst->rq_iov;
	unsigned int i, total_len;
	int rc;

	rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
	req->InfoType = info_type;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	req->AdditionalInformation = cpu_to_le32(additional_info);

	req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req));
	req->BufferLength = cpu_to_le32(*size);

	memcpy(req->Buffer, *data, *size);
	total_len += *size;

	iov[0].iov_base = (char *)req;
	/* 1 for Buffer */
	iov[0].iov_len = total_len - 1;

	for (i = 1; i < rqst->rq_nvec; i++) {
		le32_add_cpu(&req->BufferLength, size[i]);
		iov[i].iov_base = (char *)data[i];
		iov[i].iov_len = size[i];
	}

	return 0;
}

void
SMB2_set_info_free(struct smb_rqst *rqst)
{
	if (rqst && rqst->rq_iov)
		cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
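/*
 * Illustrative sketch only (not part of the original code):
 * SMB2_set_info_init() takes parallel data[]/size[] arrays, one entry per
 * iov.  data[0] is copied into the request itself; any further entries stay
 * in the caller's memory and are merely added to BufferLength and pointed
 * at by iov[1..].  A hypothetical two-part payload (a fixed header plus a
 * variable-length tail) could therefore be set up as:
 *
 *	void *data[2] = { &fixed_part, variable_part };
 *	unsigned int size[2] = { sizeof(fixed_part), variable_len };
 *
 *	rqst.rq_iov = iov;
 *	rqst.rq_nvec = 2;
 *	rc = SMB2_set_info_init(tcon, server, &rqst, pfid, vfid, pid,
 *				info_class, SMB2_O_INFO_FILE, 0, data, size);
 *
 * (fixed_part, variable_part, variable_len, pfid and vfid are placeholder
 * names, not identifiers defined in this file.)
 */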
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
	      u8 info_type, u32 additional_info, unsigned int num,
	      void **data, unsigned int *size)
{
	struct smb_rqst rqst;
	struct smb2_set_info_rsp *rsp = NULL;
	struct kvec *iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	if (!ses || !server)
		return -EIO;

	if (!num)
		return -EINVAL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = num;

	rc = SMB2_set_info_init(tcon, server,
				&rqst, persistent_fid, volatile_fid, pid,
				info_class, info_type, additional_info,
				data, size);
	if (rc) {
		kfree(iov);
		return rc;
	}

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags,
			    &rsp_iov);
	SMB2_set_info_free(&rqst);
	rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
		trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
				ses->Suid, info_class, (__u32)info_type, rc);
	}

	free_rsp_buf(resp_buftype, rsp);
	kfree(iov);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	     u64 volatile_fid, u32 pid, loff_t new_eof)
{
	struct smb2_file_eof_info info;
	void *data;
	unsigned int size;

	info.EndOfFile = cpu_to_le64(new_eof);

	data = &info;
	size = sizeof(struct smb2_file_eof_info);

	trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, new_eof);

	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
			0, 1, &data, &size);
}

int
SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
	     u64 persistent_fid, u64 volatile_fid,
	     struct smb_ntsd *pnntsd, int pacllen, int aclflag)
{
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
			1, (void **)&pnntsd, &pacllen);
}

int
SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    u64 persistent_fid, u64 volatile_fid,
	    struct smb2_file_full_ea_info *buf, int len)
{
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
			0, 1, (void **)&buf, &len);
}
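/*
 * Illustrative sketch only (not part of the original code): the thin
 * wrappers above all funnel into send_set_info() with a single data/size
 * pair.  For instance, truncating an open file to zero bytes is one
 * FILE_END_OF_FILE_INFORMATION set on its handle:
 *
 *	rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
 *			  cfile->fid.volatile_fid, current->tgid, 0);
 *
 * (cfile stands for whatever open struct cifsFileInfo the caller holds.)
 */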
int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_oplock_break *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = CIFS_OBREAK_OP;
	server = cifs_pick_channel(ses);

	cifs_dbg(FYI, "SMB2_oplock_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->hdr.CreditRequest = cpu_to_le16(1);

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
	}

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

void
smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			     struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree = kst->f_bavail =
			le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}

static void
copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
			      struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(response_data->BlockSize);
	kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
	kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
	if (response_data->UserBlocksAvail == cpu_to_le64(-1))
		kst->f_bavail = kst->f_bfree;
	else
		kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
	if (response_data->TotalFileNodes != cpu_to_le64(-1))
		kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
	if (response_data->FreeFileNodes != cpu_to_le64(-1))
		kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);

	return;
}

static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
		   struct TCP_Server_Info *server,
		   int level, int outbuf_len, u64 persistent_fid,
		   u64 volatile_fid)
{
	int rc;
	struct smb2_query_info_req *req;
	unsigned int total_len;

	cifs_dbg(FYI, "Query FSInfo level %d\n", level);

	if ((tcon->ses == NULL) || server == NULL)
		return -EIO;

	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 1 for pad */
	req->InputBufferOffset =
			cpu_to_le16(sizeof(struct smb2_query_info_req));
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp));

	iov->iov_base = (char *)req;
	iov->iov_len = total_len;
	return 0;
}

static inline void free_qfs_info_req(struct kvec *iov)
{
	cifs_buf_release(iov->iov_base);
}
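/*
 * Illustrative sketch only (not part of the original code): the helpers
 * above map SMB2 filesystem info onto statfs(2) fields.  On the non-POSIX
 * path the block size is the allocation unit, so with, say,
 * BytesPerSector = 512 and SectorsPerAllocationUnit = 8 the caller ends up
 * with:
 *
 *	kst->f_bsize = 512 * 8;		(4096-byte allocation units)
 *
 * and f_blocks/f_bfree/f_bavail are then counted in those 4096-byte units.
 */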
int
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
		      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	FILE_SYSTEM_POSIX_INFO *info = NULL;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	rc = build_qfs_info_req(&iov, tcon, server,
				FS_POSIX_INFORMATION,
				sizeof(FILE_SYSTEM_POSIX_INFO),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	free_qfs_info_req(&iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto posix_qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (FILE_SYSTEM_POSIX_INFO *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(FILE_SYSTEM_POSIX_INFO));
	if (!rc)
		copy_posix_fs_info_to_kstatfs(info, fsdata);

posix_qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, int level)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype, max_len, min_len;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	unsigned int rsp_len, offset;
	int flags = 0;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	server = cifs_pick_channel(ses);

	if (level == FS_DEVICE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
	} else if (level == FS_ATTRIBUTE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
		min_len = MIN_FS_ATTR_INFO_SIZE;
	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
		max_len = sizeof(struct smb3_fs_ss_info);
		min_len = sizeof(struct smb3_fs_ss_info);
	} else if (level == FS_VOLUME_INFORMATION) {
		max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
		min_len = sizeof(struct smb3_fs_vol_info);
	} else {
		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
		return -EINVAL;
	}

	rc = build_qfs_info_req(&iov, tcon, server,
				level, max_len,
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
	free_qfs_info_req(&iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsattr_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
	offset = le16_to_cpu(rsp->OutputBufferOffset);
	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
	if (rc)
		goto qfsattr_exit;

	if (level == FS_ATTRIBUTE_INFORMATION)
		memcpy(&tcon->fsAttrInfo, offset
			+ (char *)rsp, min_t(unsigned int,
			rsp_len, max_len));
	else if (level == FS_DEVICE_INFORMATION)
		memcpy(&tcon->fsDevInfo, offset
			+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
	else if (level == FS_SECTOR_SIZE_INFORMATION) {
		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
			(offset + (char *)rsp);
		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
		tcon->perf_sector_size =
			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
	} else if (level == FS_VOLUME_INFORMATION) {
		struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
			(offset + (char *)rsp);
		tcon->vol_serial_number = vol_info->VolumeSerialNumber;
		tcon->vol_create_time = vol_info->VolumeCreationTime;
	}

qfsattr_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}
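/*
 * Illustrative sketch only (not part of the original code): SMB2_QFS_attr()
 * does not return the queried data to its caller; it caches it in the tcon.
 * A caller holding an open handle that wants several levels simply issues
 * one call per level, e.g.:
 *
 *	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
 *		      FS_ATTRIBUTE_INFORMATION);	(fills tcon->fsAttrInfo)
 *	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
 *		      FS_SECTOR_SIZE_INFORMATION);	(fills tcon->perf_sector_size)
 *
 * (fid stands for whatever struct cifs_fid the caller has open.)
 */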
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	struct smb_rqst rqst;
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	struct kvec rsp_iov;
	int resp_buf_type;
	unsigned int count;
	int flags = CIFS_NO_RSP_BUF;
	unsigned int total_len;
	struct TCP_Server_Info *server;
	int retries = 0, cur_sleep = 1;

replay_again:
	/* reinitialize for possible replay */
	flags = CIFS_NO_RSP_BUF;
	server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);

	rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;

	if (retries)
		smb2_set_replay(server, &rqst);

	rc = cifs_send_recv(xid, tcon->ses, server,
			    &rqst, &resp_buf_type, flags,
			    &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
		trace_smb3_lock_err(xid, persist_fid, tcon->tid,
				    tcon->ses->Suid, rc);
	}

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}

int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_lease_ack *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;
	__u64 *please_key_high;
	__u64 *please_key_low;
	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);

	cifs_dbg(FYI, "SMB2_lease_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
				 (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	total_len += 12;

	memcpy(req->LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
	req->LeaseState = lease_state;

	flags |= CIFS_NO_RSP_BUF;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, server,
			    &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);

	please_key_low = (__u64 *)lease_key;
	please_key_high = (__u64 *)(lease_key+8);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
				     ses->Suid, *please_key_low, *please_key_high, rc);
		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
	} else
		trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
				      ses->Suid, *please_key_low, *please_key_high);

	return rc;
}
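/*
 * Illustrative sketch only (not part of the original code): SMB2_lock() is
 * the single-range convenience wrapper around smb2_lockv().  A non-blocking
 * exclusive byte-range lock and the matching unlock might look like:
 *
 *	rc = SMB2_lock(xid, tcon, pfid, vfid, current->tgid, len, off,
 *		       SMB2_LOCKFLAG_EXCLUSIVE_LOCK, false);
 *	...
 *	rc = SMB2_lock(xid, tcon, pfid, vfid, current->tgid, len, off,
 *		       SMB2_LOCKFLAG_UNLOCK, false);
 *
 * With wait == false the lock request also carries
 * SMB2_LOCKFLAG_FAIL_IMMEDIATELY, so the server fails it rather than
 * blocking; the unlock never gets that flag.  (pfid, vfid, len and off are
 * placeholder names.)
 */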