// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"
#include "compress.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* jiffies is easier to use; note the mid may be allocated well before it is sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
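/*
 * Lifecycle note (a summary of the code above and below, not new
 * behaviour): a mid starts out MID_REQUEST_ALLOCATED, is moved to
 * MID_REQUEST_SUBMITTED once the request is handed to smb_send(), and
 * is advanced by the receive path when the matching response arrives.
 * It is refcounted via the kref_init() above, so it is released through
 * release_mid() or delete_mid() rather than being freed directly.
 */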
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* the SMB session is being torn down; only allow logoff */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_queue_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_queue_lock);
	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
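/*
 * Framing invariant used throughout this file: rq_iov[0] must carry
 * only the 4-byte RFC1001 length field, and rq_iov[1] must point at
 * the byte immediately after it -- which is exactly how smb_send()
 * above splits a contiguous buffer:
 *
 *	iov[0].iov_base = buf;		iov[0].iov_len = 4;
 *	iov[1].iov_base = buf + 4;	iov[1].iov_len = smb_buf_length;
 *
 * Both cifs_setup_async_request() above and cifs_setup_request() below
 * reject requests that do not honour this split.
 */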
/*
 * Send an SMB request.  No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and
 * whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if the server signed the response */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
					rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is the RFC1001 length; the rest of the packet follows */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
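/*
 * Illustrative only: a minimal SendReceive2() caller.  The kvecs passed
 * in cover the full frame *including* the 4-byte RFC1001 length; the
 * function splits the first vector itself to restore the framing
 * invariant (SendReceiveNoRsp() above is a real in-tree example of this
 * pattern):
 *
 *	struct kvec iov = {
 *		.iov_base = in_buf,
 *		.iov_len  = get_rfc1002_length(in_buf) + 4,
 *	};
 *	rc = SendReceive2(xid, ses, &iov, 1, &resp_buf_type, flags,
 *			  &rsp_iov);
 */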
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server.  We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_queue_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
		    midQ->mid_state == MID_RESPONSE_RECEIVED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_queue_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_queue_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
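/*
 * Accounting note for SendReceive() above: the credit obtained by
 * wait_for_free_request() is handed back through add_credits() on every
 * exit path, and a failed smb_send() rewinds server->sequence_number
 * by 2, undoing the two signing sequence numbers reserved when the
 * request was signed (one for the request, one for its expected
 * response).
 */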
/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change the type of lock
	 * from LOCKING_ANDX_SHARED_LOCK or LOCKING_ANDX_EXCLUSIVE_LOCK
	 * to LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}
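/*
 * Note: send_lock_cancel() deliberately reuses the caller's in_buf, so
 * the file handle and lock range of the original LOCKING_ANDX request
 * are cancelled exactly as they were sent; only LockType, Timeout and
 * the Mid are rewritten.  The fresh mid from get_next_mid() is needed
 * because the cancel is a separate request on the wire, distinct from
 * the still-pending blocking lock.
 */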
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server.  We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED ||
	     midQ->mid_state == MID_RESPONSE_RECEIVED) &&
	    ((server->tcpStatus == CifsGood) ||
	     (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock.  We send a NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock.  We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed.  Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_queue_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ->mid_state == MID_RESPONSE_RECEIVED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_queue_lock);
				return rc;
			}
			spin_unlock(&server->mid_queue_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
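/*
 * Why SendReceiveBlockingLock() maps -EACCES to -ERESTARTSYS above:
 * rstart is only set once a blocking lock has been interrupted by a
 * signal and then cancelled.  If the server's eventual reply maps to
 * -EACCES, the denial may simply reflect our own cancellation rather
 * than a genuine lock conflict, so the system call is restarted (and
 * the lock retried) instead of reporting a spurious failure to
 * userspace.
 */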