// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"
#include "compress.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

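/*
 * Allocate and zero a mid_q_entry for this request. The mid defaults
 * to synchronous use: a reference is taken on the current task and the
 * default callback simply wakes it, until a caller overrides
 * callback/callback_data for the async path.
 */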
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	spin_lock_init(&temp->mid_lock);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

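/*
 * Send an SMB frame that already starts with its 4-byte RFC1002 length
 * field: iov[0] covers the length and iov[1] the remaining
 * smb_buf_length bytes of the packet.
 */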
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

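/*
 * Allocate a mid for in_buf and queue it on the server's pending_mid_q.
 * Returns -EAGAIN if the session state does not allow this command:
 * only SESSION_SETUP/NEGOTIATE while the session is new, and only
 * LOGOFF while it is exiting.
 */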
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* the session is being torn down: only a logoff may pass */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_queue_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_queue_lock);
	return 0;
}

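/*
 * Set up a mid for an async request. The caller must have split the
 * 4-byte RFC1002 length into rq_iov[0]; the request is signed here and
 * the mid reference is dropped if signing fails.
 */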
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and
 * whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

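/*
 * Sanity-check a received response: dump the start of the frame, verify
 * the signature when the server signs, and map the SMB status code in
 * the header to a POSIX error.
 */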
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the response signature if this server signs frames */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
					rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

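/*
 * Synchronous counterpart of cifs_setup_async_request(): allocate and
 * queue the mid via allocate_mid(), then sign the request, deleting the
 * mid on a signing failure.
 */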
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

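/*
 * Send a request described by an iovec array and wait for the response.
 * An extra iovec is prepended for the RFC1001 length, so at most
 * CIFS_MAX_IOV_SIZE - 1 caller vectors fit in the on-stack array;
 * larger requests bounce through a temporary allocation.
 */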
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is an RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

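/*
 * Send a single-buffer request and copy the response into out_buf.
 * Signing and sending happen under the server lock so that signature
 * sequence numbers match the order frames reach the socket; on a send
 * failure the sequence number is wound back by two.
 */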
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&midQ->mid_lock);
		if (midQ->callback) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&midQ->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&midQ->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

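/*
 * Variant of SendReceive() for blocking lock requests: the wait for the
 * response is interruptible, and if a signal arrives while the lock is
 * still pending a cancel is sent (NT_CANCEL for a POSIX lock carried in
 * SMB_COM_TRANSACTION2, a LOCKING_ANDX_CANCEL_LOCK otherwise). The call
 * is restarted with -ERESTARTSYS if the cancelled lock comes back
 * -EACCES.
 */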
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED ||
	     midQ->mid_state == MID_RESPONSE_RECEIVED) &&
	    ((server->tcpStatus == CifsGood) ||
	     (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send an NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&midQ->mid_lock);
			if (midQ->callback) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&midQ->mid_lock);
				return rc;
			}
			spin_unlock(&midQ->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}