1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 */
9
10 #include <linux/fs.h>
11 #include <linux/list.h>
12 #include <linux/gfp.h>
13 #include <linux/wait.h>
14 #include <linux/net.h>
15 #include <linux/delay.h>
16 #include <linux/freezer.h>
17 #include <linux/tcp.h>
18 #include <linux/bvec.h>
19 #include <linux/highmem.h>
20 #include <linux/uaccess.h>
21 #include <linux/processor.h>
22 #include <linux/mempool.h>
23 #include <linux/sched/signal.h>
24 #include <linux/task_io_accounting_ops.h>
25 #include "cifspdu.h"
26 #include "cifsglob.h"
27 #include "cifsproto.h"
28 #include "cifs_debug.h"
29 #include "smb2proto.h"
30 #include "smbdirect.h"
31 #include "compress.h"
32
33 /* Max number of iovectors we can use off the stack when sending requests. */
34 #define CIFS_MAX_IOV_SIZE 8
35
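/*
 * Default mid callback: mark the response as ready and wake up the task
 * that issued the request (stored in callback_data at allocation time).
 */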
36 void
37 cifs_wake_up_task(struct mid_q_entry *mid)
38 {
39 if (mid->mid_state == MID_RESPONSE_RECEIVED)
40 mid->mid_state = MID_RESPONSE_READY;
41 wake_up_process(mid->callback_data);
42 }
43
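/*
 * Allocate a mid from the mempool and initialize it for a synchronous
 * request: the default callback simply wakes up the allocating task,
 * on which an extra task reference is taken until the mid is released.
 */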
44 static struct mid_q_entry *
45 alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
46 {
47 struct mid_q_entry *temp;
48
49 if (server == NULL) {
50 cifs_dbg(VFS, "%s: null TCP session\n", __func__);
51 return NULL;
52 }
53
54 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
55 memset(temp, 0, sizeof(struct mid_q_entry));
56 kref_init(&temp->refcount);
57 temp->mid = get_mid(smb_buffer);
58 temp->pid = current->pid;
59 temp->command = cpu_to_le16(smb_buffer->Command);
60 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
61 /* easier to use jiffies */
62 /* when mid allocated can be before when sent */
63 temp->when_alloc = jiffies;
64 temp->server = server;
65
66 /*
67 * The default is for the mid to be synchronous, so the
68 * default callback just wakes up the current task.
69 */
70 get_task_struct(current);
71 temp->creator = current;
72 temp->callback = cifs_wake_up_task;
73 temp->callback_data = current;
74
75 atomic_inc(&mid_count);
76 temp->mid_state = MID_REQUEST_ALLOCATED;
77 return temp;
78 }
79
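/*
 * Final kref release for a mid: run any cancelled-mid handling, free the
 * response buffer, update response time statistics (CONFIG_CIFS_STATS2)
 * and return the mid to the mempool.
 */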
80 void __release_mid(struct kref *refcount)
81 {
82 struct mid_q_entry *midEntry =
83 container_of(refcount, struct mid_q_entry, refcount);
84 #ifdef CONFIG_CIFS_STATS2
85 __le16 command = midEntry->server->vals->lock_cmd;
86 __u16 smb_cmd = le16_to_cpu(midEntry->command);
87 unsigned long now;
88 unsigned long roundtrip_time;
89 #endif
90 struct TCP_Server_Info *server = midEntry->server;
91
92 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
93 (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
94 midEntry->mid_state == MID_RESPONSE_READY) &&
95 server->ops->handle_cancelled_mid)
96 server->ops->handle_cancelled_mid(midEntry, server);
97
98 midEntry->mid_state = MID_FREE;
99 atomic_dec(&mid_count);
100 if (midEntry->large_buf)
101 cifs_buf_release(midEntry->resp_buf);
102 else
103 cifs_small_buf_release(midEntry->resp_buf);
104 #ifdef CONFIG_CIFS_STATS2
105 now = jiffies;
106 if (now < midEntry->when_alloc)
107 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
108 roundtrip_time = now - midEntry->when_alloc;
109
110 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
111 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
112 server->slowest_cmd[smb_cmd] = roundtrip_time;
113 server->fastest_cmd[smb_cmd] = roundtrip_time;
114 } else {
115 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
116 server->slowest_cmd[smb_cmd] = roundtrip_time;
117 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
118 server->fastest_cmd[smb_cmd] = roundtrip_time;
119 }
120 cifs_stats_inc(&server->num_cmds[smb_cmd]);
121 server->time_per_cmd[smb_cmd] += roundtrip_time;
122 }
123 /*
124 * commands taking longer than one second (default) can be indications
125 * that something is wrong, unless it is quite a slow link or a very
126 * busy server. Note that this calc is unlikely or impossible to wrap
127 * as long as slow_rsp_threshold is not set way above recommended max
128 * value (32767 ie 9 hours) and is generally harmless even if wrong
129 * since only affects debug counters - so leaving the calc as simple
130 * comparison rather than doing multiple conversions and overflow
131 * checks
132 */
133 if ((slow_rsp_threshold != 0) &&
134 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
135 (midEntry->command != command)) {
136 /*
137 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
138 * NB: le16_to_cpu returns unsigned so can not be negative below
139 */
140 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
141 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
142
143 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
144 midEntry->when_sent, midEntry->when_received);
145 if (cifsFYI & CIFS_TIMER) {
146 pr_debug("slow rsp: cmd %d mid %llu",
147 midEntry->command, midEntry->mid);
148 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
149 now - midEntry->when_alloc,
150 now - midEntry->when_sent,
151 now - midEntry->when_received);
152 }
153 }
154 #endif
155 put_task_struct(midEntry->creator);
156
157 mempool_free(midEntry, cifs_mid_poolp);
158 }
159
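/*
 * Remove the mid from the pending queue (if still queued) and drop the
 * reference taken when it was allocated.
 */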
160 void
161 delete_mid(struct mid_q_entry *mid)
162 {
163 spin_lock(&mid->server->mid_lock);
164 if (!(mid->mid_flags & MID_DELETED)) {
165 list_del_init(&mid->qhead);
166 mid->mid_flags |= MID_DELETED;
167 }
168 spin_unlock(&mid->server->mid_lock);
169
170 release_mid(mid);
171 }
172
173 /*
174 * smb_send_kvec - send an array of kvecs to the server
175 * @server: Server to send the data to
176 * @smb_msg: Message to send
177 * @sent: amount of data sent on socket is stored here
178 *
179 * Our basic "send data to server" function. Should be called with srv_mutex
180 * held. The caller is responsible for handling the results.
181 */
182 int
183 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
184 size_t *sent)
185 {
186 int rc = 0;
187 int retries = 0;
188 struct socket *ssocket = server->ssocket;
189
190 *sent = 0;
191
192 if (server->noblocksnd)
193 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
194 else
195 smb_msg->msg_flags = MSG_NOSIGNAL;
196
197 while (msg_data_left(smb_msg)) {
198 /*
199 * If blocking send, we try 3 times, since each can block
200 * for 5 seconds. For nonblocking we have to try more
201 * but wait increasing amounts of time allowing time for
202 * socket to clear. The overall time we wait in either
203 * case to send on the socket is about 15 seconds.
204 * Similarly we wait for 15 seconds for a response from
205 * the server in SendReceive[2] for the server to send
206 * a response back for most types of requests (except
207 * SMB Write past end of file which can be slow, and
208 * blocking lock operations). NFS waits slightly longer
209 * than CIFS, but this can make it take longer for
210 * nonresponsive servers to be detected and 15 seconds
211 * is more than enough time for modern networks to
212 * send a packet. In most cases if we fail to send
213 * after the retries we will kill the socket and
214 * reconnect which may clear the network problem.
215 */
216 rc = sock_sendmsg(ssocket, smb_msg);
217 if (rc == -EAGAIN) {
218 retries++;
219 if (retries >= 14 ||
220 (!server->noblocksnd && (retries > 2))) {
221 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
222 ssocket);
223 return -EAGAIN;
224 }
225 msleep(1 << retries);
226 continue;
227 }
228
229 if (rc < 0)
230 return rc;
231
232 if (rc == 0) {
233 /* should never happen, letting socket clear before
234 retrying is our only obvious option here */
235 cifs_server_dbg(VFS, "tcp sent no data\n");
236 msleep(500);
237 continue;
238 }
239
240 /* send was at least partially successful */
241 *sent += rc;
242 retries = 0; /* in case we get ENOSPC on the next send */
243 }
244 return 0;
245 }
246
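/*
 * Total number of bytes that will go on the wire for @rqst, not counting
 * the 4 byte RFC1002 length iov that SMB2+ requests carry first.
 */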
247 unsigned long
248 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
249 {
250 unsigned int i;
251 struct kvec *iov;
252 int nvec;
253 unsigned long buflen = 0;
254
255 if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
256 rqst->rq_iov[0].iov_len == 4) {
257 iov = &rqst->rq_iov[1];
258 nvec = rqst->rq_nvec - 1;
259 } else {
260 iov = rqst->rq_iov;
261 nvec = rqst->rq_nvec;
262 }
263
264 /* total up iov array first */
265 for (i = 0; i < nvec; i++)
266 buflen += iov[i].iov_len;
267
268 buflen += iov_iter_count(&rqst->rq_iter);
269 return buflen;
270 }
271
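/*
 * Send one or more smb_rqst structures on the socket (or via smbdirect),
 * prefixing an RFC1002 length marker for SMB2+. Signals are blocked for
 * the duration of the send; a partial send tears down the connection.
 */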
272 static int
273 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
274 struct smb_rqst *rqst)
275 {
276 int rc;
277 struct kvec *iov;
278 int n_vec;
279 unsigned int send_length = 0;
280 unsigned int i, j;
281 sigset_t mask, oldmask;
282 size_t total_len = 0, sent, size;
283 struct socket *ssocket = server->ssocket;
284 struct msghdr smb_msg = {};
285 __be32 rfc1002_marker;
286
287 cifs_in_send_inc(server);
288 if (cifs_rdma_enabled(server)) {
289 /* return -EAGAIN when connecting or reconnecting */
290 rc = -EAGAIN;
291 if (server->smbd_conn)
292 rc = smbd_send(server, num_rqst, rqst);
293 goto smbd_done;
294 }
295
296 rc = -EAGAIN;
297 if (ssocket == NULL)
298 goto out;
299
300 rc = -ERESTARTSYS;
301 if (fatal_signal_pending(current)) {
302 cifs_dbg(FYI, "signal pending before send request\n");
303 goto out;
304 }
305
306 rc = 0;
307 /* cork the socket */
308 tcp_sock_set_cork(ssocket->sk, true);
309
310 for (j = 0; j < num_rqst; j++)
311 send_length += smb_rqst_len(server, &rqst[j]);
312 rfc1002_marker = cpu_to_be32(send_length);
313
314 /*
315 * We should not allow signals to interrupt the network send because
316 * any partial send will cause session reconnects thus increasing
317 * latency of system calls and overload a server with unnecessary
318 * requests.
319 */
320
321 sigfillset(&mask);
322 sigprocmask(SIG_BLOCK, &mask, &oldmask);
323
324 /* Generate a rfc1002 marker for SMB2+ */
325 if (!is_smb1(server)) {
326 struct kvec hiov = {
327 .iov_base = &rfc1002_marker,
328 .iov_len = 4
329 };
330 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
331 rc = smb_send_kvec(server, &smb_msg, &sent);
332 if (rc < 0)
333 goto unmask;
334
335 total_len += sent;
336 send_length += 4;
337 }
338
339 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
340
341 for (j = 0; j < num_rqst; j++) {
342 iov = rqst[j].rq_iov;
343 n_vec = rqst[j].rq_nvec;
344
345 size = 0;
346 for (i = 0; i < n_vec; i++) {
347 dump_smb(iov[i].iov_base, iov[i].iov_len);
348 size += iov[i].iov_len;
349 }
350
351 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
352
353 rc = smb_send_kvec(server, &smb_msg, &sent);
354 if (rc < 0)
355 goto unmask;
356
357 total_len += sent;
358
359 if (iov_iter_count(&rqst[j].rq_iter) > 0) {
360 smb_msg.msg_iter = rqst[j].rq_iter;
361 rc = smb_send_kvec(server, &smb_msg, &sent);
362 if (rc < 0)
363 break;
364 total_len += sent;
365 }
366
367 }
368
369 unmask:
370 sigprocmask(SIG_SETMASK, &oldmask, NULL);
371
372 /*
373 * If signal is pending but we have already sent the whole packet to
374 * the server we need to return success status to allow a corresponding
375 * mid entry to be kept in the pending requests queue thus allowing
376 * to handle responses from the server by the client.
377 *
378 * If only part of the packet has been sent there is no need to hide
379 * interrupt because the session will be reconnected anyway, so there
380 * won't be any response from the server to handle.
381 */
382
383 if (signal_pending(current) && (total_len != send_length)) {
384 cifs_dbg(FYI, "signal is pending after attempt to send\n");
385 rc = -ERESTARTSYS;
386 }
387
388 /* uncork it */
389 tcp_sock_set_cork(ssocket->sk, false);
390
391 if ((total_len > 0) && (total_len != send_length)) {
392 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
393 send_length, total_len);
394 /*
395 * If we have only sent part of an SMB then the next SMB could
396 * be taken as the remainder of this one. We need to kill the
397 * socket so the server throws away the partial SMB
398 */
399 cifs_signal_cifsd_for_reconnect(server, false);
400 trace_smb3_partial_send_reconnect(server->CurrentMid,
401 server->conn_id, server->hostname);
402 }
403 smbd_done:
404 /*
405 * there's hardly any use for the layers above to know the
406 * actual error code here. All they should do at this point is
407 * to retry the connection and hope it goes away.
408 */
409 if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
410 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
411 rc);
412 rc = -ECONNABORTED;
413 cifs_signal_cifsd_for_reconnect(server, false);
414 } else if (rc > 0)
415 rc = 0;
416 out:
417 cifs_in_send_dec(server);
418 return rc;
419 }
420
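/*
 * Optionally compress or encrypt (transform) the request(s) before
 * handing them to __smb_send_rqst().
 */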
421 static int
422 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
423 struct smb_rqst *rqst, int flags)
424 {
425 struct smb2_transform_hdr tr_hdr;
426 struct smb_rqst new_rqst[MAX_COMPOUND] = {};
427 struct kvec iov = {
428 .iov_base = &tr_hdr,
429 .iov_len = sizeof(tr_hdr),
430 };
431 int rc;
432
433 if (flags & CIFS_COMPRESS_REQ)
434 return smb_compress(server, &rqst[0], __smb_send_rqst);
435
436 if (!(flags & CIFS_TRANSFORM_REQ))
437 return __smb_send_rqst(server, num_rqst, rqst);
438
439 if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
440 return -EIO;
441
442 if (!server->ops->init_transform_rq) {
443 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
444 return -EIO;
445 }
446
447 new_rqst[0].rq_iov = &iov;
448 new_rqst[0].rq_nvec = 1;
449
450 rc = server->ops->init_transform_rq(server, num_rqst + 1,
451 new_rqst, rqst);
452 if (!rc) {
453 rc = __smb_send_rqst(server, num_rqst + 1, new_rqst);
454 smb3_free_compound_rqst(num_rqst, &new_rqst[1]);
455 }
456 return rc;
457 }
458
459 int
460 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
461 unsigned int smb_buf_length)
462 {
463 struct kvec iov[2];
464 struct smb_rqst rqst = { .rq_iov = iov,
465 .rq_nvec = 2 };
466
467 iov[0].iov_base = smb_buffer;
468 iov[0].iov_len = 4;
469 iov[1].iov_base = (char *)smb_buffer + 4;
470 iov[1].iov_len = smb_buf_length;
471
472 return __smb_send_rqst(server, 1, &rqst);
473 }
474
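/*
 * Wait until @num_credits credits of the right type are available,
 * charge them (except for blocking lock ops) and record the reconnect
 * instance they were taken from. A negative @timeout waits indefinitely.
 */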
475 static int
476 wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
477 const int timeout, const int flags,
478 unsigned int *instance)
479 {
480 long rc;
481 int *credits;
482 int optype;
483 long int t;
484 int scredits, in_flight;
485
486 if (timeout < 0)
487 t = MAX_JIFFY_OFFSET;
488 else
489 t = msecs_to_jiffies(timeout);
490
491 optype = flags & CIFS_OP_MASK;
492
493 *instance = 0;
494
495 credits = server->ops->get_credits_field(server, optype);
496 /* Since an echo is already inflight, no need to wait to send another */
497 if (*credits <= 0 && optype == CIFS_ECHO_OP)
498 return -EAGAIN;
499
500 spin_lock(&server->req_lock);
501 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
502 /* oplock breaks must not be held up */
503 server->in_flight++;
504 if (server->in_flight > server->max_in_flight)
505 server->max_in_flight = server->in_flight;
506 *credits -= 1;
507 *instance = server->reconnect_instance;
508 scredits = *credits;
509 in_flight = server->in_flight;
510 spin_unlock(&server->req_lock);
511
512 trace_smb3_nblk_credits(server->CurrentMid,
513 server->conn_id, server->hostname, scredits, -1, in_flight);
514 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
515 __func__, 1, scredits);
516
517 return 0;
518 }
519
520 while (1) {
521 spin_unlock(&server->req_lock);
522
523 spin_lock(&server->srv_lock);
524 if (server->tcpStatus == CifsExiting) {
525 spin_unlock(&server->srv_lock);
526 return -ENOENT;
527 }
528 spin_unlock(&server->srv_lock);
529
530 spin_lock(&server->req_lock);
531 if (*credits < num_credits) {
532 scredits = *credits;
533 spin_unlock(&server->req_lock);
534
535 cifs_num_waiters_inc(server);
536 rc = wait_event_killable_timeout(server->request_q,
537 has_credits(server, credits, num_credits), t);
538 cifs_num_waiters_dec(server);
539 if (!rc) {
540 spin_lock(&server->req_lock);
541 scredits = *credits;
542 in_flight = server->in_flight;
543 spin_unlock(&server->req_lock);
544
545 trace_smb3_credit_timeout(server->CurrentMid,
546 server->conn_id, server->hostname, scredits,
547 num_credits, in_flight);
548 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
549 timeout);
550 return -EBUSY;
551 }
552 if (rc == -ERESTARTSYS)
553 return -ERESTARTSYS;
554 spin_lock(&server->req_lock);
555 } else {
556 /*
557 * For normal commands, reserve the last MAX_COMPOUND
558 * credits to compound requests.
559 * Otherwise these compounds could be permanently
560 * starved for credits by single-credit requests.
561 *
562 * To prevent spinning CPU, block this thread until
563 * there are >MAX_COMPOUND credits available.
564 * But only do this if we already have a lot of
565 * credits in flight to avoid triggering this check
566 * for servers that are slow to hand out credits on
567 * new sessions.
568 */
569 if (!optype && num_credits == 1 &&
570 server->in_flight > 2 * MAX_COMPOUND &&
571 *credits <= MAX_COMPOUND) {
572 spin_unlock(&server->req_lock);
573
574 cifs_num_waiters_inc(server);
575 rc = wait_event_killable_timeout(
576 server->request_q,
577 has_credits(server, credits,
578 MAX_COMPOUND + 1),
579 t);
580 cifs_num_waiters_dec(server);
581 if (!rc) {
582 spin_lock(&server->req_lock);
583 scredits = *credits;
584 in_flight = server->in_flight;
585 spin_unlock(&server->req_lock);
586
587 trace_smb3_credit_timeout(
588 server->CurrentMid,
589 server->conn_id, server->hostname,
590 scredits, num_credits, in_flight);
591 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
592 timeout);
593 return -EBUSY;
594 }
595 if (rc == -ERESTARTSYS)
596 return -ERESTARTSYS;
597 spin_lock(&server->req_lock);
598 continue;
599 }
600
601 /*
602 * Can not count locking commands against total
603 * as they are allowed to block on server.
604 */
605
606 /* update # of requests on the wire to server */
607 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
608 *credits -= num_credits;
609 server->in_flight += num_credits;
610 if (server->in_flight > server->max_in_flight)
611 server->max_in_flight = server->in_flight;
612 *instance = server->reconnect_instance;
613 }
614 scredits = *credits;
615 in_flight = server->in_flight;
616 spin_unlock(&server->req_lock);
617
618 trace_smb3_waitff_credits(server->CurrentMid,
619 server->conn_id, server->hostname, scredits,
620 -(num_credits), in_flight);
621 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
622 __func__, num_credits, scredits);
623 break;
624 }
625 }
626 return 0;
627 }
628
629 static int
630 wait_for_free_request(struct TCP_Server_Info *server, const int flags,
631 unsigned int *instance)
632 {
633 return wait_for_free_credits(server, 1, -1, flags,
634 instance);
635 }
636
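/*
 * Like wait_for_free_request() but for @num credits; fails fast with
 * -EDEADLK when nothing is in flight, since the server would then never
 * grant the additional credits we are waiting for.
 */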
637 static int
638 wait_for_compound_request(struct TCP_Server_Info *server, int num,
639 const int flags, unsigned int *instance)
640 {
641 int *credits;
642 int scredits, in_flight;
643
644 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
645
646 spin_lock(&server->req_lock);
647 scredits = *credits;
648 in_flight = server->in_flight;
649
650 if (*credits < num) {
651 /*
652 * If the server is tight on resources or just gives us less
653 * credits for other reasons (e.g. requests are coming out of
654 * order and the server delays granting more credits until it
655 * processes a missing mid) and we exhausted most available
656 * credits there may be situations when we try to send
657 * a compound request but we don't have enough credits. At this
658 * point the client needs to decide if it should wait for
659 * additional credits or fail the request. If at least one
660 * request is in flight there is a high probability that the
661 * server will return enough credits to satisfy this compound
662 * request.
663 *
664 * Return immediately if no requests in flight since we will be
665 * stuck on waiting for credits.
666 */
667 if (server->in_flight == 0) {
668 spin_unlock(&server->req_lock);
669 trace_smb3_insufficient_credits(server->CurrentMid,
670 server->conn_id, server->hostname, scredits,
671 num, in_flight);
672 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
673 __func__, in_flight, num, scredits);
674 return -EDEADLK;
675 }
676 }
677 spin_unlock(&server->req_lock);
678
679 return wait_for_free_credits(server, num, 60000, flags,
680 instance);
681 }
682
683 int
684 cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
685 size_t *num, struct cifs_credits *credits)
686 {
687 *num = size;
688 credits->value = 0;
689 credits->instance = server->reconnect_instance;
690 return 0;
691 }
692
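/*
 * Allocate a mid for an SMB1 request and queue it on pending_mid_q,
 * rejecting most commands while the session is being set up or torn down.
 */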
693 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
694 struct mid_q_entry **ppmidQ)
695 {
696 spin_lock(&ses->ses_lock);
697 if (ses->ses_status == SES_NEW) {
698 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
699 (in_buf->Command != SMB_COM_NEGOTIATE)) {
700 spin_unlock(&ses->ses_lock);
701 return -EAGAIN;
702 }
703 /* else ok - we are setting up session */
704 }
705
706 if (ses->ses_status == SES_EXITING) {
707 /* check if SMB session is bad because we are setting it up */
708 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
709 spin_unlock(&ses->ses_lock);
710 return -EAGAIN;
711 }
712 /* else ok - we are shutting down session */
713 }
714 spin_unlock(&ses->ses_lock);
715
716 *ppmidQ = alloc_mid(in_buf, ses->server);
717 if (*ppmidQ == NULL)
718 return -ENOMEM;
719 spin_lock(&ses->server->mid_lock);
720 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
721 spin_unlock(&ses->server->mid_lock);
722 return 0;
723 }
724
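/*
 * Sleep (killable, freezable) until the mid leaves the submitted/received
 * states, i.e. until the response is ready or the mid has failed.
 */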
725 static int
726 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
727 {
728 int error;
729
730 error = wait_event_state(server->response_q,
731 midQ->mid_state != MID_REQUEST_SUBMITTED &&
732 midQ->mid_state != MID_RESPONSE_RECEIVED,
733 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
734 if (error < 0)
735 return -ERESTARTSYS;
736
737 return 0;
738 }
739
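/*
 * Allocate and sign a mid for an asynchronous request; the first iov must
 * be the 4 byte RFC1002 length immediately followed by the SMB header in
 * the second iov.
 */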
740 struct mid_q_entry *
741 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
742 {
743 int rc;
744 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
745 struct mid_q_entry *mid;
746
747 if (rqst->rq_iov[0].iov_len != 4 ||
748 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
749 return ERR_PTR(-EIO);
750
751 /* enable signing if server requires it */
752 if (server->sign)
753 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
754
755 mid = alloc_mid(hdr, server);
756 if (mid == NULL)
757 return ERR_PTR(-ENOMEM);
758
759 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
760 if (rc) {
761 release_mid(mid);
762 return ERR_PTR(rc);
763 }
764
765 return mid;
766 }
767
768 /*
769 * Send a SMB request and set the callback function in the mid to handle
770 * the result. Caller is responsible for dealing with timeouts.
771 */
772 int
773 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
774 mid_receive_t *receive, mid_callback_t *callback,
775 mid_handle_t *handle, void *cbdata, const int flags,
776 const struct cifs_credits *exist_credits)
777 {
778 int rc;
779 struct mid_q_entry *mid;
780 struct cifs_credits credits = { .value = 0, .instance = 0 };
781 unsigned int instance;
782 int optype;
783
784 optype = flags & CIFS_OP_MASK;
785
786 if ((flags & CIFS_HAS_CREDITS) == 0) {
787 rc = wait_for_free_request(server, flags, &instance);
788 if (rc)
789 return rc;
790 credits.value = 1;
791 credits.instance = instance;
792 } else
793 instance = exist_credits->instance;
794
795 cifs_server_lock(server);
796
797 /*
798 * We can't use credits obtained from the previous session to send this
799 * request. Check if there were reconnects after we obtained credits and
800 * return -EAGAIN in such cases to let callers handle it.
801 */
802 if (instance != server->reconnect_instance) {
803 cifs_server_unlock(server);
804 add_credits_and_wake_if(server, &credits, optype);
805 return -EAGAIN;
806 }
807
808 mid = server->ops->setup_async_request(server, rqst);
809 if (IS_ERR(mid)) {
810 cifs_server_unlock(server);
811 add_credits_and_wake_if(server, &credits, optype);
812 return PTR_ERR(mid);
813 }
814
815 mid->receive = receive;
816 mid->callback = callback;
817 mid->callback_data = cbdata;
818 mid->handle = handle;
819 mid->mid_state = MID_REQUEST_SUBMITTED;
820
821 /* put it on the pending_mid_q */
822 spin_lock(&server->mid_lock);
823 list_add_tail(&mid->qhead, &server->pending_mid_q);
824 spin_unlock(&server->mid_lock);
825
826 /*
827 * Need to store the time in mid before calling I/O. For call_async,
828 * I/O response may come back and free the mid entry on another thread.
829 */
830 cifs_save_when_sent(mid);
831 rc = smb_send_rqst(server, 1, rqst, flags);
832
833 if (rc < 0) {
834 revert_current_mid(server, mid->credits);
835 server->sequence_number -= 2;
836 delete_mid(mid);
837 }
838
839 cifs_server_unlock(server);
840
841 if (rc == 0)
842 return 0;
843
844 add_credits_and_wake_if(server, &credits, optype);
845 return rc;
846 }
847
848 /*
849 *
850 * Send an SMB Request. No response info (other than return code)
851 * needs to be parsed.
852 *
853 * flags indicate the type of request buffer and how long to wait
854 * and whether to log NT STATUS code (error) before mapping it to POSIX error
855 *
856 */
857 int
858 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
859 char *in_buf, int flags)
860 {
861 int rc;
862 struct kvec iov[1];
863 struct kvec rsp_iov;
864 int resp_buf_type;
865
866 iov[0].iov_base = in_buf;
867 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
868 flags |= CIFS_NO_RSP_BUF;
869 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
870 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
871
872 return rc;
873 }
874
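/*
 * Map the final mid state to an errno for synchronous callers. On anything
 * other than MID_RESPONSE_READY the mid reference is dropped here, and an
 * unexpected state also dequeues the mid.
 */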
875 static int
876 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
877 {
878 int rc = 0;
879
880 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
881 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
882
883 spin_lock(&server->mid_lock);
884 switch (mid->mid_state) {
885 case MID_RESPONSE_READY:
886 spin_unlock(&server->mid_lock);
887 return rc;
888 case MID_RETRY_NEEDED:
889 rc = -EAGAIN;
890 break;
891 case MID_RESPONSE_MALFORMED:
892 rc = -EIO;
893 break;
894 case MID_SHUTDOWN:
895 rc = -EHOSTDOWN;
896 break;
897 case MID_RC:
898 rc = mid->mid_rc;
899 break;
900 default:
901 if (!(mid->mid_flags & MID_DELETED)) {
902 list_del_init(&mid->qhead);
903 mid->mid_flags |= MID_DELETED;
904 }
905 spin_unlock(&server->mid_lock);
906 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
907 __func__, mid->mid, mid->mid_state);
908 rc = -EIO;
909 goto sync_mid_done;
910 }
911 spin_unlock(&server->mid_lock);
912
913 sync_mid_done:
914 release_mid(mid);
915 return rc;
916 }
917
918 static inline int
919 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
920 struct mid_q_entry *mid)
921 {
922 return server->ops->send_cancel ?
923 server->ops->send_cancel(server, rqst, mid) : 0;
924 }
925
926 int
927 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
928 bool log_error)
929 {
930 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
931
932 dump_smb(mid->resp_buf, min_t(u32, 92, len));
933
934 /* convert the length into a more usable form */
935 if (server->sign) {
936 struct kvec iov[2];
937 int rc = 0;
938 struct smb_rqst rqst = { .rq_iov = iov,
939 .rq_nvec = 2 };
940
941 iov[0].iov_base = mid->resp_buf;
942 iov[0].iov_len = 4;
943 iov[1].iov_base = (char *)mid->resp_buf + 4;
944 iov[1].iov_len = len - 4;
945 /* FIXME: add code to kill session */
946 rc = cifs_verify_signature(&rqst, server,
947 mid->sequence_number);
948 if (rc)
949 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
950 rc);
951 }
952
953 /* BB special case reconnect tid and uid here? */
954 return map_and_check_smb_error(mid, log_error);
955 }
956
957 struct mid_q_entry *
958 cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
959 struct smb_rqst *rqst)
960 {
961 int rc;
962 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
963 struct mid_q_entry *mid;
964
965 if (rqst->rq_iov[0].iov_len != 4 ||
966 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
967 return ERR_PTR(-EIO);
968
969 rc = allocate_mid(ses, hdr, &mid);
970 if (rc)
971 return ERR_PTR(rc);
972 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
973 if (rc) {
974 delete_mid(mid);
975 return ERR_PTR(rc);
976 }
977 return mid;
978 }
979
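/*
 * Per-mid callback for compound requests: return the credits granted by
 * the server and mark the response ready without waking the caller. The
 * last mid in the chain uses cifs_compound_last_callback() to also wake
 * the waiting thread.
 */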
980 static void
981 cifs_compound_callback(struct mid_q_entry *mid)
982 {
983 struct TCP_Server_Info *server = mid->server;
984 struct cifs_credits credits = {
985 .value = server->ops->get_credits(mid),
986 .instance = server->reconnect_instance,
987 };
988
989 add_credits(server, &credits, mid->optype);
990
991 if (mid->mid_state == MID_RESPONSE_RECEIVED)
992 mid->mid_state = MID_RESPONSE_READY;
993 }
994
995 static void
996 cifs_compound_last_callback(struct mid_q_entry *mid)
997 {
998 cifs_compound_callback(mid);
999 cifs_wake_up_task(mid);
1000 }
1001
1002 static void
1003 cifs_cancelled_callback(struct mid_q_entry *mid)
1004 {
1005 cifs_compound_callback(mid);
1006 release_mid(mid);
1007 }
1008
1009 /*
1010 * Return a channel (master if none) of @ses that can be used to send
1011 * regular requests.
1012 *
1013 * If we are currently binding a new channel (negprot/sess.setup),
1014 * return the new incomplete channel.
1015 */
1016 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1017 {
1018 uint index = 0;
1019 unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
1020 struct TCP_Server_Info *server = NULL;
1021 int i, start, cur;
1022
1023 if (!ses)
1024 return NULL;
1025
1026 spin_lock(&ses->chan_lock);
1027 start = atomic_inc_return(&ses->chan_seq);
1028 for (i = 0; i < ses->chan_count; i++) {
1029 cur = (start + i) % ses->chan_count;
1030 server = ses->chans[cur].server;
1031 if (!server || server->terminate)
1032 continue;
1033
1034 if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
1035 continue;
1036
1037 /*
1038 * strictly speaking, we should pick up req_lock to read
1039 * server->in_flight. But it shouldn't matter much here if we
1040 * race while reading this data. The worst that can happen is
1041 * that we could use a channel that's not least loaded. Avoiding
1042 * taking the lock could help reduce wait time, which is
1043 * important for this function
1044 */
1045 if (server->in_flight < min_in_flight) {
1046 min_in_flight = server->in_flight;
1047 index = cur;
1048 }
1049 if (server->in_flight > max_in_flight)
1050 max_in_flight = server->in_flight;
1051 }
1052
1053 /* if all channels are equally loaded, fall back to round-robin */
1054 if (min_in_flight == max_in_flight)
1055 index = (uint)start % ses->chan_count;
1056
1057 server = ses->chans[index].server;
1058 spin_unlock(&ses->chan_lock);
1059
1060 return server;
1061 }
1062
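/*
 * Send a chain of up to MAX_COMPOUND requests and wait for the responses.
 * Credits are charged up front, per-mid callbacks hand back the credits
 * granted by the server, and cancelled waits defer cleanup to
 * cifs_cancelled_callback().
 */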
1063 int
1064 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1065 struct TCP_Server_Info *server,
1066 const int flags, const int num_rqst, struct smb_rqst *rqst,
1067 int *resp_buf_type, struct kvec *resp_iov)
1068 {
1069 int i, j, optype, rc = 0;
1070 struct mid_q_entry *midQ[MAX_COMPOUND];
1071 bool cancelled_mid[MAX_COMPOUND] = {false};
1072 struct cifs_credits credits[MAX_COMPOUND] = {
1073 { .value = 0, .instance = 0 }
1074 };
1075 unsigned int instance;
1076 char *buf;
1077
1078 optype = flags & CIFS_OP_MASK;
1079
1080 for (i = 0; i < num_rqst; i++)
1081 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
1082
1083 if (!ses || !ses->server || !server) {
1084 cifs_dbg(VFS, "Null session\n");
1085 return -EIO;
1086 }
1087
1088 spin_lock(&server->srv_lock);
1089 if (server->tcpStatus == CifsExiting) {
1090 spin_unlock(&server->srv_lock);
1091 return -ENOENT;
1092 }
1093 spin_unlock(&server->srv_lock);
1094
1095 /*
1096 * Wait for all the requests to become available.
1097 * This approach still leaves the possibility to be stuck waiting for
1098 * credits if the server doesn't grant credits to the outstanding
1099 * requests and if the client is completely idle, not generating any
1100 * other requests.
1101 * This can be handled by the eventual session reconnect.
1102 */
1103 rc = wait_for_compound_request(server, num_rqst, flags,
1104 &instance);
1105 if (rc)
1106 return rc;
1107
1108 for (i = 0; i < num_rqst; i++) {
1109 credits[i].value = 1;
1110 credits[i].instance = instance;
1111 }
1112
1113 /*
1114 * Make sure that we sign in the same order that we send on this socket
1115 * and avoid races inside tcp sendmsg code that could cause corruption
1116 * of smb data.
1117 */
1118
1119 cifs_server_lock(server);
1120
1121 /*
1122 * All the parts of the compound chain must use credits obtained from the
1123 * same session. We can not use credits obtained from the previous
1124 * session to send this request. Check if there were reconnects after
1125 * we obtained credits and return -EAGAIN in such cases to let callers
1126 * handle it.
1127 */
1128 if (instance != server->reconnect_instance) {
1129 cifs_server_unlock(server);
1130 for (j = 0; j < num_rqst; j++)
1131 add_credits(server, &credits[j], optype);
1132 return -EAGAIN;
1133 }
1134
1135 for (i = 0; i < num_rqst; i++) {
1136 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1137 if (IS_ERR(midQ[i])) {
1138 revert_current_mid(server, i);
1139 for (j = 0; j < i; j++)
1140 delete_mid(midQ[j]);
1141 cifs_server_unlock(server);
1142
1143 /* Update # of requests on wire to server */
1144 for (j = 0; j < num_rqst; j++)
1145 add_credits(server, &credits[j], optype);
1146 return PTR_ERR(midQ[i]);
1147 }
1148
1149 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1150 midQ[i]->optype = optype;
1151 /*
1152 * Invoke callback for every part of the compound chain
1153 * to calculate credits properly. Wake up this thread only when
1154 * the last element is received.
1155 */
1156 if (i < num_rqst - 1)
1157 midQ[i]->callback = cifs_compound_callback;
1158 else
1159 midQ[i]->callback = cifs_compound_last_callback;
1160 }
1161 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1162
1163 for (i = 0; i < num_rqst; i++)
1164 cifs_save_when_sent(midQ[i]);
1165
1166 if (rc < 0) {
1167 revert_current_mid(server, num_rqst);
1168 server->sequence_number -= 2;
1169 }
1170
1171 cifs_server_unlock(server);
1172
1173 /*
1174 * If sending failed for some reason or it is an oplock break that we
1175 * will not receive a response to - return credits back
1176 */
1177 if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1178 for (i = 0; i < num_rqst; i++)
1179 add_credits(server, &credits[i], optype);
1180 goto out;
1181 }
1182
1183 /*
1184 * At this point the request is passed to the network stack - we assume
1185 * that any credits taken from the server structure on the client have
1186 * been spent and we can't return them back. Once we receive responses
1187 * we will collect credits granted by the server in the mid callbacks
1188 * and add those credits to the server structure.
1189 */
1190
1191 /*
1192 * Compounding is never used during session establish.
1193 */
1194 spin_lock(&ses->ses_lock);
1195 if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1196 spin_unlock(&ses->ses_lock);
1197
1198 cifs_server_lock(server);
1199 smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
1200 cifs_server_unlock(server);
1201
1202 spin_lock(&ses->ses_lock);
1203 }
1204 spin_unlock(&ses->ses_lock);
1205
1206 for (i = 0; i < num_rqst; i++) {
1207 rc = wait_for_response(server, midQ[i]);
1208 if (rc != 0)
1209 break;
1210 }
1211 if (rc != 0) {
1212 for (; i < num_rqst; i++) {
1213 cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1214 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1215 send_cancel(server, &rqst[i], midQ[i]);
1216 spin_lock(&server->mid_lock);
1217 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1218 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
1219 midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
1220 midQ[i]->callback = cifs_cancelled_callback;
1221 cancelled_mid[i] = true;
1222 credits[i].value = 0;
1223 }
1224 spin_unlock(&server->mid_lock);
1225 }
1226 }
1227
1228 for (i = 0; i < num_rqst; i++) {
1229 if (rc < 0)
1230 goto out;
1231
1232 rc = cifs_sync_mid_result(midQ[i], server);
1233 if (rc != 0) {
1234 /* mark this mid as cancelled to not free it below */
1235 cancelled_mid[i] = true;
1236 goto out;
1237 }
1238
1239 if (!midQ[i]->resp_buf ||
1240 midQ[i]->mid_state != MID_RESPONSE_READY) {
1241 rc = -EIO;
1242 cifs_dbg(FYI, "Bad MID state?\n");
1243 goto out;
1244 }
1245
1246 buf = (char *)midQ[i]->resp_buf;
1247 resp_iov[i].iov_base = buf;
1248 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1249 HEADER_PREAMBLE_SIZE(server);
1250
1251 if (midQ[i]->large_buf)
1252 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1253 else
1254 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1255
1256 rc = server->ops->check_receive(midQ[i], server,
1257 flags & CIFS_LOG_ERROR);
1258
1259 /* mark it so buf will not be freed by delete_mid */
1260 if ((flags & CIFS_NO_RSP_BUF) == 0)
1261 midQ[i]->resp_buf = NULL;
1262
1263 }
1264
1265 /*
1266 * Compounding is never used during session establish.
1267 */
1268 spin_lock(&ses->ses_lock);
1269 if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1270 struct kvec iov = {
1271 .iov_base = resp_iov[0].iov_base,
1272 .iov_len = resp_iov[0].iov_len
1273 };
1274 spin_unlock(&ses->ses_lock);
1275 cifs_server_lock(server);
1276 smb311_update_preauth_hash(ses, server, &iov, 1);
1277 cifs_server_unlock(server);
1278 spin_lock(&ses->ses_lock);
1279 }
1280 spin_unlock(&ses->ses_lock);
1281
1282 out:
1283 /*
1284 * This will dequeue all mids. After this it is important that the
1285 * demultiplex_thread will not process any of these mids any further.
1286 * This is prevented above by using a noop callback that will not
1287 * wake this thread except for the very last PDU.
1288 */
1289 for (i = 0; i < num_rqst; i++) {
1290 if (!cancelled_mid[i])
1291 delete_mid(midQ[i]);
1292 }
1293
1294 return rc;
1295 }
1296
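/* Convenience wrapper around compound_send_recv() for a single request. */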
1297 int
1298 cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1299 struct TCP_Server_Info *server,
1300 struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1301 struct kvec *resp_iov)
1302 {
1303 return compound_send_recv(xid, ses, server, flags, 1,
1304 rqst, resp_buf_type, resp_iov);
1305 }
1306
1307 int
1308 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1309 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1310 const int flags, struct kvec *resp_iov)
1311 {
1312 struct smb_rqst rqst;
1313 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1314 int rc;
1315
1316 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1317 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1318 GFP_KERNEL);
1319 if (!new_iov) {
1320 /* otherwise cifs_send_recv below sets resp_buf_type */
1321 *resp_buf_type = CIFS_NO_BUFFER;
1322 return -ENOMEM;
1323 }
1324 } else
1325 new_iov = s_iov;
1326
1327 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1328 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1329
1330 new_iov[0].iov_base = new_iov[1].iov_base;
1331 new_iov[0].iov_len = 4;
1332 new_iov[1].iov_base += 4;
1333 new_iov[1].iov_len -= 4;
1334
1335 memset(&rqst, 0, sizeof(struct smb_rqst));
1336 rqst.rq_iov = new_iov;
1337 rqst.rq_nvec = n_vec + 1;
1338
1339 rc = cifs_send_recv(xid, ses, ses->server,
1340 &rqst, resp_buf_type, flags, resp_iov);
1341 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1342 kfree(new_iov);
1343 return rc;
1344 }
1345
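/*
 * Synchronously send an SMB1 request and copy the response into @out_buf,
 * returning the RFC1002 length of the response in @pbytes_returned.
 */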
1346 int
1347 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1348 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1349 int *pbytes_returned, const int flags)
1350 {
1351 int rc = 0;
1352 struct mid_q_entry *midQ;
1353 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1354 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1355 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1356 struct cifs_credits credits = { .value = 1, .instance = 0 };
1357 struct TCP_Server_Info *server;
1358
1359 if (ses == NULL) {
1360 cifs_dbg(VFS, "Null smb session\n");
1361 return -EIO;
1362 }
1363 server = ses->server;
1364 if (server == NULL) {
1365 cifs_dbg(VFS, "Null tcp session\n");
1366 return -EIO;
1367 }
1368
1369 spin_lock(&server->srv_lock);
1370 if (server->tcpStatus == CifsExiting) {
1371 spin_unlock(&server->srv_lock);
1372 return -ENOENT;
1373 }
1374 spin_unlock(&server->srv_lock);
1375
1376 /* Ensure that we do not send more than 50 overlapping requests
1377 to the same server. We may make this configurable later or
1378 use ses->maxReq */
1379
1380 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1381 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1382 len);
1383 return -EIO;
1384 }
1385
1386 rc = wait_for_free_request(server, flags, &credits.instance);
1387 if (rc)
1388 return rc;
1389
1390 /* make sure that we sign in the same order that we send on this socket
1391 and avoid races inside tcp sendmsg code that could cause corruption
1392 of smb data */
1393
1394 cifs_server_lock(server);
1395
1396 rc = allocate_mid(ses, in_buf, &midQ);
1397 if (rc) {
1398 cifs_server_unlock(server);
1399 /* Update # of requests on wire to server */
1400 add_credits(server, &credits, 0);
1401 return rc;
1402 }
1403
1404 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1405 if (rc) {
1406 cifs_server_unlock(server);
1407 goto out;
1408 }
1409
1410 midQ->mid_state = MID_REQUEST_SUBMITTED;
1411
1412 rc = smb_send(server, in_buf, len);
1413 cifs_save_when_sent(midQ);
1414
1415 if (rc < 0)
1416 server->sequence_number -= 2;
1417
1418 cifs_server_unlock(server);
1419
1420 if (rc < 0)
1421 goto out;
1422
1423 rc = wait_for_response(server, midQ);
1424 if (rc != 0) {
1425 send_cancel(server, &rqst, midQ);
1426 spin_lock(&server->mid_lock);
1427 if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1428 midQ->mid_state == MID_RESPONSE_RECEIVED) {
1429 /* no longer considered to be "in-flight" */
1430 midQ->callback = release_mid;
1431 spin_unlock(&server->mid_lock);
1432 add_credits(server, &credits, 0);
1433 return rc;
1434 }
1435 spin_unlock(&server->mid_lock);
1436 }
1437
1438 rc = cifs_sync_mid_result(midQ, server);
1439 if (rc != 0) {
1440 add_credits(server, &credits, 0);
1441 return rc;
1442 }
1443
1444 if (!midQ->resp_buf || !out_buf ||
1445 midQ->mid_state != MID_RESPONSE_READY) {
1446 rc = -EIO;
1447 cifs_server_dbg(VFS, "Bad MID state?\n");
1448 goto out;
1449 }
1450
1451 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1452 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1453 rc = cifs_check_receive(midQ, server, 0);
1454 out:
1455 delete_mid(midQ);
1456 add_credits(server, &credits, 0);
1457
1458 return rc;
1459 }
1460
1461 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1462 blocking lock to return. */
1463
1464 static int
1465 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1466 struct smb_hdr *in_buf,
1467 struct smb_hdr *out_buf)
1468 {
1469 int bytes_returned;
1470 struct cifs_ses *ses = tcon->ses;
1471 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1472
1473 /* We just modify the current in_buf to change
1474 the type of lock from LOCKING_ANDX_SHARED_LOCK
1475 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1476 LOCKING_ANDX_CANCEL_LOCK. */
1477
1478 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1479 pSMB->Timeout = 0;
1480 pSMB->hdr.Mid = get_next_mid(ses->server);
1481
1482 return SendReceive(xid, ses, in_buf, out_buf,
1483 &bytes_returned, 0);
1484 }
1485
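/*
 * Like SendReceive(), but for blocking lock requests: the wait is
 * interruptible, and a signal sends a lock cancel (NT_CANCEL for POSIX
 * locks, LOCKINGX_CANCEL_LOCK for Windows locks) before waiting again.
 */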
1486 int
1487 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1488 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1489 int *pbytes_returned)
1490 {
1491 int rc = 0;
1492 int rstart = 0;
1493 struct mid_q_entry *midQ;
1494 struct cifs_ses *ses;
1495 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1496 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1497 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1498 unsigned int instance;
1499 struct TCP_Server_Info *server;
1500
1501 if (tcon == NULL || tcon->ses == NULL) {
1502 cifs_dbg(VFS, "Null smb session\n");
1503 return -EIO;
1504 }
1505 ses = tcon->ses;
1506 server = ses->server;
1507
1508 if (server == NULL) {
1509 cifs_dbg(VFS, "Null tcp session\n");
1510 return -EIO;
1511 }
1512
1513 spin_lock(&server->srv_lock);
1514 if (server->tcpStatus == CifsExiting) {
1515 spin_unlock(&server->srv_lock);
1516 return -ENOENT;
1517 }
1518 spin_unlock(&server->srv_lock);
1519
1520 /* Ensure that we do not send more than 50 overlapping requests
1521 to the same server. We may make this configurable later or
1522 use ses->maxReq */
1523
1524 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1525 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1526 len);
1527 return -EIO;
1528 }
1529
1530 rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1531 if (rc)
1532 return rc;
1533
1534 /* make sure that we sign in the same order that we send on this socket
1535 and avoid races inside tcp sendmsg code that could cause corruption
1536 of smb data */
1537
1538 cifs_server_lock(server);
1539
1540 rc = allocate_mid(ses, in_buf, &midQ);
1541 if (rc) {
1542 cifs_server_unlock(server);
1543 return rc;
1544 }
1545
1546 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1547 if (rc) {
1548 delete_mid(midQ);
1549 cifs_server_unlock(server);
1550 return rc;
1551 }
1552
1553 midQ->mid_state = MID_REQUEST_SUBMITTED;
1554 rc = smb_send(server, in_buf, len);
1555 cifs_save_when_sent(midQ);
1556
1557 if (rc < 0)
1558 server->sequence_number -= 2;
1559
1560 cifs_server_unlock(server);
1561
1562 if (rc < 0) {
1563 delete_mid(midQ);
1564 return rc;
1565 }
1566
1567 /* Wait for a reply - allow signals to interrupt. */
1568 rc = wait_event_interruptible(server->response_q,
1569 (!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
1570 midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
1571 ((server->tcpStatus != CifsGood) &&
1572 (server->tcpStatus != CifsNew)));
1573
1574 /* Were we interrupted by a signal ? */
1575 spin_lock(&server->srv_lock);
1576 if ((rc == -ERESTARTSYS) &&
1577 (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1578 midQ->mid_state == MID_RESPONSE_RECEIVED) &&
1579 ((server->tcpStatus == CifsGood) ||
1580 (server->tcpStatus == CifsNew))) {
1581 spin_unlock(&server->srv_lock);
1582
1583 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1584 /* POSIX lock. We send a NT_CANCEL SMB to cause the
1585 blocking lock to return. */
1586 rc = send_cancel(server, &rqst, midQ);
1587 if (rc) {
1588 delete_mid(midQ);
1589 return rc;
1590 }
1591 } else {
1592 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1593 to cause the blocking lock to return. */
1594
1595 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1596
1597 /* If we get -ENOLCK back the lock may have
1598 already been removed. Don't exit in this case. */
1599 if (rc && rc != -ENOLCK) {
1600 delete_mid(midQ);
1601 return rc;
1602 }
1603 }
1604
1605 rc = wait_for_response(server, midQ);
1606 if (rc) {
1607 send_cancel(server, &rqst, midQ);
1608 spin_lock(&server->mid_lock);
1609 if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1610 midQ->mid_state == MID_RESPONSE_RECEIVED) {
1611 /* no longer considered to be "in-flight" */
1612 midQ->callback = release_mid;
1613 spin_unlock(&server->mid_lock);
1614 return rc;
1615 }
1616 spin_unlock(&server->mid_lock);
1617 }
1618
1619 /* We got the response - restart system call. */
1620 rstart = 1;
1621 spin_lock(&server->srv_lock);
1622 }
1623 spin_unlock(&server->srv_lock);
1624
1625 rc = cifs_sync_mid_result(midQ, server);
1626 if (rc != 0)
1627 return rc;
1628
1629 /* rcvd frame is ok */
1630 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
1631 rc = -EIO;
1632 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1633 goto out;
1634 }
1635
1636 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1637 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1638 rc = cifs_check_receive(midQ, server, 0);
1639 out:
1640 delete_mid(midQ);
1641 if (rstart && rc == -EACCES)
1642 return -ERESTARTSYS;
1643 return rc;
1644 }
1645
1646 /*
1647 * Discard any remaining data in the current SMB. To do this, we borrow the
1648 * current bigbuf.
1649 */
1650 int
1651 cifs_discard_remaining_data(struct TCP_Server_Info *server)
1652 {
1653 unsigned int rfclen = server->pdu_size;
1654 size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
1655 server->total_read;
1656
1657 while (remaining > 0) {
1658 ssize_t length;
1659
1660 length = cifs_discard_from_socket(server,
1661 min_t(size_t, remaining,
1662 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1663 if (length < 0)
1664 return length;
1665 server->total_read += length;
1666 remaining -= length;
1667 }
1668
1669 return 0;
1670 }
1671
1672 static int
1673 __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1674 bool malformed)
1675 {
1676 int length;
1677
1678 length = cifs_discard_remaining_data(server);
1679 dequeue_mid(mid, malformed);
1680 mid->resp_buf = server->smallbuf;
1681 server->smallbuf = NULL;
1682 return length;
1683 }
1684
1685 static int
1686 cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1687 {
1688 struct cifs_io_subrequest *rdata = mid->callback_data;
1689
1690 return __cifs_readv_discard(server, mid, rdata->result);
1691 }
1692
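/*
 * Receive the rest of a READ response: finish reading the header from the
 * socket, validate it, then read the payload directly into the request's
 * iterator (unless the data already arrived via RDMA).
 */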
1693 int
1694 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1695 {
1696 int length, len;
1697 unsigned int data_offset, data_len;
1698 struct cifs_io_subrequest *rdata = mid->callback_data;
1699 char *buf = server->smallbuf;
1700 unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
1701 bool use_rdma_mr = false;
1702
1703 cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%zu\n",
1704 __func__, mid->mid, rdata->subreq.start, rdata->subreq.len);
1705
1706 /*
1707 * read the rest of READ_RSP header (sans Data array), or whatever we
1708 * can if there's not enough data. At this point, we've read down to
1709 * the Mid.
1710 */
1711 len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
1712 HEADER_SIZE(server) + 1;
1713
1714 length = cifs_read_from_socket(server,
1715 buf + HEADER_SIZE(server) - 1, len);
1716 if (length < 0)
1717 return length;
1718 server->total_read += length;
1719
1720 if (server->ops->is_session_expired &&
1721 server->ops->is_session_expired(buf)) {
1722 cifs_reconnect(server, true);
1723 return -1;
1724 }
1725
1726 if (server->ops->is_status_pending &&
1727 server->ops->is_status_pending(buf, server)) {
1728 cifs_discard_remaining_data(server);
1729 return -1;
1730 }
1731
1732 /* set up first two iov for signature check and to get credits */
1733 rdata->iov[0].iov_base = buf;
1734 rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
1735 rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
1736 rdata->iov[1].iov_len =
1737 server->total_read - HEADER_PREAMBLE_SIZE(server);
1738 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
1739 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1740 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
1741 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
1742
1743 /* Was the SMB read successful? */
1744 rdata->result = server->ops->map_error(buf, false);
1745 if (rdata->result != 0) {
1746 cifs_dbg(FYI, "%s: server returned error %d\n",
1747 __func__, rdata->result);
1748 /* normal error on read response */
1749 return __cifs_readv_discard(server, mid, false);
1750 }
1751
1752 /* Is there enough to get to the rest of the READ_RSP header? */
1753 if (server->total_read < server->vals->read_rsp_size) {
1754 cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
1755 __func__, server->total_read,
1756 server->vals->read_rsp_size);
1757 rdata->result = -EIO;
1758 return cifs_readv_discard(server, mid);
1759 }
1760
1761 data_offset = server->ops->read_data_offset(buf) +
1762 HEADER_PREAMBLE_SIZE(server);
1763 if (data_offset < server->total_read) {
1764 /*
1765 * win2k8 sometimes sends an offset of 0 when the read
1766 * is beyond the EOF. Treat it as if the data starts just after
1767 * the header.
1768 */
1769 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
1770 __func__, data_offset);
1771 data_offset = server->total_read;
1772 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
1773 /* data_offset is beyond the end of smallbuf */
1774 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
1775 __func__, data_offset);
1776 rdata->result = -EIO;
1777 return cifs_readv_discard(server, mid);
1778 }
1779
1780 cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
1781 __func__, server->total_read, data_offset);
1782
1783 len = data_offset - server->total_read;
1784 if (len > 0) {
1785 /* read any junk before data into the rest of smallbuf */
1786 length = cifs_read_from_socket(server,
1787 buf + server->total_read, len);
1788 if (length < 0)
1789 return length;
1790 server->total_read += length;
1791 }
1792
1793 /* how much data is in the response? */
1794 #ifdef CONFIG_CIFS_SMB_DIRECT
1795 use_rdma_mr = rdata->mr;
1796 #endif
1797 data_len = server->ops->read_data_length(buf, use_rdma_mr);
1798 if (!use_rdma_mr && (data_offset + data_len > buflen)) {
1799 /* data_len is corrupt -- discard frame */
1800 rdata->result = -EIO;
1801 return cifs_readv_discard(server, mid);
1802 }
1803
1804 #ifdef CONFIG_CIFS_SMB_DIRECT
1805 if (rdata->mr)
1806 length = data_len; /* An RDMA read is already done. */
1807 else
1808 #endif
1809 length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
1810 data_len);
1811 if (length > 0)
1812 rdata->got_bytes += length;
1813 server->total_read += length;
1814
1815 cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
1816 server->total_read, buflen, data_len);
1817
1818 /* discard anything left over */
1819 if (server->total_read < buflen)
1820 return cifs_readv_discard(server, mid);
1821
1822 dequeue_mid(mid, false);
1823 mid->resp_buf = server->smallbuf;
1824 server->smallbuf = NULL;
1825 return length;
1826 }
1827