// SPDX-License-Identifier: LGPL-2.1
/*
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/task_work.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"
#include "compress.h"

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
	wake_up_process(mid->callback_data);
}

void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->wait_cancelled) &&
	    (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
	     midEntry->mid_state == MID_RESPONSE_READY) &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * Commands taking longer than one second (default) can be an
	 * indication that something is wrong, unless it is quite a slow link
	 * or a very busy server. Note that this calculation is unlikely or
	 * impossible to wrap as long as slow_rsp_threshold is not set way
	 * above the recommended maximum value (32767, i.e. 9 hours), and it
	 * is generally harmless even if wrong since it only affects debug
	 * counters - so leave the calculation as a simple comparison rather
	 * than doing multiple conversions and overflow checks.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_queue_lock);
	if (mid->deleted_from_q == false) {
		list_del_init(&mid->qhead);
		mid->deleted_from_q = true;
	}
	spin_unlock(&mid->server->mid_queue_lock);

	release_mid(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If this is a blocking send, we try 3 times, since each can
		 * block for 5 seconds. For a nonblocking send we have to try
		 * more often, but wait increasing amounts of time to allow
		 * the socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds. Similarly
		 * we wait for 15 seconds in SendReceive[2] for the server to
		 * send a response back for most types of requests (except
		 * SMB Write past end of file, which can be slow, and
		 * blocking lock operations). NFS waits slightly longer than
		 * CIFS, but this can make it take longer for nonresponsive
		 * servers to be detected, and 15 seconds is more than enough
		 * time for modern networks to send a packet. In most cases
		 * if we fail to send after the retries we will kill the
		 * socket and reconnect, which may clear the network problem.
		 *
		 * Even if regular signals are masked, EINTR might be
		 * propagated from sk_stream_wait_memory() to here when
		 * TIF_NOTIFY_SIGNAL is used for task work. For example,
		 * certain io_uring completions will use that. Treat
		 * having EINTR with pending task work the same as EAGAIN
		 * to avoid unnecessary reconnects.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN || unlikely(rc == -EINTR && task_work_pending(current))) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
						ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/*
			 * should never happen, letting socket clear before
			 * retrying is our only obvious option here
			 */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

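/*
 * Illustrative usage (a minimal sketch, not a call site in this file):
 * a caller frames a payload into a msghdr and hands it to
 * smb_send_kvec() with srv_mutex held; "buf", "len" and "rc" below are
 * hypothetical. Note the exponential backoff above: with
 * msleep(1 << retries) for retries 1..13, the nonblocking path sleeps
 * roughly 2 + 4 + ... + 8192 ms, i.e. about 16 seconds in total.
 *
 *	struct kvec iov = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = {};
 *	size_t sent;
 *	int rc;
 *
 *	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, len);
 *	rc = smb_send_kvec(server, &msg, &sent);
 */
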
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
	    rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	buflen += iov_iter_count(&rqst->rq_iter);
	return buflen;
}

int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		    struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	cifs_in_send_inc(server);
	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	rc = -EAGAIN;
	if (ssocket == NULL)
		goto out;

	rc = -ERESTARTSYS;
	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		goto out;
	}

	rc = 0;
	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects, thus increasing
	 * the latency of system calls and overloading the server with
	 * unnecessary requests.
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		if (iov_iter_count(&rqst[j].rq_iter) > 0) {
			smb_msg.msg_iter = rqst[j].rq_iter;
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;
			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If a signal is pending but we have already sent the whole packet to
	 * the server, we need to return success status to allow the
	 * corresponding mid entry to be kept in the pending requests queue,
	 * which in turn allows the client to handle responses from the
	 * server.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * the interrupt because the session will be reconnected anyway, so
	 * there won't be any response from the server to handle.
	 */
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->current_mid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	/*
	 * There's hardly any use for the layers above to know the
	 * actual error code here. All they should do at this point is
	 * retry the connection and hope it goes away.
	 */
	if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
		rc = -ECONNABORTED;
		cifs_signal_cifsd_for_reconnect(server, false);
	} else if (rc > 0)
		rc = 0;
out:
	cifs_in_send_dec(server);
	return rc;
}

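/*
 * Wire framing produced above for SMB2+ (a descriptive sketch of what
 * this function already does, not an additional code path): each batch
 * of requests is preceded by a 4-byte big-endian length field, the
 * rfc1002 marker, covering every rqst in the batch:
 *
 *	+---------------+------------------------------ ... -+
 *	| be32 length   | num_rqst SMB PDUs (rq_iov kvecs    |
 *	| (send_length) | followed by any rq_iter payload)   |
 *	+---------------+------------------------------ ... -+
 *
 * A partial send of this frame is unrecoverable mid-stream, which is
 * why signals are blocked for the duration and the socket is killed if
 * total_len ends up short of send_length.
 */
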
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst new_rqst[MAX_COMPOUND] = {};
	struct kvec iov = {
		.iov_base = &tr_hdr,
		.iov_len = sizeof(tr_hdr),
	};
	int rc;

	if (flags & CIFS_COMPRESS_REQ)
		return smb_compress(server, &rqst[0], __smb_send_rqst);

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
		return -EIO;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	new_rqst[0].rq_iov = &iov;
	new_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    new_rqst, rqst);
	if (!rc) {
		rc = __smb_send_rqst(server, num_rqst + 1, new_rqst);
		smb3_free_compound_rqst(num_rqst, &new_rqst[1]);
	}
	return rc;
}

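/*
 * Shape of the encrypted send path above (a sketch of the existing
 * logic, not new behavior): for CIFS_TRANSFORM_REQ the original
 * requests are shifted down one slot and a transform header is
 * prepended as new_rqst[0], so the wire sees:
 *
 *	new_rqst[0]             new_rqst[1..num_rqst]
 *	[smb2_transform_hdr] + [encrypted forms of rqst[0..num_rqst-1]]
 *
 * This is why num_rqst is capped at MAX_COMPOUND - 1 here: one slot of
 * new_rqst[] is reserved for the transform header.
 */
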
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->current_mid,
					server->conn_id, server->hostname,
					scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
			 __func__, 1, scredits);

		return 0;
	}

	while (1) {
		spin_unlock(&server->req_lock);

		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ENOENT;
		}
		spin_unlock(&server->srv_lock);

		spin_lock(&server->req_lock);
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
							 has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->current_mid,
							  server->conn_id, server->hostname,
							  scredits, num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits for compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning the CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
						server->current_mid,
						server->conn_id, server->hostname,
						scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against the total
			 * as they are allowed to block on the server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->current_mid,
						  server->conn_id, server->hostname,
						  scredits, -(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				 __func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}

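/*
 * Worked example of the reservation above (illustrative, derived from
 * the conditions in the code rather than any new policy): suppose a
 * regular single-credit request (optype == 0, num_credits == 1)
 * arrives while in_flight > 2 * MAX_COMPOUND and only MAX_COMPOUND
 * credits remain. Rather than consuming one of those last credits, the
 * thread sleeps until at least MAX_COMPOUND + 1 credits are free,
 * keeping a full compound's worth of credits available for multi-part
 * requests.
 */
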
int wait_for_free_request(struct TCP_Server_Info *server, const int flags,
			  unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us fewer
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits, there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests are in flight since we
		 * would otherwise be stuck waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->current_mid,
							server->conn_id, server->hostname,
							scredits, num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
				 __func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

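/*
 * Decision table for the helper above (a restatement of its logic, for
 * clarity; e.g. with num = 3 credits needed):
 *
 *	*credits >= 3                 -> wait, normally returns at once
 *	*credits < 3, in_flight > 0   -> wait up to 60s for more credits
 *	*credits < 3, in_flight == 0  -> -EDEADLK: nothing in flight can
 *	                                 ever trigger a credit grant
 */
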
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
		      size_t *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

int wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_state(server->response_q,
				 midQ->mid_state != MID_REQUEST_SUBMITTED &&
				 midQ->mid_state != MID_RESPONSE_RECEIVED,
				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	cifs_server_lock(server);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&server->mid_queue_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_queue_lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * the I/O response may come back and free the mid entry on another
	 * thread.
	 */
	cifs_save_when_sent(mid);
	rc = smb_send_rqst(server, 1, rqst, flags);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		delete_mid(mid);
	}

	cifs_server_unlock(server);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

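/*
 * Illustrative call pattern (a minimal sketch; my_callback and my_ctx
 * are hypothetical names, not from this file, and error handling plus
 * credit accounting are omitted): an async sender queues a request and
 * is notified through the callback once the demultiplex thread has a
 * result, instead of sleeping in wait_for_response().
 *
 *	static void my_callback(struct mid_q_entry *mid)
 *	{
 *		struct my_ctx *ctx = mid->callback_data;
 *
 *		// inspect mid->mid_state / mid->resp_buf here, then:
 *		release_mid(mid);
 *	}
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_callback,
 *			     NULL, ctx, CIFS_ECHO_OP, NULL);
 */
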
int cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_queue_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_READY:
		spin_unlock(&server->mid_queue_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	case MID_RC:
		rc = mid->mid_rc;
		break;
	default:
		if (mid->deleted_from_q == false) {
			list_del_init(&mid->qhead);
			mid->deleted_from_q = true;
		}
		spin_unlock(&server->mid_queue_lock);
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
				__func__, mid->mid, mid->mid_state);
		rc = -EIO;
		goto sync_mid_done;
	}
	spin_unlock(&server->mid_queue_lock);

sync_mid_done:
	release_mid(mid);
	return rc;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits = {
		.value = server->ops->get_credits(mid),
		.instance = server->reconnect_instance,
	};

	add_credits(server, &credits, mid->optype);

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	struct TCP_Server_Info *server = NULL;
	int i, start, cur;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	start = atomic_inc_return(&ses->chan_seq);
	for (i = 0; i < ses->chan_count; i++) {
		cur = (start + i) % ses->chan_count;
		server = ses->chans[cur].server;
		if (!server || server->terminate)
			continue;

		if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
			continue;

		/*
		 * Strictly speaking, we should take req_lock to read
		 * server->in_flight. But it shouldn't matter much here if we
		 * race while reading this data. The worst that can happen is
		 * that we could use a channel that's not least loaded.
		 * Avoiding the lock helps reduce wait time, which is
		 * important for this function.
		 */
		if (server->in_flight < min_in_flight) {
			min_in_flight = server->in_flight;
			index = cur;
		}
		if (server->in_flight > max_in_flight)
			max_in_flight = server->in_flight;
	}

	/* if all channels are equally loaded, fall back to round-robin */
	if (min_in_flight == max_in_flight)
		index = (uint)start % ses->chan_count;

	server = ses->chans[index].server;
	spin_unlock(&ses->chan_lock);

	return server;
}

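/*
 * Worked example of the selection above (illustrative numbers): with
 * three healthy channels whose in_flight counts are {5, 2, 4} and
 * start = 7, the scan visits channels 1, 2, 0 and picks channel 1
 * (in_flight == 2, the minimum). If the counts were all equal, say
 * {3, 3, 3}, then min_in_flight == max_in_flight and the pick degrades
 * to plain round-robin on start % chan_count.
 */
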
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility of being stuck waiting
	 * for credits if the server doesn't grant credits to the outstanding
	 * requests and the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from a previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	rc = smb_send_rqst(server, num_rqst, rqst, flags);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason, or this is an oplock break that
	 * we will not receive a response to, return the credits back.
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&midQ[i]->mid_lock);
			midQ[i]->wait_cancelled = true;
			if (midQ[i]->callback) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&midQ[i]->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_READY) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
				      HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}

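/*
 * Illustrative single-request round trip (a minimal sketch; the iov
 * contents and the xid/ses/server values are assumed to be set up by
 * the caller, as the SMB2 PDU helpers do):
 *
 *	struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = n };
 *	struct kvec rsp_iov;
 *	int resp_buf_type;
 *
 *	rc = cifs_send_recv(xid, ses, server, &rqst,
 *			    &resp_buf_type, CIFS_LOG_ERROR, &rsp_iov);
 *	// on success rsp_iov points at the response PDU; release it
 *	// with free_rsp_buf(resp_buf_type, rsp_iov.iov_base) when done
 */
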
/*
 * Discard any remaining data in the current SMB. To do this, we borrow the
 * current bigbuf.
 */
int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
	unsigned int rfclen = server->pdu_size;
	size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
			   server->total_read;

	while (remaining > 0) {
		ssize_t length;

		length = cifs_discard_from_socket(server,
						  min_t(size_t, remaining,
						  CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	return 0;
}

static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}

static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_io_subrequest *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}

int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_io_subrequest *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%zu\n",
		 __func__, mid->mid, rdata->subreq.start, rdata->subreq.len);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
	      HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		      HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
		length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
						    data_len);
	if (length > 0)
		rdata->got_bytes += length;
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}

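/*
 * Frame layout consumed by cifs_readv_receive() above (a sketch of the
 * existing parsing, offsets relative to the start of the frame):
 *
 *	[preamble][READ_RSP header ...][pad][Data array, data_len bytes]
 *	^          ^                        ^
 *	0          HEADER_PREAMBLE_SIZE     data_offset
 *
 * The header (and any pad up to data_offset) lands in smallbuf; the
 * Data array is read straight into rdata->subreq.io_iter, and anything
 * left past that, up to buflen, is discarded.
 */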