xref: /linux/net/ceph/messenger_v2.c (revision c4c22b846eceff05b1129b8844a80310e55a7f87)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Ceph msgr2 protocol implementation
4  *
5  * Copyright (C) 2020 Ilya Dryomov <idryomov@gmail.com>
6  */
7 
8 #include <linux/ceph/ceph_debug.h>
9 
10 #include <crypto/aead.h>
11 #include <crypto/hash.h>
12 #include <crypto/sha2.h>
13 #include <crypto/utils.h>
14 #include <linux/bvec.h>
15 #include <linux/crc32c.h>
16 #include <linux/net.h>
17 #include <linux/scatterlist.h>
18 #include <linux/socket.h>
19 #include <linux/sched/mm.h>
20 #include <net/sock.h>
21 #include <net/tcp.h>
22 
23 #include <linux/ceph/ceph_features.h>
24 #include <linux/ceph/decode.h>
25 #include <linux/ceph/libceph.h>
26 #include <linux/ceph/messenger.h>
27 
28 #include "crypto.h"  /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */
29 
/* msgr2 frame tags, mirroring the on-wire protocol's frame types */
#define FRAME_TAG_HELLO			1
#define FRAME_TAG_AUTH_REQUEST		2
#define FRAME_TAG_AUTH_BAD_METHOD	3
#define FRAME_TAG_AUTH_REPLY_MORE	4
#define FRAME_TAG_AUTH_REQUEST_MORE	5
#define FRAME_TAG_AUTH_DONE		6
#define FRAME_TAG_AUTH_SIGNATURE	7
#define FRAME_TAG_CLIENT_IDENT		8
#define FRAME_TAG_SERVER_IDENT		9
#define FRAME_TAG_IDENT_MISSING_FEATURES 10
#define FRAME_TAG_SESSION_RECONNECT	11
#define FRAME_TAG_SESSION_RESET		12
#define FRAME_TAG_SESSION_RETRY		13
#define FRAME_TAG_SESSION_RETRY_GLOBAL	14
#define FRAME_TAG_SESSION_RECONNECT_OK	15
#define FRAME_TAG_WAIT			16
#define FRAME_TAG_MESSAGE		17
#define FRAME_TAG_KEEPALIVE2		18
#define FRAME_TAG_KEEPALIVE2_ACK	19
#define FRAME_TAG_ACK			20

/* epilogue late_status byte: low nibble distinguishes aborted/complete */
#define FRAME_LATE_STATUS_ABORTED	0x1
#define FRAME_LATE_STATUS_COMPLETE	0xe
#define FRAME_LATE_STATUS_ABORTED_MASK	0xf

/* incoming (read) state machine states */
#define IN_S_HANDLE_PREAMBLE			1
#define IN_S_HANDLE_CONTROL			2
#define IN_S_HANDLE_CONTROL_REMAINDER		3
#define IN_S_PREPARE_READ_DATA			4
#define IN_S_PREPARE_READ_DATA_CONT		5
#define IN_S_PREPARE_READ_ENC_PAGE		6
#define IN_S_PREPARE_SPARSE_DATA		7
#define IN_S_PREPARE_SPARSE_DATA_CONT		8
#define IN_S_HANDLE_EPILOGUE			9
#define IN_S_FINISH_SKIP			10

/* outgoing (write) state machine states */
#define OUT_S_QUEUE_DATA		1
#define OUT_S_QUEUE_DATA_CONT		2
#define OUT_S_QUEUE_ENC_PAGE		3
#define OUT_S_QUEUE_ZEROS		4
#define OUT_S_FINISH_MESSAGE		5
#define OUT_S_GET_NEXT			6

/* control segment body follows the fixed-size preamble in a buffer */
#define CTRL_BODY(p)	((void *)(p) + CEPH_PREAMBLE_LEN)
/* per-segment GCM padding scratch areas inside the in_buf buffer */
#define FRONT_PAD(p)	((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
#define MIDDLE_PAD(p)	(FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
#define DATA_PAD(p)	(MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)

/* non-blocking socket I/O, no SIGPIPE */
#define CEPH_MSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
79 
80 static int do_recvmsg(struct socket *sock, struct iov_iter *it)
81 {
82 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
83 	int ret;
84 
85 	msg.msg_iter = *it;
86 	while (iov_iter_count(it)) {
87 		ret = sock_recvmsg(sock, &msg, msg.msg_flags);
88 		if (ret <= 0) {
89 			if (ret == -EAGAIN)
90 				ret = 0;
91 			return ret;
92 		}
93 
94 		iov_iter_advance(it, ret);
95 	}
96 
97 	WARN_ON(msg_data_left(&msg));
98 	return 1;
99 }
100 
101 /*
102  * Read as much as possible.
103  *
104  * Return:
105  *   1 - done, nothing (else) to read
106  *   0 - socket is empty, need to wait
107  *  <0 - error
108  */
109 static int ceph_tcp_recv(struct ceph_connection *con)
110 {
111 	int ret;
112 
113 	dout("%s con %p %s %zu\n", __func__, con,
114 	     iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
115 	     iov_iter_count(&con->v2.in_iter));
116 	ret = do_recvmsg(con->sock, &con->v2.in_iter);
117 	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
118 	     iov_iter_count(&con->v2.in_iter));
119 	return ret;
120 }
121 
122 static int do_sendmsg(struct socket *sock, struct iov_iter *it)
123 {
124 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
125 	int ret;
126 
127 	msg.msg_iter = *it;
128 	while (iov_iter_count(it)) {
129 		ret = sock_sendmsg(sock, &msg);
130 		if (ret <= 0) {
131 			if (ret == -EAGAIN)
132 				ret = 0;
133 			return ret;
134 		}
135 
136 		iov_iter_advance(it, ret);
137 	}
138 
139 	WARN_ON(msg_data_left(&msg));
140 	return 1;
141 }
142 
/*
 * Zero-copy variant of do_sendmsg(): walk a bvec iterator one bvec at
 * a time and send each page with MSG_SPLICE_PAGES when the page is
 * eligible, falling back to an ordinary copy otherwise.
 */
static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
{
	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
	struct bio_vec bv;
	int ret;

	if (WARN_ON(!iov_iter_is_bvec(it)))
		return -EINVAL;

	while (iov_iter_count(it)) {
		/* iov_iter_iovec() for ITER_BVEC */
		/*
		 * Build a single-page bvec for the current position:
		 * it->iov_offset is how far we are into the current bvec,
		 * and the length is capped by both the remaining iterator
		 * count and the remainder of that bvec.
		 */
		bvec_set_page(&bv, it->bvec->bv_page,
			      min(iov_iter_count(it),
				  it->bvec->bv_len - it->iov_offset),
			      it->bvec->bv_offset + it->iov_offset);

		/*
		 * MSG_SPLICE_PAGES cannot properly handle pages with
		 * page_count == 0, we need to fall back to sendmsg if
		 * that's the case.
		 *
		 * Same goes for slab pages: skb_can_coalesce() allows
		 * coalescing neighboring slab objects into a single frag
		 * which triggers one of hardened usercopy checks.
		 */
		if (sendpage_ok(bv.bv_page))
			msg.msg_flags |= MSG_SPLICE_PAGES;
		else
			msg.msg_flags &= ~MSG_SPLICE_PAGES;

		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len);
		ret = sock_sendmsg(sock, &msg);
		if (ret <= 0) {
			if (ret == -EAGAIN)
				ret = 0;
			return ret;
		}

		iov_iter_advance(it, ret);
	}

	return 1;
}
186 
187 /*
188  * Write as much as possible.  The socket is expected to be corked,
189  * so we don't bother with MSG_MORE here.
190  *
191  * Return:
192  *   1 - done, nothing (else) to write
193  *   0 - socket is full, need to wait
194  *  <0 - error
195  */
196 static int ceph_tcp_send(struct ceph_connection *con)
197 {
198 	int ret;
199 
200 	dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
201 	     iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
202 	if (con->v2.out_iter_sendpage)
203 		ret = do_try_sendpage(con->sock, &con->v2.out_iter);
204 	else
205 		ret = do_sendmsg(con->sock, &con->v2.out_iter);
206 	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
207 	     iov_iter_count(&con->v2.out_iter));
208 	return ret;
209 }
210 
211 static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
212 {
213 	BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
214 	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
215 
216 	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
217 	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
218 	con->v2.in_kvec_cnt++;
219 
220 	con->v2.in_iter.nr_segs++;
221 	con->v2.in_iter.count += len;
222 }
223 
/* Restart the incoming iterator as an empty kvec iter over in_kvecs. */
static void reset_in_kvecs(struct ceph_connection *con)
{
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	con->v2.in_kvec_cnt = 0;
	iov_iter_kvec(&con->v2.in_iter, ITER_DEST, con->v2.in_kvecs, 0, 0);
}
231 
/*
 * Point the incoming iterator at a single bvec.  *bv is copied into
 * con->v2.in_bvec so the iterator references stable storage.
 */
static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
{
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	con->v2.in_bvec = *bv;
	iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len);
}
239 
/* Arrange for the next @len incoming bytes to be read and discarded. */
static void set_in_skip(struct ceph_connection *con, int len)
{
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	dout("%s con %p len %d\n", __func__, con, len);
	iov_iter_discard(&con->v2.in_iter, ITER_DEST, len);
}
247 
248 static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
249 {
250 	BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
251 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
252 	WARN_ON(con->v2.out_zero);
253 
254 	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
255 	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
256 	con->v2.out_kvec_cnt++;
257 
258 	con->v2.out_iter.nr_segs++;
259 	con->v2.out_iter.count += len;
260 }
261 
/*
 * Restart the outgoing iterator as an empty kvec iter over out_kvecs
 * and drop back to the plain sendmsg path.
 */
static void reset_out_kvecs(struct ceph_connection *con)
{
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_kvec_cnt = 0;

	iov_iter_kvec(&con->v2.out_iter, ITER_SOURCE, con->v2.out_kvecs, 0, 0);
	con->v2.out_iter_sendpage = false;
}
272 
/*
 * Point the outgoing iterator at a single bvec.  @zerocopy selects the
 * sendpage (MSG_SPLICE_PAGES) transmit path in ceph_tcp_send().
 */
static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
			 bool zerocopy)
{
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_bvec = *bv;
	con->v2.out_iter_sendpage = zerocopy;
	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
		      con->v2.out_bvec.bv_len);
}
284 
/*
 * Queue up to one page's worth of zeros from the shared ceph_zero_page.
 * Called repeatedly until out_zero is drained.
 */
static void set_out_bvec_zero(struct ceph_connection *con)
{
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(!con->v2.out_zero);

	bvec_set_page(&con->v2.out_bvec, ceph_zero_page,
		      min(con->v2.out_zero, (int)PAGE_SIZE), 0);
	con->v2.out_iter_sendpage = true;
	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
		      con->v2.out_bvec.bv_len);
}
296 
/* Account @len more zero bytes to be emitted on the wire. */
static void out_zero_add(struct ceph_connection *con, int len)
{
	dout("%s con %p len %d\n", __func__, con, len);
	con->v2.out_zero += len;
}
302 
303 static void *alloc_conn_buf(struct ceph_connection *con, int len)
304 {
305 	void *buf;
306 
307 	dout("%s con %p len %d\n", __func__, con, len);
308 
309 	if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))
310 		return NULL;
311 
312 	buf = kvmalloc(len, GFP_NOIO);
313 	if (!buf)
314 		return NULL;
315 
316 	con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;
317 	return buf;
318 }
319 
320 static void free_conn_bufs(struct ceph_connection *con)
321 {
322 	while (con->v2.conn_buf_cnt)
323 		kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);
324 }
325 
326 static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
327 {
328 	BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));
329 
330 	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
331 	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
332 	con->v2.in_sign_kvec_cnt++;
333 }
334 
/* Forget all buffers accumulated for the incoming signature HMAC. */
static void clear_in_sign_kvecs(struct ceph_connection *con)
{
	con->v2.in_sign_kvec_cnt = 0;
}
339 
340 static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
341 {
342 	BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));
343 
344 	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
345 	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
346 	con->v2.out_sign_kvec_cnt++;
347 }
348 
/* Forget all buffers accumulated for the outgoing signature HMAC. */
static void clear_out_sign_kvecs(struct ceph_connection *con)
{
	con->v2.out_sign_kvec_cnt = 0;
}
353 
/* True if the connection negotiated the encrypted (secure) mode. */
static bool con_secure(struct ceph_connection *con)
{
	return con->v2.con_mode == CEPH_CON_MODE_SECURE;
}
358 
/* Front segment length from the little-endian message header. */
static int front_len(const struct ceph_msg *msg)
{
	return le32_to_cpu(msg->hdr.front_len);
}
363 
/* Middle segment length from the little-endian message header. */
static int middle_len(const struct ceph_msg *msg)
{
	return le32_to_cpu(msg->hdr.middle_len);
}
368 
/* Data segment length from the little-endian message header. */
static int data_len(const struct ceph_msg *msg)
{
	return le32_to_cpu(msg->hdr.data_len);
}
373 
/* Does @len need padding out to an AES-GCM block boundary? */
static bool need_padding(int len)
{
	return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);
}
378 
/* @len rounded up to the next AES-GCM block boundary. */
static int padded_len(int len)
{
	return ALIGN(len, CEPH_GCM_BLOCK_LEN);
}
383 
/* Number of padding bytes needed to reach the next GCM block boundary. */
static int padding_len(int len)
{
	return padded_len(len) - len;
}
388 
389 /* preamble + control segment */
390 static int head_onwire_len(int ctrl_len, bool secure)
391 {
392 	int head_len;
393 	int rem_len;
394 
395 	BUG_ON(ctrl_len < 1 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
396 
397 	if (secure) {
398 		head_len = CEPH_PREAMBLE_SECURE_LEN;
399 		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
400 			rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
401 			head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
402 		}
403 	} else {
404 		head_len = CEPH_PREAMBLE_PLAIN_LEN + ctrl_len + CEPH_CRC_LEN;
405 	}
406 	return head_len;
407 }
408 
409 /* front, middle and data segments + epilogue */
410 static int __tail_onwire_len(int front_len, int middle_len, int data_len,
411 			     bool secure)
412 {
413 	BUG_ON(front_len < 0 || front_len > CEPH_MSG_MAX_FRONT_LEN ||
414 	       middle_len < 0 || middle_len > CEPH_MSG_MAX_MIDDLE_LEN ||
415 	       data_len < 0 || data_len > CEPH_MSG_MAX_DATA_LEN);
416 
417 	if (!front_len && !middle_len && !data_len)
418 		return 0;
419 
420 	if (!secure)
421 		return front_len + middle_len + data_len +
422 		       CEPH_EPILOGUE_PLAIN_LEN;
423 
424 	return padded_len(front_len) + padded_len(middle_len) +
425 	       padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;
426 }
427 
/* On-wire tail size for @msg, derived from its header lengths. */
static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
{
	return __tail_onwire_len(front_len(msg), middle_len(msg),
				 data_len(msg), secure);
}
433 
/* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
#define MESSAGE_HEAD_PLAIN_LEN	(CEPH_PREAMBLE_PLAIN_LEN +		\
				 sizeof(struct ceph_msg_header2) +	\
				 CEPH_CRC_LEN)

/*
 * Per-segment alignments advertised in frame descriptors: pointer
 * alignment for control/front/middle, page alignment for data.
 */
static const int frame_aligns[] = {
	sizeof(void *),
	sizeof(void *),
	sizeof(void *),
	PAGE_SIZE
};
445 
/*
 * Discards trailing empty segments, unless there is just one segment.
 * A frame always has at least one (possibly empty) segment.
 */
static int calc_segment_count(const int *lens, int len_cnt)
{
	int n;

	/* strip trailing zero-length segments, keeping at least one */
	for (n = len_cnt; n > 1; n--) {
		if (lens[n - 1])
			break;
	}

	return n ? n : 1;
}
461 
/*
 * Fill in a frame descriptor from a tag and segment lengths; unused
 * slots stay zeroed.  Alignments come from the frame_aligns table.
 */
static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
			    const int *lens, int len_cnt)
{
	int i;

	memset(desc, 0, sizeof(*desc));

	desc->fd_tag = tag;
	desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
	BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		desc->fd_lens[i] = lens[i];
		desc->fd_aligns[i] = frame_aligns[i];
	}
}
477 
/*
 * Preamble crc covers everything up to itself (28 bytes) and
 * is calculated and verified irrespective of the connection mode
 * (i.e. even if the frame is encrypted).
 */
static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
{
	/* crc lives in the last CEPH_CRC_LEN bytes of the preamble */
	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
	void *start = p;
	int i;

	memset(p, 0, CEPH_PREAMBLE_LEN);

	ceph_encode_8(&p, desc->fd_tag);
	ceph_encode_8(&p, desc->fd_seg_cnt);
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		ceph_encode_32(&p, desc->fd_lens[i]);
		ceph_encode_16(&p, desc->fd_aligns[i]);
	}

	put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
}
500 
/*
 * Parse and validate a frame preamble into @desc.
 *
 * The crc is verified first; then the tag, segment count and per-segment
 * length/alignment pairs are decoded and range-checked against the
 * per-segment maximums.  Lengths of segments beyond fd_seg_cnt remain
 * zero (memset below), so the range checks on fd_lens[1..3] are safe
 * even for frames with fewer segments.
 */
static int decode_preamble(void *p, struct ceph_frame_desc *desc)
{
	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
	u32 crc, expected_crc;
	int i;

	crc = crc32c(0, p, crcp - p);
	expected_crc = get_unaligned_le32(crcp);
	if (crc != expected_crc) {
		pr_err("bad preamble crc, calculated %u, expected %u\n",
		       crc, expected_crc);
		return -EBADMSG;
	}

	memset(desc, 0, sizeof(*desc));

	desc->fd_tag = ceph_decode_8(&p);
	desc->fd_seg_cnt = ceph_decode_8(&p);
	if (desc->fd_seg_cnt < 1 ||
	    desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
		pr_err("bad segment count %d\n", desc->fd_seg_cnt);
		return -EINVAL;
	}
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		desc->fd_lens[i] = ceph_decode_32(&p);
		desc->fd_aligns[i] = ceph_decode_16(&p);
	}

	/*
	 * This would fire for FRAME_TAG_WAIT (it has one empty
	 * segment), but we should never get it as client.
	 */
	if (desc->fd_lens[0] < 1 ||
	    desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
		pr_err("bad control segment length %d\n", desc->fd_lens[0]);
		return -EINVAL;
	}

	if (desc->fd_lens[1] < 0 ||
	    desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
		pr_err("bad front segment length %d\n", desc->fd_lens[1]);
		return -EINVAL;
	}
	if (desc->fd_lens[2] < 0 ||
	    desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
		pr_err("bad middle segment length %d\n", desc->fd_lens[2]);
		return -EINVAL;
	}
	if (desc->fd_lens[3] < 0 ||
	    desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
		pr_err("bad data segment length %d\n", desc->fd_lens[3]);
		return -EINVAL;
	}

	/* trailing empty segments should have been discarded by sender */
	if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
		pr_err("last segment empty, segment count %d\n",
		       desc->fd_seg_cnt);
		return -EINVAL;
	}

	return 0;
}
563 
/*
 * Finalize the plain-mode epilogue: set late_status and convert the
 * already-computed segment crcs to little-endian in place.
 */
static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
{
	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
						 FRAME_LATE_STATUS_COMPLETE;
	cpu_to_le32s(&con->v2.out_epil.front_crc);
	cpu_to_le32s(&con->v2.out_epil.middle_crc);
	cpu_to_le32s(&con->v2.out_epil.data_crc);
}
572 
/* Secure-mode epilogue carries only late_status; crcs stay zeroed. */
static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
{
	memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
						 FRAME_LATE_STATUS_COMPLETE;
}
579 
/*
 * Decode a frame epilogue.  The crc out-params are only filled in when
 * all three are supplied (plain mode); secure-mode callers pass NULLs.
 */
static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
			   u32 *data_crc)
{
	u8 late_status;

	late_status = ceph_decode_8(&p);
	if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
			FRAME_LATE_STATUS_COMPLETE) {
		/* we should never get an aborted message as client */
		pr_err("bad late_status 0x%x\n", late_status);
		return -EINVAL;
	}

	if (front_crc && middle_crc && data_crc) {
		*front_crc = ceph_decode_32(&p);
		*middle_crc = ceph_decode_32(&p);
		*data_crc = ceph_decode_32(&p);
	}

	return 0;
}
601 
/*
 * Build a legacy (msgr1-style) header from a received msgr2 header2
 * plus the segment lengths and the peer's entity name.  crc is left 0
 * since msgr2 integrity is handled per-frame, not via the header crc.
 */
static void fill_header(struct ceph_msg_header *hdr,
			const struct ceph_msg_header2 *hdr2,
			int front_len, int middle_len, int data_len,
			const struct ceph_entity_name *peer_name)
{
	hdr->seq = hdr2->seq;
	hdr->tid = hdr2->tid;
	hdr->type = hdr2->type;
	hdr->priority = hdr2->priority;
	hdr->version = hdr2->version;
	hdr->front_len = cpu_to_le32(front_len);
	hdr->middle_len = cpu_to_le32(middle_len);
	hdr->data_len = cpu_to_le32(data_len);
	hdr->data_off = hdr2->data_off;
	hdr->src = *peer_name;
	hdr->compat_version = hdr2->compat_version;
	hdr->reserved = 0;
	hdr->crc = 0;
}
621 
/*
 * Build an outgoing msgr2 header2 from a legacy header, embedding the
 * sequence number we are acknowledging.  Lengths travel in the frame
 * descriptor instead of the header, so they are not copied here.
 */
static void fill_header2(struct ceph_msg_header2 *hdr2,
			 const struct ceph_msg_header *hdr, u64 ack_seq)
{
	hdr2->seq = hdr->seq;
	hdr2->tid = hdr->tid;
	hdr2->type = hdr->type;
	hdr2->priority = hdr->priority;
	hdr2->version = hdr->version;
	hdr2->data_pre_padding_len = 0;
	hdr2->data_off = hdr->data_off;
	hdr2->ack_seq = cpu_to_le64(ack_seq);
	hdr2->flags = 0;
	hdr2->compat_version = hdr->compat_version;
	hdr2->reserved = 0;
}
637 
/*
 * Verify the control segment crc in plain mode.  Expects in_kvecs[0]
 * to hold the control body and in_kvecs[1] the trailing crc.  Note the
 * -1 seed, matching the sender's control crc computation.
 */
static int verify_control_crc(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	u32 crc, expected_crc;

	WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
	WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);

	crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
	expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
	if (crc != expected_crc) {
		pr_err("bad control crc, calculated %u, expected %u\n",
		       crc, expected_crc);
		return -EBADMSG;
	}

	return 0;
}
656 
/*
 * Compute crcs for the received front/middle segments and compare all
 * three segment crcs against the epilogue values.  An absent segment
 * followed by a present one gets crc -1; trailing absent segments get
 * crc 0 — this mirrors the sender's convention.  in_data_crc for a
 * present data segment is accumulated elsewhere during the data read
 * (NOTE(review): not visible in this function — confirm in the read
 * path before relying on it).
 */
static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
				u32 middle_crc, u32 data_crc)
{
	if (front_len(con->in_msg)) {
		con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
					   front_len(con->in_msg));
	} else {
		WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
		con->in_front_crc = -1;
	}

	if (middle_len(con->in_msg))
		con->in_middle_crc = crc32c(-1,
					    con->in_msg->middle->vec.iov_base,
					    middle_len(con->in_msg));
	else if (data_len(con->in_msg))
		con->in_middle_crc = -1;
	else
		con->in_middle_crc = 0;

	if (!data_len(con->in_msg))
		con->in_data_crc = 0;

	dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);

	if (con->in_front_crc != front_crc) {
		pr_err("bad front crc, calculated %u, expected %u\n",
		       con->in_front_crc, front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != middle_crc) {
		pr_err("bad middle crc, calculated %u, expected %u\n",
		       con->in_middle_crc, middle_crc);
		return -EBADMSG;
	}
	if (con->in_data_crc != data_crc) {
		pr_err("bad data crc, calculated %u, expected %u\n",
		       con->in_data_crc, data_crc);
		return -EBADMSG;
	}

	return 0;
}
701 
/*
 * Set up per-connection crypto state from the keys produced by
 * authentication:
 *
 *  - no session key          -> auth_none, nothing to set up
 *  - session key only        -> auth_x in CRC mode: HMAC-SHA256 key
 *  - session key + con_secret -> auth_x in secure mode: HMAC key plus
 *    an AES-GCM tfm/request; con_secret is laid out as a 16-byte GCM
 *    key followed by the 12-byte rx nonce and 12-byte tx nonce.
 *
 * On error the partially initialized gcm_tfm/gcm_req are left in place;
 * presumably released by connection teardown (not visible here).
 */
static int setup_crypto(struct ceph_connection *con,
			const u8 *session_key, int session_key_len,
			const u8 *con_secret, int con_secret_len)
{
	unsigned int noio_flag;
	int ret;

	dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
	     __func__, con, con->v2.con_mode, session_key_len, con_secret_len);
	WARN_ON(con->v2.hmac_key_set || con->v2.gcm_tfm || con->v2.gcm_req);

	if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
	    con->v2.con_mode != CEPH_CON_MODE_SECURE) {
		pr_err("bad con_mode %d\n", con->v2.con_mode);
		return -EINVAL;
	}

	if (!session_key_len) {
		WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
		WARN_ON(con_secret_len);
		return 0;  /* auth_none */
	}

	hmac_sha256_preparekey(&con->v2.hmac_key, session_key, session_key_len);
	con->v2.hmac_key_set = true;

	if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
		WARN_ON(con_secret_len);
		return 0;  /* auth_x, plain mode */
	}

	if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
		pr_err("con_secret too small %d\n", con_secret_len);
		return -EINVAL;
	}

	/* avoid recursing into fs reclaim while allocating the tfm */
	noio_flag = memalloc_noio_save();
	con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(con->v2.gcm_tfm)) {
		ret = PTR_ERR(con->v2.gcm_tfm);
		con->v2.gcm_tfm = NULL;
		pr_err("failed to allocate gcm tfm context: %d\n", ret);
		return ret;
	}

	WARN_ON((unsigned long)con_secret &
		crypto_aead_alignmask(con->v2.gcm_tfm));
	ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
	if (ret) {
		pr_err("failed to set gcm key: %d\n", ret);
		return ret;
	}

	WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
	ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
	if (ret) {
		pr_err("failed to set gcm tag size: %d\n", ret);
		return ret;
	}

	con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
	if (!con->v2.gcm_req) {
		pr_err("failed to allocate gcm request\n");
		return -ENOMEM;
	}

	crypto_init_wait(&con->v2.gcm_wait);
	aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &con->v2.gcm_wait);

	/* rx nonce first, tx nonce second in con_secret */
	memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
	       CEPH_GCM_IV_LEN);
	memcpy(&con->v2.out_gcm_nonce,
	       con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
	       CEPH_GCM_IV_LEN);
	return 0;  /* auth_x, secure mode */
}
780 
/*
 * HMAC-SHA256 over the concatenation of @kvecs using the session key.
 * With no session key (auth_none) the digest is all zeros.
 */
static void con_hmac_sha256(struct ceph_connection *con,
			    const struct kvec *kvecs, int kvec_cnt,
			    u8 hmac[SHA256_DIGEST_SIZE])
{
	struct hmac_sha256_ctx ctx;
	int i;

	dout("%s con %p hmac_key_set %d kvec_cnt %d\n", __func__, con,
	     con->v2.hmac_key_set, kvec_cnt);

	if (!con->v2.hmac_key_set) {
		memset(hmac, 0, SHA256_DIGEST_SIZE);
		return;  /* auth_none */
	}

	/* auth_x, both plain and secure modes */
	hmac_sha256_init(&ctx, &con->v2.hmac_key);
	for (i = 0; i < kvec_cnt; i++)
		hmac_sha256_update(&ctx, kvecs[i].iov_base, kvecs[i].iov_len);
	hmac_sha256_final(&ctx, hmac);
}
802 
803 static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
804 {
805 	u64 counter;
806 
807 	counter = le64_to_cpu(nonce->counter);
808 	nonce->counter = cpu_to_le64(counter + 1);
809 }
810 
/*
 * Run one AES-GCM operation (no AAD) using the shared per-connection
 * request, picking the tx or rx nonce by direction.  The nonce is only
 * advanced on success, so a failed operation can be safely retried or
 * the connection torn down without desynchronizing counters.
 */
static int gcm_crypt(struct ceph_connection *con, bool encrypt,
		     struct scatterlist *src, struct scatterlist *dst,
		     int src_len)
{
	struct ceph_gcm_nonce *nonce;
	int ret;

	nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;

	aead_request_set_ad(con->v2.gcm_req, 0);  /* no AAD */
	aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
	ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
					crypto_aead_decrypt(con->v2.gcm_req),
			      &con->v2.gcm_wait);
	if (ret)
		return ret;

	gcm_inc_nonce(nonce);
	return 0;
}
831 
/*
 * Peek at the next piece of message data as a bvec without consuming
 * it.  The cursor must have data remaining overall (total_resid != 0),
 * which the WARN_ON documents.
 */
static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
			struct bio_vec *bv)
{
	struct page *page;
	size_t off, len;

	WARN_ON(!cursor->total_resid);

	/* skip zero-length data items */
	while (!cursor->resid)
		ceph_msg_data_advance(cursor, 0);

	/* get a piece of data, cursor isn't advanced */
	page = ceph_msg_data_next(cursor, &off, &len);
	bvec_set_page(bv, page, len, off);
}
848 
849 static int calc_sg_cnt(void *buf, int buf_len)
850 {
851 	int sg_cnt;
852 
853 	if (!buf_len)
854 		return 0;
855 
856 	sg_cnt = need_padding(buf_len) ? 1 : 0;
857 	if (is_vmalloc_addr(buf)) {
858 		WARN_ON(offset_in_page(buf));
859 		sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;
860 	} else {
861 		sg_cnt++;
862 	}
863 
864 	return sg_cnt;
865 }
866 
/*
 * Number of scatterlist entries needed for the message data reached
 * through @cursor: one per data piece, plus one for GCM block padding
 * when required.  Consumes the cursor; callers re-init it afterwards.
 */
static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
{
	int data_len = cursor->total_resid;
	struct bio_vec bv;
	int sg_cnt;

	if (!data_len)
		return 0;

	sg_cnt = need_padding(data_len) ? 1 : 0;
	do {
		get_bvec_at(cursor, &bv);
		sg_cnt++;

		ceph_msg_data_advance(cursor, bv.bv_len);
	} while (cursor->total_resid);

	return sg_cnt;
}
886 
/*
 * Populate scatterlist entries for @buf (advancing *sg as we go),
 * walking vmalloc'ed memory page by page, and append a padding entry
 * pointing at @pad when the length is not GCM-block aligned.
 */
static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
{
	void *end = buf + buf_len;
	struct page *page;
	int len;
	void *p;

	if (!buf_len)
		return;

	if (is_vmalloc_addr(buf)) {
		/* vmalloc'ed buffers must be mapped a page at a time */
		p = buf;
		do {
			page = vmalloc_to_page(p);
			len = min_t(int, end - p, PAGE_SIZE);
			WARN_ON(!page || !len || offset_in_page(p));
			sg_set_page(*sg, page, len, 0);
			*sg = sg_next(*sg);
			p += len;
		} while (p != end);
	} else {
		sg_set_buf(*sg, buf, buf_len);
		*sg = sg_next(*sg);
	}

	if (need_padding(buf_len)) {
		sg_set_buf(*sg, pad, padding_len(buf_len));
		*sg = sg_next(*sg);
	}
}
917 
/*
 * Populate scatterlist entries for the message data reached through
 * @cursor (advancing *sg as we go), and append a padding entry pointing
 * at @pad when the total length is not GCM-block aligned.  Consumes the
 * cursor.
 */
static void init_sgs_cursor(struct scatterlist **sg,
			    struct ceph_msg_data_cursor *cursor, u8 *pad)
{
	int data_len = cursor->total_resid;
	struct bio_vec bv;

	if (!data_len)
		return;

	do {
		get_bvec_at(cursor, &bv);
		sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
		*sg = sg_next(*sg);

		ceph_msg_data_advance(cursor, bv.bv_len);
	} while (cursor->total_resid);

	if (need_padding(data_len)) {
		sg_set_buf(*sg, pad, padding_len(data_len));
		*sg = sg_next(*sg);
	}
}
940 
/**
 * init_sgs_pages: set up scatterlist on an array of page pointers
 * @sg:		scatterlist to populate
 * @pages:	pointer to page array
 * @dpos:	position in the array to start (bytes)
 * @dlen:	len to add to sg (bytes)
 * @pad:	pointer to pad destination (if any)
 *
 * Populate the scatterlist from the page array, starting at an arbitrary
 * byte in the array and running for a specified length.
 */
static void init_sgs_pages(struct scatterlist **sg, struct page **pages,
			   int dpos, int dlen, u8 *pad)
{
	int idx = dpos >> PAGE_SHIFT;
	int off = offset_in_page(dpos);
	int resid = dlen;

	do {
		/* first iteration may start mid-page; the rest start at 0 */
		int len = min(resid, (int)PAGE_SIZE - off);

		sg_set_page(*sg, pages[idx], len, off);
		*sg = sg_next(*sg);
		off = 0;
		++idx;
		resid -= len;
	} while (resid);

	if (need_padding(dlen)) {
		sg_set_buf(*sg, pad, padding_len(dlen));
		*sg = sg_next(*sg);
	}
}
974 
/*
 * Build a scatterlist covering a message tail for in-place GCM crypto:
 * front, middle, data (each followed by its padding scratch area when
 * not block-aligned) and finally the epilogue, optionally extended by
 * the auth tag.  The data segment comes either from @pages starting at
 * byte @dpos (sparse-read decrypt path) or from a fresh data cursor.
 * Returns 0 on success; sgt must be freed by the caller.
 */
static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
			     u8 *front_pad, u8 *middle_pad, u8 *data_pad,
			     void *epilogue, struct page **pages, int dpos,
			     bool add_tag)
{
	struct ceph_msg_data_cursor cursor;
	struct scatterlist *cur_sg;
	int dlen = data_len(msg);
	int sg_cnt;
	int ret;

	/* an entirely empty tail needs no scatterlist at all */
	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
		return 0;

	sg_cnt = 1;  /* epilogue + [auth tag] */
	if (front_len(msg))
		sg_cnt += calc_sg_cnt(msg->front.iov_base,
				      front_len(msg));
	if (middle_len(msg))
		sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
				      middle_len(msg));
	if (dlen) {
		if (pages) {
			sg_cnt += calc_pages_for(dpos, dlen);
			if (need_padding(dlen))
				sg_cnt++;
		} else {
			ceph_msg_data_cursor_init(&cursor, msg, dlen);
			sg_cnt += calc_sg_cnt_cursor(&cursor);
		}
	}

	ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
	if (ret)
		return ret;

	cur_sg = sgt->sgl;
	if (front_len(msg))
		init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
			 front_pad);
	if (middle_len(msg))
		init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
			 middle_pad);
	if (dlen) {
		if (pages) {
			init_sgs_pages(&cur_sg, pages, dpos, dlen, data_pad);
		} else {
			/* cursor was consumed by the counting pass above */
			ceph_msg_data_cursor_init(&cursor, msg, dlen);
			init_sgs_cursor(&cur_sg, &cursor, data_pad);
		}
	}

	WARN_ON(!sg_is_last(cur_sg));
	sg_set_buf(cur_sg, epilogue,
		   CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
	return 0;
}
1032 
/* Decrypt the secure-mode preamble in place in con->v2.in_buf. */
static int decrypt_preamble(struct ceph_connection *con)
{
	struct scatterlist sg;

	sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
	return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);
}
1040 
/*
 * Decrypt, in place, the part of the control segment that did not fit
 * into the inline area of the secure preamble.  in_kvecs[0] holds the
 * remainder; in_kvecs[1] (backed by in_buf) holds its GCM padding and
 * auth tag.
 */
static int decrypt_control_remainder(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
	struct scatterlist sgs[2];

	WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
	WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);

	sg_init_table(sgs, 2);
	sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
	sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);

	return gcm_crypt(con, false, sgs, sgs,
			 padded_len(rem_len) + CEPH_GCM_TAG_LEN);
}
1058 
/* Process sparse read data that lives in a buffer */
/*
 * Walk the decrypted sparse-read payload held in @pages (starting at
 * byte offset @spos) and hand each extent to the osd client: either
 * copied into the buffer the ->sparse_read() op supplies, or copied
 * directly into the message data cursor when no buffer is given.
 * Returns <= 0 from ->sparse_read() (0 when the read is complete).
 */
static int process_v2_sparse_read(struct ceph_connection *con,
				  struct page **pages, int spos)
{
	struct ceph_msg_data_cursor cursor;
	int ret;

	ceph_msg_data_cursor_init(&cursor, con->in_msg,
				  con->in_msg->sparse_read_total);

	for (;;) {
		char *buf = NULL;

		/* ret > 0 is the length of the next extent to consume */
		ret = con->ops->sparse_read(con, &cursor, &buf);
		if (ret <= 0)
			return ret;

		dout("%s: sparse_read return %x buf %p\n", __func__, ret, buf);

		do {
			int idx = spos >> PAGE_SHIFT;
			int soff = offset_in_page(spos);
			struct page *spage = con->v2.in_enc_pages[idx];
			int len = min_t(int, ret, PAGE_SIZE - soff);

			if (buf) {
				memcpy_from_page(buf, spage, soff, len);
				buf += len;
			} else {
				struct bio_vec bv;

				/* copy is additionally capped by the
				 * current destination bvec */
				get_bvec_at(&cursor, &bv);
				len = min_t(int, len, bv.bv_len);
				memcpy_page(bv.bv_page, bv.bv_offset,
					    spage, soff, len);
				ceph_msg_data_advance(&cursor, len);
			}
			spos += len;
			ret -= len;
		} while (ret);
	}
}
1101 
/*
 * Decrypt the tail of a secure message (front + middle + data +
 * epilogue + auth tag) that was received into con->v2.in_enc_pages.
 *
 * The ciphertext pages are mapped via @enc_sgt; the plaintext
 * destinations (msg front/middle/data buffers and the epilogue in
 * in_buf) are gathered into @sgt by setup_message_sgs().  For sparse
 * reads the data segment is decrypted in place in the enc pages and
 * distributed afterwards by process_v2_sparse_read().
 *
 * On success the enc page vector is released; on error it is kept and
 * presumably cleaned up by the caller/teardown path.
 */
static int decrypt_tail(struct ceph_connection *con)
{
	struct sg_table enc_sgt = {};
	struct sg_table sgt = {};
	struct page **pages = NULL;
	bool sparse = !!con->in_msg->sparse_read_total;
	int dpos = 0;
	int tail_len;
	int ret;

	tail_len = tail_onwire_len(con->in_msg, true);
	ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
					con->v2.in_enc_page_cnt, 0, tail_len,
					GFP_NOIO);
	if (ret)
		goto out;

	if (sparse) {
		/*
		 * Offset of the data segment within the tail.  Written as
		 * padded_len(front + padded_len(middle)), which equals
		 * padded_len(front) + padded_len(middle) because
		 * padded_len(middle) is already a multiple of the GCM
		 * block size.
		 */
		dpos = padded_len(front_len(con->in_msg) + padded_len(middle_len(con->in_msg)));
		pages = con->v2.in_enc_pages;
	}

	ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
				MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
				con->v2.in_buf, pages, dpos, true);
	if (ret)
		goto out;

	dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
	     con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
	ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);
	if (ret)
		goto out;

	/* scatter decrypted extents into the message data buffers */
	if (sparse && data_len(con->in_msg)) {
		ret = process_v2_sparse_read(con, con->v2.in_enc_pages, dpos);
		if (ret)
			goto out;
	}

	WARN_ON(!con->v2.in_enc_page_cnt);
	ceph_release_page_vector(con->v2.in_enc_pages,
				 con->v2.in_enc_page_cnt);
	con->v2.in_enc_pages = NULL;
	con->v2.in_enc_page_cnt = 0;

out:
	sg_free_table(&sgt);
	sg_free_table(&enc_sgt);
	return ret;
}
1153 
1154 static int prepare_banner(struct ceph_connection *con)
1155 {
1156 	int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;
1157 	void *buf, *p;
1158 
1159 	buf = alloc_conn_buf(con, buf_len);
1160 	if (!buf)
1161 		return -ENOMEM;
1162 
1163 	p = buf;
1164 	ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
1165 	ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
1166 	ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
1167 	ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
1168 	WARN_ON(p != buf + buf_len);
1169 
1170 	add_out_kvec(con, buf, buf_len);
1171 	add_out_sign_kvec(con, buf, buf_len);
1172 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1173 	return 0;
1174 }
1175 
1176 /*
1177  * base:
1178  *   preamble
1179  *   control body (ctrl_len bytes)
1180  *   space for control crc
1181  *
1182  * extdata (optional):
1183  *   control body (extdata_len bytes)
1184  *
1185  * Compute control crc and gather base and extdata into:
1186  *
1187  *   preamble
1188  *   control body (ctrl_len + extdata_len bytes)
1189  *   control crc
1190  *
1191  * Preamble should already be encoded at the start of base.
1192  */
static void prepare_head_plain(struct ceph_connection *con, void *base,
			       int ctrl_len, void *extdata, int extdata_len,
			       bool to_be_signed)
{
	int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
	void *crcp = base + base_len - CEPH_CRC_LEN;
	u32 crc;

	/* crc covers the control body in base plus extdata, if any */
	crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
	if (extdata_len)
		crc = crc32c(crc, extdata, extdata_len);
	put_unaligned_le32(crc, crcp);

	if (!extdata_len) {
		/* contiguous: preamble + control body + crc in one kvec */
		add_out_kvec(con, base, base_len);
		if (to_be_signed)
			add_out_sign_kvec(con, base, base_len);
		return;
	}

	/* splice extdata between the base control body and the crc */
	add_out_kvec(con, base, crcp - base);
	add_out_kvec(con, extdata, extdata_len);
	add_out_kvec(con, crcp, CEPH_CRC_LEN);
	if (to_be_signed) {
		/* sign kvecs mirror the on-wire order exactly */
		add_out_sign_kvec(con, base, crcp - base);
		add_out_sign_kvec(con, extdata, extdata_len);
		add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
	}
}
1222 
/*
 * Encrypt a control frame that fits entirely into the preamble inline
 * buffer (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN) and queue it for
 * sending.  The auth tag lands at the end of the secure preamble, hence
 * the plaintext length excludes CEPH_GCM_TAG_LEN.
 */
static int prepare_head_secure_small(struct ceph_connection *con,
				     void *base, int ctrl_len)
{
	struct scatterlist sg;
	int ret;

	/* inline buffer padding? */
	if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
		memset(CTRL_BODY(base) + ctrl_len, 0,
		       CEPH_PREAMBLE_INLINE_LEN - ctrl_len);

	sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
	ret = gcm_crypt(con, true, &sg, &sg,
			CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);
	if (ret)
		return ret;

	add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
	return 0;
}
1243 
1244 /*
1245  * base:
1246  *   preamble
1247  *   control body (ctrl_len bytes)
1248  *   space for padding, if needed
1249  *   space for control remainder auth tag
1250  *   space for preamble auth tag
1251  *
1252  * Encrypt preamble and the inline portion, then encrypt the remainder
1253  * and gather into:
1254  *
1255  *   preamble
1256  *   control body (48 bytes)
1257  *   preamble auth tag
1258  *   control body (ctrl_len - 48 bytes)
1259  *   zero padding, if needed
1260  *   control remainder auth tag
1261  *
1262  * Preamble should already be encoded at the start of base.
1263  */
static int prepare_head_secure_big(struct ceph_connection *con,
				   void *base, int ctrl_len)
{
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
	void *rem_tag = rem + padded_len(rem_len);
	void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
	struct scatterlist sgs[2];
	int ret;

	/*
	 * First GCM op: preamble + inline portion ([base, rem)), with its
	 * auth tag written past the remainder's tag slot (pmbl_tag).
	 */
	sg_init_table(sgs, 2);
	sg_set_buf(&sgs[0], base, rem - base);
	sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
	ret = gcm_crypt(con, true, sgs, sgs, rem - base);
	if (ret)
		return ret;

	/* control remainder padding? */
	if (need_padding(rem_len))
		memset(rem + rem_len, 0, padding_len(rem_len));

	/*
	 * Second GCM op: remainder + padding, tag at rem_tag (which is
	 * immediately followed by pmbl_tag in the buffer).
	 */
	sg_init_one(&sgs[0], rem, pmbl_tag - rem);
	ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);
	if (ret)
		return ret;

	/* on-wire order: inline part, preamble tag, remainder + its tag */
	add_out_kvec(con, base, rem - base);
	add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
	add_out_kvec(con, rem, pmbl_tag - rem);
	return 0;
}
1295 
/*
 * Encode the preamble for a control frame in @base and queue the frame
 * for sending, choosing the plain or secure path based on the session
 * mode.  extdata and signing are only supported in plain mode.
 */
static int __prepare_control(struct ceph_connection *con, int tag,
			     void *base, int ctrl_len, void *extdata,
			     int extdata_len, bool to_be_signed)
{
	int total_len = ctrl_len + extdata_len;
	struct ceph_frame_desc desc;
	int ret;

	dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
	     total_len, ctrl_len, extdata_len);

	/* extdata may be vmalloc'ed but not base */
	if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))
		return -EINVAL;

	/* control frames have a single segment of total_len bytes */
	init_frame_desc(&desc, tag, &total_len, 1);
	encode_preamble(&desc, base);

	if (con_secure(con)) {
		if (WARN_ON(extdata_len || to_be_signed))
			return -EINVAL;

		if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
			/* fully inlined, inline buffer may need padding */
			ret = prepare_head_secure_small(con, base, ctrl_len);
		else
			/* partially inlined, inline buffer is full */
			ret = prepare_head_secure_big(con, base, ctrl_len);
		if (ret)
			return ret;
	} else {
		prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,
				   to_be_signed);
	}

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}
1334 
/* Convenience wrapper: control frame with no extdata and no signing. */
static int prepare_control(struct ceph_connection *con, int tag,
			   void *base, int ctrl_len)
{
	return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
}
1340 
/*
 * Queue a HELLO frame: our entity type (client) and the address we see
 * the peer at.  Signed (to_be_signed=true) so it is covered by the auth
 * signature exchange.
 */
static int prepare_hello(struct ceph_connection *con)
{
	void *buf, *p;
	int ctrl_len;

	ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
	if (!buf)
		return -ENOMEM;

	p = CTRL_BODY(buf);
	ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
	ceph_encode_entity_addr(&p, &con->peer_addr);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,
				 NULL, 0, true);
}
1359 
1360 /* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
1361 #define AUTH_BUF_LEN	(512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)
1362 
/*
 * Build and queue an AUTH_REQUEST frame.  The auth payload is produced
 * by the get_auth_request() op, which is called with con->mutex dropped
 * (it may block); the connection state is re-checked after reacquiring
 * the mutex in case the connection was torn down meanwhile.
 */
static int prepare_auth_request(struct ceph_connection *con)
{
	void *authorizer, *authorizer_copy;
	int ctrl_len, authorizer_len;
	void *buf;
	int ret;

	ctrl_len = AUTH_BUF_LEN;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
	if (!buf)
		return -ENOMEM;

	mutex_unlock(&con->mutex);
	ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
					 &authorizer, &authorizer_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_HELLO) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);
	if (ret)
		return ret;

	/*
	 * Copy the authorizer into a conn buffer -- presumably the
	 * original belongs to the auth layer and may not stay valid.
	 */
	authorizer_copy = alloc_conn_buf(con, authorizer_len);
	if (!authorizer_copy)
		return -ENOMEM;

	memcpy(authorizer_copy, authorizer, authorizer_len);

	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
				 authorizer_copy, authorizer_len, true);
}
1398 
/*
 * Build and queue an AUTH_REQUEST_MORE frame in response to the
 * server's AUTH_REPLY_MORE (@reply/@reply_len).  Like
 * prepare_auth_request(), the auth op runs with con->mutex dropped and
 * the connection state is re-validated afterwards.
 */
static int prepare_auth_request_more(struct ceph_connection *con,
				     void *reply, int reply_len)
{
	int ctrl_len, authorizer_len;
	void *authorizer;
	void *buf;
	int ret;

	ctrl_len = AUTH_BUF_LEN;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
	if (!buf)
		return -ENOMEM;

	mutex_unlock(&con->mutex);
	ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
					       CTRL_BODY(buf), &ctrl_len,
					       &authorizer, &authorizer_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);
	if (ret)
		return ret;

	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
				 ctrl_len, authorizer, authorizer_len, true);
}
1430 
/*
 * Queue an AUTH_SIGNATURE frame carrying the HMAC-SHA256 over
 * everything we received so far (the accumulated in_sign_kvecs).
 */
static int prepare_auth_signature(struct ceph_connection *con)
{
	void *buf;

	buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
						  con_secure(con)));
	if (!buf)
		return -ENOMEM;

	con_hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
			CTRL_BODY(buf));

	return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
			       SHA256_DIGEST_SIZE);
}
1446 
/*
 * Queue a CLIENT_IDENT frame for a fresh session: our addrvec, the
 * address we see the peer at, global id/seq, feature bitmasks and the
 * client cookie.  A non-zero cookie is generated on first use and
 * reused for subsequent reconnect attempts.
 */
static int prepare_client_ident(struct ceph_connection *con)
{
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	struct ceph_client *client = from_msgr(con->msgr);
	u64 global_id = ceph_client_gid(client);
	void *buf, *p;
	int ctrl_len;

	/* fresh session -- no server state may be present yet */
	WARN_ON(con->v2.server_cookie);
	WARN_ON(con->v2.connect_seq);
	WARN_ON(con->v2.peer_global_seq);

	if (!con->v2.client_cookie) {
		/* loop to guarantee a non-zero cookie */
		do {
			get_random_bytes(&con->v2.client_cookie,
					 sizeof(con->v2.client_cookie));
		} while (!con->v2.client_cookie);
		dout("%s con %p generated cookie 0x%llx\n", __func__, con,
		     con->v2.client_cookie);
	} else {
		dout("%s con %p cookie already set 0x%llx\n", __func__, con,
		     con->v2.client_cookie);
	}

	dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
	     ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
	     global_id, con->v2.global_seq, client->supported_features,
	     client->required_features, con->v2.client_cookie);

	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
		   ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
	if (!buf)
		return -ENOMEM;

	p = CTRL_BODY(buf);
	ceph_encode_8(&p, 2);  /* addrvec marker */
	ceph_encode_32(&p, 1);  /* addr_cnt */
	ceph_encode_entity_addr(&p, my_addr);
	ceph_encode_entity_addr(&p, &con->peer_addr);
	ceph_encode_64(&p, global_id);
	ceph_encode_64(&p, con->v2.global_seq);
	ceph_encode_64(&p, client->supported_features);
	ceph_encode_64(&p, client->required_features);
	ceph_encode_64(&p, 0);  /* flags */
	ceph_encode_64(&p, con->v2.client_cookie);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
}
1498 
/*
 * Queue a SESSION_RECONNECT frame to resume an existing session: our
 * addrvec, both cookies, global/connect sequence numbers and the last
 * message sequence we received (in_seq).
 */
static int prepare_session_reconnect(struct ceph_connection *con)
{
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	void *buf, *p;
	int ctrl_len;

	/* reconnect requires a fully established prior session */
	WARN_ON(!con->v2.client_cookie);
	WARN_ON(!con->v2.server_cookie);
	WARN_ON(!con->v2.connect_seq);
	WARN_ON(!con->v2.peer_global_seq);

	dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
	     con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
	     con->v2.connect_seq, con->in_seq);

	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
	if (!buf)
		return -ENOMEM;

	p = CTRL_BODY(buf);
	ceph_encode_8(&p, 2);  /* entity_addrvec_t marker */
	ceph_encode_32(&p, 1);  /* my_addrs len */
	ceph_encode_entity_addr(&p, my_addr);
	ceph_encode_64(&p, con->v2.client_cookie);
	ceph_encode_64(&p, con->v2.server_cookie);
	ceph_encode_64(&p, con->v2.global_seq);
	ceph_encode_64(&p, con->v2.connect_seq);
	ceph_encode_64(&p, con->in_seq);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
}
1533 
/*
 * Queue a KEEPALIVE2 frame carrying the current wall-clock time,
 * encoded as a ceph_timespec directly into out_buf.
 */
static int prepare_keepalive2(struct ceph_connection *con)
{
	struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
	struct timespec64 now;

	ktime_get_real_ts64(&now);
	dout("%s con %p timestamp %ptSp\n", __func__, con, &now);

	ceph_encode_timespec64(ts, &now);

	reset_out_kvecs(con);
	return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
			       sizeof(struct ceph_timespec));
}
1548 
/*
 * Queue an ACK frame acknowledging everything received so far
 * (in_seq_acked is bumped to in_seq and sent as a single u64).
 */
static int prepare_ack(struct ceph_connection *con)
{
	void *p;

	dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	p = CTRL_BODY(con->v2.out_buf);
	ceph_encode_64(&p, con->in_seq_acked);

	reset_out_kvecs(con);
	return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
}
1563 
/*
 * Encode the plain-mode epilogue (late status + the three segment crcs
 * accumulated in out_epil) and append it to the out kvecs.
 */
static void prepare_epilogue_plain(struct ceph_connection *con,
				   struct ceph_msg *msg, bool aborted)
{
	dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
	     msg, aborted, con->v2.out_epil.front_crc,
	     con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);

	encode_epilogue_plain(con, aborted);
	add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);
}
1574 
1575 /*
1576  * For "used" empty segments, crc is -1.  For unused (trailing)
1577  * segments, crc is 0.
1578  */
static void prepare_message_plain(struct ceph_connection *con,
				  struct ceph_msg *msg)
{
	/* header2 goes out as a plain control head built in out_buf */
	prepare_head_plain(con, con->v2.out_buf,
			   sizeof(struct ceph_msg_header2), NULL, 0, false);

	if (!front_len(msg) && !middle_len(msg)) {
		if (!data_len(msg)) {
			/*
			 * Empty message: once the head is written,
			 * we are done -- there is no epilogue.
			 */
			con->v2.out_state = OUT_S_FINISH_MESSAGE;
			return;
		}

		/* data-only message: front/middle are "used" empty -> -1 */
		con->v2.out_epil.front_crc = -1;
		con->v2.out_epil.middle_crc = -1;
		con->v2.out_state = OUT_S_QUEUE_DATA;
		return;
	}

	if (front_len(msg)) {
		con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
						    front_len(msg));
		add_out_kvec(con, msg->front.iov_base, front_len(msg));
	} else {
		/* middle (at least) is there, checked above */
		con->v2.out_epil.front_crc = -1;
	}

	if (middle_len(msg)) {
		con->v2.out_epil.middle_crc =
			crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
		add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
	} else {
		/* -1 if a later (data) segment exists, 0 if trailing */
		con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
	}

	if (data_len(msg)) {
		/* data crc is accumulated while the data is queued */
		con->v2.out_state = OUT_S_QUEUE_DATA;
	} else {
		/* no data: epilogue can be queued right away */
		con->v2.out_epil.data_crc = 0;
		prepare_epilogue_plain(con, msg, false);
		con->v2.out_state = OUT_S_FINISH_MESSAGE;
	}
}
1626 
1627 /*
1628  * Unfortunately the kernel crypto API doesn't support streaming
1629  * (piecewise) operation for AEAD algorithms, so we can't get away
1630  * with a fixed size buffer and a couple sgs.  Instead, we have to
1631  * allocate pages for the entire tail of the message (currently up
1632  * to ~32M) and two sgs arrays (up to ~256K each)...
1633  */
static int prepare_message_secure(struct ceph_connection *con,
				  struct ceph_msg *msg)
{
	void *zerop = page_address(ceph_zero_page);
	struct sg_table enc_sgt = {};
	struct sg_table sgt = {};
	struct page **enc_pages;
	int enc_page_cnt;
	int tail_len;
	int ret;

	/* header2 fits in the preamble inline buffer */
	ret = prepare_head_secure_small(con, con->v2.out_buf,
					sizeof(struct ceph_msg_header2));
	if (ret)
		return ret;

	tail_len = tail_onwire_len(msg, true);
	if (!tail_len) {
		/*
		 * Empty message: once the head is written,
		 * we are done -- there is no epilogue.
		 */
		con->v2.out_state = OUT_S_FINISH_MESSAGE;
		return 0;
	}

	/*
	 * Plaintext sgs: front/middle/data segments (padded from the
	 * shared zero page) followed by the epilogue in out_epil.
	 */
	encode_epilogue_secure(con, false);
	ret = setup_message_sgs(&sgt, msg, zerop, zerop, zerop,
				&con->v2.out_epil, NULL, 0, false);
	if (ret)
		goto out;

	/* ciphertext destination: a freshly allocated page vector */
	enc_page_cnt = calc_pages_for(0, tail_len);
	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
	if (IS_ERR(enc_pages)) {
		ret = PTR_ERR(enc_pages);
		goto out;
	}

	WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
	con->v2.out_enc_pages = enc_pages;
	con->v2.out_enc_page_cnt = enc_page_cnt;
	con->v2.out_enc_resid = tail_len;
	con->v2.out_enc_i = 0;

	ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
					0, tail_len, GFP_NOIO);
	if (ret)
		goto out;

	/* encrypt everything but the trailing auth tag slot */
	ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
			tail_len - CEPH_GCM_TAG_LEN);
	if (ret)
		goto out;

	dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
	     msg, sgt.orig_nents, enc_page_cnt);
	con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;

out:
	sg_free_table(&sgt);
	sg_free_table(&enc_sgt);
	return ret;
}
1698 
/*
 * Queue @msg for sending: build the MESSAGE frame preamble and header2
 * in out_buf, then hand off to the secure or plain path depending on
 * the session mode.  Piggybacks an ack (in_seq_acked update) onto the
 * header.
 */
static int prepare_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	int lens[] = {
		sizeof(struct ceph_msg_header2),
		front_len(msg),
		middle_len(msg),
		data_len(msg)
	};
	struct ceph_frame_desc desc;
	int ret;

	dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
	     msg, lens[0], lens[1], lens[2], lens[3]);

	if (con->in_seq > con->in_seq_acked) {
		dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
		     con->in_seq_acked, con->in_seq);
		con->in_seq_acked = con->in_seq;
	}

	reset_out_kvecs(con);
	init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
	encode_preamble(&desc, con->v2.out_buf);
	fill_header2(CTRL_BODY(con->v2.out_buf), &msg->hdr,
		     con->in_seq_acked);

	if (con_secure(con)) {
		ret = prepare_message_secure(con, msg);
		if (ret)
			return ret;
	} else {
		prepare_message_plain(con, msg);
	}

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}
1736 
1737 static int prepare_read_banner_prefix(struct ceph_connection *con)
1738 {
1739 	void *buf;
1740 
1741 	buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);
1742 	if (!buf)
1743 		return -ENOMEM;
1744 
1745 	reset_in_kvecs(con);
1746 	add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1747 	add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1748 	con->state = CEPH_CON_S_V2_BANNER_PREFIX;
1749 	return 0;
1750 }
1751 
1752 static int prepare_read_banner_payload(struct ceph_connection *con,
1753 				       int payload_len)
1754 {
1755 	void *buf;
1756 
1757 	buf = alloc_conn_buf(con, payload_len);
1758 	if (!buf)
1759 		return -ENOMEM;
1760 
1761 	reset_in_kvecs(con);
1762 	add_in_kvec(con, buf, payload_len);
1763 	add_in_sign_kvec(con, buf, payload_len);
1764 	con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;
1765 	return 0;
1766 }
1767 
/*
 * Arrange to receive the next frame preamble into in_buf -- the secure
 * (encrypted + tagged) or plain length depending on session mode.
 */
static void prepare_read_preamble(struct ceph_connection *con)
{
	reset_in_kvecs(con);
	add_in_kvec(con, con->v2.in_buf,
		    con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
				      CEPH_PREAMBLE_PLAIN_LEN);
	con->v2.in_state = IN_S_HANDLE_PREAMBLE;
}
1776 
/*
 * Arrange to receive a plain control frame body.  During HELLO/AUTH the
 * whole head (preamble + body + crc) is kept contiguous in a conn
 * buffer so it can be fed into the auth signature; afterwards, small
 * bodies reuse the preamble inline area in in_buf and larger ones get
 * their own conn buffer.
 */
static int prepare_read_control(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int head_len;
	void *buf;

	reset_in_kvecs(con);
	if (con->state == CEPH_CON_S_V2_HELLO ||
	    con->state == CEPH_CON_S_V2_AUTH) {
		head_len = head_onwire_len(ctrl_len, false);
		buf = alloc_conn_buf(con, head_len);
		if (!buf)
			return -ENOMEM;

		/* preserve preamble */
		memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);

		add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
		add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
		/* the whole head is covered by the auth signature */
		add_in_sign_kvec(con, buf, head_len);
	} else {
		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
			buf = alloc_conn_buf(con, ctrl_len);
			if (!buf)
				return -ENOMEM;

			add_in_kvec(con, buf, ctrl_len);
		} else {
			add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
		}
		/* crc lands at the start of in_buf */
		add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
	}
	con->v2.in_state = IN_S_HANDLE_CONTROL;
	return 0;
}
1812 
/*
 * Arrange to receive the rest of a secure control frame that did not
 * fit into the preamble inline buffer.  The already-decrypted inline
 * portion is copied to the start of a new conn buffer; the remainder is
 * read in after it, and its padding + auth tag go into in_buf (the
 * layout decrypt_control_remainder() expects).
 */
static int prepare_read_control_remainder(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	void *buf;

	buf = alloc_conn_buf(con, ctrl_len);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);

	reset_in_kvecs(con);
	add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
	add_in_kvec(con, con->v2.in_buf,
		    padding_len(rem_len) + CEPH_GCM_TAG_LEN);
	con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
	return 0;
}
1832 
/*
 * Start receiving the data segment of a plain message: initialize the
 * data crc and cursor and point the in bvec at the first piece.  With
 * RXBOUNCE, data is received into a lazily-allocated bounce page and
 * copied out in prepare_read_data_cont().
 */
static int prepare_read_data(struct ceph_connection *con)
{
	struct bio_vec bv;

	con->in_data_crc = -1;
	ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
				  data_len(con->in_msg));

	get_bvec_at(&con->v2.in_cursor, &bv);
	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
		if (unlikely(!con->bounce_page)) {
			con->bounce_page = alloc_page(GFP_NOIO);
			if (!con->bounce_page) {
				pr_err("failed to allocate bounce page\n");
				return -ENOMEM;
			}
		}

		/* receive into the bounce page, keep bv.bv_len */
		bv.bv_page = con->bounce_page;
		bv.bv_offset = 0;
	}
	set_in_bvec(con, &bv);
	con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
	return 0;
}
1858 
/*
 * One step of plain data reception: fold the just-received bvec into
 * the data crc (copying it out of the bounce page first if RXBOUNCE),
 * then either queue the next piece or switch to reading the epilogue.
 */
static void prepare_read_data_cont(struct ceph_connection *con)
{
	struct bio_vec bv;

	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
		con->in_data_crc = crc32c(con->in_data_crc,
					  page_address(con->bounce_page),
					  con->v2.in_bvec.bv_len);

		/* copy from the bounce page to the real destination */
		get_bvec_at(&con->v2.in_cursor, &bv);
		memcpy_to_page(bv.bv_page, bv.bv_offset,
			       page_address(con->bounce_page),
			       con->v2.in_bvec.bv_len);
	} else {
		con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
						    con->v2.in_bvec.bv_page,
						    con->v2.in_bvec.bv_offset,
						    con->v2.in_bvec.bv_len);
	}

	ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
	if (con->v2.in_cursor.total_resid) {
		/* more data -- queue the next piece */
		get_bvec_at(&con->v2.in_cursor, &bv);
		if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
			bv.bv_page = con->bounce_page;
			bv.bv_offset = 0;
		}
		set_in_bvec(con, &bv);
		WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
		return;
	}

	/*
	 * We've read all data.  Prepare to read epilogue.
	 */
	reset_in_kvecs(con);
	add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
1898 
/*
 * One step of plain sparse-read reception.  First account for whatever
 * was just received (a data bvec or an extent-map kvec), then ask
 * sparse_read() for the next extent and queue it -- either into the
 * buffer the op supplies or into the message data cursor (via the
 * bounce page if RXBOUNCE).  When sparse_read() returns 0, switch to
 * reading the epilogue.
 */
static int prepare_sparse_read_cont(struct ceph_connection *con)
{
	int ret;
	struct bio_vec bv;
	char *buf = NULL;
	struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;

	WARN_ON(con->v2.in_state != IN_S_PREPARE_SPARSE_DATA_CONT);

	if (iov_iter_is_bvec(&con->v2.in_iter)) {
		/* just received a piece of extent data */
		if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
			con->in_data_crc = crc32c(con->in_data_crc,
						  page_address(con->bounce_page),
						  con->v2.in_bvec.bv_len);
			get_bvec_at(cursor, &bv);
			memcpy_to_page(bv.bv_page, bv.bv_offset,
				       page_address(con->bounce_page),
				       con->v2.in_bvec.bv_len);
		} else {
			con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
							    con->v2.in_bvec.bv_page,
							    con->v2.in_bvec.bv_offset,
							    con->v2.in_bvec.bv_len);
		}

		ceph_msg_data_advance(cursor, con->v2.in_bvec.bv_len);
		cursor->sr_resid -= con->v2.in_bvec.bv_len;
		dout("%s: advance by 0x%x sr_resid 0x%x\n", __func__,
		     con->v2.in_bvec.bv_len, cursor->sr_resid);
		WARN_ON_ONCE(cursor->sr_resid > cursor->total_resid);
		if (cursor->sr_resid) {
			/* current extent not finished -- queue next piece */
			get_bvec_at(cursor, &bv);
			if (bv.bv_len > cursor->sr_resid)
				bv.bv_len = cursor->sr_resid;
			if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
				bv.bv_page = con->bounce_page;
				bv.bv_offset = 0;
			}
			set_in_bvec(con, &bv);
			con->v2.data_len_remain -= bv.bv_len;
			return 0;
		}
	} else if (iov_iter_is_kvec(&con->v2.in_iter)) {
		/* On first call, we have no kvec so don't compute crc */
		if (con->v2.in_kvec_cnt) {
			WARN_ON_ONCE(con->v2.in_kvec_cnt > 1);
			con->in_data_crc = crc32c(con->in_data_crc,
						  con->v2.in_kvecs[0].iov_base,
						  con->v2.in_kvecs[0].iov_len);
		}
	} else {
		return -EIO;
	}

	/* get next extent */
	ret = con->ops->sparse_read(con, cursor, &buf);
	if (ret <= 0) {
		if (ret < 0)
			return ret;

		/* all extents done -- read the epilogue next */
		reset_in_kvecs(con);
		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
		return 0;
	}

	if (buf) {
		/* receive into buffer */
		reset_in_kvecs(con);
		add_in_kvec(con, buf, ret);
		con->v2.data_len_remain -= ret;
		return 0;
	}

	/* sanity-check the extent length against the cursor */
	if (ret > cursor->total_resid) {
		pr_warn("%s: ret 0x%x total_resid 0x%zx resid 0x%zx\n",
			__func__, ret, cursor->total_resid, cursor->resid);
		return -EIO;
	}
	get_bvec_at(cursor, &bv);
	if (bv.bv_len > cursor->sr_resid)
		bv.bv_len = cursor->sr_resid;
	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
		if (unlikely(!con->bounce_page)) {
			con->bounce_page = alloc_page(GFP_NOIO);
			if (!con->bounce_page) {
				pr_err("failed to allocate bounce page\n");
				return -ENOMEM;
			}
		}

		bv.bv_page = con->bounce_page;
		bv.bv_offset = 0;
	}
	set_in_bvec(con, &bv);
	con->v2.data_len_remain -= ret;
	return ret;
}
1997 
/*
 * Start a plain sparse read: initialize the data crc and cursor (sized
 * by sparse_read_total) and kick off the first extent via
 * prepare_sparse_read_cont().
 */
static int prepare_sparse_read_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->in_msg;

	dout("%s: starting sparse read\n", __func__);

	if (WARN_ON_ONCE(!con->ops->sparse_read))
		return -EOPNOTSUPP;

	if (!con_secure(con))
		con->in_data_crc = -1;

	ceph_msg_data_cursor_init(&con->v2.in_cursor, msg,
				  msg->sparse_read_total);

	reset_in_kvecs(con);
	con->v2.in_state = IN_S_PREPARE_SPARSE_DATA_CONT;
	con->v2.data_len_remain = data_len(msg);
	return prepare_sparse_read_cont(con);
}
2018 
/*
 * Arrange to receive the tail of a plain message: front and middle
 * straight into the msg buffers, then data (regular or sparse) or,
 * for dataless messages, the epilogue.
 */
static int prepare_read_tail_plain(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->in_msg;

	if (!front_len(msg) && !middle_len(msg)) {
		/* data-only message (empty messages don't reach here) */
		WARN_ON(!data_len(msg));
		return prepare_read_data(con);
	}

	reset_in_kvecs(con);
	if (front_len(msg)) {
		add_in_kvec(con, msg->front.iov_base, front_len(msg));
		WARN_ON(msg->front.iov_len != front_len(msg));
	}
	if (middle_len(msg)) {
		add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
		WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
	}

	if (data_len(msg)) {
		if (msg->sparse_read_total)
			con->v2.in_state = IN_S_PREPARE_SPARSE_DATA;
		else
			con->v2.in_state = IN_S_PREPARE_READ_DATA;
	} else {
		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
	}
	return 0;
}
2049 
/*
 * Queue the next in_enc_pages page (or the final partial page) for
 * reception.  When the last page is queued, switch to epilogue
 * handling -- the epilogue and auth tag arrive at the end of the
 * ciphertext.
 */
static void prepare_read_enc_page(struct ceph_connection *con)
{
	struct bio_vec bv;

	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
	     con->v2.in_enc_resid);
	WARN_ON(!con->v2.in_enc_resid);

	bvec_set_page(&bv, con->v2.in_enc_pages[con->v2.in_enc_i],
		      min(con->v2.in_enc_resid, (int)PAGE_SIZE), 0);

	set_in_bvec(con, &bv);
	con->v2.in_enc_i++;
	con->v2.in_enc_resid -= bv.bv_len;

	if (con->v2.in_enc_resid) {
		con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;
		return;
	}

	/*
	 * We are set to read the last piece of ciphertext (ending
	 * with epilogue) + auth tag.
	 */
	WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
2077 
/*
 * Set up reading of the encrypted tail of an incoming message:
 * allocate a page vector large enough for the whole on-wire tail
 * (including epilogue and auth tag) and queue the first page.
 * Decryption happens later, from handle_epilogue().
 *
 * Returns 0 or a negative errno on allocation failure.
 */
static int prepare_read_tail_secure(struct ceph_connection *con)
{
	struct page **enc_pages;
	int enc_page_cnt;
	int tail_len;

	tail_len = tail_onwire_len(con->in_msg, true);
	WARN_ON(!tail_len);

	enc_page_cnt = calc_pages_for(0, tail_len);
	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
	if (IS_ERR(enc_pages))
		return PTR_ERR(enc_pages);

	/* any previous ciphertext buffer must have been released */
	WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
	con->v2.in_enc_pages = enc_pages;
	con->v2.in_enc_page_cnt = enc_page_cnt;
	con->v2.in_enc_resid = tail_len;
	con->v2.in_enc_i = 0;

	prepare_read_enc_page(con);
	return 0;
}
2101 
/*
 * Account for a skipped message (it still consumes a seq#) and arm
 * the read of the next frame's preamble.
 */
static void __finish_skip(struct ceph_connection *con)
{
	con->in_seq++;
	prepare_read_preamble(con);
}
2107 
/*
 * Skip an incoming message: arrange for its remaining on-wire tail
 * (if any) to be read and discarded without allocating a ceph_msg.
 */
static void prepare_skip_message(struct ceph_connection *con)
{
	struct ceph_frame_desc *desc = &con->v2.in_desc;
	int tail_len;

	dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
	     desc->fd_lens[2], desc->fd_lens[3]);

	/* on-wire tail length covers front, middle and data segments */
	tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
				     desc->fd_lens[3], con_secure(con));
	if (!tail_len) {
		__finish_skip(con);
	} else {
		set_in_skip(con, tail_len);
		con->v2.in_state = IN_S_FINISH_SKIP;
	}
}
2125 
2126 static int process_banner_prefix(struct ceph_connection *con)
2127 {
2128 	int payload_len;
2129 	void *p;
2130 
2131 	WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);
2132 
2133 	p = con->v2.in_kvecs[0].iov_base;
2134 	if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
2135 		if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
2136 			con->error_msg = "server is speaking msgr1 protocol";
2137 		else
2138 			con->error_msg = "protocol error, bad banner";
2139 		return -EINVAL;
2140 	}
2141 
2142 	p += CEPH_BANNER_V2_LEN;
2143 	payload_len = ceph_decode_16(&p);
2144 	dout("%s con %p payload_len %d\n", __func__, con, payload_len);
2145 
2146 	return prepare_read_banner_payload(con, payload_len);
2147 }
2148 
/*
 * Check the msgr2 feature bits exchanged in the banner payload against
 * ours and, if compatible, queue our hello frame and start reading
 * preambles.
 */
static int process_banner_payload(struct ceph_connection *con)
{
	void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
	u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
	u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
	u64 server_feat, server_req_feat;
	void *p;
	int ret;

	p = con->v2.in_kvecs[0].iov_base;
	ceph_decode_64_safe(&p, end, server_feat, bad);
	ceph_decode_64_safe(&p, end, server_req_feat, bad);

	dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
	     __func__, con, server_feat, server_req_feat);

	/* fail if either side's required features aren't supported */
	if (req_feat & ~server_feat) {
		pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
		       server_feat, req_feat & ~server_feat);
		con->error_msg = "missing required protocol features";
		return -EINVAL;
	}
	if (server_req_feat & ~feat) {
		pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
		       feat, server_req_feat & ~feat);
		con->error_msg = "missing required protocol features";
		return -EINVAL;
	}

	/* no reset_out_kvecs() as our banner may still be pending */
	ret = prepare_hello(con);
	if (ret) {
		pr_err("prepare_hello failed: %d\n", ret);
		return ret;
	}

	con->state = CEPH_CON_S_V2_HELLO;
	prepare_read_preamble(con);
	return 0;

bad:
	pr_err("failed to decode banner payload\n");
	return -EINVAL;
}
2193 
/*
 * HELLO frame: verify the peer's entity type, learn the address the
 * peer sees us connecting from, then queue our auth request and move
 * to the AUTH state.
 */
static int process_hello(struct ceph_connection *con, void *p, void *end)
{
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	struct ceph_entity_addr addr_for_me;
	u8 entity_type;
	int ret;

	if (con->state != CEPH_CON_S_V2_HELLO) {
		con->error_msg = "protocol error, unexpected hello";
		return -EINVAL;
	}

	ceph_decode_8_safe(&p, end, entity_type, bad);
	ret = ceph_decode_entity_addr(&p, end, &addr_for_me);
	if (ret) {
		pr_err("failed to decode addr_for_me: %d\n", ret);
		return ret;
	}

	dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con,
	     entity_type, ceph_pr_addr(&addr_for_me));

	if (entity_type != con->peer_name.type) {
		pr_err("bad peer type, want %d, got %d\n",
		       con->peer_name.type, entity_type);
		con->error_msg = "wrong peer at address";
		return -EINVAL;
	}

	/*
	 * Set our address to the address our first peer (i.e. monitor)
	 * sees that we are connecting from.  If we are behind some sort
	 * of NAT and want to be identified by some private (not NATed)
	 * address, ip option should be used.
	 */
	if (ceph_addr_is_blank(my_addr)) {
		memcpy(&my_addr->in_addr, &addr_for_me.in_addr,
		       sizeof(my_addr->in_addr));
		ceph_addr_set_port(my_addr, 0);
		dout("%s con %p set my addr %s, as seen by peer %s\n",
		     __func__, con, ceph_pr_addr(my_addr),
		     ceph_pr_addr(&con->peer_addr));
	} else {
		dout("%s con %p my addr already set %s\n",
		     __func__, con, ceph_pr_addr(my_addr));
	}

	WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr));
	WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY);
	WARN_ON(!my_addr->nonce);

	/* no reset_out_kvecs() as our hello may still be pending */
	ret = prepare_auth_request(con);
	if (ret) {
		if (ret != -EAGAIN)
			pr_err("prepare_auth_request failed: %d\n", ret);
		return ret;
	}

	con->state = CEPH_CON_S_V2_AUTH;
	return 0;

bad:
	pr_err("failed to decode hello\n");
	return -EINVAL;
}
2260 
/*
 * AUTH_BAD_METHOD frame: the server rejected our auth method.  Decode
 * the protocols and connection modes the server allows and let the
 * handle_auth_bad_method hook decide how (or whether) to retry.
 */
static int process_auth_bad_method(struct ceph_connection *con,
				   void *p, void *end)
{
	int allowed_protos[8], allowed_modes[8];
	int allowed_proto_cnt, allowed_mode_cnt;
	int used_proto, result;
	int ret;
	int i;

	if (con->state != CEPH_CON_S_V2_AUTH) {
		con->error_msg = "protocol error, unexpected auth_bad_method";
		return -EINVAL;
	}

	ceph_decode_32_safe(&p, end, used_proto, bad);
	ceph_decode_32_safe(&p, end, result, bad);
	dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto,
	     result);

	/* counts are bounded by our fixed-size stack arrays */
	ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad);
	if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) {
		pr_err("allowed_protos too big %d\n", allowed_proto_cnt);
		return -EINVAL;
	}
	for (i = 0; i < allowed_proto_cnt; i++) {
		ceph_decode_32_safe(&p, end, allowed_protos[i], bad);
		dout("%s con %p allowed_protos[%d] %d\n", __func__, con,
		     i, allowed_protos[i]);
	}

	ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad);
	if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) {
		pr_err("allowed_modes too big %d\n", allowed_mode_cnt);
		return -EINVAL;
	}
	for (i = 0; i < allowed_mode_cnt; i++) {
		ceph_decode_32_safe(&p, end, allowed_modes[i], bad);
		dout("%s con %p allowed_modes[%d] %d\n", __func__, con,
		     i, allowed_modes[i]);
	}

	/* con->mutex is dropped across the callback -- recheck state after */
	mutex_unlock(&con->mutex);
	ret = con->ops->handle_auth_bad_method(con, used_proto, result,
					       allowed_protos,
					       allowed_proto_cnt,
					       allowed_modes,
					       allowed_mode_cnt);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret);
	return ret;

bad:
	pr_err("failed to decode auth_bad_method\n");
	return -EINVAL;
}
2322 
/*
 * AUTH_REPLY_MORE frame: the server wants another round of the auth
 * exchange.  Feed its payload back and queue our auth_request_more.
 */
static int process_auth_reply_more(struct ceph_connection *con,
				   void *p, void *end)
{
	int payload_len;
	int ret;

	if (con->state != CEPH_CON_S_V2_AUTH) {
		con->error_msg = "protocol error, unexpected auth_reply_more";
		return -EINVAL;
	}

	ceph_decode_32_safe(&p, end, payload_len, bad);
	ceph_decode_need(&p, end, payload_len, bad);

	dout("%s con %p payload_len %d\n", __func__, con, payload_len);

	reset_out_kvecs(con);
	ret = prepare_auth_request_more(con, p, payload_len);
	if (ret) {
		if (ret != -EAGAIN)
			pr_err("prepare_auth_request_more failed: %d\n", ret);
		return ret;
	}

	return 0;

bad:
	pr_err("failed to decode auth_reply_more\n");
	return -EINVAL;
}
2353 
2354 /*
2355  * Align session_key and con_secret to avoid GFP_ATOMIC allocation
2356  * inside crypto_shash_setkey() and crypto_aead_setkey() called from
2357  * setup_crypto().  __aligned(16) isn't guaranteed to work for stack
2358  * objects, so do it by hand.
2359  */
/*
 * AUTH_DONE frame: hand the auth payload to the application hook to
 * obtain the session key and connection secret, set up session crypto
 * and queue our auth signature.
 */
static int process_auth_done(struct ceph_connection *con, void *p, void *end)
{
	u8 session_key_buf[CEPH_MAX_KEY_LEN + 16];
	u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
	u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
	u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
	int session_key_len, con_secret_len;
	int payload_len;
	u64 global_id;
	int ret;

	if (con->state != CEPH_CON_S_V2_AUTH) {
		con->error_msg = "protocol error, unexpected auth_done";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, global_id, bad);
	ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);

	ceph_decode_32_safe(&p, end, payload_len, bad);
	ceph_decode_need(&p, end, payload_len, bad);

	dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
	     __func__, con, global_id, con->v2.con_mode, payload_len);

	/* con->mutex is dropped across the callback -- recheck state after */
	mutex_unlock(&con->mutex);
	session_key_len = 0;
	con_secret_len = 0;
	ret = con->ops->handle_auth_done(con, global_id, p, payload_len,
					 session_key, &session_key_len,
					 con_secret, &con_secret_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		ret = -EAGAIN;
		goto out;
	}

	dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
	if (ret)
		goto out;

	ret = setup_crypto(con, session_key, session_key_len, con_secret,
			   con_secret_len);
	if (ret)
		goto out;

	reset_out_kvecs(con);
	ret = prepare_auth_signature(con);
	if (ret) {
		pr_err("prepare_auth_signature failed: %d\n", ret);
		goto out;
	}

	con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;

out:
	/* wipe key material from the stack on all paths */
	memzero_explicit(session_key_buf, sizeof(session_key_buf));
	memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
	return ret;

bad:
	pr_err("failed to decode auth_done\n");
	return -EINVAL;
}
2426 
/*
 * AUTH_SIGNATURE frame: verify the server's HMAC-SHA256 over what we
 * sent so far (out_sign_kvecs), then queue either client_ident (fresh
 * session) or session_reconnect (we hold a server cookie).
 */
static int process_auth_signature(struct ceph_connection *con,
				  void *p, void *end)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	int ret;

	if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) {
		con->error_msg = "protocol error, unexpected auth_signature";
		return -EINVAL;
	}

	con_hmac_sha256(con, con->v2.out_sign_kvecs, con->v2.out_sign_kvec_cnt,
			hmac);

	/* constant-time compare of the received digest against ours */
	ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
	if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
		con->error_msg = "integrity error, bad auth signature";
		return -EBADMSG;
	}

	dout("%s con %p auth signature ok\n", __func__, con);

	/* no reset_out_kvecs() as our auth_signature may still be pending */
	if (!con->v2.server_cookie) {
		ret = prepare_client_ident(con);
		if (ret) {
			pr_err("prepare_client_ident failed: %d\n", ret);
			return ret;
		}

		con->state = CEPH_CON_S_V2_SESSION_CONNECT;
	} else {
		ret = prepare_session_reconnect(con);
		if (ret) {
			pr_err("prepare_session_reconnect failed: %d\n", ret);
			return ret;
		}

		con->state = CEPH_CON_S_V2_SESSION_RECONNECT;
	}

	return 0;

bad:
	pr_err("failed to decode auth_signature\n");
	return -EINVAL;
}
2474 
/*
 * SERVER_IDENT frame: verify the server's identity (address/nonce and
 * RADOS feature bits), record its global_id/seq/cookie and transition
 * the connection to OPEN.
 */
static int process_server_ident(struct ceph_connection *con,
				void *p, void *end)
{
	struct ceph_client *client = from_msgr(con->msgr);
	u64 features, required_features;
	struct ceph_entity_addr addr;
	u64 global_seq;
	u64 global_id;
	u64 cookie;
	u64 flags;
	int ret;

	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
		con->error_msg = "protocol error, unexpected server_ident";
		return -EINVAL;
	}

	ret = ceph_decode_entity_addrvec(&p, end, true, &addr);
	if (ret) {
		pr_err("failed to decode server addrs: %d\n", ret);
		return ret;
	}

	ceph_decode_64_safe(&p, end, global_id, bad);
	ceph_decode_64_safe(&p, end, global_seq, bad);
	ceph_decode_64_safe(&p, end, features, bad);
	ceph_decode_64_safe(&p, end, required_features, bad);
	ceph_decode_64_safe(&p, end, flags, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);

	dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n",
	     __func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce),
	     global_id, global_seq, features, required_features, flags, cookie);

	/* is this who we intended to talk to? */
	if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) {
		pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n",
		       ceph_pr_addr(&con->peer_addr),
		       le32_to_cpu(con->peer_addr.nonce),
		       ceph_pr_addr(&addr), le32_to_cpu(addr.nonce));
		con->error_msg = "wrong peer at address";
		return -EINVAL;
	}

	if (client->required_features & ~features) {
		pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
		       features, client->required_features & ~features);
		con->error_msg = "missing required protocol features";
		return -EINVAL;
	}

	/*
	 * Both name->type and name->num are set in ceph_con_open() but
	 * name->num may be bogus in the initial monmap.  name->type is
	 * verified in handle_hello().
	 */
	WARN_ON(!con->peer_name.type);
	con->peer_name.num = cpu_to_le64(global_id);
	con->v2.peer_global_seq = global_seq;
	con->peer_features = features;
	WARN_ON(required_features & ~client->supported_features);
	con->v2.server_cookie = cookie;

	/* a lossy session has no cookie -- there is nothing to reconnect to */
	if (flags & CEPH_MSG_CONNECT_LOSSY) {
		ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
		WARN_ON(con->v2.server_cookie);
	} else {
		WARN_ON(!con->v2.server_cookie);
	}

	/* handshake buffers are no longer needed once the session is up */
	clear_in_sign_kvecs(con);
	clear_out_sign_kvecs(con);
	free_conn_bufs(con);
	con->delay = 0;  /* reset backoff memory */

	con->state = CEPH_CON_S_OPEN;
	con->v2.out_state = OUT_S_GET_NEXT;
	return 0;

bad:
	pr_err("failed to decode server_ident\n");
	return -EINVAL;
}
2558 
2559 static int process_ident_missing_features(struct ceph_connection *con,
2560 					  void *p, void *end)
2561 {
2562 	struct ceph_client *client = from_msgr(con->msgr);
2563 	u64 missing_features;
2564 
2565 	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2566 		con->error_msg = "protocol error, unexpected ident_missing_features";
2567 		return -EINVAL;
2568 	}
2569 
2570 	ceph_decode_64_safe(&p, end, missing_features, bad);
2571 	pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2572 	       client->supported_features, missing_features);
2573 	con->error_msg = "missing required protocol features";
2574 	return -EINVAL;
2575 
2576 bad:
2577 	pr_err("failed to decode ident_missing_features\n");
2578 	return -EINVAL;
2579 }
2580 
/*
 * SESSION_RECONNECT_OK frame: the server accepted our reconnect.
 * Drop requeued messages the server already saw (up to @seq) and
 * transition back to OPEN.
 */
static int process_session_reconnect_ok(struct ceph_connection *con,
					void *p, void *end)
{
	u64 seq;

	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		con->error_msg = "protocol error, unexpected session_reconnect_ok";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, seq, bad);

	dout("%s con %p seq %llu\n", __func__, con, seq);
	ceph_con_discard_requeued(con, seq);

	/* handshake buffers are no longer needed once the session is up */
	clear_in_sign_kvecs(con);
	clear_out_sign_kvecs(con);
	free_conn_bufs(con);
	con->delay = 0;  /* reset backoff memory */

	con->state = CEPH_CON_S_OPEN;
	con->v2.out_state = OUT_S_GET_NEXT;
	return 0;

bad:
	pr_err("failed to decode session_reconnect_ok\n");
	return -EINVAL;
}
2609 
/*
 * SESSION_RETRY frame: the server has a higher connect_seq.  Bump
 * ours past the server's value and resend session_reconnect.
 */
static int process_session_retry(struct ceph_connection *con,
				 void *p, void *end)
{
	u64 connect_seq;
	int ret;

	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		con->error_msg = "protocol error, unexpected session_retry";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, connect_seq, bad);

	dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq);
	WARN_ON(connect_seq <= con->v2.connect_seq);
	con->v2.connect_seq = connect_seq + 1;

	free_conn_bufs(con);

	reset_out_kvecs(con);
	ret = prepare_session_reconnect(con);
	if (ret) {
		pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret);
		return ret;
	}

	return 0;

bad:
	pr_err("failed to decode session_retry\n");
	return -EINVAL;
}
2642 
/*
 * SESSION_RETRY_GLOBAL frame: the server has a higher global_seq.
 * Obtain a fresh global_seq past the server's value and resend
 * session_reconnect.
 */
static int process_session_retry_global(struct ceph_connection *con,
					void *p, void *end)
{
	u64 global_seq;
	int ret;

	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		con->error_msg = "protocol error, unexpected session_retry_global";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, global_seq, bad);

	dout("%s con %p global_seq %llu\n", __func__, con, global_seq);
	WARN_ON(global_seq <= con->v2.global_seq);
	con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq);

	free_conn_bufs(con);

	reset_out_kvecs(con);
	ret = prepare_session_reconnect(con);
	if (ret) {
		pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret);
		return ret;
	}

	return 0;

bad:
	pr_err("failed to decode session_retry_global\n");
	return -EINVAL;
}
2675 
/*
 * SESSION_RESET frame: the server lost our session state.  Reset the
 * session, notify the application via the peer_reset hook and start a
 * fresh session with client_ident.
 */
static int process_session_reset(struct ceph_connection *con,
				 void *p, void *end)
{
	bool full;
	int ret;

	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		con->error_msg = "protocol error, unexpected session_reset";
		return -EINVAL;
	}

	/* only a full reset is acceptable here */
	ceph_decode_8_safe(&p, end, full, bad);
	if (!full) {
		con->error_msg = "protocol error, bad session_reset";
		return -EINVAL;
	}

	pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name),
		ceph_pr_addr(&con->peer_addr));
	ceph_con_reset_session(con);

	/* con->mutex is dropped across the callback -- recheck state after */
	mutex_unlock(&con->mutex);
	if (con->ops->peer_reset)
		con->ops->peer_reset(con);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	free_conn_bufs(con);

	reset_out_kvecs(con);
	ret = prepare_client_ident(con);
	if (ret) {
		pr_err("prepare_client_ident (rst) failed: %d\n", ret);
		return ret;
	}

	con->state = CEPH_CON_S_V2_SESSION_CONNECT;
	return 0;

bad:
	pr_err("failed to decode session_reset\n");
	return -EINVAL;
}
2723 
/*
 * KEEPALIVE2_ACK frame: record the timestamp the peer echoed back so
 * liveness can be judged from con->last_keepalive_ack.
 */
static int process_keepalive2_ack(struct ceph_connection *con,
				  void *p, void *end)
{
	if (con->state != CEPH_CON_S_OPEN) {
		con->error_msg = "protocol error, unexpected keepalive2_ack";
		return -EINVAL;
	}

	ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
	ceph_decode_timespec64(&con->last_keepalive_ack, p);

	dout("%s con %p timestamp %ptSp\n", __func__, con, &con->last_keepalive_ack);

	return 0;

bad:
	pr_err("failed to decode keepalive2_ack\n");
	return -EINVAL;
}
2743 
2744 static int process_ack(struct ceph_connection *con, void *p, void *end)
2745 {
2746 	u64 seq;
2747 
2748 	if (con->state != CEPH_CON_S_OPEN) {
2749 		con->error_msg = "protocol error, unexpected ack";
2750 		return -EINVAL;
2751 	}
2752 
2753 	ceph_decode_64_safe(&p, end, seq, bad);
2754 
2755 	dout("%s con %p seq %llu\n", __func__, con, seq);
2756 	ceph_con_discard_sent(con, seq);
2757 	return 0;
2758 
2759 bad:
2760 	pr_err("failed to decode ack\n");
2761 	return -EINVAL;
2762 }
2763 
/*
 * Dispatch a non-MESSAGE control frame to its tag-specific handler
 * and, on success, arm the read of the next frame's preamble.
 */
static int process_control(struct ceph_connection *con, void *p, void *end)
{
	int tag = con->v2.in_desc.fd_tag;
	int ret;

	dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p));

	switch (tag) {
	case FRAME_TAG_HELLO:
		ret = process_hello(con, p, end);
		break;
	case FRAME_TAG_AUTH_BAD_METHOD:
		ret = process_auth_bad_method(con, p, end);
		break;
	case FRAME_TAG_AUTH_REPLY_MORE:
		ret = process_auth_reply_more(con, p, end);
		break;
	case FRAME_TAG_AUTH_DONE:
		ret = process_auth_done(con, p, end);
		break;
	case FRAME_TAG_AUTH_SIGNATURE:
		ret = process_auth_signature(con, p, end);
		break;
	case FRAME_TAG_SERVER_IDENT:
		ret = process_server_ident(con, p, end);
		break;
	case FRAME_TAG_IDENT_MISSING_FEATURES:
		ret = process_ident_missing_features(con, p, end);
		break;
	case FRAME_TAG_SESSION_RECONNECT_OK:
		ret = process_session_reconnect_ok(con, p, end);
		break;
	case FRAME_TAG_SESSION_RETRY:
		ret = process_session_retry(con, p, end);
		break;
	case FRAME_TAG_SESSION_RETRY_GLOBAL:
		ret = process_session_retry_global(con, p, end);
		break;
	case FRAME_TAG_SESSION_RESET:
		ret = process_session_reset(con, p, end);
		break;
	case FRAME_TAG_KEEPALIVE2_ACK:
		ret = process_keepalive2_ack(con, p, end);
		break;
	case FRAME_TAG_ACK:
		ret = process_ack(con, p, end);
		break;
	default:
		/* unknown tags are a protocol error, not silently skipped */
		pr_err("bad tag %d\n", tag);
		con->error_msg = "protocol error, bad tag";
		return -EINVAL;
	}
	if (ret) {
		dout("%s con %p error %d\n", __func__, con, ret);
		return ret;
	}

	prepare_read_preamble(con);
	return 0;
}
2824 
2825 /*
2826  * Return:
2827  *   1 - con->in_msg set, read message
2828  *   0 - skip message
2829  *  <0 - error
2830  */
/*
 * Decode and validate the ceph_msg_header2 at the start of a MESSAGE
 * frame's control segment, then ask the application for a ceph_msg to
 * receive into (or a decision to skip).
 *
 * Return:
 *   1 - con->in_msg set, read message
 *   0 - skip message (old seq# or application declined)
 *  <0 - error
 */
static int process_message_header(struct ceph_connection *con,
				  void *p, void *end)
{
	struct ceph_frame_desc *desc = &con->v2.in_desc;
	struct ceph_msg_header2 *hdr2;
	struct ceph_msg_header hdr;
	int skip;
	int ret;
	u64 seq;

	ceph_decode_need(&p, end, sizeof(*hdr2), bad);
	hdr2 = p;

	/* verify seq# */
	seq = le64_to_cpu(hdr2->seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr),
			seq, con->in_seq + 1);
		return 0;
	}
	if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADE;
	}

	/* the header also piggybacks an ack for our sent messages */
	ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq));

	/* build a msgr1-style header for the generic allocation path */
	fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2],
		    desc->fd_lens[3], &con->peer_name);
	ret = ceph_con_in_msg_alloc(con, &hdr, &skip);
	if (ret)
		return ret;

	/* exactly one of in_msg / skip must be set */
	WARN_ON(!con->in_msg ^ skip);
	if (skip)
		return 0;

	WARN_ON(!con->in_msg);
	WARN_ON(con->in_msg->con != con);
	return 1;

bad:
	pr_err("failed to decode message header\n");
	return -EINVAL;
}
2879 
/*
 * Deliver a fully-received message to the application and arm the
 * read of the next frame's preamble.
 */
static int process_message(struct ceph_connection *con)
{
	ceph_con_process_message(con);

	/*
	 * We could have been closed by ceph_con_close() because
	 * ceph_con_process_message() temporarily drops con->mutex.
	 */
	if (con->state != CEPH_CON_S_OPEN) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	prepare_read_preamble(con);
	return 0;
}
2897 
/*
 * Process the control segment of a frame.  Non-MESSAGE tags go to
 * process_control(); for MESSAGE frames, validate the header, set up
 * (or skip) the incoming ceph_msg and arrange to read the tail.
 */
static int __handle_control(struct ceph_connection *con, void *p)
{
	void *end = p + con->v2.in_desc.fd_lens[0];
	struct ceph_msg *msg;
	int ret;

	if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
		return process_control(con, p, end);

	/* MESSAGE frames are only valid on an established session */
	if (con->state != CEPH_CON_S_OPEN) {
		con->error_msg = "protocol error, unexpected message";
		return -EINVAL;
	}

	ret = process_message_header(con, p, end);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		prepare_skip_message(con);
		return 0;
	}

	msg = con->in_msg;  /* set in process_message_header() */
	if (front_len(msg)) {
		WARN_ON(front_len(msg) > msg->front_alloc_len);
		msg->front.iov_len = front_len(msg);
	} else {
		msg->front.iov_len = 0;
	}
	if (middle_len(msg)) {
		WARN_ON(middle_len(msg) > msg->middle->alloc_len);
		msg->middle->vec.iov_len = middle_len(msg);
	} else if (msg->middle) {
		msg->middle->vec.iov_len = 0;
	}

	/* an empty message has no tail -- deliver it right away */
	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
		return process_message(con);

	if (con_secure(con))
		return prepare_read_tail_secure(con);

	return prepare_read_tail_plain(con);
}
2942 
/*
 * A frame preamble has been read into in_buf.  Decrypt it if the
 * session is secure, decode the frame descriptor and decide how to
 * obtain the control segment: read it (plain), read the part beyond
 * what fit inline in the preamble (secure), or process the inline
 * control body directly.
 */
static int handle_preamble(struct ceph_connection *con)
{
	struct ceph_frame_desc *desc = &con->v2.in_desc;
	int ret;

	if (con_secure(con)) {
		ret = decrypt_preamble(con);
		if (ret) {
			if (ret == -EBADMSG)
				con->error_msg = "integrity error, bad preamble auth tag";
			return ret;
		}
	}

	ret = decode_preamble(con->v2.in_buf, desc);
	if (ret) {
		if (ret == -EBADMSG)
			con->error_msg = "integrity error, bad crc";
		else
			con->error_msg = "protocol error, bad preamble";
		return ret;
	}

	dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__,
	     con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0],
	     desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]);

	if (!con_secure(con))
		return prepare_read_control(con);

	/* secure: part of the control segment came inline in the preamble */
	if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN)
		return prepare_read_control_remainder(con);

	return __handle_control(con, CTRL_BODY(con->v2.in_buf));
}
2978 
/*
 * A plain (non-secure) control segment has been read.  Verify its crc
 * and hand it off for processing.
 */
static int handle_control(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	void *buf;
	int ret;

	WARN_ON(con_secure(con));

	ret = verify_control_crc(con);
	if (ret) {
		con->error_msg = "integrity error, bad crc";
		return ret;
	}

	/*
	 * NOTE(review): during auth the payload is copied into a conn
	 * buffer (freed later via free_conn_bufs()) -- presumably so it
	 * outlives in_kvecs reuse across the auth exchange; confirm.
	 */
	if (con->state == CEPH_CON_S_V2_AUTH) {
		buf = alloc_conn_buf(con, ctrl_len);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len);
		return __handle_control(con, buf);
	}

	return __handle_control(con, con->v2.in_kvecs[0].iov_base);
}
3004 
/*
 * Secure mode: the part of the control segment that didn't fit inline
 * in the preamble has been read.  Decrypt it, then process the whole
 * control segment -- rewinding the pointer so it starts at the inline
 * part that precedes the remainder in the buffer.
 */
static int handle_control_remainder(struct ceph_connection *con)
{
	int ret;

	WARN_ON(!con_secure(con));

	ret = decrypt_control_remainder(con);
	if (ret) {
		if (ret == -EBADMSG)
			con->error_msg = "integrity error, bad control remainder auth tag";
		return ret;
	}

	return __handle_control(con, con->v2.in_kvecs[0].iov_base -
				     CEPH_PREAMBLE_INLINE_LEN);
}
3021 
/*
 * The message tail has been fully read.  Secure mode: decrypt it and
 * check only the late status in the epilogue (integrity comes from the
 * auth tags).  Plain mode: decode the epilogue crcs and verify them.
 * On success, deliver the message.
 */
static int handle_epilogue(struct ceph_connection *con)
{
	u32 front_crc, middle_crc, data_crc;
	int ret;

	if (con_secure(con)) {
		ret = decrypt_tail(con);
		if (ret) {
			if (ret == -EBADMSG)
				con->error_msg = "integrity error, bad epilogue auth tag";
			return ret;
		}

		/* just late_status */
		ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL);
		if (ret) {
			con->error_msg = "protocol error, bad epilogue";
			return ret;
		}
	} else {
		ret = decode_epilogue(con->v2.in_buf, &front_crc,
				      &middle_crc, &data_crc);
		if (ret) {
			con->error_msg = "protocol error, bad epilogue";
			return ret;
		}

		ret = verify_epilogue_crcs(con, front_crc, middle_crc,
					   data_crc);
		if (ret) {
			con->error_msg = "integrity error, bad crc";
			return ret;
		}
	}

	return process_message(con);
}
3059 
/*
 * The skipped message's tail has been consumed.  In secure mode the
 * skipped frame still used up a nonce, so advance it before moving on
 * to the next preamble.
 */
static void finish_skip(struct ceph_connection *con)
{
	dout("%s con %p\n", __func__, con);

	if (con_secure(con))
		gcm_inc_nonce(&con->v2.in_gcm_nonce);

	__finish_skip(con);
}
3069 
/*
 * Process whatever was just read (in_iter is empty) and queue the
 * next chunk to read according to con->state / v2.in_state.
 *
 * Return:
 *   1 - in_iter repopulated, keep reading
 *  <0 - error (-ENODATA if a handler failed to queue anything)
 */
static int populate_in_iter(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p state %d in_state %d\n", __func__, con, con->state,
	     con->v2.in_state);
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) {
		ret = process_banner_prefix(con);
	} else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) {
		ret = process_banner_payload(con);
	} else if ((con->state >= CEPH_CON_S_V2_HELLO &&
		    con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) ||
		   con->state == CEPH_CON_S_OPEN) {
		/* frame-based states -- dispatch on the in_state machine */
		switch (con->v2.in_state) {
		case IN_S_HANDLE_PREAMBLE:
			ret = handle_preamble(con);
			break;
		case IN_S_HANDLE_CONTROL:
			ret = handle_control(con);
			break;
		case IN_S_HANDLE_CONTROL_REMAINDER:
			ret = handle_control_remainder(con);
			break;
		case IN_S_PREPARE_READ_DATA:
			ret = prepare_read_data(con);
			break;
		case IN_S_PREPARE_READ_DATA_CONT:
			prepare_read_data_cont(con);
			ret = 0;
			break;
		case IN_S_PREPARE_READ_ENC_PAGE:
			prepare_read_enc_page(con);
			ret = 0;
			break;
		case IN_S_PREPARE_SPARSE_DATA:
			ret = prepare_sparse_read_data(con);
			break;
		case IN_S_PREPARE_SPARSE_DATA_CONT:
			ret = prepare_sparse_read_cont(con);
			break;
		case IN_S_HANDLE_EPILOGUE:
			ret = handle_epilogue(con);
			break;
		case IN_S_FINISH_SKIP:
			finish_skip(con);
			ret = 0;
			break;
		default:
			WARN(1, "bad in_state %d", con->v2.in_state);
			return -EINVAL;
		}
	} else {
		WARN(1, "bad state %d", con->state);
		return -EINVAL;
	}
	if (ret) {
		dout("%s con %p error %d\n", __func__, con, ret);
		return ret;
	}

	/* every successful handler must have queued the next read */
	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
		return -ENODATA;
	dout("%s con %p populated %zu\n", __func__, con,
	     iov_iter_count(&con->v2.in_iter));
	return 1;
}
3138 
/*
 * Receive until the socket runs dry: alternate between draining
 * in_iter with ceph_tcp_recv() and refilling it with
 * populate_in_iter(), for as long as both make progress.
 *
 * Returns <= 0 when no further progress can be made; con->error_msg
 * is set on fatal processing errors.
 */
int ceph_con_v2_try_read(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p state %d need %zu\n", __func__, con, con->state,
	     iov_iter_count(&con->v2.in_iter));

	/* the socket hasn't been opened yet - nothing to read */
	if (con->state == CEPH_CON_S_PREOPEN)
		return 0;

	/*
	 * We should always have something pending here.  If not,
	 * avoid calling populate_in_iter() as if we read something
	 * (ceph_tcp_recv() would immediately return 1).
	 */
	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
		return -ENODATA;

	for (;;) {
		ret = ceph_tcp_recv(con);
		if (ret <= 0)
			return ret;

		ret = populate_in_iter(con);
		if (ret <= 0) {
			if (ret && ret != -EAGAIN && !con->error_msg)
				con->error_msg = "read processing error";
			return ret;
		}
	}
}
3170 
/*
 * Begin streaming the data section of a message in plain mode:
 * initialize the running data crc, point out_bvec at the first
 * piece of data and arm the CONT state to walk the rest.
 */
static void queue_data(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct bio_vec bv;

	/* -1 is the crc32c seed */
	con->v2.out_epil.data_crc = -1;
	ceph_msg_data_cursor_init(&con->v2.out_cursor, msg,
				  data_len(msg));

	get_bvec_at(&con->v2.out_cursor, &bv);
	set_out_bvec(con, &bv, true);
	con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
}
3183 
/*
 * Continue streaming message data in plain mode: fold the piece
 * that was just sent into the running data crc, then queue either
 * the next piece or, once all data is out, the plain epilogue.
 */
static void queue_data_cont(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct bio_vec bv;

	con->v2.out_epil.data_crc = ceph_crc32c_page(
		con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
		con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len);

	ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len);
	if (con->v2.out_cursor.total_resid) {
		get_bvec_at(&con->v2.out_cursor, &bv);
		set_out_bvec(con, &bv, true);
		WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT);
		return;
	}

	/*
	 * We've written all data.  Queue epilogue.  Once it's written,
	 * we are done.
	 */
	reset_out_kvecs(con);
	prepare_epilogue_plain(con, msg, false);
	con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
3208 
/*
 * Queue the next page of ciphertext for sending (secure mode).
 * Queueing the last page moves the state machine on to
 * FINISH_MESSAGE.
 */
static void queue_enc_page(struct ceph_connection *con)
{
	struct bio_vec bv;

	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i,
	     con->v2.out_enc_resid);
	WARN_ON(!con->v2.out_enc_resid);

	/* the last page may be partially filled */
	bvec_set_page(&bv, con->v2.out_enc_pages[con->v2.out_enc_i],
		      min(con->v2.out_enc_resid, (int)PAGE_SIZE), 0);

	set_out_bvec(con, &bv, false);
	con->v2.out_enc_i++;
	con->v2.out_enc_resid -= bv.bv_len;

	if (con->v2.out_enc_resid) {
		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE);
		return;
	}

	/*
	 * We've queued the last piece of ciphertext (ending with
	 * epilogue) + auth tag.  Once it's written, we are done.
	 */
	WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt);
	con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
3236 
/*
 * Queue the next run of zeros standing in for the unsent part of a
 * revoked message; once out_zero is exhausted, queue the epilogue
 * marked ABORTED instead.
 */
static void queue_zeros(struct ceph_connection *con, struct ceph_msg *msg)
{
	dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);

	if (con->v2.out_zero) {
		set_out_bvec_zero(con);
		con->v2.out_zero -= con->v2.out_bvec.bv_len;
		con->v2.out_state = OUT_S_QUEUE_ZEROS;
		return;
	}

	/*
	 * We've zero-filled everything up to epilogue.  Queue epilogue
	 * with late_status set to ABORTED and crcs adjusted for zeros.
	 * Once it's written, we are done patching up for the revoke.
	 */
	reset_out_kvecs(con);
	prepare_epilogue_plain(con, msg, true);
	con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
3257 
3258 static void finish_message(struct ceph_connection *con)
3259 {
3260 	dout("%s con %p msg %p\n", __func__, con, con->out_msg);
3261 
3262 	/* we end up here both plain and secure modes */
3263 	if (con->v2.out_enc_pages) {
3264 		WARN_ON(!con->v2.out_enc_page_cnt);
3265 		ceph_release_page_vector(con->v2.out_enc_pages,
3266 					 con->v2.out_enc_page_cnt);
3267 		con->v2.out_enc_pages = NULL;
3268 		con->v2.out_enc_page_cnt = 0;
3269 	}
3270 	/* message may have been revoked */
3271 	if (con->out_msg) {
3272 		ceph_msg_put(con->out_msg);
3273 		con->out_msg = NULL;
3274 	}
3275 
3276 	con->v2.out_state = OUT_S_GET_NEXT;
3277 }
3278 
/*
 * Repopulate con->v2.out_iter with the next thing to be written to
 * the socket: finish off the message in flight (more data, zeros
 * for a revoke, or its epilogue) or, once idle, start on the next
 * work item - a keepalive, the next queued message or an ack.
 *
 * Returns 1 if out_iter was repopulated, 0 if there is nothing
 * pending (CEPH_CON_F_WRITE_PENDING is cleared), or a negative
 * error.
 */
static int populate_out_iter(struct ceph_connection *con)
{
	struct ceph_msg *msg;
	int ret;

	dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
	     con->v2.out_state);
	WARN_ON(iov_iter_count(&con->v2.out_iter));

	if (con->state != CEPH_CON_S_OPEN) {
		WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX ||
			con->state > CEPH_CON_S_V2_SESSION_RECONNECT);
		goto nothing_pending;
	}

	switch (con->v2.out_state) {
	case OUT_S_QUEUE_DATA:
		WARN_ON(!con->out_msg);
		queue_data(con, con->out_msg);
		goto populated;
	case OUT_S_QUEUE_DATA_CONT:
		WARN_ON(!con->out_msg);
		queue_data_cont(con, con->out_msg);
		goto populated;
	case OUT_S_QUEUE_ENC_PAGE:
		queue_enc_page(con);
		goto populated;
	case OUT_S_QUEUE_ZEROS:
		WARN_ON(con->out_msg);  /* revoked */
		queue_zeros(con, con->out_msg);
		goto populated;
	case OUT_S_FINISH_MESSAGE:
		finish_message(con);
		break;
	case OUT_S_GET_NEXT:
		break;
	default:
		WARN(1, "bad out_state %d", con->v2.out_state);
		return -EINVAL;
	}

	/* nothing in flight - pick the next work item, if any */
	WARN_ON(con->v2.out_state != OUT_S_GET_NEXT);
	if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
		ret = prepare_keepalive2(con);
		if (ret) {
			pr_err("prepare_keepalive2 failed: %d\n", ret);
			return ret;
		}
	} else if ((msg = ceph_con_get_out_msg(con)) != NULL) {
		ret = prepare_message(con, msg);
		if (ret) {
			pr_err("prepare_message failed: %d\n", ret);
			return ret;
		}
	} else if (con->in_seq > con->in_seq_acked) {
		ret = prepare_ack(con);
		if (ret) {
			pr_err("prepare_ack failed: %d\n", ret);
			return ret;
		}
	} else {
		goto nothing_pending;
	}

populated:
	if (WARN_ON(!iov_iter_count(&con->v2.out_iter)))
		return -ENODATA;
	dout("%s con %p populated %zu\n", __func__, con,
	     iov_iter_count(&con->v2.out_iter));
	return 1;

nothing_pending:
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	dout("%s con %p nothing pending\n", __func__, con);
	ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}
3356 
/*
 * Write out everything that is pending: open the socket first if
 * needed, then alternate between sending out_iter with
 * ceph_tcp_send() and refilling it with populate_out_iter(),
 * corking the socket around the loop to coalesce partial writes.
 */
int ceph_con_v2_try_write(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p state %d have %zu\n", __func__, con, con->state,
	     iov_iter_count(&con->v2.out_iter));

	/* open the socket first? */
	if (con->state == CEPH_CON_S_PREOPEN) {
		WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2);

		/*
		 * Always bump global_seq.  Bump connect_seq only if
		 * there is a session (i.e. we are reconnecting and will
		 * send session_reconnect instead of client_ident).
		 */
		con->v2.global_seq = ceph_get_global_seq(con->msgr, 0);
		if (con->v2.server_cookie)
			con->v2.connect_seq++;

		ret = prepare_read_banner_prefix(con);
		if (ret) {
			pr_err("prepare_read_banner_prefix failed: %d\n", ret);
			con->error_msg = "connect error";
			return ret;
		}

		reset_out_kvecs(con);
		ret = prepare_banner(con);
		if (ret) {
			pr_err("prepare_banner failed: %d\n", ret);
			con->error_msg = "connect error";
			return ret;
		}

		ret = ceph_tcp_connect(con);
		if (ret) {
			pr_err("ceph_tcp_connect failed: %d\n", ret);
			con->error_msg = "connect error";
			return ret;
		}
	}

	if (!iov_iter_count(&con->v2.out_iter)) {
		ret = populate_out_iter(con);
		if (ret <= 0) {
			if (ret && ret != -EAGAIN && !con->error_msg)
				con->error_msg = "write processing error";
			return ret;
		}
	}

	tcp_sock_set_cork(con->sock->sk, true);
	for (;;) {
		ret = ceph_tcp_send(con);
		if (ret <= 0)
			break;

		ret = populate_out_iter(con);
		if (ret <= 0) {
			if (ret && ret != -EAGAIN && !con->error_msg)
				con->error_msg = "write processing error";
			break;
		}
	}

	tcp_sock_set_cork(con->sock->sk, false);
	return ret;
}
3426 
3427 static u32 crc32c_zeros(u32 crc, int zero_len)
3428 {
3429 	int len;
3430 
3431 	while (zero_len) {
3432 		len = min(zero_len, (int)PAGE_SIZE);
3433 		crc = crc32c(crc, page_address(ceph_zero_page), len);
3434 		zero_len -= len;
3435 	}
3436 
3437 	return crc;
3438 }
3439 
3440 static void prepare_zero_front(struct ceph_connection *con,
3441 			       struct ceph_msg *msg, int resid)
3442 {
3443 	int sent;
3444 
3445 	WARN_ON(!resid || resid > front_len(msg));
3446 	sent = front_len(msg) - resid;
3447 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3448 
3449 	if (sent) {
3450 		con->v2.out_epil.front_crc =
3451 			crc32c(-1, msg->front.iov_base, sent);
3452 		con->v2.out_epil.front_crc =
3453 			crc32c_zeros(con->v2.out_epil.front_crc, resid);
3454 	} else {
3455 		con->v2.out_epil.front_crc = crc32c_zeros(-1, resid);
3456 	}
3457 
3458 	con->v2.out_iter.count -= resid;
3459 	out_zero_add(con, resid);
3460 }
3461 
3462 static void prepare_zero_middle(struct ceph_connection *con,
3463 				struct ceph_msg *msg, int resid)
3464 {
3465 	int sent;
3466 
3467 	WARN_ON(!resid || resid > middle_len(msg));
3468 	sent = middle_len(msg) - resid;
3469 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3470 
3471 	if (sent) {
3472 		con->v2.out_epil.middle_crc =
3473 			crc32c(-1, msg->middle->vec.iov_base, sent);
3474 		con->v2.out_epil.middle_crc =
3475 			crc32c_zeros(con->v2.out_epil.middle_crc, resid);
3476 	} else {
3477 		con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid);
3478 	}
3479 
3480 	con->v2.out_iter.count -= resid;
3481 	out_zero_add(con, resid);
3482 }
3483 
3484 static void prepare_zero_data(struct ceph_connection *con,
3485 			      struct ceph_msg *msg)
3486 {
3487 	dout("%s con %p\n", __func__, con);
3488 	con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(msg));
3489 	out_zero_add(con, data_len(msg));
3490 }
3491 
/*
 * Revoke while the message head (header + front + middle) is still
 * being sent out of the kvec iter, before any data went out.  Use
 * the residual count against the section boundaries to tell how far
 * we got, substitute zeros (with matching crcs) for everything that
 * hasn't been sent and move on to queueing those zeros.
 */
static void revoke_at_queue_data(struct ceph_connection *con,
				 struct ceph_msg *msg)
{
	int boundary;
	int resid;

	WARN_ON(!data_len(msg));
	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
	resid = iov_iter_count(&con->v2.out_iter);

	boundary = front_len(msg) + middle_len(msg);
	if (resid > boundary) {
		resid -= boundary;
		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
		dout("%s con %p was sending head\n", __func__, con);
		if (front_len(msg))
			prepare_zero_front(con, msg, front_len(msg));
		if (middle_len(msg))
			prepare_zero_middle(con, msg, middle_len(msg));
		prepare_zero_data(con, msg);
		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
		/* let the rest of the head drain before the zeros */
		con->v2.out_state = OUT_S_QUEUE_ZEROS;
		return;
	}

	boundary = middle_len(msg);
	if (resid > boundary) {
		resid -= boundary;
		dout("%s con %p was sending front\n", __func__, con);
		prepare_zero_front(con, msg, resid);
		if (middle_len(msg))
			prepare_zero_middle(con, msg, middle_len(msg));
		prepare_zero_data(con, msg);
		queue_zeros(con, msg);
		return;
	}

	WARN_ON(!resid);
	dout("%s con %p was sending middle\n", __func__, con);
	prepare_zero_middle(con, msg, resid);
	prepare_zero_data(con, msg);
	queue_zeros(con, msg);
}
3535 
/*
 * Revoke in the middle of streaming message data: fold the part of
 * the current piece that was actually sent into the data crc,
 * extend the crc over zeros for everything that remains and queue
 * the zeros.
 */
static void revoke_at_queue_data_cont(struct ceph_connection *con,
				      struct ceph_msg *msg)
{
	int sent, resid;  /* current piece of data */

	WARN_ON(!data_len(msg));
	WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
	resid = iov_iter_count(&con->v2.out_iter);
	WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
	sent = con->v2.out_bvec.bv_len - resid;
	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);

	if (sent) {
		con->v2.out_epil.data_crc = ceph_crc32c_page(
			con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
			con->v2.out_bvec.bv_offset, sent);
		ceph_msg_data_advance(&con->v2.out_cursor, sent);
	}
	WARN_ON(resid > con->v2.out_cursor.total_resid);
	con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc,
						con->v2.out_cursor.total_resid);

	con->v2.out_iter.count -= resid;
	out_zero_add(con, con->v2.out_cursor.total_resid);
	queue_zeros(con, msg);
}
3562 
/*
 * Revoke after everything up to and including the epilogue was
 * queued (all in kvecs).  Depending on the residual we may have
 * been sending the head, front, middle or the epilogue itself:
 * zero-substitute the unsent sections and drop the already queued
 * epilogue so an ABORTED one can be queued in its place.  An empty
 * message, or one caught on the epilogue, needs no patching.
 */
static void revoke_at_finish_message(struct ceph_connection *con,
				     struct ceph_msg *msg)
{
	int boundary;
	int resid;

	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
	resid = iov_iter_count(&con->v2.out_iter);

	if (!front_len(msg) && !middle_len(msg) &&
	    !data_len(msg)) {
		WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
		dout("%s con %p was sending head (empty message) - noop\n",
		     __func__, con);
		return;
	}

	boundary = front_len(msg) + middle_len(msg) +
		   CEPH_EPILOGUE_PLAIN_LEN;
	if (resid > boundary) {
		resid -= boundary;
		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
		dout("%s con %p was sending head\n", __func__, con);
		if (front_len(msg))
			prepare_zero_front(con, msg, front_len(msg));
		if (middle_len(msg))
			prepare_zero_middle(con, msg, middle_len(msg));
		/* drop the queued epilogue - an ABORTED one is requeued */
		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
		con->v2.out_state = OUT_S_QUEUE_ZEROS;
		return;
	}

	boundary = middle_len(msg) + CEPH_EPILOGUE_PLAIN_LEN;
	if (resid > boundary) {
		resid -= boundary;
		dout("%s con %p was sending front\n", __func__, con);
		prepare_zero_front(con, msg, resid);
		if (middle_len(msg))
			prepare_zero_middle(con, msg, middle_len(msg));
		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
		queue_zeros(con, msg);
		return;
	}

	boundary = CEPH_EPILOGUE_PLAIN_LEN;
	if (resid > boundary) {
		resid -= boundary;
		dout("%s con %p was sending middle\n", __func__, con);
		prepare_zero_middle(con, msg, resid);
		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
		queue_zeros(con, msg);
		return;
	}

	WARN_ON(!resid);
	dout("%s con %p was sending epilogue - noop\n", __func__, con);
}
3621 
/*
 * Revoke @msg mid-send.  In secure mode the frame was encrypted up
 * front (epilogue and auth tag included), so it goes out unchanged.
 * In plain mode the unsent sections are replaced with zeros and an
 * epilogue with late_status ABORTED, keeping the frame well-formed
 * on the wire.
 */
void ceph_con_v2_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
	WARN_ON(con->v2.out_zero);

	if (con_secure(con)) {
		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE &&
			con->v2.out_state != OUT_S_FINISH_MESSAGE);
		dout("%s con %p secure - noop\n", __func__, con);
		return;
	}

	switch (con->v2.out_state) {
	case OUT_S_QUEUE_DATA:
		revoke_at_queue_data(con, msg);
		break;
	case OUT_S_QUEUE_DATA_CONT:
		revoke_at_queue_data_cont(con, msg);
		break;
	case OUT_S_FINISH_MESSAGE:
		revoke_at_finish_message(con, msg);
		break;
	default:
		WARN(1, "bad out_state %d", con->v2.out_state);
		break;
	}
}
3648 
/*
 * Revoke an incoming message before any of its data was consumed:
 * skip whatever is still queued in in_iter plus the whole data
 * section and the plain epilogue.
 */
static void revoke_at_prepare_read_data(struct ceph_connection *con)
{
	int remaining;
	int resid;

	WARN_ON(con_secure(con));
	WARN_ON(!data_len(con->in_msg));
	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
	resid = iov_iter_count(&con->v2.in_iter);
	WARN_ON(!resid);

	remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
	dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
	     remaining);
	con->v2.in_iter.count -= resid;
	set_in_skip(con, resid + remaining);
	con->v2.in_state = IN_S_FINISH_SKIP;
}
3667 
/*
 * Revoke an incoming message mid-data: advance the cursor past what
 * was actually received, then skip the rest of the current piece,
 * the remaining data and the plain epilogue.
 */
static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
{
	int recved, resid;  /* current piece of data */
	int remaining;

	WARN_ON(con_secure(con));
	WARN_ON(!data_len(con->in_msg));
	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
	resid = iov_iter_count(&con->v2.in_iter);
	WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
	recved = con->v2.in_bvec.bv_len - resid;
	dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid);

	if (recved)
		ceph_msg_data_advance(&con->v2.in_cursor, recved);
	WARN_ON(resid > con->v2.in_cursor.total_resid);

	remaining = CEPH_EPILOGUE_PLAIN_LEN;
	dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
	     con->v2.in_cursor.total_resid, remaining);
	con->v2.in_iter.count -= resid;
	set_in_skip(con, con->v2.in_cursor.total_resid + remaining);
	con->v2.in_state = IN_S_FINISH_SKIP;
}
3692 
/*
 * Revoke an incoming message in secure mode: skip the rest of the
 * current ciphertext page plus all remaining ciphertext
 * (in_enc_resid).
 */
static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
{
	int resid;  /* current enc page (not necessarily data) */

	WARN_ON(!con_secure(con));
	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
	resid = iov_iter_count(&con->v2.in_iter);
	WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);

	dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid,
	     con->v2.in_enc_resid);
	con->v2.in_iter.count -= resid;
	set_in_skip(con, resid + con->v2.in_enc_resid);
	con->v2.in_state = IN_S_FINISH_SKIP;
}
3708 
/*
 * Revoke an incoming sparse-read message mid-data: skip what's left
 * of the current piece, the remaining data (con->v2.data_len_remain)
 * and the plain epilogue.
 */
static void revoke_at_prepare_sparse_data(struct ceph_connection *con)
{
	int resid;  /* current piece of data */
	int remaining;

	WARN_ON(con_secure(con));
	WARN_ON(!data_len(con->in_msg));
	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
	resid = iov_iter_count(&con->v2.in_iter);
	dout("%s con %p resid %d\n", __func__, con, resid);

	remaining = CEPH_EPILOGUE_PLAIN_LEN + con->v2.data_len_remain;
	con->v2.in_iter.count -= resid;
	set_in_skip(con, resid + remaining);
	con->v2.in_state = IN_S_FINISH_SKIP;
}
3725 
3726 static void revoke_at_handle_epilogue(struct ceph_connection *con)
3727 {
3728 	int resid;
3729 
3730 	resid = iov_iter_count(&con->v2.in_iter);
3731 	WARN_ON(!resid);
3732 
3733 	dout("%s con %p resid %d\n", __func__, con, resid);
3734 	con->v2.in_iter.count -= resid;
3735 	set_in_skip(con, resid);
3736 	con->v2.in_state = IN_S_FINISH_SKIP;
3737 }
3738 
/*
 * Revoke con->in_msg while its data or epilogue is being read: the
 * bytes still owed on the wire are turned into a skip so that the
 * connection stays in sync with the peer.
 */
void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
{
	switch (con->v2.in_state) {
	case IN_S_PREPARE_SPARSE_DATA:
		/* sparse read hasn't started - same as the regular case */
	case IN_S_PREPARE_READ_DATA:
		revoke_at_prepare_read_data(con);
		break;
	case IN_S_PREPARE_READ_DATA_CONT:
		revoke_at_prepare_read_data_cont(con);
		break;
	case IN_S_PREPARE_READ_ENC_PAGE:
		revoke_at_prepare_read_enc_page(con);
		break;
	case IN_S_PREPARE_SPARSE_DATA_CONT:
		revoke_at_prepare_sparse_data(con);
		break;
	case IN_S_HANDLE_EPILOGUE:
		revoke_at_handle_epilogue(con);
		break;
	default:
		WARN(1, "bad in_state %d", con->v2.in_state);
		break;
	}
}
3763 
3764 bool ceph_con_v2_opened(struct ceph_connection *con)
3765 {
3766 	return con->v2.peer_global_seq;
3767 }
3768 
3769 void ceph_con_v2_reset_session(struct ceph_connection *con)
3770 {
3771 	con->v2.client_cookie = 0;
3772 	con->v2.server_cookie = 0;
3773 	con->v2.global_seq = 0;
3774 	con->v2.connect_seq = 0;
3775 	con->v2.peer_global_seq = 0;
3776 }
3777 
/*
 * Tear down all per-connection v2 protocol state: drop partially
 * transferred data, free receive/transmit buffers and wipe crypto
 * material.
 */
void ceph_con_v2_reset_protocol(struct ceph_connection *con)
{
	/* abandon anything partially read or written */
	iov_iter_truncate(&con->v2.in_iter, 0);
	iov_iter_truncate(&con->v2.out_iter, 0);
	con->v2.out_zero = 0;

	clear_in_sign_kvecs(con);
	clear_out_sign_kvecs(con);
	free_conn_bufs(con);

	if (con->v2.in_enc_pages) {
		WARN_ON(!con->v2.in_enc_page_cnt);
		ceph_release_page_vector(con->v2.in_enc_pages,
					 con->v2.in_enc_page_cnt);
		con->v2.in_enc_pages = NULL;
		con->v2.in_enc_page_cnt = 0;
	}
	if (con->v2.out_enc_pages) {
		WARN_ON(!con->v2.out_enc_page_cnt);
		ceph_release_page_vector(con->v2.out_enc_pages,
					 con->v2.out_enc_page_cnt);
		con->v2.out_enc_pages = NULL;
		con->v2.out_enc_page_cnt = 0;
	}

	con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
	/* memzero_explicit() so wiping secrets isn't optimized away */
	memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
	memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);

	memzero_explicit(&con->v2.hmac_key, sizeof(con->v2.hmac_key));
	con->v2.hmac_key_set = false;
	if (con->v2.gcm_req) {
		aead_request_free(con->v2.gcm_req);
		con->v2.gcm_req = NULL;
	}
	if (con->v2.gcm_tfm) {
		crypto_free_aead(con->v2.gcm_tfm);
		con->v2.gcm_tfm = NULL;
	}
}
3818