xref: /linux/net/ceph/messenger_v2.c (revision 8bd9238e511d02831022ff0270865c54ccc482d6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Ceph msgr2 protocol implementation
4  *
5  * Copyright (C) 2020 Ilya Dryomov <idryomov@gmail.com>
6  */
7 
8 #include <linux/ceph/ceph_debug.h>
9 
10 #include <crypto/aead.h>
11 #include <crypto/hash.h>
12 #include <crypto/sha2.h>
13 #include <crypto/utils.h>
14 #include <linux/bvec.h>
15 #include <linux/crc32c.h>
16 #include <linux/net.h>
17 #include <linux/scatterlist.h>
18 #include <linux/socket.h>
19 #include <linux/sched/mm.h>
20 #include <net/sock.h>
21 #include <net/tcp.h>
22 
23 #include <linux/ceph/ceph_features.h>
24 #include <linux/ceph/decode.h>
25 #include <linux/ceph/libceph.h>
26 #include <linux/ceph/messenger.h>
27 
28 #include "crypto.h"  /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */
29 
30 #define FRAME_TAG_HELLO			1
31 #define FRAME_TAG_AUTH_REQUEST		2
32 #define FRAME_TAG_AUTH_BAD_METHOD	3
33 #define FRAME_TAG_AUTH_REPLY_MORE	4
34 #define FRAME_TAG_AUTH_REQUEST_MORE	5
35 #define FRAME_TAG_AUTH_DONE		6
36 #define FRAME_TAG_AUTH_SIGNATURE	7
37 #define FRAME_TAG_CLIENT_IDENT		8
38 #define FRAME_TAG_SERVER_IDENT		9
39 #define FRAME_TAG_IDENT_MISSING_FEATURES 10
40 #define FRAME_TAG_SESSION_RECONNECT	11
41 #define FRAME_TAG_SESSION_RESET		12
42 #define FRAME_TAG_SESSION_RETRY		13
43 #define FRAME_TAG_SESSION_RETRY_GLOBAL	14
44 #define FRAME_TAG_SESSION_RECONNECT_OK	15
45 #define FRAME_TAG_WAIT			16
46 #define FRAME_TAG_MESSAGE		17
47 #define FRAME_TAG_KEEPALIVE2		18
48 #define FRAME_TAG_KEEPALIVE2_ACK	19
49 #define FRAME_TAG_ACK			20
50 
51 #define FRAME_LATE_STATUS_ABORTED	0x1
52 #define FRAME_LATE_STATUS_COMPLETE	0xe
53 #define FRAME_LATE_STATUS_ABORTED_MASK	0xf
54 
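/* receive path state machine (con->v2.in_state) */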
55 #define IN_S_HANDLE_PREAMBLE			1
56 #define IN_S_HANDLE_CONTROL			2
57 #define IN_S_HANDLE_CONTROL_REMAINDER		3
58 #define IN_S_PREPARE_READ_DATA			4
59 #define IN_S_PREPARE_READ_DATA_CONT		5
60 #define IN_S_PREPARE_READ_ENC_PAGE		6
61 #define IN_S_PREPARE_SPARSE_DATA		7
62 #define IN_S_PREPARE_SPARSE_DATA_CONT		8
63 #define IN_S_HANDLE_EPILOGUE			9
64 #define IN_S_FINISH_SKIP			10
65 
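/* send path state machine (con->v2.out_state) */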
66 #define OUT_S_QUEUE_DATA		1
67 #define OUT_S_QUEUE_DATA_CONT		2
68 #define OUT_S_QUEUE_ENC_PAGE		3
69 #define OUT_S_QUEUE_ZEROS		4
70 #define OUT_S_FINISH_MESSAGE		5
71 #define OUT_S_GET_NEXT			6
72 
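/*
 * Layout helpers for connection buffers: CTRL_BODY() skips the frame
 * preamble, while FRONT_PAD()/MIDDLE_PAD()/DATA_PAD() address the
 * per-segment GCM padding scratch areas that follow the secure epilogue.
 */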
73 #define CTRL_BODY(p)	((void *)(p) + CEPH_PREAMBLE_LEN)
74 #define FRONT_PAD(p)	((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
75 #define MIDDLE_PAD(p)	(FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
76 #define DATA_PAD(p)	(MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)
77 
78 #define CEPH_MSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
79 
80 static int do_recvmsg(struct socket *sock, struct iov_iter *it)
81 {
82 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
83 	int ret;
84 
85 	msg.msg_iter = *it;
86 	while (iov_iter_count(it)) {
87 		ret = sock_recvmsg(sock, &msg, msg.msg_flags);
88 		if (ret <= 0) {
89 			if (ret == -EAGAIN)
90 				ret = 0;
91 			return ret;
92 		}
93 
94 		iov_iter_advance(it, ret);
95 	}
96 
97 	WARN_ON(msg_data_left(&msg));
98 	return 1;
99 }
100 
101 /*
102  * Read as much as possible.
103  *
104  * Return:
105  *   1 - done, nothing (else) to read
106  *   0 - socket is empty, need to wait
107  *  <0 - error
108  */
109 static int ceph_tcp_recv(struct ceph_connection *con)
110 {
111 	int ret;
112 
113 	dout("%s con %p %s %zu\n", __func__, con,
114 	     iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
115 	     iov_iter_count(&con->v2.in_iter));
116 	ret = do_recvmsg(con->sock, &con->v2.in_iter);
117 	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
118 	     iov_iter_count(&con->v2.in_iter));
119 	return ret;
120 }
121 
122 static int do_sendmsg(struct socket *sock, struct iov_iter *it)
123 {
124 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
125 	int ret;
126 
127 	msg.msg_iter = *it;
128 	while (iov_iter_count(it)) {
129 		ret = sock_sendmsg(sock, &msg);
130 		if (ret <= 0) {
131 			if (ret == -EAGAIN)
132 				ret = 0;
133 			return ret;
134 		}
135 
136 		iov_iter_advance(it, ret);
137 	}
138 
139 	WARN_ON(msg_data_left(&msg));
140 	return 1;
141 }
142 
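/*
 * Like do_sendmsg(), but sends from a bvec iterator one bio_vec at a
 * time so that MSG_SPLICE_PAGES can be toggled per page.  Same return
 * convention as do_sendmsg().
 */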
143 static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
144 {
145 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
146 	struct bio_vec bv;
147 	int ret;
148 
149 	if (WARN_ON(!iov_iter_is_bvec(it)))
150 		return -EINVAL;
151 
152 	while (iov_iter_count(it)) {
153 		/* iov_iter_iovec() for ITER_BVEC */
154 		bvec_set_page(&bv, it->bvec->bv_page,
155 			      min(iov_iter_count(it),
156 				  it->bvec->bv_len - it->iov_offset),
157 			      it->bvec->bv_offset + it->iov_offset);
158 
159 		/*
160 		 * MSG_SPLICE_PAGES cannot properly handle pages with
161 		 * page_count == 0, we need to fall back to sendmsg if
162 		 * that's the case.
163 		 *
164 		 * Same goes for slab pages: skb_can_coalesce() allows
165 		 * coalescing neighboring slab objects into a single frag
166 		 * which triggers one of hardened usercopy checks.
167 		 */
168 		if (sendpage_ok(bv.bv_page))
169 			msg.msg_flags |= MSG_SPLICE_PAGES;
170 		else
171 			msg.msg_flags &= ~MSG_SPLICE_PAGES;
172 
173 		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len);
174 		ret = sock_sendmsg(sock, &msg);
175 		if (ret <= 0) {
176 			if (ret == -EAGAIN)
177 				ret = 0;
178 			return ret;
179 		}
180 
181 		iov_iter_advance(it, ret);
182 	}
183 
184 	return 1;
185 }
186 
187 /*
188  * Write as much as possible.  The socket is expected to be corked,
189  * so we don't bother with MSG_MORE here.
190  *
191  * Return:
192  *   1 - done, nothing (else) to write
193  *   0 - socket is full, need to wait
194  *  <0 - error
195  */
196 static int ceph_tcp_send(struct ceph_connection *con)
197 {
198 	int ret;
199 
200 	dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
201 	     iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
202 	if (con->v2.out_iter_sendpage)
203 		ret = do_try_sendpage(con->sock, &con->v2.out_iter);
204 	else
205 		ret = do_sendmsg(con->sock, &con->v2.out_iter);
206 	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
207 	     iov_iter_count(&con->v2.out_iter));
208 	return ret;
209 }
210 
211 static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
212 {
213 	BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
214 	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
215 
216 	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
217 	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
218 	con->v2.in_kvec_cnt++;
219 
220 	con->v2.in_iter.nr_segs++;
221 	con->v2.in_iter.count += len;
222 }
223 
224 static void reset_in_kvecs(struct ceph_connection *con)
225 {
226 	WARN_ON(iov_iter_count(&con->v2.in_iter));
227 
228 	con->v2.in_kvec_cnt = 0;
229 	iov_iter_kvec(&con->v2.in_iter, ITER_DEST, con->v2.in_kvecs, 0, 0);
230 }
231 
232 static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
233 {
234 	WARN_ON(iov_iter_count(&con->v2.in_iter));
235 
236 	con->v2.in_bvec = *bv;
237 	iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len);
238 }
239 
240 static void set_in_skip(struct ceph_connection *con, int len)
241 {
242 	WARN_ON(iov_iter_count(&con->v2.in_iter));
243 
244 	dout("%s con %p len %d\n", __func__, con, len);
245 	iov_iter_discard(&con->v2.in_iter, ITER_DEST, len);
246 }
247 
248 static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
249 {
250 	BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
251 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
252 	WARN_ON(con->v2.out_zero);
253 
254 	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
255 	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
256 	con->v2.out_kvec_cnt++;
257 
258 	con->v2.out_iter.nr_segs++;
259 	con->v2.out_iter.count += len;
260 }
261 
262 static void reset_out_kvecs(struct ceph_connection *con)
263 {
264 	WARN_ON(iov_iter_count(&con->v2.out_iter));
265 	WARN_ON(con->v2.out_zero);
266 
267 	con->v2.out_kvec_cnt = 0;
268 
269 	iov_iter_kvec(&con->v2.out_iter, ITER_SOURCE, con->v2.out_kvecs, 0, 0);
270 	con->v2.out_iter_sendpage = false;
271 }
272 
273 static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
274 			 bool zerocopy)
275 {
276 	WARN_ON(iov_iter_count(&con->v2.out_iter));
277 	WARN_ON(con->v2.out_zero);
278 
279 	con->v2.out_bvec = *bv;
280 	con->v2.out_iter_sendpage = zerocopy;
281 	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
282 		      con->v2.out_bvec.bv_len);
283 }
284 
285 static void set_out_bvec_zero(struct ceph_connection *con)
286 {
287 	WARN_ON(iov_iter_count(&con->v2.out_iter));
288 	WARN_ON(!con->v2.out_zero);
289 
290 	bvec_set_page(&con->v2.out_bvec, ceph_zero_page,
291 		      min(con->v2.out_zero, (int)PAGE_SIZE), 0);
292 	con->v2.out_iter_sendpage = true;
293 	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
294 		      con->v2.out_bvec.bv_len);
295 }
296 
297 static void out_zero_add(struct ceph_connection *con, int len)
298 {
299 	dout("%s con %p len %d\n", __func__, con, len);
300 	con->v2.out_zero += len;
301 }
302 
303 static void *alloc_conn_buf(struct ceph_connection *con, int len)
304 {
305 	void *buf;
306 
307 	dout("%s con %p len %d\n", __func__, con, len);
308 
309 	if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))
310 		return NULL;
311 
312 	buf = kvmalloc(len, GFP_NOIO);
313 	if (!buf)
314 		return NULL;
315 
316 	con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;
317 	return buf;
318 }
319 
320 static void free_conn_bufs(struct ceph_connection *con)
321 {
322 	while (con->v2.conn_buf_cnt)
323 		kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);
324 }
325 
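/*
 * The in/out sign kvecs record the pre-auth exchange (banner, hello and
 * auth frames) so that it can be HMAC'ed for the auth signature check.
 */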
326 static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
327 {
328 	BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));
329 
330 	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
331 	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
332 	con->v2.in_sign_kvec_cnt++;
333 }
334 
335 static void clear_in_sign_kvecs(struct ceph_connection *con)
336 {
337 	con->v2.in_sign_kvec_cnt = 0;
338 }
339 
340 static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
341 {
342 	BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));
343 
344 	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
345 	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
346 	con->v2.out_sign_kvec_cnt++;
347 }
348 
349 static void clear_out_sign_kvecs(struct ceph_connection *con)
350 {
351 	con->v2.out_sign_kvec_cnt = 0;
352 }
353 
354 static bool con_secure(struct ceph_connection *con)
355 {
356 	return con->v2.con_mode == CEPH_CON_MODE_SECURE;
357 }
358 
359 static int front_len(const struct ceph_msg *msg)
360 {
361 	return le32_to_cpu(msg->hdr.front_len);
362 }
363 
364 static int middle_len(const struct ceph_msg *msg)
365 {
366 	return le32_to_cpu(msg->hdr.middle_len);
367 }
368 
369 static int data_len(const struct ceph_msg *msg)
370 {
371 	return le32_to_cpu(msg->hdr.data_len);
372 }
373 
374 static bool need_padding(int len)
375 {
376 	return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);
377 }
378 
379 static int padded_len(int len)
380 {
381 	return ALIGN(len, CEPH_GCM_BLOCK_LEN);
382 }
383 
384 static int padding_len(int len)
385 {
386 	return padded_len(len) - len;
387 }
388 
389 /* preamble + control segment */
390 static int head_onwire_len(int ctrl_len, bool secure)
391 {
392 	int head_len;
393 	int rem_len;
394 
395 	BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
396 
397 	if (secure) {
398 		head_len = CEPH_PREAMBLE_SECURE_LEN;
399 		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
400 			rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
401 			head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
402 		}
403 	} else {
404 		head_len = CEPH_PREAMBLE_PLAIN_LEN;
405 		if (ctrl_len)
406 			head_len += ctrl_len + CEPH_CRC_LEN;
407 	}
408 	return head_len;
409 }
410 
411 /* front, middle and data segments + epilogue */
412 static int __tail_onwire_len(int front_len, int middle_len, int data_len,
413 			     bool secure)
414 {
415 	BUG_ON(front_len < 0 || front_len > CEPH_MSG_MAX_FRONT_LEN ||
416 	       middle_len < 0 || middle_len > CEPH_MSG_MAX_MIDDLE_LEN ||
417 	       data_len < 0 || data_len > CEPH_MSG_MAX_DATA_LEN);
418 
419 	if (!front_len && !middle_len && !data_len)
420 		return 0;
421 
422 	if (!secure)
423 		return front_len + middle_len + data_len +
424 		       CEPH_EPILOGUE_PLAIN_LEN;
425 
426 	return padded_len(front_len) + padded_len(middle_len) +
427 	       padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;
428 }
429 
430 static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
431 {
432 	return __tail_onwire_len(front_len(msg), middle_len(msg),
433 				 data_len(msg), secure);
434 }
435 
436 /* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
437 #define MESSAGE_HEAD_PLAIN_LEN	(CEPH_PREAMBLE_PLAIN_LEN +		\
438 				 sizeof(struct ceph_msg_header2) +	\
439 				 CEPH_CRC_LEN)
440 
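/*
 * Per-segment alignment advertised in the preamble: control, front and
 * middle segments are pointer-aligned, the data segment is page-aligned.
 */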
441 static const int frame_aligns[] = {
442 	sizeof(void *),
443 	sizeof(void *),
444 	sizeof(void *),
445 	PAGE_SIZE
446 };
447 
448 /*
449  * Discards trailing empty segments, unless there is just one segment.
450  * A frame always has at least one (possibly empty) segment.
451  */
452 static int calc_segment_count(const int *lens, int len_cnt)
453 {
454 	int i;
455 
456 	for (i = len_cnt - 1; i >= 0; i--) {
457 		if (lens[i])
458 			return i + 1;
459 	}
460 
461 	return 1;
462 }
463 
464 static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
465 			    const int *lens, int len_cnt)
466 {
467 	int i;
468 
469 	memset(desc, 0, sizeof(*desc));
470 
471 	desc->fd_tag = tag;
472 	desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
473 	BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
474 	for (i = 0; i < desc->fd_seg_cnt; i++) {
475 		desc->fd_lens[i] = lens[i];
476 		desc->fd_aligns[i] = frame_aligns[i];
477 	}
478 }
479 
480 /*
481  * Preamble crc covers everything up to itself (28 bytes) and
482  * is calculated and verified irrespective of the connection mode
483  * (i.e. even if the frame is encrypted).
484  */
485 static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
486 {
487 	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
488 	void *start = p;
489 	int i;
490 
491 	memset(p, 0, CEPH_PREAMBLE_LEN);
492 
493 	ceph_encode_8(&p, desc->fd_tag);
494 	ceph_encode_8(&p, desc->fd_seg_cnt);
495 	for (i = 0; i < desc->fd_seg_cnt; i++) {
496 		ceph_encode_32(&p, desc->fd_lens[i]);
497 		ceph_encode_16(&p, desc->fd_aligns[i]);
498 	}
499 
500 	put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
501 }
502 
503 static int decode_preamble(void *p, struct ceph_frame_desc *desc)
504 {
505 	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
506 	u32 crc, expected_crc;
507 	int i;
508 
509 	crc = crc32c(0, p, crcp - p);
510 	expected_crc = get_unaligned_le32(crcp);
511 	if (crc != expected_crc) {
512 		pr_err("bad preamble crc, calculated %u, expected %u\n",
513 		       crc, expected_crc);
514 		return -EBADMSG;
515 	}
516 
517 	memset(desc, 0, sizeof(*desc));
518 
519 	desc->fd_tag = ceph_decode_8(&p);
520 	desc->fd_seg_cnt = ceph_decode_8(&p);
521 	if (desc->fd_seg_cnt < 1 ||
522 	    desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
523 		pr_err("bad segment count %d\n", desc->fd_seg_cnt);
524 		return -EINVAL;
525 	}
526 	for (i = 0; i < desc->fd_seg_cnt; i++) {
527 		desc->fd_lens[i] = ceph_decode_32(&p);
528 		desc->fd_aligns[i] = ceph_decode_16(&p);
529 	}
530 
531 	if (desc->fd_lens[0] < 0 ||
532 	    desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
533 		pr_err("bad control segment length %d\n", desc->fd_lens[0]);
534 		return -EINVAL;
535 	}
536 	if (desc->fd_lens[1] < 0 ||
537 	    desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
538 		pr_err("bad front segment length %d\n", desc->fd_lens[1]);
539 		return -EINVAL;
540 	}
541 	if (desc->fd_lens[2] < 0 ||
542 	    desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
543 		pr_err("bad middle segment length %d\n", desc->fd_lens[2]);
544 		return -EINVAL;
545 	}
546 	if (desc->fd_lens[3] < 0 ||
547 	    desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
548 		pr_err("bad data segment length %d\n", desc->fd_lens[3]);
549 		return -EINVAL;
550 	}
551 
552 	/*
553 	 * This would fire for FRAME_TAG_WAIT (it has one empty
554 	 * segment), but we should never get it as client.
555 	 */
556 	if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
557 		pr_err("last segment empty, segment count %d\n",
558 		       desc->fd_seg_cnt);
559 		return -EINVAL;
560 	}
561 
562 	return 0;
563 }
564 
565 static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
566 {
567 	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
568 						 FRAME_LATE_STATUS_COMPLETE;
569 	cpu_to_le32s(&con->v2.out_epil.front_crc);
570 	cpu_to_le32s(&con->v2.out_epil.middle_crc);
571 	cpu_to_le32s(&con->v2.out_epil.data_crc);
572 }
573 
574 static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
575 {
576 	memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
577 	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
578 						 FRAME_LATE_STATUS_COMPLETE;
579 }
580 
581 static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
582 			   u32 *data_crc)
583 {
584 	u8 late_status;
585 
586 	late_status = ceph_decode_8(&p);
587 	if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
588 			FRAME_LATE_STATUS_COMPLETE) {
589 		/* we should never get an aborted message as client */
590 		pr_err("bad late_status 0x%x\n", late_status);
591 		return -EINVAL;
592 	}
593 
594 	if (front_crc && middle_crc && data_crc) {
595 		*front_crc = ceph_decode_32(&p);
596 		*middle_crc = ceph_decode_32(&p);
597 		*data_crc = ceph_decode_32(&p);
598 	}
599 
600 	return 0;
601 }
602 
603 static void fill_header(struct ceph_msg_header *hdr,
604 			const struct ceph_msg_header2 *hdr2,
605 			int front_len, int middle_len, int data_len,
606 			const struct ceph_entity_name *peer_name)
607 {
608 	hdr->seq = hdr2->seq;
609 	hdr->tid = hdr2->tid;
610 	hdr->type = hdr2->type;
611 	hdr->priority = hdr2->priority;
612 	hdr->version = hdr2->version;
613 	hdr->front_len = cpu_to_le32(front_len);
614 	hdr->middle_len = cpu_to_le32(middle_len);
615 	hdr->data_len = cpu_to_le32(data_len);
616 	hdr->data_off = hdr2->data_off;
617 	hdr->src = *peer_name;
618 	hdr->compat_version = hdr2->compat_version;
619 	hdr->reserved = 0;
620 	hdr->crc = 0;
621 }
622 
623 static void fill_header2(struct ceph_msg_header2 *hdr2,
624 			 const struct ceph_msg_header *hdr, u64 ack_seq)
625 {
626 	hdr2->seq = hdr->seq;
627 	hdr2->tid = hdr->tid;
628 	hdr2->type = hdr->type;
629 	hdr2->priority = hdr->priority;
630 	hdr2->version = hdr->version;
631 	hdr2->data_pre_padding_len = 0;
632 	hdr2->data_off = hdr->data_off;
633 	hdr2->ack_seq = cpu_to_le64(ack_seq);
634 	hdr2->flags = 0;
635 	hdr2->compat_version = hdr->compat_version;
636 	hdr2->reserved = 0;
637 }
638 
639 static int verify_control_crc(struct ceph_connection *con)
640 {
641 	int ctrl_len = con->v2.in_desc.fd_lens[0];
642 	u32 crc, expected_crc;
643 
644 	WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
645 	WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);
646 
647 	crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
648 	expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
649 	if (crc != expected_crc) {
650 		pr_err("bad control crc, calculated %u, expected %u\n",
651 		       crc, expected_crc);
652 		return -EBADMSG;
653 	}
654 
655 	return 0;
656 }
657 
658 static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
659 				u32 middle_crc, u32 data_crc)
660 {
661 	if (front_len(con->in_msg)) {
662 		con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
663 					   front_len(con->in_msg));
664 	} else {
665 		WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
666 		con->in_front_crc = -1;
667 	}
668 
669 	if (middle_len(con->in_msg))
670 		con->in_middle_crc = crc32c(-1,
671 					    con->in_msg->middle->vec.iov_base,
672 					    middle_len(con->in_msg));
673 	else if (data_len(con->in_msg))
674 		con->in_middle_crc = -1;
675 	else
676 		con->in_middle_crc = 0;
677 
678 	if (!data_len(con->in_msg))
679 		con->in_data_crc = 0;
680 
681 	dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
682 	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
683 
684 	if (con->in_front_crc != front_crc) {
685 		pr_err("bad front crc, calculated %u, expected %u\n",
686 		       con->in_front_crc, front_crc);
687 		return -EBADMSG;
688 	}
689 	if (con->in_middle_crc != middle_crc) {
690 		pr_err("bad middle crc, calculated %u, expected %u\n",
691 		       con->in_middle_crc, middle_crc);
692 		return -EBADMSG;
693 	}
694 	if (con->in_data_crc != data_crc) {
695 		pr_err("bad data crc, calculated %u, expected %u\n",
696 		       con->in_data_crc, data_crc);
697 		return -EBADMSG;
698 	}
699 
700 	return 0;
701 }
702 
703 static int setup_crypto(struct ceph_connection *con,
704 			const u8 *session_key, int session_key_len,
705 			const u8 *con_secret, int con_secret_len)
706 {
707 	unsigned int noio_flag;
708 	int ret;
709 
710 	dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
711 	     __func__, con, con->v2.con_mode, session_key_len, con_secret_len);
712 	WARN_ON(con->v2.hmac_key_set || con->v2.gcm_tfm || con->v2.gcm_req);
713 
714 	if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
715 	    con->v2.con_mode != CEPH_CON_MODE_SECURE) {
716 		pr_err("bad con_mode %d\n", con->v2.con_mode);
717 		return -EINVAL;
718 	}
719 
720 	if (!session_key_len) {
721 		WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
722 		WARN_ON(con_secret_len);
723 		return 0;  /* auth_none */
724 	}
725 
726 	hmac_sha256_preparekey(&con->v2.hmac_key, session_key, session_key_len);
727 	con->v2.hmac_key_set = true;
728 
729 	if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
730 		WARN_ON(con_secret_len);
731 		return 0;  /* auth_x, plain mode */
732 	}
733 
734 	if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
735 		pr_err("con_secret too small %d\n", con_secret_len);
736 		return -EINVAL;
737 	}
738 
739 	noio_flag = memalloc_noio_save();
740 	con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
741 	memalloc_noio_restore(noio_flag);
742 	if (IS_ERR(con->v2.gcm_tfm)) {
743 		ret = PTR_ERR(con->v2.gcm_tfm);
744 		con->v2.gcm_tfm = NULL;
745 		pr_err("failed to allocate gcm tfm context: %d\n", ret);
746 		return ret;
747 	}
748 
749 	WARN_ON((unsigned long)con_secret &
750 		crypto_aead_alignmask(con->v2.gcm_tfm));
751 	ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
752 	if (ret) {
753 		pr_err("failed to set gcm key: %d\n", ret);
754 		return ret;
755 	}
756 
757 	WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
758 	ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
759 	if (ret) {
760 		pr_err("failed to set gcm tag size: %d\n", ret);
761 		return ret;
762 	}
763 
764 	con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
765 	if (!con->v2.gcm_req) {
766 		pr_err("failed to allocate gcm request\n");
767 		return -ENOMEM;
768 	}
769 
770 	crypto_init_wait(&con->v2.gcm_wait);
771 	aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
772 				  crypto_req_done, &con->v2.gcm_wait);
773 
774 	memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
775 	       CEPH_GCM_IV_LEN);
776 	memcpy(&con->v2.out_gcm_nonce,
777 	       con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
778 	       CEPH_GCM_IV_LEN);
779 	return 0;  /* auth_x, secure mode */
780 }
781 
782 static void ceph_hmac_sha256(struct ceph_connection *con,
783 			     const struct kvec *kvecs, int kvec_cnt,
784 			     u8 hmac[SHA256_DIGEST_SIZE])
785 {
786 	struct hmac_sha256_ctx ctx;
787 	int i;
788 
789 	dout("%s con %p hmac_key_set %d kvec_cnt %d\n", __func__, con,
790 	     con->v2.hmac_key_set, kvec_cnt);
791 
792 	if (!con->v2.hmac_key_set) {
793 		memset(hmac, 0, SHA256_DIGEST_SIZE);
794 		return;  /* auth_none */
795 	}
796 
797 	/* auth_x, both plain and secure modes */
798 	hmac_sha256_init(&ctx, &con->v2.hmac_key);
799 	for (i = 0; i < kvec_cnt; i++)
800 		hmac_sha256_update(&ctx, kvecs[i].iov_base, kvecs[i].iov_len);
801 	hmac_sha256_final(&ctx, hmac);
802 }
803 
804 static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
805 {
806 	u64 counter;
807 
808 	counter = le64_to_cpu(nonce->counter);
809 	nonce->counter = cpu_to_le64(counter + 1);
810 }
811 
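/*
 * Encrypt or decrypt via the connection's AEAD request.  The matching
 * nonce counter is bumped after each successful operation so that every
 * frame (or frame piece) uses a fresh IV.
 */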
812 static int gcm_crypt(struct ceph_connection *con, bool encrypt,
813 		     struct scatterlist *src, struct scatterlist *dst,
814 		     int src_len)
815 {
816 	struct ceph_gcm_nonce *nonce;
817 	int ret;
818 
819 	nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;
820 
821 	aead_request_set_ad(con->v2.gcm_req, 0);  /* no AAD */
822 	aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
823 	ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
824 					crypto_aead_decrypt(con->v2.gcm_req),
825 			      &con->v2.gcm_wait);
826 	if (ret)
827 		return ret;
828 
829 	gcm_inc_nonce(nonce);
830 	return 0;
831 }
832 
833 static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
834 			struct bio_vec *bv)
835 {
836 	struct page *page;
837 	size_t off, len;
838 
839 	WARN_ON(!cursor->total_resid);
840 
841 	/* skip zero-length data items */
842 	while (!cursor->resid)
843 		ceph_msg_data_advance(cursor, 0);
844 
845 	/* get a piece of data, cursor isn't advanced */
846 	page = ceph_msg_data_next(cursor, &off, &len);
847 	bvec_set_page(bv, page, len, off);
848 }
849 
850 static int calc_sg_cnt(void *buf, int buf_len)
851 {
852 	int sg_cnt;
853 
854 	if (!buf_len)
855 		return 0;
856 
857 	sg_cnt = need_padding(buf_len) ? 1 : 0;
858 	if (is_vmalloc_addr(buf)) {
859 		WARN_ON(offset_in_page(buf));
860 		sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;
861 	} else {
862 		sg_cnt++;
863 	}
864 
865 	return sg_cnt;
866 }
867 
868 static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
869 {
870 	int data_len = cursor->total_resid;
871 	struct bio_vec bv;
872 	int sg_cnt;
873 
874 	if (!data_len)
875 		return 0;
876 
877 	sg_cnt = need_padding(data_len) ? 1 : 0;
878 	do {
879 		get_bvec_at(cursor, &bv);
880 		sg_cnt++;
881 
882 		ceph_msg_data_advance(cursor, bv.bv_len);
883 	} while (cursor->total_resid);
884 
885 	return sg_cnt;
886 }
887 
888 static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
889 {
890 	void *end = buf + buf_len;
891 	struct page *page;
892 	int len;
893 	void *p;
894 
895 	if (!buf_len)
896 		return;
897 
898 	if (is_vmalloc_addr(buf)) {
899 		p = buf;
900 		do {
901 			page = vmalloc_to_page(p);
902 			len = min_t(int, end - p, PAGE_SIZE);
903 			WARN_ON(!page || !len || offset_in_page(p));
904 			sg_set_page(*sg, page, len, 0);
905 			*sg = sg_next(*sg);
906 			p += len;
907 		} while (p != end);
908 	} else {
909 		sg_set_buf(*sg, buf, buf_len);
910 		*sg = sg_next(*sg);
911 	}
912 
913 	if (need_padding(buf_len)) {
914 		sg_set_buf(*sg, pad, padding_len(buf_len));
915 		*sg = sg_next(*sg);
916 	}
917 }
918 
919 static void init_sgs_cursor(struct scatterlist **sg,
920 			    struct ceph_msg_data_cursor *cursor, u8 *pad)
921 {
922 	int data_len = cursor->total_resid;
923 	struct bio_vec bv;
924 
925 	if (!data_len)
926 		return;
927 
928 	do {
929 		get_bvec_at(cursor, &bv);
930 		sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
931 		*sg = sg_next(*sg);
932 
933 		ceph_msg_data_advance(cursor, bv.bv_len);
934 	} while (cursor->total_resid);
935 
936 	if (need_padding(data_len)) {
937 		sg_set_buf(*sg, pad, padding_len(data_len));
938 		*sg = sg_next(*sg);
939 	}
940 }
941 
942 /**
943  * init_sgs_pages: set up scatterlist on an array of page pointers
944  * @sg:		scatterlist to populate
945  * @pages:	pointer to page array
946  * @dpos:	position in the array to start (bytes)
947  * @dlen:	len to add to sg (bytes)
948  * @pad:	pointer to pad destination (if any)
949  *
950  * Populate the scatterlist from the page array, starting at an arbitrary
951  * byte in the array and running for a specified length.
952  */
953 static void init_sgs_pages(struct scatterlist **sg, struct page **pages,
954 			   int dpos, int dlen, u8 *pad)
955 {
956 	int idx = dpos >> PAGE_SHIFT;
957 	int off = offset_in_page(dpos);
958 	int resid = dlen;
959 
960 	do {
961 		int len = min(resid, (int)PAGE_SIZE - off);
962 
963 		sg_set_page(*sg, pages[idx], len, off);
964 		*sg = sg_next(*sg);
965 		off = 0;
966 		++idx;
967 		resid -= len;
968 	} while (resid);
969 
970 	if (need_padding(dlen)) {
971 		sg_set_buf(*sg, pad, padding_len(dlen));
972 		*sg = sg_next(*sg);
973 	}
974 }
975 
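/*
 * Build a scatterlist covering the message tail: front, middle and data
 * segments with their GCM block padding, followed by the epilogue (and
 * room for the auth tag when add_tag is set).
 */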
976 static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
977 			     u8 *front_pad, u8 *middle_pad, u8 *data_pad,
978 			     void *epilogue, struct page **pages, int dpos,
979 			     bool add_tag)
980 {
981 	struct ceph_msg_data_cursor cursor;
982 	struct scatterlist *cur_sg;
983 	int dlen = data_len(msg);
984 	int sg_cnt;
985 	int ret;
986 
987 	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
988 		return 0;
989 
990 	sg_cnt = 1;  /* epilogue + [auth tag] */
991 	if (front_len(msg))
992 		sg_cnt += calc_sg_cnt(msg->front.iov_base,
993 				      front_len(msg));
994 	if (middle_len(msg))
995 		sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
996 				      middle_len(msg));
997 	if (dlen) {
998 		if (pages) {
999 			sg_cnt += calc_pages_for(dpos, dlen);
1000 			if (need_padding(dlen))
1001 				sg_cnt++;
1002 		} else {
1003 			ceph_msg_data_cursor_init(&cursor, msg, dlen);
1004 			sg_cnt += calc_sg_cnt_cursor(&cursor);
1005 		}
1006 	}
1007 
1008 	ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
1009 	if (ret)
1010 		return ret;
1011 
1012 	cur_sg = sgt->sgl;
1013 	if (front_len(msg))
1014 		init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
1015 			 front_pad);
1016 	if (middle_len(msg))
1017 		init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
1018 			 middle_pad);
1019 	if (dlen) {
1020 		if (pages) {
1021 			init_sgs_pages(&cur_sg, pages, dpos, dlen, data_pad);
1022 		} else {
1023 			ceph_msg_data_cursor_init(&cursor, msg, dlen);
1024 			init_sgs_cursor(&cur_sg, &cursor, data_pad);
1025 		}
1026 	}
1027 
1028 	WARN_ON(!sg_is_last(cur_sg));
1029 	sg_set_buf(cur_sg, epilogue,
1030 		   CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
1031 	return 0;
1032 }
1033 
1034 static int decrypt_preamble(struct ceph_connection *con)
1035 {
1036 	struct scatterlist sg;
1037 
1038 	sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
1039 	return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);
1040 }
1041 
1042 static int decrypt_control_remainder(struct ceph_connection *con)
1043 {
1044 	int ctrl_len = con->v2.in_desc.fd_lens[0];
1045 	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1046 	int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
1047 	struct scatterlist sgs[2];
1048 
1049 	WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
1050 	WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);
1051 
1052 	sg_init_table(sgs, 2);
1053 	sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
1054 	sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);
1055 
1056 	return gcm_crypt(con, false, sgs, sgs,
1057 			 padded_len(rem_len) + CEPH_GCM_TAG_LEN);
1058 }
1059 
1060 /* Process sparse read data that lives in a buffer */
1061 static int process_v2_sparse_read(struct ceph_connection *con,
1062 				  struct page **pages, int spos)
1063 {
1064 	struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
1065 	int ret;
1066 
1067 	for (;;) {
1068 		char *buf = NULL;
1069 
1070 		ret = con->ops->sparse_read(con, cursor, &buf);
1071 		if (ret <= 0)
1072 			return ret;
1073 
1074 		dout("%s: sparse_read return %x buf %p\n", __func__, ret, buf);
1075 
1076 		do {
1077 			int idx = spos >> PAGE_SHIFT;
1078 			int soff = offset_in_page(spos);
1079 			struct page *spage = con->v2.in_enc_pages[idx];
1080 			int len = min_t(int, ret, PAGE_SIZE - soff);
1081 
1082 			if (buf) {
1083 				memcpy_from_page(buf, spage, soff, len);
1084 				buf += len;
1085 			} else {
1086 				struct bio_vec bv;
1087 
1088 				get_bvec_at(cursor, &bv);
1089 				len = min_t(int, len, bv.bv_len);
1090 				memcpy_page(bv.bv_page, bv.bv_offset,
1091 					    spage, soff, len);
1092 				ceph_msg_data_advance(cursor, len);
1093 			}
1094 			spos += len;
1095 			ret -= len;
1096 		} while (ret);
1097 	}
1098 }
1099 
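/*
 * Decrypt the message tail from in_enc_pages into the message buffers
 * (for sparse reads the data segment is decrypted in place and copied
 * out by process_v2_sparse_read()), then release the encrypted pages.
 */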
1100 static int decrypt_tail(struct ceph_connection *con)
1101 {
1102 	struct sg_table enc_sgt = {};
1103 	struct sg_table sgt = {};
1104 	struct page **pages = NULL;
1105 	bool sparse = !!con->in_msg->sparse_read_total;
1106 	int dpos = 0;
1107 	int tail_len;
1108 	int ret;
1109 
1110 	tail_len = tail_onwire_len(con->in_msg, true);
1111 	ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
1112 					con->v2.in_enc_page_cnt, 0, tail_len,
1113 					GFP_NOIO);
1114 	if (ret)
1115 		goto out;
1116 
1117 	if (sparse) {
1118 		dpos = padded_len(front_len(con->in_msg) + padded_len(middle_len(con->in_msg)));
1119 		pages = con->v2.in_enc_pages;
1120 	}
1121 
1122 	ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
1123 				MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
1124 				con->v2.in_buf, pages, dpos, true);
1125 	if (ret)
1126 		goto out;
1127 
1128 	dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
1129 	     con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
1130 	ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);
1131 	if (ret)
1132 		goto out;
1133 
1134 	if (sparse && data_len(con->in_msg)) {
1135 		ret = process_v2_sparse_read(con, con->v2.in_enc_pages, dpos);
1136 		if (ret)
1137 			goto out;
1138 	}
1139 
1140 	WARN_ON(!con->v2.in_enc_page_cnt);
1141 	ceph_release_page_vector(con->v2.in_enc_pages,
1142 				 con->v2.in_enc_page_cnt);
1143 	con->v2.in_enc_pages = NULL;
1144 	con->v2.in_enc_page_cnt = 0;
1145 
1146 out:
1147 	sg_free_table(&sgt);
1148 	sg_free_table(&enc_sgt);
1149 	return ret;
1150 }
1151 
1152 static int prepare_banner(struct ceph_connection *con)
1153 {
1154 	int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;
1155 	void *buf, *p;
1156 
1157 	buf = alloc_conn_buf(con, buf_len);
1158 	if (!buf)
1159 		return -ENOMEM;
1160 
1161 	p = buf;
1162 	ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
1163 	ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
1164 	ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
1165 	ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
1166 	WARN_ON(p != buf + buf_len);
1167 
1168 	add_out_kvec(con, buf, buf_len);
1169 	add_out_sign_kvec(con, buf, buf_len);
1170 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1171 	return 0;
1172 }
1173 
1174 /*
1175  * base:
1176  *   preamble
1177  *   control body (ctrl_len bytes)
1178  *   space for control crc
1179  *
1180  * extdata (optional):
1181  *   control body (extdata_len bytes)
1182  *
1183  * Compute control crc and gather base and extdata into:
1184  *
1185  *   preamble
1186  *   control body (ctrl_len + extdata_len bytes)
1187  *   control crc
1188  *
1189  * Preamble should already be encoded at the start of base.
1190  */
1191 static void prepare_head_plain(struct ceph_connection *con, void *base,
1192 			       int ctrl_len, void *extdata, int extdata_len,
1193 			       bool to_be_signed)
1194 {
1195 	int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
1196 	void *crcp = base + base_len - CEPH_CRC_LEN;
1197 	u32 crc;
1198 
1199 	crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
1200 	if (extdata_len)
1201 		crc = crc32c(crc, extdata, extdata_len);
1202 	put_unaligned_le32(crc, crcp);
1203 
1204 	if (!extdata_len) {
1205 		add_out_kvec(con, base, base_len);
1206 		if (to_be_signed)
1207 			add_out_sign_kvec(con, base, base_len);
1208 		return;
1209 	}
1210 
1211 	add_out_kvec(con, base, crcp - base);
1212 	add_out_kvec(con, extdata, extdata_len);
1213 	add_out_kvec(con, crcp, CEPH_CRC_LEN);
1214 	if (to_be_signed) {
1215 		add_out_sign_kvec(con, base, crcp - base);
1216 		add_out_sign_kvec(con, extdata, extdata_len);
1217 		add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
1218 	}
1219 }
1220 
1221 static int prepare_head_secure_small(struct ceph_connection *con,
1222 				     void *base, int ctrl_len)
1223 {
1224 	struct scatterlist sg;
1225 	int ret;
1226 
1227 	/* inline buffer padding? */
1228 	if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
1229 		memset(CTRL_BODY(base) + ctrl_len, 0,
1230 		       CEPH_PREAMBLE_INLINE_LEN - ctrl_len);
1231 
1232 	sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
1233 	ret = gcm_crypt(con, true, &sg, &sg,
1234 			CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);
1235 	if (ret)
1236 		return ret;
1237 
1238 	add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
1239 	return 0;
1240 }
1241 
1242 /*
1243  * base:
1244  *   preamble
1245  *   control body (ctrl_len bytes)
1246  *   space for padding, if needed
1247  *   space for control remainder auth tag
1248  *   space for preamble auth tag
1249  *
1250  * Encrypt preamble and the inline portion, then encrypt the remainder
1251  * and gather into:
1252  *
1253  *   preamble
1254  *   control body (48 bytes)
1255  *   preamble auth tag
1256  *   control body (ctrl_len - 48 bytes)
1257  *   zero padding, if needed
1258  *   control remainder auth tag
1259  *
1260  * Preamble should already be encoded at the start of base.
1261  */
1262 static int prepare_head_secure_big(struct ceph_connection *con,
1263 				   void *base, int ctrl_len)
1264 {
1265 	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1266 	void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
1267 	void *rem_tag = rem + padded_len(rem_len);
1268 	void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
1269 	struct scatterlist sgs[2];
1270 	int ret;
1271 
1272 	sg_init_table(sgs, 2);
1273 	sg_set_buf(&sgs[0], base, rem - base);
1274 	sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
1275 	ret = gcm_crypt(con, true, sgs, sgs, rem - base);
1276 	if (ret)
1277 		return ret;
1278 
1279 	/* control remainder padding? */
1280 	if (need_padding(rem_len))
1281 		memset(rem + rem_len, 0, padding_len(rem_len));
1282 
1283 	sg_init_one(&sgs[0], rem, pmbl_tag - rem);
1284 	ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);
1285 	if (ret)
1286 		return ret;
1287 
1288 	add_out_kvec(con, base, rem - base);
1289 	add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
1290 	add_out_kvec(con, rem, pmbl_tag - rem);
1291 	return 0;
1292 }
1293 
1294 static int __prepare_control(struct ceph_connection *con, int tag,
1295 			     void *base, int ctrl_len, void *extdata,
1296 			     int extdata_len, bool to_be_signed)
1297 {
1298 	int total_len = ctrl_len + extdata_len;
1299 	struct ceph_frame_desc desc;
1300 	int ret;
1301 
1302 	dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
1303 	     total_len, ctrl_len, extdata_len);
1304 
1305 	/* extdata may be vmalloc'ed but not base */
1306 	if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))
1307 		return -EINVAL;
1308 
1309 	init_frame_desc(&desc, tag, &total_len, 1);
1310 	encode_preamble(&desc, base);
1311 
1312 	if (con_secure(con)) {
1313 		if (WARN_ON(extdata_len || to_be_signed))
1314 			return -EINVAL;
1315 
1316 		if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
1317 			/* fully inlined, inline buffer may need padding */
1318 			ret = prepare_head_secure_small(con, base, ctrl_len);
1319 		else
1320 			/* partially inlined, inline buffer is full */
1321 			ret = prepare_head_secure_big(con, base, ctrl_len);
1322 		if (ret)
1323 			return ret;
1324 	} else {
1325 		prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,
1326 				   to_be_signed);
1327 	}
1328 
1329 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1330 	return 0;
1331 }
1332 
1333 static int prepare_control(struct ceph_connection *con, int tag,
1334 			   void *base, int ctrl_len)
1335 {
1336 	return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
1337 }
1338 
1339 static int prepare_hello(struct ceph_connection *con)
1340 {
1341 	void *buf, *p;
1342 	int ctrl_len;
1343 
1344 	ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
1345 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1346 	if (!buf)
1347 		return -ENOMEM;
1348 
1349 	p = CTRL_BODY(buf);
1350 	ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
1351 	ceph_encode_entity_addr(&p, &con->peer_addr);
1352 	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1353 
1354 	return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,
1355 				 NULL, 0, true);
1356 }
1357 
1358 /* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
1359 #define AUTH_BUF_LEN	(512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)
1360 
1361 static int prepare_auth_request(struct ceph_connection *con)
1362 {
1363 	void *authorizer, *authorizer_copy;
1364 	int ctrl_len, authorizer_len;
1365 	void *buf;
1366 	int ret;
1367 
1368 	ctrl_len = AUTH_BUF_LEN;
1369 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1370 	if (!buf)
1371 		return -ENOMEM;
1372 
1373 	mutex_unlock(&con->mutex);
1374 	ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
1375 					 &authorizer, &authorizer_len);
1376 	mutex_lock(&con->mutex);
1377 	if (con->state != CEPH_CON_S_V2_HELLO) {
1378 		dout("%s con %p state changed to %d\n", __func__, con,
1379 		     con->state);
1380 		return -EAGAIN;
1381 	}
1382 
1383 	dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);
1384 	if (ret)
1385 		return ret;
1386 
1387 	authorizer_copy = alloc_conn_buf(con, authorizer_len);
1388 	if (!authorizer_copy)
1389 		return -ENOMEM;
1390 
1391 	memcpy(authorizer_copy, authorizer, authorizer_len);
1392 
1393 	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
1394 				 authorizer_copy, authorizer_len, true);
1395 }
1396 
1397 static int prepare_auth_request_more(struct ceph_connection *con,
1398 				     void *reply, int reply_len)
1399 {
1400 	int ctrl_len, authorizer_len;
1401 	void *authorizer;
1402 	void *buf;
1403 	int ret;
1404 
1405 	ctrl_len = AUTH_BUF_LEN;
1406 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1407 	if (!buf)
1408 		return -ENOMEM;
1409 
1410 	mutex_unlock(&con->mutex);
1411 	ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
1412 					       CTRL_BODY(buf), &ctrl_len,
1413 					       &authorizer, &authorizer_len);
1414 	mutex_lock(&con->mutex);
1415 	if (con->state != CEPH_CON_S_V2_AUTH) {
1416 		dout("%s con %p state changed to %d\n", __func__, con,
1417 		     con->state);
1418 		return -EAGAIN;
1419 	}
1420 
1421 	dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);
1422 	if (ret)
1423 		return ret;
1424 
1425 	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
1426 				 ctrl_len, authorizer, authorizer_len, true);
1427 }
1428 
1429 static int prepare_auth_signature(struct ceph_connection *con)
1430 {
1431 	void *buf;
1432 
1433 	buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
1434 						  con_secure(con)));
1435 	if (!buf)
1436 		return -ENOMEM;
1437 
1438 	ceph_hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
1439 			 CTRL_BODY(buf));
1440 
1441 	return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
1442 			       SHA256_DIGEST_SIZE);
1443 }
1444 
1445 static int prepare_client_ident(struct ceph_connection *con)
1446 {
1447 	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1448 	struct ceph_client *client = from_msgr(con->msgr);
1449 	u64 global_id = ceph_client_gid(client);
1450 	void *buf, *p;
1451 	int ctrl_len;
1452 
1453 	WARN_ON(con->v2.server_cookie);
1454 	WARN_ON(con->v2.connect_seq);
1455 	WARN_ON(con->v2.peer_global_seq);
1456 
1457 	if (!con->v2.client_cookie) {
1458 		do {
1459 			get_random_bytes(&con->v2.client_cookie,
1460 					 sizeof(con->v2.client_cookie));
1461 		} while (!con->v2.client_cookie);
1462 		dout("%s con %p generated cookie 0x%llx\n", __func__, con,
1463 		     con->v2.client_cookie);
1464 	} else {
1465 		dout("%s con %p cookie already set 0x%llx\n", __func__, con,
1466 		     con->v2.client_cookie);
1467 	}
1468 
1469 	dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
1470 	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1471 	     ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
1472 	     global_id, con->v2.global_seq, client->supported_features,
1473 	     client->required_features, con->v2.client_cookie);
1474 
1475 	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
1476 		   ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
1477 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1478 	if (!buf)
1479 		return -ENOMEM;
1480 
1481 	p = CTRL_BODY(buf);
1482 	ceph_encode_8(&p, 2);  /* addrvec marker */
1483 	ceph_encode_32(&p, 1);  /* addr_cnt */
1484 	ceph_encode_entity_addr(&p, my_addr);
1485 	ceph_encode_entity_addr(&p, &con->peer_addr);
1486 	ceph_encode_64(&p, global_id);
1487 	ceph_encode_64(&p, con->v2.global_seq);
1488 	ceph_encode_64(&p, client->supported_features);
1489 	ceph_encode_64(&p, client->required_features);
1490 	ceph_encode_64(&p, 0);  /* flags */
1491 	ceph_encode_64(&p, con->v2.client_cookie);
1492 	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1493 
1494 	return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
1495 }
1496 
1497 static int prepare_session_reconnect(struct ceph_connection *con)
1498 {
1499 	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1500 	void *buf, *p;
1501 	int ctrl_len;
1502 
1503 	WARN_ON(!con->v2.client_cookie);
1504 	WARN_ON(!con->v2.server_cookie);
1505 	WARN_ON(!con->v2.connect_seq);
1506 	WARN_ON(!con->v2.peer_global_seq);
1507 
1508 	dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
1509 	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1510 	     con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
1511 	     con->v2.connect_seq, con->in_seq);
1512 
1513 	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
1514 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1515 	if (!buf)
1516 		return -ENOMEM;
1517 
1518 	p = CTRL_BODY(buf);
1519 	ceph_encode_8(&p, 2);  /* entity_addrvec_t marker */
1520 	ceph_encode_32(&p, 1);  /* my_addrs len */
1521 	ceph_encode_entity_addr(&p, my_addr);
1522 	ceph_encode_64(&p, con->v2.client_cookie);
1523 	ceph_encode_64(&p, con->v2.server_cookie);
1524 	ceph_encode_64(&p, con->v2.global_seq);
1525 	ceph_encode_64(&p, con->v2.connect_seq);
1526 	ceph_encode_64(&p, con->in_seq);
1527 	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1528 
1529 	return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
1530 }
1531 
1532 static int prepare_keepalive2(struct ceph_connection *con)
1533 {
1534 	struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
1535 	struct timespec64 now;
1536 
1537 	ktime_get_real_ts64(&now);
1538 	dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
1539 	     now.tv_nsec);
1540 
1541 	ceph_encode_timespec64(ts, &now);
1542 
1543 	reset_out_kvecs(con);
1544 	return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
1545 			       sizeof(struct ceph_timespec));
1546 }
1547 
1548 static int prepare_ack(struct ceph_connection *con)
1549 {
1550 	void *p;
1551 
1552 	dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1553 	     con->in_seq_acked, con->in_seq);
1554 	con->in_seq_acked = con->in_seq;
1555 
1556 	p = CTRL_BODY(con->v2.out_buf);
1557 	ceph_encode_64(&p, con->in_seq_acked);
1558 
1559 	reset_out_kvecs(con);
1560 	return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
1561 }
1562 
1563 static void prepare_epilogue_plain(struct ceph_connection *con,
1564 				   struct ceph_msg *msg, bool aborted)
1565 {
1566 	dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
1567 	     msg, aborted, con->v2.out_epil.front_crc,
1568 	     con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);
1569 
1570 	encode_epilogue_plain(con, aborted);
1571 	add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);
1572 }
1573 
1574 /*
1575  * For "used" empty segments, crc is -1.  For unused (trailing)
1576  * segments, crc is 0.
1577  */
1578 static void prepare_message_plain(struct ceph_connection *con,
1579 				  struct ceph_msg *msg)
1580 {
1581 	prepare_head_plain(con, con->v2.out_buf,
1582 			   sizeof(struct ceph_msg_header2), NULL, 0, false);
1583 
1584 	if (!front_len(msg) && !middle_len(msg)) {
1585 		if (!data_len(msg)) {
1586 			/*
1587 			 * Empty message: once the head is written,
1588 			 * we are done -- there is no epilogue.
1589 			 */
1590 			con->v2.out_state = OUT_S_FINISH_MESSAGE;
1591 			return;
1592 		}
1593 
1594 		con->v2.out_epil.front_crc = -1;
1595 		con->v2.out_epil.middle_crc = -1;
1596 		con->v2.out_state = OUT_S_QUEUE_DATA;
1597 		return;
1598 	}
1599 
1600 	if (front_len(msg)) {
1601 		con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
1602 						    front_len(msg));
1603 		add_out_kvec(con, msg->front.iov_base, front_len(msg));
1604 	} else {
1605 		/* middle (at least) is there, checked above */
1606 		con->v2.out_epil.front_crc = -1;
1607 	}
1608 
1609 	if (middle_len(msg)) {
1610 		con->v2.out_epil.middle_crc =
1611 			crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
1612 		add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
1613 	} else {
1614 		con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
1615 	}
1616 
1617 	if (data_len(msg)) {
1618 		con->v2.out_state = OUT_S_QUEUE_DATA;
1619 	} else {
1620 		con->v2.out_epil.data_crc = 0;
1621 		prepare_epilogue_plain(con, msg, false);
1622 		con->v2.out_state = OUT_S_FINISH_MESSAGE;
1623 	}
1624 }
1625 
1626 /*
1627  * Unfortunately the kernel crypto API doesn't support streaming
1628  * (piecewise) operation for AEAD algorithms, so we can't get away
1629  * with a fixed size buffer and a couple sgs.  Instead, we have to
1630  * allocate pages for the entire tail of the message (currently up
1631  * to ~32M) and two sgs arrays (up to ~256K each)...
1632  */
1633 static int prepare_message_secure(struct ceph_connection *con,
1634 				  struct ceph_msg *msg)
1635 {
1636 	void *zerop = page_address(ceph_zero_page);
1637 	struct sg_table enc_sgt = {};
1638 	struct sg_table sgt = {};
1639 	struct page **enc_pages;
1640 	int enc_page_cnt;
1641 	int tail_len;
1642 	int ret;
1643 
1644 	ret = prepare_head_secure_small(con, con->v2.out_buf,
1645 					sizeof(struct ceph_msg_header2));
1646 	if (ret)
1647 		return ret;
1648 
1649 	tail_len = tail_onwire_len(msg, true);
1650 	if (!tail_len) {
1651 		/*
1652 		 * Empty message: once the head is written,
1653 		 * we are done -- there is no epilogue.
1654 		 */
1655 		con->v2.out_state = OUT_S_FINISH_MESSAGE;
1656 		return 0;
1657 	}
1658 
1659 	encode_epilogue_secure(con, false);
1660 	ret = setup_message_sgs(&sgt, msg, zerop, zerop, zerop,
1661 				&con->v2.out_epil, NULL, 0, false);
1662 	if (ret)
1663 		goto out;
1664 
1665 	enc_page_cnt = calc_pages_for(0, tail_len);
1666 	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
1667 	if (IS_ERR(enc_pages)) {
1668 		ret = PTR_ERR(enc_pages);
1669 		goto out;
1670 	}
1671 
1672 	WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
1673 	con->v2.out_enc_pages = enc_pages;
1674 	con->v2.out_enc_page_cnt = enc_page_cnt;
1675 	con->v2.out_enc_resid = tail_len;
1676 	con->v2.out_enc_i = 0;
1677 
1678 	ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
1679 					0, tail_len, GFP_NOIO);
1680 	if (ret)
1681 		goto out;
1682 
1683 	ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
1684 			tail_len - CEPH_GCM_TAG_LEN);
1685 	if (ret)
1686 		goto out;
1687 
1688 	dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
1689 	     msg, sgt.orig_nents, enc_page_cnt);
1690 	con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;
1691 
1692 out:
1693 	sg_free_table(&sgt);
1694 	sg_free_table(&enc_sgt);
1695 	return ret;
1696 }
1697 
1698 static int prepare_message(struct ceph_connection *con, struct ceph_msg *msg)
1699 {
1700 	int lens[] = {
1701 		sizeof(struct ceph_msg_header2),
1702 		front_len(msg),
1703 		middle_len(msg),
1704 		data_len(msg)
1705 	};
1706 	struct ceph_frame_desc desc;
1707 	int ret;
1708 
1709 	dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
1710 	     msg, lens[0], lens[1], lens[2], lens[3]);
1711 
1712 	if (con->in_seq > con->in_seq_acked) {
1713 		dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1714 		     con->in_seq_acked, con->in_seq);
1715 		con->in_seq_acked = con->in_seq;
1716 	}
1717 
1718 	reset_out_kvecs(con);
1719 	init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
1720 	encode_preamble(&desc, con->v2.out_buf);
1721 	fill_header2(CTRL_BODY(con->v2.out_buf), &msg->hdr,
1722 		     con->in_seq_acked);
1723 
1724 	if (con_secure(con)) {
1725 		ret = prepare_message_secure(con, msg);
1726 		if (ret)
1727 			return ret;
1728 	} else {
1729 		prepare_message_plain(con, msg);
1730 	}
1731 
1732 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1733 	return 0;
1734 }
1735 
1736 static int prepare_read_banner_prefix(struct ceph_connection *con)
1737 {
1738 	void *buf;
1739 
1740 	buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);
1741 	if (!buf)
1742 		return -ENOMEM;
1743 
1744 	reset_in_kvecs(con);
1745 	add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1746 	add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1747 	con->state = CEPH_CON_S_V2_BANNER_PREFIX;
1748 	return 0;
1749 }
1750 
1751 static int prepare_read_banner_payload(struct ceph_connection *con,
1752 				       int payload_len)
1753 {
1754 	void *buf;
1755 
1756 	buf = alloc_conn_buf(con, payload_len);
1757 	if (!buf)
1758 		return -ENOMEM;
1759 
1760 	reset_in_kvecs(con);
1761 	add_in_kvec(con, buf, payload_len);
1762 	add_in_sign_kvec(con, buf, payload_len);
1763 	con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;
1764 	return 0;
1765 }
1766 
1767 static void prepare_read_preamble(struct ceph_connection *con)
1768 {
1769 	reset_in_kvecs(con);
1770 	add_in_kvec(con, con->v2.in_buf,
1771 		    con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
1772 				      CEPH_PREAMBLE_PLAIN_LEN);
1773 	con->v2.in_state = IN_S_HANDLE_PREAMBLE;
1774 }
1775 
1776 static int prepare_read_control(struct ceph_connection *con)
1777 {
1778 	int ctrl_len = con->v2.in_desc.fd_lens[0];
1779 	int head_len;
1780 	void *buf;
1781 
1782 	reset_in_kvecs(con);
1783 	if (con->state == CEPH_CON_S_V2_HELLO ||
1784 	    con->state == CEPH_CON_S_V2_AUTH) {
1785 		head_len = head_onwire_len(ctrl_len, false);
1786 		buf = alloc_conn_buf(con, head_len);
1787 		if (!buf)
1788 			return -ENOMEM;
1789 
1790 		/* preserve preamble */
1791 		memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);
1792 
1793 		add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
1794 		add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
1795 		add_in_sign_kvec(con, buf, head_len);
1796 	} else {
1797 		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
1798 			buf = alloc_conn_buf(con, ctrl_len);
1799 			if (!buf)
1800 				return -ENOMEM;
1801 
1802 			add_in_kvec(con, buf, ctrl_len);
1803 		} else {
1804 			add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
1805 		}
1806 		add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
1807 	}
1808 	con->v2.in_state = IN_S_HANDLE_CONTROL;
1809 	return 0;
1810 }
1811 
1812 static int prepare_read_control_remainder(struct ceph_connection *con)
1813 {
1814 	int ctrl_len = con->v2.in_desc.fd_lens[0];
1815 	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1816 	void *buf;
1817 
1818 	buf = alloc_conn_buf(con, ctrl_len);
1819 	if (!buf)
1820 		return -ENOMEM;
1821 
1822 	memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);
1823 
1824 	reset_in_kvecs(con);
1825 	add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
1826 	add_in_kvec(con, con->v2.in_buf,
1827 		    padding_len(rem_len) + CEPH_GCM_TAG_LEN);
1828 	con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
1829 	return 0;
1830 }
1831 
1832 static int prepare_read_data(struct ceph_connection *con)
1833 {
1834 	struct bio_vec bv;
1835 
1836 	con->in_data_crc = -1;
1837 	ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
1838 				  data_len(con->in_msg));
1839 
1840 	get_bvec_at(&con->v2.in_cursor, &bv);
1841 	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1842 		if (unlikely(!con->bounce_page)) {
1843 			con->bounce_page = alloc_page(GFP_NOIO);
1844 			if (!con->bounce_page) {
1845 				pr_err("failed to allocate bounce page\n");
1846 				return -ENOMEM;
1847 			}
1848 		}
1849 
1850 		bv.bv_page = con->bounce_page;
1851 		bv.bv_offset = 0;
1852 	}
1853 	set_in_bvec(con, &bv);
1854 	con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
1855 	return 0;
1856 }
1857 
1858 static void prepare_read_data_cont(struct ceph_connection *con)
1859 {
1860 	struct bio_vec bv;
1861 
1862 	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1863 		con->in_data_crc = crc32c(con->in_data_crc,
1864 					  page_address(con->bounce_page),
1865 					  con->v2.in_bvec.bv_len);
1866 
1867 		get_bvec_at(&con->v2.in_cursor, &bv);
1868 		memcpy_to_page(bv.bv_page, bv.bv_offset,
1869 			       page_address(con->bounce_page),
1870 			       con->v2.in_bvec.bv_len);
1871 	} else {
1872 		con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
1873 						    con->v2.in_bvec.bv_page,
1874 						    con->v2.in_bvec.bv_offset,
1875 						    con->v2.in_bvec.bv_len);
1876 	}
1877 
1878 	ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
1879 	if (con->v2.in_cursor.total_resid) {
1880 		get_bvec_at(&con->v2.in_cursor, &bv);
1881 		if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1882 			bv.bv_page = con->bounce_page;
1883 			bv.bv_offset = 0;
1884 		}
1885 		set_in_bvec(con, &bv);
1886 		WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
1887 		return;
1888 	}
1889 
1890 	/*
1891 	 * We've read all data.  Prepare to read epilogue.
1892 	 */
1893 	reset_in_kvecs(con);
1894 	add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1895 	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1896 }
1897 
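/*
 * Sparse read: the connection owner hands out extents one at a time via
 * con->ops->sparse_read().  Each extent is received either into a caller
 * supplied buffer (kvec case) or straight into the data cursor (bvec
 * case), with the data crc accumulated as pieces arrive.  A return of 0
 * from sparse_read() means all extents are in and the epilogue is next.
 */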
1898 static int prepare_sparse_read_cont(struct ceph_connection *con)
1899 {
1900 	int ret;
1901 	struct bio_vec bv;
1902 	char *buf = NULL;
1903 	struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
1904 
1905 	WARN_ON(con->v2.in_state != IN_S_PREPARE_SPARSE_DATA_CONT);
1906 
1907 	if (iov_iter_is_bvec(&con->v2.in_iter)) {
1908 		if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1909 			con->in_data_crc = crc32c(con->in_data_crc,
1910 						  page_address(con->bounce_page),
1911 						  con->v2.in_bvec.bv_len);
1912 			get_bvec_at(cursor, &bv);
1913 			memcpy_to_page(bv.bv_page, bv.bv_offset,
1914 				       page_address(con->bounce_page),
1915 				       con->v2.in_bvec.bv_len);
1916 		} else {
1917 			con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
1918 							    con->v2.in_bvec.bv_page,
1919 							    con->v2.in_bvec.bv_offset,
1920 							    con->v2.in_bvec.bv_len);
1921 		}
1922 
1923 		ceph_msg_data_advance(cursor, con->v2.in_bvec.bv_len);
1924 		cursor->sr_resid -= con->v2.in_bvec.bv_len;
1925 		dout("%s: advance by 0x%x sr_resid 0x%x\n", __func__,
1926 		     con->v2.in_bvec.bv_len, cursor->sr_resid);
1927 		WARN_ON_ONCE(cursor->sr_resid > cursor->total_resid);
1928 		if (cursor->sr_resid) {
1929 			get_bvec_at(cursor, &bv);
1930 			if (bv.bv_len > cursor->sr_resid)
1931 				bv.bv_len = cursor->sr_resid;
1932 			if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1933 				bv.bv_page = con->bounce_page;
1934 				bv.bv_offset = 0;
1935 			}
1936 			set_in_bvec(con, &bv);
1937 			con->v2.data_len_remain -= bv.bv_len;
1938 			return 0;
1939 		}
1940 	} else if (iov_iter_is_kvec(&con->v2.in_iter)) {
1941 		/* On first call, we have no kvec so don't compute crc */
1942 		if (con->v2.in_kvec_cnt) {
1943 			WARN_ON_ONCE(con->v2.in_kvec_cnt > 1);
1944 			con->in_data_crc = crc32c(con->in_data_crc,
1945 						  con->v2.in_kvecs[0].iov_base,
1946 						  con->v2.in_kvecs[0].iov_len);
1947 		}
1948 	} else {
1949 		return -EIO;
1950 	}
1951 
1952 	/* get next extent */
1953 	ret = con->ops->sparse_read(con, cursor, &buf);
1954 	if (ret <= 0) {
1955 		if (ret < 0)
1956 			return ret;
1957 
1958 		reset_in_kvecs(con);
1959 		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1960 		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1961 		return 0;
1962 	}
1963 
1964 	if (buf) {
1965 		/* receive into buffer */
1966 		reset_in_kvecs(con);
1967 		add_in_kvec(con, buf, ret);
1968 		con->v2.data_len_remain -= ret;
1969 		return 0;
1970 	}
1971 
1972 	if (ret > cursor->total_resid) {
1973 		pr_warn("%s: ret 0x%x total_resid 0x%zx resid 0x%zx\n",
1974 			__func__, ret, cursor->total_resid, cursor->resid);
1975 		return -EIO;
1976 	}
1977 	get_bvec_at(cursor, &bv);
1978 	if (bv.bv_len > cursor->sr_resid)
1979 		bv.bv_len = cursor->sr_resid;
1980 	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1981 		if (unlikely(!con->bounce_page)) {
1982 			con->bounce_page = alloc_page(GFP_NOIO);
1983 			if (!con->bounce_page) {
1984 				pr_err("failed to allocate bounce page\n");
1985 				return -ENOMEM;
1986 			}
1987 		}
1988 
1989 		bv.bv_page = con->bounce_page;
1990 		bv.bv_offset = 0;
1991 	}
1992 	set_in_bvec(con, &bv);
1993 	con->v2.data_len_remain -= ret;
1994 	return ret;
1995 }
1996 
1997 static int prepare_sparse_read_data(struct ceph_connection *con)
1998 {
1999 	struct ceph_msg *msg = con->in_msg;
2000 
2001 	dout("%s: starting sparse read\n", __func__);
2002 
2003 	if (WARN_ON_ONCE(!con->ops->sparse_read))
2004 		return -EOPNOTSUPP;
2005 
2006 	if (!con_secure(con))
2007 		con->in_data_crc = -1;
2008 
2009 	ceph_msg_data_cursor_init(&con->v2.in_cursor, msg,
2010 				  msg->sparse_read_total);
2011 
2012 	reset_in_kvecs(con);
2013 	con->v2.in_state = IN_S_PREPARE_SPARSE_DATA_CONT;
2014 	con->v2.data_len_remain = data_len(msg);
2015 	return prepare_sparse_read_cont(con);
2016 }
2017 
2018 static int prepare_read_tail_plain(struct ceph_connection *con)
2019 {
2020 	struct ceph_msg *msg = con->in_msg;
2021 
2022 	if (!front_len(msg) && !middle_len(msg)) {
2023 		WARN_ON(!data_len(msg));
2024 		return prepare_read_data(con);
2025 	}
2026 
2027 	reset_in_kvecs(con);
2028 	if (front_len(msg)) {
2029 		add_in_kvec(con, msg->front.iov_base, front_len(msg));
2030 		WARN_ON(msg->front.iov_len != front_len(msg));
2031 	}
2032 	if (middle_len(msg)) {
2033 		add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
2034 		WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
2035 	}
2036 
2037 	if (data_len(msg)) {
2038 		if (msg->sparse_read_total)
2039 			con->v2.in_state = IN_S_PREPARE_SPARSE_DATA;
2040 		else
2041 			con->v2.in_state = IN_S_PREPARE_READ_DATA;
2042 	} else {
2043 		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
2044 		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
2045 	}
2046 	return 0;
2047 }
2048 
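/*
 * Queue the next page of the encrypted tail for receive.  Pages are
 * filled one at a time until in_enc_resid is exhausted, at which point
 * the whole encrypted tail (ending with the epilogue and the auth tag)
 * has been queued and we move on to IN_S_HANDLE_EPILOGUE.
 */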
2049 static void prepare_read_enc_page(struct ceph_connection *con)
2050 {
2051 	struct bio_vec bv;
2052 
2053 	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
2054 	     con->v2.in_enc_resid);
2055 	WARN_ON(!con->v2.in_enc_resid);
2056 
2057 	bvec_set_page(&bv, con->v2.in_enc_pages[con->v2.in_enc_i],
2058 		      min(con->v2.in_enc_resid, (int)PAGE_SIZE), 0);
2059 
2060 	set_in_bvec(con, &bv);
2061 	con->v2.in_enc_i++;
2062 	con->v2.in_enc_resid -= bv.bv_len;
2063 
2064 	if (con->v2.in_enc_resid) {
2065 		con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;
2066 		return;
2067 	}
2068 
2069 	/*
2070 	 * We are set to read the last piece of ciphertext (ending
2071 	 * with epilogue) + auth tag.
2072 	 */
2073 	WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
2074 	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
2075 }
2076 
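/*
 * Receive-side counterpart of prepare_message_secure(): allocate a page
 * vector big enough for the whole encrypted tail and read it in page by
 * page.  It is decrypted when the epilogue state is reached (see
 * decrypt_tail() via handle_epilogue()).
 */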
2077 static int prepare_read_tail_secure(struct ceph_connection *con)
2078 {
2079 	struct page **enc_pages;
2080 	int enc_page_cnt;
2081 	int tail_len;
2082 
2083 	tail_len = tail_onwire_len(con->in_msg, true);
2084 	WARN_ON(!tail_len);
2085 
2086 	enc_page_cnt = calc_pages_for(0, tail_len);
2087 	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
2088 	if (IS_ERR(enc_pages))
2089 		return PTR_ERR(enc_pages);
2090 
2091 	WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
2092 	con->v2.in_enc_pages = enc_pages;
2093 	con->v2.in_enc_page_cnt = enc_page_cnt;
2094 	con->v2.in_enc_resid = tail_len;
2095 	con->v2.in_enc_i = 0;
2096 
2097 	prepare_read_enc_page(con);
2098 	return 0;
2099 }
2100 
2101 static void __finish_skip(struct ceph_connection *con)
2102 {
2103 	con->in_seq++;
2104 	prepare_read_preamble(con);
2105 }
2106 
2107 static void prepare_skip_message(struct ceph_connection *con)
2108 {
2109 	struct ceph_frame_desc *desc = &con->v2.in_desc;
2110 	int tail_len;
2111 
2112 	dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
2113 	     desc->fd_lens[2], desc->fd_lens[3]);
2114 
2115 	tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
2116 				     desc->fd_lens[3], con_secure(con));
2117 	if (!tail_len) {
2118 		__finish_skip(con);
2119 	} else {
2120 		set_in_skip(con, tail_len);
2121 		con->v2.in_state = IN_S_FINISH_SKIP;
2122 	}
2123 }
2124 
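/*
 * The banner prefix as read off the wire is the fixed CEPH_BANNER_V2
 * string followed by a little-endian u16 giving the length of the
 * banner payload (feature bits) that follows.
 */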
2125 static int process_banner_prefix(struct ceph_connection *con)
2126 {
2127 	int payload_len;
2128 	void *p;
2129 
2130 	WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);
2131 
2132 	p = con->v2.in_kvecs[0].iov_base;
2133 	if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
2134 		if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
2135 			con->error_msg = "server is speaking msgr1 protocol";
2136 		else
2137 			con->error_msg = "protocol error, bad banner";
2138 		return -EINVAL;
2139 	}
2140 
2141 	p += CEPH_BANNER_V2_LEN;
2142 	payload_len = ceph_decode_16(&p);
2143 	dout("%s con %p payload_len %d\n", __func__, con, payload_len);
2144 
2145 	return prepare_read_banner_payload(con, payload_len);
2146 }
2147 
2148 static int process_banner_payload(struct ceph_connection *con)
2149 {
2150 	void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
2151 	u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
2152 	u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
2153 	u64 server_feat, server_req_feat;
2154 	void *p;
2155 	int ret;
2156 
2157 	p = con->v2.in_kvecs[0].iov_base;
2158 	ceph_decode_64_safe(&p, end, server_feat, bad);
2159 	ceph_decode_64_safe(&p, end, server_req_feat, bad);
2160 
2161 	dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
2162 	     __func__, con, server_feat, server_req_feat);
2163 
2164 	if (req_feat & ~server_feat) {
2165 		pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
2166 		       server_feat, req_feat & ~server_feat);
2167 		con->error_msg = "missing required protocol features";
2168 		return -EINVAL;
2169 	}
2170 	if (server_req_feat & ~feat) {
2171 		pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2172 		       feat, server_req_feat & ~feat);
2173 		con->error_msg = "missing required protocol features";
2174 		return -EINVAL;
2175 	}
2176 
2177 	/* no reset_out_kvecs() as our banner may still be pending */
2178 	ret = prepare_hello(con);
2179 	if (ret) {
2180 		pr_err("prepare_hello failed: %d\n", ret);
2181 		return ret;
2182 	}
2183 
2184 	con->state = CEPH_CON_S_V2_HELLO;
2185 	prepare_read_preamble(con);
2186 	return 0;
2187 
2188 bad:
2189 	pr_err("failed to decode banner payload\n");
2190 	return -EINVAL;
2191 }
2192 
2193 static int process_hello(struct ceph_connection *con, void *p, void *end)
2194 {
2195 	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
2196 	struct ceph_entity_addr addr_for_me;
2197 	u8 entity_type;
2198 	int ret;
2199 
2200 	if (con->state != CEPH_CON_S_V2_HELLO) {
2201 		con->error_msg = "protocol error, unexpected hello";
2202 		return -EINVAL;
2203 	}
2204 
2205 	ceph_decode_8_safe(&p, end, entity_type, bad);
2206 	ret = ceph_decode_entity_addr(&p, end, &addr_for_me);
2207 	if (ret) {
2208 		pr_err("failed to decode addr_for_me: %d\n", ret);
2209 		return ret;
2210 	}
2211 
2212 	dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con,
2213 	     entity_type, ceph_pr_addr(&addr_for_me));
2214 
2215 	if (entity_type != con->peer_name.type) {
2216 		pr_err("bad peer type, want %d, got %d\n",
2217 		       con->peer_name.type, entity_type);
2218 		con->error_msg = "wrong peer at address";
2219 		return -EINVAL;
2220 	}
2221 
2222 	/*
2223 	 * Set our address to the address our first peer (i.e. monitor)
2224 	 * sees that we are connecting from.  If we are behind some sort
2225 	 * of NAT and want to be identified by some private (not NATed)
2226 	 * address, the ip option should be used.
2227 	 */
2228 	if (ceph_addr_is_blank(my_addr)) {
2229 		memcpy(&my_addr->in_addr, &addr_for_me.in_addr,
2230 		       sizeof(my_addr->in_addr));
2231 		ceph_addr_set_port(my_addr, 0);
2232 		dout("%s con %p set my addr %s, as seen by peer %s\n",
2233 		     __func__, con, ceph_pr_addr(my_addr),
2234 		     ceph_pr_addr(&con->peer_addr));
2235 	} else {
2236 		dout("%s con %p my addr already set %s\n",
2237 		     __func__, con, ceph_pr_addr(my_addr));
2238 	}
2239 
2240 	WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr));
2241 	WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY);
2242 	WARN_ON(!my_addr->nonce);
2243 
2244 	/* no reset_out_kvecs() as our hello may still be pending */
2245 	ret = prepare_auth_request(con);
2246 	if (ret) {
2247 		if (ret != -EAGAIN)
2248 			pr_err("prepare_auth_request failed: %d\n", ret);
2249 		return ret;
2250 	}
2251 
2252 	con->state = CEPH_CON_S_V2_AUTH;
2253 	return 0;
2254 
2255 bad:
2256 	pr_err("failed to decode hello\n");
2257 	return -EINVAL;
2258 }
2259 
2260 static int process_auth_bad_method(struct ceph_connection *con,
2261 				   void *p, void *end)
2262 {
2263 	int allowed_protos[8], allowed_modes[8];
2264 	int allowed_proto_cnt, allowed_mode_cnt;
2265 	int used_proto, result;
2266 	int ret;
2267 	int i;
2268 
2269 	if (con->state != CEPH_CON_S_V2_AUTH) {
2270 		con->error_msg = "protocol error, unexpected auth_bad_method";
2271 		return -EINVAL;
2272 	}
2273 
2274 	ceph_decode_32_safe(&p, end, used_proto, bad);
2275 	ceph_decode_32_safe(&p, end, result, bad);
2276 	dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto,
2277 	     result);
2278 
2279 	ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad);
2280 	if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) {
2281 		pr_err("allowed_protos too big %d\n", allowed_proto_cnt);
2282 		return -EINVAL;
2283 	}
2284 	for (i = 0; i < allowed_proto_cnt; i++) {
2285 		ceph_decode_32_safe(&p, end, allowed_protos[i], bad);
2286 		dout("%s con %p allowed_protos[%d] %d\n", __func__, con,
2287 		     i, allowed_protos[i]);
2288 	}
2289 
2290 	ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad);
2291 	if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) {
2292 		pr_err("allowed_modes too big %d\n", allowed_mode_cnt);
2293 		return -EINVAL;
2294 	}
2295 	for (i = 0; i < allowed_mode_cnt; i++) {
2296 		ceph_decode_32_safe(&p, end, allowed_modes[i], bad);
2297 		dout("%s con %p allowed_modes[%d] %d\n", __func__, con,
2298 		     i, allowed_modes[i]);
2299 	}
2300 
2301 	mutex_unlock(&con->mutex);
2302 	ret = con->ops->handle_auth_bad_method(con, used_proto, result,
2303 					       allowed_protos,
2304 					       allowed_proto_cnt,
2305 					       allowed_modes,
2306 					       allowed_mode_cnt);
2307 	mutex_lock(&con->mutex);
2308 	if (con->state != CEPH_CON_S_V2_AUTH) {
2309 		dout("%s con %p state changed to %d\n", __func__, con,
2310 		     con->state);
2311 		return -EAGAIN;
2312 	}
2313 
2314 	dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret);
2315 	return ret;
2316 
2317 bad:
2318 	pr_err("failed to decode auth_bad_method\n");
2319 	return -EINVAL;
2320 }
2321 
2322 static int process_auth_reply_more(struct ceph_connection *con,
2323 				   void *p, void *end)
2324 {
2325 	int payload_len;
2326 	int ret;
2327 
2328 	if (con->state != CEPH_CON_S_V2_AUTH) {
2329 		con->error_msg = "protocol error, unexpected auth_reply_more";
2330 		return -EINVAL;
2331 	}
2332 
2333 	ceph_decode_32_safe(&p, end, payload_len, bad);
2334 	ceph_decode_need(&p, end, payload_len, bad);
2335 
2336 	dout("%s con %p payload_len %d\n", __func__, con, payload_len);
2337 
2338 	reset_out_kvecs(con);
2339 	ret = prepare_auth_request_more(con, p, payload_len);
2340 	if (ret) {
2341 		if (ret != -EAGAIN)
2342 			pr_err("prepare_auth_request_more failed: %d\n", ret);
2343 		return ret;
2344 	}
2345 
2346 	return 0;
2347 
2348 bad:
2349 	pr_err("failed to decode auth_reply_more\n");
2350 	return -EINVAL;
2351 }
2352 
2353 /*
2354  * Align session_key and con_secret to avoid GFP_ATOMIC allocation
2355  * inside crypto_shash_setkey() and crypto_aead_setkey() called from
2356  * setup_crypto().  __aligned(16) isn't guaranteed to work for stack
2357  * objects, so do it by hand.
2358  */
2359 static int process_auth_done(struct ceph_connection *con, void *p, void *end)
2360 {
2361 	u8 session_key_buf[CEPH_KEY_LEN + 16];
2362 	u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
2363 	u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
2364 	u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
2365 	int session_key_len, con_secret_len;
2366 	int payload_len;
2367 	u64 global_id;
2368 	int ret;
2369 
2370 	if (con->state != CEPH_CON_S_V2_AUTH) {
2371 		con->error_msg = "protocol error, unexpected auth_done";
2372 		return -EINVAL;
2373 	}
2374 
2375 	ceph_decode_64_safe(&p, end, global_id, bad);
2376 	ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
2377 	ceph_decode_32_safe(&p, end, payload_len, bad);
2378 
2379 	dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
2380 	     __func__, con, global_id, con->v2.con_mode, payload_len);
2381 
2382 	mutex_unlock(&con->mutex);
2383 	session_key_len = 0;
2384 	con_secret_len = 0;
2385 	ret = con->ops->handle_auth_done(con, global_id, p, payload_len,
2386 					 session_key, &session_key_len,
2387 					 con_secret, &con_secret_len);
2388 	mutex_lock(&con->mutex);
2389 	if (con->state != CEPH_CON_S_V2_AUTH) {
2390 		dout("%s con %p state changed to %d\n", __func__, con,
2391 		     con->state);
2392 		ret = -EAGAIN;
2393 		goto out;
2394 	}
2395 
2396 	dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
2397 	if (ret)
2398 		goto out;
2399 
2400 	ret = setup_crypto(con, session_key, session_key_len, con_secret,
2401 			   con_secret_len);
2402 	if (ret)
2403 		goto out;
2404 
2405 	reset_out_kvecs(con);
2406 	ret = prepare_auth_signature(con);
2407 	if (ret) {
2408 		pr_err("prepare_auth_signature failed: %d\n", ret);
2409 		goto out;
2410 	}
2411 
2412 	con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
2413 
2414 out:
2415 	memzero_explicit(session_key_buf, sizeof(session_key_buf));
2416 	memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
2417 	return ret;
2418 
2419 bad:
2420 	pr_err("failed to decode auth_done\n");
2421 	return -EINVAL;
2422 }
2423 
2424 static int process_auth_signature(struct ceph_connection *con,
2425 				  void *p, void *end)
2426 {
2427 	u8 hmac[SHA256_DIGEST_SIZE];
2428 	int ret;
2429 
2430 	if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) {
2431 		con->error_msg = "protocol error, unexpected auth_signature";
2432 		return -EINVAL;
2433 	}
2434 
2435 	ceph_hmac_sha256(con, con->v2.out_sign_kvecs, con->v2.out_sign_kvec_cnt,
2436 			 hmac);
2437 
2438 	ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
2439 	if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
2440 		con->error_msg = "integrity error, bad auth signature";
2441 		return -EBADMSG;
2442 	}
2443 
2444 	dout("%s con %p auth signature ok\n", __func__, con);
2445 
2446 	/* no reset_out_kvecs() as our auth_signature may still be pending */
2447 	if (!con->v2.server_cookie) {
2448 		ret = prepare_client_ident(con);
2449 		if (ret) {
2450 			pr_err("prepare_client_ident failed: %d\n", ret);
2451 			return ret;
2452 		}
2453 
2454 		con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2455 	} else {
2456 		ret = prepare_session_reconnect(con);
2457 		if (ret) {
2458 			pr_err("prepare_session_reconnect failed: %d\n", ret);
2459 			return ret;
2460 		}
2461 
2462 		con->state = CEPH_CON_S_V2_SESSION_RECONNECT;
2463 	}
2464 
2465 	return 0;
2466 
2467 bad:
2468 	pr_err("failed to decode auth_signature\n");
2469 	return -EINVAL;
2470 }
2471 
2472 static int process_server_ident(struct ceph_connection *con,
2473 				void *p, void *end)
2474 {
2475 	struct ceph_client *client = from_msgr(con->msgr);
2476 	u64 features, required_features;
2477 	struct ceph_entity_addr addr;
2478 	u64 global_seq;
2479 	u64 global_id;
2480 	u64 cookie;
2481 	u64 flags;
2482 	int ret;
2483 
2484 	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2485 		con->error_msg = "protocol error, unexpected server_ident";
2486 		return -EINVAL;
2487 	}
2488 
2489 	ret = ceph_decode_entity_addrvec(&p, end, true, &addr);
2490 	if (ret) {
2491 		pr_err("failed to decode server addrs: %d\n", ret);
2492 		return ret;
2493 	}
2494 
2495 	ceph_decode_64_safe(&p, end, global_id, bad);
2496 	ceph_decode_64_safe(&p, end, global_seq, bad);
2497 	ceph_decode_64_safe(&p, end, features, bad);
2498 	ceph_decode_64_safe(&p, end, required_features, bad);
2499 	ceph_decode_64_safe(&p, end, flags, bad);
2500 	ceph_decode_64_safe(&p, end, cookie, bad);
2501 
2502 	dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n",
2503 	     __func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce),
2504 	     global_id, global_seq, features, required_features, flags, cookie);
2505 
2506 	/* is this who we intended to talk to? */
2507 	if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) {
2508 		pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n",
2509 		       ceph_pr_addr(&con->peer_addr),
2510 		       le32_to_cpu(con->peer_addr.nonce),
2511 		       ceph_pr_addr(&addr), le32_to_cpu(addr.nonce));
2512 		con->error_msg = "wrong peer at address";
2513 		return -EINVAL;
2514 	}
2515 
2516 	if (client->required_features & ~features) {
2517 		pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
2518 		       features, client->required_features & ~features);
2519 		con->error_msg = "missing required protocol features";
2520 		return -EINVAL;
2521 	}
2522 
2523 	/*
2524 	 * Both name->type and name->num are set in ceph_con_open() but
2525 	 * name->num may be bogus in the initial monmap.  name->type is
2526 	 * verified in handle_hello().
2527 	 */
2528 	WARN_ON(!con->peer_name.type);
2529 	con->peer_name.num = cpu_to_le64(global_id);
2530 	con->v2.peer_global_seq = global_seq;
2531 	con->peer_features = features;
2532 	WARN_ON(required_features & ~client->supported_features);
2533 	con->v2.server_cookie = cookie;
2534 
2535 	if (flags & CEPH_MSG_CONNECT_LOSSY) {
2536 		ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
2537 		WARN_ON(con->v2.server_cookie);
2538 	} else {
2539 		WARN_ON(!con->v2.server_cookie);
2540 	}
2541 
2542 	clear_in_sign_kvecs(con);
2543 	clear_out_sign_kvecs(con);
2544 	free_conn_bufs(con);
2545 	con->delay = 0;  /* reset backoff memory */
2546 
2547 	con->state = CEPH_CON_S_OPEN;
2548 	con->v2.out_state = OUT_S_GET_NEXT;
2549 	return 0;
2550 
2551 bad:
2552 	pr_err("failed to decode server_ident\n");
2553 	return -EINVAL;
2554 }
2555 
2556 static int process_ident_missing_features(struct ceph_connection *con,
2557 					  void *p, void *end)
2558 {
2559 	struct ceph_client *client = from_msgr(con->msgr);
2560 	u64 missing_features;
2561 
2562 	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2563 		con->error_msg = "protocol error, unexpected ident_missing_features";
2564 		return -EINVAL;
2565 	}
2566 
2567 	ceph_decode_64_safe(&p, end, missing_features, bad);
2568 	pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2569 	       client->supported_features, missing_features);
2570 	con->error_msg = "missing required protocol features";
2571 	return -EINVAL;
2572 
2573 bad:
2574 	pr_err("failed to decode ident_missing_features\n");
2575 	return -EINVAL;
2576 }
2577 
2578 static int process_session_reconnect_ok(struct ceph_connection *con,
2579 					void *p, void *end)
2580 {
2581 	u64 seq;
2582 
2583 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2584 		con->error_msg = "protocol error, unexpected session_reconnect_ok";
2585 		return -EINVAL;
2586 	}
2587 
2588 	ceph_decode_64_safe(&p, end, seq, bad);
2589 
2590 	dout("%s con %p seq %llu\n", __func__, con, seq);
2591 	ceph_con_discard_requeued(con, seq);
2592 
2593 	clear_in_sign_kvecs(con);
2594 	clear_out_sign_kvecs(con);
2595 	free_conn_bufs(con);
2596 	con->delay = 0;  /* reset backoff memory */
2597 
2598 	con->state = CEPH_CON_S_OPEN;
2599 	con->v2.out_state = OUT_S_GET_NEXT;
2600 	return 0;
2601 
2602 bad:
2603 	pr_err("failed to decode session_reconnect_ok\n");
2604 	return -EINVAL;
2605 }
2606 
2607 static int process_session_retry(struct ceph_connection *con,
2608 				 void *p, void *end)
2609 {
2610 	u64 connect_seq;
2611 	int ret;
2612 
2613 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2614 		con->error_msg = "protocol error, unexpected session_retry";
2615 		return -EINVAL;
2616 	}
2617 
2618 	ceph_decode_64_safe(&p, end, connect_seq, bad);
2619 
2620 	dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq);
2621 	WARN_ON(connect_seq <= con->v2.connect_seq);
2622 	con->v2.connect_seq = connect_seq + 1;
2623 
2624 	free_conn_bufs(con);
2625 
2626 	reset_out_kvecs(con);
2627 	ret = prepare_session_reconnect(con);
2628 	if (ret) {
2629 		pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret);
2630 		return ret;
2631 	}
2632 
2633 	return 0;
2634 
2635 bad:
2636 	pr_err("failed to decode session_retry\n");
2637 	return -EINVAL;
2638 }
2639 
2640 static int process_session_retry_global(struct ceph_connection *con,
2641 					void *p, void *end)
2642 {
2643 	u64 global_seq;
2644 	int ret;
2645 
2646 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2647 		con->error_msg = "protocol error, unexpected session_retry_global";
2648 		return -EINVAL;
2649 	}
2650 
2651 	ceph_decode_64_safe(&p, end, global_seq, bad);
2652 
2653 	dout("%s con %p global_seq %llu\n", __func__, con, global_seq);
2654 	WARN_ON(global_seq <= con->v2.global_seq);
2655 	con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq);
2656 
2657 	free_conn_bufs(con);
2658 
2659 	reset_out_kvecs(con);
2660 	ret = prepare_session_reconnect(con);
2661 	if (ret) {
2662 		pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret);
2663 		return ret;
2664 	}
2665 
2666 	return 0;
2667 
2668 bad:
2669 	pr_err("failed to decode session_retry_global\n");
2670 	return -EINVAL;
2671 }
2672 
2673 static int process_session_reset(struct ceph_connection *con,
2674 				 void *p, void *end)
2675 {
2676 	bool full;
2677 	int ret;
2678 
2679 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2680 		con->error_msg = "protocol error, unexpected session_reset";
2681 		return -EINVAL;
2682 	}
2683 
2684 	ceph_decode_8_safe(&p, end, full, bad);
2685 	if (!full) {
2686 		con->error_msg = "protocol error, bad session_reset";
2687 		return -EINVAL;
2688 	}
2689 
2690 	pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name),
2691 		ceph_pr_addr(&con->peer_addr));
2692 	ceph_con_reset_session(con);
2693 
2694 	mutex_unlock(&con->mutex);
2695 	if (con->ops->peer_reset)
2696 		con->ops->peer_reset(con);
2697 	mutex_lock(&con->mutex);
2698 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2699 		dout("%s con %p state changed to %d\n", __func__, con,
2700 		     con->state);
2701 		return -EAGAIN;
2702 	}
2703 
2704 	free_conn_bufs(con);
2705 
2706 	reset_out_kvecs(con);
2707 	ret = prepare_client_ident(con);
2708 	if (ret) {
2709 		pr_err("prepare_client_ident (rst) failed: %d\n", ret);
2710 		return ret;
2711 	}
2712 
2713 	con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2714 	return 0;
2715 
2716 bad:
2717 	pr_err("failed to decode session_reset\n");
2718 	return -EINVAL;
2719 }
2720 
2721 static int process_keepalive2_ack(struct ceph_connection *con,
2722 				  void *p, void *end)
2723 {
2724 	if (con->state != CEPH_CON_S_OPEN) {
2725 		con->error_msg = "protocol error, unexpected keepalive2_ack";
2726 		return -EINVAL;
2727 	}
2728 
2729 	ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
2730 	ceph_decode_timespec64(&con->last_keepalive_ack, p);
2731 
2732 	dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
2733 	     con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);
2734 
2735 	return 0;
2736 
2737 bad:
2738 	pr_err("failed to decode keepalive2_ack\n");
2739 	return -EINVAL;
2740 }
2741 
2742 static int process_ack(struct ceph_connection *con, void *p, void *end)
2743 {
2744 	u64 seq;
2745 
2746 	if (con->state != CEPH_CON_S_OPEN) {
2747 		con->error_msg = "protocol error, unexpected ack";
2748 		return -EINVAL;
2749 	}
2750 
2751 	ceph_decode_64_safe(&p, end, seq, bad);
2752 
2753 	dout("%s con %p seq %llu\n", __func__, con, seq);
2754 	ceph_con_discard_sent(con, seq);
2755 	return 0;
2756 
2757 bad:
2758 	pr_err("failed to decode ack\n");
2759 	return -EINVAL;
2760 }
2761 
2762 static int process_control(struct ceph_connection *con, void *p, void *end)
2763 {
2764 	int tag = con->v2.in_desc.fd_tag;
2765 	int ret;
2766 
2767 	dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p));
2768 
2769 	switch (tag) {
2770 	case FRAME_TAG_HELLO:
2771 		ret = process_hello(con, p, end);
2772 		break;
2773 	case FRAME_TAG_AUTH_BAD_METHOD:
2774 		ret = process_auth_bad_method(con, p, end);
2775 		break;
2776 	case FRAME_TAG_AUTH_REPLY_MORE:
2777 		ret = process_auth_reply_more(con, p, end);
2778 		break;
2779 	case FRAME_TAG_AUTH_DONE:
2780 		ret = process_auth_done(con, p, end);
2781 		break;
2782 	case FRAME_TAG_AUTH_SIGNATURE:
2783 		ret = process_auth_signature(con, p, end);
2784 		break;
2785 	case FRAME_TAG_SERVER_IDENT:
2786 		ret = process_server_ident(con, p, end);
2787 		break;
2788 	case FRAME_TAG_IDENT_MISSING_FEATURES:
2789 		ret = process_ident_missing_features(con, p, end);
2790 		break;
2791 	case FRAME_TAG_SESSION_RECONNECT_OK:
2792 		ret = process_session_reconnect_ok(con, p, end);
2793 		break;
2794 	case FRAME_TAG_SESSION_RETRY:
2795 		ret = process_session_retry(con, p, end);
2796 		break;
2797 	case FRAME_TAG_SESSION_RETRY_GLOBAL:
2798 		ret = process_session_retry_global(con, p, end);
2799 		break;
2800 	case FRAME_TAG_SESSION_RESET:
2801 		ret = process_session_reset(con, p, end);
2802 		break;
2803 	case FRAME_TAG_KEEPALIVE2_ACK:
2804 		ret = process_keepalive2_ack(con, p, end);
2805 		break;
2806 	case FRAME_TAG_ACK:
2807 		ret = process_ack(con, p, end);
2808 		break;
2809 	default:
2810 		pr_err("bad tag %d\n", tag);
2811 		con->error_msg = "protocol error, bad tag";
2812 		return -EINVAL;
2813 	}
2814 	if (ret) {
2815 		dout("%s con %p error %d\n", __func__, con, ret);
2816 		return ret;
2817 	}
2818 
2819 	prepare_read_preamble(con);
2820 	return 0;
2821 }
2822 
2823 /*
2824  * Return:
2825  *   1 - con->in_msg set, read message
2826  *   0 - skip message
2827  *  <0 - error
2828  */
2829 static int process_message_header(struct ceph_connection *con,
2830 				  void *p, void *end)
2831 {
2832 	struct ceph_frame_desc *desc = &con->v2.in_desc;
2833 	struct ceph_msg_header2 *hdr2 = p;
2834 	struct ceph_msg_header hdr;
2835 	int skip;
2836 	int ret;
2837 	u64 seq;
2838 
2839 	/* verify seq# */
2840 	seq = le64_to_cpu(hdr2->seq);
2841 	if ((s64)seq - (s64)con->in_seq < 1) {
2842 		pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n",
2843 			ENTITY_NAME(con->peer_name),
2844 			ceph_pr_addr(&con->peer_addr),
2845 			seq, con->in_seq + 1);
2846 		return 0;
2847 	}
2848 	if ((s64)seq - (s64)con->in_seq > 1) {
2849 		pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1);
2850 		con->error_msg = "bad message sequence # for incoming message";
2851 		return -EBADE;
2852 	}
2853 
2854 	ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq));
2855 
2856 	fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2],
2857 		    desc->fd_lens[3], &con->peer_name);
2858 	ret = ceph_con_in_msg_alloc(con, &hdr, &skip);
2859 	if (ret)
2860 		return ret;
2861 
2862 	WARN_ON(!con->in_msg ^ skip);
2863 	if (skip)
2864 		return 0;
2865 
2866 	WARN_ON(!con->in_msg);
2867 	WARN_ON(con->in_msg->con != con);
2868 	return 1;
2869 }
2870 
2871 static int process_message(struct ceph_connection *con)
2872 {
2873 	ceph_con_process_message(con);
2874 
2875 	/*
2876 	 * We could have been closed by ceph_con_close() because
2877 	 * ceph_con_process_message() temporarily drops con->mutex.
2878 	 */
2879 	if (con->state != CEPH_CON_S_OPEN) {
2880 		dout("%s con %p state changed to %d\n", __func__, con,
2881 		     con->state);
2882 		return -EAGAIN;
2883 	}
2884 
2885 	prepare_read_preamble(con);
2886 	return 0;
2887 }
2888 
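/*
 * Dispatch a fully received (and, in secure mode, already decrypted)
 * control body.  Non-MESSAGE frames are handled by process_control();
 * for MESSAGE frames the header is decoded and the tail (front, middle
 * and data segments) is either skipped, read in, or -- if empty --
 * the message is processed right away.
 */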
2889 static int __handle_control(struct ceph_connection *con, void *p)
2890 {
2891 	void *end = p + con->v2.in_desc.fd_lens[0];
2892 	struct ceph_msg *msg;
2893 	int ret;
2894 
2895 	if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
2896 		return process_control(con, p, end);
2897 
2898 	ret = process_message_header(con, p, end);
2899 	if (ret < 0)
2900 		return ret;
2901 	if (ret == 0) {
2902 		prepare_skip_message(con);
2903 		return 0;
2904 	}
2905 
2906 	msg = con->in_msg;  /* set in process_message_header() */
2907 	if (front_len(msg)) {
2908 		WARN_ON(front_len(msg) > msg->front_alloc_len);
2909 		msg->front.iov_len = front_len(msg);
2910 	} else {
2911 		msg->front.iov_len = 0;
2912 	}
2913 	if (middle_len(msg)) {
2914 		WARN_ON(middle_len(msg) > msg->middle->alloc_len);
2915 		msg->middle->vec.iov_len = middle_len(msg);
2916 	} else if (msg->middle) {
2917 		msg->middle->vec.iov_len = 0;
2918 	}
2919 
2920 	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
2921 		return process_message(con);
2922 
2923 	if (con_secure(con))
2924 		return prepare_read_tail_secure(con);
2925 
2926 	return prepare_read_tail_plain(con);
2927 }
2928 
2929 static int handle_preamble(struct ceph_connection *con)
2930 {
2931 	struct ceph_frame_desc *desc = &con->v2.in_desc;
2932 	int ret;
2933 
2934 	if (con_secure(con)) {
2935 		ret = decrypt_preamble(con);
2936 		if (ret) {
2937 			if (ret == -EBADMSG)
2938 				con->error_msg = "integrity error, bad preamble auth tag";
2939 			return ret;
2940 		}
2941 	}
2942 
2943 	ret = decode_preamble(con->v2.in_buf, desc);
2944 	if (ret) {
2945 		if (ret == -EBADMSG)
2946 			con->error_msg = "integrity error, bad crc";
2947 		else
2948 			con->error_msg = "protocol error, bad preamble";
2949 		return ret;
2950 	}
2951 
2952 	dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__,
2953 	     con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0],
2954 	     desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]);
2955 
2956 	if (!con_secure(con))
2957 		return prepare_read_control(con);
2958 
2959 	if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN)
2960 		return prepare_read_control_remainder(con);
2961 
2962 	return __handle_control(con, CTRL_BODY(con->v2.in_buf));
2963 }
2964 
2965 static int handle_control(struct ceph_connection *con)
2966 {
2967 	int ctrl_len = con->v2.in_desc.fd_lens[0];
2968 	void *buf;
2969 	int ret;
2970 
2971 	WARN_ON(con_secure(con));
2972 
2973 	ret = verify_control_crc(con);
2974 	if (ret) {
2975 		con->error_msg = "integrity error, bad crc";
2976 		return ret;
2977 	}
2978 
2979 	if (con->state == CEPH_CON_S_V2_AUTH) {
2980 		buf = alloc_conn_buf(con, ctrl_len);
2981 		if (!buf)
2982 			return -ENOMEM;
2983 
2984 		memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len);
2985 		return __handle_control(con, buf);
2986 	}
2987 
2988 	return __handle_control(con, con->v2.in_kvecs[0].iov_base);
2989 }
2990 
2991 static int handle_control_remainder(struct ceph_connection *con)
2992 {
2993 	int ret;
2994 
2995 	WARN_ON(!con_secure(con));
2996 
2997 	ret = decrypt_control_remainder(con);
2998 	if (ret) {
2999 		if (ret == -EBADMSG)
3000 			con->error_msg = "integrity error, bad control remainder auth tag";
3001 		return ret;
3002 	}
3003 
3004 	return __handle_control(con, con->v2.in_kvecs[0].iov_base -
3005 				     CEPH_PREAMBLE_INLINE_LEN);
3006 }
3007 
3008 static int handle_epilogue(struct ceph_connection *con)
3009 {
3010 	u32 front_crc, middle_crc, data_crc;
3011 	int ret;
3012 
3013 	if (con_secure(con)) {
3014 		ret = decrypt_tail(con);
3015 		if (ret) {
3016 			if (ret == -EBADMSG)
3017 				con->error_msg = "integrity error, bad epilogue auth tag";
3018 			return ret;
3019 		}
3020 
3021 		/* just late_status */
3022 		ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL);
3023 		if (ret) {
3024 			con->error_msg = "protocol error, bad epilogue";
3025 			return ret;
3026 		}
3027 	} else {
3028 		ret = decode_epilogue(con->v2.in_buf, &front_crc,
3029 				      &middle_crc, &data_crc);
3030 		if (ret) {
3031 			con->error_msg = "protocol error, bad epilogue";
3032 			return ret;
3033 		}
3034 
3035 		ret = verify_epilogue_crcs(con, front_crc, middle_crc,
3036 					   data_crc);
3037 		if (ret) {
3038 			con->error_msg = "integrity error, bad crc";
3039 			return ret;
3040 		}
3041 	}
3042 
3043 	return process_message(con);
3044 }
3045 
3046 static void finish_skip(struct ceph_connection *con)
3047 {
3048 	dout("%s con %p\n", __func__, con);
3049 
3050 	if (con_secure(con))
3051 		gcm_inc_nonce(&con->v2.in_gcm_nonce);
3052 
3053 	__finish_skip(con);
3054 }
3055 
3056 static int populate_in_iter(struct ceph_connection *con)
3057 {
3058 	int ret;
3059 
3060 	dout("%s con %p state %d in_state %d\n", __func__, con, con->state,
3061 	     con->v2.in_state);
3062 	WARN_ON(iov_iter_count(&con->v2.in_iter));
3063 
3064 	if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) {
3065 		ret = process_banner_prefix(con);
3066 	} else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) {
3067 		ret = process_banner_payload(con);
3068 	} else if ((con->state >= CEPH_CON_S_V2_HELLO &&
3069 		    con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) ||
3070 		   con->state == CEPH_CON_S_OPEN) {
3071 		switch (con->v2.in_state) {
3072 		case IN_S_HANDLE_PREAMBLE:
3073 			ret = handle_preamble(con);
3074 			break;
3075 		case IN_S_HANDLE_CONTROL:
3076 			ret = handle_control(con);
3077 			break;
3078 		case IN_S_HANDLE_CONTROL_REMAINDER:
3079 			ret = handle_control_remainder(con);
3080 			break;
3081 		case IN_S_PREPARE_READ_DATA:
3082 			ret = prepare_read_data(con);
3083 			break;
3084 		case IN_S_PREPARE_READ_DATA_CONT:
3085 			prepare_read_data_cont(con);
3086 			ret = 0;
3087 			break;
3088 		case IN_S_PREPARE_READ_ENC_PAGE:
3089 			prepare_read_enc_page(con);
3090 			ret = 0;
3091 			break;
3092 		case IN_S_PREPARE_SPARSE_DATA:
3093 			ret = prepare_sparse_read_data(con);
3094 			break;
3095 		case IN_S_PREPARE_SPARSE_DATA_CONT:
3096 			ret = prepare_sparse_read_cont(con);
3097 			break;
3098 		case IN_S_HANDLE_EPILOGUE:
3099 			ret = handle_epilogue(con);
3100 			break;
3101 		case IN_S_FINISH_SKIP:
3102 			finish_skip(con);
3103 			ret = 0;
3104 			break;
3105 		default:
3106 			WARN(1, "bad in_state %d", con->v2.in_state);
3107 			return -EINVAL;
3108 		}
3109 	} else {
3110 		WARN(1, "bad state %d", con->state);
3111 		return -EINVAL;
3112 	}
3113 	if (ret) {
3114 		dout("%s con %p error %d\n", __func__, con, ret);
3115 		return ret;
3116 	}
3117 
3118 	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
3119 		return -ENODATA;
3120 	dout("%s con %p populated %zu\n", __func__, con,
3121 	     iov_iter_count(&con->v2.in_iter));
3122 	return 1;
3123 }
3124 
3125 int ceph_con_v2_try_read(struct ceph_connection *con)
3126 {
3127 	int ret;
3128 
3129 	dout("%s con %p state %d need %zu\n", __func__, con, con->state,
3130 	     iov_iter_count(&con->v2.in_iter));
3131 
3132 	if (con->state == CEPH_CON_S_PREOPEN)
3133 		return 0;
3134 
3135 	/*
3136 	 * We should always have something pending here.  If not, bail
3137 	 * out instead of calling populate_in_iter() as if we had read
3138 	 * something (ceph_tcp_recv() would immediately return 1).
3139 	 */
3140 	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
3141 		return -ENODATA;
3142 
3143 	for (;;) {
3144 		ret = ceph_tcp_recv(con);
3145 		if (ret <= 0)
3146 			return ret;
3147 
3148 		ret = populate_in_iter(con);
3149 		if (ret <= 0) {
3150 			if (ret && ret != -EAGAIN && !con->error_msg)
3151 				con->error_msg = "read processing error";
3152 			return ret;
3153 		}
3154 	}
3155 }
3156 
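/*
 * Outgoing data is streamed one bvec at a time: queue_data() primes the
 * cursor and queues the first piece, queue_data_cont() folds each sent
 * piece into the data crc and queues the next one until the cursor is
 * drained, at which point the plain epilogue is queued.
 */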
3157 static void queue_data(struct ceph_connection *con, struct ceph_msg *msg)
3158 {
3159 	struct bio_vec bv;
3160 
3161 	con->v2.out_epil.data_crc = -1;
3162 	ceph_msg_data_cursor_init(&con->v2.out_cursor, msg,
3163 				  data_len(msg));
3164 
3165 	get_bvec_at(&con->v2.out_cursor, &bv);
3166 	set_out_bvec(con, &bv, true);
3167 	con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
3168 }
3169 
3170 static void queue_data_cont(struct ceph_connection *con, struct ceph_msg *msg)
3171 {
3172 	struct bio_vec bv;
3173 
3174 	con->v2.out_epil.data_crc = ceph_crc32c_page(
3175 		con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
3176 		con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len);
3177 
3178 	ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len);
3179 	if (con->v2.out_cursor.total_resid) {
3180 		get_bvec_at(&con->v2.out_cursor, &bv);
3181 		set_out_bvec(con, &bv, true);
3182 		WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT);
3183 		return;
3184 	}
3185 
3186 	/*
3187 	 * We've written all data.  Queue epilogue.  Once it's written,
3188 	 * we are done.
3189 	 */
3190 	reset_out_kvecs(con);
3191 	prepare_epilogue_plain(con, msg, false);
3192 	con->v2.out_state = OUT_S_FINISH_MESSAGE;
3193 }
3194 
3195 static void queue_enc_page(struct ceph_connection *con)
3196 {
3197 	struct bio_vec bv;
3198 
3199 	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i,
3200 	     con->v2.out_enc_resid);
3201 	WARN_ON(!con->v2.out_enc_resid);
3202 
3203 	bvec_set_page(&bv, con->v2.out_enc_pages[con->v2.out_enc_i],
3204 		      min(con->v2.out_enc_resid, (int)PAGE_SIZE), 0);
3205 
3206 	set_out_bvec(con, &bv, false);
3207 	con->v2.out_enc_i++;
3208 	con->v2.out_enc_resid -= bv.bv_len;
3209 
3210 	if (con->v2.out_enc_resid) {
3211 		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE);
3212 		return;
3213 	}
3214 
3215 	/*
3216 	 * We've queued the last piece of ciphertext (ending with
3217 	 * epilogue) + auth tag.  Once it's written, we are done.
3218 	 */
3219 	WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt);
3220 	con->v2.out_state = OUT_S_FINISH_MESSAGE;
3221 }
3222 
3223 static void queue_zeros(struct ceph_connection *con, struct ceph_msg *msg)
3224 {
3225 	dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);
3226 
3227 	if (con->v2.out_zero) {
3228 		set_out_bvec_zero(con);
3229 		con->v2.out_zero -= con->v2.out_bvec.bv_len;
3230 		con->v2.out_state = OUT_S_QUEUE_ZEROS;
3231 		return;
3232 	}
3233 
3234 	/*
3235 	 * We've zero-filled everything up to epilogue.  Queue epilogue
3236 	 * with late_status set to ABORTED and crcs adjusted for zeros.
3237 	 * Once it's written, we are done patching up for the revoke.
3238 	 */
3239 	reset_out_kvecs(con);
3240 	prepare_epilogue_plain(con, msg, true);
3241 	con->v2.out_state = OUT_S_FINISH_MESSAGE;
3242 }
3243 
3244 static void finish_message(struct ceph_connection *con)
3245 {
3246 	dout("%s con %p msg %p\n", __func__, con, con->out_msg);
3247 
3248 	/* we end up here in both plain and secure modes */
3249 	if (con->v2.out_enc_pages) {
3250 		WARN_ON(!con->v2.out_enc_page_cnt);
3251 		ceph_release_page_vector(con->v2.out_enc_pages,
3252 					 con->v2.out_enc_page_cnt);
3253 		con->v2.out_enc_pages = NULL;
3254 		con->v2.out_enc_page_cnt = 0;
3255 	}
3256 	/* message may have been revoked */
3257 	if (con->out_msg) {
3258 		ceph_msg_put(con->out_msg);
3259 		con->out_msg = NULL;
3260 	}
3261 
3262 	con->v2.out_state = OUT_S_GET_NEXT;
3263 }
3264 
3265 static int populate_out_iter(struct ceph_connection *con)
3266 {
3267 	struct ceph_msg *msg;
3268 	int ret;
3269 
3270 	dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
3271 	     con->v2.out_state);
3272 	WARN_ON(iov_iter_count(&con->v2.out_iter));
3273 
3274 	if (con->state != CEPH_CON_S_OPEN) {
3275 		WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX ||
3276 			con->state > CEPH_CON_S_V2_SESSION_RECONNECT);
3277 		goto nothing_pending;
3278 	}
3279 
3280 	switch (con->v2.out_state) {
3281 	case OUT_S_QUEUE_DATA:
3282 		WARN_ON(!con->out_msg);
3283 		queue_data(con, con->out_msg);
3284 		goto populated;
3285 	case OUT_S_QUEUE_DATA_CONT:
3286 		WARN_ON(!con->out_msg);
3287 		queue_data_cont(con, con->out_msg);
3288 		goto populated;
3289 	case OUT_S_QUEUE_ENC_PAGE:
3290 		queue_enc_page(con);
3291 		goto populated;
3292 	case OUT_S_QUEUE_ZEROS:
3293 		WARN_ON(con->out_msg);  /* revoked */
3294 		queue_zeros(con, con->out_msg);
3295 		goto populated;
3296 	case OUT_S_FINISH_MESSAGE:
3297 		finish_message(con);
3298 		break;
3299 	case OUT_S_GET_NEXT:
3300 		break;
3301 	default:
3302 		WARN(1, "bad out_state %d", con->v2.out_state);
3303 		return -EINVAL;
3304 	}
3305 
3306 	WARN_ON(con->v2.out_state != OUT_S_GET_NEXT);
3307 	if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
3308 		ret = prepare_keepalive2(con);
3309 		if (ret) {
3310 			pr_err("prepare_keepalive2 failed: %d\n", ret);
3311 			return ret;
3312 		}
3313 	} else if ((msg = ceph_con_get_out_msg(con)) != NULL) {
3314 		ret = prepare_message(con, msg);
3315 		if (ret) {
3316 			pr_err("prepare_message failed: %d\n", ret);
3317 			return ret;
3318 		}
3319 	} else if (con->in_seq > con->in_seq_acked) {
3320 		ret = prepare_ack(con);
3321 		if (ret) {
3322 			pr_err("prepare_ack failed: %d\n", ret);
3323 			return ret;
3324 		}
3325 	} else {
3326 		goto nothing_pending;
3327 	}
3328 
3329 populated:
3330 	if (WARN_ON(!iov_iter_count(&con->v2.out_iter)))
3331 		return -ENODATA;
3332 	dout("%s con %p populated %zu\n", __func__, con,
3333 	     iov_iter_count(&con->v2.out_iter));
3334 	return 1;
3335 
3336 nothing_pending:
3337 	WARN_ON(iov_iter_count(&con->v2.out_iter));
3338 	dout("%s con %p nothing pending\n", __func__, con);
3339 	ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
3340 	return 0;
3341 }
3342 
3343 int ceph_con_v2_try_write(struct ceph_connection *con)
3344 {
3345 	int ret;
3346 
3347 	dout("%s con %p state %d have %zu\n", __func__, con, con->state,
3348 	     iov_iter_count(&con->v2.out_iter));
3349 
3350 	/* open the socket first? */
3351 	if (con->state == CEPH_CON_S_PREOPEN) {
3352 		WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2);
3353 
3354 		/*
3355 		 * Always bump global_seq.  Bump connect_seq only if
3356 		 * there is a session (i.e. we are reconnecting and will
3357 		 * send session_reconnect instead of client_ident).
3358 		 */
3359 		con->v2.global_seq = ceph_get_global_seq(con->msgr, 0);
3360 		if (con->v2.server_cookie)
3361 			con->v2.connect_seq++;
3362 
3363 		ret = prepare_read_banner_prefix(con);
3364 		if (ret) {
3365 			pr_err("prepare_read_banner_prefix failed: %d\n", ret);
3366 			con->error_msg = "connect error";
3367 			return ret;
3368 		}
3369 
3370 		reset_out_kvecs(con);
3371 		ret = prepare_banner(con);
3372 		if (ret) {
3373 			pr_err("prepare_banner failed: %d\n", ret);
3374 			con->error_msg = "connect error";
3375 			return ret;
3376 		}
3377 
3378 		ret = ceph_tcp_connect(con);
3379 		if (ret) {
3380 			pr_err("ceph_tcp_connect failed: %d\n", ret);
3381 			con->error_msg = "connect error";
3382 			return ret;
3383 		}
3384 	}
3385 
3386 	if (!iov_iter_count(&con->v2.out_iter)) {
3387 		ret = populate_out_iter(con);
3388 		if (ret <= 0) {
3389 			if (ret && ret != -EAGAIN && !con->error_msg)
3390 				con->error_msg = "write processing error";
3391 			return ret;
3392 		}
3393 	}
3394 
3395 	tcp_sock_set_cork(con->sock->sk, true);
3396 	for (;;) {
3397 		ret = ceph_tcp_send(con);
3398 		if (ret <= 0)
3399 			break;
3400 
3401 		ret = populate_out_iter(con);
3402 		if (ret <= 0) {
3403 			if (ret && ret != -EAGAIN && !con->error_msg)
3404 				con->error_msg = "write processing error";
3405 			break;
3406 		}
3407 	}
3408 
3409 	tcp_sock_set_cork(con->sock->sk, false);
3410 	return ret;
3411 }
3412 
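/*
 * crc32c over zero_len zero bytes, computed a page at a time from the
 * shared ceph_zero_page.  Used to patch up the epilogue crcs when a
 * revoked message has its unsent remainder replaced with zeros.
 */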
3413 static u32 crc32c_zeros(u32 crc, int zero_len)
3414 {
3415 	int len;
3416 
3417 	while (zero_len) {
3418 		len = min(zero_len, (int)PAGE_SIZE);
3419 		crc = crc32c(crc, page_address(ceph_zero_page), len);
3420 		zero_len -= len;
3421 	}
3422 
3423 	return crc;
3424 }
3425 
3426 static void prepare_zero_front(struct ceph_connection *con,
3427 			       struct ceph_msg *msg, int resid)
3428 {
3429 	int sent;
3430 
3431 	WARN_ON(!resid || resid > front_len(msg));
3432 	sent = front_len(msg) - resid;
3433 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3434 
3435 	if (sent) {
3436 		con->v2.out_epil.front_crc =
3437 			crc32c(-1, msg->front.iov_base, sent);
3438 		con->v2.out_epil.front_crc =
3439 			crc32c_zeros(con->v2.out_epil.front_crc, resid);
3440 	} else {
3441 		con->v2.out_epil.front_crc = crc32c_zeros(-1, resid);
3442 	}
3443 
3444 	con->v2.out_iter.count -= resid;
3445 	out_zero_add(con, resid);
3446 }
3447 
3448 static void prepare_zero_middle(struct ceph_connection *con,
3449 				struct ceph_msg *msg, int resid)
3450 {
3451 	int sent;
3452 
3453 	WARN_ON(!resid || resid > middle_len(msg));
3454 	sent = middle_len(msg) - resid;
3455 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3456 
3457 	if (sent) {
3458 		con->v2.out_epil.middle_crc =
3459 			crc32c(-1, msg->middle->vec.iov_base, sent);
3460 		con->v2.out_epil.middle_crc =
3461 			crc32c_zeros(con->v2.out_epil.middle_crc, resid);
3462 	} else {
3463 		con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid);
3464 	}
3465 
3466 	con->v2.out_iter.count -= resid;
3467 	out_zero_add(con, resid);
3468 }
3469 
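/*
 * Replace the entire data section with zeros.  Callers invoke this
 * before any data has been queued, so the CRC is computed purely over
 * zeros.
 */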
3470 static void prepare_zero_data(struct ceph_connection *con,
3471 			      struct ceph_msg *msg)
3472 {
3473 	dout("%s con %p\n", __func__, con);
3474 	con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(msg));
3475 	out_zero_add(con, data_len(msg));
3476 }
3477 
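/*
 * Revoke while the kvec iter still holds the message head, front
 * and/or middle (OUT_S_QUEUE_DATA).  Everything that has not been
 * fully sent is zero-substituted; if the head is still going out we
 * merely switch to OUT_S_QUEUE_ZEROS, otherwise the zeros are queued
 * right away.
 */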
3478 static void revoke_at_queue_data(struct ceph_connection *con,
3479 				 struct ceph_msg *msg)
3480 {
3481 	int boundary;
3482 	int resid;
3483 
3484 	WARN_ON(!data_len(msg));
3485 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3486 	resid = iov_iter_count(&con->v2.out_iter);
3487 
3488 	boundary = front_len(msg) + middle_len(msg);
3489 	if (resid > boundary) {
3490 		resid -= boundary;
3491 		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3492 		dout("%s con %p was sending head\n", __func__, con);
3493 		if (front_len(msg))
3494 			prepare_zero_front(con, msg, front_len(msg));
3495 		if (middle_len(msg))
3496 			prepare_zero_middle(con, msg, middle_len(msg));
3497 		prepare_zero_data(con, msg);
3498 		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3499 		con->v2.out_state = OUT_S_QUEUE_ZEROS;
3500 		return;
3501 	}
3502 
3503 	boundary = middle_len(msg);
3504 	if (resid > boundary) {
3505 		resid -= boundary;
3506 		dout("%s con %p was sending front\n", __func__, con);
3507 		prepare_zero_front(con, msg, resid);
3508 		if (middle_len(msg))
3509 			prepare_zero_middle(con, msg, middle_len(msg));
3510 		prepare_zero_data(con, msg);
3511 		queue_zeros(con, msg);
3512 		return;
3513 	}
3514 
3515 	WARN_ON(!resid);
3516 	dout("%s con %p was sending middle\n", __func__, con);
3517 	prepare_zero_middle(con, msg, resid);
3518 	prepare_zero_data(con, msg);
3519 	queue_zeros(con, msg);
3520 }
3521 
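/*
 * Revoke in the middle of streaming the data section.  Fold the part
 * of the current piece that was already sent into data_crc, then
 * replace the rest of the data with zeros and queue them.
 */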
3522 static void revoke_at_queue_data_cont(struct ceph_connection *con,
3523 				      struct ceph_msg *msg)
3524 {
3525 	int sent, resid;  /* current piece of data */
3526 
3527 	WARN_ON(!data_len(msg));
3528 	WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
3529 	resid = iov_iter_count(&con->v2.out_iter);
3530 	WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
3531 	sent = con->v2.out_bvec.bv_len - resid;
3532 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3533 
3534 	if (sent) {
3535 		con->v2.out_epil.data_crc = ceph_crc32c_page(
3536 			con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
3537 			con->v2.out_bvec.bv_offset, sent);
3538 		ceph_msg_data_advance(&con->v2.out_cursor, sent);
3539 	}
3540 	WARN_ON(resid > con->v2.out_cursor.total_resid);
3541 	con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc,
3542 						con->v2.out_cursor.total_resid);
3543 
3544 	con->v2.out_iter.count -= resid;
3545 	out_zero_add(con, con->v2.out_cursor.total_resid);
3546 	queue_zeros(con, msg);
3547 }
3548 
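/*
 * Revoke while the kvec iter holds the tail of the message (head,
 * front and/or middle plus the plain epilogue).  Empty messages and
 * the epilogue-only case need no substitution; otherwise the unsent
 * sections are zeroed and the queued epilogue is dropped from the
 * iter.
 */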
3549 static void revoke_at_finish_message(struct ceph_connection *con,
3550 				     struct ceph_msg *msg)
3551 {
3552 	int boundary;
3553 	int resid;
3554 
3555 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3556 	resid = iov_iter_count(&con->v2.out_iter);
3557 
3558 	if (!front_len(msg) && !middle_len(msg) &&
3559 	    !data_len(msg)) {
3560 		WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
3561 		dout("%s con %p was sending head (empty message) - noop\n",
3562 		     __func__, con);
3563 		return;
3564 	}
3565 
3566 	boundary = front_len(msg) + middle_len(msg) +
3567 		   CEPH_EPILOGUE_PLAIN_LEN;
3568 	if (resid > boundary) {
3569 		resid -= boundary;
3570 		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3571 		dout("%s con %p was sending head\n", __func__, con);
3572 		if (front_len(msg))
3573 			prepare_zero_front(con, msg, front_len(msg));
3574 		if (middle_len(msg))
3575 			prepare_zero_middle(con, msg, middle_len(msg));
3576 		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3577 		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3578 		con->v2.out_state = OUT_S_QUEUE_ZEROS;
3579 		return;
3580 	}
3581 
3582 	boundary = middle_len(msg) + CEPH_EPILOGUE_PLAIN_LEN;
3583 	if (resid > boundary) {
3584 		resid -= boundary;
3585 		dout("%s con %p was sending front\n", __func__, con);
3586 		prepare_zero_front(con, msg, resid);
3587 		if (middle_len(msg))
3588 			prepare_zero_middle(con, msg, middle_len(msg));
3589 		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3590 		queue_zeros(con, msg);
3591 		return;
3592 	}
3593 
3594 	boundary = CEPH_EPILOGUE_PLAIN_LEN;
3595 	if (resid > boundary) {
3596 		resid -= boundary;
3597 		dout("%s con %p was sending middle\n", __func__, con);
3598 		prepare_zero_middle(con, msg, resid);
3599 		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3600 		queue_zeros(con, msg);
3601 		return;
3602 	}
3603 
3604 	WARN_ON(!resid);
3605 	dout("%s con %p was sending epilogue - noop\n", __func__, con);
3606 }
3607 
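/*
 * Take back an outgoing message that may already be partially on the
 * wire.  In secure mode the frame went out as pre-encrypted pages and
 * cannot be patched, so this is a no-op; in crc mode the remainder of
 * the message is replaced with zeros.
 */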
3608 void ceph_con_v2_revoke(struct ceph_connection *con, struct ceph_msg *msg)
3609 {
3610 	WARN_ON(con->v2.out_zero);
3611 
3612 	if (con_secure(con)) {
3613 		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE &&
3614 			con->v2.out_state != OUT_S_FINISH_MESSAGE);
3615 		dout("%s con %p secure - noop\n", __func__, con);
3616 		return;
3617 	}
3618 
3619 	switch (con->v2.out_state) {
3620 	case OUT_S_QUEUE_DATA:
3621 		revoke_at_queue_data(con, msg);
3622 		break;
3623 	case OUT_S_QUEUE_DATA_CONT:
3624 		revoke_at_queue_data_cont(con, msg);
3625 		break;
3626 	case OUT_S_FINISH_MESSAGE:
3627 		revoke_at_finish_message(con, msg);
3628 		break;
3629 	default:
3630 		WARN(1, "bad out_state %d", con->v2.out_state);
3631 		break;
3632 	}
3633 }
3634 
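/*
 * Revoke an incoming message before any of the data section has been
 * read: skip whatever is left in the current iter plus the whole data
 * section and the plain epilogue.
 */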
3635 static void revoke_at_prepare_read_data(struct ceph_connection *con)
3636 {
3637 	int remaining;
3638 	int resid;
3639 
3640 	WARN_ON(con_secure(con));
3641 	WARN_ON(!data_len(con->in_msg));
3642 	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
3643 	resid = iov_iter_count(&con->v2.in_iter);
3644 	WARN_ON(!resid);
3645 
3646 	remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
3647 	dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
3648 	     remaining);
3649 	con->v2.in_iter.count -= resid;
3650 	set_in_skip(con, resid + remaining);
3651 	con->v2.in_state = IN_S_FINISH_SKIP;
3652 }
3653 
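/*
 * Revoke mid-way through reading the data section: advance the cursor
 * past what was already received, then skip the rest of the data and
 * the plain epilogue.
 */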
3654 static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
3655 {
3656 	int recved, resid;  /* current piece of data */
3657 	int remaining;
3658 
3659 	WARN_ON(con_secure(con));
3660 	WARN_ON(!data_len(con->in_msg));
3661 	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3662 	resid = iov_iter_count(&con->v2.in_iter);
3663 	WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
3664 	recved = con->v2.in_bvec.bv_len - resid;
3665 	dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid);
3666 
3667 	if (recved)
3668 		ceph_msg_data_advance(&con->v2.in_cursor, recved);
3669 	WARN_ON(resid > con->v2.in_cursor.total_resid);
3670 
3671 	remaining = CEPH_EPILOGUE_PLAIN_LEN;
3672 	dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
3673 	     con->v2.in_cursor.total_resid, remaining);
3674 	con->v2.in_iter.count -= resid;
3675 	set_in_skip(con, con->v2.in_cursor.total_resid + remaining);
3676 	con->v2.in_state = IN_S_FINISH_SKIP;
3677 }
3678 
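/*
 * Secure mode counterpart: skip the rest of the current encrypted
 * page and everything that has not yet been read into the enc pages
 * (in_enc_resid).
 */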
3679 static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
3680 {
3681 	int resid;  /* current enc page (not necessarily data) */
3682 
3683 	WARN_ON(!con_secure(con));
3684 	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3685 	resid = iov_iter_count(&con->v2.in_iter);
3686 	WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
3687 
3688 	dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid,
3689 	     con->v2.in_enc_resid);
3690 	con->v2.in_iter.count -= resid;
3691 	set_in_skip(con, resid + con->v2.in_enc_resid);
3692 	con->v2.in_state = IN_S_FINISH_SKIP;
3693 }
3694 
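/*
 * Sparse-read counterpart of revoke_at_prepare_read_data_cont(): skip
 * the rest of the current piece, the outstanding sparse data
 * (data_len_remain) and the plain epilogue.
 */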
3695 static void revoke_at_prepare_sparse_data(struct ceph_connection *con)
3696 {
3697 	int resid;  /* current piece of data */
3698 	int remaining;
3699 
3700 	WARN_ON(con_secure(con));
3701 	WARN_ON(!data_len(con->in_msg));
3702 	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3703 	resid = iov_iter_count(&con->v2.in_iter);
3704 	dout("%s con %p resid %d\n", __func__, con, resid);
3705 
3706 	remaining = CEPH_EPILOGUE_PLAIN_LEN + con->v2.data_len_remain;
3707 	con->v2.in_iter.count -= resid;
3708 	set_in_skip(con, resid + remaining);
3709 	con->v2.in_state = IN_S_FINISH_SKIP;
3710 }
3711 
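/*
 * Only the epilogue is left to read - just skip it.
 */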
3712 static void revoke_at_handle_epilogue(struct ceph_connection *con)
3713 {
3714 	int resid;
3715 
3716 	resid = iov_iter_count(&con->v2.in_iter);
3717 	WARN_ON(!resid);
3718 
3719 	dout("%s con %p resid %d\n", __func__, con, resid);
3720 	con->v2.in_iter.count -= resid;
3721 	set_in_skip(con, resid);
3722 	con->v2.in_state = IN_S_FINISH_SKIP;
3723 }
3724 
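/*
 * Take back an incoming message: based on how far the read path got,
 * work out how many bytes of this message are still owed by the wire
 * and switch to IN_S_FINISH_SKIP to drain them.
 */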
3725 void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
3726 {
3727 	switch (con->v2.in_state) {
3728 	case IN_S_PREPARE_SPARSE_DATA:
3729 	case IN_S_PREPARE_READ_DATA:
3730 		revoke_at_prepare_read_data(con);
3731 		break;
3732 	case IN_S_PREPARE_READ_DATA_CONT:
3733 		revoke_at_prepare_read_data_cont(con);
3734 		break;
3735 	case IN_S_PREPARE_READ_ENC_PAGE:
3736 		revoke_at_prepare_read_enc_page(con);
3737 		break;
3738 	case IN_S_PREPARE_SPARSE_DATA_CONT:
3739 		revoke_at_prepare_sparse_data(con);
3740 		break;
3741 	case IN_S_HANDLE_EPILOGUE:
3742 		revoke_at_handle_epilogue(con);
3743 		break;
3744 	default:
3745 		WARN(1, "bad in_state %d", con->v2.in_state);
3746 		break;
3747 	}
3748 }
3749 
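/*
 * The connection counts as opened once the peer's global_seq has been
 * received.
 */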
3750 bool ceph_con_v2_opened(struct ceph_connection *con)
3751 {
3752 	return con->v2.peer_global_seq;
3753 }
3754 
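/*
 * Forget everything that tied us to the previous session: cookies,
 * sequence numbers and the peer's global_seq.
 */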
3755 void ceph_con_v2_reset_session(struct ceph_connection *con)
3756 {
3757 	con->v2.client_cookie = 0;
3758 	con->v2.server_cookie = 0;
3759 	con->v2.global_seq = 0;
3760 	con->v2.connect_seq = 0;
3761 	con->v2.peer_global_seq = 0;
3762 }
3763 
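/*
 * Reset per-connection protocol state so that a connection attempt
 * can start from scratch: discard in-flight iters and queued zeros,
 * free signing kvecs, connection buffers and any encryption page
 * vectors, and wipe/free all crypto state (nonces, HMAC key, GCM
 * request and transform).
 */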
3764 void ceph_con_v2_reset_protocol(struct ceph_connection *con)
3765 {
3766 	iov_iter_truncate(&con->v2.in_iter, 0);
3767 	iov_iter_truncate(&con->v2.out_iter, 0);
3768 	con->v2.out_zero = 0;
3769 
3770 	clear_in_sign_kvecs(con);
3771 	clear_out_sign_kvecs(con);
3772 	free_conn_bufs(con);
3773 
3774 	if (con->v2.in_enc_pages) {
3775 		WARN_ON(!con->v2.in_enc_page_cnt);
3776 		ceph_release_page_vector(con->v2.in_enc_pages,
3777 					 con->v2.in_enc_page_cnt);
3778 		con->v2.in_enc_pages = NULL;
3779 		con->v2.in_enc_page_cnt = 0;
3780 	}
3781 	if (con->v2.out_enc_pages) {
3782 		WARN_ON(!con->v2.out_enc_page_cnt);
3783 		ceph_release_page_vector(con->v2.out_enc_pages,
3784 					 con->v2.out_enc_page_cnt);
3785 		con->v2.out_enc_pages = NULL;
3786 		con->v2.out_enc_page_cnt = 0;
3787 	}
3788 
3789 	con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
3790 	memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
3791 	memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
3792 
3793 	memzero_explicit(&con->v2.hmac_key, sizeof(con->v2.hmac_key));
3794 	con->v2.hmac_key_set = false;
3795 	if (con->v2.gcm_req) {
3796 		aead_request_free(con->v2.gcm_req);
3797 		con->v2.gcm_req = NULL;
3798 	}
3799 	if (con->v2.gcm_tfm) {
3800 		crypto_free_aead(con->v2.gcm_tfm);
3801 		con->v2.gcm_tfm = NULL;
3802 	}
3803 }
3804