xref: /linux/drivers/nvme/target/tcp.c (revision d458a240344c4369bf6f3da203f2779515177738)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics TCP target.
4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/crc32c.h>
11 #include <linux/err.h>
12 #include <linux/nvme-tcp.h>
13 #include <linux/nvme-keyring.h>
14 #include <net/sock.h>
15 #include <net/tcp.h>
16 #include <net/tls.h>
17 #include <net/tls_prot.h>
18 #include <net/handshake.h>
19 #include <linux/inet.h>
20 #include <linux/llist.h>
21 #include <trace/events/sock.h>
22 
23 #include "nvmet.h"
24 
25 #define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
26 #define NVMET_TCP_MAXH2CDATA		0x400000 /* 16M arbitrary limit */
27 #define NVMET_TCP_BACKLOG 128
28 
29 static int param_store_val(const char *str, int *val, int min, int max)
30 {
31 	int ret, new_val;
32 
33 	ret = kstrtoint(str, 10, &new_val);
34 	if (ret)
35 		return -EINVAL;
36 
37 	if (new_val < min || new_val > max)
38 		return -EINVAL;
39 
40 	*val = new_val;
41 	return 0;
42 }
43 
44 static int set_params(const char *str, const struct kernel_param *kp)
45 {
46 	return param_store_val(str, kp->arg, 0, INT_MAX);
47 }
48 
49 static const struct kernel_param_ops set_param_ops = {
50 	.set	= set_params,
51 	.get	= param_get_int,
52 };
53 
54 /* Define the socket priority to use for connections where it is desirable
55  * that the NIC consider performing optimized packet processing or filtering.
56  * A non-zero value is sufficient to indicate general consideration of any
57  * possible optimization.  Making it a module param allows for alternative
58  * values that may be unique for some NIC implementations.
59  */
60 static int so_priority;
61 device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
62 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
63 
64 /* Define a time period (in usecs) that io_work() shall sample an activated
65  * queue before determining it to be idle.  This optional module behavior
66  * can enable NIC solutions that support socket optimized packet processing
67  * using advanced interrupt moderation techniques.
68  */
69 static int idle_poll_period_usecs;
70 device_param_cb(idle_poll_period_usecs, &set_param_ops,
71 		&idle_poll_period_usecs, 0644);
72 MODULE_PARM_DESC(idle_poll_period_usecs,
73 		"nvmet tcp io_work poll till idle time period in usecs: Default 0");
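/*
 * Example (illustrative; the paths assume the module is built as nvmet-tcp
 * and appears as nvmet_tcp under /sys/module): both knobs can be set at
 * load time or changed at runtime, e.g.
 *
 *   modprobe nvmet-tcp so_priority=6 idle_poll_period_usecs=100
 *   echo 100 > /sys/module/nvmet_tcp/parameters/idle_poll_period_usecs
 *
 * set_params()/param_store_val() reject anything outside [0, INT_MAX].
 */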
74 
75 #ifdef CONFIG_NVME_TARGET_TCP_TLS
76 /*
77  * TLS handshake timeout
78  */
79 static int tls_handshake_timeout = 10;
80 module_param(tls_handshake_timeout, int, 0644);
81 MODULE_PARM_DESC(tls_handshake_timeout,
82 		 "nvme TLS handshake timeout in seconds (default 10)");
83 #endif
84 
85 #define NVMET_TCP_RECV_BUDGET		8
86 #define NVMET_TCP_SEND_BUDGET		8
87 #define NVMET_TCP_IO_WORK_BUDGET	64
88 
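/*
 * Send-side state machine for a command: a C2HData transfer walks
 * SEND_DATA_PDU -> SEND_DATA -> (SEND_DDGST when the data digest is
 * enabled) -> SEND_RESPONSE (the response capsule is skipped when sqhd
 * reporting is disabled), an R2T is sent on its own via SEND_R2T, and
 * commands with no data to return go straight to SEND_RESPONSE.
 * nvmet_tcp_try_send_one() advances these states.
 */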
89 enum nvmet_tcp_send_state {
90 	NVMET_TCP_SEND_DATA_PDU,
91 	NVMET_TCP_SEND_DATA,
92 	NVMET_TCP_SEND_R2T,
93 	NVMET_TCP_SEND_DDGST,
94 	NVMET_TCP_SEND_RESPONSE
95 };
96 
97 enum nvmet_tcp_recv_state {
98 	NVMET_TCP_RECV_PDU,
99 	NVMET_TCP_RECV_DATA,
100 	NVMET_TCP_RECV_DDGST,
101 	NVMET_TCP_RECV_ERR,
102 };
103 
104 enum {
105 	NVMET_TCP_F_INIT_FAILED = (1 << 0),
106 };
107 
108 struct nvmet_tcp_cmd {
109 	struct nvmet_tcp_queue		*queue;
110 	struct nvmet_req		req;
111 
112 	struct nvme_tcp_cmd_pdu		*cmd_pdu;
113 	struct nvme_tcp_rsp_pdu		*rsp_pdu;
114 	struct nvme_tcp_data_pdu	*data_pdu;
115 	struct nvme_tcp_r2t_pdu		*r2t_pdu;
116 
117 	u32				rbytes_done;
118 	u32				wbytes_done;
119 
120 	u32				pdu_len;
121 	u32				pdu_recv;
122 	int				sg_idx;
123 	char				recv_cbuf[CMSG_LEN(sizeof(char))];
124 	struct msghdr			recv_msg;
125 	struct bio_vec			*iov;
126 	u32				flags;
127 
128 	struct list_head		entry;
129 	struct llist_node		lentry;
130 
131 	/* send state */
132 	u32				offset;
133 	struct scatterlist		*cur_sg;
134 	enum nvmet_tcp_send_state	state;
135 
136 	__le32				exp_ddgst;
137 	__le32				recv_ddgst;
138 };
139 
140 enum nvmet_tcp_queue_state {
141 	NVMET_TCP_Q_CONNECTING,
142 	NVMET_TCP_Q_TLS_HANDSHAKE,
143 	NVMET_TCP_Q_LIVE,
144 	NVMET_TCP_Q_DISCONNECTING,
145 	NVMET_TCP_Q_FAILED,
146 };
147 
148 struct nvmet_tcp_queue {
149 	struct socket		*sock;
150 	struct nvmet_tcp_port	*port;
151 	struct work_struct	io_work;
152 	struct nvmet_cq		nvme_cq;
153 	struct nvmet_sq		nvme_sq;
154 	struct kref		kref;
155 
156 	/* send state */
157 	struct nvmet_tcp_cmd	*cmds;
158 	unsigned int		nr_cmds;
159 	struct list_head	free_list;
160 	struct llist_head	resp_list;
161 	struct list_head	resp_send_list;
162 	int			send_list_len;
163 	struct nvmet_tcp_cmd	*snd_cmd;
164 
165 	/* recv state */
166 	int			offset;
167 	int			left;
168 	enum nvmet_tcp_recv_state rcv_state;
169 	struct nvmet_tcp_cmd	*cmd;
170 	union nvme_tcp_pdu	pdu;
171 
172 	/* digest state */
173 	bool			hdr_digest;
174 	bool			data_digest;
175 
176 	/* TLS state */
177 	key_serial_t		tls_pskid;
178 	struct delayed_work	tls_handshake_tmo_work;
179 
180 	unsigned long           poll_end;
181 
182 	spinlock_t		state_lock;
183 	enum nvmet_tcp_queue_state state;
184 
185 	struct sockaddr_storage	sockaddr;
186 	struct sockaddr_storage	sockaddr_peer;
187 	struct work_struct	release_work;
188 
189 	int			idx;
190 	struct list_head	queue_list;
191 
192 	struct nvmet_tcp_cmd	connect;
193 
194 	struct page_frag_cache	pf_cache;
195 
196 	void (*data_ready)(struct sock *);
197 	void (*state_change)(struct sock *);
198 	void (*write_space)(struct sock *);
199 };
200 
201 struct nvmet_tcp_port {
202 	struct socket		*sock;
203 	struct work_struct	accept_work;
204 	struct nvmet_port	*nport;
205 	struct sockaddr_storage addr;
206 	void (*data_ready)(struct sock *);
207 };
208 
209 static DEFINE_IDA(nvmet_tcp_queue_ida);
210 static LIST_HEAD(nvmet_tcp_queue_list);
211 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
212 
213 static struct workqueue_struct *nvmet_tcp_wq;
214 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
215 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
216 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
217 
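/*
 * The transfer tag (ttag) carried in R2T PDUs is simply the command's index
 * in queue->cmds, which lets nvmet_tcp_handle_h2c_data_pdu() map an incoming
 * H2CData PDU back to its command.  Before the command array is allocated
 * (i.e. for the initial connect command) 0xffff is returned instead.
 */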
218 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
219 		struct nvmet_tcp_cmd *cmd)
220 {
221 	if (unlikely(!queue->nr_cmds)) {
222 		/* We didn't allocate cmds yet, send 0xffff */
223 		return USHRT_MAX;
224 	}
225 
226 	return cmd - queue->cmds;
227 }
228 
229 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
230 {
231 	return nvme_is_write(cmd->req.cmd) &&
232 		cmd->rbytes_done < cmd->req.transfer_len;
233 }
234 
235 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
236 {
237 	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
238 }
239 
240 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
241 {
242 	return !nvme_is_write(cmd->req.cmd) &&
243 		cmd->req.transfer_len > 0 &&
244 		!cmd->req.cqe->status;
245 }
246 
247 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
248 {
249 	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
250 		!cmd->rbytes_done;
251 }
252 
253 static inline struct nvmet_tcp_cmd *
254 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
255 {
256 	struct nvmet_tcp_cmd *cmd;
257 
258 	cmd = list_first_entry_or_null(&queue->free_list,
259 				struct nvmet_tcp_cmd, entry);
260 	if (!cmd)
261 		return NULL;
262 	list_del_init(&cmd->entry);
263 
264 	cmd->rbytes_done = cmd->wbytes_done = 0;
265 	cmd->pdu_len = 0;
266 	cmd->pdu_recv = 0;
267 	cmd->iov = NULL;
268 	cmd->flags = 0;
269 	return cmd;
270 }
271 
272 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
273 {
274 	if (unlikely(cmd == &cmd->queue->connect))
275 		return;
276 
277 	list_add_tail(&cmd->entry, &cmd->queue->free_list);
278 }
279 
280 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
281 {
282 	return queue->sock->sk->sk_incoming_cpu;
283 }
284 
285 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
286 {
287 	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
288 }
289 
290 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
291 {
292 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
293 }
294 
295 static inline void nvmet_tcp_hdgst(void *pdu, size_t len)
296 {
297 	put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len);
298 }
299 
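/*
 * Verify a received header digest: save the digest from the wire, recompute
 * the CRC32C in place over the header via nvmet_tcp_hdgst(), and compare the
 * two.  A missing HDGST flag or a mismatch is treated as a protocol error.
 */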
300 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
301 	void *pdu, size_t len)
302 {
303 	struct nvme_tcp_hdr *hdr = pdu;
304 	__le32 recv_digest;
305 	__le32 exp_digest;
306 
307 	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
308 		pr_err("queue %d: header digest enabled but no header digest\n",
309 			queue->idx);
310 		return -EPROTO;
311 	}
312 
313 	recv_digest = *(__le32 *)(pdu + hdr->hlen);
314 	nvmet_tcp_hdgst(pdu, len);
315 	exp_digest = *(__le32 *)(pdu + hdr->hlen);
316 	if (recv_digest != exp_digest) {
317 		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
318 			queue->idx, le32_to_cpu(recv_digest),
319 			le32_to_cpu(exp_digest));
320 		return -EPROTO;
321 	}
322 
323 	return 0;
324 }
325 
326 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
327 {
328 	struct nvme_tcp_hdr *hdr = pdu;
329 	u8 digest_len = nvmet_tcp_hdgst_len(queue);
330 	u32 len;
331 
332 	len = le32_to_cpu(hdr->plen) - hdr->hlen -
333 		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
334 
335 	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
336 		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
337 		return -EPROTO;
338 	}
339 
340 	return 0;
341 }
342 
343 /* If cmd buffers are NULL, no operation is performed */
344 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
345 {
346 	kfree(cmd->iov);
347 	sgl_free(cmd->req.sg);
348 	cmd->iov = NULL;
349 	cmd->req.sg = NULL;
350 }
351 
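/*
 * Map the data region of the PDU currently being received (cmd->pdu_len
 * bytes starting at offset cmd->rbytes_done) onto the command's scatterlist
 * as a bio_vec array and point cmd->recv_msg at it, so the payload can be
 * read from the socket directly into the data buffers.  Returns -EPROTO when
 * the requested range does not fit within the scatterlist.
 */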
352 static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
353 {
354 	struct bio_vec *iov = cmd->iov;
355 	struct scatterlist *sg;
356 	u32 length, offset, sg_offset;
357 	unsigned int sg_remaining;
358 	int nr_pages;
359 
360 	length = cmd->pdu_len;
361 	nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
362 	offset = cmd->rbytes_done;
363 	cmd->sg_idx = offset / PAGE_SIZE;
364 	sg_offset = offset % PAGE_SIZE;
365 	if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt)
366 		return -EPROTO;
367 
368 	sg = &cmd->req.sg[cmd->sg_idx];
369 	sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
370 
371 	while (length) {
372 		if (!sg_remaining)
373 			return -EPROTO;
374 
375 		if (!sg->length || sg->length <= sg_offset)
376 			return -EPROTO;
377 
378 		u32 iov_len = min_t(u32, length, sg->length - sg_offset);
379 
380 		bvec_set_page(iov, sg_page(sg), iov_len,
381 				sg->offset + sg_offset);
382 
383 		length -= iov_len;
384 		sg = sg_next(sg);
385 		sg_remaining--;
386 		iov++;
387 		sg_offset = 0;
388 	}
389 
390 	iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
391 		      nr_pages, cmd->pdu_len);
392 	return 0;
393 }
394 
395 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
396 {
397 	/*
398 	 * Keep rcv_state at RECV_ERR even for the internal -ESHUTDOWN path.
399 	 * nvmet_tcp_handle_icreq() can return -ESHUTDOWN after the ICReq has
400 	 * already been consumed and queue teardown has started.
401 	 *
402 	 * If nvmet_tcp_data_ready() or nvmet_tcp_write_space() queues
403 	 * nvmet_tcp_io_work() again before nvmet_tcp_release_queue_work()
404 	 * cancels it, the queue must not keep that old receive state.
405 	 * Otherwise the next nvmet_tcp_io_work() run can reach
406 	 * nvmet_tcp_done_recv_pdu() and try to handle the same ICReq again.
407 	 *
408 	 * That is why queue->rcv_state needs to be updated before we return.
409 	 */
410 	queue->rcv_state = NVMET_TCP_RECV_ERR;
411 	if (status == -EPIPE || status == -ECONNRESET || !queue->nvme_sq.ctrl)
412 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
413 	else
414 		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
415 }
416 
417 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
418 {
419 	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
420 	u32 len = le32_to_cpu(sgl->length);
421 
422 	if (!len)
423 		return 0;
424 
425 	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
426 			  NVME_SGL_FMT_OFFSET)) {
427 		if (!nvme_is_write(cmd->req.cmd))
428 			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
429 
430 		if (len > cmd->req.port->inline_data_size)
431 			return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
432 		cmd->pdu_len = len;
433 	}
434 	cmd->req.transfer_len += len;
435 
436 	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
437 	if (!cmd->req.sg)
438 		return NVME_SC_INTERNAL;
439 	cmd->cur_sg = cmd->req.sg;
440 
441 	if (nvmet_tcp_has_data_in(cmd)) {
442 		cmd->iov = kmalloc_objs(*cmd->iov, cmd->req.sg_cnt);
443 		if (!cmd->iov)
444 			goto err;
445 	}
446 
447 	return 0;
448 err:
449 	nvmet_tcp_free_cmd_buffers(cmd);
450 	return NVME_SC_INTERNAL;
451 }
452 
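/*
 * Compute the NVMe/TCP data digest (CRC32C over the payload) across the
 * command's scatterlist and store the little-endian result in
 * cmd->exp_ddgst; it is sent with C2HData PDUs when DDGST is negotiated and
 * compared against cmd->recv_ddgst for received H2CData payloads.
 */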
453 static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd)
454 {
455 	size_t total_len = cmd->req.transfer_len;
456 	struct scatterlist *sg = cmd->req.sg;
457 	u32 crc = ~0;
458 
459 	while (total_len) {
460 		size_t len = min_t(size_t, total_len, sg->length);
461 
462 		/*
463 		 * Note that the scatterlist does not contain any highmem pages,
464 		 * as it was allocated by sgl_alloc() with GFP_KERNEL.
465 		 */
466 		crc = crc32c(crc, sg_virt(sg), len);
467 		total_len -= len;
468 		sg = sg_next(sg);
469 	}
470 	cmd->exp_ddgst = cpu_to_le32(~crc);
471 }
472 
473 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
474 {
475 	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
476 	struct nvmet_tcp_queue *queue = cmd->queue;
477 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
478 	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
479 
480 	cmd->offset = 0;
481 	cmd->state = NVMET_TCP_SEND_DATA_PDU;
482 
483 	pdu->hdr.type = nvme_tcp_c2h_data;
484 	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
485 						NVME_TCP_F_DATA_SUCCESS : 0);
486 	pdu->hdr.hlen = sizeof(*pdu);
487 	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
488 	pdu->hdr.plen =
489 		cpu_to_le32(pdu->hdr.hlen + hdgst +
490 				cmd->req.transfer_len + ddgst);
491 	pdu->command_id = cmd->req.cqe->command_id;
492 	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
493 	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
494 
495 	if (queue->data_digest) {
496 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
497 		nvmet_tcp_calc_ddgst(cmd);
498 	}
499 
500 	if (cmd->queue->hdr_digest) {
501 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
502 		nvmet_tcp_hdgst(pdu, sizeof(*pdu));
503 	}
504 }
505 
506 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
507 {
508 	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
509 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
510 
511 	cmd->offset = 0;
512 	cmd->state = NVMET_TCP_SEND_R2T;
513 
514 	pdu->hdr.type = nvme_tcp_r2t;
515 	pdu->hdr.flags = 0;
516 	pdu->hdr.hlen = sizeof(*pdu);
517 	pdu->hdr.pdo = 0;
518 	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
519 
520 	pdu->command_id = cmd->req.cmd->common.command_id;
521 	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
522 	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
523 	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
524 	if (cmd->queue->hdr_digest) {
525 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
526 		nvmet_tcp_hdgst(pdu, sizeof(*pdu));
527 	}
528 }
529 
530 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
531 {
532 	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
533 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
534 
535 	cmd->offset = 0;
536 	cmd->state = NVMET_TCP_SEND_RESPONSE;
537 
538 	pdu->hdr.type = nvme_tcp_rsp;
539 	pdu->hdr.flags = 0;
540 	pdu->hdr.hlen = sizeof(*pdu);
541 	pdu->hdr.pdo = 0;
542 	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
543 	if (cmd->queue->hdr_digest) {
544 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
545 		nvmet_tcp_hdgst(pdu, sizeof(*pdu));
546 	}
547 }
548 
549 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
550 {
551 	struct llist_node *node;
552 	struct nvmet_tcp_cmd *cmd;
553 
554 	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
555 		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
556 		list_add(&cmd->entry, &queue->resp_send_list);
557 		queue->send_list_len++;
558 	}
559 }
560 
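/*
 * Pick the next command to transmit: take the head of resp_send_list,
 * refilling it from the lockless resp_list if it ran empty, and set up the
 * appropriate PDU (C2HData for data going to the host, R2T when more host
 * data is required, otherwise a plain response capsule).
 */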
561 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
562 {
563 	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
564 				struct nvmet_tcp_cmd, entry);
565 	if (!queue->snd_cmd) {
566 		nvmet_tcp_process_resp_list(queue);
567 		queue->snd_cmd =
568 			list_first_entry_or_null(&queue->resp_send_list,
569 					struct nvmet_tcp_cmd, entry);
570 		if (unlikely(!queue->snd_cmd))
571 			return NULL;
572 	}
573 
574 	list_del_init(&queue->snd_cmd->entry);
575 	queue->send_list_len--;
576 
577 	if (nvmet_tcp_need_data_out(queue->snd_cmd))
578 		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
579 	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
580 		nvmet_setup_r2t_pdu(queue->snd_cmd);
581 	else
582 		nvmet_setup_response_pdu(queue->snd_cmd);
583 
584 	return queue->snd_cmd;
585 }
586 
587 static void nvmet_tcp_queue_response(struct nvmet_req *req)
588 {
589 	struct nvmet_tcp_cmd *cmd =
590 		container_of(req, struct nvmet_tcp_cmd, req);
591 	struct nvmet_tcp_queue	*queue = cmd->queue;
592 	enum nvmet_tcp_recv_state queue_state;
593 	struct nvmet_tcp_cmd *queue_cmd;
594 	struct nvme_sgl_desc *sgl;
595 	u32 len;
596 
597 	/* Pairs with store_release in nvmet_prepare_receive_pdu() */
598 	queue_state = smp_load_acquire(&queue->rcv_state);
599 	queue_cmd = READ_ONCE(queue->cmd);
600 
601 	if (unlikely(cmd == queue_cmd)) {
602 		sgl = &cmd->req.cmd->common.dptr.sgl;
603 		len = le32_to_cpu(sgl->length);
604 
605 		/*
606 		 * Wait for inline data before processing the response.
607 		 * Avoid using helpers, this might happen before
608 		 * nvmet_req_init is completed.
609 		 */
610 		if (queue_state == NVMET_TCP_RECV_PDU &&
611 		    len && len <= cmd->req.port->inline_data_size &&
612 		    nvme_is_write(cmd->req.cmd))
613 			return;
614 	}
615 
616 	llist_add(&cmd->lentry, &queue->resp_list);
617 	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
618 }
619 
620 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
621 {
622 	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
623 		nvmet_tcp_queue_response(&cmd->req);
624 	else
625 		cmd->req.execute(&cmd->req);
626 }
627 
628 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
629 {
630 	struct msghdr msg = {
631 		.msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
632 	};
633 	struct bio_vec bvec;
634 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
635 	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
636 	int ret;
637 
638 	bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
639 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
640 	ret = sock_sendmsg(cmd->queue->sock, &msg);
641 	if (ret <= 0)
642 		return ret;
643 
644 	cmd->offset += ret;
645 	left -= ret;
646 
647 	if (left)
648 		return -EAGAIN;
649 
650 	cmd->state = NVMET_TCP_SEND_DATA;
651 	cmd->offset  = 0;
652 	return 1;
653 }
654 
655 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
656 {
657 	struct nvmet_tcp_queue *queue = cmd->queue;
658 	int ret;
659 
660 	while (cmd->cur_sg) {
661 		struct msghdr msg = {
662 			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
663 		};
664 		struct page *page = sg_page(cmd->cur_sg);
665 		struct bio_vec bvec;
666 		u32 left = cmd->cur_sg->length - cmd->offset;
667 
668 		if ((!last_in_batch && cmd->queue->send_list_len) ||
669 		    cmd->wbytes_done + left < cmd->req.transfer_len ||
670 		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
671 			msg.msg_flags |= MSG_MORE;
672 
673 		bvec_set_page(&bvec, page, left, cmd->offset);
674 		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
675 		ret = sock_sendmsg(cmd->queue->sock, &msg);
676 		if (ret <= 0)
677 			return ret;
678 
679 		cmd->offset += ret;
680 		cmd->wbytes_done += ret;
681 
682 		/* Done with sg? */
683 		if (cmd->offset == cmd->cur_sg->length) {
684 			cmd->cur_sg = sg_next(cmd->cur_sg);
685 			cmd->offset = 0;
686 		}
687 	}
688 
689 	if (queue->data_digest) {
690 		cmd->state = NVMET_TCP_SEND_DDGST;
691 		cmd->offset = 0;
692 	} else {
693 		if (queue->nvme_sq.sqhd_disabled) {
694 			cmd->queue->snd_cmd = NULL;
695 			nvmet_tcp_put_cmd(cmd);
696 		} else {
697 			nvmet_setup_response_pdu(cmd);
698 		}
699 	}
700 
701 	if (queue->nvme_sq.sqhd_disabled)
702 		nvmet_tcp_free_cmd_buffers(cmd);
703 
704 	return 1;
705 
706 }
707 
708 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
709 		bool last_in_batch)
710 {
711 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
712 	struct bio_vec bvec;
713 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
714 	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
715 	int ret;
716 
717 	if (!last_in_batch && cmd->queue->send_list_len)
718 		msg.msg_flags |= MSG_MORE;
719 	else
720 		msg.msg_flags |= MSG_EOR;
721 
722 	bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
723 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
724 	ret = sock_sendmsg(cmd->queue->sock, &msg);
725 	if (ret <= 0)
726 		return ret;
727 	cmd->offset += ret;
728 	left -= ret;
729 
730 	if (left)
731 		return -EAGAIN;
732 
733 	nvmet_tcp_free_cmd_buffers(cmd);
734 	cmd->queue->snd_cmd = NULL;
735 	nvmet_tcp_put_cmd(cmd);
736 	return 1;
737 }
738 
739 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
740 {
741 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
742 	struct bio_vec bvec;
743 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
744 	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
745 	int ret;
746 
747 	if (!last_in_batch && cmd->queue->send_list_len)
748 		msg.msg_flags |= MSG_MORE;
749 	else
750 		msg.msg_flags |= MSG_EOR;
751 
752 	bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
753 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
754 	ret = sock_sendmsg(cmd->queue->sock, &msg);
755 	if (ret <= 0)
756 		return ret;
757 	cmd->offset += ret;
758 	left -= ret;
759 
760 	if (left)
761 		return -EAGAIN;
762 
763 	cmd->queue->snd_cmd = NULL;
764 	return 1;
765 }
766 
767 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
768 {
769 	struct nvmet_tcp_queue *queue = cmd->queue;
770 	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
771 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
772 	struct kvec iov = {
773 		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
774 		.iov_len = left
775 	};
776 	int ret;
777 
778 	if (!last_in_batch && cmd->queue->send_list_len)
779 		msg.msg_flags |= MSG_MORE;
780 	else
781 		msg.msg_flags |= MSG_EOR;
782 
783 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
784 	if (unlikely(ret <= 0))
785 		return ret;
786 
787 	cmd->offset += ret;
788 	left -= ret;
789 
790 	if (left)
791 		return -EAGAIN;
792 
793 	if (queue->nvme_sq.sqhd_disabled) {
794 		cmd->queue->snd_cmd = NULL;
795 		nvmet_tcp_put_cmd(cmd);
796 	} else {
797 		nvmet_setup_response_pdu(cmd);
798 	}
799 	return 1;
800 }
801 
802 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
803 		bool last_in_batch)
804 {
805 	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
806 	int ret = 0;
807 
808 	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
809 		cmd = nvmet_tcp_fetch_cmd(queue);
810 		if (unlikely(!cmd))
811 			return 0;
812 	}
813 
814 	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
815 		ret = nvmet_try_send_data_pdu(cmd);
816 		if (ret <= 0)
817 			goto done_send;
818 	}
819 
820 	if (cmd->state == NVMET_TCP_SEND_DATA) {
821 		ret = nvmet_try_send_data(cmd, last_in_batch);
822 		if (ret <= 0)
823 			goto done_send;
824 	}
825 
826 	if (cmd->state == NVMET_TCP_SEND_DDGST) {
827 		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
828 		if (ret <= 0)
829 			goto done_send;
830 	}
831 
832 	if (cmd->state == NVMET_TCP_SEND_R2T) {
833 		ret = nvmet_try_send_r2t(cmd, last_in_batch);
834 		if (ret <= 0)
835 			goto done_send;
836 	}
837 
838 	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
839 		ret = nvmet_try_send_response(cmd, last_in_batch);
840 
841 done_send:
842 	if (ret < 0) {
843 		if (ret == -EAGAIN)
844 			return 0;
845 		return ret;
846 	}
847 
848 	return 1;
849 }
850 
851 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
852 		int budget, int *sends)
853 {
854 	int i, ret = 0;
855 
856 	for (i = 0; i < budget; i++) {
857 		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
858 		if (unlikely(ret < 0)) {
859 			nvmet_tcp_socket_error(queue, ret);
860 			goto done;
861 		} else if (ret == 0) {
862 			break;
863 		}
864 		(*sends)++;
865 	}
866 done:
867 	return ret;
868 }
869 
870 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
871 {
872 	queue->offset = 0;
873 	queue->left = sizeof(struct nvme_tcp_hdr);
874 	WRITE_ONCE(queue->cmd, NULL);
875 	/* Ensure rcv_state is visible only after queue->cmd is set */
876 	smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
877 }
878 
879 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
880 {
881 	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
882 	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
883 	struct msghdr msg = {};
884 	struct kvec iov;
885 	int ret;
886 
887 	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
888 		pr_err("bad nvme-tcp pdu length (%d)\n",
889 			le32_to_cpu(icreq->hdr.plen));
890 		return -EPROTO;
891 	}
892 
893 	if (icreq->pfv != NVME_TCP_PFV_1_0) {
894 		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
895 		return -EPROTO;
896 	}
897 
898 	if (icreq->hpda != 0) {
899 		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
900 			icreq->hpda);
901 		return -EPROTO;
902 	}
903 
904 	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
905 	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
906 
907 	memset(icresp, 0, sizeof(*icresp));
908 	icresp->hdr.type = nvme_tcp_icresp;
909 	icresp->hdr.hlen = sizeof(*icresp);
910 	icresp->hdr.pdo = 0;
911 	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
912 	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
913 	icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
914 	icresp->cpda = 0;
915 	if (queue->hdr_digest)
916 		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
917 	if (queue->data_digest)
918 		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
919 
920 	iov.iov_base = icresp;
921 	iov.iov_len = sizeof(*icresp);
922 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
923 	if (ret < 0) {
924 		spin_lock_bh(&queue->state_lock);
925 		if (queue->state == NVMET_TCP_Q_DISCONNECTING) {
926 			spin_unlock_bh(&queue->state_lock);
927 			return -ESHUTDOWN;
928 		}
929 		queue->state = NVMET_TCP_Q_FAILED;
930 		spin_unlock_bh(&queue->state_lock);
931 		return ret; /* queue removal will cleanup */
932 	}
933 
934 	spin_lock_bh(&queue->state_lock);
935 	if (queue->state == NVMET_TCP_Q_DISCONNECTING) {
936 		spin_unlock_bh(&queue->state_lock);
937 		/* Tell nvmet_tcp_socket_error() teardown is in progress. */
938 		return -ESHUTDOWN;
939 	}
940 	queue->state = NVMET_TCP_Q_LIVE;
941 	spin_unlock_bh(&queue->state_lock);
942 	nvmet_prepare_receive_pdu(queue);
943 	return 0;
944 }
945 
946 static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
947 		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
948 {
949 	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
950 	int ret;
951 
952 	/*
953 	 * This command has not been processed yet, hence we are trying to
954 	 * figure out if there is still pending data left to receive. If
955 	 * there is none, we can simply prepare for the next pdu and bail out,
956 	 * otherwise we will need to prepare a buffer and receive the
957 	 * stale data before continuing forward.
958 	 */
959 	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
960 	    data_len > cmd->req.port->inline_data_size) {
961 		nvmet_prepare_receive_pdu(queue);
962 		return 0;
963 	}
964 
965 	ret = nvmet_tcp_map_data(cmd);
966 	if (unlikely(ret)) {
967 		pr_err("queue %d: failed to map data\n", queue->idx);
968 		return -EPROTO;
969 	}
970 
971 	queue->rcv_state = NVMET_TCP_RECV_DATA;
972 	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
973 	ret = nvmet_tcp_build_pdu_iovec(cmd);
974 	if (unlikely(ret))
975 		pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
976 
977 	return ret;
978 }
979 
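/*
 * Validate an incoming H2CData PDU before accepting its payload: the ttag
 * must reference a valid command, the data offset must match what was
 * already received (cmd->rbytes_done), and the declared data length must
 * agree with the PDU length and stay within NVMET_TCP_MAXH2CDATA.  Any
 * violation is rejected as a protocol error (-EPROTO).
 */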
980 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
981 {
982 	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
983 	struct nvmet_tcp_cmd *cmd;
984 	unsigned int exp_data_len;
985 
986 	if (likely(queue->nr_cmds)) {
987 		if (unlikely(data->ttag >= queue->nr_cmds)) {
988 			pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
989 				queue->idx, data->ttag, queue->nr_cmds);
990 			goto err_proto;
991 		}
992 		cmd = &queue->cmds[data->ttag];
993 	} else {
994 		cmd = &queue->connect;
995 	}
996 
997 	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
998 		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
999 			data->ttag, le32_to_cpu(data->data_offset),
1000 			cmd->rbytes_done);
1001 		goto err_proto;
1002 	}
1003 
1004 	exp_data_len = le32_to_cpu(data->hdr.plen) -
1005 			nvmet_tcp_hdgst_len(queue) -
1006 			nvmet_tcp_ddgst_len(queue) -
1007 			sizeof(*data);
1008 
1009 	cmd->pdu_len = le32_to_cpu(data->data_length);
1010 	if (unlikely(cmd->pdu_len != exp_data_len ||
1011 		     cmd->pdu_len == 0 ||
1012 		     cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
1013 		pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
1014 		goto err_proto;
1015 	}
1016 	/*
1017 	* Ensure command data structures are initialized. We must check both
1018 	* cmd->req.sg and cmd->iov because they can have different NULL states:
1019 	* - Uninitialized commands: both NULL
1020 	* - READ commands: cmd->req.sg allocated, cmd->iov NULL
1021 	* - WRITE commands: both allocated
1022 	*/
1023 	if (unlikely(!cmd->req.sg || !cmd->iov)) {
1024 		pr_err("queue %d: H2CData PDU received for invalid command state (ttag %u)\n",
1025 			queue->idx, data->ttag);
1026 		goto err_proto;
1027 	}
1028 	cmd->pdu_recv = 0;
1029 	if (unlikely(nvmet_tcp_build_pdu_iovec(cmd))) {
1030 		pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
1031 		goto err_proto;
1032 	}
1033 	queue->cmd = cmd;
1034 	queue->rcv_state = NVMET_TCP_RECV_DATA;
1035 
1036 	return 0;
1037 
1038 err_proto:
1039 	/* FIXME: use proper transport errors */
1040 	return -EPROTO;
1041 }
1042 
1043 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
1044 {
1045 	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1046 	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
1047 	struct nvmet_req *req;
1048 	int ret;
1049 
1050 	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1051 		if (hdr->type != nvme_tcp_icreq) {
1052 			pr_err("unexpected pdu type (%d) before icreq\n",
1053 				hdr->type);
1054 			return -EPROTO;
1055 		}
1056 		return nvmet_tcp_handle_icreq(queue);
1057 	}
1058 
1059 	if (unlikely(hdr->type == nvme_tcp_icreq)) {
1060 		pr_err("queue %d: received icreq pdu in state %d\n",
1061 			queue->idx, queue->state);
1062 		return -EPROTO;
1063 	}
1064 
1065 	if (hdr->type == nvme_tcp_h2c_data) {
1066 		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
1067 		if (unlikely(ret))
1068 			return ret;
1069 		return 0;
1070 	}
1071 
1072 	queue->cmd = nvmet_tcp_get_cmd(queue);
1073 	if (unlikely(!queue->cmd)) {
1074 		/* This should never happen */
1075 		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
1076 			queue->idx, queue->nr_cmds, queue->send_list_len,
1077 			nvme_cmd->common.opcode);
1078 		return -ENOMEM;
1079 	}
1080 
1081 	req = &queue->cmd->req;
1082 	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
1083 
1084 	if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) {
1085 		pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n",
1086 			req->cmd, req->cmd->common.command_id,
1087 			req->cmd->common.opcode,
1088 			le32_to_cpu(req->cmd->common.dptr.sgl.length),
1089 			le16_to_cpu(req->cqe->status));
1090 
1091 		return nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1092 	}
1093 
1094 	ret = nvmet_tcp_map_data(queue->cmd);
1095 	if (unlikely(ret)) {
1096 		pr_err("queue %d: failed to map data\n", queue->idx);
1097 		if (nvmet_tcp_has_inline_data(queue->cmd))
1098 			return -EPROTO;
1099 
1100 		nvmet_req_complete(req, ret);
1101 		ret = -EAGAIN;
1102 		goto out;
1103 	}
1104 
1105 	if (nvmet_tcp_need_data_in(queue->cmd)) {
1106 		if (nvmet_tcp_has_inline_data(queue->cmd)) {
1107 			queue->rcv_state = NVMET_TCP_RECV_DATA;
1108 			ret = nvmet_tcp_build_pdu_iovec(queue->cmd);
1109 			if (unlikely(ret))
1110 				pr_err("queue %d: failed to build PDU iovec\n",
1111 					queue->idx);
1112 			return ret;
1113 		}
1114 		/* send back R2T */
1115 		nvmet_tcp_queue_response(&queue->cmd->req);
1116 		goto out;
1117 	}
1118 
1119 	queue->cmd->req.execute(&queue->cmd->req);
1120 out:
1121 	nvmet_prepare_receive_pdu(queue);
1122 	return ret;
1123 }
1124 
1125 static const u8 nvme_tcp_pdu_sizes[] = {
1126 	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
1127 	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
1128 	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
1129 };
1130 
1131 static inline u8 nvmet_tcp_pdu_size(u8 type)
1132 {
1133 	size_t idx = type;
1134 
1135 	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1136 		nvme_tcp_pdu_sizes[idx]) ?
1137 			nvme_tcp_pdu_sizes[idx] : 0;
1138 }
1139 
1140 static inline bool nvmet_tcp_pdu_valid(u8 type)
1141 {
1142 	switch (type) {
1143 	case nvme_tcp_icreq:
1144 	case nvme_tcp_cmd:
1145 	case nvme_tcp_h2c_data:
1146 		/* fallthru */
1147 		return true;
1148 	}
1149 
1150 	return false;
1151 }
1152 
1153 static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
1154 		struct msghdr *msg, char *cbuf)
1155 {
1156 	struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
1157 	u8 ctype, level, description;
1158 	int ret = 0;
1159 
1160 	ctype = tls_get_record_type(queue->sock->sk, cmsg);
1161 	switch (ctype) {
1162 	case 0:
1163 		break;
1164 	case TLS_RECORD_TYPE_DATA:
1165 		break;
1166 	case TLS_RECORD_TYPE_ALERT:
1167 		tls_alert_recv(queue->sock->sk, msg, &level, &description);
1168 		if (level == TLS_ALERT_LEVEL_FATAL) {
1169 			pr_err("queue %d: TLS Alert desc %u\n",
1170 			       queue->idx, description);
1171 			ret = -ENOTCONN;
1172 		} else {
1173 			pr_warn("queue %d: TLS Alert desc %u\n",
1174 			       queue->idx, description);
1175 			ret = -EAGAIN;
1176 		}
1177 		break;
1178 	default:
1179 		/* discard this record type */
1180 		pr_err("queue %d: TLS record %d unhandled\n",
1181 		       queue->idx, ctype);
1182 		ret = -EAGAIN;
1183 		break;
1184 	}
1185 	return ret;
1186 }
1187 
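/*
 * Receive a PDU in two steps: first the common 8-byte header
 * (struct nvme_tcp_hdr), then, once type and hlen have been validated, the
 * remainder of the type-specific header plus the optional header digest.
 * Short reads leave their progress in queue->offset/queue->left and return
 * -EAGAIN so the next invocation resumes where this one stopped.
 */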
1188 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1189 {
1190 	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1191 	int len, ret;
1192 	struct kvec iov;
1193 	char cbuf[CMSG_LEN(sizeof(char))] = {};
1194 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1195 
1196 recv:
1197 	iov.iov_base = (void *)&queue->pdu + queue->offset;
1198 	iov.iov_len = queue->left;
1199 	if (queue->tls_pskid) {
1200 		msg.msg_control = cbuf;
1201 		msg.msg_controllen = sizeof(cbuf);
1202 	}
1203 	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1204 			iov.iov_len, msg.msg_flags);
1205 	if (unlikely(len < 0))
1206 		return len;
1207 	if (queue->tls_pskid) {
1208 		ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1209 		if (ret < 0)
1210 			return ret;
1211 	}
1212 
1213 	queue->offset += len;
1214 	queue->left -= len;
1215 	if (queue->left)
1216 		return -EAGAIN;
1217 
1218 	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1219 		u8 hdgst = nvmet_tcp_hdgst_len(queue);
1220 
1221 		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1222 			pr_err("unexpected pdu type %d\n", hdr->type);
1223 			return -EIO;
1224 		}
1225 
1226 		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1227 			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1228 			return -EIO;
1229 		}
1230 
1231 		queue->left = hdr->hlen - queue->offset + hdgst;
1232 		goto recv;
1233 	}
1234 
1235 	if (queue->hdr_digest &&
1236 	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen))
1237 		return -EPROTO;
1238 
1239 	if (queue->data_digest &&
1240 	    nvmet_tcp_check_ddgst(queue, &queue->pdu))
1241 		return -EPROTO;
1242 
1243 	return nvmet_tcp_done_recv_pdu(queue);
1244 }
1245 
1246 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1247 {
1248 	struct nvmet_tcp_queue *queue = cmd->queue;
1249 
1250 	nvmet_tcp_calc_ddgst(cmd);
1251 	queue->offset = 0;
1252 	queue->left = NVME_TCP_DIGEST_LENGTH;
1253 	queue->rcv_state = NVMET_TCP_RECV_DDGST;
1254 }
1255 
1256 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1257 {
1258 	struct nvmet_tcp_cmd  *cmd = queue->cmd;
1259 	int len, ret;
1260 
1261 	while (msg_data_left(&cmd->recv_msg)) {
1262 		len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1263 			cmd->recv_msg.msg_flags);
1264 		if (len <= 0)
1265 			return len;
1266 		if (queue->tls_pskid) {
1267 			ret = nvmet_tcp_tls_record_ok(cmd->queue,
1268 					&cmd->recv_msg, cmd->recv_cbuf);
1269 			if (ret < 0)
1270 				return ret;
1271 		}
1272 
1273 		cmd->pdu_recv += len;
1274 		cmd->rbytes_done += len;
1275 	}
1276 
1277 	if (queue->data_digest) {
1278 		nvmet_tcp_prep_recv_ddgst(cmd);
1279 		return 0;
1280 	}
1281 
1282 	if (cmd->rbytes_done == cmd->req.transfer_len)
1283 		nvmet_tcp_execute_request(cmd);
1284 
1285 	nvmet_prepare_receive_pdu(queue);
1286 	return 0;
1287 }
1288 
1289 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1290 {
1291 	struct nvmet_tcp_cmd *cmd = queue->cmd;
1292 	int ret, len;
1293 	char cbuf[CMSG_LEN(sizeof(char))] = {};
1294 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1295 	struct kvec iov = {
1296 		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1297 		.iov_len = queue->left
1298 	};
1299 
1300 	if (queue->tls_pskid) {
1301 		msg.msg_control = cbuf;
1302 		msg.msg_controllen = sizeof(cbuf);
1303 	}
1304 	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1305 			iov.iov_len, msg.msg_flags);
1306 	if (unlikely(len < 0))
1307 		return len;
1308 	if (queue->tls_pskid) {
1309 		ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1310 		if (ret < 0)
1311 			return ret;
1312 	}
1313 
1314 	queue->offset += len;
1315 	queue->left -= len;
1316 	if (queue->left)
1317 		return -EAGAIN;
1318 
1319 	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1320 		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1321 			queue->idx, cmd->req.cmd->common.command_id,
1322 			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1323 			le32_to_cpu(cmd->exp_ddgst));
1324 		if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED)) {
1325 			cmd->req.cqe->status = NVME_SC_CMD_SEQ_ERROR;
1326 			nvmet_req_uninit(&cmd->req);
1327 		}
1328 		nvmet_tcp_free_cmd_buffers(cmd);
1329 		ret = -EPROTO;
1330 		goto out;
1331 	}
1332 
1333 	if (cmd->rbytes_done == cmd->req.transfer_len)
1334 		nvmet_tcp_execute_request(cmd);
1335 
1336 	ret = 0;
1337 out:
1338 	nvmet_prepare_receive_pdu(queue);
1339 	return ret;
1340 }
1341 
1342 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1343 {
1344 	int result = 0;
1345 
1346 	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1347 		return 0;
1348 
1349 	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1350 		result = nvmet_tcp_try_recv_pdu(queue);
1351 		if (result != 0)
1352 			goto done_recv;
1353 	}
1354 
1355 	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1356 		result = nvmet_tcp_try_recv_data(queue);
1357 		if (result != 0)
1358 			goto done_recv;
1359 	}
1360 
1361 	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1362 		result = nvmet_tcp_try_recv_ddgst(queue);
1363 		if (result != 0)
1364 			goto done_recv;
1365 	}
1366 
1367 done_recv:
1368 	if (result < 0) {
1369 		if (result == -EAGAIN)
1370 			return 0;
1371 		return result;
1372 	}
1373 	return 1;
1374 }
1375 
1376 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1377 		int budget, int *recvs)
1378 {
1379 	int i, ret = 0;
1380 
1381 	for (i = 0; i < budget; i++) {
1382 		ret = nvmet_tcp_try_recv_one(queue);
1383 		if (unlikely(ret < 0)) {
1384 			nvmet_tcp_socket_error(queue, ret);
1385 			goto done;
1386 		} else if (ret == 0) {
1387 			break;
1388 		}
1389 		(*recvs)++;
1390 	}
1391 done:
1392 	return ret;
1393 }
1394 
1395 static void nvmet_tcp_release_queue(struct kref *kref)
1396 {
1397 	struct nvmet_tcp_queue *queue =
1398 		container_of(kref, struct nvmet_tcp_queue, kref);
1399 
1400 	WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
1401 	queue_work(nvmet_wq, &queue->release_work);
1402 }
1403 
1404 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1405 {
1406 	spin_lock_bh(&queue->state_lock);
1407 	if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1408 		/* Socket closed during handshake */
1409 		tls_handshake_cancel(queue->sock->sk);
1410 	}
1411 	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1412 		queue->state = NVMET_TCP_Q_DISCONNECTING;
1413 		kref_put(&queue->kref, nvmet_tcp_release_queue);
1414 	}
1415 	spin_unlock_bh(&queue->state_lock);
1416 }
1417 
1418 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1419 {
1420 	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1421 }
1422 
1423 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1424 		int ops)
1425 {
1426 	if (!idle_poll_period_usecs)
1427 		return false;
1428 
1429 	if (ops)
1430 		nvmet_tcp_arm_queue_deadline(queue);
1431 
1432 	return !time_after(jiffies, queue->poll_end);
1433 }
1434 
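/*
 * Main per-queue worker: alternate bounded receive and send passes
 * (NVMET_TCP_RECV_BUDGET / NVMET_TCP_SEND_BUDGET) until nothing is pending
 * or NVMET_TCP_IO_WORK_BUDGET operations have been completed, then requeue
 * itself if work remains or the idle-poll deadline has not yet expired.
 */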
1435 static void nvmet_tcp_io_work(struct work_struct *w)
1436 {
1437 	struct nvmet_tcp_queue *queue =
1438 		container_of(w, struct nvmet_tcp_queue, io_work);
1439 	bool pending;
1440 	int ret, ops = 0;
1441 
1442 	do {
1443 		pending = false;
1444 
1445 		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1446 		if (ret > 0)
1447 			pending = true;
1448 		else if (ret < 0)
1449 			return;
1450 
1451 		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1452 		if (ret > 0)
1453 			pending = true;
1454 		else if (ret < 0)
1455 			return;
1456 
1457 	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1458 
1459 	/*
1460 	 * Requeue the worker if idle deadline period is in progress or any
1461 	 * ops activity was recorded during the do-while loop above.
1462 	 */
1463 	if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1464 		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1465 }
1466 
1467 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1468 		struct nvmet_tcp_cmd *c)
1469 {
1470 	u8 hdgst = nvmet_tcp_hdgst_len(queue);
1471 
1472 	c->queue = queue;
1473 	c->req.port = queue->port->nport;
1474 
1475 	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1476 			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1477 	if (!c->cmd_pdu)
1478 		return -ENOMEM;
1479 	c->req.cmd = &c->cmd_pdu->cmd;
1480 
1481 	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1482 			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1483 	if (!c->rsp_pdu)
1484 		goto out_free_cmd;
1485 	c->req.cqe = &c->rsp_pdu->cqe;
1486 
1487 	c->data_pdu = page_frag_alloc(&queue->pf_cache,
1488 			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1489 	if (!c->data_pdu)
1490 		goto out_free_rsp;
1491 
1492 	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1493 			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1494 	if (!c->r2t_pdu)
1495 		goto out_free_data;
1496 
1497 	if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1498 		c->recv_msg.msg_control = c->recv_cbuf;
1499 		c->recv_msg.msg_controllen = sizeof(c->recv_cbuf);
1500 	}
1501 	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1502 
1503 	list_add_tail(&c->entry, &queue->free_list);
1504 
1505 	return 0;
1506 out_free_data:
1507 	page_frag_free(c->data_pdu);
1508 out_free_rsp:
1509 	page_frag_free(c->rsp_pdu);
1510 out_free_cmd:
1511 	page_frag_free(c->cmd_pdu);
1512 	return -ENOMEM;
1513 }
1514 
1515 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1516 {
1517 	page_frag_free(c->r2t_pdu);
1518 	page_frag_free(c->data_pdu);
1519 	page_frag_free(c->rsp_pdu);
1520 	page_frag_free(c->cmd_pdu);
1521 }
1522 
1523 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1524 {
1525 	struct nvmet_tcp_cmd *cmds;
1526 	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1527 
1528 	cmds = kvzalloc_objs(struct nvmet_tcp_cmd, nr_cmds);
1529 	if (!cmds)
1530 		goto out;
1531 
1532 	for (i = 0; i < nr_cmds; i++) {
1533 		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1534 		if (ret)
1535 			goto out_free;
1536 	}
1537 
1538 	queue->cmds = cmds;
1539 
1540 	return 0;
1541 out_free:
1542 	while (--i >= 0)
1543 		nvmet_tcp_free_cmd(cmds + i);
1544 	kvfree(cmds);
1545 out:
1546 	return ret;
1547 }
1548 
1549 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1550 {
1551 	struct nvmet_tcp_cmd *cmds = queue->cmds;
1552 	int i;
1553 
1554 	for (i = 0; i < queue->nr_cmds; i++)
1555 		nvmet_tcp_free_cmd(cmds + i);
1556 
1557 	nvmet_tcp_free_cmd(&queue->connect);
1558 	kvfree(cmds);
1559 }
1560 
1561 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1562 {
1563 	struct socket *sock = queue->sock;
1564 
1565 	if (!queue->state_change)
1566 		return;
1567 
1568 	write_lock_bh(&sock->sk->sk_callback_lock);
1569 	sock->sk->sk_data_ready =  queue->data_ready;
1570 	sock->sk->sk_state_change = queue->state_change;
1571 	sock->sk->sk_write_space = queue->write_space;
1572 	sock->sk->sk_user_data = NULL;
1573 	write_unlock_bh(&sock->sk->sk_callback_lock);
1574 }
1575 
1576 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1577 {
1578 	struct nvmet_tcp_cmd *cmd = queue->cmds;
1579 	int i;
1580 
1581 	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1582 		if (nvmet_tcp_need_data_in(cmd))
1583 			nvmet_req_uninit(&cmd->req);
1584 	}
1585 
1586 	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1587 		/* failed in connect */
1588 		nvmet_req_uninit(&queue->connect.req);
1589 	}
1590 }
1591 
1592 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
1593 {
1594 	struct nvmet_tcp_cmd *cmd = queue->cmds;
1595 	int i;
1596 
1597 	for (i = 0; i < queue->nr_cmds; i++, cmd++)
1598 		nvmet_tcp_free_cmd_buffers(cmd);
1599 	nvmet_tcp_free_cmd_buffers(&queue->connect);
1600 }
1601 
1602 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1603 {
1604 	struct nvmet_tcp_queue *queue =
1605 		container_of(w, struct nvmet_tcp_queue, release_work);
1606 
1607 	mutex_lock(&nvmet_tcp_queue_mutex);
1608 	list_del_init(&queue->queue_list);
1609 	mutex_unlock(&nvmet_tcp_queue_mutex);
1610 
1611 	nvmet_tcp_restore_socket_callbacks(queue);
1612 	cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1613 	cancel_work_sync(&queue->io_work);
1614 	/* stop accepting incoming data */
1615 	queue->rcv_state = NVMET_TCP_RECV_ERR;
1616 
1617 	nvmet_sq_put_tls_key(&queue->nvme_sq);
1618 	nvmet_tcp_uninit_data_in_cmds(queue);
1619 	nvmet_sq_destroy(&queue->nvme_sq);
1620 	nvmet_cq_put(&queue->nvme_cq);
1621 	cancel_work_sync(&queue->io_work);
1622 	nvmet_tcp_free_cmd_data_in_buffers(queue);
1623 	/* ->sock will be released by fput() */
1624 	fput(queue->sock->file);
1625 	nvmet_tcp_free_cmds(queue);
1626 	ida_free(&nvmet_tcp_queue_ida, queue->idx);
1627 	page_frag_cache_drain(&queue->pf_cache);
1628 	kfree(queue);
1629 }
1630 
1631 static void nvmet_tcp_data_ready(struct sock *sk)
1632 {
1633 	struct nvmet_tcp_queue *queue;
1634 
1635 	trace_sk_data_ready(sk);
1636 
1637 	read_lock_bh(&sk->sk_callback_lock);
1638 	queue = sk->sk_user_data;
1639 	if (likely(queue)) {
1640 		if (queue->data_ready)
1641 			queue->data_ready(sk);
1642 		if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)
1643 			queue_work_on(queue_cpu(queue), nvmet_tcp_wq,
1644 				      &queue->io_work);
1645 	}
1646 	read_unlock_bh(&sk->sk_callback_lock);
1647 }
1648 
1649 static void nvmet_tcp_write_space(struct sock *sk)
1650 {
1651 	struct nvmet_tcp_queue *queue;
1652 
1653 	read_lock_bh(&sk->sk_callback_lock);
1654 	queue = sk->sk_user_data;
1655 	if (unlikely(!queue))
1656 		goto out;
1657 
1658 	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1659 		queue->write_space(sk);
1660 		goto out;
1661 	}
1662 
1663 	if (sk_stream_is_writeable(sk)) {
1664 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1665 		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1666 	}
1667 out:
1668 	read_unlock_bh(&sk->sk_callback_lock);
1669 }
1670 
1671 static void nvmet_tcp_state_change(struct sock *sk)
1672 {
1673 	struct nvmet_tcp_queue *queue;
1674 
1675 	read_lock_bh(&sk->sk_callback_lock);
1676 	queue = sk->sk_user_data;
1677 	if (!queue)
1678 		goto done;
1679 
1680 	switch (sk->sk_state) {
1681 	case TCP_FIN_WAIT2:
1682 	case TCP_LAST_ACK:
1683 		break;
1684 	case TCP_FIN_WAIT1:
1685 	case TCP_CLOSE_WAIT:
1686 	case TCP_CLOSE:
1687 		/* FALLTHRU */
1688 		nvmet_tcp_schedule_release_queue(queue);
1689 		break;
1690 	default:
1691 		pr_warn("queue %d unhandled state %d\n",
1692 			queue->idx, sk->sk_state);
1693 	}
1694 done:
1695 	read_unlock_bh(&sk->sk_callback_lock);
1696 }
1697 
1698 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1699 {
1700 	struct socket *sock = queue->sock;
1701 	struct inet_sock *inet = inet_sk(sock->sk);
1702 	int ret;
1703 
1704 	ret = kernel_getsockname(sock,
1705 		(struct sockaddr *)&queue->sockaddr);
1706 	if (ret < 0)
1707 		return ret;
1708 
1709 	ret = kernel_getpeername(sock,
1710 		(struct sockaddr *)&queue->sockaddr_peer);
1711 	if (ret < 0)
1712 		return ret;
1713 
1714 	/*
1715 	 * Cleanup whatever is sitting in the TCP transmit queue on socket
1716 	 * close. This is done to prevent stale data from being sent should
1717 	 * the network connection be restored before TCP times out.
1718 	 */
1719 	sock_no_linger(sock->sk);
1720 
1721 	if (so_priority > 0)
1722 		sock_set_priority(sock->sk, so_priority);
1723 
1724 	/* Set socket type of service */
1725 	if (inet->rcv_tos > 0)
1726 		ip_sock_set_tos(sock->sk, inet->rcv_tos);
1727 
1728 	ret = 0;
1729 	write_lock_bh(&sock->sk->sk_callback_lock);
1730 	if (sock->sk->sk_state != TCP_ESTABLISHED) {
1731 		/*
1732 		 * If the socket is already closing, don't even start
1733 		 * consuming it
1734 		 */
1735 		ret = -ENOTCONN;
1736 	} else {
1737 		sock->sk->sk_user_data = queue;
1738 		queue->data_ready = sock->sk->sk_data_ready;
1739 		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1740 		queue->state_change = sock->sk->sk_state_change;
1741 		sock->sk->sk_state_change = nvmet_tcp_state_change;
1742 		queue->write_space = sock->sk->sk_write_space;
1743 		sock->sk->sk_write_space = nvmet_tcp_write_space;
1744 		if (idle_poll_period_usecs)
1745 			nvmet_tcp_arm_queue_deadline(queue);
1746 		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1747 	}
1748 	write_unlock_bh(&sock->sk->sk_callback_lock);
1749 
1750 	return ret;
1751 }
1752 
1753 #ifdef CONFIG_NVME_TARGET_TCP_TLS
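/*
 * Peek (MSG_PEEK) at the first bytes of a freshly accepted socket to decide
 * whether the host sent a cleartext ICReq instead of starting a TLS
 * handshake.  Returns the peeked length when an ICReq is detected, 0 when
 * the connection should go through the TLS handshake (or the port mandates
 * a secure channel), and a negative errno on error.
 */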
1754 static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
1755 {
1756 	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1757 	int len, ret;
1758 	struct kvec iov = {
1759 		.iov_base = (u8 *)&queue->pdu + queue->offset,
1760 		.iov_len = sizeof(struct nvme_tcp_hdr),
1761 	};
1762 	char cbuf[CMSG_LEN(sizeof(char))] = {};
1763 	struct msghdr msg = {
1764 		.msg_control = cbuf,
1765 		.msg_controllen = sizeof(cbuf),
1766 		.msg_flags = MSG_PEEK,
1767 	};
1768 
1769 	if (nvmet_port_secure_channel_required(queue->port->nport))
1770 		return 0;
1771 
1772 	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1773 			iov.iov_len, msg.msg_flags);
1774 	if (unlikely(len < 0)) {
1775 		pr_debug("queue %d: peek error %d\n",
1776 			 queue->idx, len);
1777 		return len;
1778 	}
1779 
1780 	ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1781 	if (ret < 0)
1782 		return ret;
1783 
1784 	if (len < sizeof(struct nvme_tcp_hdr)) {
1785 		pr_debug("queue %d: short read, %d bytes missing\n",
1786 			 queue->idx, (int)iov.iov_len - len);
1787 		return -EAGAIN;
1788 	}
1789 	pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n",
1790 		 queue->idx, hdr->type, hdr->hlen, hdr->plen,
1791 		 (int)sizeof(struct nvme_tcp_icreq_pdu));
1792 	if (hdr->type == nvme_tcp_icreq &&
1793 	    hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
1794 	    hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) {
1795 		pr_debug("queue %d: icreq detected\n",
1796 			 queue->idx);
1797 		return len;
1798 	}
1799 	return 0;
1800 }
1801 
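/*
 * Look up the PSK negotiated during the TLS handshake by its key serial and
 * attach it to the queue's nvme_sq; on lookup failure mark the queue as
 * failed so it will be torn down.
 */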
1802 static int nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue *queue,
1803 				    key_serial_t peerid)
1804 {
1805 	struct key *tls_key = nvme_tls_key_lookup(peerid);
1806 	int status = 0;
1807 
1808 	if (IS_ERR(tls_key)) {
1809 		pr_warn("%s: queue %d failed to lookup key %x\n",
1810 			__func__, queue->idx, peerid);
1811 		spin_lock_bh(&queue->state_lock);
1812 		queue->state = NVMET_TCP_Q_FAILED;
1813 		spin_unlock_bh(&queue->state_lock);
1814 		status = PTR_ERR(tls_key);
1815 	} else {
1816 		pr_debug("%s: queue %d using TLS PSK %x\n",
1817 			 __func__, queue->idx, peerid);
1818 		queue->nvme_sq.tls_key = tls_key;
1819 	}
1820 	return status;
1821 }
1822 
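/*
 * Completion callback of the userspace TLS handshake upcall.  The handshake
 * timeout work is cancelled and, on success, the queue moves to
 * NVMET_TCP_Q_CONNECTING and the negotiated PSK is looked up; on any failure
 * the queue is marked failed and its release is scheduled.  The reference
 * taken in nvmet_tcp_tls_handshake() is dropped here.
 */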
1823 static void nvmet_tcp_tls_handshake_done(void *data, int status,
1824 					 key_serial_t peerid)
1825 {
1826 	struct nvmet_tcp_queue *queue = data;
1827 
1828 	pr_debug("queue %d: TLS handshake done, key %x, status %d\n",
1829 		 queue->idx, peerid, status);
1830 	spin_lock_bh(&queue->state_lock);
1831 	if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1832 		spin_unlock_bh(&queue->state_lock);
1833 		return;
1834 	}
1835 	if (!status) {
1836 		queue->tls_pskid = peerid;
1837 		queue->state = NVMET_TCP_Q_CONNECTING;
1838 	} else
1839 		queue->state = NVMET_TCP_Q_FAILED;
1840 	spin_unlock_bh(&queue->state_lock);
1841 
1842 	cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1843 
1844 	if (!status)
1845 		status = nvmet_tcp_tls_key_lookup(queue, peerid);
1846 
1847 	if (status)
1848 		nvmet_tcp_schedule_release_queue(queue);
1849 	else
1850 		nvmet_tcp_set_queue_sock(queue);
1851 	kref_put(&queue->kref, nvmet_tcp_release_queue);
1852 }
1853 
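/*
 * Delayed work fired when the TLS handshake does not complete within
 * tls_handshake_timeout seconds.  If cancelling the handshake succeeds the
 * queue is failed and released; otherwise nvmet_tcp_tls_handshake_done()
 * already ran and owns the cleanup.
 */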
1854 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
1855 {
1856 	struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w),
1857 			struct nvmet_tcp_queue, tls_handshake_tmo_work);
1858 
1859 	pr_warn("queue %d: TLS handshake timeout\n", queue->idx);
1860 	/*
1861 	 * If tls_handshake_cancel() fails, we've lost the race with
1862 	 * nvmet_tcp_tls_handshake_done().
	 */
1863 	if (!tls_handshake_cancel(queue->sock->sk))
1864 		return;
1865 	spin_lock_bh(&queue->state_lock);
1866 	if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1867 		spin_unlock_bh(&queue->state_lock);
1868 		return;
1869 	}
1870 	queue->state = NVMET_TCP_Q_FAILED;
1871 	spin_unlock_bh(&queue->state_lock);
1872 	nvmet_tcp_schedule_release_queue(queue);
1873 	kref_put(&queue->kref, nvmet_tcp_release_queue);
1874 }
1875 
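/*
 * Start the server-side TLS handshake upcall for this queue and arm the
 * handshake timeout.  An extra queue reference is held for the duration of
 * the handshake; it is dropped here on submission failure, otherwise by the
 * done callback or by the timeout handler, whichever wins the race.
 */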
1876 static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
1877 {
1878 	int ret = -EOPNOTSUPP;
1879 	struct tls_handshake_args args;
1880 
1881 	if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
1882 		pr_warn("cannot start TLS in state %d\n", queue->state);
1883 		return -EINVAL;
1884 	}
1885 
1886 	kref_get(&queue->kref);
1887 	pr_debug("queue %d: TLS ServerHello\n", queue->idx);
1888 	memset(&args, 0, sizeof(args));
1889 	args.ta_sock = queue->sock;
1890 	args.ta_done = nvmet_tcp_tls_handshake_done;
1891 	args.ta_data = queue;
1892 	args.ta_keyring = key_serial(queue->port->nport->keyring);
1893 	args.ta_timeout_ms = tls_handshake_timeout * 1000;
1894 
1895 	ret = tls_server_hello_psk(&args, GFP_KERNEL);
1896 	if (ret) {
1897 		kref_put(&queue->kref, nvmet_tcp_release_queue);
1898 		pr_err("failed to start TLS, err=%d\n", ret);
1899 	} else {
1900 		queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work,
1901 				   tls_handshake_timeout * HZ);
1902 	}
1903 	return ret;
1904 }
1905 #else
1906 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
1907 #endif
1908 
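/*
 * Set up a queue for a newly accepted connection: allocate the queue, wrap
 * the socket in a struct file, allocate the connect command and the nvme
 * cq/sq, and add the queue to the global queue list.  On a TLS-enabled port
 * the first bytes are peeked first; a cleartext ICReq falls back to normal
 * processing, anything else starts the TLS handshake upcall.  On error the
 * accepted socket is released.
 */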
1909 static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1910 		struct socket *newsock)
1911 {
1912 	struct nvmet_tcp_queue *queue;
1913 	struct file *sock_file = NULL;
1914 	int ret;
1915 
1916 	queue = kzalloc_obj(*queue);
1917 	if (!queue) {
1918 		ret = -ENOMEM;
1919 		goto out_release;
1920 	}
1921 
1922 	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1923 	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1924 	kref_init(&queue->kref);
1925 	queue->sock = newsock;
1926 	queue->port = port;
1927 	queue->nr_cmds = 0;
1928 	spin_lock_init(&queue->state_lock);
1929 	if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
1930 	    NVMF_TCP_SECTYPE_TLS13)
1931 		queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
1932 	else
1933 		queue->state = NVMET_TCP_Q_CONNECTING;
1934 	INIT_LIST_HEAD(&queue->free_list);
1935 	init_llist_head(&queue->resp_list);
1936 	INIT_LIST_HEAD(&queue->resp_send_list);
1937 
1938 	sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
1939 	if (IS_ERR(sock_file)) {
1940 		ret = PTR_ERR(sock_file);
1941 		goto out_free_queue;
1942 	}
1943 
1944 	queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1945 	if (queue->idx < 0) {
1946 		ret = queue->idx;
1947 		goto out_sock;
1948 	}
1949 
1950 	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1951 	if (ret)
1952 		goto out_ida_remove;
1953 
1954 	nvmet_cq_init(&queue->nvme_cq);
1955 	ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq);
1956 	if (ret)
1957 		goto out_free_connect;
1958 
1959 	nvmet_prepare_receive_pdu(queue);
1960 
1961 	mutex_lock(&nvmet_tcp_queue_mutex);
1962 	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1963 	mutex_unlock(&nvmet_tcp_queue_mutex);
1964 
1965 	INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
1966 			  nvmet_tcp_tls_handshake_timeout);
1967 #ifdef CONFIG_NVME_TARGET_TCP_TLS
1968 	if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1969 		struct sock *sk = queue->sock->sk;
1970 
1971 		/* Restore the default callbacks before starting the upcall */
1972 		write_lock_bh(&sk->sk_callback_lock);
1973 		sk->sk_user_data = NULL;
1974 		sk->sk_data_ready = port->data_ready;
1975 		write_unlock_bh(&sk->sk_callback_lock);
1976 		if (!nvmet_tcp_try_peek_pdu(queue)) {
1977 			if (!nvmet_tcp_tls_handshake(queue))
1978 				return;
1979 			/* Failed to start the TLS handshake, terminate the connection */
1980 			goto out_destroy_sq;
1981 		}
1982 		/* Not a TLS connection, continue with normal processing */
1983 		queue->state = NVMET_TCP_Q_CONNECTING;
1984 	}
1985 #endif
1986 
1987 	ret = nvmet_tcp_set_queue_sock(queue);
1988 	if (ret)
1989 		goto out_destroy_sq;
1990 
1991 	return;
1992 out_destroy_sq:
1993 	mutex_lock(&nvmet_tcp_queue_mutex);
1994 	list_del_init(&queue->queue_list);
1995 	mutex_unlock(&nvmet_tcp_queue_mutex);
1996 	nvmet_sq_destroy(&queue->nvme_sq);
1997 out_free_connect:
1998 	nvmet_cq_put(&queue->nvme_cq);
1999 	nvmet_tcp_free_cmd(&queue->connect);
2000 out_ida_remove:
2001 	ida_free(&nvmet_tcp_queue_ida, queue->idx);
2002 out_sock:
2003 	fput(queue->sock->file);
2004 out_free_queue:
2005 	kfree(queue);
2006 out_release:
2007 	pr_err("failed to allocate queue, error %d\n", ret);
2008 	if (!sock_file)
2009 		sock_release(newsock);
2010 }
2011 
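/*
 * Work item scheduled from the listening socket's data_ready callback:
 * accept connections until the backlog is drained and hand each new socket
 * to nvmet_tcp_alloc_queue().
 */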
2012 static void nvmet_tcp_accept_work(struct work_struct *w)
2013 {
2014 	struct nvmet_tcp_port *port =
2015 		container_of(w, struct nvmet_tcp_port, accept_work);
2016 	struct socket *newsock;
2017 	int ret;
2018 
2019 	while (true) {
2020 		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
2021 		if (ret < 0) {
2022 			if (ret != -EAGAIN)
2023 				pr_warn("failed to accept err=%d\n", ret);
2024 			return;
2025 		}
2026 		nvmet_tcp_alloc_queue(port, newsock);
2027 	}
2028 }
2029 
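/* data_ready callback of the listening socket: schedule the accept work. */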
2030 static void nvmet_tcp_listen_data_ready(struct sock *sk)
2031 {
2032 	struct nvmet_tcp_port *port;
2033 
2034 	trace_sk_data_ready(sk);
2035 
2036 	if (sk->sk_state != TCP_LISTEN)
2037 		return;
2038 
2039 	read_lock_bh(&sk->sk_callback_lock);
2040 	port = sk->sk_user_data;
2041 	if (port)
2042 		queue_work(nvmet_wq, &port->accept_work);
2043 	read_unlock_bh(&sk->sk_callback_lock);
2044 }
2045 
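/*
 * Enable a TCP listener for an nvmet port: resolve the configured address,
 * create the listening socket, take over its data_ready callback and start
 * listening with a backlog of NVMET_TCP_BACKLOG.
 *
 * A port is typically configured through configfs before it is enabled,
 * e.g. (values shown for illustration only):
 *
 *   echo tcp      > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   echo ipv4     > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *   echo 10.0.0.1 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *   echo 4420     > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 */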
2046 static int nvmet_tcp_add_port(struct nvmet_port *nport)
2047 {
2048 	struct nvmet_tcp_port *port;
2049 	__kernel_sa_family_t af;
2050 	int ret;
2051 
2052 	port = kzalloc_obj(*port);
2053 	if (!port)
2054 		return -ENOMEM;
2055 
2056 	switch (nport->disc_addr.adrfam) {
2057 	case NVMF_ADDR_FAMILY_IP4:
2058 		af = AF_INET;
2059 		break;
2060 	case NVMF_ADDR_FAMILY_IP6:
2061 		af = AF_INET6;
2062 		break;
2063 	default:
2064 		pr_err("address family %d not supported\n",
2065 				nport->disc_addr.adrfam);
2066 		ret = -EINVAL;
2067 		goto err_port;
2068 	}
2069 
2070 	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
2071 			nport->disc_addr.trsvcid, &port->addr);
2072 	if (ret) {
2073 		pr_err("malformed ip/port passed: %s:%s\n",
2074 			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
2075 		goto err_port;
2076 	}
2077 
2078 	port->nport = nport;
2079 	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
2080 	if (port->nport->inline_data_size < 0)
2081 		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
2082 
2083 	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
2084 				IPPROTO_TCP, &port->sock);
2085 	if (ret) {
2086 		pr_err("failed to create a socket\n");
2087 		goto err_port;
2088 	}
2089 
2090 	port->sock->sk->sk_user_data = port;
2091 	port->data_ready = port->sock->sk->sk_data_ready;
2092 	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
2093 	sock_set_reuseaddr(port->sock->sk);
2094 	tcp_sock_set_nodelay(port->sock->sk);
2095 	if (so_priority > 0)
2096 		sock_set_priority(port->sock->sk, so_priority);
2097 
2098 	ret = kernel_bind(port->sock, (struct sockaddr_unsized *)&port->addr,
2099 			sizeof(port->addr));
2100 	if (ret) {
2101 		pr_err("failed to bind port socket %d\n", ret);
2102 		goto err_sock;
2103 	}
2104 
2105 	ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
2106 	if (ret) {
2107 		pr_err("failed to listen %d on port sock\n", ret);
2108 		goto err_sock;
2109 	}
2110 
2111 	nport->priv = port;
2112 	pr_info("enabling port %d (%pISpc)\n",
2113 		le16_to_cpu(nport->disc_addr.portid), &port->addr);
2114 
2115 	return 0;
2116 
2117 err_sock:
2118 	sock_release(port->sock);
2119 err_port:
2120 	kfree(port);
2121 	return ret;
2122 }
2123 
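/*
 * Shut down the sockets of all queues still attached to this port, i.e.
 * connections that have not yet been bound to a controller.
 */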
2124 static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
2125 {
2126 	struct nvmet_tcp_queue *queue;
2127 
2128 	mutex_lock(&nvmet_tcp_queue_mutex);
2129 	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2130 		if (queue->port == port)
2131 			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2132 	mutex_unlock(&nvmet_tcp_queue_mutex);
2133 }
2134 
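/*
 * Disable a TCP listener: restore the original data_ready callback, flush
 * any pending accept work, shut down queues still owned by the port and
 * release the listening socket.
 */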
2135 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
2136 {
2137 	struct nvmet_tcp_port *port = nport->priv;
2138 
2139 	write_lock_bh(&port->sock->sk->sk_callback_lock);
2140 	port->sock->sk->sk_data_ready = port->data_ready;
2141 	port->sock->sk->sk_user_data = NULL;
2142 	write_unlock_bh(&port->sock->sk->sk_callback_lock);
2143 	cancel_work_sync(&port->accept_work);
2144 	/*
2145 	 * Destroy the remaining queues, which do not belong to any
2146 	 * controller yet.
2147 	 */
2148 	nvmet_tcp_destroy_port_queues(port);
2149 
2150 	sock_release(port->sock);
2151 	kfree(port);
2152 }
2153 
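/* Shut down the sockets of all queues belonging to the given controller. */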
2154 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
2155 {
2156 	struct nvmet_tcp_queue *queue;
2157 
2158 	mutex_lock(&nvmet_tcp_queue_mutex);
2159 	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2160 		if (queue->nvme_sq.ctrl == ctrl)
2161 			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2162 	mutex_unlock(&nvmet_tcp_queue_mutex);
2163 }
2164 
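/*
 * Called when a queue is bound to a controller during Connect.  For the
 * admin queue (qid 0), reject the connect as "controller busy" if more than
 * NVMET_TCP_BACKLOG queues of this controller are still disconnecting.
 * Per-command contexts are allocated at twice the negotiated queue size.
 */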
2165 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
2166 {
2167 	struct nvmet_tcp_queue *queue =
2168 		container_of(sq, struct nvmet_tcp_queue, nvme_sq);
2169 
2170 	if (sq->qid == 0) {
2171 		struct nvmet_tcp_queue *q;
2172 		int pending = 0;
2173 
2174 		/* Check for pending controller teardown */
2175 		mutex_lock(&nvmet_tcp_queue_mutex);
2176 		list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
2177 			if (q->nvme_sq.ctrl == sq->ctrl &&
2178 			    q->state == NVMET_TCP_Q_DISCONNECTING)
2179 				pending++;
2180 		}
2181 		mutex_unlock(&nvmet_tcp_queue_mutex);
2182 		if (pending > NVMET_TCP_BACKLOG)
2183 			return NVME_SC_CONNECT_CTRL_BUSY;
2184 	}
2185 
2186 	queue->nr_cmds = sq->size * 2;
2187 	if (nvmet_tcp_alloc_cmds(queue)) {
2188 		queue->nr_cmds = 0;
2189 		return NVME_SC_INTERNAL;
2190 	}
2191 	return 0;
2192 }
2193 
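/*
 * Report the transport address for a discovery log entry: if the port
 * listens on a wildcard address, return the local address of the socket the
 * request arrived on instead of the configured traddr.
 */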
2194 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
2195 		struct nvmet_port *nport, char *traddr)
2196 {
2197 	struct nvmet_tcp_port *port = nport->priv;
2198 
2199 	if (inet_addr_is_any(&port->addr)) {
2200 		struct nvmet_tcp_cmd *cmd =
2201 			container_of(req, struct nvmet_tcp_cmd, req);
2202 		struct nvmet_tcp_queue *queue = cmd->queue;
2203 
2204 		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
2205 	} else {
2206 		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
2207 	}
2208 }
2209 
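/*
 * Report the address of the connected host, taken from the peer address of
 * the admin queue's socket.
 */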
2210 static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl,
2211 			char *traddr, size_t traddr_len)
2212 {
2213 	struct nvmet_sq *sq = ctrl->sqs[0];
2214 	struct nvmet_tcp_queue *queue =
2215 		container_of(sq, struct nvmet_tcp_queue, nvme_sq);
2216 
2217 	if (queue->sockaddr_peer.ss_family == AF_UNSPEC)
2218 		return -EINVAL;
2219 	return snprintf(traddr, traddr_len, "%pISc",
2220 			(struct sockaddr *)&queue->sockaddr_peer);
2221 }
2222 
2223 static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
2224 	.owner			= THIS_MODULE,
2225 	.type			= NVMF_TRTYPE_TCP,
2226 	.msdbd			= 1,
2227 	.add_port		= nvmet_tcp_add_port,
2228 	.remove_port		= nvmet_tcp_remove_port,
2229 	.queue_response		= nvmet_tcp_queue_response,
2230 	.delete_ctrl		= nvmet_tcp_delete_ctrl,
2231 	.install_queue		= nvmet_tcp_install_queue,
2232 	.disc_traddr		= nvmet_tcp_disc_port_addr,
2233 	.host_traddr		= nvmet_tcp_host_port_addr,
2234 };
2235 
2236 static int __init nvmet_tcp_init(void)
2237 {
2238 	int ret;
2239 
2240 	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
2241 				WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU, 0);
2242 	if (!nvmet_tcp_wq)
2243 		return -ENOMEM;
2244 
2245 	ret = nvmet_register_transport(&nvmet_tcp_ops);
2246 	if (ret)
2247 		goto err;
2248 
2249 	return 0;
2250 err:
2251 	destroy_workqueue(nvmet_tcp_wq);
2252 	return ret;
2253 }
2254 
2255 static void __exit nvmet_tcp_exit(void)
2256 {
2257 	struct nvmet_tcp_queue *queue;
2258 
2259 	nvmet_unregister_transport(&nvmet_tcp_ops);
2260 
2261 	flush_workqueue(nvmet_wq);
2262 	mutex_lock(&nvmet_tcp_queue_mutex);
2263 	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2264 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2265 	mutex_unlock(&nvmet_tcp_queue_mutex);
2266 	flush_workqueue(nvmet_wq);
2267 
2268 	destroy_workqueue(nvmet_tcp_wq);
2269 	ida_destroy(&nvmet_tcp_queue_ida);
2270 }
2271 
2272 module_init(nvmet_tcp_init);
2273 module_exit(nvmet_tcp_exit);
2274 
2275 MODULE_DESCRIPTION("NVMe target TCP transport driver");
2276 MODULE_LICENSE("GPL v2");
2277 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
2278