xref: /linux/drivers/nvme/host/tcp.c (revision a028739a4330881a6a3b5aa4a39381bbcacf2f2f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics TCP host.
4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/crc32.h>
12 #include <linux/nvme-tcp.h>
13 #include <linux/nvme-keyring.h>
14 #include <net/sock.h>
15 #include <net/tcp.h>
16 #include <net/tls.h>
17 #include <net/tls_prot.h>
18 #include <net/handshake.h>
19 #include <linux/blk-mq.h>
20 #include <net/busy_poll.h>
21 #include <trace/events/sock.h>
22 
23 #include "nvme.h"
24 #include "fabrics.h"
25 
26 struct nvme_tcp_queue;
27 
28 /*
29  * Define the socket priority to use for connections where it is desirable
30  * that the NIC consider performing optimized packet processing or filtering.
31  * A non-zero value is sufficient to indicate general consideration of any
32  * possible optimization.  Making it a module param allows for alternative
33  * values that may be unique for some NIC implementations.
34  */
35 static int so_priority;
36 module_param(so_priority, int, 0644);
37 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimization priority");
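/*
 * Illustrative usage (hypothetical value, not a recommendation): loading the
 * module with "modprobe nvme-tcp so_priority=6" makes every nvme-tcp socket
 * call sock_set_priority() with 6, which a NIC may map to a dedicated
 * traffic class or steering rule.
 */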
38 
39 /*
40  * Use an unbound workqueue for nvme_tcp_wq so that the cpu affinity can be
41  * set from sysfs.
42  */
43 static bool wq_unbound;
44 module_param(wq_unbound, bool, 0644);
45 MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
46 
47 /*
48  * TLS handshake timeout
49  */
50 static int tls_handshake_timeout = 10;
51 #ifdef CONFIG_NVME_TCP_TLS
52 module_param(tls_handshake_timeout, int, 0644);
53 MODULE_PARM_DESC(tls_handshake_timeout,
54 		 "nvme TLS handshake timeout in seconds (default 10)");
55 #endif
56 
57 static atomic_t nvme_tcp_cpu_queues[NR_CPUS];
58 
59 #ifdef CONFIG_DEBUG_LOCK_ALLOC
60 /* lockdep can detect a circular dependency of the form
61  *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
62  * because dependencies are tracked for both nvme-tcp and user contexts. Using
63  * a separate class prevents lockdep from conflating nvme-tcp socket use with
64  * user-space socket API use.
65  */
66 static struct lock_class_key nvme_tcp_sk_key[2];
67 static struct lock_class_key nvme_tcp_slock_key[2];
68 
69 static void nvme_tcp_reclassify_socket(struct socket *sock)
70 {
71 	struct sock *sk = sock->sk;
72 
73 	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
74 		return;
75 
76 	switch (sk->sk_family) {
77 	case AF_INET:
78 		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
79 					      &nvme_tcp_slock_key[0],
80 					      "sk_lock-AF_INET-NVME",
81 					      &nvme_tcp_sk_key[0]);
82 		break;
83 	case AF_INET6:
84 		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
85 					      &nvme_tcp_slock_key[1],
86 					      "sk_lock-AF_INET6-NVME",
87 					      &nvme_tcp_sk_key[1]);
88 		break;
89 	default:
90 		WARN_ON_ONCE(1);
91 	}
92 }
93 #else
94 static void nvme_tcp_reclassify_socket(struct socket *sock) { }
95 #endif
96 
97 enum nvme_tcp_send_state {
98 	NVME_TCP_SEND_CMD_PDU = 0,
99 	NVME_TCP_SEND_H2C_PDU,
100 	NVME_TCP_SEND_DATA,
101 	NVME_TCP_SEND_DDGST,
102 };
103 
104 struct nvme_tcp_request {
105 	struct nvme_request	req;
106 	void			*pdu;
107 	struct nvme_tcp_queue	*queue;
108 	u32			data_len;
109 	u32			pdu_len;
110 	u32			pdu_sent;
111 	u32			h2cdata_left;
112 	u32			h2cdata_offset;
113 	u16			ttag;
114 	__le16			status;
115 	struct list_head	entry;
116 	struct llist_node	lentry;
117 	__le32			ddgst;
118 
119 	struct bio		*curr_bio;
120 	struct iov_iter		iter;
121 
122 	/* send state */
123 	size_t			offset;
124 	size_t			data_sent;
125 	enum nvme_tcp_send_state state;
126 };
127 
128 enum nvme_tcp_queue_flags {
129 	NVME_TCP_Q_ALLOCATED	= 0,
130 	NVME_TCP_Q_LIVE		= 1,
131 	NVME_TCP_Q_POLLING	= 2,
132 	NVME_TCP_Q_IO_CPU_SET	= 3,
133 };
134 
135 enum nvme_tcp_recv_state {
136 	NVME_TCP_RECV_PDU = 0,
137 	NVME_TCP_RECV_DATA,
138 	NVME_TCP_RECV_DDGST,
139 };
140 
141 struct nvme_tcp_ctrl;
142 struct nvme_tcp_queue {
143 	struct socket		*sock;
144 	struct work_struct	io_work;
145 	int			io_cpu;
146 
147 	struct mutex		queue_lock;
148 	struct mutex		send_mutex;
149 	struct llist_head	req_list;
150 	struct list_head	send_list;
151 
152 	/* recv state */
153 	void			*pdu;
154 	int			pdu_remaining;
155 	int			pdu_offset;
156 	size_t			data_remaining;
157 	size_t			ddgst_remaining;
158 	unsigned int		nr_cqe;
159 
160 	/* send state */
161 	struct nvme_tcp_request *request;
162 
163 	u32			maxh2cdata;
164 	size_t			cmnd_capsule_len;
165 	struct nvme_tcp_ctrl	*ctrl;
166 	unsigned long		flags;
167 	bool			rd_enabled;
168 
169 	bool			hdr_digest;
170 	bool			data_digest;
171 	bool			tls_enabled;
172 	u32			rcv_crc;
173 	u32			snd_crc;
174 	__le32			exp_ddgst;
175 	__le32			recv_ddgst;
176 	struct completion       tls_complete;
177 	int                     tls_err;
178 	struct page_frag_cache	pf_cache;
179 
180 	void (*state_change)(struct sock *);
181 	void (*data_ready)(struct sock *);
182 	void (*write_space)(struct sock *);
183 };
184 
185 struct nvme_tcp_ctrl {
186 	/* read only in the hot path */
187 	struct nvme_tcp_queue	*queues;
188 	struct blk_mq_tag_set	tag_set;
189 
190 	/* other member variables */
191 	struct list_head	list;
192 	struct blk_mq_tag_set	admin_tag_set;
193 	struct sockaddr_storage addr;
194 	struct sockaddr_storage src_addr;
195 	struct nvme_ctrl	ctrl;
196 
197 	struct work_struct	err_work;
198 	struct delayed_work	connect_work;
199 	struct nvme_tcp_request async_req;
200 	u32			io_queues[HCTX_MAX_TYPES];
201 };
202 
203 static LIST_HEAD(nvme_tcp_ctrl_list);
204 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
205 static struct workqueue_struct *nvme_tcp_wq;
206 static const struct blk_mq_ops nvme_tcp_mq_ops;
207 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
208 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
209 
210 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
211 {
212 	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
213 }
214 
215 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
216 {
217 	return queue - queue->ctrl->queues;
218 }
219 
220 static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
221 {
222 	switch (type) {
223 	case nvme_tcp_c2h_term:
224 	case nvme_tcp_c2h_data:
225 	case nvme_tcp_r2t:
226 	case nvme_tcp_rsp:
227 		return true;
228 	default:
229 		return false;
230 	}
231 }
232 
233 /*
234  * Check if the queue is TLS encrypted
235  */
236 static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
237 {
238 	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
239 		return false;
240 
241 	return queue->tls_enabled;
242 }
243 
244 /*
245  * Check if TLS is configured for the controller.
246  */
247 static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
248 {
249 	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
250 		return 0;
251 		return false;
252 	return ctrl->opts->tls || ctrl->opts->concat;
253 }
254 
255 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
256 {
257 	u32 queue_idx = nvme_tcp_queue_id(queue);
258 
259 	if (queue_idx == 0)
260 		return queue->ctrl->admin_tag_set.tags[queue_idx];
261 	return queue->ctrl->tag_set.tags[queue_idx - 1];
262 }
263 
264 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
265 {
266 	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
267 }
268 
269 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
270 {
271 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
272 }
273 
274 static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
275 {
276 	return req->pdu;
277 }
278 
279 static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
280 {
281 	/* reuse the tail of the command PDU allocation for the data PDU */
282 	return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
283 		sizeof(struct nvme_tcp_data_pdu);
284 }
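/*
 * Layout sketch (for reference): the data PDU is smaller than the command
 * PDU, so nvme_tcp_req_data_pdu() builds it in the tail of the same
 * per-request buffer:
 *
 *   req->pdu -> [ nvme_tcp_cmd_pdu ........................... ][hdgst]
 *                          ^ data PDU starts at
 *                            sizeof(cmd_pdu) - sizeof(data_pdu)
 *
 * This is safe because the command PDU has been transmitted in full by the
 * time an R2T causes a data PDU to be constructed here.
 */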
285 
286 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
287 {
288 	if (nvme_is_fabrics(req->req.cmd))
289 		return NVME_TCP_ADMIN_CCSZ;
290 	return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
291 }
292 
293 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
294 {
295 	return req == &req->queue->ctrl->async_req;
296 }
297 
298 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
299 {
300 	struct request *rq;
301 
302 	if (unlikely(nvme_tcp_async_req(req)))
303 		return false; /* async events don't have a request */
304 
305 	rq = blk_mq_rq_from_pdu(req);
306 
307 	return rq_data_dir(rq) == WRITE && req->data_len &&
308 		req->data_len <= nvme_tcp_inline_data_size(req);
309 }
310 
311 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
312 {
313 	return req->iter.bvec->bv_page;
314 }
315 
316 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
317 {
318 	return req->iter.bvec->bv_offset + req->iter.iov_offset;
319 }
320 
321 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
322 {
323 	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
324 			req->pdu_len - req->pdu_sent);
325 }
326 
327 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
328 {
329 	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
330 			req->pdu_len - req->pdu_sent : 0;
331 }
332 
333 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
334 		int len)
335 {
336 	return nvme_tcp_pdu_data_left(req) <= len;
337 }
338 
339 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
340 		unsigned int dir)
341 {
342 	struct request *rq = blk_mq_rq_from_pdu(req);
343 	struct bio_vec *vec;
344 	unsigned int size;
345 	int nr_bvec;
346 	size_t offset;
347 
348 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
349 		vec = &rq->special_vec;
350 		nr_bvec = 1;
351 		size = blk_rq_payload_bytes(rq);
352 		offset = 0;
353 	} else {
354 		struct bio *bio = req->curr_bio;
355 		struct bvec_iter bi;
356 		struct bio_vec bv;
357 
358 		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
359 		nr_bvec = 0;
360 		bio_for_each_bvec(bv, bio, bi) {
361 			nr_bvec++;
362 		}
363 		size = bio->bi_iter.bi_size;
364 		offset = bio->bi_iter.bi_bvec_done;
365 	}
366 
367 	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
368 	req->iter.iov_offset = offset;
369 }
370 
371 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
372 		int len)
373 {
374 	req->data_sent += len;
375 	req->pdu_sent += len;
376 	iov_iter_advance(&req->iter, len);
377 	if (!iov_iter_count(&req->iter) &&
378 	    req->data_sent < req->data_len) {
379 		req->curr_bio = req->curr_bio->bi_next;
380 		nvme_tcp_init_iter(req, ITER_SOURCE);
381 	}
382 }
383 
384 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
385 {
386 	int ret;
387 
388 	/* drain the send queue as much as we can... */
389 	do {
390 		ret = nvme_tcp_try_send(queue);
391 	} while (ret > 0);
392 }
393 
394 static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
395 {
396 	return !list_empty(&queue->send_list) ||
397 		!llist_empty(&queue->req_list);
398 }
399 
400 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
401 {
402 	return !nvme_tcp_queue_tls(queue) &&
403 		nvme_tcp_queue_has_pending(queue);
404 }
405 
406 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
407 		bool last)
408 {
409 	struct nvme_tcp_queue *queue = req->queue;
410 	bool empty;
411 
412 	empty = llist_add(&req->lentry, &queue->req_list) &&
413 		list_empty(&queue->send_list) && !queue->request;
414 
415 	/*
416 	 * If we're the first on the send_list, try to send directly;
417 	 * otherwise queue io_work. Also, only do that if we are on the
418 	 * same cpu, so we don't introduce contention.
419 	 */
420 	if (queue->io_cpu == raw_smp_processor_id() &&
421 	    empty && mutex_trylock(&queue->send_mutex)) {
422 		nvme_tcp_send_all(queue);
423 		mutex_unlock(&queue->send_mutex);
424 	}
425 
426 	if (last && nvme_tcp_queue_has_pending(queue))
427 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
428 }
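/*
 * Note on the fast path above: llist_add() returns true only when the node
 * was added to a previously empty list, so "empty" implies no other
 * submission raced ahead of us; combined with the same-cpu check and
 * mutex_trylock() this lets the submitting context send inline instead of
 * bouncing every request through io_work.
 */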
429 
430 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
431 {
432 	struct nvme_tcp_request *req;
433 	struct llist_node *node;
434 
435 	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
436 		req = llist_entry(node, struct nvme_tcp_request, lentry);
437 		list_add(&req->entry, &queue->send_list);
438 	}
439 }
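/*
 * Ordering note: llist_del_all() hands back the lock-free req_list in
 * reverse (newest-first) order, and list_add() prepends each node, so after
 * the loop send_list holds the requests in original submission order.
 */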
440 
441 static inline struct nvme_tcp_request *
442 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
443 {
444 	struct nvme_tcp_request *req;
445 
446 	req = list_first_entry_or_null(&queue->send_list,
447 			struct nvme_tcp_request, entry);
448 	if (!req) {
449 		nvme_tcp_process_req_list(queue);
450 		req = list_first_entry_or_null(&queue->send_list,
451 				struct nvme_tcp_request, entry);
452 		if (unlikely(!req))
453 			return NULL;
454 	}
455 
456 	list_del_init(&req->entry);
457 	init_llist_node(&req->lentry);
458 	return req;
459 }
460 
461 #define NVME_TCP_CRC_SEED (~0)
462 
463 static inline void nvme_tcp_ddgst_update(u32 *crcp,
464 		struct page *page, size_t off, size_t len)
465 {
466 	page += off / PAGE_SIZE;
467 	off %= PAGE_SIZE;
468 	while (len) {
469 		const void *vaddr = kmap_local_page(page);
470 		size_t n = min(len, (size_t)PAGE_SIZE - off);
471 
472 		*crcp = crc32c(*crcp, vaddr + off, n);
473 		kunmap_local(vaddr);
474 		page++;
475 		off = 0;
476 		len -= n;
477 	}
478 }
479 
480 static inline __le32 nvme_tcp_ddgst_final(u32 crc)
481 {
482 	return cpu_to_le32(~crc);
483 }
484 
485 static inline __le32 nvme_tcp_hdgst(const void *pdu, size_t len)
486 {
487 	return cpu_to_le32(~crc32c(NVME_TCP_CRC_SEED, pdu, len));
488 }
489 
490 static inline void nvme_tcp_set_hdgst(void *pdu, size_t len)
491 {
492 	*(__le32 *)(pdu + len) = nvme_tcp_hdgst(pdu, len);
493 }
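/*
 * Digest math, for reference: NVMe/TCP digests are plain CRC32C with an
 * all-ones seed and a final bitwise inversion, i.e. for a header of "len"
 * bytes:
 *
 *   hdgst = ~crc32c(~0, pdu, len)   (stored little-endian after the header)
 *
 * nvme_tcp_ddgst_update() accumulates the same CRC page by page, and
 * nvme_tcp_ddgst_final() applies the inversion for the data digest.
 */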
494 
495 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
496 		void *pdu, size_t pdu_len)
497 {
498 	struct nvme_tcp_hdr *hdr = pdu;
499 	__le32 recv_digest;
500 	__le32 exp_digest;
501 
502 	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
503 		dev_err(queue->ctrl->ctrl.device,
504 			"queue %d: header digest flag is cleared\n",
505 			nvme_tcp_queue_id(queue));
506 		return -EPROTO;
507 	}
508 
509 	recv_digest = *(__le32 *)(pdu + hdr->hlen);
510 	exp_digest = nvme_tcp_hdgst(pdu, pdu_len);
511 	if (recv_digest != exp_digest) {
512 		dev_err(queue->ctrl->ctrl.device,
513 			"header digest error: recv %#x expected %#x\n",
514 			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
515 		return -EIO;
516 	}
517 
518 	return 0;
519 }
520 
521 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
522 {
523 	struct nvme_tcp_hdr *hdr = pdu;
524 	u8 digest_len = nvme_tcp_hdgst_len(queue);
525 	u32 len;
526 
527 	len = le32_to_cpu(hdr->plen) - hdr->hlen -
528 		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
529 
530 	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
531 		dev_err(queue->ctrl->ctrl.device,
532 			"queue %d: data digest flag is cleared\n",
533 			nvme_tcp_queue_id(queue));
534 		return -EPROTO;
535 	}
536 	queue->rcv_crc = NVME_TCP_CRC_SEED;
537 
538 	return 0;
539 }
540 
541 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
542 		struct request *rq, unsigned int hctx_idx)
543 {
544 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
545 
546 	page_frag_free(req->pdu);
547 }
548 
549 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
550 		struct request *rq, unsigned int hctx_idx,
551 		unsigned int numa_node)
552 {
553 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
554 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
555 	struct nvme_tcp_cmd_pdu *pdu;
556 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
557 	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
558 	u8 hdgst = nvme_tcp_hdgst_len(queue);
559 
560 	req->pdu = page_frag_alloc(&queue->pf_cache,
561 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
562 		GFP_KERNEL | __GFP_ZERO);
563 	if (!req->pdu)
564 		return -ENOMEM;
565 
566 	pdu = req->pdu;
567 	req->queue = queue;
568 	nvme_req(rq)->ctrl = &ctrl->ctrl;
569 	nvme_req(rq)->cmd = &pdu->cmd;
570 	init_llist_node(&req->lentry);
571 	INIT_LIST_HEAD(&req->entry);
572 
573 	return 0;
574 }
575 
576 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
577 		unsigned int hctx_idx)
578 {
579 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
580 	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
581 
582 	hctx->driver_data = queue;
583 	return 0;
584 }
585 
586 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
587 		unsigned int hctx_idx)
588 {
589 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
590 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
591 
592 	hctx->driver_data = queue;
593 	return 0;
594 }
595 
596 static enum nvme_tcp_recv_state
597 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
598 {
599 	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
600 		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
601 		NVME_TCP_RECV_DATA;
602 }
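/*
 * Receive state machine, in flow order: a PDU header is always collected
 * first (pdu_remaining > 0), then any C2HData payload (data_remaining),
 * and finally the trailing data digest (ddgst_remaining) when data digests
 * were negotiated; nvme_tcp_init_recv_ctx() below rearms the PDU state.
 */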
603 
604 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
605 {
606 	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
607 				nvme_tcp_hdgst_len(queue);
608 	queue->pdu_offset = 0;
609 	queue->data_remaining = -1;
610 	queue->ddgst_remaining = 0;
611 }
612 
613 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
614 {
615 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
616 		return;
617 
618 	dev_warn(ctrl->device, "starting error recovery\n");
619 	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
620 }
621 
622 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
623 		struct nvme_completion *cqe)
624 {
625 	struct nvme_tcp_request *req;
626 	struct request *rq;
627 
628 	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
629 	if (!rq) {
630 		dev_err(queue->ctrl->ctrl.device,
631 			"got bad cqe.command_id %#x on queue %d\n",
632 			cqe->command_id, nvme_tcp_queue_id(queue));
633 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
634 		return -EINVAL;
635 	}
636 
637 	req = blk_mq_rq_to_pdu(rq);
638 	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
639 		req->status = cqe->status;
640 
641 	if (!nvme_try_complete_req(rq, req->status, cqe->result))
642 		nvme_complete_rq(rq);
643 	queue->nr_cqe++;
644 
645 	return 0;
646 }
647 
648 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
649 		struct nvme_tcp_data_pdu *pdu)
650 {
651 	struct request *rq;
652 
653 	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
654 	if (!rq) {
655 		dev_err(queue->ctrl->ctrl.device,
656 			"got bad c2hdata.command_id %#x on queue %d\n",
657 			pdu->command_id, nvme_tcp_queue_id(queue));
658 		return -ENOENT;
659 	}
660 
661 	if (!blk_rq_payload_bytes(rq)) {
662 		dev_err(queue->ctrl->ctrl.device,
663 			"queue %d tag %#x unexpected data\n",
664 			nvme_tcp_queue_id(queue), rq->tag);
665 		return -EIO;
666 	}
667 
668 	queue->data_remaining = le32_to_cpu(pdu->data_length);
669 
670 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
671 	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
672 		dev_err(queue->ctrl->ctrl.device,
673 			"queue %d tag %#x SUCCESS set but not last PDU\n",
674 			nvme_tcp_queue_id(queue), rq->tag);
675 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
676 		return -EPROTO;
677 	}
678 
679 	return 0;
680 }
681 
682 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
683 		struct nvme_tcp_rsp_pdu *pdu)
684 {
685 	struct nvme_completion *cqe = &pdu->cqe;
686 	int ret = 0;
687 
688 	/*
689 	 * AEN requests are special as they don't time out and can
690 	 * survive any kind of queue freeze and often don't respond to
691 	 * aborts.  We don't even bother to allocate a struct request
692 	 * for them but rather special case them here.
693 	 */
694 	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
695 				     cqe->command_id)))
696 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
697 				&cqe->result);
698 	else
699 		ret = nvme_tcp_process_nvme_cqe(queue, cqe);
700 
701 	return ret;
702 }
703 
704 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
705 {
706 	struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
707 	struct nvme_tcp_queue *queue = req->queue;
708 	struct request *rq = blk_mq_rq_from_pdu(req);
709 	u32 h2cdata_sent = req->pdu_len;
710 	u8 hdgst = nvme_tcp_hdgst_len(queue);
711 	u8 ddgst = nvme_tcp_ddgst_len(queue);
712 
713 	req->state = NVME_TCP_SEND_H2C_PDU;
714 	req->offset = 0;
715 	req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
716 	req->pdu_sent = 0;
717 	req->h2cdata_left -= req->pdu_len;
718 	req->h2cdata_offset += h2cdata_sent;
719 
720 	memset(data, 0, sizeof(*data));
721 	data->hdr.type = nvme_tcp_h2c_data;
722 	if (!req->h2cdata_left)
723 		data->hdr.flags = NVME_TCP_F_DATA_LAST;
724 	if (queue->hdr_digest)
725 		data->hdr.flags |= NVME_TCP_F_HDGST;
726 	if (queue->data_digest)
727 		data->hdr.flags |= NVME_TCP_F_DDGST;
728 	data->hdr.hlen = sizeof(*data);
729 	data->hdr.pdo = data->hdr.hlen + hdgst;
730 	data->hdr.plen =
731 		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
732 	data->ttag = req->ttag;
733 	data->command_id = nvme_cid(rq);
734 	data->data_offset = cpu_to_le32(req->h2cdata_offset);
735 	data->data_length = cpu_to_le32(req->pdu_len);
736 }
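/*
 * Worked example (hypothetical sizes): an R2T asking for 96K with
 * queue->maxh2cdata == 64K is answered in two chunks. The first H2CData
 * PDU carries 64K (h2cdata_left drops to 32K); once it has been sent, this
 * function runs again and emits a 32K PDU at data_offset 64K, which is the
 * only one flagged NVME_TCP_F_DATA_LAST.
 */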
737 
738 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
739 		struct nvme_tcp_r2t_pdu *pdu)
740 {
741 	struct nvme_tcp_request *req;
742 	struct request *rq;
743 	u32 r2t_length = le32_to_cpu(pdu->r2t_length);
744 	u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
745 
746 	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
747 	if (!rq) {
748 		dev_err(queue->ctrl->ctrl.device,
749 			"got bad r2t.command_id %#x on queue %d\n",
750 			pdu->command_id, nvme_tcp_queue_id(queue));
751 		return -ENOENT;
752 	}
753 	req = blk_mq_rq_to_pdu(rq);
754 
755 	if (unlikely(!r2t_length)) {
756 		dev_err(queue->ctrl->ctrl.device,
757 			"req %d r2t len is %u, probably a bug...\n",
758 			rq->tag, r2t_length);
759 		return -EPROTO;
760 	}
761 
762 	if (unlikely(req->data_sent + r2t_length > req->data_len)) {
763 		dev_err(queue->ctrl->ctrl.device,
764 			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
765 			rq->tag, r2t_length, req->data_len, req->data_sent);
766 		return -EPROTO;
767 	}
768 
769 	if (unlikely(r2t_offset < req->data_sent)) {
770 		dev_err(queue->ctrl->ctrl.device,
771 			"req %d unexpected r2t offset %u (expected %zu)\n",
772 			rq->tag, r2t_offset, req->data_sent);
773 		return -EPROTO;
774 	}
775 
776 	if (llist_on_list(&req->lentry) ||
777 	    !list_empty(&req->entry)) {
778 		dev_err(queue->ctrl->ctrl.device,
779 			"req %d unexpected r2t while processing request\n",
780 			rq->tag);
781 		return -EPROTO;
782 	}
783 
784 	req->pdu_len = 0;
785 	req->h2cdata_left = r2t_length;
786 	req->h2cdata_offset = r2t_offset;
787 	req->ttag = pdu->ttag;
788 
789 	nvme_tcp_setup_h2c_data_pdu(req);
790 
791 	llist_add(&req->lentry, &queue->req_list);
792 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
793 
794 	return 0;
795 }
796 
797 static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
798 		struct nvme_tcp_term_pdu *pdu)
799 {
800 	u16 fes;
801 	const char *msg;
802 	u32 plen = le32_to_cpu(pdu->hdr.plen);
803 
804 	static const char * const msg_table[] = {
805 		[NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field",
806 		[NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
807 		[NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
808 		[NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
809 		[NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
810 		[NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
811 	};
812 
813 	if (plen < NVME_TCP_MIN_C2HTERM_PLEN ||
814 	    plen > NVME_TCP_MAX_C2HTERM_PLEN) {
815 		dev_err(queue->ctrl->ctrl.device,
816 			"Received a malformed C2HTermReq PDU (plen = %u)\n",
817 			plen);
818 		return;
819 	}
820 
821 	fes = le16_to_cpu(pdu->fes);
822 	if (fes && fes < ARRAY_SIZE(msg_table))
823 		msg = msg_table[fes];
824 	else
825 		msg = "Unknown";
826 
827 	dev_err(queue->ctrl->ctrl.device,
828 		"Received C2HTermReq (FES = %s)\n", msg);
829 }
830 
831 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
832 		unsigned int *offset, size_t *len)
833 {
834 	struct nvme_tcp_hdr *hdr;
835 	char *pdu = queue->pdu;
836 	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
837 	int ret;
838 
839 	ret = skb_copy_bits(skb, *offset,
840 		&pdu[queue->pdu_offset], rcv_len);
841 	if (unlikely(ret))
842 		return ret;
843 
844 	queue->pdu_remaining -= rcv_len;
845 	queue->pdu_offset += rcv_len;
846 	*offset += rcv_len;
847 	*len -= rcv_len;
848 	if (queue->pdu_remaining)
849 		return 0;
850 
851 	hdr = queue->pdu;
852 	if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
853 		if (!nvme_tcp_recv_pdu_supported(hdr->type))
854 			goto unsupported_pdu;
855 
856 		dev_err(queue->ctrl->ctrl.device,
857 			"pdu type %d has unexpected header length (%d)\n",
858 			hdr->type, hdr->hlen);
859 		return -EPROTO;
860 	}
861 
862 	if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
863 		/*
864 		 * C2HTermReq never includes Header or Data digests.
865 		 * Skip the checks.
866 		 */
867 		nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu);
868 		return -EINVAL;
869 	}
870 
871 	if (queue->hdr_digest) {
872 		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
873 		if (unlikely(ret))
874 			return ret;
875 	}
876 
877 
878 	if (queue->data_digest) {
879 		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
880 		if (unlikely(ret))
881 			return ret;
882 	}
883 
884 	switch (hdr->type) {
885 	case nvme_tcp_c2h_data:
886 		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
887 	case nvme_tcp_rsp:
888 		nvme_tcp_init_recv_ctx(queue);
889 		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
890 	case nvme_tcp_r2t:
891 		nvme_tcp_init_recv_ctx(queue);
892 		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
893 	default:
894 		goto unsupported_pdu;
895 	}
896 
897 unsupported_pdu:
898 	dev_err(queue->ctrl->ctrl.device,
899 		"unsupported pdu type (%d)\n", hdr->type);
900 	return -EINVAL;
901 }
902 
903 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
904 {
905 	union nvme_result res = {};
906 
907 	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
908 		nvme_complete_rq(rq);
909 }
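/*
 * The left shift above exists because bit 0 of the NVMe CQE status field is
 * the phase tag; the status code proper starts at bit 1, so a raw status
 * code must be shifted when synthesizing a completion.
 */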
910 
911 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
912 			      unsigned int *offset, size_t *len)
913 {
914 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
915 	struct request *rq =
916 		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
917 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
918 
919 	while (true) {
920 		int recv_len, ret;
921 
922 		recv_len = min_t(size_t, *len, queue->data_remaining);
923 		if (!recv_len)
924 			break;
925 
926 		if (!iov_iter_count(&req->iter)) {
927 			req->curr_bio = req->curr_bio->bi_next;
928 
929 			/*
930 			 * If we don't have any bios it means the controller
931 			 * sent more data than we requested, hence error
932 			 */
933 			if (!req->curr_bio) {
934 				dev_err(queue->ctrl->ctrl.device,
935 					"queue %d no space in request %#x",
936 					nvme_tcp_queue_id(queue), rq->tag);
937 				nvme_tcp_init_recv_ctx(queue);
938 				return -EIO;
939 			}
940 			nvme_tcp_init_iter(req, ITER_DEST);
941 		}
942 
943 		/* we can read only from what is left in this bio */
944 		recv_len = min_t(size_t, recv_len,
945 				iov_iter_count(&req->iter));
946 
947 		if (queue->data_digest)
948 			ret = skb_copy_and_crc32c_datagram_iter(skb, *offset,
949 				&req->iter, recv_len, &queue->rcv_crc);
950 		else
951 			ret = skb_copy_datagram_iter(skb, *offset,
952 					&req->iter, recv_len);
953 		if (ret) {
954 			dev_err(queue->ctrl->ctrl.device,
955 				"queue %d failed to copy request %#x data",
956 				nvme_tcp_queue_id(queue), rq->tag);
957 			return ret;
958 		}
959 
960 		*len -= recv_len;
961 		*offset += recv_len;
962 		queue->data_remaining -= recv_len;
963 	}
964 
965 	if (!queue->data_remaining) {
966 		if (queue->data_digest) {
967 			queue->exp_ddgst = nvme_tcp_ddgst_final(queue->rcv_crc);
968 			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
969 		} else {
970 			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
971 				nvme_tcp_end_request(rq,
972 						le16_to_cpu(req->status));
973 				queue->nr_cqe++;
974 			}
975 			nvme_tcp_init_recv_ctx(queue);
976 		}
977 	}
978 
979 	return 0;
980 }
981 
982 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
983 		struct sk_buff *skb, unsigned int *offset, size_t *len)
984 {
985 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
986 	char *ddgst = (char *)&queue->recv_ddgst;
987 	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
988 	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
989 	int ret;
990 
991 	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
992 	if (unlikely(ret))
993 		return ret;
994 
995 	queue->ddgst_remaining -= recv_len;
996 	*offset += recv_len;
997 	*len -= recv_len;
998 	if (queue->ddgst_remaining)
999 		return 0;
1000 
1001 	if (queue->recv_ddgst != queue->exp_ddgst) {
1002 		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
1003 					pdu->command_id);
1004 		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
1005 
1006 		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
1007 
1008 		dev_err(queue->ctrl->ctrl.device,
1009 			"data digest error: recv %#x expected %#x\n",
1010 			le32_to_cpu(queue->recv_ddgst),
1011 			le32_to_cpu(queue->exp_ddgst));
1012 	}
1013 
1014 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
1015 		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
1016 					pdu->command_id);
1017 		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
1018 
1019 		nvme_tcp_end_request(rq, le16_to_cpu(req->status));
1020 		queue->nr_cqe++;
1021 	}
1022 
1023 	nvme_tcp_init_recv_ctx(queue);
1024 	return 0;
1025 }
1026 
1027 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
1028 			     unsigned int offset, size_t len)
1029 {
1030 	struct nvme_tcp_queue *queue = desc->arg.data;
1031 	size_t consumed = len;
1032 	int result;
1033 
1034 	if (unlikely(!queue->rd_enabled))
1035 		return -EFAULT;
1036 
1037 	while (len) {
1038 		switch (nvme_tcp_recv_state(queue)) {
1039 		case NVME_TCP_RECV_PDU:
1040 			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
1041 			break;
1042 		case NVME_TCP_RECV_DATA:
1043 			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
1044 			break;
1045 		case NVME_TCP_RECV_DDGST:
1046 			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
1047 			break;
1048 		default:
1049 			result = -EFAULT;
1050 		}
1051 		if (result) {
1052 			dev_err(queue->ctrl->ctrl.device,
1053 				"receive failed: %d\n", result);
1054 			queue->rd_enabled = false;
1055 			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
1056 			return result;
1057 		}
1058 	}
1059 
1060 	return consumed;
1061 }
1062 
1063 static void nvme_tcp_data_ready(struct sock *sk)
1064 {
1065 	struct nvme_tcp_queue *queue;
1066 
1067 	trace_sk_data_ready(sk);
1068 
1069 	read_lock_bh(&sk->sk_callback_lock);
1070 	queue = sk->sk_user_data;
1071 	if (likely(queue && queue->rd_enabled) &&
1072 	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
1073 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1074 	read_unlock_bh(&sk->sk_callback_lock);
1075 }
1076 
1077 static void nvme_tcp_write_space(struct sock *sk)
1078 {
1079 	struct nvme_tcp_queue *queue;
1080 
1081 	read_lock_bh(&sk->sk_callback_lock);
1082 	queue = sk->sk_user_data;
1083 	if (likely(queue && sk_stream_is_writeable(sk))) {
1084 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1085 		/* Ensure pending TLS partial records are retried */
1086 		if (nvme_tcp_queue_tls(queue))
1087 			queue->write_space(sk);
1088 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1089 	}
1090 	read_unlock_bh(&sk->sk_callback_lock);
1091 }
1092 
1093 static void nvme_tcp_state_change(struct sock *sk)
1094 {
1095 	struct nvme_tcp_queue *queue;
1096 
1097 	read_lock_bh(&sk->sk_callback_lock);
1098 	queue = sk->sk_user_data;
1099 	if (!queue)
1100 		goto done;
1101 
1102 	switch (sk->sk_state) {
1103 	case TCP_CLOSE:
1104 	case TCP_CLOSE_WAIT:
1105 	case TCP_LAST_ACK:
1106 	case TCP_FIN_WAIT1:
1107 	case TCP_FIN_WAIT2:
1108 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
1109 		break;
1110 	default:
1111 		dev_info(queue->ctrl->ctrl.device,
1112 			"queue %d socket state %d\n",
1113 			nvme_tcp_queue_id(queue), sk->sk_state);
1114 	}
1115 
1116 	queue->state_change(sk);
1117 done:
1118 	read_unlock_bh(&sk->sk_callback_lock);
1119 }
1120 
1121 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
1122 {
1123 	queue->request = NULL;
1124 }
1125 
1126 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
1127 {
1128 	if (nvme_tcp_async_req(req)) {
1129 		union nvme_result res = {};
1130 
1131 		nvme_complete_async_event(&req->queue->ctrl->ctrl,
1132 				cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
1133 	} else {
1134 		nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
1135 				NVME_SC_HOST_PATH_ERROR);
1136 	}
1137 }
1138 
1139 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
1140 {
1141 	struct nvme_tcp_queue *queue = req->queue;
1142 	int req_data_len = req->data_len;
1143 	u32 h2cdata_left = req->h2cdata_left;
1144 
1145 	while (true) {
1146 		struct bio_vec bvec;
1147 		struct msghdr msg = {
1148 			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
1149 		};
1150 		struct page *page = nvme_tcp_req_cur_page(req);
1151 		size_t offset = nvme_tcp_req_cur_offset(req);
1152 		size_t len = nvme_tcp_req_cur_length(req);
1153 		bool last = nvme_tcp_pdu_last_send(req, len);
1154 		int req_data_sent = req->data_sent;
1155 		int ret;
1156 
1157 		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
1158 			msg.msg_flags |= MSG_EOR;
1159 		else
1160 			msg.msg_flags |= MSG_MORE;
1161 
1162 		if (!sendpages_ok(page, len, offset))
1163 			msg.msg_flags &= ~MSG_SPLICE_PAGES;
1164 
1165 		bvec_set_page(&bvec, page, len, offset);
1166 		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1167 		ret = sock_sendmsg(queue->sock, &msg);
1168 		if (ret <= 0)
1169 			return ret;
1170 
1171 		if (queue->data_digest)
1172 			nvme_tcp_ddgst_update(&queue->snd_crc, page,
1173 					offset, ret);
1174 
1175 		/*
1176 		 * update the request iterator except for the last payload send
1177 		 * in the request where we don't want to modify it as we may
1178 		 * compete with the RX path completing the request.
1179 		 */
1180 		if (req_data_sent + ret < req_data_len)
1181 			nvme_tcp_advance_req(req, ret);
1182 
1183 		/* fully successful last send in current PDU */
1184 		if (last && ret == len) {
1185 			if (queue->data_digest) {
1186 				req->ddgst =
1187 					nvme_tcp_ddgst_final(queue->snd_crc);
1188 				req->state = NVME_TCP_SEND_DDGST;
1189 				req->offset = 0;
1190 			} else {
1191 				if (h2cdata_left)
1192 					nvme_tcp_setup_h2c_data_pdu(req);
1193 				else
1194 					nvme_tcp_done_send_req(queue);
1195 			}
1196 			return 1;
1197 		}
1198 	}
1199 	return -EAGAIN;
1200 }
1201 
1202 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1203 {
1204 	struct nvme_tcp_queue *queue = req->queue;
1205 	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
1206 	struct bio_vec bvec;
1207 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
1208 	bool inline_data = nvme_tcp_has_inline_data(req);
1209 	u8 hdgst = nvme_tcp_hdgst_len(queue);
1210 	int len = sizeof(*pdu) + hdgst - req->offset;
1211 	int ret;
1212 
1213 	if (inline_data || nvme_tcp_queue_more(queue))
1214 		msg.msg_flags |= MSG_MORE;
1215 	else
1216 		msg.msg_flags |= MSG_EOR;
1217 
1218 	if (queue->hdr_digest && !req->offset)
1219 		nvme_tcp_set_hdgst(pdu, sizeof(*pdu));
1220 
1221 	bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1222 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1223 	ret = sock_sendmsg(queue->sock, &msg);
1224 	if (unlikely(ret <= 0))
1225 		return ret;
1226 
1227 	len -= ret;
1228 	if (!len) {
1229 		if (inline_data) {
1230 			req->state = NVME_TCP_SEND_DATA;
1231 			if (queue->data_digest)
1232 				queue->snd_crc = NVME_TCP_CRC_SEED;
1233 		} else {
1234 			nvme_tcp_done_send_req(queue);
1235 		}
1236 		return 1;
1237 	}
1238 	req->offset += ret;
1239 
1240 	return -EAGAIN;
1241 }
1242 
1243 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1244 {
1245 	struct nvme_tcp_queue *queue = req->queue;
1246 	struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
1247 	struct bio_vec bvec;
1248 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, };
1249 	u8 hdgst = nvme_tcp_hdgst_len(queue);
1250 	int len = sizeof(*pdu) - req->offset + hdgst;
1251 	int ret;
1252 
1253 	if (queue->hdr_digest && !req->offset)
1254 		nvme_tcp_set_hdgst(pdu, sizeof(*pdu));
1255 
1256 	if (!req->h2cdata_left)
1257 		msg.msg_flags |= MSG_SPLICE_PAGES;
1258 
1259 	bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1260 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1261 	ret = sock_sendmsg(queue->sock, &msg);
1262 	if (unlikely(ret <= 0))
1263 		return ret;
1264 
1265 	len -= ret;
1266 	if (!len) {
1267 		req->state = NVME_TCP_SEND_DATA;
1268 		if (queue->data_digest)
1269 			queue->snd_crc = NVME_TCP_CRC_SEED;
1270 		return 1;
1271 	}
1272 	req->offset += ret;
1273 
1274 	return -EAGAIN;
1275 }
1276 
1277 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1278 {
1279 	struct nvme_tcp_queue *queue = req->queue;
1280 	size_t offset = req->offset;
1281 	u32 h2cdata_left = req->h2cdata_left;
1282 	int ret;
1283 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1284 	struct kvec iov = {
1285 		.iov_base = (u8 *)&req->ddgst + req->offset,
1286 		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1287 	};
1288 
1289 	if (nvme_tcp_queue_more(queue))
1290 		msg.msg_flags |= MSG_MORE;
1291 	else
1292 		msg.msg_flags |= MSG_EOR;
1293 
1294 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1295 	if (unlikely(ret <= 0))
1296 		return ret;
1297 
1298 	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
1299 		if (h2cdata_left)
1300 			nvme_tcp_setup_h2c_data_pdu(req);
1301 		else
1302 			nvme_tcp_done_send_req(queue);
1303 		return 1;
1304 	}
1305 
1306 	req->offset += ret;
1307 	return -EAGAIN;
1308 }
1309 
1310 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1311 {
1312 	struct nvme_tcp_request *req;
1313 	unsigned int noreclaim_flag;
1314 	int ret = 1;
1315 
1316 	if (!queue->request) {
1317 		queue->request = nvme_tcp_fetch_request(queue);
1318 		if (!queue->request)
1319 			return 0;
1320 	}
1321 	req = queue->request;
1322 
1323 	noreclaim_flag = memalloc_noreclaim_save();
1324 	if (req->state == NVME_TCP_SEND_CMD_PDU) {
1325 		ret = nvme_tcp_try_send_cmd_pdu(req);
1326 		if (ret <= 0)
1327 			goto done;
1328 		if (!nvme_tcp_has_inline_data(req))
1329 			goto out;
1330 	}
1331 
1332 	if (req->state == NVME_TCP_SEND_H2C_PDU) {
1333 		ret = nvme_tcp_try_send_data_pdu(req);
1334 		if (ret <= 0)
1335 			goto done;
1336 	}
1337 
1338 	if (req->state == NVME_TCP_SEND_DATA) {
1339 		ret = nvme_tcp_try_send_data(req);
1340 		if (ret <= 0)
1341 			goto done;
1342 	}
1343 
1344 	if (req->state == NVME_TCP_SEND_DDGST)
1345 		ret = nvme_tcp_try_send_ddgst(req);
1346 done:
1347 	if (ret == -EAGAIN) {
1348 		ret = 0;
1349 	} else if (ret < 0) {
1350 		dev_err(queue->ctrl->ctrl.device,
1351 			"failed to send request, error %d\n", ret);
1352 		nvme_tcp_fail_request(queue->request);
1353 		nvme_tcp_done_send_req(queue);
1354 	}
1355 out:
1356 	memalloc_noreclaim_restore(noreclaim_flag);
1357 	return ret;
1358 }
1359 
1360 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1361 {
1362 	struct socket *sock = queue->sock;
1363 	struct sock *sk = sock->sk;
1364 	read_descriptor_t rd_desc;
1365 	int consumed;
1366 
1367 	rd_desc.arg.data = queue;
1368 	rd_desc.count = 1;
1369 	lock_sock(sk);
1370 	queue->nr_cqe = 0;
1371 	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1372 	release_sock(sk);
1373 	return consumed == -EAGAIN ? 0 : consumed;
1374 }
1375 
1376 static void nvme_tcp_io_work(struct work_struct *w)
1377 {
1378 	struct nvme_tcp_queue *queue =
1379 		container_of(w, struct nvme_tcp_queue, io_work);
1380 	unsigned long deadline = jiffies + msecs_to_jiffies(1);
1381 
1382 	do {
1383 		bool pending = false;
1384 		int result;
1385 
1386 		if (mutex_trylock(&queue->send_mutex)) {
1387 			result = nvme_tcp_try_send(queue);
1388 			mutex_unlock(&queue->send_mutex);
1389 			if (result > 0)
1390 				pending = true;
1391 			else if (unlikely(result < 0))
1392 				break;
1393 		}
1394 
1395 		result = nvme_tcp_try_recv(queue);
1396 		if (result > 0)
1397 			pending = true;
1398 		else if (unlikely(result < 0))
1399 			return;
1400 
1401 		/* did we get some space after spending time in recv? */
1402 		if (nvme_tcp_queue_has_pending(queue) &&
1403 		    sk_stream_is_writeable(queue->sock->sk))
1404 			pending = true;
1405 
1406 		if (!pending || !queue->rd_enabled)
1407 			return;
1408 
1409 	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
1410 
1411 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1412 }
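/*
 * io_work runs with a roughly 1ms budget per invocation: it alternates
 * between draining the send path (under send_mutex, which it may contend
 * with the inline-send fast path in nvme_tcp_queue_request()) and pumping
 * the receive path. If work is still pending when the budget expires, it
 * requeues itself on the same io_cpu rather than monopolizing the worker.
 */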
1413 
1414 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1415 {
1416 	struct nvme_tcp_request *async = &ctrl->async_req;
1417 
1418 	page_frag_free(async->pdu);
1419 }
1420 
1421 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1422 {
1423 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
1424 	struct nvme_tcp_request *async = &ctrl->async_req;
1425 	u8 hdgst = nvme_tcp_hdgst_len(queue);
1426 
1427 	async->pdu = page_frag_alloc(&queue->pf_cache,
1428 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1429 		GFP_KERNEL | __GFP_ZERO);
1430 	if (!async->pdu)
1431 		return -ENOMEM;
1432 
1433 	async->queue = &ctrl->queues[0];
1434 	return 0;
1435 }
1436 
1437 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1438 {
1439 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1440 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1441 	unsigned int noreclaim_flag;
1442 
1443 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1444 		return;
1445 
1446 	page_frag_cache_drain(&queue->pf_cache);
1447 
1448 	noreclaim_flag = memalloc_noreclaim_save();
1449 	/* ->sock will be released by fput() */
1450 	fput(queue->sock->file);
1451 	queue->sock = NULL;
1452 	memalloc_noreclaim_restore(noreclaim_flag);
1453 
1454 	kfree(queue->pdu);
1455 	mutex_destroy(&queue->send_mutex);
1456 	mutex_destroy(&queue->queue_lock);
1457 }
1458 
1459 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1460 {
1461 	struct nvme_tcp_icreq_pdu *icreq;
1462 	struct nvme_tcp_icresp_pdu *icresp;
1463 	char cbuf[CMSG_LEN(sizeof(char))] = {};
1464 	u8 ctype;
1465 	struct msghdr msg = {};
1466 	struct kvec iov;
1467 	bool ctrl_hdgst, ctrl_ddgst;
1468 	u32 maxh2cdata;
1469 	int ret;
1470 
1471 	icreq = kzalloc_obj(*icreq);
1472 	if (!icreq)
1473 		return -ENOMEM;
1474 
1475 	icresp = kzalloc_obj(*icresp);
1476 	if (!icresp) {
1477 		ret = -ENOMEM;
1478 		goto free_icreq;
1479 	}
1480 
1481 	icreq->hdr.type = nvme_tcp_icreq;
1482 	icreq->hdr.hlen = sizeof(*icreq);
1483 	icreq->hdr.pdo = 0;
1484 	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1485 	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1486 	icreq->maxr2t = 0; /* single inflight r2t supported */
1487 	icreq->hpda = 0; /* no alignment constraint */
1488 	if (queue->hdr_digest)
1489 		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1490 	if (queue->data_digest)
1491 		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1492 
1493 	iov.iov_base = icreq;
1494 	iov.iov_len = sizeof(*icreq);
1495 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1496 	if (ret < 0) {
1497 		pr_warn("queue %d: failed to send icreq, error %d\n",
1498 			nvme_tcp_queue_id(queue), ret);
1499 		goto free_icresp;
1500 	}
1501 
1502 	memset(&msg, 0, sizeof(msg));
1503 	iov.iov_base = icresp;
1504 	iov.iov_len = sizeof(*icresp);
1505 	if (nvme_tcp_queue_tls(queue)) {
1506 		msg.msg_control = cbuf;
1507 		msg.msg_controllen = sizeof(cbuf);
1508 	}
1509 	msg.msg_flags = MSG_WAITALL;
1510 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1511 			iov.iov_len, msg.msg_flags);
1512 	if (ret >= 0 && ret < sizeof(*icresp))
1513 		ret = -ECONNRESET;
1514 	if (ret < 0) {
1515 		pr_warn("queue %d: failed to receive icresp, error %d\n",
1516 			nvme_tcp_queue_id(queue), ret);
1517 		goto free_icresp;
1518 	}
1519 	ret = -ENOTCONN;
1520 	if (nvme_tcp_queue_tls(queue)) {
1521 		ctype = tls_get_record_type(queue->sock->sk,
1522 					    (struct cmsghdr *)cbuf);
1523 		if (ctype != TLS_RECORD_TYPE_DATA) {
1524 			pr_err("queue %d: unhandled TLS record %d\n",
1525 			       nvme_tcp_queue_id(queue), ctype);
1526 			goto free_icresp;
1527 		}
1528 	}
1529 	ret = -EINVAL;
1530 	if (icresp->hdr.type != nvme_tcp_icresp) {
1531 		pr_err("queue %d: bad type returned %d\n",
1532 			nvme_tcp_queue_id(queue), icresp->hdr.type);
1533 		goto free_icresp;
1534 	}
1535 
1536 	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1537 		pr_err("queue %d: bad pdu length returned %d\n",
1538 			nvme_tcp_queue_id(queue), icresp->hdr.plen);
1539 		goto free_icresp;
1540 	}
1541 
1542 	if (icresp->pfv != NVME_TCP_PFV_1_0) {
1543 		pr_err("queue %d: bad pfv returned %d\n",
1544 			nvme_tcp_queue_id(queue), icresp->pfv);
1545 		goto free_icresp;
1546 	}
1547 
1548 	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1549 	if ((queue->data_digest && !ctrl_ddgst) ||
1550 	    (!queue->data_digest && ctrl_ddgst)) {
1551 		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1552 			nvme_tcp_queue_id(queue),
1553 			queue->data_digest ? "enabled" : "disabled",
1554 			ctrl_ddgst ? "enabled" : "disabled");
1555 		goto free_icresp;
1556 	}
1557 
1558 	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1559 	if ((queue->hdr_digest && !ctrl_hdgst) ||
1560 	    (!queue->hdr_digest && ctrl_hdgst)) {
1561 		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1562 			nvme_tcp_queue_id(queue),
1563 			queue->hdr_digest ? "enabled" : "disabled",
1564 			ctrl_hdgst ? "enabled" : "disabled");
1565 		goto free_icresp;
1566 	}
1567 
1568 	if (icresp->cpda != 0) {
1569 		pr_err("queue %d: unsupported cpda returned %d\n",
1570 			nvme_tcp_queue_id(queue), icresp->cpda);
1571 		goto free_icresp;
1572 	}
1573 
1574 	maxh2cdata = le32_to_cpu(icresp->maxdata);
1575 	if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
1576 		pr_err("queue %d: invalid maxh2cdata returned %u\n",
1577 		       nvme_tcp_queue_id(queue), maxh2cdata);
1578 		goto free_icresp;
1579 	}
1580 	queue->maxh2cdata = maxh2cdata;
1581 
1582 	ret = 0;
1583 free_icresp:
1584 	kfree(icresp);
1585 free_icreq:
1586 	kfree(icreq);
1587 	return ret;
1588 }
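/*
 * Connection initialization in brief: the host sends an ICReq advertising
 * PFV 1.0, no PDU alignment (hpda = 0), a single outstanding R2T and its
 * digest preferences. The ICResp must echo a matching PFV, cpda == 0 and
 * identical digest settings, and supplies MAXH2CDATA (validated above to be
 * a multiple of 4 and at least NVME_TCP_MIN_MAXH2CDATA).
 */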
1589 
1590 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1591 {
1592 	return nvme_tcp_queue_id(queue) == 0;
1593 }
1594 
1595 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1596 {
1597 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1598 	int qid = nvme_tcp_queue_id(queue);
1599 
1600 	return !nvme_tcp_admin_queue(queue) &&
1601 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1602 }
1603 
1604 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1605 {
1606 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1607 	int qid = nvme_tcp_queue_id(queue);
1608 
1609 	return !nvme_tcp_admin_queue(queue) &&
1610 		!nvme_tcp_default_queue(queue) &&
1611 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1612 			  ctrl->io_queues[HCTX_TYPE_READ];
1613 }
1614 
1615 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1616 {
1617 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1618 	int qid = nvme_tcp_queue_id(queue);
1619 
1620 	return !nvme_tcp_admin_queue(queue) &&
1621 		!nvme_tcp_default_queue(queue) &&
1622 		!nvme_tcp_read_queue(queue) &&
1623 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1624 			  ctrl->io_queues[HCTX_TYPE_READ] +
1625 			  ctrl->io_queues[HCTX_TYPE_POLL];
1626 }
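/*
 * Example (hypothetical counts): with io_queues = { DEFAULT: 4, READ: 2,
 * POLL: 2 }, qid 0 is the admin queue, qids 1-4 are default queues,
 * qids 5-6 are read queues and qids 7-8 are poll queues; these are exactly
 * the half-open ranges the three helpers above test with "qid < 1 + ...".
 */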
1627 
1628 /*
1629  * Track the number of queues assigned to each cpu using a global per-cpu
1630  * counter and select the least used cpu from the mq_map. Our goal is to spread
1631  * different controllers' I/O threads across different cpu cores.
1632  *
1633  * Note that the accounting is not 100% accurate, but it doesn't need to be;
1634  * we simply make a best effort to select the best candidate cpu core that we
1635  * find at any given point.
1636  */
1637 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1638 {
1639 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1640 	struct blk_mq_tag_set *set = &ctrl->tag_set;
1641 	int qid = nvme_tcp_queue_id(queue) - 1;
1642 	unsigned int *mq_map = NULL;
1643 	int cpu, min_queues = INT_MAX, io_cpu;
1644 
1645 	if (wq_unbound)
1646 		goto out;
1647 
1648 	if (nvme_tcp_default_queue(queue))
1649 		mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
1650 	else if (nvme_tcp_read_queue(queue))
1651 		mq_map = set->map[HCTX_TYPE_READ].mq_map;
1652 	else if (nvme_tcp_poll_queue(queue))
1653 		mq_map = set->map[HCTX_TYPE_POLL].mq_map;
1654 
1655 	if (WARN_ON(!mq_map))
1656 		goto out;
1657 
1658 	/* Search for the least used cpu from the mq_map */
1659 	io_cpu = WORK_CPU_UNBOUND;
1660 	for_each_online_cpu(cpu) {
1661 		int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
1662 
1663 		if (mq_map[cpu] != qid)
1664 			continue;
1665 		if (num_queues < min_queues) {
1666 			io_cpu = cpu;
1667 			min_queues = num_queues;
1668 		}
1669 	}
1670 	if (io_cpu != WORK_CPU_UNBOUND) {
1671 		queue->io_cpu = io_cpu;
1672 		atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
1673 		set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
1674 	}
1675 out:
1676 	dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
1677 		qid, queue->io_cpu);
1678 }
1679 
1680 static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
1681 {
1682 	struct nvme_tcp_queue *queue = data;
1683 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1684 	int qid = nvme_tcp_queue_id(queue);
1685 	struct key *tls_key;
1686 
1687 	dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n",
1688 		qid, pskid, status);
1689 
1690 	if (status) {
1691 		queue->tls_err = -status;
1692 		goto out_complete;
1693 	}
1694 
1695 	tls_key = nvme_tls_key_lookup(pskid);
1696 	if (IS_ERR(tls_key)) {
1697 		dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n",
1698 			 qid, pskid);
1699 		queue->tls_err = -ENOKEY;
1700 	} else {
1701 		queue->tls_enabled = true;
1702 		if (qid == 0)
1703 			ctrl->ctrl.tls_pskid = key_serial(tls_key);
1704 		key_put(tls_key);
1705 		queue->tls_err = 0;
1706 	}
1707 
1708 out_complete:
1709 	complete(&queue->tls_complete);
1710 }
1711 
1712 static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
1713 			      struct nvme_tcp_queue *queue,
1714 			      key_serial_t pskid)
1715 {
1716 	int qid = nvme_tcp_queue_id(queue);
1717 	int ret;
1718 	struct tls_handshake_args args;
1719 	unsigned long tmo = tls_handshake_timeout * HZ;
1720 	key_serial_t keyring = nvme_keyring_id();
1721 
1722 	dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n",
1723 		qid, pskid);
1724 	memset(&args, 0, sizeof(args));
1725 	args.ta_sock = queue->sock;
1726 	args.ta_done = nvme_tcp_tls_done;
1727 	args.ta_data = queue;
1728 	args.ta_my_peerids[0] = pskid;
1729 	args.ta_num_peerids = 1;
1730 	if (nctrl->opts->keyring)
1731 		keyring = key_serial(nctrl->opts->keyring);
1732 	args.ta_keyring = keyring;
1733 	args.ta_timeout_ms = tls_handshake_timeout * 1000;
1734 	queue->tls_err = -EOPNOTSUPP;
1735 	init_completion(&queue->tls_complete);
1736 	ret = tls_client_hello_psk(&args, GFP_KERNEL);
1737 	if (ret) {
1738 		dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n",
1739 			qid, ret);
1740 		return ret;
1741 	}
1742 	ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo);
1743 	if (ret <= 0) {
1744 		if (ret == 0)
1745 			ret = -ETIMEDOUT;
1746 
1747 		dev_err(nctrl->device,
1748 			"queue %d: TLS handshake failed, error %d\n",
1749 			qid, ret);
1750 		tls_handshake_cancel(queue->sock->sk);
1751 	} else {
1752 		if (queue->tls_err) {
1753 			dev_err(nctrl->device,
1754 				"queue %d: TLS handshake complete, error %d\n",
1755 				qid, queue->tls_err);
1756 		} else {
1757 			dev_dbg(nctrl->device,
1758 				"queue %d: TLS handshake complete\n", qid);
1759 		}
1760 		ret = queue->tls_err;
1761 	}
1762 	return ret;
1763 }
1764 
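/*
 * Editor's note: wait_for_completion_interruptible_timeout() returns the
 * remaining jiffies (> 0) on completion, 0 on timeout, or a negative errno
 * if interrupted; the code above folds the 0 case into -ETIMEDOUT before
 * cancelling the handshake. A rough userspace analogue of that wait, using
 * a POSIX semaphore (illustrative sketch only, not driver code):
 */
#if 0	/* standalone userspace sketch, kept out of the kernel build */
#include <errno.h>
#include <semaphore.h>
#include <time.h>

/* Wait up to tmo_sec seconds; 0 on completion, -ETIMEDOUT/-EINTR on failure. */
static int wait_done(sem_t *done, int tmo_sec)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += tmo_sec;
	if (sem_timedwait(done, &ts) == 0)
		return 0;
	return -errno;	/* mirrors the kernel's timeout/interrupt mapping */
}
#endif
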
1765 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
1766 				key_serial_t pskid)
1767 {
1768 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1769 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1770 	int ret, rcv_pdu_size;
1771 	struct file *sock_file;
1772 
1773 	mutex_init(&queue->queue_lock);
1774 	queue->ctrl = ctrl;
1775 	init_llist_head(&queue->req_list);
1776 	INIT_LIST_HEAD(&queue->send_list);
1777 	mutex_init(&queue->send_mutex);
1778 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1779 
1780 	if (qid > 0)
1781 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1782 	else
1783 		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1784 						NVME_TCP_ADMIN_CCSZ;
1785 
1786 	ret = sock_create_kern(current->nsproxy->net_ns,
1787 			ctrl->addr.ss_family, SOCK_STREAM,
1788 			IPPROTO_TCP, &queue->sock);
1789 	if (ret) {
1790 		dev_err(nctrl->device,
1791 			"failed to create socket: %d\n", ret);
1792 		goto err_destroy_mutex;
1793 	}
1794 
1795 	sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
1796 	if (IS_ERR(sock_file)) {
1797 		ret = PTR_ERR(sock_file);
1798 		goto err_destroy_mutex;
1799 	}
1800 
1801 	sk_net_refcnt_upgrade(queue->sock->sk);
1802 	nvme_tcp_reclassify_socket(queue->sock);
1803 
1804 	/* Single SYN retry */
1805 	tcp_sock_set_syncnt(queue->sock->sk, 1);
1806 
1807 	/* Set TCP no delay */
1808 	tcp_sock_set_nodelay(queue->sock->sk);
1809 
1810 	/*
1811 	 * Clean up whatever is sitting in the TCP transmit queue on socket
1812 	 * close. This is done to prevent stale data from being sent should
1813 	 * the network connection be restored before TCP times out.
1814 	 */
1815 	sock_no_linger(queue->sock->sk);
1816 
1817 	if (so_priority > 0)
1818 		sock_set_priority(queue->sock->sk, so_priority);
1819 
1820 	/* Set socket type of service */
1821 	if (nctrl->opts->tos >= 0)
1822 		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1823 
1824 	/* Set 10 seconds timeout for icresp recvmsg */
1825 	queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1826 
1827 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
1828 	queue->sock->sk->sk_use_task_frag = false;
1829 	queue->io_cpu = WORK_CPU_UNBOUND;
1830 	queue->request = NULL;
1831 	queue->data_remaining = 0;
1832 	queue->ddgst_remaining = 0;
1833 	queue->pdu_remaining = 0;
1834 	queue->pdu_offset = 0;
1835 	sk_set_memalloc(queue->sock->sk);
1836 
1837 	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1838 		ret = kernel_bind(queue->sock, (struct sockaddr_unsized *)&ctrl->src_addr,
1839 			sizeof(ctrl->src_addr));
1840 		if (ret) {
1841 			dev_err(nctrl->device,
1842 				"failed to bind queue %d socket %d\n",
1843 				qid, ret);
1844 			goto err_sock;
1845 		}
1846 	}
1847 
1848 	if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1849 		char *iface = nctrl->opts->host_iface;
1850 		sockptr_t optval = KERNEL_SOCKPTR(iface);
1851 
1852 		ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1853 				      optval, strlen(iface));
1854 		if (ret) {
1855 			dev_err(nctrl->device,
1856 			  "failed to bind to interface %s queue %d err %d\n",
1857 			  iface, qid, ret);
1858 			goto err_sock;
1859 		}
1860 	}
1861 
1862 	queue->hdr_digest = nctrl->opts->hdr_digest;
1863 	queue->data_digest = nctrl->opts->data_digest;
1864 
1865 	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1866 			nvme_tcp_hdgst_len(queue);
1867 	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1868 	if (!queue->pdu) {
1869 		ret = -ENOMEM;
1870 		goto err_sock;
1871 	}
1872 
1873 	dev_dbg(nctrl->device, "connecting queue %d\n",
1874 			nvme_tcp_queue_id(queue));
1875 
1876 	ret = kernel_connect(queue->sock, (struct sockaddr_unsized *)&ctrl->addr,
1877 		sizeof(ctrl->addr), 0);
1878 	if (ret) {
1879 		dev_err(nctrl->device,
1880 			"failed to connect socket: %d\n", ret);
1881 		goto err_rcv_pdu;
1882 	}
1883 
1884 	/* If PSKs are configured try to start TLS */
1885 	if (nvme_tcp_tls_configured(nctrl) && pskid) {
1886 		ret = nvme_tcp_start_tls(nctrl, queue, pskid);
1887 		if (ret)
1888 			goto err_init_connect;
1889 	}
1890 
1891 	ret = nvme_tcp_init_connection(queue);
1892 	if (ret)
1893 		goto err_init_connect;
1894 
1895 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1896 
1897 	return 0;
1898 
1899 err_init_connect:
1900 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1901 err_rcv_pdu:
1902 	kfree(queue->pdu);
1903 err_sock:
1904 	/* ->sock will be released by fput() */
1905 	fput(queue->sock->file);
1906 	queue->sock = NULL;
1907 err_destroy_mutex:
1908 	mutex_destroy(&queue->send_mutex);
1909 	mutex_destroy(&queue->queue_lock);
1910 	return ret;
1911 }
1912 
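/*
 * Editor's note: every socket tweak in nvme_tcp_alloc_queue() has a direct
 * userspace equivalent (tcp_sock_set_syncnt -> TCP_SYNCNT, sock_no_linger ->
 * SO_LINGER with a zero timeout, and so on). A hedged sketch with example
 * values; error handling elided, not driver code:
 */
#if 0	/* standalone userspace sketch, kept out of the kernel build */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>

static void tune_socket(int fd)
{
	int one = 1;
	struct linger lg = { .l_onoff = 1, .l_linger = 0 }; /* RST on close */
	int prio = 1, tos = 0x10;			/* example values */
	struct timeval rcvtmo = { .tv_sec = 10 };	/* 10s recv timeout */

	setsockopt(fd, IPPROTO_TCP, TCP_SYNCNT, &one, sizeof(one));
	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
	setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &rcvtmo, sizeof(rcvtmo));
	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
}
#endif
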
1913 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
1914 {
1915 	struct socket *sock = queue->sock;
1916 
1917 	write_lock_bh(&sock->sk->sk_callback_lock);
1918 	sock->sk->sk_user_data  = NULL;
1919 	sock->sk->sk_data_ready = queue->data_ready;
1920 	sock->sk->sk_state_change = queue->state_change;
1921 	sock->sk->sk_write_space  = queue->write_space;
1922 	write_unlock_bh(&sock->sk->sk_callback_lock);
1923 }
1924 
1925 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1926 {
1927 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1928 	nvme_tcp_restore_sock_ops(queue);
1929 	cancel_work_sync(&queue->io_work);
1930 }
1931 
1932 static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
1933 {
1934 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1935 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1936 
1937 	if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1938 		return;
1939 
1940 	if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags))
1941 		atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]);
1942 
1943 	mutex_lock(&queue->queue_lock);
1944 	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1945 		__nvme_tcp_stop_queue(queue);
1946 	/* Stopping the queue will disable TLS */
1947 	queue->tls_enabled = false;
1948 	mutex_unlock(&queue->queue_lock);
1949 }
1950 
1951 static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
1952 {
1953 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1954 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1955 	int timeout = 100;
1956 
1957 	while (timeout > 0) {
1958 		if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
1959 		    !sk_wmem_alloc_get(queue->sock->sk))
1960 			return;
1961 		msleep(2);
1962 		timeout -= 2;
1963 	}
1964 	dev_warn(nctrl->device,
1965 		 "qid %d: timed out draining sock wmem allocation\n",
1966 		 qid);
1967 }
1968 
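/*
 * Editor's note: the drain above is a bounded poll - test, sleep 2ms,
 * give up after ~100ms. The same pattern as a generic standalone helper
 * (names hypothetical, illustration only):
 */
#if 0	/* standalone userspace sketch, kept out of the kernel build */
#include <stdbool.h>
#include <time.h>

/* Poll cond() every 2ms for up to budget_ms; true if it was satisfied. */
static bool poll_until(bool (*cond)(void *), void *arg, int budget_ms)
{
	struct timespec step = { .tv_nsec = 2 * 1000 * 1000 };

	while (budget_ms > 0) {
		if (cond(arg))
			return true;
		nanosleep(&step, NULL);
		budget_ms -= 2;
	}
	return false;	/* budget expired; caller decides how to report it */
}
#endif
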
1969 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1970 {
1971 	nvme_tcp_stop_queue_nowait(nctrl, qid);
1972 	nvme_tcp_wait_queue(nctrl, qid);
1973 }
1974 
1975 
1976 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
1977 {
1978 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
1979 	queue->sock->sk->sk_user_data = queue;
1980 	queue->state_change = queue->sock->sk->sk_state_change;
1981 	queue->data_ready = queue->sock->sk->sk_data_ready;
1982 	queue->write_space = queue->sock->sk->sk_write_space;
1983 	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1984 	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1985 	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1986 #ifdef CONFIG_NET_RX_BUSY_POLL
1987 	queue->sock->sk->sk_ll_usec = 1;
1988 #endif
1989 	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1990 }
1991 
1992 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1993 {
1994 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1995 	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
1996 	int ret;
1997 
1998 	queue->rd_enabled = true;
1999 	nvme_tcp_init_recv_ctx(queue);
2000 	nvme_tcp_setup_sock_ops(queue);
2001 
2002 	if (idx) {
2003 		nvme_tcp_set_queue_io_cpu(queue);
2004 		ret = nvmf_connect_io_queue(nctrl, idx);
2005 	} else
2006 		ret = nvmf_connect_admin_queue(nctrl);
2007 
2008 	if (!ret) {
2009 		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
2010 	} else {
2011 		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
2012 			__nvme_tcp_stop_queue(queue);
2013 		dev_err(nctrl->device,
2014 			"failed to connect queue: %d ret=%d\n", idx, ret);
2015 	}
2016 	return ret;
2017 }
2018 
2019 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
2020 {
2021 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
2022 		cancel_work_sync(&ctrl->async_event_work);
2023 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
2024 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
2025 	}
2026 
2027 	nvme_tcp_free_queue(ctrl, 0);
2028 }
2029 
2030 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
2031 {
2032 	int i;
2033 
2034 	for (i = 1; i < ctrl->queue_count; i++)
2035 		nvme_tcp_free_queue(ctrl, i);
2036 }
2037 
2038 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
2039 {
2040 	int i;
2041 
2042 	for (i = 1; i < ctrl->queue_count; i++)
2043 		nvme_tcp_stop_queue_nowait(ctrl, i);
2044 	for (i = 1; i < ctrl->queue_count; i++)
2045 		nvme_tcp_wait_queue(ctrl, i);
2046 }
2047 
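/*
 * Editor's note: nvme_tcp_stop_io_queues() is deliberately two-phase -
 * signal every queue without waiting, then wait on each - so the per-queue
 * drains overlap instead of running back to back. Generic sketch of the
 * pattern (callback names hypothetical, illustration only):
 */
#if 0	/* standalone userspace sketch, kept out of the kernel build */
/* Signal all shutdowns first, then reap them, so the waits overlap. */
static void stop_all(void **queues, int n,
		     void (*signal_one)(void *), void (*wait_one)(void *))
{
	for (int i = 0; i < n; i++)
		signal_one(queues[i]);	/* kick off every shutdown */
	for (int i = 0; i < n; i++)
		wait_one(queues[i]);	/* drains proceed in parallel */
}
#endif
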
2048 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
2049 				    int first, int last)
2050 {
2051 	int i, ret;
2052 
2053 	for (i = first; i < last; i++) {
2054 		ret = nvme_tcp_start_queue(ctrl, i);
2055 		if (ret)
2056 			goto out_stop_queues;
2057 	}
2058 
2059 	return 0;
2060 
2061 out_stop_queues:
2062 	for (i--; i >= first; i--)
2063 		nvme_tcp_stop_queue(ctrl, i);
2064 	return ret;
2065 }
2066 
2067 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
2068 {
2069 	int ret;
2070 	key_serial_t pskid = 0;
2071 
2072 	if (nvme_tcp_tls_configured(ctrl)) {
2073 		if (ctrl->opts->tls_key)
2074 			pskid = key_serial(ctrl->opts->tls_key);
2075 		else if (ctrl->opts->tls) {
2076 			pskid = nvme_tls_psk_default(ctrl->opts->keyring,
2077 						      ctrl->opts->host->nqn,
2078 						      ctrl->opts->subsysnqn);
2079 			if (!pskid) {
2080 				dev_err(ctrl->device, "no valid PSK found\n");
2081 				return -ENOKEY;
2082 			}
2083 		}
2084 	}
2085 
2086 	ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
2087 	if (ret)
2088 		return ret;
2089 
2090 	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
2091 	if (ret)
2092 		goto out_free_queue;
2093 
2094 	return 0;
2095 
2096 out_free_queue:
2097 	nvme_tcp_free_queue(ctrl, 0);
2098 	return ret;
2099 }
2100 
2101 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
2102 {
2103 	int i, ret;
2104 
2105 	if (nvme_tcp_tls_configured(ctrl)) {
2106 		if (ctrl->opts->concat) {
2107 			/*
2108 			 * The generated PSK is stored in the
2109 			 * fabric options
2110 			 */
2111 			if (!ctrl->opts->tls_key) {
2112 				dev_err(ctrl->device, "no PSK generated\n");
2113 				return -ENOKEY;
2114 			}
2115 			if (ctrl->tls_pskid &&
2116 			    ctrl->tls_pskid != key_serial(ctrl->opts->tls_key)) {
2117 				dev_err(ctrl->device, "Stale PSK id %08x\n", ctrl->tls_pskid);
2118 				ctrl->tls_pskid = 0;
2119 			}
2120 		} else if (!ctrl->tls_pskid) {
2121 			dev_err(ctrl->device, "no PSK negotiated\n");
2122 			return -ENOKEY;
2123 		}
2124 	}
2125 
2126 	for (i = 1; i < ctrl->queue_count; i++) {
2127 		ret = nvme_tcp_alloc_queue(ctrl, i,
2128 				ctrl->tls_pskid);
2129 		if (ret)
2130 			goto out_free_queues;
2131 	}
2132 
2133 	return 0;
2134 
2135 out_free_queues:
2136 	for (i--; i >= 1; i--)
2137 		nvme_tcp_free_queue(ctrl, i);
2138 
2139 	return ret;
2140 }
2141 
2142 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
2143 {
2144 	unsigned int nr_io_queues;
2145 	int ret;
2146 
2147 	nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
2148 	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
2149 	if (ret)
2150 		return ret;
2151 
2152 	if (nr_io_queues == 0) {
2153 		dev_err(ctrl->device,
2154 			"unable to set any I/O queues\n");
2155 		return -ENOMEM;
2156 	}
2157 
2158 	ctrl->queue_count = nr_io_queues + 1;
2159 	dev_info(ctrl->device,
2160 		"creating %d I/O queues.\n", nr_io_queues);
2161 
2162 	nvmf_set_io_queues(ctrl->opts, nr_io_queues,
2163 			   to_tcp_ctrl(ctrl)->io_queues);
2164 	return __nvme_tcp_alloc_io_queues(ctrl);
2165 }
2166 
2167 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
2168 {
2169 	int ret, nr_queues;
2170 
2171 	ret = nvme_tcp_alloc_io_queues(ctrl);
2172 	if (ret)
2173 		return ret;
2174 
2175 	if (new) {
2176 		ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
2177 				&nvme_tcp_mq_ops,
2178 				ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
2179 				sizeof(struct nvme_tcp_request));
2180 		if (ret)
2181 			goto out_free_io_queues;
2182 	}
2183 
2184 	/*
2185 	 * Only start IO queues for which we have allocated the tagset
2186 	 * and limited it to the available queues. On reconnects, the
2187 	 * number of queues might have changed.
2188 	 */
2189 	nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
2190 	ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
2191 	if (ret)
2192 		goto out_cleanup_connect_q;
2193 
2194 	if (!new) {
2195 		nvme_start_freeze(ctrl);
2196 		nvme_unquiesce_io_queues(ctrl);
2197 		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
2198 			/*
2199 			 * If we timed out waiting for freeze we are likely to
2200 			 * be stuck.  Fail the controller initialization just
2201 			 * to be safe.
2202 			 */
2203 			ret = -ENODEV;
2204 			nvme_unfreeze(ctrl);
2205 			goto out_wait_freeze_timed_out;
2206 		}
2207 		blk_mq_update_nr_hw_queues(ctrl->tagset,
2208 			ctrl->queue_count - 1);
2209 		nvme_unfreeze(ctrl);
2210 	}
2211 
2212 	/*
2213 	 * If the number of queues has increased (reconnect case)
2214 	 * start all new queues now.
2215 	 */
2216 	ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
2217 				       ctrl->tagset->nr_hw_queues + 1);
2218 	if (ret)
2219 		goto out_wait_freeze_timed_out;
2220 
2221 	return 0;
2222 
2223 out_wait_freeze_timed_out:
2224 	nvme_quiesce_io_queues(ctrl);
2225 	nvme_sync_io_queues(ctrl);
2226 	nvme_tcp_stop_io_queues(ctrl);
2227 out_cleanup_connect_q:
2228 	nvme_cancel_tagset(ctrl);
2229 	if (new)
2230 		nvme_remove_io_tag_set(ctrl);
2231 out_free_io_queues:
2232 	nvme_tcp_free_io_queues(ctrl);
2233 	return ret;
2234 }
2235 
2236 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
2237 {
2238 	int error;
2239 
2240 	error = nvme_tcp_alloc_admin_queue(ctrl);
2241 	if (error)
2242 		return error;
2243 
2244 	if (new) {
2245 		error = nvme_alloc_admin_tag_set(ctrl,
2246 				&to_tcp_ctrl(ctrl)->admin_tag_set,
2247 				&nvme_tcp_admin_mq_ops,
2248 				sizeof(struct nvme_tcp_request));
2249 		if (error)
2250 			goto out_free_queue;
2251 	}
2252 
2253 	error = nvme_tcp_start_queue(ctrl, 0);
2254 	if (error)
2255 		goto out_cleanup_tagset;
2256 
2257 	if (ctrl->opts->concat && !ctrl->tls_pskid)
2258 		return 0;
2259 
2260 	error = nvme_enable_ctrl(ctrl);
2261 	if (error)
2262 		goto out_stop_queue;
2263 
2264 	nvme_unquiesce_admin_queue(ctrl);
2265 
2266 	error = nvme_init_ctrl_finish(ctrl, false);
2267 	if (error)
2268 		goto out_quiesce_queue;
2269 
2270 	return 0;
2271 
2272 out_quiesce_queue:
2273 	nvme_quiesce_admin_queue(ctrl);
2274 	blk_sync_queue(ctrl->admin_q);
2275 out_stop_queue:
2276 	nvme_tcp_stop_queue(ctrl, 0);
2277 	nvme_cancel_admin_tagset(ctrl);
2278 out_cleanup_tagset:
2279 	if (new)
2280 		nvme_remove_admin_tag_set(ctrl);
2281 out_free_queue:
2282 	nvme_tcp_free_admin_queue(ctrl);
2283 	return error;
2284 }
2285 
2286 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2287 		bool remove)
2288 {
2289 	nvme_quiesce_admin_queue(ctrl);
2290 	blk_sync_queue(ctrl->admin_q);
2291 	nvme_tcp_stop_queue(ctrl, 0);
2292 	nvme_cancel_admin_tagset(ctrl);
2293 	if (remove) {
2294 		nvme_unquiesce_admin_queue(ctrl);
2295 		nvme_remove_admin_tag_set(ctrl);
2296 	}
2297 	nvme_tcp_free_admin_queue(ctrl);
2298 	if (ctrl->tls_pskid) {
2299 		dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n",
2300 			ctrl->tls_pskid);
2301 		ctrl->tls_pskid = 0;
2302 	}
2303 }
2304 
2305 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2306 		bool remove)
2307 {
2308 	if (ctrl->queue_count <= 1)
2309 		return;
2310 	nvme_quiesce_io_queues(ctrl);
2311 	nvme_sync_io_queues(ctrl);
2312 	nvme_tcp_stop_io_queues(ctrl);
2313 	nvme_cancel_tagset(ctrl);
2314 	if (remove) {
2315 		nvme_unquiesce_io_queues(ctrl);
2316 		nvme_remove_io_tag_set(ctrl);
2317 	}
2318 	nvme_tcp_free_io_queues(ctrl);
2319 }
2320 
2321 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
2322 		int status)
2323 {
2324 	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2325 
2326 	/* If we are resetting/deleting then do nothing */
2327 	if (state != NVME_CTRL_CONNECTING) {
2328 		WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
2329 		return;
2330 	}
2331 
2332 	if (nvmf_should_reconnect(ctrl, status)) {
2333 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2334 			ctrl->opts->reconnect_delay);
2335 		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2336 				ctrl->opts->reconnect_delay * HZ);
2337 	} else {
2338 		dev_info(ctrl->device, "Removing controller (%d)...\n",
2339 			 status);
2340 		nvme_delete_ctrl(ctrl);
2341 	}
2342 }
2343 
2344 /*
2345  * The TLS key is set by secure concatenation after negotiation has been
2346  * completed on the admin queue. We need to revoke the key when:
2347  * - concatenation is enabled (otherwise it's a static key set by the user)
2348  * and
2349  * - the generated key is present in ctrl->opts->tls_key (otherwise there's
2350  *   nothing to revoke)
2351  * and
2352  * - a valid PSK key ID has been set in ctrl->tls_pskid (otherwise TLS
2353  *   negotiation has not run).
2354  *
2355  * We cannot always revoke the key as nvme_tcp_alloc_admin_queue() is called
2356  * twice during secure concatenation, once on a 'normal' connection to run the
2357  * DH-HMAC-CHAP negotiation (which generates the key, so it _must not_ be set),
2358  * and once after the negotiation (which uses the key, so it _must_ be set).
2359  */
2360 static bool nvme_tcp_key_revoke_needed(struct nvme_ctrl *ctrl)
2361 {
2362 	return ctrl->opts->concat && ctrl->opts->tls_key && ctrl->tls_pskid;
2363 }
2364 
2365 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2366 {
2367 	struct nvmf_ctrl_options *opts = ctrl->opts;
2368 	int ret;
2369 
2370 	ret = nvme_tcp_configure_admin_queue(ctrl, new);
2371 	if (ret)
2372 		return ret;
2373 
2374 	if (ctrl->opts->concat && !ctrl->tls_pskid) {
2375 		/* See comments for nvme_tcp_key_revoke_needed() */
2376 		dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n");
2377 		nvme_stop_keep_alive(ctrl);
2378 		nvme_tcp_teardown_admin_queue(ctrl, false);
2379 		ret = nvme_tcp_configure_admin_queue(ctrl, false);
2380 		if (ret)
2381 			goto destroy_admin;
2382 	}
2383 
2384 	if (ctrl->icdoff) {
2385 		ret = -EOPNOTSUPP;
2386 		dev_err(ctrl->device, "icdoff is not supported!\n");
2387 		goto destroy_admin;
2388 	}
2389 
2390 	if (!nvme_ctrl_sgl_supported(ctrl)) {
2391 		ret = -EOPNOTSUPP;
2392 		dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2393 		goto destroy_admin;
2394 	}
2395 
2396 	if (opts->queue_size > ctrl->sqsize + 1)
2397 		dev_warn(ctrl->device,
2398 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
2399 			opts->queue_size, ctrl->sqsize + 1);
2400 
2401 	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2402 		dev_warn(ctrl->device,
2403 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
2404 			ctrl->sqsize + 1, ctrl->maxcmd);
2405 		ctrl->sqsize = ctrl->maxcmd - 1;
2406 	}
2407 
2408 	if (ctrl->queue_count > 1) {
2409 		ret = nvme_tcp_configure_io_queues(ctrl, new);
2410 		if (ret)
2411 			goto destroy_admin;
2412 	}
2413 
2414 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2415 		/*
2416 		 * state change failure is ok if we started ctrl delete,
2417 		 * unless we're in the middle of creating a new controller,
2418 		 * to avoid races with the teardown flow.
2419 		 */
2420 		enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2421 
2422 		WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
2423 			     state != NVME_CTRL_DELETING_NOIO);
2424 		WARN_ON_ONCE(new);
2425 		ret = -EINVAL;
2426 		goto destroy_io;
2427 	}
2428 
2429 	nvme_start_ctrl(ctrl);
2430 	return 0;
2431 
2432 destroy_io:
2433 	if (ctrl->queue_count > 1) {
2434 		nvme_quiesce_io_queues(ctrl);
2435 		nvme_sync_io_queues(ctrl);
2436 		nvme_tcp_stop_io_queues(ctrl);
2437 		nvme_cancel_tagset(ctrl);
2438 		if (new)
2439 			nvme_remove_io_tag_set(ctrl);
2440 		nvme_tcp_free_io_queues(ctrl);
2441 	}
2442 destroy_admin:
2443 	nvme_stop_keep_alive(ctrl);
2444 	nvme_tcp_teardown_admin_queue(ctrl, new);
2445 	return ret;
2446 }
2447 
2448 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2449 {
2450 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2451 			struct nvme_tcp_ctrl, connect_work);
2452 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2453 	int ret;
2454 
2455 	++ctrl->nr_reconnects;
2456 
2457 	ret = nvme_tcp_setup_ctrl(ctrl, false);
2458 	if (ret)
2459 		goto requeue;
2460 
2461 	dev_info(ctrl->device, "Successfully reconnected (attempt %d/%d)\n",
2462 		 ctrl->nr_reconnects, ctrl->opts->max_reconnects);
2463 
2464 	ctrl->nr_reconnects = 0;
2465 
2466 	return;
2467 
2468 requeue:
2469 	dev_info(ctrl->device, "Failed reconnect attempt %d/%d\n",
2470 		 ctrl->nr_reconnects, ctrl->opts->max_reconnects);
2471 	nvme_tcp_reconnect_or_remove(ctrl, ret);
2472 }
2473 
2474 static void nvme_tcp_error_recovery_work(struct work_struct *work)
2475 {
2476 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2477 				struct nvme_tcp_ctrl, err_work);
2478 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2479 
2480 	if (nvme_tcp_key_revoke_needed(ctrl))
2481 		nvme_auth_revoke_tls_key(ctrl);
2482 	nvme_stop_keep_alive(ctrl);
2483 	flush_work(&ctrl->async_event_work);
2484 	nvme_tcp_teardown_io_queues(ctrl, false);
2485 	/* unquiesce to fail fast pending requests */
2486 	nvme_unquiesce_io_queues(ctrl);
2487 	nvme_tcp_teardown_admin_queue(ctrl, false);
2488 	nvme_unquiesce_admin_queue(ctrl);
2489 	nvme_auth_stop(ctrl);
2490 
2491 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2492 		/* state change failure is ok if we started ctrl delete */
2493 		enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2494 
2495 		WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
2496 			     state != NVME_CTRL_DELETING_NOIO);
2497 		return;
2498 	}
2499 
2500 	nvme_tcp_reconnect_or_remove(ctrl, 0);
2501 }
2502 
2503 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2504 {
2505 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
2506 	nvme_quiesce_admin_queue(ctrl);
2507 	nvme_disable_ctrl(ctrl, shutdown);
2508 	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2509 }
2510 
2511 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2512 {
2513 	nvme_tcp_teardown_ctrl(ctrl, true);
2514 }
2515 
2516 static void nvme_reset_ctrl_work(struct work_struct *work)
2517 {
2518 	struct nvme_ctrl *ctrl =
2519 		container_of(work, struct nvme_ctrl, reset_work);
2520 	int ret;
2521 
2522 	if (nvme_tcp_key_revoke_needed(ctrl))
2523 		nvme_auth_revoke_tls_key(ctrl);
2524 	nvme_stop_ctrl(ctrl);
2525 	nvme_tcp_teardown_ctrl(ctrl, false);
2526 
2527 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2528 		/* state change failure is ok if we started ctrl delete */
2529 		enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2530 
2531 		WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
2532 			     state != NVME_CTRL_DELETING_NOIO);
2533 		return;
2534 	}
2535 
2536 	ret = nvme_tcp_setup_ctrl(ctrl, false);
2537 	if (ret)
2538 		goto out_fail;
2539 
2540 	return;
2541 
2542 out_fail:
2543 	++ctrl->nr_reconnects;
2544 	nvme_tcp_reconnect_or_remove(ctrl, ret);
2545 }
2546 
2547 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2548 {
2549 	flush_work(&to_tcp_ctrl(ctrl)->err_work);
2550 	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2551 }
2552 
2553 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2554 {
2555 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2556 
2557 	if (list_empty(&ctrl->list))
2558 		goto free_ctrl;
2559 
2560 	mutex_lock(&nvme_tcp_ctrl_mutex);
2561 	list_del(&ctrl->list);
2562 	mutex_unlock(&nvme_tcp_ctrl_mutex);
2563 
2564 	nvmf_free_options(nctrl->opts);
2565 free_ctrl:
2566 	kfree(ctrl->queues);
2567 	kfree(ctrl);
2568 }
2569 
2570 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2571 {
2572 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2573 
2574 	sg->addr = 0;
2575 	sg->length = 0;
2576 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2577 			NVME_SGL_FMT_TRANSPORT_A;
2578 }
2579 
2580 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2581 		struct nvme_command *c, u32 data_len)
2582 {
2583 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2584 
2585 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2586 	sg->length = cpu_to_le32(data_len);
2587 	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2588 }
2589 
2590 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2591 		u32 data_len)
2592 {
2593 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2594 
2595 	sg->addr = 0;
2596 	sg->length = cpu_to_le32(data_len);
2597 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2598 			NVME_SGL_FMT_TRANSPORT_A;
2599 }
2600 
2601 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2602 {
2603 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2604 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
2605 	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2606 	struct nvme_command *cmd = &pdu->cmd;
2607 	u8 hdgst = nvme_tcp_hdgst_len(queue);
2608 
2609 	memset(pdu, 0, sizeof(*pdu));
2610 	pdu->hdr.type = nvme_tcp_cmd;
2611 	if (queue->hdr_digest)
2612 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
2613 	pdu->hdr.hlen = sizeof(*pdu);
2614 	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2615 
2616 	cmd->common.opcode = nvme_admin_async_event;
2617 	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2618 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
2619 	nvme_tcp_set_sg_null(cmd);
2620 
2621 	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2622 	ctrl->async_req.offset = 0;
2623 	ctrl->async_req.curr_bio = NULL;
2624 	ctrl->async_req.data_len = 0;
2625 	init_llist_node(&ctrl->async_req.lentry);
2626 	INIT_LIST_HEAD(&ctrl->async_req.entry);
2627 
2628 	nvme_tcp_queue_request(&ctrl->async_req, true);
2629 }
2630 
2631 static void nvme_tcp_complete_timed_out(struct request *rq)
2632 {
2633 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2634 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2635 
2636 	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2637 	nvmf_complete_timed_out_request(rq);
2638 }
2639 
2640 static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
2641 {
2642 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2643 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2644 	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2645 	struct nvme_command *cmd = &pdu->cmd;
2646 	int qid = nvme_tcp_queue_id(req->queue);
2647 
2648 	dev_warn(ctrl->device,
2649 		 "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
2650 		 rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
2651 		 nvme_fabrics_opcode_str(qid, cmd), qid);
2652 
2653 	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
2654 		/*
2655 		 * If we are resetting, connecting or deleting, we should
2656 		 * complete immediately because we may otherwise block the
2657 		 * controller teardown or setup sequence:
2658 		 * - ctrl disable/shutdown fabrics requests
2659 		 * - connect requests
2660 		 * - initialization admin requests
2661 		 * - I/O requests that entered after unquiescing and
2662 		 *   the controller stopped responding
2663 		 *
2664 		 * All other requests should be cancelled by the error
2665 		 * recovery work, so it's fine to fail this one here.
2666 		 */
2667 		nvme_tcp_complete_timed_out(rq);
2668 		return BLK_EH_DONE;
2669 	}
2670 
2671 	/*
2672 	 * LIVE state should trigger the normal error recovery which will
2673 	 * handle completing this request.
2674 	 */
2675 	nvme_tcp_error_recovery(ctrl);
2676 	return BLK_EH_RESET_TIMER;
2677 }
2678 
2679 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2680 			struct request *rq)
2681 {
2682 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2683 	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2684 	struct nvme_command *c = &pdu->cmd;
2685 
2686 	c->common.flags |= NVME_CMD_SGL_METABUF;
2687 
2688 	if (!blk_rq_nr_phys_segments(rq))
2689 		nvme_tcp_set_sg_null(c);
2690 	else if (rq_data_dir(rq) == WRITE &&
2691 	    req->data_len <= nvme_tcp_inline_data_size(req))
2692 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
2693 	else
2694 		nvme_tcp_set_sg_host_data(c, req->data_len);
2695 
2696 	return 0;
2697 }
2698 
2699 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2700 		struct request *rq)
2701 {
2702 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2703 	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2704 	struct nvme_tcp_queue *queue = req->queue;
2705 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2706 	blk_status_t ret;
2707 
2708 	ret = nvme_setup_cmd(ns, rq);
2709 	if (ret)
2710 		return ret;
2711 
2712 	req->state = NVME_TCP_SEND_CMD_PDU;
2713 	req->status = cpu_to_le16(NVME_SC_SUCCESS);
2714 	req->offset = 0;
2715 	req->data_sent = 0;
2716 	req->pdu_len = 0;
2717 	req->pdu_sent = 0;
2718 	req->h2cdata_left = 0;
2719 	req->data_len = blk_rq_nr_phys_segments(rq) ?
2720 				blk_rq_payload_bytes(rq) : 0;
2721 	req->curr_bio = rq->bio;
2722 	if (req->curr_bio && req->data_len)
2723 		nvme_tcp_init_iter(req, rq_data_dir(rq));
2724 
2725 	if (rq_data_dir(rq) == WRITE &&
2726 	    req->data_len <= nvme_tcp_inline_data_size(req))
2727 		req->pdu_len = req->data_len;
2728 
2729 	pdu->hdr.type = nvme_tcp_cmd;
2730 	pdu->hdr.flags = 0;
2731 	if (queue->hdr_digest)
2732 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
2733 	if (queue->data_digest && req->pdu_len) {
2734 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
2735 		ddgst = nvme_tcp_ddgst_len(queue);
2736 	}
2737 	pdu->hdr.hlen = sizeof(*pdu);
2738 	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2739 	pdu->hdr.plen =
2740 		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2741 
2742 	ret = nvme_tcp_map_data(queue, rq);
2743 	if (unlikely(ret)) {
2744 		nvme_cleanup_cmd(rq);
2745 		dev_err(queue->ctrl->ctrl.device,
2746 			"Failed to map data (%d)\n", ret);
2747 		return ret;
2748 	}
2749 
2750 	return 0;
2751 }
2752 
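/*
 * Editor's note: the header fields set above follow the NVMe/TCP PDU
 * layout - hlen is the header length, pdo the offset of inline data
 * (header plus header digest, when present) and plen the total on-wire
 * length. A self-checking sketch of that arithmetic; the struct mirrors
 * the 8-byte common header used above, the sizes are example values:
 */
#if 0	/* standalone userspace sketch, kept out of the kernel build */
#include <assert.h>
#include <stdint.h>

struct tcp_pdu_hdr {		/* same shape as the common PDU header */
	uint8_t  type, flags, hlen, pdo;
	uint32_t plen;		/* little-endian on the wire */
};

int main(void)
{
	uint32_t hlen = 72;	/* command-capsule PDU header */
	uint32_t hdgst = 4;	/* CRC32C header digest, if negotiated */
	uint32_t data = 4096;	/* inline data */
	uint32_t ddgst = 4;	/* CRC32C data digest, if negotiated */

	_Static_assert(sizeof(struct tcp_pdu_hdr) == 8, "hdr must be 8 bytes");
	assert(hlen + hdgst + data + ddgst == 4176);	/* plen for this PDU */
	return 0;
}
#endif
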
2753 static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2754 {
2755 	struct nvme_tcp_queue *queue = hctx->driver_data;
2756 
2757 	if (!llist_empty(&queue->req_list))
2758 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2759 }
2760 
2761 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2762 		const struct blk_mq_queue_data *bd)
2763 {
2764 	struct nvme_ns *ns = hctx->queue->queuedata;
2765 	struct nvme_tcp_queue *queue = hctx->driver_data;
2766 	struct request *rq = bd->rq;
2767 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2768 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2769 	blk_status_t ret;
2770 
2771 	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2772 		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2773 
2774 	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2775 	if (unlikely(ret))
2776 		return ret;
2777 
2778 	nvme_start_request(rq);
2779 
2780 	nvme_tcp_queue_request(req, bd->last);
2781 
2782 	return BLK_STS_OK;
2783 }
2784 
2785 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2786 {
2787 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
2788 
2789 	nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
2790 }
2791 
2792 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
2793 {
2794 	struct nvme_tcp_queue *queue = hctx->driver_data;
2795 	struct sock *sk = queue->sock->sk;
2796 	int ret;
2797 
2798 	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2799 		return 0;
2800 
2801 	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2802 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2803 		sk_busy_loop(sk, true);
2804 	ret = nvme_tcp_try_recv(queue);
2805 	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2806 	return ret < 0 ? ret : queue->nr_cqe;
2807 }
2808 
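/*
 * Editor's note: sk_busy_loop() above is the kernel side of busy polling;
 * a userspace socket opts in with SO_BUSY_POLL, which takes the busy-wait
 * budget in microseconds (the value below is only an example):
 */
#if 0	/* standalone userspace sketch, kept out of the kernel build */
#include <sys/socket.h>

static void enable_busy_poll(int fd)
{
	int usec = 50;	/* spin up to 50us before sleeping in recv paths */

	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec));
}
#endif
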
2809 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2810 {
2811 	struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
2812 	struct sockaddr_storage src_addr;
2813 	int ret, len;
2814 
2815 	len = nvmf_get_address(ctrl, buf, size);
2816 
2817 	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2818 		return len;
2819 
2820 	mutex_lock(&queue->queue_lock);
2821 
2822 	ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
2823 	if (ret > 0) {
2824 		if (len > 0)
2825 			len--; /* strip trailing newline */
2826 		len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
2827 				(len) ? "," : "", &src_addr);
2828 	}
2829 
2830 	mutex_unlock(&queue->queue_lock);
2831 
2832 	return len;
2833 }
2834 
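/*
 * Editor's note: kernel_getsockname() is the in-kernel twin of the
 * getsockname(2)/inet_ntop(3) pair. Userspace sketch (IPv4 only for
 * brevity; illustration, not driver code):
 */
#if 0	/* standalone userspace sketch, kept out of the kernel build */
#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>

static void print_src_addr(int fd)
{
	struct sockaddr_storage ss;
	socklen_t len = sizeof(ss);
	char buf[INET_ADDRSTRLEN];

	if (getsockname(fd, (struct sockaddr *)&ss, &len) == 0 &&
	    ss.ss_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;

		inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf));
		printf("src_addr=%s\n", buf);
	}
}
#endif
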
2835 static const struct blk_mq_ops nvme_tcp_mq_ops = {
2836 	.queue_rq	= nvme_tcp_queue_rq,
2837 	.commit_rqs	= nvme_tcp_commit_rqs,
2838 	.complete	= nvme_complete_rq,
2839 	.init_request	= nvme_tcp_init_request,
2840 	.exit_request	= nvme_tcp_exit_request,
2841 	.init_hctx	= nvme_tcp_init_hctx,
2842 	.timeout	= nvme_tcp_timeout,
2843 	.map_queues	= nvme_tcp_map_queues,
2844 	.poll		= nvme_tcp_poll,
2845 };
2846 
2847 static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2848 	.queue_rq	= nvme_tcp_queue_rq,
2849 	.complete	= nvme_complete_rq,
2850 	.init_request	= nvme_tcp_init_request,
2851 	.exit_request	= nvme_tcp_exit_request,
2852 	.init_hctx	= nvme_tcp_init_admin_hctx,
2853 	.timeout	= nvme_tcp_timeout,
2854 };
2855 
2856 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2857 	.name			= "tcp",
2858 	.module			= THIS_MODULE,
2859 	.flags			= NVME_F_FABRICS | NVME_F_BLOCKING,
2860 	.reg_read32		= nvmf_reg_read32,
2861 	.reg_read64		= nvmf_reg_read64,
2862 	.reg_write32		= nvmf_reg_write32,
2863 	.subsystem_reset	= nvmf_subsystem_reset,
2864 	.free_ctrl		= nvme_tcp_free_ctrl,
2865 	.submit_async_event	= nvme_tcp_submit_async_event,
2866 	.delete_ctrl		= nvme_tcp_delete_ctrl,
2867 	.get_address		= nvme_tcp_get_address,
2868 	.stop_ctrl		= nvme_tcp_stop_ctrl,
2869 	.get_virt_boundary	= nvmf_get_virt_boundary,
2870 };
2871 
2872 static bool
2873 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2874 {
2875 	struct nvme_tcp_ctrl *ctrl;
2876 	bool found = false;
2877 
2878 	mutex_lock(&nvme_tcp_ctrl_mutex);
2879 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2880 		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2881 		if (found)
2882 			break;
2883 	}
2884 	mutex_unlock(&nvme_tcp_ctrl_mutex);
2885 
2886 	return found;
2887 }
2888 
2889 static struct nvme_tcp_ctrl *nvme_tcp_alloc_ctrl(struct device *dev,
2890 		struct nvmf_ctrl_options *opts)
2891 {
2892 	struct nvme_tcp_ctrl *ctrl;
2893 	int ret;
2894 
2895 	ctrl = kzalloc_obj(*ctrl);
2896 	if (!ctrl)
2897 		return ERR_PTR(-ENOMEM);
2898 
2899 	INIT_LIST_HEAD(&ctrl->list);
2900 	ctrl->ctrl.opts = opts;
2901 	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2902 				opts->nr_poll_queues + 1;
2903 	ctrl->ctrl.sqsize = opts->queue_size - 1;
2904 	ctrl->ctrl.kato = opts->kato;
2905 
2906 	INIT_DELAYED_WORK(&ctrl->connect_work,
2907 			nvme_tcp_reconnect_ctrl_work);
2908 	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2909 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2910 
2911 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2912 		opts->trsvcid =
2913 			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2914 		if (!opts->trsvcid) {
2915 			ret = -ENOMEM;
2916 			goto out_free_ctrl;
2917 		}
2918 		opts->mask |= NVMF_OPT_TRSVCID;
2919 	}
2920 
2921 	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2922 			opts->traddr, opts->trsvcid, &ctrl->addr);
2923 	if (ret) {
2924 		pr_err("malformed address passed: %s:%s\n",
2925 			opts->traddr, opts->trsvcid);
2926 		goto out_free_ctrl;
2927 	}
2928 
2929 	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2930 		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2931 			opts->host_traddr, NULL, &ctrl->src_addr);
2932 		if (ret) {
2933 			pr_err("malformed src address passed: %s\n",
2934 			       opts->host_traddr);
2935 			goto out_free_ctrl;
2936 		}
2937 	}
2938 
2939 	if (opts->mask & NVMF_OPT_HOST_IFACE) {
2940 		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
2941 			pr_err("invalid interface passed: %s\n",
2942 			       opts->host_iface);
2943 			ret = -ENODEV;
2944 			goto out_free_ctrl;
2945 		}
2946 	}
2947 
2948 	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2949 		ret = -EALREADY;
2950 		goto out_free_ctrl;
2951 	}
2952 
2953 	ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count);
2954 	if (!ctrl->queues) {
2955 		ret = -ENOMEM;
2956 		goto out_free_ctrl;
2957 	}
2958 
2959 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2960 	if (ret)
2961 		goto out_kfree_queues;
2962 
2963 	return ctrl;
2964 out_kfree_queues:
2965 	kfree(ctrl->queues);
2966 out_free_ctrl:
2967 	kfree(ctrl);
2968 	return ERR_PTR(ret);
2969 }
2970 
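/*
 * Editor's note: inet_pton_with_scope() resolves the traddr/trsvcid pair
 * into a sockaddr_storage much as getaddrinfo(3) does in userspace with
 * numeric-only flags. Hedged sketch (illustration, not driver code):
 */
#if 0	/* standalone userspace sketch, kept out of the kernel build */
#include <netdb.h>
#include <string.h>
#include <sys/socket.h>

/* Parse numeric addr/port strings into *ss; 0 on success, -1 on error. */
static int parse_traddr(const char *addr, const char *svc,
			struct sockaddr_storage *ss)
{
	struct addrinfo hints = { .ai_flags = AI_NUMERICHOST | AI_NUMERICSERV };
	struct addrinfo *res;

	if (getaddrinfo(addr, svc, &hints, &res))
		return -1;
	memcpy(ss, res->ai_addr, res->ai_addrlen);
	freeaddrinfo(res);
	return 0;
}
#endif
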
2971 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2972 		struct nvmf_ctrl_options *opts)
2973 {
2974 	struct nvme_tcp_ctrl *ctrl;
2975 	int ret;
2976 
2977 	ctrl = nvme_tcp_alloc_ctrl(dev, opts);
2978 	if (IS_ERR(ctrl))
2979 		return ERR_CAST(ctrl);
2980 
2981 	ret = nvme_add_ctrl(&ctrl->ctrl);
2982 	if (ret)
2983 		goto out_put_ctrl;
2984 
2985 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2986 		WARN_ON_ONCE(1);
2987 		ret = -EINTR;
2988 		goto out_uninit_ctrl;
2989 	}
2990 
2991 	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2992 	if (ret)
2993 		goto out_uninit_ctrl;
2994 
2995 	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
2996 		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
2997 
2998 	mutex_lock(&nvme_tcp_ctrl_mutex);
2999 	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
3000 	mutex_unlock(&nvme_tcp_ctrl_mutex);
3001 
3002 	return &ctrl->ctrl;
3003 
3004 out_uninit_ctrl:
3005 	nvme_uninit_ctrl(&ctrl->ctrl);
3006 out_put_ctrl:
3007 	nvme_put_ctrl(&ctrl->ctrl);
3008 	if (ret > 0)
3009 		ret = -EIO;
3010 	return ERR_PTR(ret);
3011 }
3012 
3013 static struct nvmf_transport_ops nvme_tcp_transport = {
3014 	.name		= "tcp",
3015 	.module		= THIS_MODULE,
3016 	.required_opts	= NVMF_OPT_TRADDR,
3017 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
3018 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
3019 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
3020 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
3021 			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
3022 			  NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY | NVMF_OPT_CONCAT,
3023 	.create_ctrl	= nvme_tcp_create_ctrl,
3024 };
3025 
3026 static int __init nvme_tcp_init_module(void)
3027 {
3028 	unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
3029 	int cpu;
3030 
3031 	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
3032 	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
3033 	BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
3034 	BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
3035 	BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
3036 	BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
3037 	BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
3038 	BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
3039 
3040 	if (wq_unbound)
3041 		wq_flags |= WQ_UNBOUND;
3042 
3043 	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
3044 	if (!nvme_tcp_wq)
3045 		return -ENOMEM;
3046 
3047 	for_each_possible_cpu(cpu)
3048 		atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
3049 
3050 	nvmf_register_transport(&nvme_tcp_transport);
3051 	return 0;
3052 }
3053 
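/*
 * Editor's note: the BUILD_BUG_ON()s above pin the on-wire PDU sizes at
 * compile time. Plain C11 gives the same guarantee with _Static_assert;
 * the struct below is hypothetical padding-only shorthand for a 128-byte
 * ICReq PDU, purely for illustration:
 */
#if 0	/* standalone userspace sketch, kept out of the kernel build */
#include <stdint.h>

struct icreq_pdu_wire {
	uint8_t hdr[8];		/* common PDU header */
	uint8_t body[120];	/* padded out to the 128-byte PDU */
};

_Static_assert(sizeof(struct icreq_pdu_wire) == 128,
	       "ICReq PDU must be 128 bytes on the wire");
#endif
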
3054 static void __exit nvme_tcp_cleanup_module(void)
3055 {
3056 	struct nvme_tcp_ctrl *ctrl;
3057 
3058 	nvmf_unregister_transport(&nvme_tcp_transport);
3059 
3060 	mutex_lock(&nvme_tcp_ctrl_mutex);
3061 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
3062 		nvme_delete_ctrl(&ctrl->ctrl);
3063 	mutex_unlock(&nvme_tcp_ctrl_mutex);
3064 	flush_workqueue(nvme_delete_wq);
3065 
3066 	destroy_workqueue(nvme_tcp_wq);
3067 }
3068 
3069 module_init(nvme_tcp_init_module);
3070 module_exit(nvme_tcp_cleanup_module);
3071 
3072 MODULE_DESCRIPTION("NVMe host TCP transport driver");
3073 MODULE_LICENSE("GPL v2");
3074