1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/crc32.h>
12 #include <linux/nvme-tcp.h>
13 #include <linux/nvme-keyring.h>
14 #include <net/sock.h>
15 #include <net/tcp.h>
16 #include <net/tls.h>
17 #include <net/tls_prot.h>
18 #include <net/handshake.h>
19 #include <linux/blk-mq.h>
20 #include <net/busy_poll.h>
21 #include <trace/events/sock.h>
22
23 #include "nvme.h"
24 #include "fabrics.h"
25
26 struct nvme_tcp_queue;
27
28 /*
29 * Define the socket priority to use for connections where it is desirable
30 * that the NIC consider performing optimized packet processing or filtering.
31 * A non-zero value being sufficient to indicate general consideration of any
32 * possible optimization. Making it a module param allows for alternative
33 * values that may be unique for some NIC implementations.
34 */
35 static int so_priority;
36 module_param(so_priority, int, 0644);
37 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
38
39 /*
40 * Use the unbound workqueue for nvme_tcp_wq, then we can set the cpu affinity
41 * from sysfs.
42 */
43 static bool wq_unbound;
44 module_param(wq_unbound, bool, 0644);
45 MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
46
47 /*
48 * TLS handshake timeout
49 */
50 static int tls_handshake_timeout = 10;
51 #ifdef CONFIG_NVME_TCP_TLS
52 module_param(tls_handshake_timeout, int, 0644);
53 MODULE_PARM_DESC(tls_handshake_timeout,
54 "nvme TLS handshake timeout in seconds (default 10)");
55 #endif
56
57 static atomic_t nvme_tcp_cpu_queues[NR_CPUS];
58
59 #ifdef CONFIG_DEBUG_LOCK_ALLOC
60 /* lockdep can detect a circular dependency of the form
61 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
62 * because dependencies are tracked for both nvme-tcp and user contexts. Using
63 * a separate class prevents lockdep from conflating nvme-tcp socket use with
64 * user-space socket API use.
65 */
66 static struct lock_class_key nvme_tcp_sk_key[2];
67 static struct lock_class_key nvme_tcp_slock_key[2];
68
nvme_tcp_reclassify_socket(struct socket * sock)69 static void nvme_tcp_reclassify_socket(struct socket *sock)
70 {
71 struct sock *sk = sock->sk;
72
73 if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
74 return;
75
76 switch (sk->sk_family) {
77 case AF_INET:
78 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
79 &nvme_tcp_slock_key[0],
80 "sk_lock-AF_INET-NVME",
81 &nvme_tcp_sk_key[0]);
82 break;
83 case AF_INET6:
84 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
85 &nvme_tcp_slock_key[1],
86 "sk_lock-AF_INET6-NVME",
87 &nvme_tcp_sk_key[1]);
88 break;
89 default:
90 WARN_ON_ONCE(1);
91 }
92 }
93 #else
nvme_tcp_reclassify_socket(struct socket * sock)94 static void nvme_tcp_reclassify_socket(struct socket *sock) { }
95 #endif
96
97 enum nvme_tcp_send_state {
98 NVME_TCP_SEND_CMD_PDU = 0,
99 NVME_TCP_SEND_H2C_PDU,
100 NVME_TCP_SEND_DATA,
101 NVME_TCP_SEND_DDGST,
102 };
103
104 struct nvme_tcp_request {
105 struct nvme_request req;
106 void *pdu;
107 struct nvme_tcp_queue *queue;
108 u32 data_len;
109 u32 pdu_len;
110 u32 pdu_sent;
111 u32 h2cdata_left;
112 u32 h2cdata_offset;
113 u16 ttag;
114 __le16 status;
115 struct list_head entry;
116 struct llist_node lentry;
117 __le32 ddgst;
118
119 struct bio *curr_bio;
120 struct iov_iter iter;
121
122 /* send state */
123 size_t offset;
124 size_t data_sent;
125 enum nvme_tcp_send_state state;
126 };
127
128 enum nvme_tcp_queue_flags {
129 NVME_TCP_Q_ALLOCATED = 0,
130 NVME_TCP_Q_LIVE = 1,
131 NVME_TCP_Q_POLLING = 2,
132 NVME_TCP_Q_IO_CPU_SET = 3,
133 };
134
135 enum nvme_tcp_recv_state {
136 NVME_TCP_RECV_PDU = 0,
137 NVME_TCP_RECV_DATA,
138 NVME_TCP_RECV_DDGST,
139 };
140
141 struct nvme_tcp_ctrl;
142 struct nvme_tcp_queue {
143 struct socket *sock;
144 struct work_struct io_work;
145 int io_cpu;
146
147 struct mutex queue_lock;
148 struct mutex send_mutex;
149 struct llist_head req_list;
150 struct list_head send_list;
151
152 /* recv state */
153 void *pdu;
154 int pdu_remaining;
155 int pdu_offset;
156 size_t data_remaining;
157 size_t ddgst_remaining;
158 unsigned int nr_cqe;
159
160 /* send state */
161 struct nvme_tcp_request *request;
162
163 u32 maxh2cdata;
164 size_t cmnd_capsule_len;
165 struct nvme_tcp_ctrl *ctrl;
166 unsigned long flags;
167 bool rd_enabled;
168
169 bool hdr_digest;
170 bool data_digest;
171 bool tls_enabled;
172 u32 rcv_crc;
173 u32 snd_crc;
174 __le32 exp_ddgst;
175 __le32 recv_ddgst;
176 struct completion tls_complete;
177 int tls_err;
178 struct page_frag_cache pf_cache;
179
180 void (*state_change)(struct sock *);
181 void (*data_ready)(struct sock *);
182 void (*write_space)(struct sock *);
183 };
184
185 struct nvme_tcp_ctrl {
186 /* read only in the hot path */
187 struct nvme_tcp_queue *queues;
188 struct blk_mq_tag_set tag_set;
189
190 /* other member variables */
191 struct list_head list;
192 struct blk_mq_tag_set admin_tag_set;
193 struct sockaddr_storage addr;
194 struct sockaddr_storage src_addr;
195 struct nvme_ctrl ctrl;
196
197 struct work_struct err_work;
198 struct delayed_work connect_work;
199 struct nvme_tcp_request async_req;
200 u32 io_queues[HCTX_MAX_TYPES];
201 };
202
203 static LIST_HEAD(nvme_tcp_ctrl_list);
204 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
205 static struct workqueue_struct *nvme_tcp_wq;
206 static const struct blk_mq_ops nvme_tcp_mq_ops;
207 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
208 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
209
to_tcp_ctrl(struct nvme_ctrl * ctrl)210 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
211 {
212 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
213 }
214
nvme_tcp_queue_id(struct nvme_tcp_queue * queue)215 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
216 {
217 return queue - queue->ctrl->queues;
218 }
219
nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)220 static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
221 {
222 switch (type) {
223 case nvme_tcp_c2h_term:
224 case nvme_tcp_c2h_data:
225 case nvme_tcp_r2t:
226 case nvme_tcp_rsp:
227 return true;
228 default:
229 return false;
230 }
231 }
232
233 /*
234 * Check if the queue is TLS encrypted
235 */
nvme_tcp_queue_tls(struct nvme_tcp_queue * queue)236 static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
237 {
238 if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
239 return 0;
240
241 return queue->tls_enabled;
242 }
243
244 /*
245 * Check if TLS is configured for the controller.
246 */
nvme_tcp_tls_configured(struct nvme_ctrl * ctrl)247 static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
248 {
249 if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
250 return 0;
251
252 return ctrl->opts->tls || ctrl->opts->concat;
253 }
254
nvme_tcp_tagset(struct nvme_tcp_queue * queue)255 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
256 {
257 u32 queue_idx = nvme_tcp_queue_id(queue);
258
259 if (queue_idx == 0)
260 return queue->ctrl->admin_tag_set.tags[queue_idx];
261 return queue->ctrl->tag_set.tags[queue_idx - 1];
262 }
263
nvme_tcp_hdgst_len(struct nvme_tcp_queue * queue)264 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
265 {
266 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
267 }
268
nvme_tcp_ddgst_len(struct nvme_tcp_queue * queue)269 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
270 {
271 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
272 }
273
nvme_tcp_req_cmd_pdu(struct nvme_tcp_request * req)274 static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
275 {
276 return req->pdu;
277 }
278
nvme_tcp_req_data_pdu(struct nvme_tcp_request * req)279 static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
280 {
281 /* use the pdu space in the back for the data pdu */
282 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
283 sizeof(struct nvme_tcp_data_pdu);
284 }
285
nvme_tcp_inline_data_size(struct nvme_tcp_request * req)286 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
287 {
288 if (nvme_is_fabrics(req->req.cmd))
289 return NVME_TCP_ADMIN_CCSZ;
290 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
291 }
292
nvme_tcp_async_req(struct nvme_tcp_request * req)293 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
294 {
295 return req == &req->queue->ctrl->async_req;
296 }
297
nvme_tcp_has_inline_data(struct nvme_tcp_request * req)298 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
299 {
300 struct request *rq;
301
302 if (unlikely(nvme_tcp_async_req(req)))
303 return false; /* async events don't have a request */
304
305 rq = blk_mq_rq_from_pdu(req);
306
307 return rq_data_dir(rq) == WRITE && req->data_len &&
308 req->data_len <= nvme_tcp_inline_data_size(req);
309 }
310
nvme_tcp_req_cur_page(struct nvme_tcp_request * req)311 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
312 {
313 return req->iter.bvec->bv_page;
314 }
315
nvme_tcp_req_cur_offset(struct nvme_tcp_request * req)316 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
317 {
318 return req->iter.bvec->bv_offset + req->iter.iov_offset;
319 }
320
nvme_tcp_req_cur_length(struct nvme_tcp_request * req)321 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
322 {
323 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
324 req->pdu_len - req->pdu_sent);
325 }
326
nvme_tcp_pdu_data_left(struct nvme_tcp_request * req)327 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
328 {
329 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
330 req->pdu_len - req->pdu_sent : 0;
331 }
332
nvme_tcp_pdu_last_send(struct nvme_tcp_request * req,int len)333 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
334 int len)
335 {
336 return nvme_tcp_pdu_data_left(req) <= len;
337 }
338
nvme_tcp_init_iter(struct nvme_tcp_request * req,unsigned int dir)339 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
340 unsigned int dir)
341 {
342 struct request *rq = blk_mq_rq_from_pdu(req);
343 struct bio_vec *vec;
344 unsigned int size;
345 int nr_bvec;
346 size_t offset;
347
348 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
349 vec = &rq->special_vec;
350 nr_bvec = 1;
351 size = blk_rq_payload_bytes(rq);
352 offset = 0;
353 } else {
354 struct bio *bio = req->curr_bio;
355 struct bvec_iter bi;
356 struct bio_vec bv;
357
358 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
359 nr_bvec = 0;
360 bio_for_each_bvec(bv, bio, bi) {
361 nr_bvec++;
362 }
363 size = bio->bi_iter.bi_size;
364 offset = bio->bi_iter.bi_bvec_done;
365 }
366
367 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
368 req->iter.iov_offset = offset;
369 }
370
nvme_tcp_advance_req(struct nvme_tcp_request * req,int len)371 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
372 int len)
373 {
374 req->data_sent += len;
375 req->pdu_sent += len;
376 iov_iter_advance(&req->iter, len);
377 if (!iov_iter_count(&req->iter) &&
378 req->data_sent < req->data_len) {
379 req->curr_bio = req->curr_bio->bi_next;
380 nvme_tcp_init_iter(req, ITER_SOURCE);
381 }
382 }
383
nvme_tcp_send_all(struct nvme_tcp_queue * queue)384 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
385 {
386 int ret;
387
388 /* drain the send queue as much as we can... */
389 do {
390 ret = nvme_tcp_try_send(queue);
391 } while (ret > 0);
392 }
393
nvme_tcp_queue_has_pending(struct nvme_tcp_queue * queue)394 static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
395 {
396 return !list_empty(&queue->send_list) ||
397 !llist_empty(&queue->req_list);
398 }
399
nvme_tcp_queue_more(struct nvme_tcp_queue * queue)400 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
401 {
402 return !nvme_tcp_queue_tls(queue) &&
403 nvme_tcp_queue_has_pending(queue);
404 }
405
nvme_tcp_queue_request(struct nvme_tcp_request * req,bool last)406 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
407 bool last)
408 {
409 struct nvme_tcp_queue *queue = req->queue;
410 bool empty;
411
412 empty = llist_add(&req->lentry, &queue->req_list) &&
413 list_empty(&queue->send_list) && !queue->request;
414
415 /*
416 * if we're the first on the send_list and we can try to send
417 * directly, otherwise queue io_work. Also, only do that if we
418 * are on the same cpu, so we don't introduce contention.
419 */
420 if (queue->io_cpu == raw_smp_processor_id() &&
421 empty && mutex_trylock(&queue->send_mutex)) {
422 nvme_tcp_send_all(queue);
423 mutex_unlock(&queue->send_mutex);
424 }
425
426 if (last && nvme_tcp_queue_has_pending(queue))
427 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
428 }
429
nvme_tcp_process_req_list(struct nvme_tcp_queue * queue)430 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
431 {
432 struct nvme_tcp_request *req;
433 struct llist_node *node;
434
435 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
436 req = llist_entry(node, struct nvme_tcp_request, lentry);
437 list_add(&req->entry, &queue->send_list);
438 }
439 }
440
441 static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue * queue)442 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
443 {
444 struct nvme_tcp_request *req;
445
446 req = list_first_entry_or_null(&queue->send_list,
447 struct nvme_tcp_request, entry);
448 if (!req) {
449 nvme_tcp_process_req_list(queue);
450 req = list_first_entry_or_null(&queue->send_list,
451 struct nvme_tcp_request, entry);
452 if (unlikely(!req))
453 return NULL;
454 }
455
456 list_del_init(&req->entry);
457 init_llist_node(&req->lentry);
458 return req;
459 }
460
461 #define NVME_TCP_CRC_SEED (~0)
462
nvme_tcp_ddgst_update(u32 * crcp,struct page * page,size_t off,size_t len)463 static inline void nvme_tcp_ddgst_update(u32 *crcp,
464 struct page *page, size_t off, size_t len)
465 {
466 page += off / PAGE_SIZE;
467 off %= PAGE_SIZE;
468 while (len) {
469 const void *vaddr = kmap_local_page(page);
470 size_t n = min(len, (size_t)PAGE_SIZE - off);
471
472 *crcp = crc32c(*crcp, vaddr + off, n);
473 kunmap_local(vaddr);
474 page++;
475 off = 0;
476 len -= n;
477 }
478 }
479
nvme_tcp_ddgst_final(u32 crc)480 static inline __le32 nvme_tcp_ddgst_final(u32 crc)
481 {
482 return cpu_to_le32(~crc);
483 }
484
nvme_tcp_hdgst(const void * pdu,size_t len)485 static inline __le32 nvme_tcp_hdgst(const void *pdu, size_t len)
486 {
487 return cpu_to_le32(~crc32c(NVME_TCP_CRC_SEED, pdu, len));
488 }
489
nvme_tcp_set_hdgst(void * pdu,size_t len)490 static inline void nvme_tcp_set_hdgst(void *pdu, size_t len)
491 {
492 *(__le32 *)(pdu + len) = nvme_tcp_hdgst(pdu, len);
493 }
494
nvme_tcp_verify_hdgst(struct nvme_tcp_queue * queue,void * pdu,size_t pdu_len)495 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
496 void *pdu, size_t pdu_len)
497 {
498 struct nvme_tcp_hdr *hdr = pdu;
499 __le32 recv_digest;
500 __le32 exp_digest;
501
502 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
503 dev_err(queue->ctrl->ctrl.device,
504 "queue %d: header digest flag is cleared\n",
505 nvme_tcp_queue_id(queue));
506 return -EPROTO;
507 }
508
509 recv_digest = *(__le32 *)(pdu + hdr->hlen);
510 exp_digest = nvme_tcp_hdgst(pdu, pdu_len);
511 if (recv_digest != exp_digest) {
512 dev_err(queue->ctrl->ctrl.device,
513 "header digest error: recv %#x expected %#x\n",
514 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
515 return -EIO;
516 }
517
518 return 0;
519 }
520
nvme_tcp_check_ddgst(struct nvme_tcp_queue * queue,void * pdu)521 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
522 {
523 struct nvme_tcp_hdr *hdr = pdu;
524 u8 digest_len = nvme_tcp_hdgst_len(queue);
525 u32 len;
526
527 len = le32_to_cpu(hdr->plen) - hdr->hlen -
528 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
529
530 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
531 dev_err(queue->ctrl->ctrl.device,
532 "queue %d: data digest flag is cleared\n",
533 nvme_tcp_queue_id(queue));
534 return -EPROTO;
535 }
536 queue->rcv_crc = NVME_TCP_CRC_SEED;
537
538 return 0;
539 }
540
nvme_tcp_exit_request(struct blk_mq_tag_set * set,struct request * rq,unsigned int hctx_idx)541 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
542 struct request *rq, unsigned int hctx_idx)
543 {
544 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
545
546 page_frag_free(req->pdu);
547 }
548
nvme_tcp_init_request(struct blk_mq_tag_set * set,struct request * rq,unsigned int hctx_idx,unsigned int numa_node)549 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
550 struct request *rq, unsigned int hctx_idx,
551 unsigned int numa_node)
552 {
553 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
554 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
555 struct nvme_tcp_cmd_pdu *pdu;
556 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
557 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
558 u8 hdgst = nvme_tcp_hdgst_len(queue);
559
560 req->pdu = page_frag_alloc(&queue->pf_cache,
561 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
562 GFP_KERNEL | __GFP_ZERO);
563 if (!req->pdu)
564 return -ENOMEM;
565
566 pdu = req->pdu;
567 req->queue = queue;
568 nvme_req(rq)->ctrl = &ctrl->ctrl;
569 nvme_req(rq)->cmd = &pdu->cmd;
570 init_llist_node(&req->lentry);
571 INIT_LIST_HEAD(&req->entry);
572
573 return 0;
574 }
575
nvme_tcp_init_hctx(struct blk_mq_hw_ctx * hctx,void * data,unsigned int hctx_idx)576 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
577 unsigned int hctx_idx)
578 {
579 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
580 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
581
582 hctx->driver_data = queue;
583 return 0;
584 }
585
nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx * hctx,void * data,unsigned int hctx_idx)586 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
587 unsigned int hctx_idx)
588 {
589 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
590 struct nvme_tcp_queue *queue = &ctrl->queues[0];
591
592 hctx->driver_data = queue;
593 return 0;
594 }
595
596 static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue * queue)597 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
598 {
599 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
600 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
601 NVME_TCP_RECV_DATA;
602 }
603
nvme_tcp_init_recv_ctx(struct nvme_tcp_queue * queue)604 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
605 {
606 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
607 nvme_tcp_hdgst_len(queue);
608 queue->pdu_offset = 0;
609 queue->data_remaining = -1;
610 queue->ddgst_remaining = 0;
611 }
612
nvme_tcp_error_recovery(struct nvme_ctrl * ctrl)613 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
614 {
615 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
616 return;
617
618 dev_warn(ctrl->device, "starting error recovery\n");
619 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
620 }
621
nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue * queue,struct nvme_completion * cqe)622 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
623 struct nvme_completion *cqe)
624 {
625 struct nvme_tcp_request *req;
626 struct request *rq;
627
628 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
629 if (!rq) {
630 dev_err(queue->ctrl->ctrl.device,
631 "got bad cqe.command_id %#x on queue %d\n",
632 cqe->command_id, nvme_tcp_queue_id(queue));
633 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
634 return -EINVAL;
635 }
636
637 req = blk_mq_rq_to_pdu(rq);
638 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
639 req->status = cqe->status;
640
641 if (!nvme_try_complete_req(rq, req->status, cqe->result))
642 nvme_complete_rq(rq);
643 queue->nr_cqe++;
644
645 return 0;
646 }
647
nvme_tcp_handle_c2h_data(struct nvme_tcp_queue * queue,struct nvme_tcp_data_pdu * pdu)648 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
649 struct nvme_tcp_data_pdu *pdu)
650 {
651 struct request *rq;
652
653 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
654 if (!rq) {
655 dev_err(queue->ctrl->ctrl.device,
656 "got bad c2hdata.command_id %#x on queue %d\n",
657 pdu->command_id, nvme_tcp_queue_id(queue));
658 return -ENOENT;
659 }
660
661 if (!blk_rq_payload_bytes(rq)) {
662 dev_err(queue->ctrl->ctrl.device,
663 "queue %d tag %#x unexpected data\n",
664 nvme_tcp_queue_id(queue), rq->tag);
665 return -EIO;
666 }
667
668 queue->data_remaining = le32_to_cpu(pdu->data_length);
669
670 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
671 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
672 dev_err(queue->ctrl->ctrl.device,
673 "queue %d tag %#x SUCCESS set but not last PDU\n",
674 nvme_tcp_queue_id(queue), rq->tag);
675 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
676 return -EPROTO;
677 }
678
679 return 0;
680 }
681
nvme_tcp_handle_comp(struct nvme_tcp_queue * queue,struct nvme_tcp_rsp_pdu * pdu)682 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
683 struct nvme_tcp_rsp_pdu *pdu)
684 {
685 struct nvme_completion *cqe = &pdu->cqe;
686 int ret = 0;
687
688 /*
689 * AEN requests are special as they don't time out and can
690 * survive any kind of queue freeze and often don't respond to
691 * aborts. We don't even bother to allocate a struct request
692 * for them but rather special case them here.
693 */
694 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
695 cqe->command_id)))
696 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
697 &cqe->result);
698 else
699 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
700
701 return ret;
702 }
703
nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request * req)704 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
705 {
706 struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
707 struct nvme_tcp_queue *queue = req->queue;
708 struct request *rq = blk_mq_rq_from_pdu(req);
709 u32 h2cdata_sent = req->pdu_len;
710 u8 hdgst = nvme_tcp_hdgst_len(queue);
711 u8 ddgst = nvme_tcp_ddgst_len(queue);
712
713 req->state = NVME_TCP_SEND_H2C_PDU;
714 req->offset = 0;
715 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
716 req->pdu_sent = 0;
717 req->h2cdata_left -= req->pdu_len;
718 req->h2cdata_offset += h2cdata_sent;
719
720 memset(data, 0, sizeof(*data));
721 data->hdr.type = nvme_tcp_h2c_data;
722 if (!req->h2cdata_left)
723 data->hdr.flags = NVME_TCP_F_DATA_LAST;
724 if (queue->hdr_digest)
725 data->hdr.flags |= NVME_TCP_F_HDGST;
726 if (queue->data_digest)
727 data->hdr.flags |= NVME_TCP_F_DDGST;
728 data->hdr.hlen = sizeof(*data);
729 data->hdr.pdo = data->hdr.hlen + hdgst;
730 data->hdr.plen =
731 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
732 data->ttag = req->ttag;
733 data->command_id = nvme_cid(rq);
734 data->data_offset = cpu_to_le32(req->h2cdata_offset);
735 data->data_length = cpu_to_le32(req->pdu_len);
736 }
737
nvme_tcp_handle_r2t(struct nvme_tcp_queue * queue,struct nvme_tcp_r2t_pdu * pdu)738 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
739 struct nvme_tcp_r2t_pdu *pdu)
740 {
741 struct nvme_tcp_request *req;
742 struct request *rq;
743 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
744 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
745
746 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
747 if (!rq) {
748 dev_err(queue->ctrl->ctrl.device,
749 "got bad r2t.command_id %#x on queue %d\n",
750 pdu->command_id, nvme_tcp_queue_id(queue));
751 return -ENOENT;
752 }
753 req = blk_mq_rq_to_pdu(rq);
754
755 if (unlikely(!r2t_length)) {
756 dev_err(queue->ctrl->ctrl.device,
757 "req %d r2t len is %u, probably a bug...\n",
758 rq->tag, r2t_length);
759 return -EPROTO;
760 }
761
762 if (unlikely(req->data_sent + r2t_length > req->data_len)) {
763 dev_err(queue->ctrl->ctrl.device,
764 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
765 rq->tag, r2t_length, req->data_len, req->data_sent);
766 return -EPROTO;
767 }
768
769 if (unlikely(r2t_offset < req->data_sent)) {
770 dev_err(queue->ctrl->ctrl.device,
771 "req %d unexpected r2t offset %u (expected %zu)\n",
772 rq->tag, r2t_offset, req->data_sent);
773 return -EPROTO;
774 }
775
776 if (llist_on_list(&req->lentry) ||
777 !list_empty(&req->entry)) {
778 dev_err(queue->ctrl->ctrl.device,
779 "req %d unexpected r2t while processing request\n",
780 rq->tag);
781 return -EPROTO;
782 }
783
784 req->pdu_len = 0;
785 req->h2cdata_left = r2t_length;
786 req->h2cdata_offset = r2t_offset;
787 req->ttag = pdu->ttag;
788
789 nvme_tcp_setup_h2c_data_pdu(req);
790
791 llist_add(&req->lentry, &queue->req_list);
792 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
793
794 return 0;
795 }
796
nvme_tcp_handle_c2h_term(struct nvme_tcp_queue * queue,struct nvme_tcp_term_pdu * pdu)797 static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
798 struct nvme_tcp_term_pdu *pdu)
799 {
800 u16 fes;
801 const char *msg;
802 u32 plen = le32_to_cpu(pdu->hdr.plen);
803
804 static const char * const msg_table[] = {
805 [NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field",
806 [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
807 [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
808 [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
809 [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
810 [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
811 };
812
813 if (plen < NVME_TCP_MIN_C2HTERM_PLEN ||
814 plen > NVME_TCP_MAX_C2HTERM_PLEN) {
815 dev_err(queue->ctrl->ctrl.device,
816 "Received a malformed C2HTermReq PDU (plen = %u)\n",
817 plen);
818 return;
819 }
820
821 fes = le16_to_cpu(pdu->fes);
822 if (fes && fes < ARRAY_SIZE(msg_table))
823 msg = msg_table[fes];
824 else
825 msg = "Unknown";
826
827 dev_err(queue->ctrl->ctrl.device,
828 "Received C2HTermReq (FES = %s)\n", msg);
829 }
830
nvme_tcp_recv_pdu(struct nvme_tcp_queue * queue,struct sk_buff * skb,unsigned int * offset,size_t * len)831 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
832 unsigned int *offset, size_t *len)
833 {
834 struct nvme_tcp_hdr *hdr;
835 char *pdu = queue->pdu;
836 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
837 int ret;
838
839 ret = skb_copy_bits(skb, *offset,
840 &pdu[queue->pdu_offset], rcv_len);
841 if (unlikely(ret))
842 return ret;
843
844 queue->pdu_remaining -= rcv_len;
845 queue->pdu_offset += rcv_len;
846 *offset += rcv_len;
847 *len -= rcv_len;
848 if (queue->pdu_remaining)
849 return 0;
850
851 hdr = queue->pdu;
852 if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
853 if (!nvme_tcp_recv_pdu_supported(hdr->type))
854 goto unsupported_pdu;
855
856 dev_err(queue->ctrl->ctrl.device,
857 "pdu type %d has unexpected header length (%d)\n",
858 hdr->type, hdr->hlen);
859 return -EPROTO;
860 }
861
862 if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
863 /*
864 * C2HTermReq never includes Header or Data digests.
865 * Skip the checks.
866 */
867 nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu);
868 return -EINVAL;
869 }
870
871 if (queue->hdr_digest) {
872 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
873 if (unlikely(ret))
874 return ret;
875 }
876
877
878 if (queue->data_digest) {
879 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
880 if (unlikely(ret))
881 return ret;
882 }
883
884 switch (hdr->type) {
885 case nvme_tcp_c2h_data:
886 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
887 case nvme_tcp_rsp:
888 nvme_tcp_init_recv_ctx(queue);
889 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
890 case nvme_tcp_r2t:
891 nvme_tcp_init_recv_ctx(queue);
892 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
893 default:
894 goto unsupported_pdu;
895 }
896
897 unsupported_pdu:
898 dev_err(queue->ctrl->ctrl.device,
899 "unsupported pdu type (%d)\n", hdr->type);
900 return -EINVAL;
901 }
902
nvme_tcp_end_request(struct request * rq,u16 status)903 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
904 {
905 union nvme_result res = {};
906
907 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
908 nvme_complete_rq(rq);
909 }
910
nvme_tcp_recv_data(struct nvme_tcp_queue * queue,struct sk_buff * skb,unsigned int * offset,size_t * len)911 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
912 unsigned int *offset, size_t *len)
913 {
914 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
915 struct request *rq =
916 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
917 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
918
919 while (true) {
920 int recv_len, ret;
921
922 recv_len = min_t(size_t, *len, queue->data_remaining);
923 if (!recv_len)
924 break;
925
926 if (!iov_iter_count(&req->iter)) {
927 req->curr_bio = req->curr_bio->bi_next;
928
929 /*
930 * If we don't have any bios it means the controller
931 * sent more data than we requested, hence error
932 */
933 if (!req->curr_bio) {
934 dev_err(queue->ctrl->ctrl.device,
935 "queue %d no space in request %#x",
936 nvme_tcp_queue_id(queue), rq->tag);
937 nvme_tcp_init_recv_ctx(queue);
938 return -EIO;
939 }
940 nvme_tcp_init_iter(req, ITER_DEST);
941 }
942
943 /* we can read only from what is left in this bio */
944 recv_len = min_t(size_t, recv_len,
945 iov_iter_count(&req->iter));
946
947 if (queue->data_digest)
948 ret = skb_copy_and_crc32c_datagram_iter(skb, *offset,
949 &req->iter, recv_len, &queue->rcv_crc);
950 else
951 ret = skb_copy_datagram_iter(skb, *offset,
952 &req->iter, recv_len);
953 if (ret) {
954 dev_err(queue->ctrl->ctrl.device,
955 "queue %d failed to copy request %#x data",
956 nvme_tcp_queue_id(queue), rq->tag);
957 return ret;
958 }
959
960 *len -= recv_len;
961 *offset += recv_len;
962 queue->data_remaining -= recv_len;
963 }
964
965 if (!queue->data_remaining) {
966 if (queue->data_digest) {
967 queue->exp_ddgst = nvme_tcp_ddgst_final(queue->rcv_crc);
968 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
969 } else {
970 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
971 nvme_tcp_end_request(rq,
972 le16_to_cpu(req->status));
973 queue->nr_cqe++;
974 }
975 nvme_tcp_init_recv_ctx(queue);
976 }
977 }
978
979 return 0;
980 }
981
nvme_tcp_recv_ddgst(struct nvme_tcp_queue * queue,struct sk_buff * skb,unsigned int * offset,size_t * len)982 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
983 struct sk_buff *skb, unsigned int *offset, size_t *len)
984 {
985 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
986 char *ddgst = (char *)&queue->recv_ddgst;
987 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
988 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
989 int ret;
990
991 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
992 if (unlikely(ret))
993 return ret;
994
995 queue->ddgst_remaining -= recv_len;
996 *offset += recv_len;
997 *len -= recv_len;
998 if (queue->ddgst_remaining)
999 return 0;
1000
1001 if (queue->recv_ddgst != queue->exp_ddgst) {
1002 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
1003 pdu->command_id);
1004 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
1005
1006 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
1007
1008 dev_err(queue->ctrl->ctrl.device,
1009 "data digest error: recv %#x expected %#x\n",
1010 le32_to_cpu(queue->recv_ddgst),
1011 le32_to_cpu(queue->exp_ddgst));
1012 }
1013
1014 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
1015 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
1016 pdu->command_id);
1017 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
1018
1019 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
1020 queue->nr_cqe++;
1021 }
1022
1023 nvme_tcp_init_recv_ctx(queue);
1024 return 0;
1025 }
1026
nvme_tcp_recv_skb(read_descriptor_t * desc,struct sk_buff * skb,unsigned int offset,size_t len)1027 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
1028 unsigned int offset, size_t len)
1029 {
1030 struct nvme_tcp_queue *queue = desc->arg.data;
1031 size_t consumed = len;
1032 int result;
1033
1034 if (unlikely(!queue->rd_enabled))
1035 return -EFAULT;
1036
1037 while (len) {
1038 switch (nvme_tcp_recv_state(queue)) {
1039 case NVME_TCP_RECV_PDU:
1040 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
1041 break;
1042 case NVME_TCP_RECV_DATA:
1043 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
1044 break;
1045 case NVME_TCP_RECV_DDGST:
1046 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
1047 break;
1048 default:
1049 result = -EFAULT;
1050 }
1051 if (result) {
1052 dev_err(queue->ctrl->ctrl.device,
1053 "receive failed: %d\n", result);
1054 queue->rd_enabled = false;
1055 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
1056 return result;
1057 }
1058 }
1059
1060 return consumed;
1061 }
1062
nvme_tcp_data_ready(struct sock * sk)1063 static void nvme_tcp_data_ready(struct sock *sk)
1064 {
1065 struct nvme_tcp_queue *queue;
1066
1067 trace_sk_data_ready(sk);
1068
1069 read_lock_bh(&sk->sk_callback_lock);
1070 queue = sk->sk_user_data;
1071 if (likely(queue && queue->rd_enabled) &&
1072 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
1073 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1074 read_unlock_bh(&sk->sk_callback_lock);
1075 }
1076
nvme_tcp_write_space(struct sock * sk)1077 static void nvme_tcp_write_space(struct sock *sk)
1078 {
1079 struct nvme_tcp_queue *queue;
1080
1081 read_lock_bh(&sk->sk_callback_lock);
1082 queue = sk->sk_user_data;
1083 if (likely(queue && sk_stream_is_writeable(sk))) {
1084 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1085 /* Ensure pending TLS partial records are retried */
1086 if (nvme_tcp_queue_tls(queue))
1087 queue->write_space(sk);
1088 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1089 }
1090 read_unlock_bh(&sk->sk_callback_lock);
1091 }
1092
nvme_tcp_state_change(struct sock * sk)1093 static void nvme_tcp_state_change(struct sock *sk)
1094 {
1095 struct nvme_tcp_queue *queue;
1096
1097 read_lock_bh(&sk->sk_callback_lock);
1098 queue = sk->sk_user_data;
1099 if (!queue)
1100 goto done;
1101
1102 switch (sk->sk_state) {
1103 case TCP_CLOSE:
1104 case TCP_CLOSE_WAIT:
1105 case TCP_LAST_ACK:
1106 case TCP_FIN_WAIT1:
1107 case TCP_FIN_WAIT2:
1108 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
1109 break;
1110 default:
1111 dev_info(queue->ctrl->ctrl.device,
1112 "queue %d socket state %d\n",
1113 nvme_tcp_queue_id(queue), sk->sk_state);
1114 }
1115
1116 queue->state_change(sk);
1117 done:
1118 read_unlock_bh(&sk->sk_callback_lock);
1119 }
1120
nvme_tcp_done_send_req(struct nvme_tcp_queue * queue)1121 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
1122 {
1123 queue->request = NULL;
1124 }
1125
nvme_tcp_fail_request(struct nvme_tcp_request * req)1126 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
1127 {
1128 if (nvme_tcp_async_req(req)) {
1129 union nvme_result res = {};
1130
1131 nvme_complete_async_event(&req->queue->ctrl->ctrl,
1132 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
1133 } else {
1134 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
1135 NVME_SC_HOST_PATH_ERROR);
1136 }
1137 }
1138
nvme_tcp_try_send_data(struct nvme_tcp_request * req)1139 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
1140 {
1141 struct nvme_tcp_queue *queue = req->queue;
1142 int req_data_len = req->data_len;
1143 u32 h2cdata_left = req->h2cdata_left;
1144
1145 while (true) {
1146 struct bio_vec bvec;
1147 struct msghdr msg = {
1148 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
1149 };
1150 struct page *page = nvme_tcp_req_cur_page(req);
1151 size_t offset = nvme_tcp_req_cur_offset(req);
1152 size_t len = nvme_tcp_req_cur_length(req);
1153 bool last = nvme_tcp_pdu_last_send(req, len);
1154 int req_data_sent = req->data_sent;
1155 int ret;
1156
1157 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
1158 msg.msg_flags |= MSG_EOR;
1159 else
1160 msg.msg_flags |= MSG_MORE;
1161
1162 if (!sendpages_ok(page, len, offset))
1163 msg.msg_flags &= ~MSG_SPLICE_PAGES;
1164
1165 bvec_set_page(&bvec, page, len, offset);
1166 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1167 ret = sock_sendmsg(queue->sock, &msg);
1168 if (ret <= 0)
1169 return ret;
1170
1171 if (queue->data_digest)
1172 nvme_tcp_ddgst_update(&queue->snd_crc, page,
1173 offset, ret);
1174
1175 /*
1176 * update the request iterator except for the last payload send
1177 * in the request where we don't want to modify it as we may
1178 * compete with the RX path completing the request.
1179 */
1180 if (req_data_sent + ret < req_data_len)
1181 nvme_tcp_advance_req(req, ret);
1182
1183 /* fully successful last send in current PDU */
1184 if (last && ret == len) {
1185 if (queue->data_digest) {
1186 req->ddgst =
1187 nvme_tcp_ddgst_final(queue->snd_crc);
1188 req->state = NVME_TCP_SEND_DDGST;
1189 req->offset = 0;
1190 } else {
1191 if (h2cdata_left)
1192 nvme_tcp_setup_h2c_data_pdu(req);
1193 else
1194 nvme_tcp_done_send_req(queue);
1195 }
1196 return 1;
1197 }
1198 }
1199 return -EAGAIN;
1200 }
1201
nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request * req)1202 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1203 {
1204 struct nvme_tcp_queue *queue = req->queue;
1205 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
1206 struct bio_vec bvec;
1207 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
1208 bool inline_data = nvme_tcp_has_inline_data(req);
1209 u8 hdgst = nvme_tcp_hdgst_len(queue);
1210 int len = sizeof(*pdu) + hdgst - req->offset;
1211 int ret;
1212
1213 if (inline_data || nvme_tcp_queue_more(queue))
1214 msg.msg_flags |= MSG_MORE;
1215 else
1216 msg.msg_flags |= MSG_EOR;
1217
1218 if (queue->hdr_digest && !req->offset)
1219 nvme_tcp_set_hdgst(pdu, sizeof(*pdu));
1220
1221 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1222 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1223 ret = sock_sendmsg(queue->sock, &msg);
1224 if (unlikely(ret <= 0))
1225 return ret;
1226
1227 len -= ret;
1228 if (!len) {
1229 if (inline_data) {
1230 req->state = NVME_TCP_SEND_DATA;
1231 if (queue->data_digest)
1232 queue->snd_crc = NVME_TCP_CRC_SEED;
1233 } else {
1234 nvme_tcp_done_send_req(queue);
1235 }
1236 return 1;
1237 }
1238 req->offset += ret;
1239
1240 return -EAGAIN;
1241 }
1242
nvme_tcp_try_send_data_pdu(struct nvme_tcp_request * req)1243 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1244 {
1245 struct nvme_tcp_queue *queue = req->queue;
1246 struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
1247 struct bio_vec bvec;
1248 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, };
1249 u8 hdgst = nvme_tcp_hdgst_len(queue);
1250 int len = sizeof(*pdu) - req->offset + hdgst;
1251 int ret;
1252
1253 if (queue->hdr_digest && !req->offset)
1254 nvme_tcp_set_hdgst(pdu, sizeof(*pdu));
1255
1256 if (!req->h2cdata_left)
1257 msg.msg_flags |= MSG_SPLICE_PAGES;
1258
1259 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1260 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1261 ret = sock_sendmsg(queue->sock, &msg);
1262 if (unlikely(ret <= 0))
1263 return ret;
1264
1265 len -= ret;
1266 if (!len) {
1267 req->state = NVME_TCP_SEND_DATA;
1268 if (queue->data_digest)
1269 queue->snd_crc = NVME_TCP_CRC_SEED;
1270 return 1;
1271 }
1272 req->offset += ret;
1273
1274 return -EAGAIN;
1275 }
1276
nvme_tcp_try_send_ddgst(struct nvme_tcp_request * req)1277 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1278 {
1279 struct nvme_tcp_queue *queue = req->queue;
1280 size_t offset = req->offset;
1281 u32 h2cdata_left = req->h2cdata_left;
1282 int ret;
1283 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1284 struct kvec iov = {
1285 .iov_base = (u8 *)&req->ddgst + req->offset,
1286 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1287 };
1288
1289 if (nvme_tcp_queue_more(queue))
1290 msg.msg_flags |= MSG_MORE;
1291 else
1292 msg.msg_flags |= MSG_EOR;
1293
1294 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1295 if (unlikely(ret <= 0))
1296 return ret;
1297
1298 if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
1299 if (h2cdata_left)
1300 nvme_tcp_setup_h2c_data_pdu(req);
1301 else
1302 nvme_tcp_done_send_req(queue);
1303 return 1;
1304 }
1305
1306 req->offset += ret;
1307 return -EAGAIN;
1308 }
1309
nvme_tcp_try_send(struct nvme_tcp_queue * queue)1310 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1311 {
1312 struct nvme_tcp_request *req;
1313 unsigned int noreclaim_flag;
1314 int ret = 1;
1315
1316 if (!queue->request) {
1317 queue->request = nvme_tcp_fetch_request(queue);
1318 if (!queue->request)
1319 return 0;
1320 }
1321 req = queue->request;
1322
1323 noreclaim_flag = memalloc_noreclaim_save();
1324 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1325 ret = nvme_tcp_try_send_cmd_pdu(req);
1326 if (ret <= 0)
1327 goto done;
1328 if (!nvme_tcp_has_inline_data(req))
1329 goto out;
1330 }
1331
1332 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1333 ret = nvme_tcp_try_send_data_pdu(req);
1334 if (ret <= 0)
1335 goto done;
1336 }
1337
1338 if (req->state == NVME_TCP_SEND_DATA) {
1339 ret = nvme_tcp_try_send_data(req);
1340 if (ret <= 0)
1341 goto done;
1342 }
1343
1344 if (req->state == NVME_TCP_SEND_DDGST)
1345 ret = nvme_tcp_try_send_ddgst(req);
1346 done:
1347 if (ret == -EAGAIN) {
1348 ret = 0;
1349 } else if (ret < 0) {
1350 dev_err(queue->ctrl->ctrl.device,
1351 "failed to send request %d\n", ret);
1352 nvme_tcp_fail_request(queue->request);
1353 nvme_tcp_done_send_req(queue);
1354 }
1355 out:
1356 memalloc_noreclaim_restore(noreclaim_flag);
1357 return ret;
1358 }
1359
nvme_tcp_try_recv(struct nvme_tcp_queue * queue)1360 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1361 {
1362 struct socket *sock = queue->sock;
1363 struct sock *sk = sock->sk;
1364 read_descriptor_t rd_desc;
1365 int consumed;
1366
1367 rd_desc.arg.data = queue;
1368 rd_desc.count = 1;
1369 lock_sock(sk);
1370 queue->nr_cqe = 0;
1371 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1372 release_sock(sk);
1373 return consumed == -EAGAIN ? 0 : consumed;
1374 }
1375
nvme_tcp_io_work(struct work_struct * w)1376 static void nvme_tcp_io_work(struct work_struct *w)
1377 {
1378 struct nvme_tcp_queue *queue =
1379 container_of(w, struct nvme_tcp_queue, io_work);
1380 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1381
1382 do {
1383 bool pending = false;
1384 int result;
1385
1386 if (mutex_trylock(&queue->send_mutex)) {
1387 result = nvme_tcp_try_send(queue);
1388 mutex_unlock(&queue->send_mutex);
1389 if (result > 0)
1390 pending = true;
1391 else if (unlikely(result < 0))
1392 break;
1393 }
1394
1395 result = nvme_tcp_try_recv(queue);
1396 if (result > 0)
1397 pending = true;
1398 else if (unlikely(result < 0))
1399 return;
1400
1401 /* did we get some space after spending time in recv? */
1402 if (nvme_tcp_queue_has_pending(queue) &&
1403 sk_stream_is_writeable(queue->sock->sk))
1404 pending = true;
1405
1406 if (!pending || !queue->rd_enabled)
1407 return;
1408
1409 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1410
1411 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1412 }
1413
nvme_tcp_free_async_req(struct nvme_tcp_ctrl * ctrl)1414 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1415 {
1416 struct nvme_tcp_request *async = &ctrl->async_req;
1417
1418 page_frag_free(async->pdu);
1419 }
1420
nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl * ctrl)1421 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1422 {
1423 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1424 struct nvme_tcp_request *async = &ctrl->async_req;
1425 u8 hdgst = nvme_tcp_hdgst_len(queue);
1426
1427 async->pdu = page_frag_alloc(&queue->pf_cache,
1428 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1429 GFP_KERNEL | __GFP_ZERO);
1430 if (!async->pdu)
1431 return -ENOMEM;
1432
1433 async->queue = &ctrl->queues[0];
1434 return 0;
1435 }
1436
nvme_tcp_free_queue(struct nvme_ctrl * nctrl,int qid)1437 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1438 {
1439 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1440 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1441 unsigned int noio_flag;
1442
1443 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1444 return;
1445
1446 page_frag_cache_drain(&queue->pf_cache);
1447
1448 /**
1449 * Prevent memory reclaim from triggering block I/O during socket
1450 * teardown. The socket release path fput -> tcp_close ->
1451 * tcp_disconnect -> tcp_send_active_reset may allocate memory, and
1452 * allowing reclaim to issue I/O could deadlock if we're being called
1453 * from block device teardown (e.g., del_gendisk -> elevator cleanup)
1454 * which holds locks that the I/O completion path needs.
1455 */
1456 noio_flag = memalloc_noio_save();
1457
1458 /**
1459 * Release the socket synchronously. During reset in
1460 * nvme_reset_ctrl_work(), queue teardown is immediately followed by
1461 * re-allocation. fput() defers socket cleanup to delayed_fput_work
1462 * in workqueue context, which can race with new queue setup.
1463 */
1464 __fput_sync(queue->sock->file);
1465 queue->sock = NULL;
1466 memalloc_noio_restore(noio_flag);
1467
1468 kfree(queue->pdu);
1469 mutex_destroy(&queue->send_mutex);
1470 mutex_destroy(&queue->queue_lock);
1471 }
1472
nvme_tcp_init_connection(struct nvme_tcp_queue * queue)1473 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1474 {
1475 struct nvme_tcp_icreq_pdu *icreq;
1476 struct nvme_tcp_icresp_pdu *icresp;
1477 char cbuf[CMSG_LEN(sizeof(char))] = {};
1478 u8 ctype;
1479 struct msghdr msg = {};
1480 struct kvec iov;
1481 bool ctrl_hdgst, ctrl_ddgst;
1482 u32 maxh2cdata;
1483 int ret;
1484
1485 icreq = kzalloc_obj(*icreq);
1486 if (!icreq)
1487 return -ENOMEM;
1488
1489 icresp = kzalloc_obj(*icresp);
1490 if (!icresp) {
1491 ret = -ENOMEM;
1492 goto free_icreq;
1493 }
1494
1495 icreq->hdr.type = nvme_tcp_icreq;
1496 icreq->hdr.hlen = sizeof(*icreq);
1497 icreq->hdr.pdo = 0;
1498 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1499 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1500 icreq->maxr2t = 0; /* single inflight r2t supported */
1501 icreq->hpda = 0; /* no alignment constraint */
1502 if (queue->hdr_digest)
1503 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1504 if (queue->data_digest)
1505 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1506
1507 iov.iov_base = icreq;
1508 iov.iov_len = sizeof(*icreq);
1509 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1510 if (ret < 0) {
1511 pr_warn("queue %d: failed to send icreq, error %d\n",
1512 nvme_tcp_queue_id(queue), ret);
1513 goto free_icresp;
1514 }
1515
1516 memset(&msg, 0, sizeof(msg));
1517 iov.iov_base = icresp;
1518 iov.iov_len = sizeof(*icresp);
1519 if (nvme_tcp_queue_tls(queue)) {
1520 msg.msg_control = cbuf;
1521 msg.msg_controllen = sizeof(cbuf);
1522 }
1523 msg.msg_flags = MSG_WAITALL;
1524 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1525 iov.iov_len, msg.msg_flags);
1526 if (ret >= 0 && ret < sizeof(*icresp))
1527 ret = -ECONNRESET;
1528 if (ret < 0) {
1529 pr_warn("queue %d: failed to receive icresp, error %d\n",
1530 nvme_tcp_queue_id(queue), ret);
1531 goto free_icresp;
1532 }
1533 ret = -ENOTCONN;
1534 if (nvme_tcp_queue_tls(queue)) {
1535 ctype = tls_get_record_type(queue->sock->sk,
1536 (struct cmsghdr *)cbuf);
1537 if (ctype != TLS_RECORD_TYPE_DATA) {
1538 pr_err("queue %d: unhandled TLS record %d\n",
1539 nvme_tcp_queue_id(queue), ctype);
1540 goto free_icresp;
1541 }
1542 }
1543 ret = -EINVAL;
1544 if (icresp->hdr.type != nvme_tcp_icresp) {
1545 pr_err("queue %d: bad type returned %d\n",
1546 nvme_tcp_queue_id(queue), icresp->hdr.type);
1547 goto free_icresp;
1548 }
1549
1550 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1551 pr_err("queue %d: bad pdu length returned %d\n",
1552 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1553 goto free_icresp;
1554 }
1555
1556 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1557 pr_err("queue %d: bad pfv returned %d\n",
1558 nvme_tcp_queue_id(queue), icresp->pfv);
1559 goto free_icresp;
1560 }
1561
1562 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1563 if ((queue->data_digest && !ctrl_ddgst) ||
1564 (!queue->data_digest && ctrl_ddgst)) {
1565 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1566 nvme_tcp_queue_id(queue),
1567 queue->data_digest ? "enabled" : "disabled",
1568 ctrl_ddgst ? "enabled" : "disabled");
1569 goto free_icresp;
1570 }
1571
1572 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1573 if ((queue->hdr_digest && !ctrl_hdgst) ||
1574 (!queue->hdr_digest && ctrl_hdgst)) {
1575 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1576 nvme_tcp_queue_id(queue),
1577 queue->hdr_digest ? "enabled" : "disabled",
1578 ctrl_hdgst ? "enabled" : "disabled");
1579 goto free_icresp;
1580 }
1581
1582 if (icresp->cpda != 0) {
1583 pr_err("queue %d: unsupported cpda returned %d\n",
1584 nvme_tcp_queue_id(queue), icresp->cpda);
1585 goto free_icresp;
1586 }
1587
1588 maxh2cdata = le32_to_cpu(icresp->maxdata);
1589 if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
1590 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1591 nvme_tcp_queue_id(queue), maxh2cdata);
1592 goto free_icresp;
1593 }
1594 queue->maxh2cdata = maxh2cdata;
1595
1596 ret = 0;
1597 free_icresp:
1598 kfree(icresp);
1599 free_icreq:
1600 kfree(icreq);
1601 return ret;
1602 }
1603
nvme_tcp_admin_queue(struct nvme_tcp_queue * queue)1604 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1605 {
1606 return nvme_tcp_queue_id(queue) == 0;
1607 }
1608
nvme_tcp_default_queue(struct nvme_tcp_queue * queue)1609 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1610 {
1611 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1612 int qid = nvme_tcp_queue_id(queue);
1613
1614 return !nvme_tcp_admin_queue(queue) &&
1615 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1616 }
1617
nvme_tcp_read_queue(struct nvme_tcp_queue * queue)1618 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1619 {
1620 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1621 int qid = nvme_tcp_queue_id(queue);
1622
1623 return !nvme_tcp_admin_queue(queue) &&
1624 !nvme_tcp_default_queue(queue) &&
1625 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1626 ctrl->io_queues[HCTX_TYPE_READ];
1627 }
1628
nvme_tcp_poll_queue(struct nvme_tcp_queue * queue)1629 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1630 {
1631 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1632 int qid = nvme_tcp_queue_id(queue);
1633
1634 return !nvme_tcp_admin_queue(queue) &&
1635 !nvme_tcp_default_queue(queue) &&
1636 !nvme_tcp_read_queue(queue) &&
1637 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1638 ctrl->io_queues[HCTX_TYPE_READ] +
1639 ctrl->io_queues[HCTX_TYPE_POLL];
1640 }
1641
1642 /*
1643 * Track the number of queues assigned to each cpu using a global per-cpu
1644 * counter and select the least used cpu from the mq_map. Our goal is to spread
1645 * different controllers I/O threads across different cpu cores.
1646 *
1647 * Note that the accounting is not 100% perfect, but we don't need to be, we're
1648 * simply putting our best effort to select the best candidate cpu core that we
1649 * find at any given point.
1650 */
nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue * queue)1651 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1652 {
1653 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1654 struct blk_mq_tag_set *set = &ctrl->tag_set;
1655 int qid = nvme_tcp_queue_id(queue) - 1;
1656 unsigned int *mq_map = NULL;
1657 int cpu, min_queues = INT_MAX, io_cpu;
1658
1659 if (wq_unbound)
1660 goto out;
1661
1662 if (nvme_tcp_default_queue(queue))
1663 mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
1664 else if (nvme_tcp_read_queue(queue))
1665 mq_map = set->map[HCTX_TYPE_READ].mq_map;
1666 else if (nvme_tcp_poll_queue(queue))
1667 mq_map = set->map[HCTX_TYPE_POLL].mq_map;
1668
1669 if (WARN_ON(!mq_map))
1670 goto out;
1671
1672 /* Search for the least used cpu from the mq_map */
1673 io_cpu = WORK_CPU_UNBOUND;
1674 for_each_online_cpu(cpu) {
1675 int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
1676
1677 if (mq_map[cpu] != qid)
1678 continue;
1679 if (num_queues < min_queues) {
1680 io_cpu = cpu;
1681 min_queues = num_queues;
1682 }
1683 }
1684 if (io_cpu != WORK_CPU_UNBOUND) {
1685 queue->io_cpu = io_cpu;
1686 atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
1687 set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
1688 }
1689 out:
1690 dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
1691 qid, queue->io_cpu);
1692 }
1693
nvme_tcp_tls_done(void * data,int status,key_serial_t pskid)1694 static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
1695 {
1696 struct nvme_tcp_queue *queue = data;
1697 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1698 int qid = nvme_tcp_queue_id(queue);
1699 struct key *tls_key;
1700
1701 dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n",
1702 qid, pskid, status);
1703
1704 if (status) {
1705 queue->tls_err = -status;
1706 goto out_complete;
1707 }
1708
1709 tls_key = nvme_tls_key_lookup(pskid);
1710 if (IS_ERR(tls_key)) {
1711 dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n",
1712 qid, pskid);
1713 queue->tls_err = -ENOKEY;
1714 } else {
1715 queue->tls_enabled = true;
1716 if (qid == 0)
1717 ctrl->ctrl.tls_pskid = key_serial(tls_key);
1718 key_put(tls_key);
1719 queue->tls_err = 0;
1720 }
1721
1722 out_complete:
1723 complete(&queue->tls_complete);
1724 }
1725
nvme_tcp_start_tls(struct nvme_ctrl * nctrl,struct nvme_tcp_queue * queue,key_serial_t pskid)1726 static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
1727 struct nvme_tcp_queue *queue,
1728 key_serial_t pskid)
1729 {
1730 int qid = nvme_tcp_queue_id(queue);
1731 int ret;
1732 struct tls_handshake_args args;
1733 unsigned long tmo = tls_handshake_timeout * HZ;
1734 key_serial_t keyring = nvme_keyring_id();
1735
1736 dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n",
1737 qid, pskid);
1738 memset(&args, 0, sizeof(args));
1739 args.ta_sock = queue->sock;
1740 args.ta_done = nvme_tcp_tls_done;
1741 args.ta_data = queue;
1742 args.ta_my_peerids[0] = pskid;
1743 args.ta_num_peerids = 1;
1744 if (nctrl->opts->keyring)
1745 keyring = key_serial(nctrl->opts->keyring);
1746 args.ta_keyring = keyring;
1747 args.ta_timeout_ms = tls_handshake_timeout * 1000;
1748 queue->tls_err = -EOPNOTSUPP;
1749 init_completion(&queue->tls_complete);
1750 ret = tls_client_hello_psk(&args, GFP_KERNEL);
1751 if (ret) {
1752 dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n",
1753 qid, ret);
1754 return ret;
1755 }
1756 ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo);
1757 if (ret <= 0) {
1758 if (ret == 0)
1759 ret = -ETIMEDOUT;
1760
1761 dev_err(nctrl->device,
1762 "queue %d: TLS handshake failed, error %d\n",
1763 qid, ret);
1764 tls_handshake_cancel(queue->sock->sk);
1765 } else {
1766 if (queue->tls_err) {
1767 dev_err(nctrl->device,
1768 "queue %d: TLS handshake complete, error %d\n",
1769 qid, queue->tls_err);
1770 } else {
1771 dev_dbg(nctrl->device,
1772 "queue %d: TLS handshake complete\n", qid);
1773 }
1774 ret = queue->tls_err;
1775 }
1776 return ret;
1777 }
1778
nvme_tcp_alloc_queue(struct nvme_ctrl * nctrl,int qid,key_serial_t pskid)1779 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
1780 key_serial_t pskid)
1781 {
1782 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1783 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1784 int ret, rcv_pdu_size;
1785 struct file *sock_file;
1786
1787 mutex_init(&queue->queue_lock);
1788 queue->ctrl = ctrl;
1789 init_llist_head(&queue->req_list);
1790 INIT_LIST_HEAD(&queue->send_list);
1791 mutex_init(&queue->send_mutex);
1792 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1793
1794 if (qid > 0)
1795 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1796 else
1797 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1798 NVME_TCP_ADMIN_CCSZ;
1799
1800 ret = sock_create_kern(current->nsproxy->net_ns,
1801 ctrl->addr.ss_family, SOCK_STREAM,
1802 IPPROTO_TCP, &queue->sock);
1803 if (ret) {
1804 dev_err(nctrl->device,
1805 "failed to create socket: %d\n", ret);
1806 goto err_destroy_mutex;
1807 }
1808
1809 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
1810 if (IS_ERR(sock_file)) {
1811 ret = PTR_ERR(sock_file);
1812 goto err_destroy_mutex;
1813 }
1814
1815 sk_net_refcnt_upgrade(queue->sock->sk);
1816 nvme_tcp_reclassify_socket(queue->sock);
1817
1818 /* Single syn retry */
1819 tcp_sock_set_syncnt(queue->sock->sk, 1);
1820
1821 /* Set TCP no delay */
1822 tcp_sock_set_nodelay(queue->sock->sk);
1823
1824 /*
1825 * Cleanup whatever is sitting in the TCP transmit queue on socket
1826 * close. This is done to prevent stale data from being sent should
1827 * the network connection be restored before TCP times out.
1828 */
1829 sock_no_linger(queue->sock->sk);
1830
1831 if (so_priority > 0)
1832 sock_set_priority(queue->sock->sk, so_priority);
1833
1834 /* Set socket type of service */
1835 if (nctrl->opts->tos >= 0)
1836 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1837
1838 /* Set 10 seconds timeout for icresp recvmsg */
1839 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1840
1841 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1842 queue->sock->sk->sk_use_task_frag = false;
1843 queue->io_cpu = WORK_CPU_UNBOUND;
1844 queue->request = NULL;
1845 queue->data_remaining = 0;
1846 queue->ddgst_remaining = 0;
1847 queue->pdu_remaining = 0;
1848 queue->pdu_offset = 0;
1849 sk_set_memalloc(queue->sock->sk);
1850
1851 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1852 ret = kernel_bind(queue->sock, (struct sockaddr_unsized *)&ctrl->src_addr,
1853 sizeof(ctrl->src_addr));
1854 if (ret) {
1855 dev_err(nctrl->device,
1856 "failed to bind queue %d socket %d\n",
1857 qid, ret);
1858 goto err_sock;
1859 }
1860 }
1861
1862 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1863 char *iface = nctrl->opts->host_iface;
1864 sockptr_t optval = KERNEL_SOCKPTR(iface);
1865
1866 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1867 optval, strlen(iface));
1868 if (ret) {
1869 dev_err(nctrl->device,
1870 "failed to bind to interface %s queue %d err %d\n",
1871 iface, qid, ret);
1872 goto err_sock;
1873 }
1874 }
1875
1876 queue->hdr_digest = nctrl->opts->hdr_digest;
1877 queue->data_digest = nctrl->opts->data_digest;
1878
1879 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1880 nvme_tcp_hdgst_len(queue);
1881 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1882 if (!queue->pdu) {
1883 ret = -ENOMEM;
1884 goto err_sock;
1885 }
1886
1887 dev_dbg(nctrl->device, "connecting queue %d\n",
1888 nvme_tcp_queue_id(queue));
1889
1890 ret = kernel_connect(queue->sock, (struct sockaddr_unsized *)&ctrl->addr,
1891 sizeof(ctrl->addr), 0);
1892 if (ret) {
1893 dev_err(nctrl->device,
1894 "failed to connect socket: %d\n", ret);
1895 goto err_rcv_pdu;
1896 }
1897
1898 /* If PSKs are configured try to start TLS */
1899 if (nvme_tcp_tls_configured(nctrl) && pskid) {
1900 ret = nvme_tcp_start_tls(nctrl, queue, pskid);
1901 if (ret)
1902 goto err_init_connect;
1903 }
1904
1905 ret = nvme_tcp_init_connection(queue);
1906 if (ret)
1907 goto err_init_connect;
1908
1909 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1910
1911 return 0;
1912
1913 err_init_connect:
1914 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1915 err_rcv_pdu:
1916 kfree(queue->pdu);
1917 err_sock:
1918 /* Use sync variant - see nvme_tcp_free_queue() for explanation */
1919 __fput_sync(queue->sock->file);
1920 queue->sock = NULL;
1921 err_destroy_mutex:
1922 mutex_destroy(&queue->send_mutex);
1923 mutex_destroy(&queue->queue_lock);
1924 return ret;
1925 }
1926
nvme_tcp_restore_sock_ops(struct nvme_tcp_queue * queue)1927 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
1928 {
1929 struct socket *sock = queue->sock;
1930
1931 write_lock_bh(&sock->sk->sk_callback_lock);
1932 sock->sk->sk_user_data = NULL;
1933 sock->sk->sk_data_ready = queue->data_ready;
1934 sock->sk->sk_state_change = queue->state_change;
1935 sock->sk->sk_write_space = queue->write_space;
1936 write_unlock_bh(&sock->sk->sk_callback_lock);
1937 }
1938
__nvme_tcp_stop_queue(struct nvme_tcp_queue * queue)1939 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1940 {
1941 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1942 nvme_tcp_restore_sock_ops(queue);
1943 cancel_work_sync(&queue->io_work);
1944 }
1945
nvme_tcp_stop_queue_nowait(struct nvme_ctrl * nctrl,int qid)1946 static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
1947 {
1948 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1949 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1950
1951 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1952 return;
1953
1954 if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags))
1955 atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]);
1956
1957 mutex_lock(&queue->queue_lock);
1958 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1959 __nvme_tcp_stop_queue(queue);
1960 /* Stopping the queue will disable TLS */
1961 queue->tls_enabled = false;
1962 mutex_unlock(&queue->queue_lock);
1963 }
1964
nvme_tcp_wait_queue(struct nvme_ctrl * nctrl,int qid)1965 static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
1966 {
1967 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1968 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1969 int timeout = 100;
1970
1971 while (timeout > 0) {
1972 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
1973 !sk_wmem_alloc_get(queue->sock->sk))
1974 return;
1975 msleep(2);
1976 timeout -= 2;
1977 }
1978 dev_warn(nctrl->device,
1979 "qid %d: timeout draining sock wmem allocation expired\n",
1980 qid);
1981 }
1982
nvme_tcp_stop_queue(struct nvme_ctrl * nctrl,int qid)1983 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1984 {
1985 nvme_tcp_stop_queue_nowait(nctrl, qid);
1986 nvme_tcp_wait_queue(nctrl, qid);
1987 }
1988
1989
nvme_tcp_setup_sock_ops(struct nvme_tcp_queue * queue)1990 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
1991 {
1992 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1993 queue->sock->sk->sk_user_data = queue;
1994 queue->state_change = queue->sock->sk->sk_state_change;
1995 queue->data_ready = queue->sock->sk->sk_data_ready;
1996 queue->write_space = queue->sock->sk->sk_write_space;
1997 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1998 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1999 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
2000 #ifdef CONFIG_NET_RX_BUSY_POLL
2001 queue->sock->sk->sk_ll_usec = 1;
2002 #endif
2003 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
2004 }
2005
nvme_tcp_start_queue(struct nvme_ctrl * nctrl,int idx)2006 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
2007 {
2008 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2009 struct nvme_tcp_queue *queue = &ctrl->queues[idx];
2010 int ret;
2011
2012 queue->rd_enabled = true;
2013 nvme_tcp_init_recv_ctx(queue);
2014 nvme_tcp_setup_sock_ops(queue);
2015
2016 if (idx) {
2017 nvme_tcp_set_queue_io_cpu(queue);
2018 ret = nvmf_connect_io_queue(nctrl, idx);
2019 } else
2020 ret = nvmf_connect_admin_queue(nctrl);
2021
2022 if (!ret) {
2023 set_bit(NVME_TCP_Q_LIVE, &queue->flags);
2024 } else {
2025 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
2026 __nvme_tcp_stop_queue(queue);
2027 dev_err(nctrl->device,
2028 "failed to connect queue: %d ret=%d\n", idx, ret);
2029 }
2030 return ret;
2031 }
2032
nvme_tcp_free_admin_queue(struct nvme_ctrl * ctrl)2033 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
2034 {
2035 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
2036 cancel_work_sync(&ctrl->async_event_work);
2037 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
2038 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
2039 }
2040
2041 nvme_tcp_free_queue(ctrl, 0);
2042 }
2043
nvme_tcp_free_io_queues(struct nvme_ctrl * ctrl)2044 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
2045 {
2046 int i;
2047
2048 for (i = 1; i < ctrl->queue_count; i++)
2049 nvme_tcp_free_queue(ctrl, i);
2050 }
2051
nvme_tcp_stop_io_queues(struct nvme_ctrl * ctrl)2052 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
2053 {
2054 int i;
2055
2056 for (i = 1; i < ctrl->queue_count; i++)
2057 nvme_tcp_stop_queue_nowait(ctrl, i);
2058 for (i = 1; i < ctrl->queue_count; i++)
2059 nvme_tcp_wait_queue(ctrl, i);
2060 }
2061
nvme_tcp_start_io_queues(struct nvme_ctrl * ctrl,int first,int last)2062 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
2063 int first, int last)
2064 {
2065 int i, ret;
2066
2067 for (i = first; i < last; i++) {
2068 ret = nvme_tcp_start_queue(ctrl, i);
2069 if (ret)
2070 goto out_stop_queues;
2071 }
2072
2073 return 0;
2074
2075 out_stop_queues:
2076 for (i--; i >= first; i--)
2077 nvme_tcp_stop_queue(ctrl, i);
2078 return ret;
2079 }
2080
nvme_tcp_alloc_admin_queue(struct nvme_ctrl * ctrl)2081 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
2082 {
2083 int ret;
2084 key_serial_t pskid = 0;
2085
2086 if (nvme_tcp_tls_configured(ctrl)) {
2087 if (ctrl->opts->tls_key)
2088 pskid = key_serial(ctrl->opts->tls_key);
2089 else if (ctrl->opts->tls) {
2090 pskid = nvme_tls_psk_default(ctrl->opts->keyring,
2091 ctrl->opts->host->nqn,
2092 ctrl->opts->subsysnqn);
2093 if (!pskid) {
2094 dev_err(ctrl->device, "no valid PSK found\n");
2095 return -ENOKEY;
2096 }
2097 }
2098 }
2099
2100 ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
2101 if (ret)
2102 return ret;
2103
2104 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
2105 if (ret)
2106 goto out_free_queue;
2107
2108 return 0;
2109
2110 out_free_queue:
2111 nvme_tcp_free_queue(ctrl, 0);
2112 return ret;
2113 }
2114
__nvme_tcp_alloc_io_queues(struct nvme_ctrl * ctrl)2115 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
2116 {
2117 int i, ret;
2118
2119 if (nvme_tcp_tls_configured(ctrl)) {
2120 if (ctrl->opts->concat) {
2121 /*
2122 * The generated PSK is stored in the
2123 * fabric options
2124 */
2125 if (!ctrl->opts->tls_key) {
2126 dev_err(ctrl->device, "no PSK generated\n");
2127 return -ENOKEY;
2128 }
2129 if (ctrl->tls_pskid &&
2130 ctrl->tls_pskid != key_serial(ctrl->opts->tls_key)) {
2131 dev_err(ctrl->device, "Stale PSK id %08x\n", ctrl->tls_pskid);
2132 ctrl->tls_pskid = 0;
2133 }
2134 } else if (!ctrl->tls_pskid) {
2135 dev_err(ctrl->device, "no PSK negotiated\n");
2136 return -ENOKEY;
2137 }
2138 }
2139
2140 for (i = 1; i < ctrl->queue_count; i++) {
2141 ret = nvme_tcp_alloc_queue(ctrl, i,
2142 ctrl->tls_pskid);
2143 if (ret)
2144 goto out_free_queues;
2145 }
2146
2147 return 0;
2148
2149 out_free_queues:
2150 for (i--; i >= 1; i--)
2151 nvme_tcp_free_queue(ctrl, i);
2152
2153 return ret;
2154 }
2155
nvme_tcp_alloc_io_queues(struct nvme_ctrl * ctrl)2156 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
2157 {
2158 unsigned int nr_io_queues;
2159 int ret;
2160
2161 nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
2162 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
2163 if (ret)
2164 return ret;
2165
2166 if (nr_io_queues == 0) {
2167 dev_err(ctrl->device,
2168 "unable to set any I/O queues\n");
2169 return -ENOMEM;
2170 }
2171
2172 ctrl->queue_count = nr_io_queues + 1;
2173 dev_info(ctrl->device,
2174 "creating %d I/O queues.\n", nr_io_queues);
2175
2176 nvmf_set_io_queues(ctrl->opts, nr_io_queues,
2177 to_tcp_ctrl(ctrl)->io_queues);
2178 return __nvme_tcp_alloc_io_queues(ctrl);
2179 }
2180
nvme_tcp_configure_io_queues(struct nvme_ctrl * ctrl,bool new)2181 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
2182 {
2183 int ret, nr_queues;
2184
2185 ret = nvme_tcp_alloc_io_queues(ctrl);
2186 if (ret)
2187 return ret;
2188
2189 if (new) {
2190 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
2191 &nvme_tcp_mq_ops,
2192 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
2193 sizeof(struct nvme_tcp_request));
2194 if (ret)
2195 goto out_free_io_queues;
2196 }
2197
2198 /*
2199 * Only start IO queues for which we have allocated the tagset
2200 * and limited it to the available queues. On reconnects, the
2201 * queue number might have changed.
2202 */
2203 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
2204 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
2205 if (ret)
2206 goto out_cleanup_connect_q;
2207
2208 if (!new) {
2209 nvme_start_freeze(ctrl);
2210 nvme_unquiesce_io_queues(ctrl);
2211 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
2212 /*
2213 * If we timed out waiting for freeze we are likely to
2214 * be stuck. Fail the controller initialization just
2215 * to be safe.
2216 */
2217 ret = -ENODEV;
2218 nvme_unfreeze(ctrl);
2219 goto out_wait_freeze_timed_out;
2220 }
2221 blk_mq_update_nr_hw_queues(ctrl->tagset,
2222 ctrl->queue_count - 1);
2223 nvme_unfreeze(ctrl);
2224 }
2225
2226 /*
2227 * If the number of queues has increased (reconnect case)
2228 * start all new queues now.
2229 */
2230 ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
2231 ctrl->tagset->nr_hw_queues + 1);
2232 if (ret)
2233 goto out_wait_freeze_timed_out;
2234
2235 return 0;
2236
2237 out_wait_freeze_timed_out:
2238 nvme_quiesce_io_queues(ctrl);
2239 nvme_sync_io_queues(ctrl);
2240 nvme_tcp_stop_io_queues(ctrl);
2241 out_cleanup_connect_q:
2242 nvme_cancel_tagset(ctrl);
2243 if (new)
2244 nvme_remove_io_tag_set(ctrl);
2245 out_free_io_queues:
2246 nvme_tcp_free_io_queues(ctrl);
2247 return ret;
2248 }
2249
nvme_tcp_configure_admin_queue(struct nvme_ctrl * ctrl,bool new)2250 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
2251 {
2252 int error;
2253
2254 error = nvme_tcp_alloc_admin_queue(ctrl);
2255 if (error)
2256 return error;
2257
2258 if (new) {
2259 error = nvme_alloc_admin_tag_set(ctrl,
2260 &to_tcp_ctrl(ctrl)->admin_tag_set,
2261 &nvme_tcp_admin_mq_ops,
2262 sizeof(struct nvme_tcp_request));
2263 if (error)
2264 goto out_free_queue;
2265 }
2266
2267 error = nvme_tcp_start_queue(ctrl, 0);
2268 if (error)
2269 goto out_cleanup_tagset;
2270
2271 if (ctrl->opts->concat && !ctrl->tls_pskid)
2272 return 0;
2273
2274 error = nvme_enable_ctrl(ctrl);
2275 if (error)
2276 goto out_stop_queue;
2277
2278 nvme_unquiesce_admin_queue(ctrl);
2279
2280 error = nvme_init_ctrl_finish(ctrl, false);
2281 if (error)
2282 goto out_quiesce_queue;
2283
2284 return 0;
2285
2286 out_quiesce_queue:
2287 nvme_quiesce_admin_queue(ctrl);
2288 blk_sync_queue(ctrl->admin_q);
2289 out_stop_queue:
2290 nvme_tcp_stop_queue(ctrl, 0);
2291 nvme_cancel_admin_tagset(ctrl);
2292 out_cleanup_tagset:
2293 if (new)
2294 nvme_remove_admin_tag_set(ctrl);
2295 out_free_queue:
2296 nvme_tcp_free_admin_queue(ctrl);
2297 return error;
2298 }
2299
nvme_tcp_teardown_admin_queue(struct nvme_ctrl * ctrl,bool remove)2300 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2301 bool remove)
2302 {
2303 nvme_quiesce_admin_queue(ctrl);
2304 blk_sync_queue(ctrl->admin_q);
2305 nvme_tcp_stop_queue(ctrl, 0);
2306 nvme_cancel_admin_tagset(ctrl);
2307 if (remove) {
2308 nvme_unquiesce_admin_queue(ctrl);
2309 nvme_remove_admin_tag_set(ctrl);
2310 }
2311 nvme_tcp_free_admin_queue(ctrl);
2312 if (ctrl->tls_pskid) {
2313 dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n",
2314 ctrl->tls_pskid);
2315 ctrl->tls_pskid = 0;
2316 }
2317 }
2318
nvme_tcp_teardown_io_queues(struct nvme_ctrl * ctrl,bool remove)2319 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2320 bool remove)
2321 {
2322 if (ctrl->queue_count <= 1)
2323 return;
2324 nvme_quiesce_io_queues(ctrl);
2325 nvme_sync_io_queues(ctrl);
2326 nvme_tcp_stop_io_queues(ctrl);
2327 nvme_cancel_tagset(ctrl);
2328 if (remove) {
2329 nvme_unquiesce_io_queues(ctrl);
2330 nvme_remove_io_tag_set(ctrl);
2331 }
2332 nvme_tcp_free_io_queues(ctrl);
2333 }
2334
nvme_tcp_reconnect_or_remove(struct nvme_ctrl * ctrl,int status)2335 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
2336 int status)
2337 {
2338 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2339
2340 /* If we are resetting/deleting then do nothing */
2341 if (state != NVME_CTRL_CONNECTING) {
2342 WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
2343 return;
2344 }
2345
2346 if (nvmf_should_reconnect(ctrl, status)) {
2347 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2348 ctrl->opts->reconnect_delay);
2349 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2350 ctrl->opts->reconnect_delay * HZ);
2351 } else {
2352 dev_info(ctrl->device, "Removing controller (%d)...\n",
2353 status);
2354 nvme_delete_ctrl(ctrl);
2355 }
2356 }
2357
2358 /*
2359 * The TLS key is set by secure concatenation after negotiation has been
2360 * completed on the admin queue. We need to revoke the key when:
2361 * - concatenation is enabled (otherwise it's a static key set by the user)
2362 * and
2363 * - the generated key is present in ctrl->tls_key (otherwise there's nothing
2364 * to revoke)
2365 * and
2366 * - a valid PSK key ID has been set in ctrl->tls_pskid (otherwise TLS
2367 * negotiation has not run).
2368 *
2369 * We cannot always revoke the key as nvme_tcp_alloc_admin_queue() is called
2370 * twice during secure concatenation, once on a 'normal' connection to run the
2371 * DH-HMAC-CHAP negotiation (which generates the key, so it _must not_ be set),
2372 * and once after the negotiation (which uses the key, so it _must_ be set).
2373 */
nvme_tcp_key_revoke_needed(struct nvme_ctrl * ctrl)2374 static bool nvme_tcp_key_revoke_needed(struct nvme_ctrl *ctrl)
2375 {
2376 return ctrl->opts->concat && ctrl->opts->tls_key && ctrl->tls_pskid;
2377 }
2378
nvme_tcp_setup_ctrl(struct nvme_ctrl * ctrl,bool new)2379 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2380 {
2381 struct nvmf_ctrl_options *opts = ctrl->opts;
2382 int ret;
2383
2384 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2385 if (ret)
2386 return ret;
2387
2388 if (ctrl->opts->concat && !ctrl->tls_pskid) {
2389 /* See comments for nvme_tcp_key_revoke_needed() */
2390 dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n");
2391 nvme_stop_keep_alive(ctrl);
2392 nvme_tcp_teardown_admin_queue(ctrl, false);
2393 ret = nvme_tcp_configure_admin_queue(ctrl, false);
2394 if (ret)
2395 goto destroy_admin;
2396 }
2397
2398 if (ctrl->icdoff) {
2399 ret = -EOPNOTSUPP;
2400 dev_err(ctrl->device, "icdoff is not supported!\n");
2401 goto destroy_admin;
2402 }
2403
2404 if (!nvme_ctrl_sgl_supported(ctrl)) {
2405 ret = -EOPNOTSUPP;
2406 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2407 goto destroy_admin;
2408 }
2409
2410 if (opts->queue_size > ctrl->sqsize + 1)
2411 dev_warn(ctrl->device,
2412 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2413 opts->queue_size, ctrl->sqsize + 1);
2414
2415 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2416 dev_warn(ctrl->device,
2417 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2418 ctrl->sqsize + 1, ctrl->maxcmd);
2419 ctrl->sqsize = ctrl->maxcmd - 1;
2420 }
2421
2422 if (ctrl->queue_count > 1) {
2423 ret = nvme_tcp_configure_io_queues(ctrl, new);
2424 if (ret)
2425 goto destroy_admin;
2426 }
2427
2428 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2429 /*
2430 * state change failure is ok if we started ctrl delete,
2431 * unless we're during creation of a new controller to
2432 * avoid races with teardown flow.
2433 */
2434 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2435
2436 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
2437 state != NVME_CTRL_DELETING_NOIO);
2438 WARN_ON_ONCE(new);
2439 ret = -EINVAL;
2440 goto destroy_io;
2441 }
2442
2443 nvme_start_ctrl(ctrl);
2444 return 0;
2445
2446 destroy_io:
2447 if (ctrl->queue_count > 1) {
2448 nvme_quiesce_io_queues(ctrl);
2449 nvme_sync_io_queues(ctrl);
2450 nvme_tcp_stop_io_queues(ctrl);
2451 nvme_cancel_tagset(ctrl);
2452 if (new)
2453 nvme_remove_io_tag_set(ctrl);
2454 nvme_tcp_free_io_queues(ctrl);
2455 }
2456 destroy_admin:
2457 nvme_stop_keep_alive(ctrl);
2458 nvme_tcp_teardown_admin_queue(ctrl, new);
2459 return ret;
2460 }
2461
nvme_tcp_reconnect_ctrl_work(struct work_struct * work)2462 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2463 {
2464 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2465 struct nvme_tcp_ctrl, connect_work);
2466 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2467 int ret;
2468
2469 ++ctrl->nr_reconnects;
2470
2471 ret = nvme_tcp_setup_ctrl(ctrl, false);
2472 if (ret)
2473 goto requeue;
2474
2475 dev_info(ctrl->device, "Successfully reconnected (attempt %d/%d)\n",
2476 ctrl->nr_reconnects, ctrl->opts->max_reconnects);
2477
2478 ctrl->nr_reconnects = 0;
2479
2480 return;
2481
2482 requeue:
2483 dev_info(ctrl->device, "Failed reconnect attempt %d/%d\n",
2484 ctrl->nr_reconnects, ctrl->opts->max_reconnects);
2485 nvme_tcp_reconnect_or_remove(ctrl, ret);
2486 }
2487
nvme_tcp_error_recovery_work(struct work_struct * work)2488 static void nvme_tcp_error_recovery_work(struct work_struct *work)
2489 {
2490 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2491 struct nvme_tcp_ctrl, err_work);
2492 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2493
2494 if (nvme_tcp_key_revoke_needed(ctrl))
2495 nvme_auth_revoke_tls_key(ctrl);
2496 nvme_stop_keep_alive(ctrl);
2497 flush_work(&ctrl->async_event_work);
2498 nvme_tcp_teardown_io_queues(ctrl, false);
2499 /* unquiesce to fail fast pending requests */
2500 nvme_unquiesce_io_queues(ctrl);
2501 nvme_tcp_teardown_admin_queue(ctrl, false);
2502 nvme_unquiesce_admin_queue(ctrl);
2503 nvme_auth_stop(ctrl);
2504
2505 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2506 /* state change failure is ok if we started ctrl delete */
2507 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2508
2509 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
2510 state != NVME_CTRL_DELETING_NOIO);
2511 return;
2512 }
2513
2514 nvme_tcp_reconnect_or_remove(ctrl, 0);
2515 }
2516
nvme_tcp_teardown_ctrl(struct nvme_ctrl * ctrl,bool shutdown)2517 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2518 {
2519 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2520 nvme_quiesce_admin_queue(ctrl);
2521 nvme_disable_ctrl(ctrl, shutdown);
2522 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2523 }
2524
nvme_tcp_delete_ctrl(struct nvme_ctrl * ctrl)2525 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2526 {
2527 nvme_tcp_teardown_ctrl(ctrl, true);
2528 }
2529
nvme_reset_ctrl_work(struct work_struct * work)2530 static void nvme_reset_ctrl_work(struct work_struct *work)
2531 {
2532 struct nvme_ctrl *ctrl =
2533 container_of(work, struct nvme_ctrl, reset_work);
2534 int ret;
2535
2536 if (nvme_tcp_key_revoke_needed(ctrl))
2537 nvme_auth_revoke_tls_key(ctrl);
2538 nvme_stop_ctrl(ctrl);
2539 nvme_tcp_teardown_ctrl(ctrl, false);
2540
2541 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2542 /* state change failure is ok if we started ctrl delete */
2543 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2544
2545 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
2546 state != NVME_CTRL_DELETING_NOIO);
2547 return;
2548 }
2549
2550 ret = nvme_tcp_setup_ctrl(ctrl, false);
2551 if (ret)
2552 goto out_fail;
2553
2554 return;
2555
2556 out_fail:
2557 ++ctrl->nr_reconnects;
2558 nvme_tcp_reconnect_or_remove(ctrl, ret);
2559 }
2560
nvme_tcp_stop_ctrl(struct nvme_ctrl * ctrl)2561 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2562 {
2563 flush_work(&to_tcp_ctrl(ctrl)->err_work);
2564 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2565 }
2566
nvme_tcp_free_ctrl(struct nvme_ctrl * nctrl)2567 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2568 {
2569 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2570
2571 if (list_empty(&ctrl->list))
2572 goto free_ctrl;
2573
2574 mutex_lock(&nvme_tcp_ctrl_mutex);
2575 list_del(&ctrl->list);
2576 mutex_unlock(&nvme_tcp_ctrl_mutex);
2577
2578 nvmf_free_options(nctrl->opts);
2579 free_ctrl:
2580 kfree(ctrl->queues);
2581 kfree(ctrl);
2582 }
2583
nvme_tcp_set_sg_null(struct nvme_command * c)2584 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2585 {
2586 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2587
2588 sg->addr = 0;
2589 sg->length = 0;
2590 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2591 NVME_SGL_FMT_TRANSPORT_A;
2592 }
2593
nvme_tcp_set_sg_inline(struct nvme_tcp_queue * queue,struct nvme_command * c,u32 data_len)2594 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2595 struct nvme_command *c, u32 data_len)
2596 {
2597 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2598
2599 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2600 sg->length = cpu_to_le32(data_len);
2601 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2602 }
2603
nvme_tcp_set_sg_host_data(struct nvme_command * c,u32 data_len)2604 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2605 u32 data_len)
2606 {
2607 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2608
2609 sg->addr = 0;
2610 sg->length = cpu_to_le32(data_len);
2611 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2612 NVME_SGL_FMT_TRANSPORT_A;
2613 }
2614
nvme_tcp_submit_async_event(struct nvme_ctrl * arg)2615 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2616 {
2617 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2618 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2619 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2620 struct nvme_command *cmd = &pdu->cmd;
2621 u8 hdgst = nvme_tcp_hdgst_len(queue);
2622
2623 memset(pdu, 0, sizeof(*pdu));
2624 pdu->hdr.type = nvme_tcp_cmd;
2625 if (queue->hdr_digest)
2626 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2627 pdu->hdr.hlen = sizeof(*pdu);
2628 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2629
2630 cmd->common.opcode = nvme_admin_async_event;
2631 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2632 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2633 nvme_tcp_set_sg_null(cmd);
2634
2635 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2636 ctrl->async_req.offset = 0;
2637 ctrl->async_req.curr_bio = NULL;
2638 ctrl->async_req.data_len = 0;
2639 init_llist_node(&ctrl->async_req.lentry);
2640 INIT_LIST_HEAD(&ctrl->async_req.entry);
2641
2642 nvme_tcp_queue_request(&ctrl->async_req, true);
2643 }
2644
nvme_tcp_complete_timed_out(struct request * rq)2645 static void nvme_tcp_complete_timed_out(struct request *rq)
2646 {
2647 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2648 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2649
2650 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2651 nvmf_complete_timed_out_request(rq);
2652 }
2653
nvme_tcp_timeout(struct request * rq)2654 static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
2655 {
2656 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2657 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2658 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2659 struct nvme_command *cmd = &pdu->cmd;
2660 int qid = nvme_tcp_queue_id(req->queue);
2661
2662 dev_warn(ctrl->device,
2663 "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
2664 rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
2665 nvme_fabrics_opcode_str(qid, cmd), qid);
2666
2667 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
2668 /*
2669 * If we are resetting, connecting or deleting we should
2670 * complete immediately because we may block controller
2671 * teardown or setup sequence
2672 * - ctrl disable/shutdown fabrics requests
2673 * - connect requests
2674 * - initialization admin requests
2675 * - I/O requests that entered after unquiescing and
2676 * the controller stopped responding
2677 *
2678 * All other requests should be cancelled by the error
2679 * recovery work, so it's fine that we fail it here.
2680 */
2681 nvme_tcp_complete_timed_out(rq);
2682 return BLK_EH_DONE;
2683 }
2684
2685 /*
2686 * LIVE state should trigger the normal error recovery which will
2687 * handle completing this request.
2688 */
2689 nvme_tcp_error_recovery(ctrl);
2690 return BLK_EH_RESET_TIMER;
2691 }
2692
nvme_tcp_map_data(struct nvme_tcp_queue * queue,struct request * rq)2693 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2694 struct request *rq)
2695 {
2696 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2697 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2698 struct nvme_command *c = &pdu->cmd;
2699
2700 c->common.flags |= NVME_CMD_SGL_METABUF;
2701
2702 if (!blk_rq_nr_phys_segments(rq))
2703 nvme_tcp_set_sg_null(c);
2704 else if (rq_data_dir(rq) == WRITE &&
2705 req->data_len <= nvme_tcp_inline_data_size(req))
2706 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2707 else
2708 nvme_tcp_set_sg_host_data(c, req->data_len);
2709
2710 return 0;
2711 }
2712
nvme_tcp_setup_cmd_pdu(struct nvme_ns * ns,struct request * rq)2713 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2714 struct request *rq)
2715 {
2716 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2717 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2718 struct nvme_tcp_queue *queue = req->queue;
2719 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2720 blk_status_t ret;
2721
2722 ret = nvme_setup_cmd(ns, rq);
2723 if (ret)
2724 return ret;
2725
2726 req->state = NVME_TCP_SEND_CMD_PDU;
2727 req->status = cpu_to_le16(NVME_SC_SUCCESS);
2728 req->offset = 0;
2729 req->data_sent = 0;
2730 req->pdu_len = 0;
2731 req->pdu_sent = 0;
2732 req->h2cdata_left = 0;
2733 req->data_len = blk_rq_nr_phys_segments(rq) ?
2734 blk_rq_payload_bytes(rq) : 0;
2735 req->curr_bio = rq->bio;
2736 if (req->curr_bio && req->data_len)
2737 nvme_tcp_init_iter(req, rq_data_dir(rq));
2738
2739 if (rq_data_dir(rq) == WRITE &&
2740 req->data_len <= nvme_tcp_inline_data_size(req))
2741 req->pdu_len = req->data_len;
2742
2743 pdu->hdr.type = nvme_tcp_cmd;
2744 pdu->hdr.flags = 0;
2745 if (queue->hdr_digest)
2746 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2747 if (queue->data_digest && req->pdu_len) {
2748 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2749 ddgst = nvme_tcp_ddgst_len(queue);
2750 }
2751 pdu->hdr.hlen = sizeof(*pdu);
2752 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2753 pdu->hdr.plen =
2754 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2755
2756 ret = nvme_tcp_map_data(queue, rq);
2757 if (unlikely(ret)) {
2758 nvme_cleanup_cmd(rq);
2759 dev_err(queue->ctrl->ctrl.device,
2760 "Failed to map data (%d)\n", ret);
2761 return ret;
2762 }
2763
2764 return 0;
2765 }
2766
nvme_tcp_commit_rqs(struct blk_mq_hw_ctx * hctx)2767 static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2768 {
2769 struct nvme_tcp_queue *queue = hctx->driver_data;
2770
2771 if (!llist_empty(&queue->req_list))
2772 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2773 }
2774
nvme_tcp_queue_rq(struct blk_mq_hw_ctx * hctx,const struct blk_mq_queue_data * bd)2775 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2776 const struct blk_mq_queue_data *bd)
2777 {
2778 struct nvme_ns *ns = hctx->queue->queuedata;
2779 struct nvme_tcp_queue *queue = hctx->driver_data;
2780 struct request *rq = bd->rq;
2781 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2782 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2783 blk_status_t ret;
2784
2785 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2786 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2787
2788 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2789 if (unlikely(ret))
2790 return ret;
2791
2792 nvme_start_request(rq);
2793
2794 nvme_tcp_queue_request(req, bd->last);
2795
2796 return BLK_STS_OK;
2797 }
2798
nvme_tcp_map_queues(struct blk_mq_tag_set * set)2799 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2800 {
2801 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
2802
2803 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
2804 }
2805
nvme_tcp_poll(struct blk_mq_hw_ctx * hctx,struct io_comp_batch * iob)2806 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
2807 {
2808 struct nvme_tcp_queue *queue = hctx->driver_data;
2809 struct sock *sk = queue->sock->sk;
2810 int ret;
2811
2812 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2813 return 0;
2814
2815 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2816 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2817 sk_busy_loop(sk, true);
2818 ret = nvme_tcp_try_recv(queue);
2819 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2820 return ret < 0 ? ret : queue->nr_cqe;
2821 }
2822
nvme_tcp_get_address(struct nvme_ctrl * ctrl,char * buf,int size)2823 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2824 {
2825 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
2826 struct sockaddr_storage src_addr;
2827 int ret, len;
2828
2829 len = nvmf_get_address(ctrl, buf, size);
2830
2831 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2832 return len;
2833
2834 mutex_lock(&queue->queue_lock);
2835
2836 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
2837 if (ret > 0) {
2838 if (len > 0)
2839 len--; /* strip trailing newline */
2840 len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
2841 (len) ? "," : "", &src_addr);
2842 }
2843
2844 mutex_unlock(&queue->queue_lock);
2845
2846 return len;
2847 }
2848
2849 static const struct blk_mq_ops nvme_tcp_mq_ops = {
2850 .queue_rq = nvme_tcp_queue_rq,
2851 .commit_rqs = nvme_tcp_commit_rqs,
2852 .complete = nvme_complete_rq,
2853 .init_request = nvme_tcp_init_request,
2854 .exit_request = nvme_tcp_exit_request,
2855 .init_hctx = nvme_tcp_init_hctx,
2856 .timeout = nvme_tcp_timeout,
2857 .map_queues = nvme_tcp_map_queues,
2858 .poll = nvme_tcp_poll,
2859 };
2860
2861 static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2862 .queue_rq = nvme_tcp_queue_rq,
2863 .complete = nvme_complete_rq,
2864 .init_request = nvme_tcp_init_request,
2865 .exit_request = nvme_tcp_exit_request,
2866 .init_hctx = nvme_tcp_init_admin_hctx,
2867 .timeout = nvme_tcp_timeout,
2868 };
2869
2870 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2871 .name = "tcp",
2872 .module = THIS_MODULE,
2873 .flags = NVME_F_FABRICS | NVME_F_BLOCKING,
2874 .reg_read32 = nvmf_reg_read32,
2875 .reg_read64 = nvmf_reg_read64,
2876 .reg_write32 = nvmf_reg_write32,
2877 .subsystem_reset = nvmf_subsystem_reset,
2878 .free_ctrl = nvme_tcp_free_ctrl,
2879 .submit_async_event = nvme_tcp_submit_async_event,
2880 .delete_ctrl = nvme_tcp_delete_ctrl,
2881 .get_address = nvme_tcp_get_address,
2882 .stop_ctrl = nvme_tcp_stop_ctrl,
2883 .get_virt_boundary = nvmf_get_virt_boundary,
2884 };
2885
2886 static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options * opts)2887 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2888 {
2889 struct nvme_tcp_ctrl *ctrl;
2890 bool found = false;
2891
2892 mutex_lock(&nvme_tcp_ctrl_mutex);
2893 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2894 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2895 if (found)
2896 break;
2897 }
2898 mutex_unlock(&nvme_tcp_ctrl_mutex);
2899
2900 return found;
2901 }
2902
nvme_tcp_alloc_ctrl(struct device * dev,struct nvmf_ctrl_options * opts)2903 static struct nvme_tcp_ctrl *nvme_tcp_alloc_ctrl(struct device *dev,
2904 struct nvmf_ctrl_options *opts)
2905 {
2906 struct nvme_tcp_ctrl *ctrl;
2907 int ret;
2908
2909 ctrl = kzalloc_obj(*ctrl);
2910 if (!ctrl)
2911 return ERR_PTR(-ENOMEM);
2912
2913 INIT_LIST_HEAD(&ctrl->list);
2914 ctrl->ctrl.opts = opts;
2915 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2916 opts->nr_poll_queues + 1;
2917 ctrl->ctrl.sqsize = opts->queue_size - 1;
2918 ctrl->ctrl.kato = opts->kato;
2919
2920 INIT_DELAYED_WORK(&ctrl->connect_work,
2921 nvme_tcp_reconnect_ctrl_work);
2922 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2923 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2924
2925 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2926 opts->trsvcid =
2927 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2928 if (!opts->trsvcid) {
2929 ret = -ENOMEM;
2930 goto out_free_ctrl;
2931 }
2932 opts->mask |= NVMF_OPT_TRSVCID;
2933 }
2934
2935 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2936 opts->traddr, opts->trsvcid, &ctrl->addr);
2937 if (ret) {
2938 pr_err("malformed address passed: %s:%s\n",
2939 opts->traddr, opts->trsvcid);
2940 goto out_free_ctrl;
2941 }
2942
2943 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2944 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2945 opts->host_traddr, NULL, &ctrl->src_addr);
2946 if (ret) {
2947 pr_err("malformed src address passed: %s\n",
2948 opts->host_traddr);
2949 goto out_free_ctrl;
2950 }
2951 }
2952
2953 if (opts->mask & NVMF_OPT_HOST_IFACE) {
2954 if (!__dev_get_by_name(&init_net, opts->host_iface)) {
2955 pr_err("invalid interface passed: %s\n",
2956 opts->host_iface);
2957 ret = -ENODEV;
2958 goto out_free_ctrl;
2959 }
2960 }
2961
2962 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2963 ret = -EALREADY;
2964 goto out_free_ctrl;
2965 }
2966
2967 ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count);
2968 if (!ctrl->queues) {
2969 ret = -ENOMEM;
2970 goto out_free_ctrl;
2971 }
2972
2973 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2974 if (ret)
2975 goto out_kfree_queues;
2976
2977 return ctrl;
2978 out_kfree_queues:
2979 kfree(ctrl->queues);
2980 out_free_ctrl:
2981 kfree(ctrl);
2982 return ERR_PTR(ret);
2983 }
2984
nvme_tcp_create_ctrl(struct device * dev,struct nvmf_ctrl_options * opts)2985 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2986 struct nvmf_ctrl_options *opts)
2987 {
2988 struct nvme_tcp_ctrl *ctrl;
2989 int ret;
2990
2991 ctrl = nvme_tcp_alloc_ctrl(dev, opts);
2992 if (IS_ERR(ctrl))
2993 return ERR_CAST(ctrl);
2994
2995 ret = nvme_add_ctrl(&ctrl->ctrl);
2996 if (ret)
2997 goto out_put_ctrl;
2998
2999 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3000 WARN_ON_ONCE(1);
3001 ret = -EINTR;
3002 goto out_uninit_ctrl;
3003 }
3004
3005 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
3006 if (ret)
3007 goto out_uninit_ctrl;
3008
3009 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
3010 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
3011
3012 mutex_lock(&nvme_tcp_ctrl_mutex);
3013 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
3014 mutex_unlock(&nvme_tcp_ctrl_mutex);
3015
3016 return &ctrl->ctrl;
3017
3018 out_uninit_ctrl:
3019 nvme_uninit_ctrl(&ctrl->ctrl);
3020 out_put_ctrl:
3021 nvme_put_ctrl(&ctrl->ctrl);
3022 if (ret > 0)
3023 ret = -EIO;
3024 return ERR_PTR(ret);
3025 }
3026
3027 static struct nvmf_transport_ops nvme_tcp_transport = {
3028 .name = "tcp",
3029 .module = THIS_MODULE,
3030 .required_opts = NVMF_OPT_TRADDR,
3031 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
3032 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
3033 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
3034 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
3035 NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
3036 NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY | NVMF_OPT_CONCAT,
3037 .create_ctrl = nvme_tcp_create_ctrl,
3038 };
3039
nvme_tcp_init_module(void)3040 static int __init nvme_tcp_init_module(void)
3041 {
3042 unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
3043 int cpu;
3044
3045 BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
3046 BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
3047 BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
3048 BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
3049 BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
3050 BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
3051 BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
3052 BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
3053
3054 if (wq_unbound)
3055 wq_flags |= WQ_UNBOUND;
3056
3057 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
3058 if (!nvme_tcp_wq)
3059 return -ENOMEM;
3060
3061 for_each_possible_cpu(cpu)
3062 atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
3063
3064 nvmf_register_transport(&nvme_tcp_transport);
3065 return 0;
3066 }
3067
nvme_tcp_cleanup_module(void)3068 static void __exit nvme_tcp_cleanup_module(void)
3069 {
3070 struct nvme_tcp_ctrl *ctrl;
3071
3072 nvmf_unregister_transport(&nvme_tcp_transport);
3073
3074 mutex_lock(&nvme_tcp_ctrl_mutex);
3075 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
3076 nvme_delete_ctrl(&ctrl->ctrl);
3077 mutex_unlock(&nvme_tcp_ctrl_mutex);
3078 flush_workqueue(nvme_delete_wq);
3079
3080 destroy_workqueue(nvme_tcp_wq);
3081 }
3082
3083 module_init(nvme_tcp_init_module);
3084 module_exit(nvme_tcp_cleanup_module);
3085
3086 MODULE_DESCRIPTION("NVMe host TCP transport driver");
3087 MODULE_LICENSE("GPL v2");
3088 MODULE_ALIAS("nvme-tcp");
3089