// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
 * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
 * All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include "nvmet.h"

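/*
 * Timeout handler for an in-flight DH-HMAC-CHAP transaction: drop the
 * transaction id and fall back to the negotiate state so the host has
 * to restart authentication from the beginning.
 */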
static void nvmet_auth_expired_work(struct work_struct *work)
{
	struct nvmet_sq *sq = container_of(to_delayed_work(work),
			struct nvmet_sq, auth_expired_work);

	pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
		 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	sq->dhchap_tid = -1;
}

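/* Reset the per-queue DH-HMAC-CHAP state machine to its initial state. */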
void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
	/* Initialize in-band authentication */
	INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
	sq->authenticated = false;
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}

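/*
 * Handle an AUTH_Negotiate message: record the transaction id, validate
 * any secure channel concatenation request (sc_c), and select an HMAC
 * hash and DH group from the lists proposed by the host, preferring the
 * values already configured for this controller and otherwise falling
 * back to the first proposal the kernel crypto API can provide.
 * Returns 0 on success or a DH-HMAC-CHAP failure reason code.
 */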
static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_negotiate_data *data = d;
	int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;

	pr_debug("%s: ctrl %d qid %d: data sc_c %d napd %d authid %d halen %d dhlen %d\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
		 data->auth_protocol[0].dhchap.halen,
		 data->auth_protocol[0].dhchap.dhlen);
	req->sq->dhchap_tid = le16_to_cpu(data->t_id);
	req->sq->sc_c = data->sc_c;
	if (data->sc_c != NVME_AUTH_SECP_NOSC) {
		if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
			return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
		/* Secure concatenation can only be enabled on the admin queue */
		if (req->sq->qid)
			return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
		switch (data->sc_c) {
		case NVME_AUTH_SECP_NEWTLSPSK:
			if (nvmet_queue_tls_keyid(req->sq))
				return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
			break;
		case NVME_AUTH_SECP_REPLACETLSPSK:
			if (!nvmet_queue_tls_keyid(req->sq))
				return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
			break;
		default:
			return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
		}
		ctrl->concat = true;
	}

	if (data->napd != 1)
		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;

	if (data->auth_protocol[0].dhchap.authid !=
	    NVME_AUTH_DHCHAP_AUTH_ID)
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;

	for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
		u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];

		if (!fallback_hash_id &&
		    crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
			fallback_hash_id = host_hmac_id;
		if (ctrl->shash_id != host_hmac_id)
			continue;
		hash_id = ctrl->shash_id;
		break;
	}
	if (hash_id == 0) {
		if (fallback_hash_id == 0) {
			pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_hmac_name(fallback_hash_id));
		ctrl->shash_id = fallback_hash_id;
	}

	dhgid = -1;
	fallback_dhgid = -1;
	for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
		int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];

		if (tmp_dhgid == ctrl->dh_gid) {
			dhgid = tmp_dhgid;
			break;
		}
		if (fallback_dhgid < 0) {
			const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);

			if (crypto_has_kpp(kpp, 0, 0))
				fallback_dhgid = tmp_dhgid;
		}
	}
	if (dhgid < 0) {
		if (fallback_dhgid < 0) {
			pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_dhgroup_name(fallback_dhgid));
		ctrl->dh_gid = fallback_dhgid;
	}
	if (ctrl->dh_gid == NVME_AUTH_DHGROUP_NULL && ctrl->concat) {
		pr_debug("%s: ctrl %d qid %d: NULL DH group invalid "
			 "for secure channel concatenation\n", __func__,
			 ctrl->cntlid, req->sq->qid);
		return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
	}
	pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
	return 0;
}

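/*
 * Handle a DH-HMAC-CHAP_Reply message: derive the DH session key if a DH
 * value was supplied, recompute the expected host response and compare it
 * against the response carried in the message, and record the controller
 * challenge (C2) and sequence number (S2) when the host requests
 * bidirectional authentication or secure channel concatenation.
 * Returns 0 on success or a DH-HMAC-CHAP failure reason code.
 */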
static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_reply_data *data = d;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	u8 *response;

	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->hl, data->cvalid, dhvlen);

	if (dhvlen) {
		if (!ctrl->dh_tfm)
			return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
					    dhvlen) < 0)
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
	}

	response = kmalloc(data->hl, GFP_KERNEL);
	if (!response)
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;

	if (!ctrl->host_key) {
		pr_warn("ctrl %d qid %d no host key\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
		pr_debug("ctrl %d qid %d host hash failed\n",
			 ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}

	if (memcmp(data->rval, response, data->hl)) {
		pr_info("ctrl %d qid %d host response mismatch\n",
			ctrl->cntlid, req->sq->qid);
		pr_debug("ctrl %d qid %d rval %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, response);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	kfree(response);
	pr_debug("%s: ctrl %d qid %d host authenticated\n",
		 __func__, ctrl->cntlid, req->sq->qid);
	if (!data->cvalid && ctrl->concat) {
		pr_debug("%s: ctrl %d qid %d invalid challenge\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
	if (data->cvalid) {
		req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
					     GFP_KERNEL);
		if (!req->sq->dhchap_c2)
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;

		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
			 req->sq->dhchap_c2);
	}
	/*
	 * NVMe Base Spec 2.2 section 8.3.4.5.4: DH-HMAC-CHAP_Reply message
	 * Sequence Number (SEQNUM): [ .. ]
	 * The value 0h is used to indicate that bidirectional authentication
	 * is not performed, but a challenge value C2 is carried in order to
	 * generate a pre-shared key (PSK) for subsequent establishment of a
	 * secure channel.
	 */
	if (req->sq->dhchap_s2 == 0) {
		if (ctrl->concat)
			nvmet_auth_insert_psk(req->sq);
		req->sq->authenticated = true;
		kfree(req->sq->dhchap_c2);
		req->sq->dhchap_c2 = NULL;
	} else if (!data->cvalid)
		req->sq->authenticated = true;

	return 0;
}

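/* Return the reason code explanation carried in an AUTH_Failure2 message. */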
static u8 nvmet_auth_failure2(void *d)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	return data->rescode_exp;
}

u32 nvmet_auth_send_data_len(struct nvmet_req *req)
{
	return le32_to_cpu(req->cmd->auth_send.tl);
}

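/*
 * Handler for the Authentication Send fabrics command: validate the SECP
 * and SPSP fields, copy in the host payload, and advance the per-queue
 * DH-HMAC-CHAP state machine according to the message received (negotiate,
 * reply, success2 or failure2). Out-of-sequence messages or a stale
 * transaction id move the queue into the failure state; the expiration
 * timer is re-armed unless a final state has been reached.
 */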
void nvmet_execute_auth_send(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_success2_data *data;
	void *d;
	u32 tl;
	u16 status = 0;
	u8 dhchap_status;

	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, secp);
		goto done;
	}
	if (req->cmd->auth_send.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_send.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp1);
		goto done;
	}
	tl = nvmet_auth_send_data_len(req);
	if (!tl) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, tl);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, tl)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
		return;
	}

	d = kmalloc(tl, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}

	status = nvmet_copy_from_sgl(req, 0, d, tl);
	if (status)
		goto done_kfree;

	data = d;
	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
		 req->sq->dhchap_step);
	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
		goto done_failure1;
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
			/* Restart negotiation */
			pr_debug("%s: ctrl %d qid %d reset negotiation\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			if (!req->sq->qid) {
				dhchap_status = nvmet_setup_auth(ctrl, req->sq);
				if (dhchap_status) {
					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
					       ctrl->cntlid);
					req->sq->dhchap_status = dhchap_status;
					req->sq->dhchap_step =
						NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
					goto done_kfree;
				}
			}
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		} else if (data->auth_id != req->sq->dhchap_step)
			goto done_failure1;
		/* Validate negotiation parameters */
		dhchap_status = nvmet_auth_negotiate(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	}
	if (data->auth_id != req->sq->dhchap_step) {
		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 data->auth_id, req->sq->dhchap_step);
		goto done_failure1;
	}
	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 le16_to_cpu(data->t_id),
			 req->sq->dhchap_tid);
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		goto done_kfree;
	}

	switch (data->auth_id) {
	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
		dhchap_status = nvmet_auth_reply(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
		if (ctrl->concat)
			nvmet_auth_insert_psk(req->sq);
		req->sq->authenticated = true;
		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
		dhchap_status = nvmet_auth_failure2(d);
		if (dhchap_status) {
			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
				ctrl->cntlid, req->sq->qid, dhchap_status);
			req->sq->dhchap_status = dhchap_status;
			req->sq->authenticated = false;
		}
		goto done_kfree;
	default:
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
		req->sq->authenticated = false;
		goto done_kfree;
	}
done_failure1:
	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;

done_kfree:
	kfree(d);
done:
	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid,
		 req->sq->dhchap_status, req->sq->dhchap_step);
	if (status)
		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 status, req->error_loc);
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;

		mod_delayed_work(system_wq, &req->sq->auth_expired_work,
				 auth_expire_secs * HZ);
		goto complete;
	}
	/* Final states, clear up variables */
	nvmet_auth_sq_free(req->sq);
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
		nvmet_ctrl_fatal_error(ctrl);

complete:
	nvmet_req_complete(req, status);
}

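/*
 * Build a DH-HMAC-CHAP_Challenge message in @d: allocate a new sequence
 * number, generate a random challenge value (C1) of the selected hash
 * length, and append the controller's DH value when a DH group has been
 * negotiated.
 */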
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_challenge_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int ret = 0;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
	int data_size = sizeof(*d) + hash_len;

	if (ctrl->dh_tfm)
		data_size += ctrl->dh_keysize;
	if (al < data_size) {
		pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
			 al, data_size);
		return -EINVAL;
	}
	memset(data, 0, data_size);
	req->sq->dhchap_s1 = nvme_auth_get_seqnum();
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hashid = ctrl->shash_id;
	data->hl = hash_len;
	data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
	req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
	if (!req->sq->dhchap_c1)
		return -ENOMEM;
	get_random_bytes(req->sq->dhchap_c1, data->hl);
	memcpy(data->cval, req->sq->dhchap_c1, data->hl);
	if (ctrl->dh_tfm) {
		data->dhgid = ctrl->dh_gid;
		data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
		ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
						  ctrl->dh_keysize);
	}
	pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
		 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
		 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
	return ret;
}

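/*
 * Build a DH-HMAC-CHAP_Success1 message in @d; if the host sent a
 * controller challenge (C2), compute and include the controller response
 * so the host can authenticate the controller in turn.
 */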
static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_success1_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);

	WARN_ON(al < sizeof(*data));
	memset(data, 0, sizeof(*data));
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hl = hash_len;
	if (req->sq->dhchap_c2) {
		if (!ctrl->ctrl_key) {
			pr_warn("ctrl %d qid %d no ctrl key\n",
				ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;
		}
		if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		data->rvalid = 1;
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
	}
	return 0;
}

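/*
 * Build an AUTH_Failure1 message in @d carrying the queue's current
 * DH-HMAC-CHAP failure reason.
 */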
static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	WARN_ON(al < sizeof(*data));
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = req->sq->dhchap_status;
}

u32 nvmet_auth_receive_data_len(struct nvmet_req *req)
{
	return le32_to_cpu(req->cmd->auth_receive.al);
}

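/*
 * Handler for the Authentication Receive fabrics command: validate the
 * SECP and SPSP fields, build the message the host is expected to fetch
 * for the current state (challenge, success1 or failure1), and copy it
 * back through the SGL.
 */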
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	void *d;
	u32 al;
	u16 status = 0;

	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, secp);
		goto done;
	}
	if (req->cmd->auth_receive.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_receive.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp1);
		goto done;
	}
	al = nvmet_auth_receive_data_len(req);
	if (!al) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, al);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, al)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
		return;
	}

	d = kmalloc(al, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}
	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
	switch (req->sq->dhchap_step) {
	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
		if (nvmet_auth_challenge(req, d, al) < 0) {
			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			status = NVME_SC_INTERNAL;
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
		status = nvmet_auth_success1(req, d, al);
		if (status) {
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
		req->sq->authenticated = false;
		nvmet_auth_failure1(req, d, al);
		pr_warn("ctrl %d qid %d failure1 (%x)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
		break;
	default:
		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		nvmet_auth_failure1(req, d, al);
		status = 0;
		break;
	}

	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
done:
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
		nvmet_auth_sq_free(req->sq);
	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}
	nvmet_req_complete(req, status);
}