// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
 * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
 * All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include "nvmet.h"

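/*
 * Delayed work run when a DH-HMAC-CHAP transaction does not complete in
 * time: reset the negotiation state so a new transaction can be started.
 */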
static void nvmet_auth_expired_work(struct work_struct *work)
{
	struct nvmet_sq *sq = container_of(to_delayed_work(work),
					   struct nvmet_sq, auth_expired_work);

	pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
		 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	sq->dhchap_tid = -1;
}

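/* Reset a submission queue to the initial (unauthenticated) DH-HMAC-CHAP state. */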
void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
	/* Initialize in-band authentication */
	INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
	sq->authenticated = false;
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}

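/*
 * Parse the host's AUTH_Negotiate payload: record the transaction id,
 * validate any secure channel concatenation request, and select the HMAC
 * hash and DH group to use, preferring the configured values and falling
 * back to the first usable entry offered by the host.
 * Returns 0 on success or a DH-HMAC-CHAP failure reason code.
 */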
static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_negotiate_data *data = d;
	int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;

	pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
		 data->auth_protocol[0].dhchap.halen,
		 data->auth_protocol[0].dhchap.dhlen);
	req->sq->dhchap_tid = le16_to_cpu(data->t_id);
	if (data->sc_c != NVME_AUTH_SECP_NOSC) {
		if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
			return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
		/* Secure concatenation can only be enabled on the admin queue */
		if (req->sq->qid)
			return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
		switch (data->sc_c) {
		case NVME_AUTH_SECP_NEWTLSPSK:
			if (nvmet_queue_tls_keyid(req->sq))
				return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
			break;
		case NVME_AUTH_SECP_REPLACETLSPSK:
			if (!nvmet_queue_tls_keyid(req->sq))
				return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
			break;
		default:
			return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
		}
		ctrl->concat = true;
	}

	if (data->napd != 1)
		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;

	if (data->auth_protocol[0].dhchap.authid !=
	    NVME_AUTH_DHCHAP_AUTH_ID)
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;

	for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
		u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];

		if (!fallback_hash_id &&
		    crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
			fallback_hash_id = host_hmac_id;
		if (ctrl->shash_id != host_hmac_id)
			continue;
		hash_id = ctrl->shash_id;
		break;
	}
	if (hash_id == 0) {
		if (fallback_hash_id == 0) {
			pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_hmac_name(fallback_hash_id));
		ctrl->shash_id = fallback_hash_id;
	}

	dhgid = -1;
	fallback_dhgid = -1;
	for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
		int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];

		if (tmp_dhgid == ctrl->dh_gid) {
			dhgid = tmp_dhgid;
			break;
		}
		if (fallback_dhgid < 0) {
			const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);

			if (crypto_has_kpp(kpp, 0, 0))
				fallback_dhgid = tmp_dhgid;
		}
	}
	if (dhgid < 0) {
		if (fallback_dhgid < 0) {
			pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_dhgroup_name(fallback_dhgid));
		ctrl->dh_gid = fallback_dhgid;
	}
	if (ctrl->dh_gid == NVME_AUTH_DHGROUP_NULL && ctrl->concat) {
		pr_debug("%s: ctrl %d qid %d: NULL DH group invalid "
			 "for secure channel concatenation\n", __func__,
			 ctrl->cntlid, req->sq->qid);
		return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
	}
	pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
	return 0;
}

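/*
 * Process the host's DH-HMAC-CHAP_Reply message: derive the DH session
 * key if a DH value was supplied, recompute the expected host response
 * and compare it against the received value, and stash the controller
 * challenge (C2) and sequence number (S2) when the host requested
 * bidirectional authentication.
 * Returns 0 on success or a DH-HMAC-CHAP failure reason code.
 */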
static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_reply_data *data = d;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	u8 *response;

	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->hl, data->cvalid, dhvlen);

	if (dhvlen) {
		if (!ctrl->dh_tfm)
			return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
					    dhvlen) < 0)
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
	}

	response = kmalloc(data->hl, GFP_KERNEL);
	if (!response)
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;

	if (!ctrl->host_key) {
		pr_warn("ctrl %d qid %d no host key\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
		pr_debug("ctrl %d qid %d host hash failed\n",
			 ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}

	if (memcmp(data->rval, response, data->hl)) {
		pr_info("ctrl %d qid %d host response mismatch\n",
			ctrl->cntlid, req->sq->qid);
		pr_debug("ctrl %d qid %d rval %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, response);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	kfree(response);
	pr_debug("%s: ctrl %d qid %d host authenticated\n",
		 __func__, ctrl->cntlid, req->sq->qid);
	if (!data->cvalid && ctrl->concat) {
		pr_debug("%s: ctrl %d qid %d invalid challenge\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
	if (data->cvalid) {
		req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
					     GFP_KERNEL);
		if (!req->sq->dhchap_c2)
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;

		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
			 req->sq->dhchap_c2);
	}
	/*
	 * NVMe Base Spec 2.2 section 8.3.4.5.4: DH-HMAC-CHAP_Reply message
	 * Sequence Number (SEQNUM): [ .. ]
	 * The value 0h is used to indicate that bidirectional authentication
	 * is not performed, but a challenge value C2 is carried in order to
	 * generate a pre-shared key (PSK) for subsequent establishment of a
	 * secure channel.
	 */
	if (req->sq->dhchap_s2 == 0) {
		if (ctrl->concat)
			nvmet_auth_insert_psk(req->sq);
		req->sq->authenticated = true;
		kfree(req->sq->dhchap_c2);
		req->sq->dhchap_c2 = NULL;
	} else if (!data->cvalid)
		req->sq->authenticated = true;

	return 0;
}

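/* Extract the reason code explanation from an AUTH_Failure2 message. */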
static u8 nvmet_auth_failure2(void *d)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	return data->rescode_exp;
}

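/* Transfer length (TL) requested by an Authentication Send command. */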
u32 nvmet_auth_send_data_len(struct nvmet_req *req)
{
	return le32_to_cpu(req->cmd->auth_send.tl);
}

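/*
 * Handler for the fabrics Authentication Send command: validate the
 * SECP/SPSP fields and transfer length, copy in the host payload, and
 * advance the DH-HMAC-CHAP state machine according to the message type
 * (negotiate, reply, success2 or failure2). The expiry work is re-armed
 * for every intermediate state.
 */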
void nvmet_execute_auth_send(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_success2_data *data;
	void *d;
	u32 tl;
	u16 status = 0;
	u8 dhchap_status;

	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, secp);
		goto done;
	}
	if (req->cmd->auth_send.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_send.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp1);
		goto done;
	}
	tl = nvmet_auth_send_data_len(req);
	if (!tl) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, tl);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, tl)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
		return;
	}

	d = kmalloc(tl, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}

	status = nvmet_copy_from_sgl(req, 0, d, tl);
	if (status)
		goto done_kfree;

	data = d;
	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
		 req->sq->dhchap_step);
	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
		goto done_failure1;
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
			/* Restart negotiation */
			pr_debug("%s: ctrl %d qid %d reset negotiation\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			if (!req->sq->qid) {
				dhchap_status = nvmet_setup_auth(ctrl, req->sq);
				if (dhchap_status) {
					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
					       ctrl->cntlid);
					req->sq->dhchap_status = dhchap_status;
					req->sq->dhchap_step =
						NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
					goto done_kfree;
				}
			}
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		} else if (data->auth_id != req->sq->dhchap_step)
			goto done_failure1;
		/* Validate negotiation parameters */
		dhchap_status = nvmet_auth_negotiate(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	}
	if (data->auth_id != req->sq->dhchap_step) {
		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 data->auth_id, req->sq->dhchap_step);
		goto done_failure1;
	}
	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 le16_to_cpu(data->t_id),
			 req->sq->dhchap_tid);
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		goto done_kfree;
	}

	switch (data->auth_id) {
	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
		dhchap_status = nvmet_auth_reply(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
		if (ctrl->concat)
			nvmet_auth_insert_psk(req->sq);
		req->sq->authenticated = true;
		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
		dhchap_status = nvmet_auth_failure2(d);
		if (dhchap_status) {
			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
				ctrl->cntlid, req->sq->qid, dhchap_status);
			req->sq->dhchap_status = dhchap_status;
			req->sq->authenticated = false;
		}
		goto done_kfree;
	default:
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
		req->sq->authenticated = false;
		goto done_kfree;
	}
done_failure1:
	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;

done_kfree:
	kfree(d);
done:
	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid,
		 req->sq->dhchap_status, req->sq->dhchap_step);
	if (status)
		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 status, req->error_loc);
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;

		mod_delayed_work(system_wq, &req->sq->auth_expired_work,
				 auth_expire_secs * HZ);
		goto complete;
	}
	/* Final states, clear up variables */
	nvmet_auth_sq_free(req->sq);
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
		nvmet_ctrl_fatal_error(ctrl);

complete:
	nvmet_req_complete(req, status);
}

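/*
 * Build the DH-HMAC-CHAP_Challenge message in @d: pick a fresh sequence
 * number, generate a random challenge value (C1), and, if a DH group was
 * negotiated, append the controller's DH public value.
 * Returns 0 on success or a negative errno.
 */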
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_challenge_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int ret = 0;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
	int data_size = sizeof(*d) + hash_len;

	if (ctrl->dh_tfm)
		data_size += ctrl->dh_keysize;
	if (al < data_size) {
		pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
			 al, data_size);
		return -EINVAL;
	}
	memset(data, 0, data_size);
	req->sq->dhchap_s1 = nvme_auth_get_seqnum();
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hashid = ctrl->shash_id;
	data->hl = hash_len;
	data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
	req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
	if (!req->sq->dhchap_c1)
		return -ENOMEM;
	get_random_bytes(req->sq->dhchap_c1, data->hl);
	memcpy(data->cval, req->sq->dhchap_c1, data->hl);
	if (ctrl->dh_tfm) {
		data->dhgid = ctrl->dh_gid;
		data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
		ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
						  ctrl->dh_keysize);
	}
	pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
		 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
		 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
	return ret;
}

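/*
 * Build the DH-HMAC-CHAP_Success1 message. If the host sent a challenge
 * (C2), compute and include the controller response so the host can
 * authenticate the controller (bidirectional authentication).
 * Returns 0 on success or a DH-HMAC-CHAP failure reason code.
 */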
static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_success1_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);

	WARN_ON(al < sizeof(*data));
	memset(data, 0, sizeof(*data));
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hl = hash_len;
	if (req->sq->dhchap_c2) {
		if (!ctrl->ctrl_key) {
			pr_warn("ctrl %d qid %d no ctrl key\n",
				ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;
		}
		if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		data->rvalid = 1;
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
	}
	return 0;
}

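/* Build the DH-HMAC-CHAP_Failure1 message carrying the stored failure status. */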
static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	WARN_ON(al < sizeof(*data));
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = req->sq->dhchap_status;
}

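/* Allocation length (AL) requested by an Authentication Receive command. */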
u32 nvmet_auth_receive_data_len(struct nvmet_req *req)
{
	return le32_to_cpu(req->cmd->auth_receive.al);
}

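/*
 * Handler for the fabrics Authentication Receive command: validate the
 * SECP/SPSP fields and allocation length, then generate the
 * controller-to-host message (challenge, success1 or failure1) matching
 * the current DH-HMAC-CHAP step and copy it back to the host.
 */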
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	void *d;
	u32 al;
	u16 status = 0;

	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, secp);
		goto done;
	}
	if (req->cmd->auth_receive.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_receive.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp1);
		goto done;
	}
	al = nvmet_auth_receive_data_len(req);
	if (!al) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, al);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, al)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
		return;
	}

	d = kmalloc(al, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}
	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
	switch (req->sq->dhchap_step) {
	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
		if (nvmet_auth_challenge(req, d, al) < 0) {
			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			status = NVME_SC_INTERNAL;
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
		status = nvmet_auth_success1(req, d, al);
		if (status) {
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
		req->sq->authenticated = false;
		nvmet_auth_failure1(req, d, al);
		pr_warn("ctrl %d qid %d failure1 (%x)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
		break;
	default:
		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		nvmet_auth_failure1(req, d, al);
		status = 0;
		break;
	}

	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
done:
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
		nvmet_auth_sq_free(req->sq);
	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}
	nvmet_req_complete(req, status);
}