1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVMe over Fabrics DH-HMAC-CHAP authentication command handling. 4 * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions. 5 * All rights reserved. 6 */ 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 #include <linux/blkdev.h> 9 #include <linux/random.h> 10 #include <linux/nvme-auth.h> 11 #include <crypto/hash.h> 12 #include <crypto/kpp.h> 13 #include "nvmet.h" 14 15 static void nvmet_auth_expired_work(struct work_struct *work) 16 { 17 struct nvmet_sq *sq = container_of(to_delayed_work(work), 18 struct nvmet_sq, auth_expired_work); 19 20 pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n", 21 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid); 22 sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; 23 sq->dhchap_tid = -1; 24 } 25 26 void nvmet_auth_sq_init(struct nvmet_sq *sq) 27 { 28 /* Initialize in-band authentication */ 29 INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work); 30 sq->authenticated = false; 31 sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; 32 } 33 34 static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d) 35 { 36 struct nvmet_ctrl *ctrl = req->sq->ctrl; 37 struct nvmf_auth_dhchap_negotiate_data *data = d; 38 int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid; 39 40 pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n", 41 __func__, ctrl->cntlid, req->sq->qid, 42 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid, 43 data->auth_protocol[0].dhchap.halen, 44 data->auth_protocol[0].dhchap.dhlen); 45 req->sq->dhchap_tid = le16_to_cpu(data->t_id); 46 if (data->sc_c) 47 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; 48 49 if (data->napd != 1) 50 return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 51 52 if (data->auth_protocol[0].dhchap.authid != 53 NVME_AUTH_DHCHAP_AUTH_ID) 54 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 55 56 for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) { 57 u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i]; 58 59 if (!fallback_hash_id && 60 crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0)) 61 fallback_hash_id = host_hmac_id; 62 if (ctrl->shash_id != host_hmac_id) 63 continue; 64 hash_id = ctrl->shash_id; 65 break; 66 } 67 if (hash_id == 0) { 68 if (fallback_hash_id == 0) { 69 pr_debug("%s: ctrl %d qid %d: no usable hash found\n", 70 __func__, ctrl->cntlid, req->sq->qid); 71 return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 72 } 73 pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n", 74 __func__, ctrl->cntlid, req->sq->qid, 75 nvme_auth_hmac_name(fallback_hash_id)); 76 ctrl->shash_id = fallback_hash_id; 77 } 78 79 dhgid = -1; 80 fallback_dhgid = -1; 81 for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) { 82 int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30]; 83 84 if (tmp_dhgid != ctrl->dh_gid) { 85 dhgid = tmp_dhgid; 86 break; 87 } 88 if (fallback_dhgid < 0) { 89 const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid); 90 91 if (crypto_has_kpp(kpp, 0, 0)) 92 fallback_dhgid = tmp_dhgid; 93 } 94 } 95 if (dhgid < 0) { 96 if (fallback_dhgid < 0) { 97 pr_debug("%s: ctrl %d qid %d: no usable DH group found\n", 98 __func__, ctrl->cntlid, req->sq->qid); 99 return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; 100 } 101 pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n", 102 __func__, ctrl->cntlid, req->sq->qid, 103 nvme_auth_dhgroup_name(fallback_dhgid)); 104 ctrl->dh_gid = fallback_dhgid; 105 } 106 pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n", 107 __func__, ctrl->cntlid, req->sq->qid, 108 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid); 109 return 0; 110 } 111 112 static u8 nvmet_auth_reply(struct nvmet_req *req, void *d) 113 { 114 struct nvmet_ctrl *ctrl = req->sq->ctrl; 115 struct nvmf_auth_dhchap_reply_data *data = d; 116 u16 dhvlen = le16_to_cpu(data->dhvlen); 117 u8 *response; 118 119 pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n", 120 __func__, ctrl->cntlid, req->sq->qid, 121 data->hl, data->cvalid, dhvlen); 122 123 if (dhvlen) { 124 if (!ctrl->dh_tfm) 125 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 126 if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl, 127 dhvlen) < 0) 128 return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; 129 } 130 131 response = kmalloc(data->hl, GFP_KERNEL); 132 if (!response) 133 return NVME_AUTH_DHCHAP_FAILURE_FAILED; 134 135 if (!ctrl->host_key) { 136 pr_warn("ctrl %d qid %d no host key\n", 137 ctrl->cntlid, req->sq->qid); 138 kfree(response); 139 return NVME_AUTH_DHCHAP_FAILURE_FAILED; 140 } 141 if (nvmet_auth_host_hash(req, response, data->hl) < 0) { 142 pr_debug("ctrl %d qid %d host hash failed\n", 143 ctrl->cntlid, req->sq->qid); 144 kfree(response); 145 return NVME_AUTH_DHCHAP_FAILURE_FAILED; 146 } 147 148 if (memcmp(data->rval, response, data->hl)) { 149 pr_info("ctrl %d qid %d host response mismatch\n", 150 ctrl->cntlid, req->sq->qid); 151 kfree(response); 152 return NVME_AUTH_DHCHAP_FAILURE_FAILED; 153 } 154 kfree(response); 155 pr_debug("%s: ctrl %d qid %d host authenticated\n", 156 __func__, ctrl->cntlid, req->sq->qid); 157 if (data->cvalid) { 158 req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl, 159 GFP_KERNEL); 160 if (!req->sq->dhchap_c2) 161 return NVME_AUTH_DHCHAP_FAILURE_FAILED; 162 163 pr_debug("%s: ctrl %d qid %d challenge %*ph\n", 164 __func__, ctrl->cntlid, req->sq->qid, data->hl, 165 req->sq->dhchap_c2); 166 } else { 167 req->sq->authenticated = true; 168 req->sq->dhchap_c2 = NULL; 169 } 170 req->sq->dhchap_s2 = le32_to_cpu(data->seqnum); 171 172 return 0; 173 } 174 175 static u8 nvmet_auth_failure2(void *d) 176 { 177 struct nvmf_auth_dhchap_failure_data *data = d; 178 179 return data->rescode_exp; 180 } 181 182 void nvmet_execute_auth_send(struct nvmet_req *req) 183 { 184 struct nvmet_ctrl *ctrl = req->sq->ctrl; 185 struct nvmf_auth_dhchap_success2_data *data; 186 void *d; 187 u32 tl; 188 u16 status = 0; 189 u8 dhchap_status; 190 191 if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { 192 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 193 req->error_loc = 194 offsetof(struct nvmf_auth_send_command, secp); 195 goto done; 196 } 197 if (req->cmd->auth_send.spsp0 != 0x01) { 198 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 199 req->error_loc = 200 offsetof(struct nvmf_auth_send_command, spsp0); 201 goto done; 202 } 203 if (req->cmd->auth_send.spsp1 != 0x01) { 204 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 205 req->error_loc = 206 offsetof(struct nvmf_auth_send_command, spsp1); 207 goto done; 208 } 209 tl = le32_to_cpu(req->cmd->auth_send.tl); 210 if (!tl) { 211 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 212 req->error_loc = 213 offsetof(struct nvmf_auth_send_command, tl); 214 goto done; 215 } 216 if (!nvmet_check_transfer_len(req, tl)) { 217 pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl); 218 return; 219 } 220 221 d = kmalloc(tl, GFP_KERNEL); 222 if (!d) { 223 status = NVME_SC_INTERNAL; 224 goto done; 225 } 226 227 status = nvmet_copy_from_sgl(req, 0, d, tl); 228 if (status) 229 goto done_kfree; 230 231 data = d; 232 pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__, 233 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id, 234 req->sq->dhchap_step); 235 if (data->auth_type != NVME_AUTH_COMMON_MESSAGES && 236 data->auth_type != NVME_AUTH_DHCHAP_MESSAGES) 237 goto done_failure1; 238 if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) { 239 if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) { 240 /* Restart negotiation */ 241 pr_debug("%s: ctrl %d qid %d reset negotiation\n", 242 __func__, ctrl->cntlid, req->sq->qid); 243 if (!req->sq->qid) { 244 dhchap_status = nvmet_setup_auth(ctrl); 245 if (dhchap_status) { 246 pr_err("ctrl %d qid 0 failed to setup re-authentication\n", 247 ctrl->cntlid); 248 req->sq->dhchap_status = dhchap_status; 249 req->sq->dhchap_step = 250 NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 251 goto done_kfree; 252 } 253 } 254 req->sq->dhchap_step = 255 NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; 256 } else if (data->auth_id != req->sq->dhchap_step) 257 goto done_failure1; 258 /* Validate negotiation parameters */ 259 dhchap_status = nvmet_auth_negotiate(req, d); 260 if (dhchap_status == 0) 261 req->sq->dhchap_step = 262 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE; 263 else { 264 req->sq->dhchap_step = 265 NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 266 req->sq->dhchap_status = dhchap_status; 267 } 268 goto done_kfree; 269 } 270 if (data->auth_id != req->sq->dhchap_step) { 271 pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n", 272 __func__, ctrl->cntlid, req->sq->qid, 273 data->auth_id, req->sq->dhchap_step); 274 goto done_failure1; 275 } 276 if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) { 277 pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n", 278 __func__, ctrl->cntlid, req->sq->qid, 279 le16_to_cpu(data->t_id), 280 req->sq->dhchap_tid); 281 req->sq->dhchap_step = 282 NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 283 req->sq->dhchap_status = 284 NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 285 goto done_kfree; 286 } 287 288 switch (data->auth_id) { 289 case NVME_AUTH_DHCHAP_MESSAGE_REPLY: 290 dhchap_status = nvmet_auth_reply(req, d); 291 if (dhchap_status == 0) 292 req->sq->dhchap_step = 293 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1; 294 else { 295 req->sq->dhchap_step = 296 NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 297 req->sq->dhchap_status = dhchap_status; 298 } 299 goto done_kfree; 300 case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2: 301 req->sq->authenticated = true; 302 pr_debug("%s: ctrl %d qid %d ctrl authenticated\n", 303 __func__, ctrl->cntlid, req->sq->qid); 304 goto done_kfree; 305 case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2: 306 dhchap_status = nvmet_auth_failure2(d); 307 if (dhchap_status) { 308 pr_warn("ctrl %d qid %d: authentication failed (%d)\n", 309 ctrl->cntlid, req->sq->qid, dhchap_status); 310 req->sq->dhchap_status = dhchap_status; 311 req->sq->authenticated = false; 312 } 313 goto done_kfree; 314 default: 315 req->sq->dhchap_status = 316 NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; 317 req->sq->dhchap_step = 318 NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; 319 req->sq->authenticated = false; 320 goto done_kfree; 321 } 322 done_failure1: 323 req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; 324 req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; 325 326 done_kfree: 327 kfree(d); 328 done: 329 pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__, 330 ctrl->cntlid, req->sq->qid, 331 req->sq->dhchap_status, req->sq->dhchap_step); 332 if (status) 333 pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n", 334 __func__, ctrl->cntlid, req->sq->qid, 335 status, req->error_loc); 336 req->cqe->result.u64 = 0; 337 if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 && 338 req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { 339 unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120; 340 341 mod_delayed_work(system_wq, &req->sq->auth_expired_work, 342 auth_expire_secs * HZ); 343 goto complete; 344 } 345 /* Final states, clear up variables */ 346 nvmet_auth_sq_free(req->sq); 347 if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) 348 nvmet_ctrl_fatal_error(ctrl); 349 350 complete: 351 nvmet_req_complete(req, status); 352 } 353 354 static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al) 355 { 356 struct nvmf_auth_dhchap_challenge_data *data = d; 357 struct nvmet_ctrl *ctrl = req->sq->ctrl; 358 int ret = 0; 359 int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id); 360 int data_size = sizeof(*d) + hash_len; 361 362 if (ctrl->dh_tfm) 363 data_size += ctrl->dh_keysize; 364 if (al < data_size) { 365 pr_debug("%s: buffer too small (al %d need %d)\n", __func__, 366 al, data_size); 367 return -EINVAL; 368 } 369 memset(data, 0, data_size); 370 req->sq->dhchap_s1 = nvme_auth_get_seqnum(); 371 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; 372 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE; 373 data->t_id = cpu_to_le16(req->sq->dhchap_tid); 374 data->hashid = ctrl->shash_id; 375 data->hl = hash_len; 376 data->seqnum = cpu_to_le32(req->sq->dhchap_s1); 377 req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL); 378 if (!req->sq->dhchap_c1) 379 return -ENOMEM; 380 get_random_bytes(req->sq->dhchap_c1, data->hl); 381 memcpy(data->cval, req->sq->dhchap_c1, data->hl); 382 if (ctrl->dh_tfm) { 383 data->dhgid = ctrl->dh_gid; 384 data->dhvlen = cpu_to_le16(ctrl->dh_keysize); 385 ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl, 386 ctrl->dh_keysize); 387 } 388 pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n", 389 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, 390 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize); 391 return ret; 392 } 393 394 static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al) 395 { 396 struct nvmf_auth_dhchap_success1_data *data = d; 397 struct nvmet_ctrl *ctrl = req->sq->ctrl; 398 int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id); 399 400 WARN_ON(al < sizeof(*data)); 401 memset(data, 0, sizeof(*data)); 402 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; 403 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1; 404 data->t_id = cpu_to_le16(req->sq->dhchap_tid); 405 data->hl = hash_len; 406 if (req->sq->dhchap_c2) { 407 if (!ctrl->ctrl_key) { 408 pr_warn("ctrl %d qid %d no ctrl key\n", 409 ctrl->cntlid, req->sq->qid); 410 return NVME_AUTH_DHCHAP_FAILURE_FAILED; 411 } 412 if (nvmet_auth_ctrl_hash(req, data->rval, data->hl)) 413 return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 414 data->rvalid = 1; 415 pr_debug("ctrl %d qid %d response %*ph\n", 416 ctrl->cntlid, req->sq->qid, data->hl, data->rval); 417 } 418 return 0; 419 } 420 421 static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al) 422 { 423 struct nvmf_auth_dhchap_failure_data *data = d; 424 425 WARN_ON(al < sizeof(*data)); 426 data->auth_type = NVME_AUTH_COMMON_MESSAGES; 427 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 428 data->t_id = cpu_to_le16(req->sq->dhchap_tid); 429 data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED; 430 data->rescode_exp = req->sq->dhchap_status; 431 } 432 433 void nvmet_execute_auth_receive(struct nvmet_req *req) 434 { 435 struct nvmet_ctrl *ctrl = req->sq->ctrl; 436 void *d; 437 u32 al; 438 u16 status = 0; 439 440 if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { 441 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 442 req->error_loc = 443 offsetof(struct nvmf_auth_receive_command, secp); 444 goto done; 445 } 446 if (req->cmd->auth_receive.spsp0 != 0x01) { 447 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 448 req->error_loc = 449 offsetof(struct nvmf_auth_receive_command, spsp0); 450 goto done; 451 } 452 if (req->cmd->auth_receive.spsp1 != 0x01) { 453 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 454 req->error_loc = 455 offsetof(struct nvmf_auth_receive_command, spsp1); 456 goto done; 457 } 458 al = le32_to_cpu(req->cmd->auth_receive.al); 459 if (!al) { 460 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; 461 req->error_loc = 462 offsetof(struct nvmf_auth_receive_command, al); 463 goto done; 464 } 465 if (!nvmet_check_transfer_len(req, al)) { 466 pr_debug("%s: transfer length mismatch (%u)\n", __func__, al); 467 return; 468 } 469 470 d = kmalloc(al, GFP_KERNEL); 471 if (!d) { 472 status = NVME_SC_INTERNAL; 473 goto done; 474 } 475 pr_debug("%s: ctrl %d qid %d step %x\n", __func__, 476 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); 477 switch (req->sq->dhchap_step) { 478 case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE: 479 if (nvmet_auth_challenge(req, d, al) < 0) { 480 pr_warn("ctrl %d qid %d: challenge error (%d)\n", 481 ctrl->cntlid, req->sq->qid, status); 482 status = NVME_SC_INTERNAL; 483 break; 484 } 485 req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY; 486 break; 487 case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1: 488 status = nvmet_auth_success1(req, d, al); 489 if (status) { 490 req->sq->dhchap_status = status; 491 req->sq->authenticated = false; 492 nvmet_auth_failure1(req, d, al); 493 pr_warn("ctrl %d qid %d: success1 status (%x)\n", 494 ctrl->cntlid, req->sq->qid, 495 req->sq->dhchap_status); 496 break; 497 } 498 req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2; 499 break; 500 case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1: 501 req->sq->authenticated = false; 502 nvmet_auth_failure1(req, d, al); 503 pr_warn("ctrl %d qid %d failure1 (%x)\n", 504 ctrl->cntlid, req->sq->qid, req->sq->dhchap_status); 505 break; 506 default: 507 pr_warn("ctrl %d qid %d unhandled step (%d)\n", 508 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); 509 req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; 510 req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 511 nvmet_auth_failure1(req, d, al); 512 status = 0; 513 break; 514 } 515 516 status = nvmet_copy_to_sgl(req, 0, d, al); 517 kfree(d); 518 done: 519 req->cqe->result.u64 = 0; 520 521 if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) 522 nvmet_auth_sq_free(req->sq); 523 else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { 524 nvmet_auth_sq_free(req->sq); 525 nvmet_ctrl_fatal_error(ctrl); 526 } 527 nvmet_req_complete(req, status); 528 } 529