// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static uuid_t nvdimm_btt_uuid;
static uuid_t nvdimm_btt2_uuid;
static uuid_t nvdimm_pfn_uuid;
static uuid_t nvdimm_dax_uuid;

static uuid_t cxl_region_uuid;
static uuid_t cxl_namespace_uuid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is
	 * large enough to hold 2 index blocks and 2 labels.  The
	 * minimum index block size is 256 bytes.  The label size is
	 * 128 for namespaces prior to version 1.2 and at minimum 256
	 * for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}
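
/*
 * Worked example of the sizing math above (illustrative numbers, not
 * from any specific DIMM): with a 128KiB label area and 256-byte
 * labels, nvdimm_num_label_slots() first assumes 131072 / 256 = 512
 * slots, which needs a 64-byte free bitmap and, assuming the fixed
 * index header plus bitmap fits in one 256-byte NSINDEX_ALIGN unit
 * (it does for sizes in this range), one index unit per block.
 * Reserving two such index blocks leaves (131072 - 512) / 256 = 510
 * usable label slots, and sizeof_namespace_index() returns 256 per
 * index block.
 */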

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * The on-media label format consists of two index blocks
	 * followed by an array of labels.  None of these structures
	 * are ever updated in place.  A sequence number tracks the
	 * current active index and the next one to write, while
	 * labels are written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}
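
/*
 * Note on the sequence numbers used above: seq is a 2-bit counter
 * that cycles 1 -> 2 -> 3 -> 1, with 0 reserved as invalid.
 * best_seq() honors that wraparound, e.g. best_seq(3, 1) returns 1
 * because nd_inc_seq(3) == 1, i.e. the index block holding seq 1 is
 * the more recently written of the two.
 */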

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
		      u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}
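
/*
 * Illustration of the slot <-> address arithmetic in to_slot() and
 * to_label() (hypothetical geometry: 256-byte labels, 256-byte index
 * blocks): label storage begins at nd_label_base() == base + 2 * 256,
 * so slot 3 lives at base + 512 + 3 * 256, and to_slot() recovers 3
 * from that address by reversing the same linear mapping.
 */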

static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	u64 sum, sum_save;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return true;

	sum_save = nsl_get_checksum(ndd, nd_label);
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum_save);
	return sum == sum_save;
}

static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	u64 sum;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return;
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	bool valid;

	/* check that we are written where we expect to be written */
	if (slot != nsl_get_slot(ndd, nd_label))
		return false;
	valid = nsl_validate_checksum(ndd, nd_label);
	if (!valid)
		dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
	return valid;
}

int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		struct nd_label_id label_id;
		struct resource *res;
		uuid_t label_uuid;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		nsl_get_uuid(ndd, nd_label, &label_uuid);
		flags = nsl_get_flags(ndd, nd_label);
		nd_label_gen_id(&label_id, &label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
					  nsl_get_dpa(ndd, nd_label),
					  nsl_get_rawsize(ndd, nd_label));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}
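
/*
 * Note: nd_label_reserve_dpa() runs when the DIMM driver first loads
 * the label area; it seeds the DPA resource tree from whatever active
 * labels are already on media, so that later allocations (e.g. for a
 * new namespace) cannot overlap an existing one.  The -EBUSY return
 * signals an on-media conflict.
 */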

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory.  To do that we figure out how much unused space will be
	 * left in the last read, divide that by the total number of reads
	 * it is going to take given our maximum transfer size, and then
	 * reduce our maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}
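
/*
 * The "trim waste" math above, with illustrative numbers (not from
 * any real device): for config_size = 10000 and max_xfer = 3000, the
 * last of the four reads would only cover 1000 bytes, wasting 2000.
 * The adjustment subtracts 2000 / DIV_ROUND_UP(10000, 3000) = 500
 * from max_xfer, giving four evenly sized reads of 2500 bytes with no
 * change in the total number of reads.
 */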

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = nsl_get_slot(ndd, nd_label);
			u64 size = nsl_get_rawsize(ndd, nd_label);
			u64 dpa = nsl_get_dpa(ndd, nd_label);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
				slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}
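
/*
 * Bitmap convention used throughout: a set bit in nsindex->free means
 * the slot is available, a clear bit means it holds an active label.
 * Hence nd_label_alloc_slot() searches for a set bit and clears it,
 * while the readers above walk active labels with
 * for_each_clear_bit_le().
 */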

static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
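
/*
 * Rotation example (a sketch, assuming nsindex0 is current with seq 2
 * and nsindex1 is next): a label update writes nsindex1 with
 * nd_inc_seq(2) == 3, at which point nsindex1 becomes current and
 * nsindex0 becomes the next staging block.  The ND_NSINDEX_INIT path
 * instead seeds both blocks (seq 3 then 2) and marks every slot free,
 * clearing only the pad bits between nslot and the bitmap's
 * BITS_PER_LONG alignment.
 */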

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

static enum nvdimm_claim_class guid_to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

/* CXL labels store UUIDs instead of GUIDs for the same data */
static enum nvdimm_claim_class uuid_to_nvdimm_cclass(uuid_t *uuid)
{
	if (uuid_equal(uuid, &nvdimm_btt_uuid))
		return NVDIMM_CCLASS_BTT;
	else if (uuid_equal(uuid, &nvdimm_btt2_uuid))
		return NVDIMM_CCLASS_BTT2;
	else if (uuid_equal(uuid, &nvdimm_pfn_uuid))
		return NVDIMM_CCLASS_PFN;
	else if (uuid_equal(uuid, &nvdimm_dax_uuid))
		return NVDIMM_CCLASS_DAX;
	else if (uuid_equal(uuid, &uuid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
		guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

/* CXL labels store UUIDs instead of GUIDs for the same data */
static const uuid_t *to_abstraction_uuid(enum nvdimm_claim_class claim_class,
		uuid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_uuid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_uuid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_uuid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_uuid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing uuid.
		 */
		return target;
	} else
		return &uuid_null;
}

static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (efi_namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->efi.type_guid, guid);
}

bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
			    struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
		return true;
	if (!guid_equal(&nd_label->efi.type_guid, guid)) {
		dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
			&nd_label->efi.type_guid);
		return false;
	}
	return true;
}

static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label,
				enum nvdimm_claim_class claim_class)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		export_uuid(nd_label->cxl.abstraction_uuid,
			    to_abstraction_uuid(claim_class, &uuid));
		return;
	}

	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return;
	guid_copy(&nd_label->efi.abstraction_guid,
		  to_abstraction_guid(claim_class,
				      &nd_label->efi.abstraction_guid));
}

enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
					    struct nd_namespace_label *nd_label)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		return uuid_to_nvdimm_cclass(&uuid);
	}
	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return NVDIMM_CCLASS_NONE;
	return guid_to_nvdimm_cclass(&nd_label->efi.abstraction_guid);
}
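
/*
 * The import_uuid()/export_uuid() round trip above exists because CXL
 * labels carry the abstraction identifier as a raw byte array rather
 * than a typed uuid_t: import converts those bytes to a uuid_t for
 * comparison, and export writes the (possibly replaced) value back in
 * byte-array form.
 */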

static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	nsl_set_uuid(ndd, nd_label, nspm->uuid);
	nsl_set_name(ndd, nd_label, nspm->alt_name);
	nsl_set_flags(ndd, nd_label, flags);
	nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
	nsl_set_nrange(ndd, nd_label, 1);
	nsl_set_position(ndd, nd_label, pos);
	nsl_set_isetcookie(ndd, nd_label, cookie);
	nsl_set_rawsize(ndd, nd_label, resource_size(res));
	nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
	nsl_set_dpa(ndd, nd_label, res->start);
	nsl_set_slot(ndd, nd_label, slot);
	nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
	nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
	nsl_calculate_checksum(ndd, nd_label);
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
		    nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}
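
/*
 * Summary of the update protocol implemented above: (1) write the new
 * label into a free slot of the staging ("next") index, (2) release
 * the slots of any labels it supersedes, then (3) commit everything
 * atomically by writing the next index block with an incremented
 * sequence number.  A power loss before step 3 leaves the old index,
 * and therefore the old label set, intact.
 */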

static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		if (!nsl_uuid_equal(ndd, nd_label, uuid))
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}
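
/*
 * The update path below follows the UEFI label protocol in two
 * passes: every DIMM in the interleave set first gets its label
 * written with NSLABEL_FLAG_UPDATING, and only after all writes
 * succeed is the flag cleared in a second pass.  A crash mid-update
 * therefore leaves labels that readers can recognize as part of an
 * incomplete transaction.
 */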

int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
	WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
	WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
	WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));

	WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
	WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));

	return 0;
}
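
/*
 * A closing note on nd_label_init(): the same string constants are
 * parsed twice on purpose.  guid_parse() stores the first three
 * fields little-endian (EFI GUID convention) while uuid_parse()
 * stores all bytes big-endian (RFC 4122 convention), and the CXL
 * label format uses the latter.
 */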