// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static uuid_t nvdimm_btt_uuid;
static uuid_t nvdimm_btt2_uuid;
static uuid_t nvdimm_pfn_uuid;
static uuid_t nvdimm_dax_uuid;

static uuid_t cxl_region_uuid;
static uuid_t cxl_namespace_uuid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
		sizeof_namespace_label(ndd);
}

int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels. The minimum index
	 * block size is 256 bytes. The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}
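
/*
 * Worked example of the sizing math above, for a hypothetical DIMM with a
 * 128KiB (131072 byte) label area and 256-byte (v1.2) labels, assuming the
 * 72-byte fixed header of struct nd_namespace_index and NSINDEX_ALIGN == 256
 * from label.h:
 *
 *	tmp_nslot = 131072 / 256 = 512
 *	__sizeof_namespace_index(512) = ALIGN(72 + 64, 256) = 256
 *	nslot = (131072 - 2 * 256) / 256 = 510
 *
 * sizeof_namespace_index() then confirms the fit: 510 labels leave
 * 131072 - 510 * 256 = 512 bytes, exactly the 2 * 256 needed for the two
 * index blocks, so each index block is 256 bytes.
 */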

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * The on-media label format consists of two index blocks followed
	 * by an array of labels. None of these structures are ever
	 * updated in place. A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}
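
/*
 * Illustration of the sequence-number comparison used above: seq is a 2-bit
 * field whose valid values cycle 1 -> 2 -> 3 -> 1 (see nd_inc_seq()), so
 * "newer" is defined by succession rather than magnitude:
 *
 *	best_seq(1, 2) == 2	2 succeeds 1, so nsindex1 is newer
 *	best_seq(3, 1) == 1	1 succeeds 3 across the wrap
 *	best_seq(2, 0) == 2	0 is invalid, fall back to the other side
 *
 * On a tie (a == b), best_seq() returns b, i.e. nsindex1 wins; ties can
 * occur legitimately because each index block increments its own previous
 * sequence value rather than the global latest.
 */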

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
		      u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	u64 sum, sum_save;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return true;

	sum_save = nsl_get_checksum(ndd, nd_label);
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum_save);
	return sum == sum_save;
}

static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	u64 sum;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return;
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum);
}
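
/*
 * For reference, a minimal sketch of the Fletcher-64 variant both helpers
 * above rely on (the real implementation is nd_fletcher64() in
 * drivers/nvdimm/core.c): two running sums over the buffer taken as
 * little-endian 32-bit words, concatenated into a 64-bit result.
 *
 *	u32 *w = buf;
 *	u32 lo = 0;
 *	u64 hi = 0;
 *
 *	for (i = 0; i < len / sizeof(u32); i++) {
 *		lo += le32_to_cpu(w[i]);
 *		hi += lo;
 *	}
 *	return hi << 32 | lo;
 *
 * Because the checksum field is itself part of the summed buffer, it is
 * zeroed before summing and restored afterwards in both helpers above.
 */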

static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	bool valid;

	/* check that we are written where we expect to be written */
	if (slot != nsl_get_slot(ndd, nd_label))
		return false;
	valid = nsl_validate_checksum(ndd, nd_label);
	if (!valid)
		dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
	return valid;
}

int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		struct nd_label_id label_id;
		struct resource *res;
		uuid_t label_uuid;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		nsl_get_uuid(ndd, nd_label, &label_uuid);
		flags = nsl_get_flags(ndd, nd_label);
		nd_label_gen_id(&label_id, &label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
					  nsl_get_dpa(ndd, nd_label),
					  nsl_get_rawsize(ndd, nd_label));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 ||
	    ndd->nsarea.config_size == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory. To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call. */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}
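
/*
 * Worked example of the "trim waste" step above, with hypothetical numbers:
 * config_size = 131072 and max_xfer = 5000 take
 * DIV_ROUND_UP(131072, 5000) = 27 reads, and the last read would leave
 * (5000 - 1) - (131072 - 1) % 5000 = 3928 bytes unused. Spreading that
 * waste across the reads trims max_xfer by 3928 / 27 = 145 down to 4855,
 * which still needs 27 reads (27 * 4855 = 131085 >= 131072) but leaves only
 * 13 bytes of overshoot for the final, truncated read.
 */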

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = nsl_get_slot(ndd, nd_label);
			u64 size = nsl_get_rawsize(ndd, nd_label);
			u64 dpa = nsl_get_dpa(ndd, nd_label);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
				slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}
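
/*
 * Sketch of the typical slot lifecycle (see __pmem_label_update() below for
 * the real sequence): a set bit in the *next* index's 'free' bitmap means
 * the slot is available, and nothing is durable until that index is written.
 *
 *	slot = nd_label_alloc_slot(ndd);	// clears a bit in 'free'
 *	if (slot == UINT_MAX)
 *		return -ENXIO;			// label space exhausted
 *	... fill in and write to_label(ndd, slot) ...
 *	rc = nd_label_write_index(ndd, ndd->ns_next, seq, 0);
 *
 * Until the index write commits, a crash simply leaves the slot free again;
 * nd_label_free_slot() is the inverse operation, used when a label is
 * deleted or reaped.
 */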

static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
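
/*
 * Worked example of the ND_NSINDEX_INIT bitmap setup above, assuming the
 * hypothetical 510-slot geometry used earlier and BITS_PER_LONG == 64:
 * nfree = ALIGN(510, 64) = 512, so 512 / 8 = 64 bytes of 'free' are set to
 * 0xff (all slots free), then last_bits = 512 - 510 = 2 trailing bits (510
 * and 511) are cleared so slots beyond nslot can never be allocated.
 */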

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

static enum nvdimm_claim_class guid_to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

/* CXL labels store UUIDs instead of GUIDs for the same data */
static enum nvdimm_claim_class uuid_to_nvdimm_cclass(uuid_t *uuid)
{
	if (uuid_equal(uuid, &nvdimm_btt_uuid))
		return NVDIMM_CCLASS_BTT;
	else if (uuid_equal(uuid, &nvdimm_btt2_uuid))
		return NVDIMM_CCLASS_BTT2;
	else if (uuid_equal(uuid, &nvdimm_pfn_uuid))
		return NVDIMM_CCLASS_PFN;
	else if (uuid_equal(uuid, &nvdimm_dax_uuid))
		return NVDIMM_CCLASS_DAX;
	else if (uuid_equal(uuid, &uuid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
		guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

/* CXL labels store UUIDs instead of GUIDs for the same data */
static const uuid_t *to_abstraction_uuid(enum nvdimm_claim_class claim_class,
					 uuid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_uuid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_uuid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_uuid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_uuid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing uuid.
		 */
		return target;
	} else
		return &uuid_null;
}

static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (efi_namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->efi.type_guid, guid);
}

bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
			    struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
		return true;
	if (!guid_equal(&nd_label->efi.type_guid, guid)) {
		dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
			&nd_label->efi.type_guid);
		return false;
	}
	return true;
}

static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label,
				enum nvdimm_claim_class claim_class)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		export_uuid(nd_label->cxl.abstraction_uuid,
			    to_abstraction_uuid(claim_class, &uuid));
		return;
	}

	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return;
	guid_copy(&nd_label->efi.abstraction_guid,
		  to_abstraction_guid(claim_class,
				      &nd_label->efi.abstraction_guid));
}

enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
					    struct nd_namespace_label *nd_label)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		return uuid_to_nvdimm_cclass(&uuid);
	}
	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return NVDIMM_CCLASS_NONE;
	return guid_to_nvdimm_cclass(&nd_label->efi.abstraction_guid);
}
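
/*
 * Why both guid_t and uuid_t variants exist above: the kernel's guid_t
 * stores its first three fields little-endian (EFI style) while uuid_t
 * stores all bytes big-endian (RFC 4122 style). For a hypothetical value
 * 12345678-9abc-def0-1234-56789abcdef0, the on-media bytes begin
 * 78 56 34 12 bc 9a ... as a guid_t but 12 34 56 78 9a bc ... as a uuid_t,
 * so EFI and CXL labels are not byte-compatible even when they name the
 * same claim class.
 */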

static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	nsl_set_uuid(ndd, nd_label, nspm->uuid);
	nsl_set_name(ndd, nd_label, nspm->alt_name);
	nsl_set_flags(ndd, nd_label, flags);
	nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
	nsl_set_nrange(ndd, nd_label, 1);
	nsl_set_position(ndd, nd_label, pos);
	nsl_set_isetcookie(ndd, nd_label, cookie);
	nsl_set_rawsize(ndd, nd_label, resource_size(res));
	nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
	nsl_set_dpa(ndd, nd_label, res->start);
	nsl_set_slot(ndd, nd_label, slot);
	nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
	nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
	nsl_calculate_checksum(ndd, nd_label);
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
		    nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}
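
/*
 * The update above is effectively an A/B transaction against the two index
 * blocks. Tracing one call, assuming ns_current = 0 and ns_next = 1:
 *
 *	1. allocate a free slot from nsindex1's bitmap
 *	2. write the new label into that slot (old labels stay intact)
 *	3. reap the in-core entry holding the old slot for the same uuid,
 *	   marking that slot free in nsindex1's bitmap
 *	4. write nsindex1 with seq = nd_inc_seq(old seq) -- the commit point
 *	5. toggle ns_current/ns_next inside nd_label_write_index()
 *
 * A power loss before step 4 leaves nsindex0 and the old label fully
 * intact; validation on the next boot never sees the partial update.
 */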

static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		if (!nsl_uuid_equal(ndd, nd_label, uuid))
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}
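
/*
 * Putting it together for a freshly initialized DIMM: init_labels() writes
 * nsindex0 with seq 3 and nsindex1 with seq 2, so validation picks index 0
 * as current (best_seq(3, 2) == 3) and index 1 as next. The two-pass loop
 * in nd_pmem_namespace_label_update() then writes every mapping's label
 * with NSLABEL_FLAG_UPDATING set, and only once all mappings have committed
 * does a second label write per mapping clear the flag, so an interrupted
 * multi-DIMM update remains detectable on any individual DIMM.
 */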

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
	WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
	WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
	WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));

	WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
	WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));

	return 0;
}