1 2 #include <linux/ceph/ceph_debug.h> 3 4 #include <linux/module.h> 5 #include <linux/slab.h> 6 #include <asm/div64.h> 7 8 #include <linux/ceph/libceph.h> 9 #include <linux/ceph/osdmap.h> 10 #include <linux/ceph/decode.h> 11 #include <linux/crush/hash.h> 12 #include <linux/crush/mapper.h> 13 14 char *ceph_osdmap_state_str(char *str, int len, int state) 15 { 16 if (!len) 17 return str; 18 19 if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP)) 20 snprintf(str, len, "exists, up"); 21 else if (state & CEPH_OSD_EXISTS) 22 snprintf(str, len, "exists"); 23 else if (state & CEPH_OSD_UP) 24 snprintf(str, len, "up"); 25 else 26 snprintf(str, len, "doesn't exist"); 27 28 return str; 29 } 30 31 /* maps */ 32 33 static int calc_bits_of(unsigned int t) 34 { 35 int b = 0; 36 while (t) { 37 t = t >> 1; 38 b++; 39 } 40 return b; 41 } 42 43 /* 44 * the foo_mask is the smallest value 2^n-1 that is >= foo. 45 */ 46 static void calc_pg_masks(struct ceph_pg_pool_info *pi) 47 { 48 pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1; 49 pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1; 50 } 51 52 /* 53 * decode crush map 54 */ 55 static int crush_decode_uniform_bucket(void **p, void *end, 56 struct crush_bucket_uniform *b) 57 { 58 dout("crush_decode_uniform_bucket %p to %p\n", *p, end); 59 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); 60 b->item_weight = ceph_decode_32(p); 61 return 0; 62 bad: 63 return -EINVAL; 64 } 65 66 static int crush_decode_list_bucket(void **p, void *end, 67 struct crush_bucket_list *b) 68 { 69 int j; 70 dout("crush_decode_list_bucket %p to %p\n", *p, end); 71 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 72 if (b->item_weights == NULL) 73 return -ENOMEM; 74 b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 75 if (b->sum_weights == NULL) 76 return -ENOMEM; 77 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 78 for (j = 0; j < b->h.size; j++) { 79 b->item_weights[j] = ceph_decode_32(p); 80 b->sum_weights[j] = ceph_decode_32(p); 81 } 82 return 0; 83 bad: 84 return -EINVAL; 85 } 86 87 static int crush_decode_tree_bucket(void **p, void *end, 88 struct crush_bucket_tree *b) 89 { 90 int j; 91 dout("crush_decode_tree_bucket %p to %p\n", *p, end); 92 ceph_decode_8_safe(p, end, b->num_nodes, bad); 93 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS); 94 if (b->node_weights == NULL) 95 return -ENOMEM; 96 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad); 97 for (j = 0; j < b->num_nodes; j++) 98 b->node_weights[j] = ceph_decode_32(p); 99 return 0; 100 bad: 101 return -EINVAL; 102 } 103 104 static int crush_decode_straw_bucket(void **p, void *end, 105 struct crush_bucket_straw *b) 106 { 107 int j; 108 dout("crush_decode_straw_bucket %p to %p\n", *p, end); 109 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 110 if (b->item_weights == NULL) 111 return -ENOMEM; 112 b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 113 if (b->straws == NULL) 114 return -ENOMEM; 115 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 116 for (j = 0; j < b->h.size; j++) { 117 b->item_weights[j] = ceph_decode_32(p); 118 b->straws[j] = ceph_decode_32(p); 119 } 120 return 0; 121 bad: 122 return -EINVAL; 123 } 124 125 static int crush_decode_straw2_bucket(void **p, void *end, 126 struct crush_bucket_straw2 *b) 127 { 128 int j; 129 dout("crush_decode_straw2_bucket %p to %p\n", *p, end); 130 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 131 if (b->item_weights == NULL) 132 return -ENOMEM; 133 ceph_decode_need(p, end, b->h.size * sizeof(u32), bad); 134 for (j = 0; j < b->h.size; j++) 135 b->item_weights[j] = ceph_decode_32(p); 136 return 0; 137 bad: 138 return -EINVAL; 139 } 140 141 static void crush_finalize(struct crush_map *c) 142 { 143 __s32 b; 144 145 /* Space for the array of pointers to per-bucket workspace */ 146 c->working_size = sizeof(struct crush_work) + 147 c->max_buckets * sizeof(struct crush_work_bucket *); 148 149 for (b = 0; b < c->max_buckets; b++) { 150 if (!c->buckets[b]) 151 continue; 152 153 switch (c->buckets[b]->alg) { 154 default: 155 /* 156 * The base case, permutation variables and 157 * the pointer to the permutation array. 158 */ 159 c->working_size += sizeof(struct crush_work_bucket); 160 break; 161 } 162 /* Every bucket has a permutation array. */ 163 c->working_size += c->buckets[b]->size * sizeof(__u32); 164 } 165 } 166 167 static struct crush_map *crush_decode(void *pbyval, void *end) 168 { 169 struct crush_map *c; 170 int err = -EINVAL; 171 int i, j; 172 void **p = &pbyval; 173 void *start = pbyval; 174 u32 magic; 175 176 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p)); 177 178 c = kzalloc(sizeof(*c), GFP_NOFS); 179 if (c == NULL) 180 return ERR_PTR(-ENOMEM); 181 182 /* set tunables to default values */ 183 c->choose_local_tries = 2; 184 c->choose_local_fallback_tries = 5; 185 c->choose_total_tries = 19; 186 c->chooseleaf_descend_once = 0; 187 188 ceph_decode_need(p, end, 4*sizeof(u32), bad); 189 magic = ceph_decode_32(p); 190 if (magic != CRUSH_MAGIC) { 191 pr_err("crush_decode magic %x != current %x\n", 192 (unsigned int)magic, (unsigned int)CRUSH_MAGIC); 193 goto bad; 194 } 195 c->max_buckets = ceph_decode_32(p); 196 c->max_rules = ceph_decode_32(p); 197 c->max_devices = ceph_decode_32(p); 198 199 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS); 200 if (c->buckets == NULL) 201 goto badmem; 202 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS); 203 if (c->rules == NULL) 204 goto badmem; 205 206 /* buckets */ 207 for (i = 0; i < c->max_buckets; i++) { 208 int size = 0; 209 u32 alg; 210 struct crush_bucket *b; 211 212 ceph_decode_32_safe(p, end, alg, bad); 213 if (alg == 0) { 214 c->buckets[i] = NULL; 215 continue; 216 } 217 dout("crush_decode bucket %d off %x %p to %p\n", 218 i, (int)(*p-start), *p, end); 219 220 switch (alg) { 221 case CRUSH_BUCKET_UNIFORM: 222 size = sizeof(struct crush_bucket_uniform); 223 break; 224 case CRUSH_BUCKET_LIST: 225 size = sizeof(struct crush_bucket_list); 226 break; 227 case CRUSH_BUCKET_TREE: 228 size = sizeof(struct crush_bucket_tree); 229 break; 230 case CRUSH_BUCKET_STRAW: 231 size = sizeof(struct crush_bucket_straw); 232 break; 233 case CRUSH_BUCKET_STRAW2: 234 size = sizeof(struct crush_bucket_straw2); 235 break; 236 default: 237 err = -EINVAL; 238 goto bad; 239 } 240 BUG_ON(size == 0); 241 b = c->buckets[i] = kzalloc(size, GFP_NOFS); 242 if (b == NULL) 243 goto badmem; 244 245 ceph_decode_need(p, end, 4*sizeof(u32), bad); 246 b->id = ceph_decode_32(p); 247 b->type = ceph_decode_16(p); 248 b->alg = ceph_decode_8(p); 249 b->hash = ceph_decode_8(p); 250 b->weight = ceph_decode_32(p); 251 b->size = ceph_decode_32(p); 252 253 dout("crush_decode bucket size %d off %x %p to %p\n", 254 b->size, (int)(*p-start), *p, end); 255 256 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS); 257 if (b->items == NULL) 258 goto badmem; 259 260 ceph_decode_need(p, end, b->size*sizeof(u32), bad); 261 for (j = 0; j < b->size; j++) 262 b->items[j] = ceph_decode_32(p); 263 264 switch (b->alg) { 265 case CRUSH_BUCKET_UNIFORM: 266 err = crush_decode_uniform_bucket(p, end, 267 (struct crush_bucket_uniform *)b); 268 if (err < 0) 269 goto bad; 270 break; 271 case CRUSH_BUCKET_LIST: 272 err = crush_decode_list_bucket(p, end, 273 (struct crush_bucket_list *)b); 274 if (err < 0) 275 goto bad; 276 break; 277 case CRUSH_BUCKET_TREE: 278 err = crush_decode_tree_bucket(p, end, 279 (struct crush_bucket_tree *)b); 280 if (err < 0) 281 goto bad; 282 break; 283 case CRUSH_BUCKET_STRAW: 284 err = crush_decode_straw_bucket(p, end, 285 (struct crush_bucket_straw *)b); 286 if (err < 0) 287 goto bad; 288 break; 289 case CRUSH_BUCKET_STRAW2: 290 err = crush_decode_straw2_bucket(p, end, 291 (struct crush_bucket_straw2 *)b); 292 if (err < 0) 293 goto bad; 294 break; 295 } 296 } 297 298 /* rules */ 299 dout("rule vec is %p\n", c->rules); 300 for (i = 0; i < c->max_rules; i++) { 301 u32 yes; 302 struct crush_rule *r; 303 304 err = -EINVAL; 305 ceph_decode_32_safe(p, end, yes, bad); 306 if (!yes) { 307 dout("crush_decode NO rule %d off %x %p to %p\n", 308 i, (int)(*p-start), *p, end); 309 c->rules[i] = NULL; 310 continue; 311 } 312 313 dout("crush_decode rule %d off %x %p to %p\n", 314 i, (int)(*p-start), *p, end); 315 316 /* len */ 317 ceph_decode_32_safe(p, end, yes, bad); 318 #if BITS_PER_LONG == 32 319 err = -EINVAL; 320 if (yes > (ULONG_MAX - sizeof(*r)) 321 / sizeof(struct crush_rule_step)) 322 goto bad; 323 #endif 324 r = c->rules[i] = kmalloc(sizeof(*r) + 325 yes*sizeof(struct crush_rule_step), 326 GFP_NOFS); 327 if (r == NULL) 328 goto badmem; 329 dout(" rule %d is at %p\n", i, r); 330 r->len = yes; 331 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */ 332 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad); 333 for (j = 0; j < r->len; j++) { 334 r->steps[j].op = ceph_decode_32(p); 335 r->steps[j].arg1 = ceph_decode_32(p); 336 r->steps[j].arg2 = ceph_decode_32(p); 337 } 338 } 339 340 ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */ 341 ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */ 342 ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */ 343 344 /* tunables */ 345 ceph_decode_need(p, end, 3*sizeof(u32), done); 346 c->choose_local_tries = ceph_decode_32(p); 347 c->choose_local_fallback_tries = ceph_decode_32(p); 348 c->choose_total_tries = ceph_decode_32(p); 349 dout("crush decode tunable choose_local_tries = %d\n", 350 c->choose_local_tries); 351 dout("crush decode tunable choose_local_fallback_tries = %d\n", 352 c->choose_local_fallback_tries); 353 dout("crush decode tunable choose_total_tries = %d\n", 354 c->choose_total_tries); 355 356 ceph_decode_need(p, end, sizeof(u32), done); 357 c->chooseleaf_descend_once = ceph_decode_32(p); 358 dout("crush decode tunable chooseleaf_descend_once = %d\n", 359 c->chooseleaf_descend_once); 360 361 ceph_decode_need(p, end, sizeof(u8), done); 362 c->chooseleaf_vary_r = ceph_decode_8(p); 363 dout("crush decode tunable chooseleaf_vary_r = %d\n", 364 c->chooseleaf_vary_r); 365 366 /* skip straw_calc_version, allowed_bucket_algs */ 367 ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done); 368 *p += sizeof(u8) + sizeof(u32); 369 370 ceph_decode_need(p, end, sizeof(u8), done); 371 c->chooseleaf_stable = ceph_decode_8(p); 372 dout("crush decode tunable chooseleaf_stable = %d\n", 373 c->chooseleaf_stable); 374 375 done: 376 crush_finalize(c); 377 dout("crush_decode success\n"); 378 return c; 379 380 badmem: 381 err = -ENOMEM; 382 bad: 383 dout("crush_decode fail %d\n", err); 384 crush_destroy(c); 385 return ERR_PTR(err); 386 } 387 388 int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs) 389 { 390 if (lhs->pool < rhs->pool) 391 return -1; 392 if (lhs->pool > rhs->pool) 393 return 1; 394 if (lhs->seed < rhs->seed) 395 return -1; 396 if (lhs->seed > rhs->seed) 397 return 1; 398 399 return 0; 400 } 401 402 int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs) 403 { 404 int ret; 405 406 ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid); 407 if (ret) 408 return ret; 409 410 if (lhs->shard < rhs->shard) 411 return -1; 412 if (lhs->shard > rhs->shard) 413 return 1; 414 415 return 0; 416 } 417 418 static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len) 419 { 420 struct ceph_pg_mapping *pg; 421 422 pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO); 423 if (!pg) 424 return NULL; 425 426 RB_CLEAR_NODE(&pg->node); 427 return pg; 428 } 429 430 static void free_pg_mapping(struct ceph_pg_mapping *pg) 431 { 432 WARN_ON(!RB_EMPTY_NODE(&pg->node)); 433 434 kfree(pg); 435 } 436 437 /* 438 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid 439 * to a set of osds) and primary_temp (explicit primary setting) 440 */ 441 DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare, 442 RB_BYPTR, const struct ceph_pg *, node) 443 444 /* 445 * rbtree of pg pool info 446 */ 447 static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new) 448 { 449 struct rb_node **p = &root->rb_node; 450 struct rb_node *parent = NULL; 451 struct ceph_pg_pool_info *pi = NULL; 452 453 while (*p) { 454 parent = *p; 455 pi = rb_entry(parent, struct ceph_pg_pool_info, node); 456 if (new->id < pi->id) 457 p = &(*p)->rb_left; 458 else if (new->id > pi->id) 459 p = &(*p)->rb_right; 460 else 461 return -EEXIST; 462 } 463 464 rb_link_node(&new->node, parent, p); 465 rb_insert_color(&new->node, root); 466 return 0; 467 } 468 469 static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id) 470 { 471 struct ceph_pg_pool_info *pi; 472 struct rb_node *n = root->rb_node; 473 474 while (n) { 475 pi = rb_entry(n, struct ceph_pg_pool_info, node); 476 if (id < pi->id) 477 n = n->rb_left; 478 else if (id > pi->id) 479 n = n->rb_right; 480 else 481 return pi; 482 } 483 return NULL; 484 } 485 486 struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id) 487 { 488 return __lookup_pg_pool(&map->pg_pools, id); 489 } 490 491 const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id) 492 { 493 struct ceph_pg_pool_info *pi; 494 495 if (id == CEPH_NOPOOL) 496 return NULL; 497 498 if (WARN_ON_ONCE(id > (u64) INT_MAX)) 499 return NULL; 500 501 pi = __lookup_pg_pool(&map->pg_pools, (int) id); 502 503 return pi ? pi->name : NULL; 504 } 505 EXPORT_SYMBOL(ceph_pg_pool_name_by_id); 506 507 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) 508 { 509 struct rb_node *rbp; 510 511 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) { 512 struct ceph_pg_pool_info *pi = 513 rb_entry(rbp, struct ceph_pg_pool_info, node); 514 if (pi->name && strcmp(pi->name, name) == 0) 515 return pi->id; 516 } 517 return -ENOENT; 518 } 519 EXPORT_SYMBOL(ceph_pg_poolid_by_name); 520 521 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) 522 { 523 rb_erase(&pi->node, root); 524 kfree(pi->name); 525 kfree(pi); 526 } 527 528 static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) 529 { 530 u8 ev, cv; 531 unsigned len, num; 532 void *pool_end; 533 534 ceph_decode_need(p, end, 2 + 4, bad); 535 ev = ceph_decode_8(p); /* encoding version */ 536 cv = ceph_decode_8(p); /* compat version */ 537 if (ev < 5) { 538 pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv); 539 return -EINVAL; 540 } 541 if (cv > 9) { 542 pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv); 543 return -EINVAL; 544 } 545 len = ceph_decode_32(p); 546 ceph_decode_need(p, end, len, bad); 547 pool_end = *p + len; 548 549 pi->type = ceph_decode_8(p); 550 pi->size = ceph_decode_8(p); 551 pi->crush_ruleset = ceph_decode_8(p); 552 pi->object_hash = ceph_decode_8(p); 553 554 pi->pg_num = ceph_decode_32(p); 555 pi->pgp_num = ceph_decode_32(p); 556 557 *p += 4 + 4; /* skip lpg* */ 558 *p += 4; /* skip last_change */ 559 *p += 8 + 4; /* skip snap_seq, snap_epoch */ 560 561 /* skip snaps */ 562 num = ceph_decode_32(p); 563 while (num--) { 564 *p += 8; /* snapid key */ 565 *p += 1 + 1; /* versions */ 566 len = ceph_decode_32(p); 567 *p += len; 568 } 569 570 /* skip removed_snaps */ 571 num = ceph_decode_32(p); 572 *p += num * (8 + 8); 573 574 *p += 8; /* skip auid */ 575 pi->flags = ceph_decode_64(p); 576 *p += 4; /* skip crash_replay_interval */ 577 578 if (ev >= 7) 579 pi->min_size = ceph_decode_8(p); 580 else 581 pi->min_size = pi->size - pi->size / 2; 582 583 if (ev >= 8) 584 *p += 8 + 8; /* skip quota_max_* */ 585 586 if (ev >= 9) { 587 /* skip tiers */ 588 num = ceph_decode_32(p); 589 *p += num * 8; 590 591 *p += 8; /* skip tier_of */ 592 *p += 1; /* skip cache_mode */ 593 594 pi->read_tier = ceph_decode_64(p); 595 pi->write_tier = ceph_decode_64(p); 596 } else { 597 pi->read_tier = -1; 598 pi->write_tier = -1; 599 } 600 601 if (ev >= 10) { 602 /* skip properties */ 603 num = ceph_decode_32(p); 604 while (num--) { 605 len = ceph_decode_32(p); 606 *p += len; /* key */ 607 len = ceph_decode_32(p); 608 *p += len; /* val */ 609 } 610 } 611 612 if (ev >= 11) { 613 /* skip hit_set_params */ 614 *p += 1 + 1; /* versions */ 615 len = ceph_decode_32(p); 616 *p += len; 617 618 *p += 4; /* skip hit_set_period */ 619 *p += 4; /* skip hit_set_count */ 620 } 621 622 if (ev >= 12) 623 *p += 4; /* skip stripe_width */ 624 625 if (ev >= 13) { 626 *p += 8; /* skip target_max_bytes */ 627 *p += 8; /* skip target_max_objects */ 628 *p += 4; /* skip cache_target_dirty_ratio_micro */ 629 *p += 4; /* skip cache_target_full_ratio_micro */ 630 *p += 4; /* skip cache_min_flush_age */ 631 *p += 4; /* skip cache_min_evict_age */ 632 } 633 634 if (ev >= 14) { 635 /* skip erasure_code_profile */ 636 len = ceph_decode_32(p); 637 *p += len; 638 } 639 640 /* 641 * last_force_op_resend_preluminous, will be overridden if the 642 * map was encoded with RESEND_ON_SPLIT 643 */ 644 if (ev >= 15) 645 pi->last_force_request_resend = ceph_decode_32(p); 646 else 647 pi->last_force_request_resend = 0; 648 649 if (ev >= 16) 650 *p += 4; /* skip min_read_recency_for_promote */ 651 652 if (ev >= 17) 653 *p += 8; /* skip expected_num_objects */ 654 655 if (ev >= 19) 656 *p += 4; /* skip cache_target_dirty_high_ratio_micro */ 657 658 if (ev >= 20) 659 *p += 4; /* skip min_write_recency_for_promote */ 660 661 if (ev >= 21) 662 *p += 1; /* skip use_gmt_hitset */ 663 664 if (ev >= 22) 665 *p += 1; /* skip fast_read */ 666 667 if (ev >= 23) { 668 *p += 4; /* skip hit_set_grade_decay_rate */ 669 *p += 4; /* skip hit_set_search_last_n */ 670 } 671 672 if (ev >= 24) { 673 /* skip opts */ 674 *p += 1 + 1; /* versions */ 675 len = ceph_decode_32(p); 676 *p += len; 677 } 678 679 if (ev >= 25) 680 pi->last_force_request_resend = ceph_decode_32(p); 681 682 /* ignore the rest */ 683 684 *p = pool_end; 685 calc_pg_masks(pi); 686 return 0; 687 688 bad: 689 return -EINVAL; 690 } 691 692 static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map) 693 { 694 struct ceph_pg_pool_info *pi; 695 u32 num, len; 696 u64 pool; 697 698 ceph_decode_32_safe(p, end, num, bad); 699 dout(" %d pool names\n", num); 700 while (num--) { 701 ceph_decode_64_safe(p, end, pool, bad); 702 ceph_decode_32_safe(p, end, len, bad); 703 dout(" pool %llu len %d\n", pool, len); 704 ceph_decode_need(p, end, len, bad); 705 pi = __lookup_pg_pool(&map->pg_pools, pool); 706 if (pi) { 707 char *name = kstrndup(*p, len, GFP_NOFS); 708 709 if (!name) 710 return -ENOMEM; 711 kfree(pi->name); 712 pi->name = name; 713 dout(" name is %s\n", pi->name); 714 } 715 *p += len; 716 } 717 return 0; 718 719 bad: 720 return -EINVAL; 721 } 722 723 /* 724 * osd map 725 */ 726 struct ceph_osdmap *ceph_osdmap_alloc(void) 727 { 728 struct ceph_osdmap *map; 729 730 map = kzalloc(sizeof(*map), GFP_NOIO); 731 if (!map) 732 return NULL; 733 734 map->pg_pools = RB_ROOT; 735 map->pool_max = -1; 736 map->pg_temp = RB_ROOT; 737 map->primary_temp = RB_ROOT; 738 map->pg_upmap = RB_ROOT; 739 map->pg_upmap_items = RB_ROOT; 740 mutex_init(&map->crush_workspace_mutex); 741 742 return map; 743 } 744 745 void ceph_osdmap_destroy(struct ceph_osdmap *map) 746 { 747 dout("osdmap_destroy %p\n", map); 748 if (map->crush) 749 crush_destroy(map->crush); 750 while (!RB_EMPTY_ROOT(&map->pg_temp)) { 751 struct ceph_pg_mapping *pg = 752 rb_entry(rb_first(&map->pg_temp), 753 struct ceph_pg_mapping, node); 754 erase_pg_mapping(&map->pg_temp, pg); 755 free_pg_mapping(pg); 756 } 757 while (!RB_EMPTY_ROOT(&map->primary_temp)) { 758 struct ceph_pg_mapping *pg = 759 rb_entry(rb_first(&map->primary_temp), 760 struct ceph_pg_mapping, node); 761 erase_pg_mapping(&map->primary_temp, pg); 762 free_pg_mapping(pg); 763 } 764 while (!RB_EMPTY_ROOT(&map->pg_upmap)) { 765 struct ceph_pg_mapping *pg = 766 rb_entry(rb_first(&map->pg_upmap), 767 struct ceph_pg_mapping, node); 768 rb_erase(&pg->node, &map->pg_upmap); 769 kfree(pg); 770 } 771 while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) { 772 struct ceph_pg_mapping *pg = 773 rb_entry(rb_first(&map->pg_upmap_items), 774 struct ceph_pg_mapping, node); 775 rb_erase(&pg->node, &map->pg_upmap_items); 776 kfree(pg); 777 } 778 while (!RB_EMPTY_ROOT(&map->pg_pools)) { 779 struct ceph_pg_pool_info *pi = 780 rb_entry(rb_first(&map->pg_pools), 781 struct ceph_pg_pool_info, node); 782 __remove_pg_pool(&map->pg_pools, pi); 783 } 784 kfree(map->osd_state); 785 kfree(map->osd_weight); 786 kfree(map->osd_addr); 787 kfree(map->osd_primary_affinity); 788 kfree(map->crush_workspace); 789 kfree(map); 790 } 791 792 /* 793 * Adjust max_osd value, (re)allocate arrays. 794 * 795 * The new elements are properly initialized. 796 */ 797 static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) 798 { 799 u8 *state; 800 u32 *weight; 801 struct ceph_entity_addr *addr; 802 int i; 803 804 state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS); 805 if (!state) 806 return -ENOMEM; 807 map->osd_state = state; 808 809 weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS); 810 if (!weight) 811 return -ENOMEM; 812 map->osd_weight = weight; 813 814 addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS); 815 if (!addr) 816 return -ENOMEM; 817 map->osd_addr = addr; 818 819 for (i = map->max_osd; i < max; i++) { 820 map->osd_state[i] = 0; 821 map->osd_weight[i] = CEPH_OSD_OUT; 822 memset(map->osd_addr + i, 0, sizeof(*map->osd_addr)); 823 } 824 825 if (map->osd_primary_affinity) { 826 u32 *affinity; 827 828 affinity = krealloc(map->osd_primary_affinity, 829 max*sizeof(*affinity), GFP_NOFS); 830 if (!affinity) 831 return -ENOMEM; 832 map->osd_primary_affinity = affinity; 833 834 for (i = map->max_osd; i < max; i++) 835 map->osd_primary_affinity[i] = 836 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 837 } 838 839 map->max_osd = max; 840 841 return 0; 842 } 843 844 static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush) 845 { 846 void *workspace; 847 size_t work_size; 848 849 if (IS_ERR(crush)) 850 return PTR_ERR(crush); 851 852 work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE); 853 dout("%s work_size %zu bytes\n", __func__, work_size); 854 workspace = kmalloc(work_size, GFP_NOIO); 855 if (!workspace) { 856 crush_destroy(crush); 857 return -ENOMEM; 858 } 859 crush_init_workspace(crush, workspace); 860 861 if (map->crush) 862 crush_destroy(map->crush); 863 kfree(map->crush_workspace); 864 map->crush = crush; 865 map->crush_workspace = workspace; 866 return 0; 867 } 868 869 #define OSDMAP_WRAPPER_COMPAT_VER 7 870 #define OSDMAP_CLIENT_DATA_COMPAT_VER 1 871 872 /* 873 * Return 0 or error. On success, *v is set to 0 for old (v6) osdmaps, 874 * to struct_v of the client_data section for new (v7 and above) 875 * osdmaps. 876 */ 877 static int get_osdmap_client_data_v(void **p, void *end, 878 const char *prefix, u8 *v) 879 { 880 u8 struct_v; 881 882 ceph_decode_8_safe(p, end, struct_v, e_inval); 883 if (struct_v >= 7) { 884 u8 struct_compat; 885 886 ceph_decode_8_safe(p, end, struct_compat, e_inval); 887 if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) { 888 pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n", 889 struct_v, struct_compat, 890 OSDMAP_WRAPPER_COMPAT_VER, prefix); 891 return -EINVAL; 892 } 893 *p += 4; /* ignore wrapper struct_len */ 894 895 ceph_decode_8_safe(p, end, struct_v, e_inval); 896 ceph_decode_8_safe(p, end, struct_compat, e_inval); 897 if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) { 898 pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n", 899 struct_v, struct_compat, 900 OSDMAP_CLIENT_DATA_COMPAT_VER, prefix); 901 return -EINVAL; 902 } 903 *p += 4; /* ignore client data struct_len */ 904 } else { 905 u16 version; 906 907 *p -= 1; 908 ceph_decode_16_safe(p, end, version, e_inval); 909 if (version < 6) { 910 pr_warn("got v %d < 6 of %s ceph_osdmap\n", 911 version, prefix); 912 return -EINVAL; 913 } 914 915 /* old osdmap enconding */ 916 struct_v = 0; 917 } 918 919 *v = struct_v; 920 return 0; 921 922 e_inval: 923 return -EINVAL; 924 } 925 926 static int __decode_pools(void **p, void *end, struct ceph_osdmap *map, 927 bool incremental) 928 { 929 u32 n; 930 931 ceph_decode_32_safe(p, end, n, e_inval); 932 while (n--) { 933 struct ceph_pg_pool_info *pi; 934 u64 pool; 935 int ret; 936 937 ceph_decode_64_safe(p, end, pool, e_inval); 938 939 pi = __lookup_pg_pool(&map->pg_pools, pool); 940 if (!incremental || !pi) { 941 pi = kzalloc(sizeof(*pi), GFP_NOFS); 942 if (!pi) 943 return -ENOMEM; 944 945 pi->id = pool; 946 947 ret = __insert_pg_pool(&map->pg_pools, pi); 948 if (ret) { 949 kfree(pi); 950 return ret; 951 } 952 } 953 954 ret = decode_pool(p, end, pi); 955 if (ret) 956 return ret; 957 } 958 959 return 0; 960 961 e_inval: 962 return -EINVAL; 963 } 964 965 static int decode_pools(void **p, void *end, struct ceph_osdmap *map) 966 { 967 return __decode_pools(p, end, map, false); 968 } 969 970 static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map) 971 { 972 return __decode_pools(p, end, map, true); 973 } 974 975 typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool); 976 977 static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root, 978 decode_mapping_fn_t fn, bool incremental) 979 { 980 u32 n; 981 982 WARN_ON(!incremental && !fn); 983 984 ceph_decode_32_safe(p, end, n, e_inval); 985 while (n--) { 986 struct ceph_pg_mapping *pg; 987 struct ceph_pg pgid; 988 int ret; 989 990 ret = ceph_decode_pgid(p, end, &pgid); 991 if (ret) 992 return ret; 993 994 pg = lookup_pg_mapping(mapping_root, &pgid); 995 if (pg) { 996 WARN_ON(!incremental); 997 erase_pg_mapping(mapping_root, pg); 998 free_pg_mapping(pg); 999 } 1000 1001 if (fn) { 1002 pg = fn(p, end, incremental); 1003 if (IS_ERR(pg)) 1004 return PTR_ERR(pg); 1005 1006 if (pg) { 1007 pg->pgid = pgid; /* struct */ 1008 insert_pg_mapping(mapping_root, pg); 1009 } 1010 } 1011 } 1012 1013 return 0; 1014 1015 e_inval: 1016 return -EINVAL; 1017 } 1018 1019 static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end, 1020 bool incremental) 1021 { 1022 struct ceph_pg_mapping *pg; 1023 u32 len, i; 1024 1025 ceph_decode_32_safe(p, end, len, e_inval); 1026 if (len == 0 && incremental) 1027 return NULL; /* new_pg_temp: [] to remove */ 1028 if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32)) 1029 return ERR_PTR(-EINVAL); 1030 1031 ceph_decode_need(p, end, len * sizeof(u32), e_inval); 1032 pg = alloc_pg_mapping(len * sizeof(u32)); 1033 if (!pg) 1034 return ERR_PTR(-ENOMEM); 1035 1036 pg->pg_temp.len = len; 1037 for (i = 0; i < len; i++) 1038 pg->pg_temp.osds[i] = ceph_decode_32(p); 1039 1040 return pg; 1041 1042 e_inval: 1043 return ERR_PTR(-EINVAL); 1044 } 1045 1046 static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map) 1047 { 1048 return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp, 1049 false); 1050 } 1051 1052 static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map) 1053 { 1054 return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp, 1055 true); 1056 } 1057 1058 static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end, 1059 bool incremental) 1060 { 1061 struct ceph_pg_mapping *pg; 1062 u32 osd; 1063 1064 ceph_decode_32_safe(p, end, osd, e_inval); 1065 if (osd == (u32)-1 && incremental) 1066 return NULL; /* new_primary_temp: -1 to remove */ 1067 1068 pg = alloc_pg_mapping(0); 1069 if (!pg) 1070 return ERR_PTR(-ENOMEM); 1071 1072 pg->primary_temp.osd = osd; 1073 return pg; 1074 1075 e_inval: 1076 return ERR_PTR(-EINVAL); 1077 } 1078 1079 static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map) 1080 { 1081 return decode_pg_mapping(p, end, &map->primary_temp, 1082 __decode_primary_temp, false); 1083 } 1084 1085 static int decode_new_primary_temp(void **p, void *end, 1086 struct ceph_osdmap *map) 1087 { 1088 return decode_pg_mapping(p, end, &map->primary_temp, 1089 __decode_primary_temp, true); 1090 } 1091 1092 u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd) 1093 { 1094 BUG_ON(osd >= map->max_osd); 1095 1096 if (!map->osd_primary_affinity) 1097 return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1098 1099 return map->osd_primary_affinity[osd]; 1100 } 1101 1102 static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff) 1103 { 1104 BUG_ON(osd >= map->max_osd); 1105 1106 if (!map->osd_primary_affinity) { 1107 int i; 1108 1109 map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32), 1110 GFP_NOFS); 1111 if (!map->osd_primary_affinity) 1112 return -ENOMEM; 1113 1114 for (i = 0; i < map->max_osd; i++) 1115 map->osd_primary_affinity[i] = 1116 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1117 } 1118 1119 map->osd_primary_affinity[osd] = aff; 1120 1121 return 0; 1122 } 1123 1124 static int decode_primary_affinity(void **p, void *end, 1125 struct ceph_osdmap *map) 1126 { 1127 u32 len, i; 1128 1129 ceph_decode_32_safe(p, end, len, e_inval); 1130 if (len == 0) { 1131 kfree(map->osd_primary_affinity); 1132 map->osd_primary_affinity = NULL; 1133 return 0; 1134 } 1135 if (len != map->max_osd) 1136 goto e_inval; 1137 1138 ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval); 1139 1140 for (i = 0; i < map->max_osd; i++) { 1141 int ret; 1142 1143 ret = set_primary_affinity(map, i, ceph_decode_32(p)); 1144 if (ret) 1145 return ret; 1146 } 1147 1148 return 0; 1149 1150 e_inval: 1151 return -EINVAL; 1152 } 1153 1154 static int decode_new_primary_affinity(void **p, void *end, 1155 struct ceph_osdmap *map) 1156 { 1157 u32 n; 1158 1159 ceph_decode_32_safe(p, end, n, e_inval); 1160 while (n--) { 1161 u32 osd, aff; 1162 int ret; 1163 1164 ceph_decode_32_safe(p, end, osd, e_inval); 1165 ceph_decode_32_safe(p, end, aff, e_inval); 1166 1167 ret = set_primary_affinity(map, osd, aff); 1168 if (ret) 1169 return ret; 1170 1171 pr_info("osd%d primary-affinity 0x%x\n", osd, aff); 1172 } 1173 1174 return 0; 1175 1176 e_inval: 1177 return -EINVAL; 1178 } 1179 1180 static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end, 1181 bool __unused) 1182 { 1183 return __decode_pg_temp(p, end, false); 1184 } 1185 1186 static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map) 1187 { 1188 return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap, 1189 false); 1190 } 1191 1192 static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map) 1193 { 1194 return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap, 1195 true); 1196 } 1197 1198 static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map) 1199 { 1200 return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true); 1201 } 1202 1203 static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end, 1204 bool __unused) 1205 { 1206 struct ceph_pg_mapping *pg; 1207 u32 len, i; 1208 1209 ceph_decode_32_safe(p, end, len, e_inval); 1210 if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32))) 1211 return ERR_PTR(-EINVAL); 1212 1213 ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval); 1214 pg = kzalloc(sizeof(*pg) + 2 * len * sizeof(u32), GFP_NOIO); 1215 if (!pg) 1216 return ERR_PTR(-ENOMEM); 1217 1218 pg->pg_upmap_items.len = len; 1219 for (i = 0; i < len; i++) { 1220 pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p); 1221 pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p); 1222 } 1223 1224 return pg; 1225 1226 e_inval: 1227 return ERR_PTR(-EINVAL); 1228 } 1229 1230 static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map) 1231 { 1232 return decode_pg_mapping(p, end, &map->pg_upmap_items, 1233 __decode_pg_upmap_items, false); 1234 } 1235 1236 static int decode_new_pg_upmap_items(void **p, void *end, 1237 struct ceph_osdmap *map) 1238 { 1239 return decode_pg_mapping(p, end, &map->pg_upmap_items, 1240 __decode_pg_upmap_items, true); 1241 } 1242 1243 static int decode_old_pg_upmap_items(void **p, void *end, 1244 struct ceph_osdmap *map) 1245 { 1246 return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true); 1247 } 1248 1249 /* 1250 * decode a full map. 1251 */ 1252 static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map) 1253 { 1254 u8 struct_v; 1255 u32 epoch = 0; 1256 void *start = *p; 1257 u32 max; 1258 u32 len, i; 1259 int err; 1260 1261 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); 1262 1263 err = get_osdmap_client_data_v(p, end, "full", &struct_v); 1264 if (err) 1265 goto bad; 1266 1267 /* fsid, epoch, created, modified */ 1268 ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) + 1269 sizeof(map->created) + sizeof(map->modified), e_inval); 1270 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); 1271 epoch = map->epoch = ceph_decode_32(p); 1272 ceph_decode_copy(p, &map->created, sizeof(map->created)); 1273 ceph_decode_copy(p, &map->modified, sizeof(map->modified)); 1274 1275 /* pools */ 1276 err = decode_pools(p, end, map); 1277 if (err) 1278 goto bad; 1279 1280 /* pool_name */ 1281 err = decode_pool_names(p, end, map); 1282 if (err) 1283 goto bad; 1284 1285 ceph_decode_32_safe(p, end, map->pool_max, e_inval); 1286 1287 ceph_decode_32_safe(p, end, map->flags, e_inval); 1288 1289 /* max_osd */ 1290 ceph_decode_32_safe(p, end, max, e_inval); 1291 1292 /* (re)alloc osd arrays */ 1293 err = osdmap_set_max_osd(map, max); 1294 if (err) 1295 goto bad; 1296 1297 /* osd_state, osd_weight, osd_addrs->client_addr */ 1298 ceph_decode_need(p, end, 3*sizeof(u32) + 1299 map->max_osd*(1 + sizeof(*map->osd_weight) + 1300 sizeof(*map->osd_addr)), e_inval); 1301 1302 if (ceph_decode_32(p) != map->max_osd) 1303 goto e_inval; 1304 1305 ceph_decode_copy(p, map->osd_state, map->max_osd); 1306 1307 if (ceph_decode_32(p) != map->max_osd) 1308 goto e_inval; 1309 1310 for (i = 0; i < map->max_osd; i++) 1311 map->osd_weight[i] = ceph_decode_32(p); 1312 1313 if (ceph_decode_32(p) != map->max_osd) 1314 goto e_inval; 1315 1316 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); 1317 for (i = 0; i < map->max_osd; i++) 1318 ceph_decode_addr(&map->osd_addr[i]); 1319 1320 /* pg_temp */ 1321 err = decode_pg_temp(p, end, map); 1322 if (err) 1323 goto bad; 1324 1325 /* primary_temp */ 1326 if (struct_v >= 1) { 1327 err = decode_primary_temp(p, end, map); 1328 if (err) 1329 goto bad; 1330 } 1331 1332 /* primary_affinity */ 1333 if (struct_v >= 2) { 1334 err = decode_primary_affinity(p, end, map); 1335 if (err) 1336 goto bad; 1337 } else { 1338 WARN_ON(map->osd_primary_affinity); 1339 } 1340 1341 /* crush */ 1342 ceph_decode_32_safe(p, end, len, e_inval); 1343 err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end))); 1344 if (err) 1345 goto bad; 1346 1347 *p += len; 1348 if (struct_v >= 3) { 1349 /* erasure_code_profiles */ 1350 ceph_decode_skip_map_of_map(p, end, string, string, string, 1351 bad); 1352 } 1353 1354 if (struct_v >= 4) { 1355 err = decode_pg_upmap(p, end, map); 1356 if (err) 1357 goto bad; 1358 1359 err = decode_pg_upmap_items(p, end, map); 1360 if (err) 1361 goto bad; 1362 } else { 1363 WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap)); 1364 WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items)); 1365 } 1366 1367 /* ignore the rest */ 1368 *p = end; 1369 1370 dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); 1371 return 0; 1372 1373 e_inval: 1374 err = -EINVAL; 1375 bad: 1376 pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n", 1377 err, epoch, (int)(*p - start), *p, start, end); 1378 print_hex_dump(KERN_DEBUG, "osdmap: ", 1379 DUMP_PREFIX_OFFSET, 16, 1, 1380 start, end - start, true); 1381 return err; 1382 } 1383 1384 /* 1385 * Allocate and decode a full map. 1386 */ 1387 struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end) 1388 { 1389 struct ceph_osdmap *map; 1390 int ret; 1391 1392 map = ceph_osdmap_alloc(); 1393 if (!map) 1394 return ERR_PTR(-ENOMEM); 1395 1396 ret = osdmap_decode(p, end, map); 1397 if (ret) { 1398 ceph_osdmap_destroy(map); 1399 return ERR_PTR(ret); 1400 } 1401 1402 return map; 1403 } 1404 1405 /* 1406 * Encoding order is (new_up_client, new_state, new_weight). Need to 1407 * apply in the (new_weight, new_state, new_up_client) order, because 1408 * an incremental map may look like e.g. 1409 * 1410 * new_up_client: { osd=6, addr=... } # set osd_state and addr 1411 * new_state: { osd=6, xorstate=EXISTS } # clear osd_state 1412 */ 1413 static int decode_new_up_state_weight(void **p, void *end, 1414 struct ceph_osdmap *map) 1415 { 1416 void *new_up_client; 1417 void *new_state; 1418 void *new_weight_end; 1419 u32 len; 1420 1421 new_up_client = *p; 1422 ceph_decode_32_safe(p, end, len, e_inval); 1423 len *= sizeof(u32) + sizeof(struct ceph_entity_addr); 1424 ceph_decode_need(p, end, len, e_inval); 1425 *p += len; 1426 1427 new_state = *p; 1428 ceph_decode_32_safe(p, end, len, e_inval); 1429 len *= sizeof(u32) + sizeof(u8); 1430 ceph_decode_need(p, end, len, e_inval); 1431 *p += len; 1432 1433 /* new_weight */ 1434 ceph_decode_32_safe(p, end, len, e_inval); 1435 while (len--) { 1436 s32 osd; 1437 u32 w; 1438 1439 ceph_decode_need(p, end, 2*sizeof(u32), e_inval); 1440 osd = ceph_decode_32(p); 1441 w = ceph_decode_32(p); 1442 BUG_ON(osd >= map->max_osd); 1443 pr_info("osd%d weight 0x%x %s\n", osd, w, 1444 w == CEPH_OSD_IN ? "(in)" : 1445 (w == CEPH_OSD_OUT ? "(out)" : "")); 1446 map->osd_weight[osd] = w; 1447 1448 /* 1449 * If we are marking in, set the EXISTS, and clear the 1450 * AUTOOUT and NEW bits. 1451 */ 1452 if (w) { 1453 map->osd_state[osd] |= CEPH_OSD_EXISTS; 1454 map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT | 1455 CEPH_OSD_NEW); 1456 } 1457 } 1458 new_weight_end = *p; 1459 1460 /* new_state (up/down) */ 1461 *p = new_state; 1462 len = ceph_decode_32(p); 1463 while (len--) { 1464 s32 osd; 1465 u8 xorstate; 1466 int ret; 1467 1468 osd = ceph_decode_32(p); 1469 xorstate = ceph_decode_8(p); 1470 if (xorstate == 0) 1471 xorstate = CEPH_OSD_UP; 1472 BUG_ON(osd >= map->max_osd); 1473 if ((map->osd_state[osd] & CEPH_OSD_UP) && 1474 (xorstate & CEPH_OSD_UP)) 1475 pr_info("osd%d down\n", osd); 1476 if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && 1477 (xorstate & CEPH_OSD_EXISTS)) { 1478 pr_info("osd%d does not exist\n", osd); 1479 ret = set_primary_affinity(map, osd, 1480 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); 1481 if (ret) 1482 return ret; 1483 memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr)); 1484 map->osd_state[osd] = 0; 1485 } else { 1486 map->osd_state[osd] ^= xorstate; 1487 } 1488 } 1489 1490 /* new_up_client */ 1491 *p = new_up_client; 1492 len = ceph_decode_32(p); 1493 while (len--) { 1494 s32 osd; 1495 struct ceph_entity_addr addr; 1496 1497 osd = ceph_decode_32(p); 1498 ceph_decode_copy(p, &addr, sizeof(addr)); 1499 ceph_decode_addr(&addr); 1500 BUG_ON(osd >= map->max_osd); 1501 pr_info("osd%d up\n", osd); 1502 map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP; 1503 map->osd_addr[osd] = addr; 1504 } 1505 1506 *p = new_weight_end; 1507 return 0; 1508 1509 e_inval: 1510 return -EINVAL; 1511 } 1512 1513 /* 1514 * decode and apply an incremental map update. 1515 */ 1516 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, 1517 struct ceph_osdmap *map) 1518 { 1519 struct ceph_fsid fsid; 1520 u32 epoch = 0; 1521 struct ceph_timespec modified; 1522 s32 len; 1523 u64 pool; 1524 __s64 new_pool_max; 1525 __s32 new_flags, max; 1526 void *start = *p; 1527 int err; 1528 u8 struct_v; 1529 1530 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); 1531 1532 err = get_osdmap_client_data_v(p, end, "inc", &struct_v); 1533 if (err) 1534 goto bad; 1535 1536 /* fsid, epoch, modified, new_pool_max, new_flags */ 1537 ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) + 1538 sizeof(u64) + sizeof(u32), e_inval); 1539 ceph_decode_copy(p, &fsid, sizeof(fsid)); 1540 epoch = ceph_decode_32(p); 1541 BUG_ON(epoch != map->epoch+1); 1542 ceph_decode_copy(p, &modified, sizeof(modified)); 1543 new_pool_max = ceph_decode_64(p); 1544 new_flags = ceph_decode_32(p); 1545 1546 /* full map? */ 1547 ceph_decode_32_safe(p, end, len, e_inval); 1548 if (len > 0) { 1549 dout("apply_incremental full map len %d, %p to %p\n", 1550 len, *p, end); 1551 return ceph_osdmap_decode(p, min(*p+len, end)); 1552 } 1553 1554 /* new crush? */ 1555 ceph_decode_32_safe(p, end, len, e_inval); 1556 if (len > 0) { 1557 err = osdmap_set_crush(map, 1558 crush_decode(*p, min(*p + len, end))); 1559 if (err) 1560 goto bad; 1561 *p += len; 1562 } 1563 1564 /* new flags? */ 1565 if (new_flags >= 0) 1566 map->flags = new_flags; 1567 if (new_pool_max >= 0) 1568 map->pool_max = new_pool_max; 1569 1570 /* new max? */ 1571 ceph_decode_32_safe(p, end, max, e_inval); 1572 if (max >= 0) { 1573 err = osdmap_set_max_osd(map, max); 1574 if (err) 1575 goto bad; 1576 } 1577 1578 map->epoch++; 1579 map->modified = modified; 1580 1581 /* new_pools */ 1582 err = decode_new_pools(p, end, map); 1583 if (err) 1584 goto bad; 1585 1586 /* new_pool_names */ 1587 err = decode_pool_names(p, end, map); 1588 if (err) 1589 goto bad; 1590 1591 /* old_pool */ 1592 ceph_decode_32_safe(p, end, len, e_inval); 1593 while (len--) { 1594 struct ceph_pg_pool_info *pi; 1595 1596 ceph_decode_64_safe(p, end, pool, e_inval); 1597 pi = __lookup_pg_pool(&map->pg_pools, pool); 1598 if (pi) 1599 __remove_pg_pool(&map->pg_pools, pi); 1600 } 1601 1602 /* new_up_client, new_state, new_weight */ 1603 err = decode_new_up_state_weight(p, end, map); 1604 if (err) 1605 goto bad; 1606 1607 /* new_pg_temp */ 1608 err = decode_new_pg_temp(p, end, map); 1609 if (err) 1610 goto bad; 1611 1612 /* new_primary_temp */ 1613 if (struct_v >= 1) { 1614 err = decode_new_primary_temp(p, end, map); 1615 if (err) 1616 goto bad; 1617 } 1618 1619 /* new_primary_affinity */ 1620 if (struct_v >= 2) { 1621 err = decode_new_primary_affinity(p, end, map); 1622 if (err) 1623 goto bad; 1624 } 1625 1626 if (struct_v >= 3) { 1627 /* new_erasure_code_profiles */ 1628 ceph_decode_skip_map_of_map(p, end, string, string, string, 1629 bad); 1630 /* old_erasure_code_profiles */ 1631 ceph_decode_skip_set(p, end, string, bad); 1632 } 1633 1634 if (struct_v >= 4) { 1635 err = decode_new_pg_upmap(p, end, map); 1636 if (err) 1637 goto bad; 1638 1639 err = decode_old_pg_upmap(p, end, map); 1640 if (err) 1641 goto bad; 1642 1643 err = decode_new_pg_upmap_items(p, end, map); 1644 if (err) 1645 goto bad; 1646 1647 err = decode_old_pg_upmap_items(p, end, map); 1648 if (err) 1649 goto bad; 1650 } 1651 1652 /* ignore the rest */ 1653 *p = end; 1654 1655 dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); 1656 return map; 1657 1658 e_inval: 1659 err = -EINVAL; 1660 bad: 1661 pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n", 1662 err, epoch, (int)(*p - start), *p, start, end); 1663 print_hex_dump(KERN_DEBUG, "osdmap: ", 1664 DUMP_PREFIX_OFFSET, 16, 1, 1665 start, end - start, true); 1666 return ERR_PTR(err); 1667 } 1668 1669 void ceph_oloc_copy(struct ceph_object_locator *dest, 1670 const struct ceph_object_locator *src) 1671 { 1672 ceph_oloc_destroy(dest); 1673 1674 dest->pool = src->pool; 1675 if (src->pool_ns) 1676 dest->pool_ns = ceph_get_string(src->pool_ns); 1677 else 1678 dest->pool_ns = NULL; 1679 } 1680 EXPORT_SYMBOL(ceph_oloc_copy); 1681 1682 void ceph_oloc_destroy(struct ceph_object_locator *oloc) 1683 { 1684 ceph_put_string(oloc->pool_ns); 1685 } 1686 EXPORT_SYMBOL(ceph_oloc_destroy); 1687 1688 void ceph_oid_copy(struct ceph_object_id *dest, 1689 const struct ceph_object_id *src) 1690 { 1691 ceph_oid_destroy(dest); 1692 1693 if (src->name != src->inline_name) { 1694 /* very rare, see ceph_object_id definition */ 1695 dest->name = kmalloc(src->name_len + 1, 1696 GFP_NOIO | __GFP_NOFAIL); 1697 } else { 1698 dest->name = dest->inline_name; 1699 } 1700 memcpy(dest->name, src->name, src->name_len + 1); 1701 dest->name_len = src->name_len; 1702 } 1703 EXPORT_SYMBOL(ceph_oid_copy); 1704 1705 static __printf(2, 0) 1706 int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap) 1707 { 1708 int len; 1709 1710 WARN_ON(!ceph_oid_empty(oid)); 1711 1712 len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap); 1713 if (len >= sizeof(oid->inline_name)) 1714 return len; 1715 1716 oid->name_len = len; 1717 return 0; 1718 } 1719 1720 /* 1721 * If oid doesn't fit into inline buffer, BUG. 1722 */ 1723 void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...) 1724 { 1725 va_list ap; 1726 1727 va_start(ap, fmt); 1728 BUG_ON(oid_printf_vargs(oid, fmt, ap)); 1729 va_end(ap); 1730 } 1731 EXPORT_SYMBOL(ceph_oid_printf); 1732 1733 static __printf(3, 0) 1734 int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp, 1735 const char *fmt, va_list ap) 1736 { 1737 va_list aq; 1738 int len; 1739 1740 va_copy(aq, ap); 1741 len = oid_printf_vargs(oid, fmt, aq); 1742 va_end(aq); 1743 1744 if (len) { 1745 char *external_name; 1746 1747 external_name = kmalloc(len + 1, gfp); 1748 if (!external_name) 1749 return -ENOMEM; 1750 1751 oid->name = external_name; 1752 WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len); 1753 oid->name_len = len; 1754 } 1755 1756 return 0; 1757 } 1758 1759 /* 1760 * If oid doesn't fit into inline buffer, allocate. 1761 */ 1762 int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, 1763 const char *fmt, ...) 1764 { 1765 va_list ap; 1766 int ret; 1767 1768 va_start(ap, fmt); 1769 ret = oid_aprintf_vargs(oid, gfp, fmt, ap); 1770 va_end(ap); 1771 1772 return ret; 1773 } 1774 EXPORT_SYMBOL(ceph_oid_aprintf); 1775 1776 void ceph_oid_destroy(struct ceph_object_id *oid) 1777 { 1778 if (oid->name != oid->inline_name) 1779 kfree(oid->name); 1780 } 1781 EXPORT_SYMBOL(ceph_oid_destroy); 1782 1783 /* 1784 * osds only 1785 */ 1786 static bool __osds_equal(const struct ceph_osds *lhs, 1787 const struct ceph_osds *rhs) 1788 { 1789 if (lhs->size == rhs->size && 1790 !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0]))) 1791 return true; 1792 1793 return false; 1794 } 1795 1796 /* 1797 * osds + primary 1798 */ 1799 static bool osds_equal(const struct ceph_osds *lhs, 1800 const struct ceph_osds *rhs) 1801 { 1802 if (__osds_equal(lhs, rhs) && 1803 lhs->primary == rhs->primary) 1804 return true; 1805 1806 return false; 1807 } 1808 1809 static bool osds_valid(const struct ceph_osds *set) 1810 { 1811 /* non-empty set */ 1812 if (set->size > 0 && set->primary >= 0) 1813 return true; 1814 1815 /* empty can_shift_osds set */ 1816 if (!set->size && set->primary == -1) 1817 return true; 1818 1819 /* empty !can_shift_osds set - all NONE */ 1820 if (set->size > 0 && set->primary == -1) { 1821 int i; 1822 1823 for (i = 0; i < set->size; i++) { 1824 if (set->osds[i] != CRUSH_ITEM_NONE) 1825 break; 1826 } 1827 if (i == set->size) 1828 return true; 1829 } 1830 1831 return false; 1832 } 1833 1834 void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src) 1835 { 1836 memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0])); 1837 dest->size = src->size; 1838 dest->primary = src->primary; 1839 } 1840 1841 bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num, 1842 u32 new_pg_num) 1843 { 1844 int old_bits = calc_bits_of(old_pg_num); 1845 int old_mask = (1 << old_bits) - 1; 1846 int n; 1847 1848 WARN_ON(pgid->seed >= old_pg_num); 1849 if (new_pg_num <= old_pg_num) 1850 return false; 1851 1852 for (n = 1; ; n++) { 1853 int next_bit = n << (old_bits - 1); 1854 u32 s = next_bit | pgid->seed; 1855 1856 if (s < old_pg_num || s == pgid->seed) 1857 continue; 1858 if (s >= new_pg_num) 1859 break; 1860 1861 s = ceph_stable_mod(s, old_pg_num, old_mask); 1862 if (s == pgid->seed) 1863 return true; 1864 } 1865 1866 return false; 1867 } 1868 1869 bool ceph_is_new_interval(const struct ceph_osds *old_acting, 1870 const struct ceph_osds *new_acting, 1871 const struct ceph_osds *old_up, 1872 const struct ceph_osds *new_up, 1873 int old_size, 1874 int new_size, 1875 int old_min_size, 1876 int new_min_size, 1877 u32 old_pg_num, 1878 u32 new_pg_num, 1879 bool old_sort_bitwise, 1880 bool new_sort_bitwise, 1881 const struct ceph_pg *pgid) 1882 { 1883 return !osds_equal(old_acting, new_acting) || 1884 !osds_equal(old_up, new_up) || 1885 old_size != new_size || 1886 old_min_size != new_min_size || 1887 ceph_pg_is_split(pgid, old_pg_num, new_pg_num) || 1888 old_sort_bitwise != new_sort_bitwise; 1889 } 1890 1891 static int calc_pg_rank(int osd, const struct ceph_osds *acting) 1892 { 1893 int i; 1894 1895 for (i = 0; i < acting->size; i++) { 1896 if (acting->osds[i] == osd) 1897 return i; 1898 } 1899 1900 return -1; 1901 } 1902 1903 static bool primary_changed(const struct ceph_osds *old_acting, 1904 const struct ceph_osds *new_acting) 1905 { 1906 if (!old_acting->size && !new_acting->size) 1907 return false; /* both still empty */ 1908 1909 if (!old_acting->size ^ !new_acting->size) 1910 return true; /* was empty, now not, or vice versa */ 1911 1912 if (old_acting->primary != new_acting->primary) 1913 return true; /* primary changed */ 1914 1915 if (calc_pg_rank(old_acting->primary, old_acting) != 1916 calc_pg_rank(new_acting->primary, new_acting)) 1917 return true; 1918 1919 return false; /* same primary (tho replicas may have changed) */ 1920 } 1921 1922 bool ceph_osds_changed(const struct ceph_osds *old_acting, 1923 const struct ceph_osds *new_acting, 1924 bool any_change) 1925 { 1926 if (primary_changed(old_acting, new_acting)) 1927 return true; 1928 1929 if (any_change && !__osds_equal(old_acting, new_acting)) 1930 return true; 1931 1932 return false; 1933 } 1934 1935 /* 1936 * calculate file layout from given offset, length. 1937 * fill in correct oid, logical length, and object extent 1938 * offset, length. 1939 * 1940 * for now, we write only a single su, until we can 1941 * pass a stride back to the caller. 1942 */ 1943 int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, 1944 u64 off, u64 len, 1945 u64 *ono, 1946 u64 *oxoff, u64 *oxlen) 1947 { 1948 u32 osize = layout->object_size; 1949 u32 su = layout->stripe_unit; 1950 u32 sc = layout->stripe_count; 1951 u32 bl, stripeno, stripepos, objsetno; 1952 u32 su_per_object; 1953 u64 t, su_offset; 1954 1955 dout("mapping %llu~%llu osize %u fl_su %u\n", off, len, 1956 osize, su); 1957 if (su == 0 || sc == 0) 1958 goto invalid; 1959 su_per_object = osize / su; 1960 if (su_per_object == 0) 1961 goto invalid; 1962 dout("osize %u / su %u = su_per_object %u\n", osize, su, 1963 su_per_object); 1964 1965 if ((su & ~PAGE_MASK) != 0) 1966 goto invalid; 1967 1968 /* bl = *off / su; */ 1969 t = off; 1970 do_div(t, su); 1971 bl = t; 1972 dout("off %llu / su %u = bl %u\n", off, su, bl); 1973 1974 stripeno = bl / sc; 1975 stripepos = bl % sc; 1976 objsetno = stripeno / su_per_object; 1977 1978 *ono = objsetno * sc + stripepos; 1979 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono); 1980 1981 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ 1982 t = off; 1983 su_offset = do_div(t, su); 1984 *oxoff = su_offset + (stripeno % su_per_object) * su; 1985 1986 /* 1987 * Calculate the length of the extent being written to the selected 1988 * object. This is the minimum of the full length requested (len) or 1989 * the remainder of the current stripe being written to. 1990 */ 1991 *oxlen = min_t(u64, len, su - su_offset); 1992 1993 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); 1994 return 0; 1995 1996 invalid: 1997 dout(" invalid layout\n"); 1998 *ono = 0; 1999 *oxoff = 0; 2000 *oxlen = 0; 2001 return -EINVAL; 2002 } 2003 EXPORT_SYMBOL(ceph_calc_file_object_mapping); 2004 2005 /* 2006 * Map an object into a PG. 2007 * 2008 * Should only be called with target_oid and target_oloc (as opposed to 2009 * base_oid and base_oloc), since tiering isn't taken into account. 2010 */ 2011 int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi, 2012 const struct ceph_object_id *oid, 2013 const struct ceph_object_locator *oloc, 2014 struct ceph_pg *raw_pgid) 2015 { 2016 WARN_ON(pi->id != oloc->pool); 2017 2018 if (!oloc->pool_ns) { 2019 raw_pgid->pool = oloc->pool; 2020 raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, 2021 oid->name_len); 2022 dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name, 2023 raw_pgid->pool, raw_pgid->seed); 2024 } else { 2025 char stack_buf[256]; 2026 char *buf = stack_buf; 2027 int nsl = oloc->pool_ns->len; 2028 size_t total = nsl + 1 + oid->name_len; 2029 2030 if (total > sizeof(stack_buf)) { 2031 buf = kmalloc(total, GFP_NOIO); 2032 if (!buf) 2033 return -ENOMEM; 2034 } 2035 memcpy(buf, oloc->pool_ns->str, nsl); 2036 buf[nsl] = '\037'; 2037 memcpy(buf + nsl + 1, oid->name, oid->name_len); 2038 raw_pgid->pool = oloc->pool; 2039 raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total); 2040 if (buf != stack_buf) 2041 kfree(buf); 2042 dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__, 2043 oid->name, nsl, oloc->pool_ns->str, 2044 raw_pgid->pool, raw_pgid->seed); 2045 } 2046 return 0; 2047 } 2048 2049 int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, 2050 const struct ceph_object_id *oid, 2051 const struct ceph_object_locator *oloc, 2052 struct ceph_pg *raw_pgid) 2053 { 2054 struct ceph_pg_pool_info *pi; 2055 2056 pi = ceph_pg_pool_by_id(osdmap, oloc->pool); 2057 if (!pi) 2058 return -ENOENT; 2059 2060 return __ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid); 2061 } 2062 EXPORT_SYMBOL(ceph_object_locator_to_pg); 2063 2064 /* 2065 * Map a raw PG (full precision ps) into an actual PG. 2066 */ 2067 static void raw_pg_to_pg(struct ceph_pg_pool_info *pi, 2068 const struct ceph_pg *raw_pgid, 2069 struct ceph_pg *pgid) 2070 { 2071 pgid->pool = raw_pgid->pool; 2072 pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num, 2073 pi->pg_num_mask); 2074 } 2075 2076 /* 2077 * Map a raw PG (full precision ps) into a placement ps (placement 2078 * seed). Include pool id in that value so that different pools don't 2079 * use the same seeds. 2080 */ 2081 static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi, 2082 const struct ceph_pg *raw_pgid) 2083 { 2084 if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) { 2085 /* hash pool id and seed so that pool PGs do not overlap */ 2086 return crush_hash32_2(CRUSH_HASH_RJENKINS1, 2087 ceph_stable_mod(raw_pgid->seed, 2088 pi->pgp_num, 2089 pi->pgp_num_mask), 2090 raw_pgid->pool); 2091 } else { 2092 /* 2093 * legacy behavior: add ps and pool together. this is 2094 * not a great approach because the PGs from each pool 2095 * will overlap on top of each other: 0.5 == 1.4 == 2096 * 2.3 == ... 2097 */ 2098 return ceph_stable_mod(raw_pgid->seed, pi->pgp_num, 2099 pi->pgp_num_mask) + 2100 (unsigned)raw_pgid->pool; 2101 } 2102 } 2103 2104 static int do_crush(struct ceph_osdmap *map, int ruleno, int x, 2105 int *result, int result_max, 2106 const __u32 *weight, int weight_max) 2107 { 2108 int r; 2109 2110 BUG_ON(result_max > CEPH_PG_MAX_SIZE); 2111 2112 mutex_lock(&map->crush_workspace_mutex); 2113 r = crush_do_rule(map->crush, ruleno, x, result, result_max, 2114 weight, weight_max, map->crush_workspace, NULL); 2115 mutex_unlock(&map->crush_workspace_mutex); 2116 2117 return r; 2118 } 2119 2120 static void remove_nonexistent_osds(struct ceph_osdmap *osdmap, 2121 struct ceph_pg_pool_info *pi, 2122 struct ceph_osds *set) 2123 { 2124 int i; 2125 2126 if (ceph_can_shift_osds(pi)) { 2127 int removed = 0; 2128 2129 /* shift left */ 2130 for (i = 0; i < set->size; i++) { 2131 if (!ceph_osd_exists(osdmap, set->osds[i])) { 2132 removed++; 2133 continue; 2134 } 2135 if (removed) 2136 set->osds[i - removed] = set->osds[i]; 2137 } 2138 set->size -= removed; 2139 } else { 2140 /* set dne devices to NONE */ 2141 for (i = 0; i < set->size; i++) { 2142 if (!ceph_osd_exists(osdmap, set->osds[i])) 2143 set->osds[i] = CRUSH_ITEM_NONE; 2144 } 2145 } 2146 } 2147 2148 /* 2149 * Calculate raw set (CRUSH output) for given PG and filter out 2150 * nonexistent OSDs. ->primary is undefined for a raw set. 2151 * 2152 * Placement seed (CRUSH input) is returned through @ppps. 2153 */ 2154 static void pg_to_raw_osds(struct ceph_osdmap *osdmap, 2155 struct ceph_pg_pool_info *pi, 2156 const struct ceph_pg *raw_pgid, 2157 struct ceph_osds *raw, 2158 u32 *ppps) 2159 { 2160 u32 pps = raw_pg_to_pps(pi, raw_pgid); 2161 int ruleno; 2162 int len; 2163 2164 ceph_osds_init(raw); 2165 if (ppps) 2166 *ppps = pps; 2167 2168 ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type, 2169 pi->size); 2170 if (ruleno < 0) { 2171 pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n", 2172 pi->id, pi->crush_ruleset, pi->type, pi->size); 2173 return; 2174 } 2175 2176 if (pi->size > ARRAY_SIZE(raw->osds)) { 2177 pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n", 2178 pi->id, pi->crush_ruleset, pi->type, pi->size, 2179 ARRAY_SIZE(raw->osds)); 2180 return; 2181 } 2182 2183 len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size, 2184 osdmap->osd_weight, osdmap->max_osd); 2185 if (len < 0) { 2186 pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n", 2187 len, ruleno, pi->id, pi->crush_ruleset, pi->type, 2188 pi->size); 2189 return; 2190 } 2191 2192 raw->size = len; 2193 remove_nonexistent_osds(osdmap, pi, raw); 2194 } 2195 2196 /* apply pg_upmap[_items] mappings */ 2197 static void apply_upmap(struct ceph_osdmap *osdmap, 2198 const struct ceph_pg *pgid, 2199 struct ceph_osds *raw) 2200 { 2201 struct ceph_pg_mapping *pg; 2202 int i, j; 2203 2204 pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid); 2205 if (pg) { 2206 /* make sure targets aren't marked out */ 2207 for (i = 0; i < pg->pg_upmap.len; i++) { 2208 int osd = pg->pg_upmap.osds[i]; 2209 2210 if (osd != CRUSH_ITEM_NONE && 2211 osd < osdmap->max_osd && 2212 osdmap->osd_weight[osd] == 0) { 2213 /* reject/ignore explicit mapping */ 2214 return; 2215 } 2216 } 2217 for (i = 0; i < pg->pg_upmap.len; i++) 2218 raw->osds[i] = pg->pg_upmap.osds[i]; 2219 raw->size = pg->pg_upmap.len; 2220 return; 2221 } 2222 2223 pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid); 2224 if (pg) { 2225 /* 2226 * Note: this approach does not allow a bidirectional swap, 2227 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1]. 2228 */ 2229 for (i = 0; i < pg->pg_upmap_items.len; i++) { 2230 int from = pg->pg_upmap_items.from_to[i][0]; 2231 int to = pg->pg_upmap_items.from_to[i][1]; 2232 int pos = -1; 2233 bool exists = false; 2234 2235 /* make sure replacement doesn't already appear */ 2236 for (j = 0; j < raw->size; j++) { 2237 int osd = raw->osds[j]; 2238 2239 if (osd == to) { 2240 exists = true; 2241 break; 2242 } 2243 /* ignore mapping if target is marked out */ 2244 if (osd == from && pos < 0 && 2245 !(to != CRUSH_ITEM_NONE && 2246 to < osdmap->max_osd && 2247 osdmap->osd_weight[to] == 0)) { 2248 pos = j; 2249 } 2250 } 2251 if (!exists && pos >= 0) { 2252 raw->osds[pos] = to; 2253 return; 2254 } 2255 } 2256 } 2257 } 2258 2259 /* 2260 * Given raw set, calculate up set and up primary. By definition of an 2261 * up set, the result won't contain nonexistent or down OSDs. 2262 * 2263 * This is done in-place - on return @set is the up set. If it's 2264 * empty, ->primary will remain undefined. 2265 */ 2266 static void raw_to_up_osds(struct ceph_osdmap *osdmap, 2267 struct ceph_pg_pool_info *pi, 2268 struct ceph_osds *set) 2269 { 2270 int i; 2271 2272 /* ->primary is undefined for a raw set */ 2273 BUG_ON(set->primary != -1); 2274 2275 if (ceph_can_shift_osds(pi)) { 2276 int removed = 0; 2277 2278 /* shift left */ 2279 for (i = 0; i < set->size; i++) { 2280 if (ceph_osd_is_down(osdmap, set->osds[i])) { 2281 removed++; 2282 continue; 2283 } 2284 if (removed) 2285 set->osds[i - removed] = set->osds[i]; 2286 } 2287 set->size -= removed; 2288 if (set->size > 0) 2289 set->primary = set->osds[0]; 2290 } else { 2291 /* set down/dne devices to NONE */ 2292 for (i = set->size - 1; i >= 0; i--) { 2293 if (ceph_osd_is_down(osdmap, set->osds[i])) 2294 set->osds[i] = CRUSH_ITEM_NONE; 2295 else 2296 set->primary = set->osds[i]; 2297 } 2298 } 2299 } 2300 2301 static void apply_primary_affinity(struct ceph_osdmap *osdmap, 2302 struct ceph_pg_pool_info *pi, 2303 u32 pps, 2304 struct ceph_osds *up) 2305 { 2306 int i; 2307 int pos = -1; 2308 2309 /* 2310 * Do we have any non-default primary_affinity values for these 2311 * osds? 2312 */ 2313 if (!osdmap->osd_primary_affinity) 2314 return; 2315 2316 for (i = 0; i < up->size; i++) { 2317 int osd = up->osds[i]; 2318 2319 if (osd != CRUSH_ITEM_NONE && 2320 osdmap->osd_primary_affinity[osd] != 2321 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) { 2322 break; 2323 } 2324 } 2325 if (i == up->size) 2326 return; 2327 2328 /* 2329 * Pick the primary. Feed both the seed (for the pg) and the 2330 * osd into the hash/rng so that a proportional fraction of an 2331 * osd's pgs get rejected as primary. 2332 */ 2333 for (i = 0; i < up->size; i++) { 2334 int osd = up->osds[i]; 2335 u32 aff; 2336 2337 if (osd == CRUSH_ITEM_NONE) 2338 continue; 2339 2340 aff = osdmap->osd_primary_affinity[osd]; 2341 if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY && 2342 (crush_hash32_2(CRUSH_HASH_RJENKINS1, 2343 pps, osd) >> 16) >= aff) { 2344 /* 2345 * We chose not to use this primary. Note it 2346 * anyway as a fallback in case we don't pick 2347 * anyone else, but keep looking. 2348 */ 2349 if (pos < 0) 2350 pos = i; 2351 } else { 2352 pos = i; 2353 break; 2354 } 2355 } 2356 if (pos < 0) 2357 return; 2358 2359 up->primary = up->osds[pos]; 2360 2361 if (ceph_can_shift_osds(pi) && pos > 0) { 2362 /* move the new primary to the front */ 2363 for (i = pos; i > 0; i--) 2364 up->osds[i] = up->osds[i - 1]; 2365 up->osds[0] = up->primary; 2366 } 2367 } 2368 2369 /* 2370 * Get pg_temp and primary_temp mappings for given PG. 2371 * 2372 * Note that a PG may have none, only pg_temp, only primary_temp or 2373 * both pg_temp and primary_temp mappings. This means @temp isn't 2374 * always a valid OSD set on return: in the "only primary_temp" case, 2375 * @temp will have its ->primary >= 0 but ->size == 0. 2376 */ 2377 static void get_temp_osds(struct ceph_osdmap *osdmap, 2378 struct ceph_pg_pool_info *pi, 2379 const struct ceph_pg *pgid, 2380 struct ceph_osds *temp) 2381 { 2382 struct ceph_pg_mapping *pg; 2383 int i; 2384 2385 ceph_osds_init(temp); 2386 2387 /* pg_temp? */ 2388 pg = lookup_pg_mapping(&osdmap->pg_temp, pgid); 2389 if (pg) { 2390 for (i = 0; i < pg->pg_temp.len; i++) { 2391 if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) { 2392 if (ceph_can_shift_osds(pi)) 2393 continue; 2394 2395 temp->osds[temp->size++] = CRUSH_ITEM_NONE; 2396 } else { 2397 temp->osds[temp->size++] = pg->pg_temp.osds[i]; 2398 } 2399 } 2400 2401 /* apply pg_temp's primary */ 2402 for (i = 0; i < temp->size; i++) { 2403 if (temp->osds[i] != CRUSH_ITEM_NONE) { 2404 temp->primary = temp->osds[i]; 2405 break; 2406 } 2407 } 2408 } 2409 2410 /* primary_temp? */ 2411 pg = lookup_pg_mapping(&osdmap->primary_temp, pgid); 2412 if (pg) 2413 temp->primary = pg->primary_temp.osd; 2414 } 2415 2416 /* 2417 * Map a PG to its acting set as well as its up set. 2418 * 2419 * Acting set is used for data mapping purposes, while up set can be 2420 * recorded for detecting interval changes and deciding whether to 2421 * resend a request. 2422 */ 2423 void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap, 2424 struct ceph_pg_pool_info *pi, 2425 const struct ceph_pg *raw_pgid, 2426 struct ceph_osds *up, 2427 struct ceph_osds *acting) 2428 { 2429 struct ceph_pg pgid; 2430 u32 pps; 2431 2432 WARN_ON(pi->id != raw_pgid->pool); 2433 raw_pg_to_pg(pi, raw_pgid, &pgid); 2434 2435 pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps); 2436 apply_upmap(osdmap, &pgid, up); 2437 raw_to_up_osds(osdmap, pi, up); 2438 apply_primary_affinity(osdmap, pi, pps, up); 2439 get_temp_osds(osdmap, pi, &pgid, acting); 2440 if (!acting->size) { 2441 memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0])); 2442 acting->size = up->size; 2443 if (acting->primary == -1) 2444 acting->primary = up->primary; 2445 } 2446 WARN_ON(!osds_valid(up) || !osds_valid(acting)); 2447 } 2448 2449 bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap, 2450 struct ceph_pg_pool_info *pi, 2451 const struct ceph_pg *raw_pgid, 2452 struct ceph_spg *spgid) 2453 { 2454 struct ceph_pg pgid; 2455 struct ceph_osds up, acting; 2456 int i; 2457 2458 WARN_ON(pi->id != raw_pgid->pool); 2459 raw_pg_to_pg(pi, raw_pgid, &pgid); 2460 2461 if (ceph_can_shift_osds(pi)) { 2462 spgid->pgid = pgid; /* struct */ 2463 spgid->shard = CEPH_SPG_NOSHARD; 2464 return true; 2465 } 2466 2467 ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting); 2468 for (i = 0; i < acting.size; i++) { 2469 if (acting.osds[i] == acting.primary) { 2470 spgid->pgid = pgid; /* struct */ 2471 spgid->shard = i; 2472 return true; 2473 } 2474 } 2475 2476 return false; 2477 } 2478 2479 /* 2480 * Return acting primary for given PG, or -1 if none. 2481 */ 2482 int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, 2483 const struct ceph_pg *raw_pgid) 2484 { 2485 struct ceph_pg_pool_info *pi; 2486 struct ceph_osds up, acting; 2487 2488 pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool); 2489 if (!pi) 2490 return -1; 2491 2492 ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting); 2493 return acting.primary; 2494 } 2495 EXPORT_SYMBOL(ceph_pg_to_acting_primary); 2496