#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, int state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
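/*
 * A worked example: pg_num = 12 gives calc_bits_of(11) = 4 and thus
 * pg_num_mask = (1 << 4) - 1 = 15.  ceph_stable_mod() folds a raw pg
 * seed into [0, pg_num) with this mask: it tries seed & 15 first and
 * falls back to seed & 7 if the result would be >= 12, so the mapping
 * stays stable as pg_num grows.
 */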
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int skip_name_map(void **p, void *end)
{
	int len;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		int strlen;
		*p += sizeof(u32);
		ceph_decode_32_safe(p, end, strlen, bad);
		*p += strlen;
	}
	return 0;
bad:
	return -EINVAL;
}

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;
	u32 num_name_maps;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				 (struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */
	for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
		err = skip_name_map(p, end);
		if (err < 0)
			goto done;
	}

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

done:
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}
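/*
 * Note on the tail of crush_decode(): the tunables were appended to
 * the crush map encoding after the name maps, so a map from an older
 * cluster simply ends early.  That is why the tunable reads above
 * bail out to the "done" label (keeping the legacy defaults set at
 * the top of the function) instead of failing with "bad".
 */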
/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	if (l.pool < r.pool)
		return -1;
	if (l.pool > r.pool)
		return 1;
	if (l.seed < r.seed)
		return -1;
	if (l.seed > r.seed)
		return 1;
	return 0;
}

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0) {
			n = n->rb_left;
		} else if (c > 0) {
			n = n->rb_right;
		} else {
			dout("__lookup_pg_mapping %lld.%x got %p\n",
			     pgid.pool, pgid.seed, pg);
			return pg;
		}
	}
	return NULL;
}

static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
	struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

	if (pg) {
		dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
		     pg);
		rb_erase(&pg->node, root);
		kfree(pg);
		return 0;
	}
	dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
	return -ENOENT;
}
/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p); /* encoding version */
	cv = ceph_decode_8(p); /* compat version */
	if (ev < 5) {
		pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warning("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4; /* skip lpg* */
	*p += 4;     /* skip last_change */
	*p += 8 + 4; /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;     /* snapid key */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8; /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4; /* skip crash_replay_interval */

	if (ev >= 7)
		*p += 1; /* skip min_size */

	if (ev >= 8)
		*p += 8 + 8; /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8; /* skip tier_of */
		*p += 1; /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}

static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}
/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->primary_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->primary_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map->osd_primary_affinity);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	int i;

	/*
	 * Commit each krealloc() result as soon as it succeeds, so that
	 * the map->osd_* pointers never dangle if a later allocation
	 * fails: on failure krealloc() leaves the old buffer intact.
	 */
	state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
	if (!state)
		return -ENOMEM;
	map->osd_state = state;

	weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
	if (!weight)
		return -ENOMEM;
	map->osd_weight = weight;

	addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
	if (!addr)
		return -ENOMEM;
	map->osd_addr = addr;

	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = krealloc(map->osd_primary_affinity,
				    max*sizeof(*affinity), GFP_NOFS);
		if (!affinity)
			return -ENOMEM;

		for (i = map->max_osd; i < max; i++)
			affinity[i] = CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

		map->osd_primary_affinity = affinity;
	}

	map->max_osd = max;

	return 0;
}
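/*
 * Note that krealloc(NULL, ...) behaves like kmalloc(), so
 * osdmap_set_max_osd() also performs the initial allocation of the
 * per-osd arrays when a freshly kzalloc'ed map (max_osd == 0) is
 * decoded for the first time.
 */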
#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warning("got v %d cv %d > %d of %s ceph_osdmap\n",
				   struct_v, struct_compat,
				   OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warning("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				   struct_v, struct_compat,
				   OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warning("got v %d < 6 of %s ceph_osdmap\n", version,
				   prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}
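/*
 * For illustration, the two layouts handled above look like this on
 * the wire:
 *
 *   v7+:    u8 struct_v  u8 struct_compat  u32 struct_len
 *           u8 client_v  u8 client_compat  u32 client_len  <client data>
 *   legacy: u16 version (= 6)  <map data>
 *
 * A legacy map's first byte is the low byte of its little-endian u16
 * version, which is why the decoder rewinds *p by one and re-reads it
 * as a u16 when struct_v < 7.
 */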
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

static int __decode_pg_temp(void **p, void *end, struct ceph_osdmap *map,
			    bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg pgid;
		u32 len, i;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		ceph_decode_32_safe(p, end, len, e_inval);

		ret = __remove_pg_mapping(&map->pg_temp, pgid);
		BUG_ON(!incremental && ret != -ENOENT);

		if (!incremental || len > 0) {
			struct ceph_pg_mapping *pg;

			ceph_decode_need(p, end, len*sizeof(u32), e_inval);

			if (len > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
				return -EINVAL;

			pg = kzalloc(sizeof(*pg) + len*sizeof(u32), GFP_NOFS);
			if (!pg)
				return -ENOMEM;

			pg->pgid = pgid;
			pg->pg_temp.len = len;
			for (i = 0; i < len; i++)
				pg->pg_temp.osds[i] = ceph_decode_32(p);

			ret = __insert_pg_mapping(pg, &map->pg_temp);
			if (ret) {
				kfree(pg);
				return ret;
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pg_temp(p, end, map, false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pg_temp(p, end, map, true);
}

static int __decode_primary_temp(void **p, void *end, struct ceph_osdmap *map,
				 bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg pgid;
		u32 osd;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		ceph_decode_32_safe(p, end, osd, e_inval);

		ret = __remove_pg_mapping(&map->primary_temp, pgid);
		BUG_ON(!incremental && ret != -ENOENT);

		if (!incremental || osd != (u32)-1) {
			struct ceph_pg_mapping *pg;

			pg = kzalloc(sizeof(*pg), GFP_NOFS);
			if (!pg)
				return -ENOMEM;

			pg->pgid = pgid;
			pg->primary_temp.osd = osd;

			ret = __insert_pg_mapping(pg, &map->primary_temp);
			if (ret) {
				kfree(pg);
				return ret;
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_primary_temp(p, end, map, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return __decode_primary_temp(p, end, map, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
						    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}

static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}
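/*
 * In the incremental variants above, an entry with an empty value acts
 * as a removal: a new_pg_temp item with len == 0 or a new_primary_temp
 * item with osd == (u32)-1 only drops the existing mapping from the
 * tree and inserts nothing in its place.
 */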
/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), e_inval);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_state, map->max_osd);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		/* XXX can this happen? */
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	map->crush = crush_decode(*p, min(*p + len, end));
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}
	*p += len;

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}
/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	mutex_init(&map->crush_scratch_mutex);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}
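/*
 * A minimal usage sketch (the caller below is hypothetical), showing
 * the ERR_PTR convention used by ceph_osdmap_decode():
 *
 *	struct ceph_osdmap *map;
 *
 *	map = ceph_osdmap_decode(&p, end);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	...
 *	ceph_osdmap_destroy(map);
 */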
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush)) {
			err = PTR_ERR(newcrush);
			newcrush = NULL;
			goto bad;
		}
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_state */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		u32 osd;
		u8 xorstate;
		ceph_decode_32_safe(p, end, osd, e_inval);
		xorstate = **(u8 **)p;
		(*p)++;  /* clean flag */
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		if (xorstate & CEPH_OSD_UP)
			pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] ^= xorstate;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
		     off == CEPH_OSD_IN ? "(in)" :
		     (off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}
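/*
 * Example of the new_state xor semantics above: an osd whose state is
 * CEPH_OSD_EXISTS | CEPH_OSD_UP xored with CEPH_OSD_UP is left with
 * just CEPH_OSD_EXISTS, i.e. it is marked down; a 0 byte on the wire
 * is treated as CEPH_OSD_UP for compatibility with older encodings.
 * Weights use a 16.16 fixed point scale: CEPH_OSD_OUT is 0 and
 * CEPH_OSD_IN is 0x10000 (weight 1.0).
 */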
/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 len,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object.  This is the minimum of the full length requested (len) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, len, su - su_offset);

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
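/*
 * Worked example for the mapping above: with osize = 4 MB, su = 1 MB,
 * sc = 3 (so su_per_object = 4) and off = 5 MB: bl = 5, stripeno = 1,
 * stripepos = 2, objsetno = 0, hence *ono = 0 * 3 + 2 = 2; su_offset
 * is 0 and stripeno % su_per_object is 1, so *oxoff = 1 MB and
 * *oxlen = min(len, 1 MB).
 */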
/*
 * Calculate mapping of a (oloc, oid) pair to a PG.  Should only be
 * called with target's (oloc, oid), since tiering isn't taken into
 * account.
 */
int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
			struct ceph_object_locator *oloc,
			struct ceph_object_id *oid,
			struct ceph_pg *pg_out)
{
	struct ceph_pg_pool_info *pi;

	pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool);
	if (!pi)
		return -EIO;

	pg_out->pool = oloc->pool;
	pg_out->seed = ceph_str_hash(pi->object_hash, oid->name,
				     oid->name_len);

	dout("%s '%.*s' pgid %llu.%x\n", __func__, oid->name_len, oid->name,
	     pg_out->pool, pg_out->seed);
	return 0;
}
EXPORT_SYMBOL(ceph_oloc_oid_to_pg);

static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max)
{
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	mutex_lock(&map->crush_scratch_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_scratch_ary);
	mutex_unlock(&map->crush_scratch_mutex);

	return r;
}

/*
 * Calculate raw (crush) set for given pgid.
 *
 * Return raw set length, or error.
 */
static int pg_to_raw_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pool,
			  struct ceph_pg pgid, u32 pps, int *osds)
{
	int ruleno;
	int len;

	/* crush */
	ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
				 pool->type, pool->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pgid.pool, pool->crush_ruleset, pool->type,
		       pool->size);
		return -ENOENT;
	}

	len = do_crush(osdmap, ruleno, pps, osds,
		       min_t(int, pool->size, CEPH_PG_MAX_SIZE),
		       osdmap->osd_weight, osdmap->max_osd);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pgid.pool, pool->crush_ruleset,
		       pool->type, pool->size);
		return len;
	}

	return len;
}

/*
 * Given raw set, calculate up set and up primary.
 *
 * Return up set length.  *primary is set to up primary osd id, or -1
 * if up set is empty.
 */
static int raw_to_up_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pool,
			  int *osds, int len, int *primary)
{
	int up_primary = -1;
	int i;

	if (ceph_can_shift_osds(pool)) {
		int removed = 0;

		for (i = 0; i < len; i++) {
			if (ceph_osd_is_down(osdmap, osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				osds[i - removed] = osds[i];
		}

		len -= removed;
		if (len > 0)
			up_primary = osds[0];
	} else {
		for (i = len - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, osds[i]))
				osds[i] = CRUSH_ITEM_NONE;
			else
				up_primary = osds[i];
		}
	}

	*primary = up_primary;
	return len;
}

static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
				   struct ceph_pg_pool_info *pool,
				   int *osds, int len, int *primary)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < len; i++) {
		int osd = osds[i];

		/* index by osd id, not by position in the raw set */
		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == len)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < len; i++) {
		int osd;
		u32 aff;

		osd = osds[i];
		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	*primary = osds[pos];

	if (ceph_can_shift_osds(pool) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			osds[i] = osds[i - 1];
		osds[0] = *primary;
	}
}
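/*
 * Primary affinity values are 16.16 fixed point, with
 * CEPH_OSD_MAX_PRIMARY_AFFINITY (0x10000) meaning 1.0.  Since
 * crush_hash32_2(...) >> 16 above is a pseudo-random value in
 * [0, 0x10000), an osd with aff = 0x8000 (0.5) is rejected as primary
 * for roughly half of its pgs.
 */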
/*
 * Given up set, apply pg_temp and primary_temp mappings.
 *
 * Return acting set length.  *primary is set to acting primary osd id,
 * or -1 if acting set is empty.
 */
static int apply_temps(struct ceph_osdmap *osdmap,
		       struct ceph_pg_pool_info *pool, struct ceph_pg pgid,
		       int *osds, int len, int *primary)
{
	struct ceph_pg_mapping *pg;
	int temp_len;
	int temp_primary;
	int i;

	/* raw_pg -> pg */
	pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
				    pool->pg_num_mask);

	/* pg_temp? */
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		temp_len = 0;
		temp_primary = -1;

		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pool))
					continue;
				else
					osds[temp_len++] = CRUSH_ITEM_NONE;
			} else {
				osds[temp_len++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp_len; i++) {
			if (osds[i] != CRUSH_ITEM_NONE) {
				temp_primary = osds[i];
				break;
			}
		}
	} else {
		temp_len = len;
		temp_primary = *primary;
	}

	/* primary_temp? */
	pg = __lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp_primary = pg->primary_temp.osd;

	*primary = temp_primary;
	return temp_len;
}

/*
 * Calculate acting set for given pgid.
 *
 * Return acting set length, or error.  *primary is set to acting
 * primary osd id, or -1 if acting set is empty or on error.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *primary)
{
	struct ceph_pg_pool_info *pool;
	u32 pps;
	int len;

	pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
	if (!pool) {
		*primary = -1;
		return -ENOENT;
	}

	if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
				     ceph_stable_mod(pgid.seed, pool->pgp_num,
						     pool->pgp_num_mask),
				     pgid.pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
				      pool->pgp_num_mask) +
			(unsigned)pgid.pool;
	}

	len = pg_to_raw_osds(osdmap, pool, pgid, pps, osds);
	if (len < 0) {
		*primary = -1;
		return len;
	}

	len = raw_to_up_osds(osdmap, pool, osds, len, primary);

	apply_primary_affinity(osdmap, pps, pool, osds, len, primary);

	len = apply_temps(osdmap, pool, pgid, osds, len, primary);

	return len;
}

/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int osds[CEPH_PG_MAX_SIZE];
	int primary;

	ceph_calc_pg_acting(osdmap, pgid, osds, &primary);

	return primary;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);
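/*
 * Putting the pieces together, the object -> osd pipeline implemented
 * in this file is, in sketch form (error handling omitted):
 *
 *	struct ceph_pg pgid;
 *	int osds[CEPH_PG_MAX_SIZE], primary;
 *
 *	ceph_oloc_oid_to_pg(osdmap, oloc, oid, &pgid);
 *	ceph_calc_pg_acting(osdmap, pgid, osds, &primary);
 *
 * i.e. hash the object name into a pg, run crush for the raw set, drop
 * down osds, then apply primary affinity and the pg_temp/primary_temp
 * overrides.
 */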