1 2 #include <linux/ceph/ceph_debug.h> 3 4 #include <linux/module.h> 5 #include <linux/slab.h> 6 #include <asm/div64.h> 7 8 #include <linux/ceph/libceph.h> 9 #include <linux/ceph/osdmap.h> 10 #include <linux/ceph/decode.h> 11 #include <linux/crush/hash.h> 12 #include <linux/crush/mapper.h> 13 14 char *ceph_osdmap_state_str(char *str, int len, int state) 15 { 16 int flag = 0; 17 18 if (!len) 19 goto done; 20 21 *str = '\0'; 22 if (state) { 23 if (state & CEPH_OSD_EXISTS) { 24 snprintf(str, len, "exists"); 25 flag = 1; 26 } 27 if (state & CEPH_OSD_UP) { 28 snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""), 29 "up"); 30 flag = 1; 31 } 32 } else { 33 snprintf(str, len, "doesn't exist"); 34 } 35 done: 36 return str; 37 } 38 39 /* maps */ 40 41 static int calc_bits_of(unsigned int t) 42 { 43 int b = 0; 44 while (t) { 45 t = t >> 1; 46 b++; 47 } 48 return b; 49 } 50 51 /* 52 * the foo_mask is the smallest value 2^n-1 that is >= foo. 53 */ 54 static void calc_pg_masks(struct ceph_pg_pool_info *pi) 55 { 56 pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1; 57 pi->pgp_num_mask = 58 (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1; 59 pi->lpg_num_mask = 60 (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1; 61 pi->lpgp_num_mask = 62 (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1; 63 } 64 65 /* 66 * decode crush map 67 */ 68 static int crush_decode_uniform_bucket(void **p, void *end, 69 struct crush_bucket_uniform *b) 70 { 71 dout("crush_decode_uniform_bucket %p to %p\n", *p, end); 72 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); 73 b->item_weight = ceph_decode_32(p); 74 return 0; 75 bad: 76 return -EINVAL; 77 } 78 79 static int crush_decode_list_bucket(void **p, void *end, 80 struct crush_bucket_list *b) 81 { 82 int j; 83 dout("crush_decode_list_bucket %p to %p\n", *p, end); 84 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 85 if (b->item_weights == NULL) 86 return -ENOMEM; 87 b->sum_weights = 
kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	/* item_weights[] and sum_weights[] are encoded interleaved */
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

/* Decode a tree bucket: a node count followed by per-node weights. */
static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

/* Decode a straw bucket: interleaved (item_weight, straw) pairs. */
static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

/*
 * Skip one encoded (id -> name) map without storing it.
 * NOTE(review): "*p += strlen" advances without an explicit bounds
 * check; the next _safe decode rejects an overrun only if the
 * addition did not wrap the pointer -- confirm input lengths are
 * sane at the callers.
 */
static int skip_name_map(void **p, void *end)
{
	int len;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		int strlen;
		*p += sizeof(u32);	/* key (id) */
		ceph_decode_32_safe(p, end, strlen, bad);
		*p += strlen;		/* name bytes */
	}
	return 0;
bad:
	return -EINVAL;
}

/*
 * Decode a crush map from [pbyval, end).  Returns a freshly allocated
 * crush_map on success, or ERR_PTR(-EINVAL/-ENOMEM) on failure (any
 * partially built map is torn down with crush_destroy()).
 */
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32
magic;
	u32 num_name_maps;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			/* alg 0 encodes an empty bucket slot */
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* pick the allocation size for this bucket variant */
		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		/* common bucket header: 16 bytes == 4 u32s */
		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		/* algorithm-specific payload */
		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				 (struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		/* guard the kmalloc size computation against overflow */
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */
	for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
		err = skip_name_map(p, end);
		if (err < 0)
			goto done;
	}

	/* tunables: optional trailer, defaults (set above) kept if absent */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d",
	     c->choose_total_tries);

done:
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
/* Compare two pgids by their raw 64-bit encoding. */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	u64 a = *(u64 *)&l;
	u64 b = *(u64 *)&r;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

/* Insert a pg_temp mapping; -EEXIST if the pgid is already present. */
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

/* Find the pg_temp mapping for @pgid, or NULL if none is present. */
static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0) {
			n = n->rb_left;
		} else if (c > 0) {
			n = n->rb_right;
		} else {
			dout("__lookup_pg_mapping %llx got %p\n",
			     *(u64 *)&pgid, pg);
			return pg;
		}
	}
	return NULL;
}

/* Erase and free the pg_temp entry for @pgid; -ENOENT if absent. */
static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
	struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

	if (pg) {
		dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
		rb_erase(&pg->node, root);
		kfree(pg);
		return 0;
	}
	dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
	return -ENOENT;
}

/*
 * rbtree of pg pool info
 */
/* Insert pool info keyed by pool id; -EEXIST on duplicate id. */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

/* Look up pool info by pool id; NULL if unknown. */
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

/* Map a pool id to its name; NULL for CEPH_NOPOOL or unknown ids. */
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

/* Linear scan for a pool with the given name; -ENOENT if not found. */
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

/* Unlink pool info from the tree and free it (name included). */
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

/*
 * Decode one ceph_pg_pool struct plus its variable-length snapshot
 * tail into @pi.  The caller must already have length-checked the
 * fixed-size sizeof(pi->v) portion.
 */
static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	unsigned int n, m;

	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
	calc_pg_masks(pi);

	/* num_snaps * snap_info_t */
	n = le32_to_cpu(pi->v.num_snaps);
	while (n--) {
		ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
				 sizeof(struct ceph_timespec), bad);
		*p += sizeof(u64) +	/* key */
			1 + sizeof(u64) +	/* u8, snapid */
			sizeof(struct ceph_timespec);
		/*
		 * NOTE(review): the need-check above does not cover the
		 * 4-byte name length nor the name bytes skipped below; a
		 * bogus length is only caught by the next bounds check --
		 * verify the callers' inputs are trusted.
		 */
		m = ceph_decode_32(p);	/* snap name */
		*p += m;
	}

	/* skip removed_snaps interval set: pairs of u64 */
	*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
	return 0;

bad:
	return -EINVAL;
}

/*
 * Decode the pool-name map and attach names to known pools; names
 * for unknown pool ids are skipped.
 */
static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len, pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
dout(" pool %d len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);	/* replace any stale name */
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;	/* skip the name bytes even if pool unknown */
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
/* Free an osdmap and everything it owns: crush map, pg_temp tree,
 * pool tree, and the per-osd state/weight/addr arrays. */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}

/*
 * adjust max osd value.  reallocate arrays.
 *
 * NOTE(review): the copy below always copies map->max_osd entries;
 * if @max were ever smaller than the current max_osd this would
 * overrun the freshly allocated arrays.  Callers appear to only grow
 * the map -- confirm before relying on shrink.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old? */
	if (map->osd_state) {
		memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
		memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
		memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}

/*
 * decode a full map.
 */
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	u8 ev;
	int err = -EINVAL;
	void *start = *p;
	struct ceph_pg_pool_info *pi;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_VERSION) {
		pr_warning("got unknown v %d > %d of osdmap\n", version,
			   CEPH_OSDMAP_VERSION);
		goto bad;
	}

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
		err = -ENOMEM;
		pi = kzalloc(sizeof(*pi), GFP_NOFS);
		if (!pi)
			goto bad;
		pi->id = ceph_decode_32(p);
		err = -EINVAL;
		ev = ceph_decode_8(p);	/* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			kfree(pi);
			goto bad;
		}
		err = __decode_pool(p, end, pi);
		if (err < 0) {
			kfree(pi);
			goto bad;
		}
		/* NOTE(review): return ignored -- a duplicate pool id
		 * would leak pi; confirm the server never sends dups */
		__insert_pg_pool(&map->pg_pools, pi);
683 } 684 685 if (version >= 5) { 686 err = __decode_pool_names(p, end, map); 687 if (err < 0) { 688 dout("fail to decode pool names"); 689 goto bad; 690 } 691 } 692 693 ceph_decode_32_safe(p, end, map->pool_max, bad); 694 695 ceph_decode_32_safe(p, end, map->flags, bad); 696 697 max = ceph_decode_32(p); 698 699 /* (re)alloc osd arrays */ 700 err = osdmap_set_max_osd(map, max); 701 if (err < 0) 702 goto bad; 703 dout("osdmap_decode max_osd = %d\n", map->max_osd); 704 705 /* osds */ 706 err = -EINVAL; 707 ceph_decode_need(p, end, 3*sizeof(u32) + 708 map->max_osd*(1 + sizeof(*map->osd_weight) + 709 sizeof(*map->osd_addr)), bad); 710 *p += 4; /* skip length field (should match max) */ 711 ceph_decode_copy(p, map->osd_state, map->max_osd); 712 713 *p += 4; /* skip length field (should match max) */ 714 for (i = 0; i < map->max_osd; i++) 715 map->osd_weight[i] = ceph_decode_32(p); 716 717 *p += 4; /* skip length field (should match max) */ 718 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); 719 for (i = 0; i < map->max_osd; i++) 720 ceph_decode_addr(&map->osd_addr[i]); 721 722 /* pg_temp */ 723 ceph_decode_32_safe(p, end, len, bad); 724 for (i = 0; i < len; i++) { 725 int n, j; 726 struct ceph_pg pgid; 727 struct ceph_pg_mapping *pg; 728 729 ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); 730 ceph_decode_copy(p, &pgid, sizeof(pgid)); 731 n = ceph_decode_32(p); 732 err = -EINVAL; 733 if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) 734 goto bad; 735 ceph_decode_need(p, end, n * sizeof(u32), bad); 736 err = -ENOMEM; 737 pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS); 738 if (!pg) 739 goto bad; 740 pg->pgid = pgid; 741 pg->len = n; 742 for (j = 0; j < n; j++) 743 pg->osds[j] = ceph_decode_32(p); 744 745 err = __insert_pg_mapping(pg, &map->pg_temp); 746 if (err) 747 goto bad; 748 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len); 749 } 750 751 /* crush */ 752 ceph_decode_32_safe(p, end, len, bad); 753 dout("osdmap_decode crush 
len %d from off 0x%x\n", len, 754 (int)(*p - start)); 755 ceph_decode_need(p, end, len, bad); 756 map->crush = crush_decode(*p, end); 757 *p += len; 758 if (IS_ERR(map->crush)) { 759 err = PTR_ERR(map->crush); 760 map->crush = NULL; 761 goto bad; 762 } 763 764 /* ignore the rest of the map */ 765 *p = end; 766 767 dout("osdmap_decode done %p %p\n", *p, end); 768 return map; 769 770 bad: 771 dout("osdmap_decode fail err %d\n", err); 772 ceph_osdmap_destroy(map); 773 return ERR_PTR(err); 774 } 775 776 /* 777 * decode and apply an incremental map update. 778 */ 779 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, 780 struct ceph_osdmap *map, 781 struct ceph_messenger *msgr) 782 { 783 struct crush_map *newcrush = NULL; 784 struct ceph_fsid fsid; 785 u32 epoch = 0; 786 struct ceph_timespec modified; 787 u32 len, pool; 788 __s32 new_pool_max, new_flags, max; 789 void *start = *p; 790 int err = -EINVAL; 791 u16 version; 792 793 ceph_decode_16_safe(p, end, version, bad); 794 if (version > CEPH_OSDMAP_INC_VERSION) { 795 pr_warning("got unknown v %d > %d of inc osdmap\n", version, 796 CEPH_OSDMAP_INC_VERSION); 797 goto bad; 798 } 799 800 ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32), 801 bad); 802 ceph_decode_copy(p, &fsid, sizeof(fsid)); 803 epoch = ceph_decode_32(p); 804 BUG_ON(epoch != map->epoch+1); 805 ceph_decode_copy(p, &modified, sizeof(modified)); 806 new_pool_max = ceph_decode_32(p); 807 new_flags = ceph_decode_32(p); 808 809 /* full map? */ 810 ceph_decode_32_safe(p, end, len, bad); 811 if (len > 0) { 812 dout("apply_incremental full map len %d, %p to %p\n", 813 len, *p, end); 814 return osdmap_decode(p, min(*p+len, end)); 815 } 816 817 /* new crush? 
*/
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_CAST(newcrush);
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	max = ceph_decode_32(p);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		/* swap in the freshly decoded crush map */
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		__u8 ev;
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
		ev = ceph_decode_8(p);	/* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			err = -EINVAL;
			goto bad;
		}
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!pi) {
			/* previously unknown pool: create and insert it */
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi) {
				err = -ENOMEM;
				goto bad;
			}
			pi->id = pool;
			__insert_pg_pool(&map->pg_pools, pi);
		}
		err = __decode_pool(p, end, pi);
		if (err < 0)
			goto bad;
	}
	if (version >= 5) {
		err = __decode_pool_names(p, end, map);
		if (err < 0)
			goto bad;
	}

	/* old_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	err = -EINVAL;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_state */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		u8 xorstate;
		ceph_decode_32_safe(p, end, osd, bad);
		/* NOTE(review): this extra byte is read without a bounds
		 * check; only the 4-byte osd id was length-checked */
		xorstate = **(u8 **)p;
		(*p)++;	/* clean flag */
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		if (xorstate & CEPH_OSD_UP)
			pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] ^= xorstate;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
			off == CEPH_OSD_IN ? "(in)" :
			(off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		struct ceph_pg pgid;
		u32 pglen;
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		pglen = ceph_decode_32(p);

		if (pglen) {
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);

			/* removing existing (if any) */
			(void) __remove_pg_mapping(&map->pg_temp, pgid);

			/* insert */
			err = -EINVAL;
			/* guard the allocation size computation below */
			if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
				goto bad;
			err = -ENOMEM;
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg)
				goto bad;
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				pg->osds[j] = ceph_decode_32(p);
			err = __insert_pg_mapping(pg, &map->pg_temp);
			if (err) {
				kfree(pg);	/* not in the tree; avoid leak */
				goto bad;
			}
			dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
			     pglen);
		} else {
			/* remove */
			__remove_pg_mapping(&map->pg_temp, pgid);
		}
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}




/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
*/
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 *plen,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	/* stripe unit must be page-aligned */
	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;	/* which stripe (row) */
	stripepos = bl % sc;	/* position within the stripe (column) */
	objsetno = stripeno / su_per_object;	/* which object set */

	*ono = objsetno * sc + stripepos;	/* object number */
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);	/* do_div() yields the remainder */
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object.  This is the minimum of the full length requested (plen) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, *plen, su - su_offset);
	*plen = *oxlen;

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);

/*
 * calculate an object layout (i.e.
pgid) from an oid, 1077 * file_layout, and osdmap 1078 */ 1079 int ceph_calc_object_layout(struct ceph_object_layout *ol, 1080 const char *oid, 1081 struct ceph_file_layout *fl, 1082 struct ceph_osdmap *osdmap) 1083 { 1084 unsigned int num, num_mask; 1085 struct ceph_pg pgid; 1086 int poolid = le32_to_cpu(fl->fl_pg_pool); 1087 struct ceph_pg_pool_info *pool; 1088 unsigned int ps; 1089 1090 BUG_ON(!osdmap); 1091 1092 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); 1093 if (!pool) 1094 return -EIO; 1095 ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid)); 1096 num = le32_to_cpu(pool->v.pg_num); 1097 num_mask = pool->pg_num_mask; 1098 1099 pgid.ps = cpu_to_le16(ps); 1100 pgid.preferred = cpu_to_le16(-1); 1101 pgid.pool = fl->fl_pg_pool; 1102 dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps); 1103 1104 ol->ol_pgid = pgid; 1105 ol->ol_stripe_unit = fl->fl_object_stripe_unit; 1106 return 0; 1107 } 1108 EXPORT_SYMBOL(ceph_calc_object_layout); 1109 1110 /* 1111 * Calculate raw osd vector for the given pgid. Return pointer to osd 1112 * array, or NULL on failure. 1113 */ 1114 static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, 1115 int *osds, int *num) 1116 { 1117 struct ceph_pg_mapping *pg; 1118 struct ceph_pg_pool_info *pool; 1119 int ruleno; 1120 unsigned int poolid, ps, pps, t, r; 1121 1122 poolid = le32_to_cpu(pgid.pool); 1123 ps = le16_to_cpu(pgid.ps); 1124 1125 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); 1126 if (!pool) 1127 return NULL; 1128 1129 /* pg_temp? 
*/ 1130 t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num), 1131 pool->pgp_num_mask); 1132 pgid.ps = cpu_to_le16(t); 1133 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); 1134 if (pg) { 1135 *num = pg->len; 1136 return pg->osds; 1137 } 1138 1139 /* crush */ 1140 ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, 1141 pool->v.type, pool->v.size); 1142 if (ruleno < 0) { 1143 pr_err("no crush rule pool %d ruleset %d type %d size %d\n", 1144 poolid, pool->v.crush_ruleset, pool->v.type, 1145 pool->v.size); 1146 return NULL; 1147 } 1148 1149 pps = ceph_stable_mod(ps, 1150 le32_to_cpu(pool->v.pgp_num), 1151 pool->pgp_num_mask); 1152 pps += poolid; 1153 r = crush_do_rule(osdmap->crush, ruleno, pps, osds, 1154 min_t(int, pool->v.size, *num), 1155 osdmap->osd_weight); 1156 if (r < 0) { 1157 pr_err("error %d from crush rule: pool %d ruleset %d type %d" 1158 " size %d\n", r, poolid, pool->v.crush_ruleset, 1159 pool->v.type, pool->v.size); 1160 return NULL; 1161 } 1162 *num = r; 1163 return osds; 1164 } 1165 1166 /* 1167 * Return acting set for given pgid. 1168 */ 1169 int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, 1170 int *acting) 1171 { 1172 int rawosds[CEPH_PG_MAX_SIZE], *osds; 1173 int i, o, num = CEPH_PG_MAX_SIZE; 1174 1175 osds = calc_pg_raw(osdmap, pgid, rawosds, &num); 1176 if (!osds) 1177 return -1; 1178 1179 /* primary is first up osd */ 1180 o = 0; 1181 for (i = 0; i < num; i++) 1182 if (ceph_osd_is_up(osdmap, osds[i])) 1183 acting[o++] = osds[i]; 1184 return o; 1185 } 1186 1187 /* 1188 * Return primary osd for given pgid, or -1 if none. 
1189 */ 1190 int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid) 1191 { 1192 int rawosds[CEPH_PG_MAX_SIZE], *osds; 1193 int i, num = CEPH_PG_MAX_SIZE; 1194 1195 osds = calc_pg_raw(osdmap, pgid, rawosds, &num); 1196 if (!osds) 1197 return -1; 1198 1199 /* primary is first up osd */ 1200 for (i = 0; i < num; i++) 1201 if (ceph_osd_is_up(osdmap, osds[i])) 1202 return osds[i]; 1203 return -1; 1204 } 1205 EXPORT_SYMBOL(ceph_calc_pg_primary); 1206