#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, int state)
{
	if (!len)
		return str;

	*str = '\0';
	if (state) {
		/*
		 * Build the string in one shot: snprintf() must not be
		 * passed its own destination buffer as a source argument.
		 */
		if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
			snprintf(str, len, "exists, up");
		else if (state & CEPH_OSD_EXISTS)
			snprintf(str, len, "exists");
		else if (state & CEPH_OSD_UP)
			snprintf(str, len, "up");
	} else {
		snprintf(str, len, "doesn't exist");
	}
	return str;
}

/* maps */

/* number of bits needed to represent t */
static int calc_bits_of(unsigned int t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo, e.g.
 * pg_num = 12 gives pg_num_mask = 15.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
	pi->pgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
	pi->lpg_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
	pi->lpgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}

/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
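
/*
 * Illustrative sketch (not part of the decode path): every bucket
 * decoder above follows the same bounds-then-read pattern.
 * ceph_decode_need() verifies that the requested byte count remains
 * between *p and end, jumping to the "bad" label otherwise; after that
 * the unchecked ceph_decode_*() accessors are safe.  For a hypothetical
 * array of n u32 weights:
 *
 *	ceph_decode_need(p, end, n * sizeof(u32), bad);
 *	for (j = 0; j < n; j++)
 *		weights[j] = ceph_decode_32(p);	 (advances *p by 4)
 */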

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				(struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
				(struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */

	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}
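
/*
 * Illustrative sketch (not called anywhere): walking the rules of a
 * successfully decoded map.  A rule slot may be NULL if the encoded
 * map skipped that rule id.
 *
 *	struct crush_rule *r = c->rules[ruleno];
 *	if (r)
 *		for (j = 0; j < r->len; j++)
 *			dout("step %d: op %u arg1 %d arg2 %d\n", j,
 *			     r->steps[j].op, r->steps[j].arg1,
 *			     r->steps[j].arg2);
 */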

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	/*
	 * struct ceph_pg is a packed 8-byte value, so comparing the raw
	 * u64 image gives a cheap, stable total order for the tree.
	 */
	u64 a = *(u64 *)&l;
	u64 b = *(u64 *)&r;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0) {
			n = n->rb_left;
		} else if (c > 0) {
			n = n->rb_right;
		} else {
			dout("__lookup_pg_mapping %llx got %p\n",
			     *(u64 *)&pgid, pg);
			return pg;
		}
	}
	return NULL;
}

static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
	struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

	if (pg) {
		dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
		rb_erase(&pg->node, root);
		kfree(pg);
		return 0;
	}
	dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
	return -ENOENT;
}

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}
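
/*
 * Illustrative sketch: both trees above are used through the same
 * lookup idiom, e.g. fetching the info for pool id 0 (assuming the map
 * is already decoded and the caller holds the appropriate locks):
 *
 *	struct ceph_pg_pool_info *pi = __lookup_pg_pool(&map->pg_pools, 0);
 *	if (pi)
 *		dout("pool %d pg_num %u\n", pi->id,
 *		     le32_to_cpu(pi->v.pg_num));
 */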

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	unsigned int n, m;

	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
	calc_pg_masks(pi);

	/* num_snaps * snap_info_t */
	n = le32_to_cpu(pi->v.num_snaps);
	while (n--) {
		ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
				 sizeof(struct ceph_timespec), bad);
		*p += sizeof(u64) +       /* key */
			1 + sizeof(u64) + /* u8, snapid */
			sizeof(struct ceph_timespec);
		ceph_decode_32_safe(p, end, m, bad);  /* snap name length */
		ceph_decode_need(p, end, m, bad);     /* name must fit */
		*p += m;
	}

	*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
	return 0;

bad:
	return -EINVAL;
}

static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len, pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %d len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);  /* name must fit */
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			kfree(pi->name);
			pi->name = kmalloc(len + 1, GFP_NOFS);
			if (pi->name) {
				memcpy(pi->name, *p, len);
				pi->name[len] = '\0';
				dout(" name is %s\n", pi->name);
			}
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}

/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old?  (bound the copy in case the map shrinks) */
	if (map->osd_state) {
		u32 n = min_t(u32, max, map->max_osd);

		memcpy(state, map->osd_state, n*sizeof(*state));
		memcpy(addr, map->osd_addr, n*sizeof(*addr));
		memcpy(weight, map->osd_weight, n*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}
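
/*
 * Illustrative only: growing a map from max_osd 2 to 4 copies the two
 * existing entries; entries 2 and 3 start out zeroed (kcalloc), i.e.
 * state 0 (doesn't exist) and weight 0 (out) until an update says
 * otherwise:
 *
 *	err = osdmap_set_max_osd(map, 4);
 */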

/*
 * decode a full map.
 */
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	u8 ev;
	int err = -EINVAL;
	void *start = *p;
	struct ceph_pg_pool_info *pi;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_VERSION) {
		pr_warning("got unknown v %d > %d of osdmap\n", version,
			   CEPH_OSDMAP_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
		pi = kzalloc(sizeof(*pi), GFP_NOFS);
		if (!pi)
			goto bad;
		pi->id = ceph_decode_32(p);
		ev = ceph_decode_8(p);  /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			kfree(pi);
			goto bad;
		}
		err = __decode_pool(p, end, pi);
		if (err < 0) {
			kfree(pi);
			goto bad;
		}
		__insert_pg_pool(&map->pg_pools, pi);
	}

	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, bad);

	ceph_decode_32_safe(p, end, map->flags, bad);

	ceph_decode_32_safe(p, end, max, bad);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err < 0)
		goto bad;
	dout("osdmap_decode max_osd = %d\n", map->max_osd);

	/* osds */
	err = -EINVAL;
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), bad);
	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_state, map->max_osd);

	*p += 4; /* skip length field (should match max) */
	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	for (i = 0; i < len; i++) {
		int n, j;
		struct ceph_pg pgid;
		struct ceph_pg_mapping *pg;

		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		n = ceph_decode_32(p);
		ceph_decode_need(p, end, n * sizeof(u32), bad);
		err = -ENOMEM;
		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
		if (!pg)
			goto bad;
		pg->pgid = pgid;
		pg->len = n;
		for (j = 0; j < n; j++)
			pg->osds[j] = ceph_decode_32(p);

		err = __insert_pg_mapping(pg, &map->pg_temp);
		if (err) {
			kfree(pg);  /* not linked into the tree */
			goto bad;
		}
		dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, n);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, bad);
	dout("osdmap_decode crush len %d from off 0x%x\n", len,
	     (int)(*p - start));
	ceph_decode_need(p, end, len, bad);
	map->crush = crush_decode(*p, end);
	*p += len;
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}

	/* ignore the rest of the map */
	*p = end;

	dout("osdmap_decode done %p %p\n", *p, end);
	return map;

bad:
	dout("osdmap_decode fail\n");
	ceph_osdmap_destroy(map);
	return ERR_PTR(err);
}
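
/*
 * Sketch of typical use, as from a monitor client receiving a full map
 * message (buffer names here are hypothetical):
 *
 *	void *q = msg_front, *mend = q + front_len;
 *	struct ceph_osdmap *newmap = osdmap_decode(&q, mend);
 *	if (IS_ERR(newmap))
 *		return PTR_ERR(newmap);
 *	...
 *	ceph_osdmap_destroy(newmap);
 */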

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	u32 len, pool;
	__s32 new_pool_max, new_flags, max;
	void *start = *p;
	int err = -EINVAL;
	u16 version;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_INC_VERSION) {
		pr_warning("got unknown v %d > %d of inc osdmap\n", version,
			   CEPH_OSDMAP_INC_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
			 bad);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_32(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_CAST(newcrush);
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	max = ceph_decode_32(p);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		/* a new crush map replaces the old one wholesale */
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		__u8 ev;
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
		ev = ceph_decode_8(p);  /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			goto bad;
		}
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi) {
				err = -ENOMEM;
				goto bad;
			}
			pi->id = pool;
			__insert_pg_pool(&map->pg_pools, pi);
		}
		err = __decode_pool(p, end, pi);
		if (err < 0)
			goto bad;
	}
	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	err = -EINVAL;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_state */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		u8 xorstate;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_need(p, end, sizeof(u8), bad);
		xorstate = **(u8 **)p;
		(*p)++;  /* clean flag */
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		if (xorstate & CEPH_OSD_UP)
			pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] ^= xorstate;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
			off == CEPH_OSD_IN ? "(in)" :
			(off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		struct ceph_pg pgid;
		u32 pglen;
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		pglen = ceph_decode_32(p);

		if (pglen) {
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);

			/* removing existing (if any) */
			(void) __remove_pg_mapping(&map->pg_temp, pgid);

			/* insert */
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg) {
				err = -ENOMEM;
				goto bad;
			}
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				pg->osds[j] = ceph_decode_32(p);
			err = __insert_pg_mapping(pg, &map->pg_temp);
			if (err) {
				kfree(pg);
				goto bad;
			}
			dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
			     pglen);
		} else {
			/* remove */
			__remove_pg_mapping(&map->pg_temp, pgid);
		}
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}
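
/*
 * Worked example of the new_state xor convention above: an osd that is
 * currently CEPH_OSD_EXISTS|CEPH_OSD_UP and receives xorstate
 * CEPH_OSD_UP ends up CEPH_OSD_EXISTS (marked down but still present);
 * an encoded xorstate of 0 is shorthand for CEPH_OSD_UP:
 *
 *	map->osd_state[osd] ^= CEPH_OSD_UP;   (up -> down, or down -> up)
 */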

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				   u64 off, u64 *plen,
				   u64 *ono,
				   u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu  osize %u fl_su %u\n", off, *plen,
	     osize, su);
	su_per_object = osize / su;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	BUG_ON((su & ~PAGE_MASK) != 0);
	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object.  This is the minimum of the full length requested (plen) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, *plen, su - su_offset);
	*plen = *oxlen;

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
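
/*
 * Worked example (hypothetical layout): su = 1 MB, sc = 2 stripes,
 * osize = 4 MB, so su_per_object = 4.  For off = 5 MB:
 *
 *	bl = 5, stripeno = 2, stripepos = 1, objsetno = 0
 *	ono = 0 * 2 + 1 = 1
 *	oxoff = 0 + (2 % 4) * 1 MB = 2 MB
 *
 * and a 2 MB *plen is clipped to oxlen = 1 MB, one stripe unit.
 */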
"(out)" : "")); 870 if (osd < map->max_osd) 871 map->osd_weight[osd] = off; 872 } 873 874 /* new_pg_temp */ 875 ceph_decode_32_safe(p, end, len, bad); 876 while (len--) { 877 struct ceph_pg_mapping *pg; 878 int j; 879 struct ceph_pg pgid; 880 u32 pglen; 881 ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); 882 ceph_decode_copy(p, &pgid, sizeof(pgid)); 883 pglen = ceph_decode_32(p); 884 885 if (pglen) { 886 ceph_decode_need(p, end, pglen*sizeof(u32), bad); 887 888 /* removing existing (if any) */ 889 (void) __remove_pg_mapping(&map->pg_temp, pgid); 890 891 /* insert */ 892 pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS); 893 if (!pg) { 894 err = -ENOMEM; 895 goto bad; 896 } 897 pg->pgid = pgid; 898 pg->len = pglen; 899 for (j = 0; j < pglen; j++) 900 pg->osds[j] = ceph_decode_32(p); 901 err = __insert_pg_mapping(pg, &map->pg_temp); 902 if (err) { 903 kfree(pg); 904 goto bad; 905 } 906 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, 907 pglen); 908 } else { 909 /* remove */ 910 __remove_pg_mapping(&map->pg_temp, pgid); 911 } 912 } 913 914 /* ignore the rest */ 915 *p = end; 916 return map; 917 918 bad: 919 pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n", 920 epoch, (int)(*p - start), *p, start, end); 921 print_hex_dump(KERN_DEBUG, "osdmap: ", 922 DUMP_PREFIX_OFFSET, 16, 1, 923 start, end - start, true); 924 if (newcrush) 925 crush_destroy(newcrush); 926 return ERR_PTR(err); 927 } 928 929 930 931 932 /* 933 * calculate file layout from given offset, length. 934 * fill in correct oid, logical length, and object extent 935 * offset, length. 936 * 937 * for now, we write only a single su, until we can 938 * pass a stride back to the caller. 939 */ 940 void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, 941 u64 off, u64 *plen, 942 u64 *ono, 943 u64 *oxoff, u64 *oxlen) 944 { 945 u32 osize = le32_to_cpu(layout->fl_object_size); 946 u32 su = le32_to_cpu(layout->fl_stripe_unit); 947 u32 sc = le32_to_cpu(layout->fl_stripe_count); 948 u32 bl, stripeno, stripepos, objsetno; 949 u32 su_per_object; 950 u64 t, su_offset; 951 952 dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen, 953 osize, su); 954 su_per_object = osize / su; 955 dout("osize %u / su %u = su_per_object %u\n", osize, su, 956 su_per_object); 957 958 BUG_ON((su & ~PAGE_MASK) != 0); 959 /* bl = *off / su; */ 960 t = off; 961 do_div(t, su); 962 bl = t; 963 dout("off %llu / su %u = bl %u\n", off, su, bl); 964 965 stripeno = bl / sc; 966 stripepos = bl % sc; 967 objsetno = stripeno / su_per_object; 968 969 *ono = objsetno * sc + stripepos; 970 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono); 971 972 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ 973 t = off; 974 su_offset = do_div(t, su); 975 *oxoff = su_offset + (stripeno % su_per_object) * su; 976 977 /* 978 * Calculate the length of the extent being written to the selected 979 * object. This is the minimum of the full length requested (plen) or 980 * the remainder of the current stripe being written to. 981 */ 982 *oxlen = min_t(u64, *plen, su - su_offset); 983 *plen = *oxlen; 984 985 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); 986 } 987 EXPORT_SYMBOL(ceph_calc_file_object_mapping); 988 989 /* 990 * calculate an object layout (i.e. 

/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *num)
{
	struct ceph_pg_mapping *pg;
	struct ceph_pg_pool_info *pool;
	int ruleno;
	int r;
	unsigned int poolid, ps, pps, t;

	poolid = le32_to_cpu(pgid.pool);
	ps = le16_to_cpu(pgid.ps);

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return NULL;

	/* pg_temp?  (the mask must pair with pg_num, not pgp_num) */
	t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
			    pool->pg_num_mask);
	pgid.ps = cpu_to_le16(t);
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		*num = pg->len;
		return pg->osds;
	}

	/* crush */
	ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
				 pool->v.type, pool->v.size);
	if (ruleno < 0) {
		pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
		       poolid, pool->v.crush_ruleset, pool->v.type,
		       pool->v.size);
		return NULL;
	}

	pps = ceph_stable_mod(ps,
			      le32_to_cpu(pool->v.pgp_num),
			      pool->pgp_num_mask);
	pps += poolid;
	r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
			  min_t(int, pool->v.size, *num),
			  osdmap->osd_weight);
	if (r < 0) {
		pr_err("error %d from crush rule: pool %d ruleset %d type %d"
		       " size %d\n", r, poolid, pool->v.crush_ruleset,
		       pool->v.type, pool->v.size);
		return NULL;
	}
	*num = r;
	return osds;
}

/*
 * Return acting set for given pgid.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *acting)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, o, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* up osds only; the first one collected is the primary */
	o = 0;
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			acting[o++] = osds[i];
	return o;
}
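
/*
 * Illustrative only: the acting set is the raw crush result filtered
 * down to osds that are actually up; index 0, if any, is the primary:
 *
 *	int acting[CEPH_PG_MAX_SIZE];
 *	int num = ceph_calc_pg_acting(osdmap, pgid, acting);
 *	if (num > 0)
 *		dout("primary is osd%d of %d\n", acting[0], num);
 */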

/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			return osds[i];
	return -1;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);
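
/*
 * End-to-end sketch of the mapping pipeline implemented in this file,
 * assuming a decoded osdmap and file layout:
 *
 *	ceph_calc_object_layout(&ol, oid, fl, osdmap);   (oid -> pgid)
 *	osd = ceph_calc_pg_primary(osdmap, ol.ol_pgid);  (pgid -> osd)
 *
 * pg_temp remappings and osd up/down state are consulted inside
 * calc_pg_raw(), so callers need only the two calls above.
 */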