1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/module.h> 4 #include <linux/err.h> 5 #include <linux/highmem.h> 6 #include <linux/mm.h> 7 #include <linux/pagemap.h> 8 #include <linux/slab.h> 9 #include <linux/uaccess.h> 10 #ifdef CONFIG_BLOCK 11 #include <linux/bio.h> 12 #endif 13 14 #include <linux/ceph/libceph.h> 15 #include <linux/ceph/osd_client.h> 16 #include <linux/ceph/messenger.h> 17 #include <linux/ceph/decode.h> 18 #include <linux/ceph/auth.h> 19 #include <linux/ceph/pagelist.h> 20 21 #define OSD_OP_FRONT_LEN 4096 22 #define OSD_OPREPLY_FRONT_LEN 512 23 24 static const struct ceph_connection_operations osd_con_ops; 25 26 static void send_queued(struct ceph_osd_client *osdc); 27 static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd); 28 static void __register_request(struct ceph_osd_client *osdc, 29 struct ceph_osd_request *req); 30 static void __unregister_linger_request(struct ceph_osd_client *osdc, 31 struct ceph_osd_request *req); 32 static int __send_request(struct ceph_osd_client *osdc, 33 struct ceph_osd_request *req); 34 35 static int op_needs_trail(int op) 36 { 37 switch (op) { 38 case CEPH_OSD_OP_GETXATTR: 39 case CEPH_OSD_OP_SETXATTR: 40 case CEPH_OSD_OP_CMPXATTR: 41 case CEPH_OSD_OP_CALL: 42 case CEPH_OSD_OP_NOTIFY: 43 return 1; 44 default: 45 return 0; 46 } 47 } 48 49 static int op_has_extent(int op) 50 { 51 return (op == CEPH_OSD_OP_READ || 52 op == CEPH_OSD_OP_WRITE); 53 } 54 55 void ceph_calc_raw_layout(struct ceph_osd_client *osdc, 56 struct ceph_file_layout *layout, 57 u64 snapid, 58 u64 off, u64 *plen, u64 *bno, 59 struct ceph_osd_request *req, 60 struct ceph_osd_req_op *op) 61 { 62 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; 63 u64 orig_len = *plen; 64 u64 objoff, objlen; /* extent in object */ 65 66 reqhead->snapid = cpu_to_le64(snapid); 67 68 /* object extent? */ 69 ceph_calc_file_object_mapping(layout, off, plen, bno, 70 &objoff, &objlen); 71 if (*plen < orig_len) 72 dout(" skipping last %llu, final file extent %llu~%llu\n", 73 orig_len - *plen, off, *plen); 74 75 if (op_has_extent(op->op)) { 76 op->extent.offset = objoff; 77 op->extent.length = objlen; 78 } 79 req->r_num_pages = calc_pages_for(off, *plen); 80 req->r_page_alignment = off & ~PAGE_MASK; 81 if (op->op == CEPH_OSD_OP_WRITE) 82 op->payload_len = *plen; 83 84 dout("calc_layout bno=%llx %llu~%llu (%d pages)\n", 85 *bno, objoff, objlen, req->r_num_pages); 86 87 } 88 EXPORT_SYMBOL(ceph_calc_raw_layout); 89 90 /* 91 * Implement client access to distributed object storage cluster. 92 * 93 * All data objects are stored within a cluster/cloud of OSDs, or 94 * "object storage devices." (Note that Ceph OSDs have _nothing_ to 95 * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply 96 * remote daemons serving up and coordinating consistent and safe 97 * access to storage. 98 * 99 * Cluster membership and the mapping of data objects onto storage devices 100 * are described by the osd map. 101 * 102 * We keep track of pending OSD requests (read, write), resubmit 103 * requests to different OSDs when the cluster topology/data layout 104 * change, or retry the affected requests when the communications 105 * channel with an OSD is reset. 106 */ 107 108 /* 109 * calculate the mapping of a file extent onto an object, and fill out the 110 * request accordingly. shorten extent as necessary if it crosses an 111 * object boundary. 112 * 113 * fill osd op in request message. 114 */ 115 static void calc_layout(struct ceph_osd_client *osdc, 116 struct ceph_vino vino, 117 struct ceph_file_layout *layout, 118 u64 off, u64 *plen, 119 struct ceph_osd_request *req, 120 struct ceph_osd_req_op *op) 121 { 122 u64 bno; 123 124 ceph_calc_raw_layout(osdc, layout, vino.snap, off, 125 plen, &bno, req, op); 126 127 snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); 128 req->r_oid_len = strlen(req->r_oid); 129 } 130 131 /* 132 * requests 133 */ 134 void ceph_osdc_release_request(struct kref *kref) 135 { 136 struct ceph_osd_request *req = container_of(kref, 137 struct ceph_osd_request, 138 r_kref); 139 140 if (req->r_request) 141 ceph_msg_put(req->r_request); 142 if (req->r_reply) 143 ceph_msg_put(req->r_reply); 144 if (req->r_con_filling_msg) { 145 dout("release_request revoking pages %p from con %p\n", 146 req->r_pages, req->r_con_filling_msg); 147 ceph_con_revoke_message(req->r_con_filling_msg, 148 req->r_reply); 149 ceph_con_put(req->r_con_filling_msg); 150 } 151 if (req->r_own_pages) 152 ceph_release_page_vector(req->r_pages, 153 req->r_num_pages); 154 #ifdef CONFIG_BLOCK 155 if (req->r_bio) 156 bio_put(req->r_bio); 157 #endif 158 ceph_put_snap_context(req->r_snapc); 159 if (req->r_trail) { 160 ceph_pagelist_release(req->r_trail); 161 kfree(req->r_trail); 162 } 163 if (req->r_mempool) 164 mempool_free(req, req->r_osdc->req_mempool); 165 else 166 kfree(req); 167 } 168 EXPORT_SYMBOL(ceph_osdc_release_request); 169 170 static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail) 171 { 172 int i = 0; 173 174 if (needs_trail) 175 *needs_trail = 0; 176 while (ops[i].op) { 177 if (needs_trail && op_needs_trail(ops[i].op)) 178 *needs_trail = 1; 179 i++; 180 } 181 182 return i; 183 } 184 185 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, 186 int flags, 187 struct ceph_snap_context *snapc, 188 struct ceph_osd_req_op *ops, 189 bool use_mempool, 190 gfp_t gfp_flags, 191 struct page **pages, 192 struct bio *bio) 193 { 194 struct ceph_osd_request *req; 195 struct ceph_msg *msg; 196 int needs_trail; 197 int num_op = get_num_ops(ops, &needs_trail); 198 size_t msg_size = sizeof(struct ceph_osd_request_head); 199 200 msg_size += num_op*sizeof(struct ceph_osd_op); 201 202 if (use_mempool) { 203 req = mempool_alloc(osdc->req_mempool, gfp_flags); 204 memset(req, 0, sizeof(*req)); 205 } else { 206 req = kzalloc(sizeof(*req), gfp_flags); 207 } 208 if (req == NULL) 209 return NULL; 210 211 req->r_osdc = osdc; 212 req->r_mempool = use_mempool; 213 214 kref_init(&req->r_kref); 215 init_completion(&req->r_completion); 216 init_completion(&req->r_safe_completion); 217 INIT_LIST_HEAD(&req->r_unsafe_item); 218 INIT_LIST_HEAD(&req->r_linger_item); 219 INIT_LIST_HEAD(&req->r_linger_osd); 220 req->r_flags = flags; 221 222 WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); 223 224 /* create reply message */ 225 if (use_mempool) 226 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); 227 else 228 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, 229 OSD_OPREPLY_FRONT_LEN, gfp_flags); 230 if (!msg) { 231 ceph_osdc_put_request(req); 232 return NULL; 233 } 234 req->r_reply = msg; 235 236 /* allocate space for the trailing data */ 237 if (needs_trail) { 238 req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags); 239 if (!req->r_trail) { 240 ceph_osdc_put_request(req); 241 return NULL; 242 } 243 ceph_pagelist_init(req->r_trail); 244 } 245 /* create request message; allow space for oid */ 246 msg_size += 40; 247 if (snapc) 248 msg_size += sizeof(u64) * snapc->num_snaps; 249 if (use_mempool) 250 msg = ceph_msgpool_get(&osdc->msgpool_op, 0); 251 else 252 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags); 253 if (!msg) { 254 ceph_osdc_put_request(req); 255 return NULL; 256 } 257 258 msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP); 259 memset(msg->front.iov_base, 0, msg->front.iov_len); 260 261 req->r_request = msg; 262 req->r_pages = pages; 263 #ifdef CONFIG_BLOCK 264 if (bio) { 265 req->r_bio = bio; 266 bio_get(req->r_bio); 267 } 268 #endif 269 270 return req; 271 } 272 EXPORT_SYMBOL(ceph_osdc_alloc_request); 273 274 static void osd_req_encode_op(struct ceph_osd_request *req, 275 struct ceph_osd_op *dst, 276 struct ceph_osd_req_op *src) 277 { 278 dst->op = cpu_to_le16(src->op); 279 280 switch (dst->op) { 281 case CEPH_OSD_OP_READ: 282 case CEPH_OSD_OP_WRITE: 283 dst->extent.offset = 284 cpu_to_le64(src->extent.offset); 285 dst->extent.length = 286 cpu_to_le64(src->extent.length); 287 dst->extent.truncate_size = 288 cpu_to_le64(src->extent.truncate_size); 289 dst->extent.truncate_seq = 290 cpu_to_le32(src->extent.truncate_seq); 291 break; 292 293 case CEPH_OSD_OP_GETXATTR: 294 case CEPH_OSD_OP_SETXATTR: 295 case CEPH_OSD_OP_CMPXATTR: 296 BUG_ON(!req->r_trail); 297 298 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len); 299 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len); 300 dst->xattr.cmp_op = src->xattr.cmp_op; 301 dst->xattr.cmp_mode = src->xattr.cmp_mode; 302 ceph_pagelist_append(req->r_trail, src->xattr.name, 303 src->xattr.name_len); 304 ceph_pagelist_append(req->r_trail, src->xattr.val, 305 src->xattr.value_len); 306 break; 307 case CEPH_OSD_OP_CALL: 308 BUG_ON(!req->r_trail); 309 310 dst->cls.class_len = src->cls.class_len; 311 dst->cls.method_len = src->cls.method_len; 312 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); 313 314 ceph_pagelist_append(req->r_trail, src->cls.class_name, 315 src->cls.class_len); 316 ceph_pagelist_append(req->r_trail, src->cls.method_name, 317 src->cls.method_len); 318 ceph_pagelist_append(req->r_trail, src->cls.indata, 319 src->cls.indata_len); 320 break; 321 case CEPH_OSD_OP_ROLLBACK: 322 dst->snap.snapid = cpu_to_le64(src->snap.snapid); 323 break; 324 case CEPH_OSD_OP_STARTSYNC: 325 break; 326 case CEPH_OSD_OP_NOTIFY: 327 { 328 __le32 prot_ver = cpu_to_le32(src->watch.prot_ver); 329 __le32 timeout = cpu_to_le32(src->watch.timeout); 330 331 BUG_ON(!req->r_trail); 332 333 ceph_pagelist_append(req->r_trail, 334 &prot_ver, sizeof(prot_ver)); 335 ceph_pagelist_append(req->r_trail, 336 &timeout, sizeof(timeout)); 337 } 338 case CEPH_OSD_OP_NOTIFY_ACK: 339 case CEPH_OSD_OP_WATCH: 340 dst->watch.cookie = cpu_to_le64(src->watch.cookie); 341 dst->watch.ver = cpu_to_le64(src->watch.ver); 342 dst->watch.flag = src->watch.flag; 343 break; 344 default: 345 pr_err("unrecognized osd opcode %d\n", dst->op); 346 WARN_ON(1); 347 break; 348 } 349 dst->payload_len = cpu_to_le32(src->payload_len); 350 } 351 352 /* 353 * build new request AND message 354 * 355 */ 356 void ceph_osdc_build_request(struct ceph_osd_request *req, 357 u64 off, u64 *plen, 358 struct ceph_osd_req_op *src_ops, 359 struct ceph_snap_context *snapc, 360 struct timespec *mtime, 361 const char *oid, 362 int oid_len) 363 { 364 struct ceph_msg *msg = req->r_request; 365 struct ceph_osd_request_head *head; 366 struct ceph_osd_req_op *src_op; 367 struct ceph_osd_op *op; 368 void *p; 369 int num_op = get_num_ops(src_ops, NULL); 370 size_t msg_size = sizeof(*head) + num_op*sizeof(*op); 371 int flags = req->r_flags; 372 u64 data_len = 0; 373 int i; 374 375 head = msg->front.iov_base; 376 op = (void *)(head + 1); 377 p = (void *)(op + num_op); 378 379 req->r_snapc = ceph_get_snap_context(snapc); 380 381 head->client_inc = cpu_to_le32(1); /* always, for now. */ 382 head->flags = cpu_to_le32(flags); 383 if (flags & CEPH_OSD_FLAG_WRITE) 384 ceph_encode_timespec(&head->mtime, mtime); 385 head->num_ops = cpu_to_le16(num_op); 386 387 388 /* fill in oid */ 389 head->object_len = cpu_to_le32(oid_len); 390 memcpy(p, oid, oid_len); 391 p += oid_len; 392 393 src_op = src_ops; 394 while (src_op->op) { 395 osd_req_encode_op(req, op, src_op); 396 src_op++; 397 op++; 398 } 399 400 if (req->r_trail) 401 data_len += req->r_trail->length; 402 403 if (snapc) { 404 head->snap_seq = cpu_to_le64(snapc->seq); 405 head->num_snaps = cpu_to_le32(snapc->num_snaps); 406 for (i = 0; i < snapc->num_snaps; i++) { 407 put_unaligned_le64(snapc->snaps[i], p); 408 p += sizeof(u64); 409 } 410 } 411 412 if (flags & CEPH_OSD_FLAG_WRITE) { 413 req->r_request->hdr.data_off = cpu_to_le16(off); 414 req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len); 415 } else if (data_len) { 416 req->r_request->hdr.data_off = 0; 417 req->r_request->hdr.data_len = cpu_to_le32(data_len); 418 } 419 420 req->r_request->page_alignment = req->r_page_alignment; 421 422 BUG_ON(p > msg->front.iov_base + msg->front.iov_len); 423 msg_size = p - msg->front.iov_base; 424 msg->front.iov_len = msg_size; 425 msg->hdr.front_len = cpu_to_le32(msg_size); 426 return; 427 } 428 EXPORT_SYMBOL(ceph_osdc_build_request); 429 430 /* 431 * build new request AND message, calculate layout, and adjust file 432 * extent as needed. 433 * 434 * if the file was recently truncated, we include information about its 435 * old and new size so that the object can be updated appropriately. (we 436 * avoid synchronously deleting truncated objects because it's slow.) 437 * 438 * if @do_sync, include a 'startsync' command so that the osd will flush 439 * data quickly. 440 */ 441 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, 442 struct ceph_file_layout *layout, 443 struct ceph_vino vino, 444 u64 off, u64 *plen, 445 int opcode, int flags, 446 struct ceph_snap_context *snapc, 447 int do_sync, 448 u32 truncate_seq, 449 u64 truncate_size, 450 struct timespec *mtime, 451 bool use_mempool, int num_reply, 452 int page_align) 453 { 454 struct ceph_osd_req_op ops[3]; 455 struct ceph_osd_request *req; 456 457 ops[0].op = opcode; 458 ops[0].extent.truncate_seq = truncate_seq; 459 ops[0].extent.truncate_size = truncate_size; 460 ops[0].payload_len = 0; 461 462 if (do_sync) { 463 ops[1].op = CEPH_OSD_OP_STARTSYNC; 464 ops[1].payload_len = 0; 465 ops[2].op = 0; 466 } else 467 ops[1].op = 0; 468 469 req = ceph_osdc_alloc_request(osdc, flags, 470 snapc, ops, 471 use_mempool, 472 GFP_NOFS, NULL, NULL); 473 if (!req) 474 return NULL; 475 476 /* calculate max write size */ 477 calc_layout(osdc, vino, layout, off, plen, req, ops); 478 req->r_file_layout = *layout; /* keep a copy */ 479 480 /* in case it differs from natural alignment that calc_layout 481 filled in for us */ 482 req->r_page_alignment = page_align; 483 484 ceph_osdc_build_request(req, off, plen, ops, 485 snapc, 486 mtime, 487 req->r_oid, req->r_oid_len); 488 489 return req; 490 } 491 EXPORT_SYMBOL(ceph_osdc_new_request); 492 493 /* 494 * We keep osd requests in an rbtree, sorted by ->r_tid. 495 */ 496 static void __insert_request(struct ceph_osd_client *osdc, 497 struct ceph_osd_request *new) 498 { 499 struct rb_node **p = &osdc->requests.rb_node; 500 struct rb_node *parent = NULL; 501 struct ceph_osd_request *req = NULL; 502 503 while (*p) { 504 parent = *p; 505 req = rb_entry(parent, struct ceph_osd_request, r_node); 506 if (new->r_tid < req->r_tid) 507 p = &(*p)->rb_left; 508 else if (new->r_tid > req->r_tid) 509 p = &(*p)->rb_right; 510 else 511 BUG(); 512 } 513 514 rb_link_node(&new->r_node, parent, p); 515 rb_insert_color(&new->r_node, &osdc->requests); 516 } 517 518 static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc, 519 u64 tid) 520 { 521 struct ceph_osd_request *req; 522 struct rb_node *n = osdc->requests.rb_node; 523 524 while (n) { 525 req = rb_entry(n, struct ceph_osd_request, r_node); 526 if (tid < req->r_tid) 527 n = n->rb_left; 528 else if (tid > req->r_tid) 529 n = n->rb_right; 530 else 531 return req; 532 } 533 return NULL; 534 } 535 536 static struct ceph_osd_request * 537 __lookup_request_ge(struct ceph_osd_client *osdc, 538 u64 tid) 539 { 540 struct ceph_osd_request *req; 541 struct rb_node *n = osdc->requests.rb_node; 542 543 while (n) { 544 req = rb_entry(n, struct ceph_osd_request, r_node); 545 if (tid < req->r_tid) { 546 if (!n->rb_left) 547 return req; 548 n = n->rb_left; 549 } else if (tid > req->r_tid) { 550 n = n->rb_right; 551 } else { 552 return req; 553 } 554 } 555 return NULL; 556 } 557 558 /* 559 * Resubmit requests pending on the given osd. 560 */ 561 static void __kick_osd_requests(struct ceph_osd_client *osdc, 562 struct ceph_osd *osd) 563 { 564 struct ceph_osd_request *req, *nreq; 565 int err; 566 567 dout("__kick_osd_requests osd%d\n", osd->o_osd); 568 err = __reset_osd(osdc, osd); 569 if (err == -EAGAIN) 570 return; 571 572 list_for_each_entry(req, &osd->o_requests, r_osd_item) { 573 list_move(&req->r_req_lru_item, &osdc->req_unsent); 574 dout("requeued %p tid %llu osd%d\n", req, req->r_tid, 575 osd->o_osd); 576 if (!req->r_linger) 577 req->r_flags |= CEPH_OSD_FLAG_RETRY; 578 } 579 580 list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, 581 r_linger_osd) { 582 /* 583 * reregister request prior to unregistering linger so 584 * that r_osd is preserved. 585 */ 586 BUG_ON(!list_empty(&req->r_req_lru_item)); 587 __register_request(osdc, req); 588 list_add(&req->r_req_lru_item, &osdc->req_unsent); 589 list_add(&req->r_osd_item, &req->r_osd->o_requests); 590 __unregister_linger_request(osdc, req); 591 dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, 592 osd->o_osd); 593 } 594 } 595 596 static void kick_osd_requests(struct ceph_osd_client *osdc, 597 struct ceph_osd *kickosd) 598 { 599 mutex_lock(&osdc->request_mutex); 600 __kick_osd_requests(osdc, kickosd); 601 mutex_unlock(&osdc->request_mutex); 602 } 603 604 /* 605 * If the osd connection drops, we need to resubmit all requests. 606 */ 607 static void osd_reset(struct ceph_connection *con) 608 { 609 struct ceph_osd *osd = con->private; 610 struct ceph_osd_client *osdc; 611 612 if (!osd) 613 return; 614 dout("osd_reset osd%d\n", osd->o_osd); 615 osdc = osd->o_osdc; 616 down_read(&osdc->map_sem); 617 kick_osd_requests(osdc, osd); 618 send_queued(osdc); 619 up_read(&osdc->map_sem); 620 } 621 622 /* 623 * Track open sessions with osds. 624 */ 625 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc) 626 { 627 struct ceph_osd *osd; 628 629 osd = kzalloc(sizeof(*osd), GFP_NOFS); 630 if (!osd) 631 return NULL; 632 633 atomic_set(&osd->o_ref, 1); 634 osd->o_osdc = osdc; 635 INIT_LIST_HEAD(&osd->o_requests); 636 INIT_LIST_HEAD(&osd->o_linger_requests); 637 INIT_LIST_HEAD(&osd->o_osd_lru); 638 osd->o_incarnation = 1; 639 640 ceph_con_init(osdc->client->msgr, &osd->o_con); 641 osd->o_con.private = osd; 642 osd->o_con.ops = &osd_con_ops; 643 osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD; 644 645 INIT_LIST_HEAD(&osd->o_keepalive_item); 646 return osd; 647 } 648 649 static struct ceph_osd *get_osd(struct ceph_osd *osd) 650 { 651 if (atomic_inc_not_zero(&osd->o_ref)) { 652 dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1, 653 atomic_read(&osd->o_ref)); 654 return osd; 655 } else { 656 dout("get_osd %p FAIL\n", osd); 657 return NULL; 658 } 659 } 660 661 static void put_osd(struct ceph_osd *osd) 662 { 663 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), 664 atomic_read(&osd->o_ref) - 1); 665 if (atomic_dec_and_test(&osd->o_ref)) { 666 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; 667 668 if (osd->o_authorizer) 669 ac->ops->destroy_authorizer(ac, osd->o_authorizer); 670 kfree(osd); 671 } 672 } 673 674 /* 675 * remove an osd from our map 676 */ 677 static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) 678 { 679 dout("__remove_osd %p\n", osd); 680 BUG_ON(!list_empty(&osd->o_requests)); 681 rb_erase(&osd->o_node, &osdc->osds); 682 list_del_init(&osd->o_osd_lru); 683 ceph_con_close(&osd->o_con); 684 put_osd(osd); 685 } 686 687 static void __move_osd_to_lru(struct ceph_osd_client *osdc, 688 struct ceph_osd *osd) 689 { 690 dout("__move_osd_to_lru %p\n", osd); 691 BUG_ON(!list_empty(&osd->o_osd_lru)); 692 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); 693 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ; 694 } 695 696 static void __remove_osd_from_lru(struct ceph_osd *osd) 697 { 698 dout("__remove_osd_from_lru %p\n", osd); 699 if (!list_empty(&osd->o_osd_lru)) 700 list_del_init(&osd->o_osd_lru); 701 } 702 703 static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all) 704 { 705 struct ceph_osd *osd, *nosd; 706 707 dout("__remove_old_osds %p\n", osdc); 708 mutex_lock(&osdc->request_mutex); 709 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 710 if (!remove_all && time_before(jiffies, osd->lru_ttl)) 711 break; 712 __remove_osd(osdc, osd); 713 } 714 mutex_unlock(&osdc->request_mutex); 715 } 716 717 /* 718 * reset osd connect 719 */ 720 static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) 721 { 722 struct ceph_osd_request *req; 723 int ret = 0; 724 725 dout("__reset_osd %p osd%d\n", osd, osd->o_osd); 726 if (list_empty(&osd->o_requests) && 727 list_empty(&osd->o_linger_requests)) { 728 __remove_osd(osdc, osd); 729 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd], 730 &osd->o_con.peer_addr, 731 sizeof(osd->o_con.peer_addr)) == 0 && 732 !ceph_con_opened(&osd->o_con)) { 733 dout(" osd addr hasn't changed and connection never opened," 734 " letting msgr retry"); 735 /* touch each r_stamp for handle_timeout()'s benfit */ 736 list_for_each_entry(req, &osd->o_requests, r_osd_item) 737 req->r_stamp = jiffies; 738 ret = -EAGAIN; 739 } else { 740 ceph_con_close(&osd->o_con); 741 ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]); 742 osd->o_incarnation++; 743 } 744 return ret; 745 } 746 747 static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new) 748 { 749 struct rb_node **p = &osdc->osds.rb_node; 750 struct rb_node *parent = NULL; 751 struct ceph_osd *osd = NULL; 752 753 while (*p) { 754 parent = *p; 755 osd = rb_entry(parent, struct ceph_osd, o_node); 756 if (new->o_osd < osd->o_osd) 757 p = &(*p)->rb_left; 758 else if (new->o_osd > osd->o_osd) 759 p = &(*p)->rb_right; 760 else 761 BUG(); 762 } 763 764 rb_link_node(&new->o_node, parent, p); 765 rb_insert_color(&new->o_node, &osdc->osds); 766 } 767 768 static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o) 769 { 770 struct ceph_osd *osd; 771 struct rb_node *n = osdc->osds.rb_node; 772 773 while (n) { 774 osd = rb_entry(n, struct ceph_osd, o_node); 775 if (o < osd->o_osd) 776 n = n->rb_left; 777 else if (o > osd->o_osd) 778 n = n->rb_right; 779 else 780 return osd; 781 } 782 return NULL; 783 } 784 785 static void __schedule_osd_timeout(struct ceph_osd_client *osdc) 786 { 787 schedule_delayed_work(&osdc->timeout_work, 788 osdc->client->options->osd_keepalive_timeout * HZ); 789 } 790 791 static void __cancel_osd_timeout(struct ceph_osd_client *osdc) 792 { 793 cancel_delayed_work(&osdc->timeout_work); 794 } 795 796 /* 797 * Register request, assign tid. If this is the first request, set up 798 * the timeout event. 799 */ 800 static void __register_request(struct ceph_osd_client *osdc, 801 struct ceph_osd_request *req) 802 { 803 req->r_tid = ++osdc->last_tid; 804 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 805 INIT_LIST_HEAD(&req->r_req_lru_item); 806 807 dout("__register_request %p tid %lld\n", req, req->r_tid); 808 __insert_request(osdc, req); 809 ceph_osdc_get_request(req); 810 osdc->num_requests++; 811 812 if (osdc->num_requests == 1) { 813 dout(" first request, scheduling timeout\n"); 814 __schedule_osd_timeout(osdc); 815 } 816 } 817 818 static void register_request(struct ceph_osd_client *osdc, 819 struct ceph_osd_request *req) 820 { 821 mutex_lock(&osdc->request_mutex); 822 __register_request(osdc, req); 823 mutex_unlock(&osdc->request_mutex); 824 } 825 826 /* 827 * called under osdc->request_mutex 828 */ 829 static void __unregister_request(struct ceph_osd_client *osdc, 830 struct ceph_osd_request *req) 831 { 832 dout("__unregister_request %p tid %lld\n", req, req->r_tid); 833 rb_erase(&req->r_node, &osdc->requests); 834 osdc->num_requests--; 835 836 if (req->r_osd) { 837 /* make sure the original request isn't in flight. */ 838 ceph_con_revoke(&req->r_osd->o_con, req->r_request); 839 840 list_del_init(&req->r_osd_item); 841 if (list_empty(&req->r_osd->o_requests) && 842 list_empty(&req->r_osd->o_linger_requests)) { 843 dout("moving osd to %p lru\n", req->r_osd); 844 __move_osd_to_lru(osdc, req->r_osd); 845 } 846 if (list_empty(&req->r_linger_item)) 847 req->r_osd = NULL; 848 } 849 850 ceph_osdc_put_request(req); 851 852 list_del_init(&req->r_req_lru_item); 853 if (osdc->num_requests == 0) { 854 dout(" no requests, canceling timeout\n"); 855 __cancel_osd_timeout(osdc); 856 } 857 } 858 859 /* 860 * Cancel a previously queued request message 861 */ 862 static void __cancel_request(struct ceph_osd_request *req) 863 { 864 if (req->r_sent && req->r_osd) { 865 ceph_con_revoke(&req->r_osd->o_con, req->r_request); 866 req->r_sent = 0; 867 } 868 } 869 870 static void __register_linger_request(struct ceph_osd_client *osdc, 871 struct ceph_osd_request *req) 872 { 873 dout("__register_linger_request %p\n", req); 874 list_add_tail(&req->r_linger_item, &osdc->req_linger); 875 list_add_tail(&req->r_linger_osd, &req->r_osd->o_linger_requests); 876 } 877 878 static void __unregister_linger_request(struct ceph_osd_client *osdc, 879 struct ceph_osd_request *req) 880 { 881 dout("__unregister_linger_request %p\n", req); 882 if (req->r_osd) { 883 list_del_init(&req->r_linger_item); 884 list_del_init(&req->r_linger_osd); 885 886 if (list_empty(&req->r_osd->o_requests) && 887 list_empty(&req->r_osd->o_linger_requests)) { 888 dout("moving osd to %p lru\n", req->r_osd); 889 __move_osd_to_lru(osdc, req->r_osd); 890 } 891 if (list_empty(&req->r_osd_item)) 892 req->r_osd = NULL; 893 } 894 } 895 896 void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc, 897 struct ceph_osd_request *req) 898 { 899 mutex_lock(&osdc->request_mutex); 900 if (req->r_linger) { 901 __unregister_linger_request(osdc, req); 902 ceph_osdc_put_request(req); 903 } 904 mutex_unlock(&osdc->request_mutex); 905 } 906 EXPORT_SYMBOL(ceph_osdc_unregister_linger_request); 907 908 void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, 909 struct ceph_osd_request *req) 910 { 911 if (!req->r_linger) { 912 dout("set_request_linger %p\n", req); 913 req->r_linger = 1; 914 /* 915 * caller is now responsible for calling 916 * unregister_linger_request 917 */ 918 ceph_osdc_get_request(req); 919 } 920 } 921 EXPORT_SYMBOL(ceph_osdc_set_request_linger); 922 923 /* 924 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct 925 * (as needed), and set the request r_osd appropriately. If there is 926 * no up osd, set r_osd to NULL. Move the request to the appropriate list 927 * (unsent, homeless) or leave on in-flight lru. 928 * 929 * Return 0 if unchanged, 1 if changed, or negative on error. 930 * 931 * Caller should hold map_sem for read and request_mutex. 932 */ 933 static int __map_request(struct ceph_osd_client *osdc, 934 struct ceph_osd_request *req) 935 { 936 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; 937 struct ceph_pg pgid; 938 int acting[CEPH_PG_MAX_SIZE]; 939 int o = -1, num = 0; 940 int err; 941 942 dout("map_request %p tid %lld\n", req, req->r_tid); 943 err = ceph_calc_object_layout(&reqhead->layout, req->r_oid, 944 &req->r_file_layout, osdc->osdmap); 945 if (err) { 946 list_move(&req->r_req_lru_item, &osdc->req_notarget); 947 return err; 948 } 949 pgid = reqhead->layout.ol_pgid; 950 req->r_pgid = pgid; 951 952 err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting); 953 if (err > 0) { 954 o = acting[0]; 955 num = err; 956 } 957 958 if ((req->r_osd && req->r_osd->o_osd == o && 959 req->r_sent >= req->r_osd->o_incarnation && 960 req->r_num_pg_osds == num && 961 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) || 962 (req->r_osd == NULL && o == -1)) 963 return 0; /* no change */ 964 965 dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n", 966 req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o, 967 req->r_osd ? req->r_osd->o_osd : -1); 968 969 /* record full pg acting set */ 970 memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num); 971 req->r_num_pg_osds = num; 972 973 if (req->r_osd) { 974 __cancel_request(req); 975 list_del_init(&req->r_osd_item); 976 req->r_osd = NULL; 977 } 978 979 req->r_osd = __lookup_osd(osdc, o); 980 if (!req->r_osd && o >= 0) { 981 err = -ENOMEM; 982 req->r_osd = create_osd(osdc); 983 if (!req->r_osd) { 984 list_move(&req->r_req_lru_item, &osdc->req_notarget); 985 goto out; 986 } 987 988 dout("map_request osd %p is osd%d\n", req->r_osd, o); 989 req->r_osd->o_osd = o; 990 req->r_osd->o_con.peer_name.num = cpu_to_le64(o); 991 __insert_osd(osdc, req->r_osd); 992 993 ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]); 994 } 995 996 if (req->r_osd) { 997 __remove_osd_from_lru(req->r_osd); 998 list_add(&req->r_osd_item, &req->r_osd->o_requests); 999 list_move(&req->r_req_lru_item, &osdc->req_unsent); 1000 } else { 1001 list_move(&req->r_req_lru_item, &osdc->req_notarget); 1002 } 1003 err = 1; /* osd or pg changed */ 1004 1005 out: 1006 return err; 1007 } 1008 1009 /* 1010 * caller should hold map_sem (for read) and request_mutex 1011 */ 1012 static int __send_request(struct ceph_osd_client *osdc, 1013 struct ceph_osd_request *req) 1014 { 1015 struct ceph_osd_request_head *reqhead; 1016 1017 dout("send_request %p tid %llu to osd%d flags %d\n", 1018 req, req->r_tid, req->r_osd->o_osd, req->r_flags); 1019 1020 reqhead = req->r_request->front.iov_base; 1021 reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch); 1022 reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */ 1023 reqhead->reassert_version = req->r_reassert_version; 1024 1025 req->r_stamp = jiffies; 1026 list_move_tail(&req->r_req_lru_item, &osdc->req_lru); 1027 1028 ceph_msg_get(req->r_request); /* send consumes a ref */ 1029 ceph_con_send(&req->r_osd->o_con, req->r_request); 1030 req->r_sent = req->r_osd->o_incarnation; 1031 return 0; 1032 } 1033 1034 /* 1035 * Send any requests in the queue (req_unsent). 1036 */ 1037 static void send_queued(struct ceph_osd_client *osdc) 1038 { 1039 struct ceph_osd_request *req, *tmp; 1040 1041 dout("send_queued\n"); 1042 mutex_lock(&osdc->request_mutex); 1043 list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) { 1044 __send_request(osdc, req); 1045 } 1046 mutex_unlock(&osdc->request_mutex); 1047 } 1048 1049 /* 1050 * Timeout callback, called every N seconds when 1 or more osd 1051 * requests has been active for more than N seconds. When this 1052 * happens, we ping all OSDs with requests who have timed out to 1053 * ensure any communications channel reset is detected. Reset the 1054 * request timeouts another N seconds in the future as we go. 1055 * Reschedule the timeout event another N seconds in future (unless 1056 * there are no open requests). 1057 */ 1058 static void handle_timeout(struct work_struct *work) 1059 { 1060 struct ceph_osd_client *osdc = 1061 container_of(work, struct ceph_osd_client, timeout_work.work); 1062 struct ceph_osd_request *req, *last_req = NULL; 1063 struct ceph_osd *osd; 1064 unsigned long timeout = osdc->client->options->osd_timeout * HZ; 1065 unsigned long keepalive = 1066 osdc->client->options->osd_keepalive_timeout * HZ; 1067 unsigned long last_stamp = 0; 1068 struct list_head slow_osds; 1069 dout("timeout\n"); 1070 down_read(&osdc->map_sem); 1071 1072 ceph_monc_request_next_osdmap(&osdc->client->monc); 1073 1074 mutex_lock(&osdc->request_mutex); 1075 1076 /* 1077 * reset osds that appear to be _really_ unresponsive. this 1078 * is a failsafe measure.. we really shouldn't be getting to 1079 * this point if the system is working properly. the monitors 1080 * should mark the osd as failed and we should find out about 1081 * it from an updated osd map. 1082 */ 1083 while (timeout && !list_empty(&osdc->req_lru)) { 1084 req = list_entry(osdc->req_lru.next, struct ceph_osd_request, 1085 r_req_lru_item); 1086 1087 if (time_before(jiffies, req->r_stamp + timeout)) 1088 break; 1089 1090 BUG_ON(req == last_req && req->r_stamp == last_stamp); 1091 last_req = req; 1092 last_stamp = req->r_stamp; 1093 1094 osd = req->r_osd; 1095 BUG_ON(!osd); 1096 pr_warning(" tid %llu timed out on osd%d, will reset osd\n", 1097 req->r_tid, osd->o_osd); 1098 __kick_osd_requests(osdc, osd); 1099 } 1100 1101 /* 1102 * ping osds that are a bit slow. this ensures that if there 1103 * is a break in the TCP connection we will notice, and reopen 1104 * a connection with that osd (from the fault callback). 1105 */ 1106 INIT_LIST_HEAD(&slow_osds); 1107 list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) { 1108 if (time_before(jiffies, req->r_stamp + keepalive)) 1109 break; 1110 1111 osd = req->r_osd; 1112 BUG_ON(!osd); 1113 dout(" tid %llu is slow, will send keepalive on osd%d\n", 1114 req->r_tid, osd->o_osd); 1115 list_move_tail(&osd->o_keepalive_item, &slow_osds); 1116 } 1117 while (!list_empty(&slow_osds)) { 1118 osd = list_entry(slow_osds.next, struct ceph_osd, 1119 o_keepalive_item); 1120 list_del_init(&osd->o_keepalive_item); 1121 ceph_con_keepalive(&osd->o_con); 1122 } 1123 1124 __schedule_osd_timeout(osdc); 1125 mutex_unlock(&osdc->request_mutex); 1126 send_queued(osdc); 1127 up_read(&osdc->map_sem); 1128 } 1129 1130 static void handle_osds_timeout(struct work_struct *work) 1131 { 1132 struct ceph_osd_client *osdc = 1133 container_of(work, struct ceph_osd_client, 1134 osds_timeout_work.work); 1135 unsigned long delay = 1136 osdc->client->options->osd_idle_ttl * HZ >> 2; 1137 1138 dout("osds timeout\n"); 1139 down_read(&osdc->map_sem); 1140 remove_old_osds(osdc, 0); 1141 up_read(&osdc->map_sem); 1142 1143 schedule_delayed_work(&osdc->osds_timeout_work, 1144 round_jiffies_relative(delay)); 1145 } 1146 1147 /* 1148 * handle osd op reply. either call the callback if it is specified, 1149 * or do the completion to wake up the waiting thread. 1150 */ 1151 static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, 1152 struct ceph_connection *con) 1153 { 1154 struct ceph_osd_reply_head *rhead = msg->front.iov_base; 1155 struct ceph_osd_request *req; 1156 u64 tid; 1157 int numops, object_len, flags; 1158 s32 result; 1159 1160 tid = le64_to_cpu(msg->hdr.tid); 1161 if (msg->front.iov_len < sizeof(*rhead)) 1162 goto bad; 1163 numops = le32_to_cpu(rhead->num_ops); 1164 object_len = le32_to_cpu(rhead->object_len); 1165 result = le32_to_cpu(rhead->result); 1166 if (msg->front.iov_len != sizeof(*rhead) + object_len + 1167 numops * sizeof(struct ceph_osd_op)) 1168 goto bad; 1169 dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result); 1170 /* lookup */ 1171 mutex_lock(&osdc->request_mutex); 1172 req = __lookup_request(osdc, tid); 1173 if (req == NULL) { 1174 dout("handle_reply tid %llu dne\n", tid); 1175 mutex_unlock(&osdc->request_mutex); 1176 return; 1177 } 1178 ceph_osdc_get_request(req); 1179 flags = le32_to_cpu(rhead->flags); 1180 1181 /* 1182 * if this connection filled our message, drop our reference now, to 1183 * avoid a (safe but slower) revoke later. 1184 */ 1185 if (req->r_con_filling_msg == con && req->r_reply == msg) { 1186 dout(" dropping con_filling_msg ref %p\n", con); 1187 req->r_con_filling_msg = NULL; 1188 ceph_con_put(con); 1189 } 1190 1191 if (!req->r_got_reply) { 1192 unsigned bytes; 1193 1194 req->r_result = le32_to_cpu(rhead->result); 1195 bytes = le32_to_cpu(msg->hdr.data_len); 1196 dout("handle_reply result %d bytes %d\n", req->r_result, 1197 bytes); 1198 if (req->r_result == 0) 1199 req->r_result = bytes; 1200 1201 /* in case this is a write and we need to replay, */ 1202 req->r_reassert_version = rhead->reassert_version; 1203 1204 req->r_got_reply = 1; 1205 } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { 1206 dout("handle_reply tid %llu dup ack\n", tid); 1207 mutex_unlock(&osdc->request_mutex); 1208 goto done; 1209 } 1210 1211 dout("handle_reply tid %llu flags %d\n", tid, flags); 1212 1213 if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK)) 1214 __register_linger_request(osdc, req); 1215 1216 /* either this is a read, or we got the safe response */ 1217 if (result < 0 || 1218 (flags & CEPH_OSD_FLAG_ONDISK) || 1219 ((flags & CEPH_OSD_FLAG_WRITE) == 0)) 1220 __unregister_request(osdc, req); 1221 1222 mutex_unlock(&osdc->request_mutex); 1223 1224 if (req->r_callback) 1225 req->r_callback(req, msg); 1226 else 1227 complete_all(&req->r_completion); 1228 1229 if (flags & CEPH_OSD_FLAG_ONDISK) { 1230 if (req->r_safe_callback) 1231 req->r_safe_callback(req, msg); 1232 complete_all(&req->r_safe_completion); /* fsync waiter */ 1233 } 1234 1235 done: 1236 dout("req=%p req->r_linger=%d\n", req, req->r_linger); 1237 ceph_osdc_put_request(req); 1238 return; 1239 1240 bad: 1241 pr_err("corrupt osd_op_reply got %d %d expected %d\n", 1242 (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len), 1243 (int)sizeof(*rhead)); 1244 ceph_msg_dump(msg); 1245 } 1246 1247 static void reset_changed_osds(struct ceph_osd_client *osdc) 1248 { 1249 struct rb_node *p, *n; 1250 1251 for (p = rb_first(&osdc->osds); p; p = n) { 1252 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); 1253 1254 n = rb_next(p); 1255 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || 1256 memcmp(&osd->o_con.peer_addr, 1257 ceph_osd_addr(osdc->osdmap, 1258 osd->o_osd), 1259 sizeof(struct ceph_entity_addr)) != 0) 1260 __reset_osd(osdc, osd); 1261 } 1262 } 1263 1264 /* 1265 * Requeue requests whose mapping to an OSD has changed. If requests map to 1266 * no osd, request a new map. 1267 * 1268 * Caller should hold map_sem for read and request_mutex. 1269 */ 1270 static void kick_requests(struct ceph_osd_client *osdc) 1271 { 1272 struct ceph_osd_request *req, *nreq; 1273 struct rb_node *p; 1274 int needmap = 0; 1275 int err; 1276 1277 dout("kick_requests\n"); 1278 mutex_lock(&osdc->request_mutex); 1279 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { 1280 req = rb_entry(p, struct ceph_osd_request, r_node); 1281 err = __map_request(osdc, req); 1282 if (err < 0) 1283 continue; /* error */ 1284 if (req->r_osd == NULL) { 1285 dout("%p tid %llu maps to no osd\n", req, req->r_tid); 1286 needmap++; /* request a newer map */ 1287 } else if (err > 0) { 1288 dout("%p tid %llu requeued on osd%d\n", req, req->r_tid, 1289 req->r_osd ? req->r_osd->o_osd : -1); 1290 if (!req->r_linger) 1291 req->r_flags |= CEPH_OSD_FLAG_RETRY; 1292 } 1293 } 1294 1295 list_for_each_entry_safe(req, nreq, &osdc->req_linger, 1296 r_linger_item) { 1297 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); 1298 1299 err = __map_request(osdc, req); 1300 if (err == 0) 1301 continue; /* no change and no osd was specified */ 1302 if (err < 0) 1303 continue; /* hrm! */ 1304 if (req->r_osd == NULL) { 1305 dout("tid %llu maps to no valid osd\n", req->r_tid); 1306 needmap++; /* request a newer map */ 1307 continue; 1308 } 1309 1310 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, 1311 req->r_osd ? req->r_osd->o_osd : -1); 1312 __unregister_linger_request(osdc, req); 1313 __register_request(osdc, req); 1314 } 1315 mutex_unlock(&osdc->request_mutex); 1316 1317 if (needmap) { 1318 dout("%d requests for down osds, need new map\n", needmap); 1319 ceph_monc_request_next_osdmap(&osdc->client->monc); 1320 } 1321 } 1322 1323 1324 /* 1325 * Process updated osd map. 1326 * 1327 * The message contains any number of incremental and full maps, normally 1328 * indicating some sort of topology change in the cluster. Kick requests 1329 * off to different OSDs as needed. 1330 */ 1331 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) 1332 { 1333 void *p, *end, *next; 1334 u32 nr_maps, maplen; 1335 u32 epoch; 1336 struct ceph_osdmap *newmap = NULL, *oldmap; 1337 int err; 1338 struct ceph_fsid fsid; 1339 1340 dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0); 1341 p = msg->front.iov_base; 1342 end = p + msg->front.iov_len; 1343 1344 /* verify fsid */ 1345 ceph_decode_need(&p, end, sizeof(fsid), bad); 1346 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 1347 if (ceph_check_fsid(osdc->client, &fsid) < 0) 1348 return; 1349 1350 down_write(&osdc->map_sem); 1351 1352 /* incremental maps */ 1353 ceph_decode_32_safe(&p, end, nr_maps, bad); 1354 dout(" %d inc maps\n", nr_maps); 1355 while (nr_maps > 0) { 1356 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 1357 epoch = ceph_decode_32(&p); 1358 maplen = ceph_decode_32(&p); 1359 ceph_decode_need(&p, end, maplen, bad); 1360 next = p + maplen; 1361 if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) { 1362 dout("applying incremental map %u len %d\n", 1363 epoch, maplen); 1364 newmap = osdmap_apply_incremental(&p, next, 1365 osdc->osdmap, 1366 osdc->client->msgr); 1367 if (IS_ERR(newmap)) { 1368 err = PTR_ERR(newmap); 1369 goto bad; 1370 } 1371 BUG_ON(!newmap); 1372 if (newmap != osdc->osdmap) { 1373 ceph_osdmap_destroy(osdc->osdmap); 1374 osdc->osdmap = newmap; 1375 } 1376 kick_requests(osdc); 1377 reset_changed_osds(osdc); 1378 } else { 1379 dout("ignoring incremental map %u len %d\n", 1380 epoch, maplen); 1381 } 1382 p = next; 1383 nr_maps--; 1384 } 1385 if (newmap) 1386 goto done; 1387 1388 /* full maps */ 1389 ceph_decode_32_safe(&p, end, nr_maps, bad); 1390 dout(" %d full maps\n", nr_maps); 1391 while (nr_maps) { 1392 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 1393 epoch = ceph_decode_32(&p); 1394 maplen = ceph_decode_32(&p); 1395 ceph_decode_need(&p, end, maplen, bad); 1396 if (nr_maps > 1) { 1397 dout("skipping non-latest full map %u len %d\n", 1398 epoch, maplen); 1399 } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) { 1400 dout("skipping full map %u len %d, " 1401 "older than our %u\n", epoch, maplen, 1402 osdc->osdmap->epoch); 1403 } else { 1404 dout("taking full map %u len %d\n", epoch, maplen); 1405 newmap = osdmap_decode(&p, p+maplen); 1406 if (IS_ERR(newmap)) { 1407 err = PTR_ERR(newmap); 1408 goto bad; 1409 } 1410 BUG_ON(!newmap); 1411 oldmap = osdc->osdmap; 1412 osdc->osdmap = newmap; 1413 if (oldmap) 1414 ceph_osdmap_destroy(oldmap); 1415 kick_requests(osdc); 1416 } 1417 p += maplen; 1418 nr_maps--; 1419 } 1420 1421 done: 1422 downgrade_write(&osdc->map_sem); 1423 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch); 1424 1425 /* 1426 * subscribe to subsequent osdmap updates if full to ensure 1427 * we find out when we are no longer full and stop returning 1428 * ENOSPC. 1429 */ 1430 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) 1431 ceph_monc_request_next_osdmap(&osdc->client->monc); 1432 1433 send_queued(osdc); 1434 up_read(&osdc->map_sem); 1435 wake_up_all(&osdc->client->auth_wq); 1436 return; 1437 1438 bad: 1439 pr_err("osdc handle_map corrupt msg\n"); 1440 ceph_msg_dump(msg); 1441 up_write(&osdc->map_sem); 1442 return; 1443 } 1444 1445 /* 1446 * watch/notify callback event infrastructure 1447 * 1448 * These callbacks are used both for watch and notify operations. 1449 */ 1450 static void __release_event(struct kref *kref) 1451 { 1452 struct ceph_osd_event *event = 1453 container_of(kref, struct ceph_osd_event, kref); 1454 1455 dout("__release_event %p\n", event); 1456 kfree(event); 1457 } 1458 1459 static void get_event(struct ceph_osd_event *event) 1460 { 1461 kref_get(&event->kref); 1462 } 1463 1464 void ceph_osdc_put_event(struct ceph_osd_event *event) 1465 { 1466 kref_put(&event->kref, __release_event); 1467 } 1468 EXPORT_SYMBOL(ceph_osdc_put_event); 1469 1470 static void __insert_event(struct ceph_osd_client *osdc, 1471 struct ceph_osd_event *new) 1472 { 1473 struct rb_node **p = &osdc->event_tree.rb_node; 1474 struct rb_node *parent = NULL; 1475 struct ceph_osd_event *event = NULL; 1476 1477 while (*p) { 1478 parent = *p; 1479 event = rb_entry(parent, struct ceph_osd_event, node); 1480 if (new->cookie < event->cookie) 1481 p = &(*p)->rb_left; 1482 else if (new->cookie > event->cookie) 1483 p = &(*p)->rb_right; 1484 else 1485 BUG(); 1486 } 1487 1488 rb_link_node(&new->node, parent, p); 1489 rb_insert_color(&new->node, &osdc->event_tree); 1490 } 1491 1492 static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc, 1493 u64 cookie) 1494 { 1495 struct rb_node **p = &osdc->event_tree.rb_node; 1496 struct rb_node *parent = NULL; 1497 struct ceph_osd_event *event = NULL; 1498 1499 while (*p) { 1500 parent = *p; 1501 event = rb_entry(parent, struct ceph_osd_event, node); 1502 if (cookie < event->cookie) 1503 p = &(*p)->rb_left; 1504 else if (cookie > event->cookie) 1505 p = &(*p)->rb_right; 1506 else 1507 return event; 1508 } 1509 return NULL; 1510 } 1511 1512 static void __remove_event(struct ceph_osd_event *event) 1513 { 1514 struct ceph_osd_client *osdc = event->osdc; 1515 1516 if (!RB_EMPTY_NODE(&event->node)) { 1517 dout("__remove_event removed %p\n", event); 1518 rb_erase(&event->node, &osdc->event_tree); 1519 ceph_osdc_put_event(event); 1520 } else { 1521 dout("__remove_event didn't remove %p\n", event); 1522 } 1523 } 1524 1525 int ceph_osdc_create_event(struct ceph_osd_client *osdc, 1526 void (*event_cb)(u64, u64, u8, void *), 1527 int one_shot, void *data, 1528 struct ceph_osd_event **pevent) 1529 { 1530 struct ceph_osd_event *event; 1531 1532 event = kmalloc(sizeof(*event), GFP_NOIO); 1533 if (!event) 1534 return -ENOMEM; 1535 1536 dout("create_event %p\n", event); 1537 event->cb = event_cb; 1538 event->one_shot = one_shot; 1539 event->data = data; 1540 event->osdc = osdc; 1541 INIT_LIST_HEAD(&event->osd_node); 1542 kref_init(&event->kref); /* one ref for us */ 1543 kref_get(&event->kref); /* one ref for the caller */ 1544 init_completion(&event->completion); 1545 1546 spin_lock(&osdc->event_lock); 1547 event->cookie = ++osdc->event_count; 1548 __insert_event(osdc, event); 1549 spin_unlock(&osdc->event_lock); 1550 1551 *pevent = event; 1552 return 0; 1553 } 1554 EXPORT_SYMBOL(ceph_osdc_create_event); 1555 1556 void ceph_osdc_cancel_event(struct ceph_osd_event *event) 1557 { 1558 struct ceph_osd_client *osdc = event->osdc; 1559 1560 dout("cancel_event %p\n", event); 1561 spin_lock(&osdc->event_lock); 1562 __remove_event(event); 1563 spin_unlock(&osdc->event_lock); 1564 ceph_osdc_put_event(event); /* caller's */ 1565 } 1566 EXPORT_SYMBOL(ceph_osdc_cancel_event); 1567 1568 1569 static void do_event_work(struct work_struct *work) 1570 { 1571 struct ceph_osd_event_work *event_work = 1572 container_of(work, struct ceph_osd_event_work, work); 1573 struct ceph_osd_event *event = event_work->event; 1574 u64 ver = event_work->ver; 1575 u64 notify_id = event_work->notify_id; 1576 u8 opcode = event_work->opcode; 1577 1578 dout("do_event_work completing %p\n", event); 1579 event->cb(ver, notify_id, opcode, event->data); 1580 complete(&event->completion); 1581 dout("do_event_work completed %p\n", event); 1582 ceph_osdc_put_event(event); 1583 kfree(event_work); 1584 } 1585 1586 1587 /* 1588 * Process osd watch notifications 1589 */ 1590 void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg) 1591 { 1592 void *p, *end; 1593 u8 proto_ver; 1594 u64 cookie, ver, notify_id; 1595 u8 opcode; 1596 struct ceph_osd_event *event; 1597 struct ceph_osd_event_work *event_work; 1598 1599 p = msg->front.iov_base; 1600 end = p + msg->front.iov_len; 1601 1602 ceph_decode_8_safe(&p, end, proto_ver, bad); 1603 ceph_decode_8_safe(&p, end, opcode, bad); 1604 ceph_decode_64_safe(&p, end, cookie, bad); 1605 ceph_decode_64_safe(&p, end, ver, bad); 1606 ceph_decode_64_safe(&p, end, notify_id, bad); 1607 1608 spin_lock(&osdc->event_lock); 1609 event = __find_event(osdc, cookie); 1610 if (event) { 1611 get_event(event); 1612 if (event->one_shot) 1613 __remove_event(event); 1614 } 1615 spin_unlock(&osdc->event_lock); 1616 dout("handle_watch_notify cookie %lld ver %lld event %p\n", 1617 cookie, ver, event); 1618 if (event) { 1619 event_work = kmalloc(sizeof(*event_work), GFP_NOIO); 1620 if (!event_work) { 1621 dout("ERROR: could not allocate event_work\n"); 1622 goto done_err; 1623 } 1624 INIT_WORK(&event_work->work, do_event_work); 1625 event_work->event = event; 1626 event_work->ver = ver; 1627 event_work->notify_id = notify_id; 1628 event_work->opcode = opcode; 1629 if (!queue_work(osdc->notify_wq, &event_work->work)) { 1630 dout("WARNING: failed to queue notify event work\n"); 1631 goto done_err; 1632 } 1633 } 1634 1635 return; 1636 1637 done_err: 1638 complete(&event->completion); 1639 ceph_osdc_put_event(event); 1640 return; 1641 1642 bad: 1643 pr_err("osdc handle_watch_notify corrupt msg\n"); 1644 return; 1645 } 1646 1647 int ceph_osdc_wait_event(struct ceph_osd_event *event, unsigned long timeout) 1648 { 1649 int err; 1650 1651 dout("wait_event %p\n", event); 1652 err = wait_for_completion_interruptible_timeout(&event->completion, 1653 timeout * HZ); 1654 ceph_osdc_put_event(event); 1655 if (err > 0) 1656 err = 0; 1657 dout("wait_event %p returns %d\n", event, err); 1658 return err; 1659 } 1660 EXPORT_SYMBOL(ceph_osdc_wait_event); 1661 1662 /* 1663 * Register request, send initial attempt. 1664 */ 1665 int ceph_osdc_start_request(struct ceph_osd_client *osdc, 1666 struct ceph_osd_request *req, 1667 bool nofail) 1668 { 1669 int rc = 0; 1670 1671 req->r_request->pages = req->r_pages; 1672 req->r_request->nr_pages = req->r_num_pages; 1673 #ifdef CONFIG_BLOCK 1674 req->r_request->bio = req->r_bio; 1675 #endif 1676 req->r_request->trail = req->r_trail; 1677 1678 register_request(osdc, req); 1679 1680 down_read(&osdc->map_sem); 1681 mutex_lock(&osdc->request_mutex); 1682 /* 1683 * a racing kick_requests() may have sent the message for us 1684 * while we dropped request_mutex above, so only send now if 1685 * the request still han't been touched yet. 1686 */ 1687 if (req->r_sent == 0) { 1688 rc = __map_request(osdc, req); 1689 if (rc < 0) { 1690 if (nofail) { 1691 dout("osdc_start_request failed map, " 1692 " will retry %lld\n", req->r_tid); 1693 rc = 0; 1694 } 1695 goto out_unlock; 1696 } 1697 if (req->r_osd == NULL) { 1698 dout("send_request %p no up osds in pg\n", req); 1699 ceph_monc_request_next_osdmap(&osdc->client->monc); 1700 } else { 1701 rc = __send_request(osdc, req); 1702 if (rc) { 1703 if (nofail) { 1704 dout("osdc_start_request failed send, " 1705 " will retry %lld\n", req->r_tid); 1706 rc = 0; 1707 } else { 1708 __unregister_request(osdc, req); 1709 } 1710 } 1711 } 1712 } 1713 1714 out_unlock: 1715 mutex_unlock(&osdc->request_mutex); 1716 up_read(&osdc->map_sem); 1717 return rc; 1718 } 1719 EXPORT_SYMBOL(ceph_osdc_start_request); 1720 1721 /* 1722 * wait for a request to complete 1723 */ 1724 int ceph_osdc_wait_request(struct ceph_osd_client *osdc, 1725 struct ceph_osd_request *req) 1726 { 1727 int rc; 1728 1729 rc = wait_for_completion_interruptible(&req->r_completion); 1730 if (rc < 0) { 1731 mutex_lock(&osdc->request_mutex); 1732 __cancel_request(req); 1733 __unregister_request(osdc, req); 1734 mutex_unlock(&osdc->request_mutex); 1735 dout("wait_request tid %llu canceled/timed out\n", req->r_tid); 1736 return rc; 1737 } 1738 1739 dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result); 1740 return req->r_result; 1741 } 1742 EXPORT_SYMBOL(ceph_osdc_wait_request); 1743 1744 /* 1745 * sync - wait for all in-flight requests to flush. avoid starvation. 1746 */ 1747 void ceph_osdc_sync(struct ceph_osd_client *osdc) 1748 { 1749 struct ceph_osd_request *req; 1750 u64 last_tid, next_tid = 0; 1751 1752 mutex_lock(&osdc->request_mutex); 1753 last_tid = osdc->last_tid; 1754 while (1) { 1755 req = __lookup_request_ge(osdc, next_tid); 1756 if (!req) 1757 break; 1758 if (req->r_tid > last_tid) 1759 break; 1760 1761 next_tid = req->r_tid + 1; 1762 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0) 1763 continue; 1764 1765 ceph_osdc_get_request(req); 1766 mutex_unlock(&osdc->request_mutex); 1767 dout("sync waiting on tid %llu (last is %llu)\n", 1768 req->r_tid, last_tid); 1769 wait_for_completion(&req->r_safe_completion); 1770 mutex_lock(&osdc->request_mutex); 1771 ceph_osdc_put_request(req); 1772 } 1773 mutex_unlock(&osdc->request_mutex); 1774 dout("sync done (thru tid %llu)\n", last_tid); 1775 } 1776 EXPORT_SYMBOL(ceph_osdc_sync); 1777 1778 /* 1779 * init, shutdown 1780 */ 1781 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) 1782 { 1783 int err; 1784 1785 dout("init\n"); 1786 osdc->client = client; 1787 osdc->osdmap = NULL; 1788 init_rwsem(&osdc->map_sem); 1789 init_completion(&osdc->map_waiters); 1790 osdc->last_requested_map = 0; 1791 mutex_init(&osdc->request_mutex); 1792 osdc->last_tid = 0; 1793 osdc->osds = RB_ROOT; 1794 INIT_LIST_HEAD(&osdc->osd_lru); 1795 osdc->requests = RB_ROOT; 1796 INIT_LIST_HEAD(&osdc->req_lru); 1797 INIT_LIST_HEAD(&osdc->req_unsent); 1798 INIT_LIST_HEAD(&osdc->req_notarget); 1799 INIT_LIST_HEAD(&osdc->req_linger); 1800 osdc->num_requests = 0; 1801 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); 1802 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); 1803 spin_lock_init(&osdc->event_lock); 1804 osdc->event_tree = RB_ROOT; 1805 osdc->event_count = 0; 1806 1807 schedule_delayed_work(&osdc->osds_timeout_work, 1808 round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ)); 1809 1810 err = -ENOMEM; 1811 osdc->req_mempool = mempool_create_kmalloc_pool(10, 1812 sizeof(struct ceph_osd_request)); 1813 if (!osdc->req_mempool) 1814 goto out; 1815 1816 err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true, 1817 "osd_op"); 1818 if (err < 0) 1819 goto out_mempool; 1820 err = ceph_msgpool_init(&osdc->msgpool_op_reply, 1821 OSD_OPREPLY_FRONT_LEN, 10, true, 1822 "osd_op_reply"); 1823 if (err < 0) 1824 goto out_msgpool; 1825 1826 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); 1827 if (IS_ERR(osdc->notify_wq)) { 1828 err = PTR_ERR(osdc->notify_wq); 1829 osdc->notify_wq = NULL; 1830 goto out_msgpool; 1831 } 1832 return 0; 1833 1834 out_msgpool: 1835 ceph_msgpool_destroy(&osdc->msgpool_op); 1836 out_mempool: 1837 mempool_destroy(osdc->req_mempool); 1838 out: 1839 return err; 1840 } 1841 EXPORT_SYMBOL(ceph_osdc_init); 1842 1843 void ceph_osdc_stop(struct ceph_osd_client *osdc) 1844 { 1845 flush_workqueue(osdc->notify_wq); 1846 destroy_workqueue(osdc->notify_wq); 1847 cancel_delayed_work_sync(&osdc->timeout_work); 1848 cancel_delayed_work_sync(&osdc->osds_timeout_work); 1849 if (osdc->osdmap) { 1850 ceph_osdmap_destroy(osdc->osdmap); 1851 osdc->osdmap = NULL; 1852 } 1853 remove_old_osds(osdc, 1); 1854 WARN_ON(!RB_EMPTY_ROOT(&osdc->osds)); 1855 mempool_destroy(osdc->req_mempool); 1856 ceph_msgpool_destroy(&osdc->msgpool_op); 1857 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 1858 } 1859 EXPORT_SYMBOL(ceph_osdc_stop); 1860 1861 /* 1862 * Read some contiguous pages. If we cross a stripe boundary, shorten 1863 * *plen. Return number of bytes read, or error. 1864 */ 1865 int ceph_osdc_readpages(struct ceph_osd_client *osdc, 1866 struct ceph_vino vino, struct ceph_file_layout *layout, 1867 u64 off, u64 *plen, 1868 u32 truncate_seq, u64 truncate_size, 1869 struct page **pages, int num_pages, int page_align) 1870 { 1871 struct ceph_osd_request *req; 1872 int rc = 0; 1873 1874 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, 1875 vino.snap, off, *plen); 1876 req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1877 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 1878 NULL, 0, truncate_seq, truncate_size, NULL, 1879 false, 1, page_align); 1880 if (!req) 1881 return -ENOMEM; 1882 1883 /* it may be a short read due to an object boundary */ 1884 req->r_pages = pages; 1885 1886 dout("readpages final extent is %llu~%llu (%d pages align %d)\n", 1887 off, *plen, req->r_num_pages, page_align); 1888 1889 rc = ceph_osdc_start_request(osdc, req, false); 1890 if (!rc) 1891 rc = ceph_osdc_wait_request(osdc, req); 1892 1893 ceph_osdc_put_request(req); 1894 dout("readpages result %d\n", rc); 1895 return rc; 1896 } 1897 EXPORT_SYMBOL(ceph_osdc_readpages); 1898 1899 /* 1900 * do a synchronous write on N pages 1901 */ 1902 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, 1903 struct ceph_file_layout *layout, 1904 struct ceph_snap_context *snapc, 1905 u64 off, u64 len, 1906 u32 truncate_seq, u64 truncate_size, 1907 struct timespec *mtime, 1908 struct page **pages, int num_pages, 1909 int flags, int do_sync, bool nofail) 1910 { 1911 struct ceph_osd_request *req; 1912 int rc = 0; 1913 int page_align = off & ~PAGE_MASK; 1914 1915 BUG_ON(vino.snap != CEPH_NOSNAP); 1916 req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1917 CEPH_OSD_OP_WRITE, 1918 flags | CEPH_OSD_FLAG_ONDISK | 1919 CEPH_OSD_FLAG_WRITE, 1920 snapc, do_sync, 1921 truncate_seq, truncate_size, mtime, 1922 nofail, 1, page_align); 1923 if (!req) 1924 return -ENOMEM; 1925 1926 /* it may be a short write due to an object boundary */ 1927 req->r_pages = pages; 1928 dout("writepages %llu~%llu (%d pages)\n", off, len, 1929 req->r_num_pages); 1930 1931 rc = ceph_osdc_start_request(osdc, req, nofail); 1932 if (!rc) 1933 rc = ceph_osdc_wait_request(osdc, req); 1934 1935 ceph_osdc_put_request(req); 1936 if (rc == 0) 1937 rc = len; 1938 dout("writepages result %d\n", rc); 1939 return rc; 1940 } 1941 EXPORT_SYMBOL(ceph_osdc_writepages); 1942 1943 /* 1944 * handle incoming message 1945 */ 1946 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) 1947 { 1948 struct ceph_osd *osd = con->private; 1949 struct ceph_osd_client *osdc; 1950 int type = le16_to_cpu(msg->hdr.type); 1951 1952 if (!osd) 1953 goto out; 1954 osdc = osd->o_osdc; 1955 1956 switch (type) { 1957 case CEPH_MSG_OSD_MAP: 1958 ceph_osdc_handle_map(osdc, msg); 1959 break; 1960 case CEPH_MSG_OSD_OPREPLY: 1961 handle_reply(osdc, msg, con); 1962 break; 1963 case CEPH_MSG_WATCH_NOTIFY: 1964 handle_watch_notify(osdc, msg); 1965 break; 1966 1967 default: 1968 pr_err("received unknown message type %d %s\n", type, 1969 ceph_msg_type_name(type)); 1970 } 1971 out: 1972 ceph_msg_put(msg); 1973 } 1974 1975 /* 1976 * lookup and return message for incoming reply. set up reply message 1977 * pages. 1978 */ 1979 static struct ceph_msg *get_reply(struct ceph_connection *con, 1980 struct ceph_msg_header *hdr, 1981 int *skip) 1982 { 1983 struct ceph_osd *osd = con->private; 1984 struct ceph_osd_client *osdc = osd->o_osdc; 1985 struct ceph_msg *m; 1986 struct ceph_osd_request *req; 1987 int front = le32_to_cpu(hdr->front_len); 1988 int data_len = le32_to_cpu(hdr->data_len); 1989 u64 tid; 1990 1991 tid = le64_to_cpu(hdr->tid); 1992 mutex_lock(&osdc->request_mutex); 1993 req = __lookup_request(osdc, tid); 1994 if (!req) { 1995 *skip = 1; 1996 m = NULL; 1997 pr_info("get_reply unknown tid %llu from osd%d\n", tid, 1998 osd->o_osd); 1999 goto out; 2000 } 2001 2002 if (req->r_con_filling_msg) { 2003 dout("get_reply revoking msg %p from old con %p\n", 2004 req->r_reply, req->r_con_filling_msg); 2005 ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply); 2006 ceph_con_put(req->r_con_filling_msg); 2007 req->r_con_filling_msg = NULL; 2008 } 2009 2010 if (front > req->r_reply->front.iov_len) { 2011 pr_warning("get_reply front %d > preallocated %d\n", 2012 front, (int)req->r_reply->front.iov_len); 2013 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS); 2014 if (!m) 2015 goto out; 2016 ceph_msg_put(req->r_reply); 2017 req->r_reply = m; 2018 } 2019 m = ceph_msg_get(req->r_reply); 2020 2021 if (data_len > 0) { 2022 int want = calc_pages_for(req->r_page_alignment, data_len); 2023 2024 if (unlikely(req->r_num_pages < want)) { 2025 pr_warning("tid %lld reply %d > expected %d pages\n", 2026 tid, want, m->nr_pages); 2027 *skip = 1; 2028 ceph_msg_put(m); 2029 m = NULL; 2030 goto out; 2031 } 2032 m->pages = req->r_pages; 2033 m->nr_pages = req->r_num_pages; 2034 m->page_alignment = req->r_page_alignment; 2035 #ifdef CONFIG_BLOCK 2036 m->bio = req->r_bio; 2037 #endif 2038 } 2039 *skip = 0; 2040 req->r_con_filling_msg = ceph_con_get(con); 2041 dout("get_reply tid %lld %p\n", tid, m); 2042 2043 out: 2044 mutex_unlock(&osdc->request_mutex); 2045 return m; 2046 2047 } 2048 2049 static struct ceph_msg *alloc_msg(struct ceph_connection *con, 2050 struct ceph_msg_header *hdr, 2051 int *skip) 2052 { 2053 struct ceph_osd *osd = con->private; 2054 int type = le16_to_cpu(hdr->type); 2055 int front = le32_to_cpu(hdr->front_len); 2056 2057 switch (type) { 2058 case CEPH_MSG_OSD_MAP: 2059 case CEPH_MSG_WATCH_NOTIFY: 2060 return ceph_msg_new(type, front, GFP_NOFS); 2061 case CEPH_MSG_OSD_OPREPLY: 2062 return get_reply(con, hdr, skip); 2063 default: 2064 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type, 2065 osd->o_osd); 2066 *skip = 1; 2067 return NULL; 2068 } 2069 } 2070 2071 /* 2072 * Wrappers to refcount containing ceph_osd struct 2073 */ 2074 static struct ceph_connection *get_osd_con(struct ceph_connection *con) 2075 { 2076 struct ceph_osd *osd = con->private; 2077 if (get_osd(osd)) 2078 return con; 2079 return NULL; 2080 } 2081 2082 static void put_osd_con(struct ceph_connection *con) 2083 { 2084 struct ceph_osd *osd = con->private; 2085 put_osd(osd); 2086 } 2087 2088 /* 2089 * authentication 2090 */ 2091 static int get_authorizer(struct ceph_connection *con, 2092 void **buf, int *len, int *proto, 2093 void **reply_buf, int *reply_len, int force_new) 2094 { 2095 struct ceph_osd *o = con->private; 2096 struct ceph_osd_client *osdc = o->o_osdc; 2097 struct ceph_auth_client *ac = osdc->client->monc.auth; 2098 int ret = 0; 2099 2100 if (force_new && o->o_authorizer) { 2101 ac->ops->destroy_authorizer(ac, o->o_authorizer); 2102 o->o_authorizer = NULL; 2103 } 2104 if (o->o_authorizer == NULL) { 2105 ret = ac->ops->create_authorizer( 2106 ac, CEPH_ENTITY_TYPE_OSD, 2107 &o->o_authorizer, 2108 &o->o_authorizer_buf, 2109 &o->o_authorizer_buf_len, 2110 &o->o_authorizer_reply_buf, 2111 &o->o_authorizer_reply_buf_len); 2112 if (ret) 2113 return ret; 2114 } 2115 2116 *proto = ac->protocol; 2117 *buf = o->o_authorizer_buf; 2118 *len = o->o_authorizer_buf_len; 2119 *reply_buf = o->o_authorizer_reply_buf; 2120 *reply_len = o->o_authorizer_reply_buf_len; 2121 return 0; 2122 } 2123 2124 2125 static int verify_authorizer_reply(struct ceph_connection *con, int len) 2126 { 2127 struct ceph_osd *o = con->private; 2128 struct ceph_osd_client *osdc = o->o_osdc; 2129 struct ceph_auth_client *ac = osdc->client->monc.auth; 2130 2131 return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len); 2132 } 2133 2134 static int invalidate_authorizer(struct ceph_connection *con) 2135 { 2136 struct ceph_osd *o = con->private; 2137 struct ceph_osd_client *osdc = o->o_osdc; 2138 struct ceph_auth_client *ac = osdc->client->monc.auth; 2139 2140 if (ac->ops->invalidate_authorizer) 2141 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); 2142 2143 return ceph_monc_validate_auth(&osdc->client->monc); 2144 } 2145 2146 static const struct ceph_connection_operations osd_con_ops = { 2147 .get = get_osd_con, 2148 .put = put_osd_con, 2149 .dispatch = dispatch, 2150 .get_authorizer = get_authorizer, 2151 .verify_authorizer_reply = verify_authorizer_reply, 2152 .invalidate_authorizer = invalidate_authorizer, 2153 .alloc_msg = alloc_msg, 2154 .fault = osd_reset, 2155 }; 2156