1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/ceph/ceph_debug.h> 4 5 #include <linux/module.h> 6 #include <linux/err.h> 7 #include <linux/highmem.h> 8 #include <linux/mm.h> 9 #include <linux/pagemap.h> 10 #include <linux/slab.h> 11 #include <linux/uaccess.h> 12 #ifdef CONFIG_BLOCK 13 #include <linux/bio.h> 14 #endif 15 16 #include <linux/ceph/ceph_features.h> 17 #include <linux/ceph/libceph.h> 18 #include <linux/ceph/osd_client.h> 19 #include <linux/ceph/messenger.h> 20 #include <linux/ceph/decode.h> 21 #include <linux/ceph/auth.h> 22 #include <linux/ceph/pagelist.h> 23 #include <linux/ceph/striper.h> 24 25 #define OSD_OPREPLY_FRONT_LEN 512 26 27 static struct kmem_cache *ceph_osd_request_cache; 28 29 static const struct ceph_connection_operations osd_con_ops; 30 31 /* 32 * Implement client access to distributed object storage cluster. 33 * 34 * All data objects are stored within a cluster/cloud of OSDs, or 35 * "object storage devices." (Note that Ceph OSDs have _nothing_ to 36 * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply 37 * remote daemons serving up and coordinating consistent and safe 38 * access to storage. 39 * 40 * Cluster membership and the mapping of data objects onto storage devices 41 * are described by the osd map. 42 * 43 * We keep track of pending OSD requests (read, write), resubmit 44 * requests to different OSDs when the cluster topology/data layout 45 * change, or retry the affected requests when the communications 46 * channel with an OSD is reset. 47 */ 48 49 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req); 50 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req); 51 static void link_linger(struct ceph_osd *osd, 52 struct ceph_osd_linger_request *lreq); 53 static void unlink_linger(struct ceph_osd *osd, 54 struct ceph_osd_linger_request *lreq); 55 static void clear_backoffs(struct ceph_osd *osd); 56 57 #if 1 58 static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem) 59 { 60 bool wrlocked = true; 61 62 if (unlikely(down_read_trylock(sem))) { 63 wrlocked = false; 64 up_read(sem); 65 } 66 67 return wrlocked; 68 } 69 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) 70 { 71 WARN_ON(!rwsem_is_locked(&osdc->lock)); 72 } 73 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) 74 { 75 WARN_ON(!rwsem_is_wrlocked(&osdc->lock)); 76 } 77 static inline void verify_osd_locked(struct ceph_osd *osd) 78 { 79 struct ceph_osd_client *osdc = osd->o_osdc; 80 81 WARN_ON(!(mutex_is_locked(&osd->lock) && 82 rwsem_is_locked(&osdc->lock)) && 83 !rwsem_is_wrlocked(&osdc->lock)); 84 } 85 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) 86 { 87 WARN_ON(!mutex_is_locked(&lreq->lock)); 88 } 89 #else 90 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { } 91 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { } 92 static inline void verify_osd_locked(struct ceph_osd *osd) { } 93 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { } 94 #endif 95 96 /* 97 * calculate the mapping of a file extent onto an object, and fill out the 98 * request accordingly. shorten extent as necessary if it crosses an 99 * object boundary. 100 * 101 * fill osd op in request message. 102 */ 103 static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, 104 u64 *objnum, u64 *objoff, u64 *objlen) 105 { 106 u64 orig_len = *plen; 107 u32 xlen; 108 109 /* object extent? */ 110 ceph_calc_file_object_mapping(layout, off, orig_len, objnum, 111 objoff, &xlen); 112 *objlen = xlen; 113 if (*objlen < orig_len) { 114 *plen = *objlen; 115 dout(" skipping last %llu, final file extent %llu~%llu\n", 116 orig_len - *plen, off, *plen); 117 } 118 119 dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen); 120 return 0; 121 } 122 123 static void ceph_osd_data_init(struct ceph_osd_data *osd_data) 124 { 125 memset(osd_data, 0, sizeof (*osd_data)); 126 osd_data->type = CEPH_OSD_DATA_TYPE_NONE; 127 } 128 129 /* 130 * Consumes @pages if @own_pages is true. 131 */ 132 static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, 133 struct page **pages, u64 length, u32 alignment, 134 bool pages_from_pool, bool own_pages) 135 { 136 osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; 137 osd_data->pages = pages; 138 osd_data->length = length; 139 osd_data->alignment = alignment; 140 osd_data->pages_from_pool = pages_from_pool; 141 osd_data->own_pages = own_pages; 142 } 143 144 /* 145 * Consumes a ref on @pagelist. 146 */ 147 static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, 148 struct ceph_pagelist *pagelist) 149 { 150 osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST; 151 osd_data->pagelist = pagelist; 152 } 153 154 #ifdef CONFIG_BLOCK 155 static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, 156 struct ceph_bio_iter *bio_pos, 157 u32 bio_length) 158 { 159 osd_data->type = CEPH_OSD_DATA_TYPE_BIO; 160 osd_data->bio_pos = *bio_pos; 161 osd_data->bio_length = bio_length; 162 } 163 #endif /* CONFIG_BLOCK */ 164 165 static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data, 166 struct ceph_bvec_iter *bvec_pos, 167 u32 num_bvecs) 168 { 169 osd_data->type = CEPH_OSD_DATA_TYPE_BVECS; 170 osd_data->bvec_pos = *bvec_pos; 171 osd_data->num_bvecs = num_bvecs; 172 } 173 174 static void ceph_osd_iter_init(struct ceph_osd_data *osd_data, 175 struct iov_iter *iter) 176 { 177 osd_data->type = CEPH_OSD_DATA_TYPE_ITER; 178 osd_data->iter = *iter; 179 } 180 181 static struct ceph_osd_data * 182 osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which) 183 { 184 BUG_ON(which >= osd_req->r_num_ops); 185 186 return &osd_req->r_ops[which].raw_data_in; 187 } 188 189 struct ceph_osd_data * 190 osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, 191 unsigned int which) 192 { 193 return osd_req_op_data(osd_req, which, extent, osd_data); 194 } 195 EXPORT_SYMBOL(osd_req_op_extent_osd_data); 196 197 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req, 198 unsigned int which, struct page **pages, 199 u64 length, u32 alignment, 200 bool pages_from_pool, bool own_pages) 201 { 202 struct ceph_osd_data *osd_data; 203 204 osd_data = osd_req_op_raw_data_in(osd_req, which); 205 ceph_osd_data_pages_init(osd_data, pages, length, alignment, 206 pages_from_pool, own_pages); 207 } 208 EXPORT_SYMBOL(osd_req_op_raw_data_in_pages); 209 210 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, 211 unsigned int which, struct page **pages, 212 u64 length, u32 alignment, 213 bool pages_from_pool, bool own_pages) 214 { 215 struct ceph_osd_data *osd_data; 216 217 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 218 ceph_osd_data_pages_init(osd_data, pages, length, alignment, 219 pages_from_pool, own_pages); 220 } 221 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages); 222 223 #ifdef CONFIG_BLOCK 224 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, 225 unsigned int which, 226 struct ceph_bio_iter *bio_pos, 227 u32 bio_length) 228 { 229 struct ceph_osd_data *osd_data; 230 231 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 232 ceph_osd_data_bio_init(osd_data, bio_pos, bio_length); 233 } 234 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); 235 #endif /* CONFIG_BLOCK */ 236 237 void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, 238 unsigned int which, 239 struct bio_vec *bvecs, u32 num_bvecs, 240 u32 bytes) 241 { 242 struct ceph_osd_data *osd_data; 243 struct ceph_bvec_iter it = { 244 .bvecs = bvecs, 245 .iter = { .bi_size = bytes }, 246 }; 247 248 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 249 ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs); 250 } 251 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs); 252 253 void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, 254 unsigned int which, 255 struct ceph_bvec_iter *bvec_pos) 256 { 257 struct ceph_osd_data *osd_data; 258 259 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 260 ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0); 261 } 262 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos); 263 264 /** 265 * osd_req_op_extent_osd_iter - Set up an operation with an iterator buffer 266 * @osd_req: The request to set up 267 * @which: Index of the operation in which to set the iter 268 * @iter: The buffer iterator 269 */ 270 void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req, 271 unsigned int which, struct iov_iter *iter) 272 { 273 struct ceph_osd_data *osd_data; 274 275 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 276 ceph_osd_iter_init(osd_data, iter); 277 } 278 EXPORT_SYMBOL(osd_req_op_extent_osd_iter); 279 280 static void osd_req_op_cls_request_info_pagelist( 281 struct ceph_osd_request *osd_req, 282 unsigned int which, struct ceph_pagelist *pagelist) 283 { 284 struct ceph_osd_data *osd_data; 285 286 osd_data = osd_req_op_data(osd_req, which, cls, request_info); 287 ceph_osd_data_pagelist_init(osd_data, pagelist); 288 } 289 290 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req, 291 unsigned int which, struct page **pages, u64 length, 292 u32 alignment, bool pages_from_pool, bool own_pages) 293 { 294 struct ceph_osd_data *osd_data; 295 296 osd_data = osd_req_op_data(osd_req, which, cls, request_data); 297 ceph_osd_data_pages_init(osd_data, pages, length, alignment, 298 pages_from_pool, own_pages); 299 osd_req->r_ops[which].cls.indata_len += length; 300 osd_req->r_ops[which].indata_len += length; 301 } 302 EXPORT_SYMBOL(osd_req_op_cls_request_data_pages); 303 304 void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, 305 unsigned int which, 306 struct bio_vec *bvecs, u32 num_bvecs, 307 u32 bytes) 308 { 309 struct ceph_osd_data *osd_data; 310 struct ceph_bvec_iter it = { 311 .bvecs = bvecs, 312 .iter = { .bi_size = bytes }, 313 }; 314 315 osd_data = osd_req_op_data(osd_req, which, cls, request_data); 316 ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs); 317 osd_req->r_ops[which].cls.indata_len += bytes; 318 osd_req->r_ops[which].indata_len += bytes; 319 } 320 EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs); 321 322 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, 323 unsigned int which, struct page **pages, u64 length, 324 u32 alignment, bool pages_from_pool, bool own_pages) 325 { 326 struct ceph_osd_data *osd_data; 327 328 osd_data = osd_req_op_data(osd_req, which, cls, response_data); 329 ceph_osd_data_pages_init(osd_data, pages, length, alignment, 330 pages_from_pool, own_pages); 331 } 332 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages); 333 334 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data) 335 { 336 switch (osd_data->type) { 337 case CEPH_OSD_DATA_TYPE_NONE: 338 return 0; 339 case CEPH_OSD_DATA_TYPE_PAGES: 340 return osd_data->length; 341 case CEPH_OSD_DATA_TYPE_PAGELIST: 342 return (u64)osd_data->pagelist->length; 343 #ifdef CONFIG_BLOCK 344 case CEPH_OSD_DATA_TYPE_BIO: 345 return (u64)osd_data->bio_length; 346 #endif /* CONFIG_BLOCK */ 347 case CEPH_OSD_DATA_TYPE_BVECS: 348 return osd_data->bvec_pos.iter.bi_size; 349 case CEPH_OSD_DATA_TYPE_ITER: 350 return iov_iter_count(&osd_data->iter); 351 default: 352 WARN(true, "unrecognized data type %d\n", (int)osd_data->type); 353 return 0; 354 } 355 } 356 357 static void ceph_osd_data_release(struct ceph_osd_data *osd_data) 358 { 359 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) { 360 int num_pages; 361 362 num_pages = calc_pages_for((u64)osd_data->alignment, 363 (u64)osd_data->length); 364 ceph_release_page_vector(osd_data->pages, num_pages); 365 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { 366 ceph_pagelist_release(osd_data->pagelist); 367 } 368 ceph_osd_data_init(osd_data); 369 } 370 371 static void osd_req_op_data_release(struct ceph_osd_request *osd_req, 372 unsigned int which) 373 { 374 struct ceph_osd_req_op *op; 375 376 BUG_ON(which >= osd_req->r_num_ops); 377 op = &osd_req->r_ops[which]; 378 379 switch (op->op) { 380 case CEPH_OSD_OP_READ: 381 case CEPH_OSD_OP_SPARSE_READ: 382 case CEPH_OSD_OP_WRITE: 383 case CEPH_OSD_OP_WRITEFULL: 384 kfree(op->extent.sparse_ext); 385 ceph_osd_data_release(&op->extent.osd_data); 386 break; 387 case CEPH_OSD_OP_CALL: 388 ceph_osd_data_release(&op->cls.request_info); 389 ceph_osd_data_release(&op->cls.request_data); 390 ceph_osd_data_release(&op->cls.response_data); 391 break; 392 case CEPH_OSD_OP_SETXATTR: 393 case CEPH_OSD_OP_CMPXATTR: 394 ceph_osd_data_release(&op->xattr.osd_data); 395 break; 396 case CEPH_OSD_OP_STAT: 397 ceph_osd_data_release(&op->raw_data_in); 398 break; 399 case CEPH_OSD_OP_NOTIFY_ACK: 400 ceph_osd_data_release(&op->notify_ack.request_data); 401 break; 402 case CEPH_OSD_OP_NOTIFY: 403 ceph_osd_data_release(&op->notify.request_data); 404 ceph_osd_data_release(&op->notify.response_data); 405 break; 406 case CEPH_OSD_OP_LIST_WATCHERS: 407 ceph_osd_data_release(&op->list_watchers.response_data); 408 break; 409 case CEPH_OSD_OP_COPY_FROM2: 410 ceph_osd_data_release(&op->copy_from.osd_data); 411 break; 412 default: 413 break; 414 } 415 } 416 417 /* 418 * Assumes @t is zero-initialized. 419 */ 420 static void target_init(struct ceph_osd_request_target *t) 421 { 422 ceph_oid_init(&t->base_oid); 423 ceph_oloc_init(&t->base_oloc); 424 ceph_oid_init(&t->target_oid); 425 ceph_oloc_init(&t->target_oloc); 426 427 ceph_osds_init(&t->acting); 428 ceph_osds_init(&t->up); 429 t->size = -1; 430 t->min_size = -1; 431 432 t->osd = CEPH_HOMELESS_OSD; 433 } 434 435 static void target_copy(struct ceph_osd_request_target *dest, 436 const struct ceph_osd_request_target *src) 437 { 438 ceph_oid_copy(&dest->base_oid, &src->base_oid); 439 ceph_oloc_copy(&dest->base_oloc, &src->base_oloc); 440 ceph_oid_copy(&dest->target_oid, &src->target_oid); 441 ceph_oloc_copy(&dest->target_oloc, &src->target_oloc); 442 443 dest->pgid = src->pgid; /* struct */ 444 dest->spgid = src->spgid; /* struct */ 445 dest->pg_num = src->pg_num; 446 dest->pg_num_mask = src->pg_num_mask; 447 ceph_osds_copy(&dest->acting, &src->acting); 448 ceph_osds_copy(&dest->up, &src->up); 449 dest->size = src->size; 450 dest->min_size = src->min_size; 451 dest->sort_bitwise = src->sort_bitwise; 452 dest->recovery_deletes = src->recovery_deletes; 453 454 dest->flags = src->flags; 455 dest->used_replica = src->used_replica; 456 dest->paused = src->paused; 457 458 dest->epoch = src->epoch; 459 dest->last_force_resend = src->last_force_resend; 460 461 dest->osd = src->osd; 462 } 463 464 static void target_destroy(struct ceph_osd_request_target *t) 465 { 466 ceph_oid_destroy(&t->base_oid); 467 ceph_oloc_destroy(&t->base_oloc); 468 ceph_oid_destroy(&t->target_oid); 469 ceph_oloc_destroy(&t->target_oloc); 470 } 471 472 /* 473 * requests 474 */ 475 static void request_release_checks(struct ceph_osd_request *req) 476 { 477 WARN_ON(!RB_EMPTY_NODE(&req->r_node)); 478 WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node)); 479 WARN_ON(!list_empty(&req->r_private_item)); 480 WARN_ON(req->r_osd); 481 } 482 483 static void ceph_osdc_release_request(struct kref *kref) 484 { 485 struct ceph_osd_request *req = container_of(kref, 486 struct ceph_osd_request, r_kref); 487 unsigned int which; 488 489 dout("%s %p (r_request %p r_reply %p)\n", __func__, req, 490 req->r_request, req->r_reply); 491 request_release_checks(req); 492 493 if (req->r_request) 494 ceph_msg_put(req->r_request); 495 if (req->r_reply) 496 ceph_msg_put(req->r_reply); 497 498 for (which = 0; which < req->r_num_ops; which++) 499 osd_req_op_data_release(req, which); 500 501 target_destroy(&req->r_t); 502 ceph_put_snap_context(req->r_snapc); 503 504 if (req->r_mempool) 505 mempool_free(req, req->r_osdc->req_mempool); 506 else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS) 507 kmem_cache_free(ceph_osd_request_cache, req); 508 else 509 kfree(req); 510 } 511 512 void ceph_osdc_get_request(struct ceph_osd_request *req) 513 { 514 dout("%s %p (was %d)\n", __func__, req, 515 kref_read(&req->r_kref)); 516 kref_get(&req->r_kref); 517 } 518 EXPORT_SYMBOL(ceph_osdc_get_request); 519 520 void ceph_osdc_put_request(struct ceph_osd_request *req) 521 { 522 if (req) { 523 dout("%s %p (was %d)\n", __func__, req, 524 kref_read(&req->r_kref)); 525 kref_put(&req->r_kref, ceph_osdc_release_request); 526 } 527 } 528 EXPORT_SYMBOL(ceph_osdc_put_request); 529 530 static void request_init(struct ceph_osd_request *req) 531 { 532 /* req only, each op is zeroed in osd_req_op_init() */ 533 memset(req, 0, sizeof(*req)); 534 535 kref_init(&req->r_kref); 536 init_completion(&req->r_completion); 537 RB_CLEAR_NODE(&req->r_node); 538 RB_CLEAR_NODE(&req->r_mc_node); 539 INIT_LIST_HEAD(&req->r_private_item); 540 541 target_init(&req->r_t); 542 } 543 544 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, 545 struct ceph_snap_context *snapc, 546 unsigned int num_ops, 547 bool use_mempool, 548 gfp_t gfp_flags) 549 { 550 struct ceph_osd_request *req; 551 552 if (use_mempool) { 553 BUG_ON(num_ops > CEPH_OSD_SLAB_OPS); 554 req = mempool_alloc(osdc->req_mempool, gfp_flags); 555 } else if (num_ops <= CEPH_OSD_SLAB_OPS) { 556 req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags); 557 } else { 558 BUG_ON(num_ops > CEPH_OSD_MAX_OPS); 559 req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags); 560 } 561 if (unlikely(!req)) 562 return NULL; 563 564 request_init(req); 565 req->r_osdc = osdc; 566 req->r_mempool = use_mempool; 567 req->r_num_ops = num_ops; 568 req->r_snapid = CEPH_NOSNAP; 569 req->r_snapc = ceph_get_snap_context(snapc); 570 571 dout("%s req %p\n", __func__, req); 572 return req; 573 } 574 EXPORT_SYMBOL(ceph_osdc_alloc_request); 575 576 static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc) 577 { 578 return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0); 579 } 580 581 static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp, 582 int num_request_data_items, 583 int num_reply_data_items) 584 { 585 struct ceph_osd_client *osdc = req->r_osdc; 586 struct ceph_msg *msg; 587 int msg_size; 588 589 WARN_ON(req->r_request || req->r_reply); 590 WARN_ON(ceph_oid_empty(&req->r_base_oid)); 591 WARN_ON(ceph_oloc_empty(&req->r_base_oloc)); 592 593 /* create request message */ 594 msg_size = CEPH_ENCODING_START_BLK_LEN + 595 CEPH_PGID_ENCODING_LEN + 1; /* spgid */ 596 msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */ 597 msg_size += CEPH_ENCODING_START_BLK_LEN + 598 sizeof(struct ceph_osd_reqid); /* reqid */ 599 msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */ 600 msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */ 601 msg_size += CEPH_ENCODING_START_BLK_LEN + 602 ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */ 603 msg_size += 4 + req->r_base_oid.name_len; /* oid */ 604 msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op); 605 msg_size += 8; /* snapid */ 606 msg_size += 8; /* snap_seq */ 607 msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0); 608 msg_size += 4 + 8; /* retry_attempt, features */ 609 610 if (req->r_mempool) 611 msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size, 612 num_request_data_items); 613 else 614 msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size, 615 num_request_data_items, gfp, true); 616 if (!msg) 617 return -ENOMEM; 618 619 memset(msg->front.iov_base, 0, msg->front.iov_len); 620 req->r_request = msg; 621 622 /* create reply message */ 623 msg_size = OSD_OPREPLY_FRONT_LEN; 624 msg_size += req->r_base_oid.name_len; 625 msg_size += req->r_num_ops * sizeof(struct ceph_osd_op); 626 627 if (req->r_mempool) 628 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size, 629 num_reply_data_items); 630 else 631 msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size, 632 num_reply_data_items, gfp, true); 633 if (!msg) 634 return -ENOMEM; 635 636 req->r_reply = msg; 637 638 return 0; 639 } 640 641 static bool osd_req_opcode_valid(u16 opcode) 642 { 643 switch (opcode) { 644 #define GENERATE_CASE(op, opcode, str) case CEPH_OSD_OP_##op: return true; 645 __CEPH_FORALL_OSD_OPS(GENERATE_CASE) 646 #undef GENERATE_CASE 647 default: 648 return false; 649 } 650 } 651 652 static void get_num_data_items(struct ceph_osd_request *req, 653 int *num_request_data_items, 654 int *num_reply_data_items) 655 { 656 struct ceph_osd_req_op *op; 657 658 *num_request_data_items = 0; 659 *num_reply_data_items = 0; 660 661 for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { 662 switch (op->op) { 663 /* request */ 664 case CEPH_OSD_OP_WRITE: 665 case CEPH_OSD_OP_WRITEFULL: 666 case CEPH_OSD_OP_SETXATTR: 667 case CEPH_OSD_OP_CMPXATTR: 668 case CEPH_OSD_OP_NOTIFY_ACK: 669 case CEPH_OSD_OP_COPY_FROM2: 670 *num_request_data_items += 1; 671 break; 672 673 /* reply */ 674 case CEPH_OSD_OP_STAT: 675 case CEPH_OSD_OP_READ: 676 case CEPH_OSD_OP_SPARSE_READ: 677 case CEPH_OSD_OP_LIST_WATCHERS: 678 *num_reply_data_items += 1; 679 break; 680 681 /* both */ 682 case CEPH_OSD_OP_NOTIFY: 683 *num_request_data_items += 1; 684 *num_reply_data_items += 1; 685 break; 686 case CEPH_OSD_OP_CALL: 687 *num_request_data_items += 2; 688 *num_reply_data_items += 1; 689 break; 690 691 default: 692 WARN_ON(!osd_req_opcode_valid(op->op)); 693 break; 694 } 695 } 696 } 697 698 /* 699 * oid, oloc and OSD op opcode(s) must be filled in before this function 700 * is called. 701 */ 702 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp) 703 { 704 int num_request_data_items, num_reply_data_items; 705 706 get_num_data_items(req, &num_request_data_items, &num_reply_data_items); 707 return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items, 708 num_reply_data_items); 709 } 710 EXPORT_SYMBOL(ceph_osdc_alloc_messages); 711 712 /* 713 * This is an osd op init function for opcodes that have no data or 714 * other information associated with them. It also serves as a 715 * common init routine for all the other init functions, below. 716 */ 717 struct ceph_osd_req_op * 718 osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, 719 u16 opcode, u32 flags) 720 { 721 struct ceph_osd_req_op *op; 722 723 BUG_ON(which >= osd_req->r_num_ops); 724 BUG_ON(!osd_req_opcode_valid(opcode)); 725 726 op = &osd_req->r_ops[which]; 727 memset(op, 0, sizeof (*op)); 728 op->op = opcode; 729 op->flags = flags; 730 731 return op; 732 } 733 EXPORT_SYMBOL(osd_req_op_init); 734 735 void osd_req_op_extent_init(struct ceph_osd_request *osd_req, 736 unsigned int which, u16 opcode, 737 u64 offset, u64 length, 738 u64 truncate_size, u32 truncate_seq) 739 { 740 struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, 741 opcode, 0); 742 size_t payload_len = 0; 743 744 BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && 745 opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO && 746 opcode != CEPH_OSD_OP_TRUNCATE && opcode != CEPH_OSD_OP_SPARSE_READ); 747 748 op->extent.offset = offset; 749 op->extent.length = length; 750 op->extent.truncate_size = truncate_size; 751 op->extent.truncate_seq = truncate_seq; 752 if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL) 753 payload_len += length; 754 755 op->indata_len = payload_len; 756 } 757 EXPORT_SYMBOL(osd_req_op_extent_init); 758 759 void osd_req_op_extent_update(struct ceph_osd_request *osd_req, 760 unsigned int which, u64 length) 761 { 762 struct ceph_osd_req_op *op; 763 u64 previous; 764 765 BUG_ON(which >= osd_req->r_num_ops); 766 op = &osd_req->r_ops[which]; 767 previous = op->extent.length; 768 769 if (length == previous) 770 return; /* Nothing to do */ 771 BUG_ON(length > previous); 772 773 op->extent.length = length; 774 if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL) 775 op->indata_len -= previous - length; 776 } 777 EXPORT_SYMBOL(osd_req_op_extent_update); 778 779 void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, 780 unsigned int which, u64 offset_inc) 781 { 782 struct ceph_osd_req_op *op, *prev_op; 783 784 BUG_ON(which + 1 >= osd_req->r_num_ops); 785 786 prev_op = &osd_req->r_ops[which]; 787 op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags); 788 /* dup previous one */ 789 op->indata_len = prev_op->indata_len; 790 op->outdata_len = prev_op->outdata_len; 791 op->extent = prev_op->extent; 792 /* adjust offset */ 793 op->extent.offset += offset_inc; 794 op->extent.length -= offset_inc; 795 796 if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL) 797 op->indata_len -= offset_inc; 798 } 799 EXPORT_SYMBOL(osd_req_op_extent_dup_last); 800 801 int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, 802 const char *class, const char *method) 803 { 804 struct ceph_osd_req_op *op; 805 struct ceph_pagelist *pagelist; 806 size_t payload_len = 0; 807 size_t size; 808 int ret; 809 810 op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0); 811 812 pagelist = ceph_pagelist_alloc(GFP_NOFS); 813 if (!pagelist) 814 return -ENOMEM; 815 816 op->cls.class_name = class; 817 size = strlen(class); 818 BUG_ON(size > (size_t) U8_MAX); 819 op->cls.class_len = size; 820 ret = ceph_pagelist_append(pagelist, class, size); 821 if (ret) 822 goto err_pagelist_free; 823 payload_len += size; 824 825 op->cls.method_name = method; 826 size = strlen(method); 827 BUG_ON(size > (size_t) U8_MAX); 828 op->cls.method_len = size; 829 ret = ceph_pagelist_append(pagelist, method, size); 830 if (ret) 831 goto err_pagelist_free; 832 payload_len += size; 833 834 osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist); 835 op->indata_len = payload_len; 836 return 0; 837 838 err_pagelist_free: 839 ceph_pagelist_release(pagelist); 840 return ret; 841 } 842 EXPORT_SYMBOL(osd_req_op_cls_init); 843 844 int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, 845 u16 opcode, const char *name, const void *value, 846 size_t size, u8 cmp_op, u8 cmp_mode) 847 { 848 struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, 849 opcode, 0); 850 struct ceph_pagelist *pagelist; 851 size_t payload_len; 852 int ret; 853 854 BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR); 855 856 pagelist = ceph_pagelist_alloc(GFP_NOFS); 857 if (!pagelist) 858 return -ENOMEM; 859 860 payload_len = strlen(name); 861 op->xattr.name_len = payload_len; 862 ret = ceph_pagelist_append(pagelist, name, payload_len); 863 if (ret) 864 goto err_pagelist_free; 865 866 op->xattr.value_len = size; 867 ret = ceph_pagelist_append(pagelist, value, size); 868 if (ret) 869 goto err_pagelist_free; 870 payload_len += size; 871 872 op->xattr.cmp_op = cmp_op; 873 op->xattr.cmp_mode = cmp_mode; 874 875 ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist); 876 op->indata_len = payload_len; 877 return 0; 878 879 err_pagelist_free: 880 ceph_pagelist_release(pagelist); 881 return ret; 882 } 883 EXPORT_SYMBOL(osd_req_op_xattr_init); 884 885 /* 886 * @watch_opcode: CEPH_OSD_WATCH_OP_* 887 */ 888 static void osd_req_op_watch_init(struct ceph_osd_request *req, int which, 889 u8 watch_opcode, u64 cookie, u32 gen) 890 { 891 struct ceph_osd_req_op *op; 892 893 op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0); 894 op->watch.cookie = cookie; 895 op->watch.op = watch_opcode; 896 op->watch.gen = gen; 897 } 898 899 /* 900 * prot_ver, timeout and notify payload (may be empty) should already be 901 * encoded in @request_pl 902 */ 903 static void osd_req_op_notify_init(struct ceph_osd_request *req, int which, 904 u64 cookie, struct ceph_pagelist *request_pl) 905 { 906 struct ceph_osd_req_op *op; 907 908 op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); 909 op->notify.cookie = cookie; 910 911 ceph_osd_data_pagelist_init(&op->notify.request_data, request_pl); 912 op->indata_len = request_pl->length; 913 } 914 915 /* 916 * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_* 917 */ 918 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, 919 unsigned int which, 920 u64 expected_object_size, 921 u64 expected_write_size, 922 u32 flags) 923 { 924 struct ceph_osd_req_op *op; 925 926 op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_SETALLOCHINT, 0); 927 op->alloc_hint.expected_object_size = expected_object_size; 928 op->alloc_hint.expected_write_size = expected_write_size; 929 op->alloc_hint.flags = flags; 930 931 /* 932 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed 933 * not worth a feature bit. Set FAILOK per-op flag to make 934 * sure older osds don't trip over an unsupported opcode. 935 */ 936 op->flags |= CEPH_OSD_OP_FLAG_FAILOK; 937 } 938 EXPORT_SYMBOL(osd_req_op_alloc_hint_init); 939 940 static void ceph_osdc_msg_data_add(struct ceph_msg *msg, 941 struct ceph_osd_data *osd_data) 942 { 943 u64 length = ceph_osd_data_length(osd_data); 944 945 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { 946 BUG_ON(length > (u64) SIZE_MAX); 947 if (length) 948 ceph_msg_data_add_pages(msg, osd_data->pages, 949 length, osd_data->alignment, false); 950 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { 951 BUG_ON(!length); 952 ceph_msg_data_add_pagelist(msg, osd_data->pagelist); 953 #ifdef CONFIG_BLOCK 954 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { 955 ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length); 956 #endif 957 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) { 958 ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos); 959 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_ITER) { 960 ceph_msg_data_add_iter(msg, &osd_data->iter); 961 } else { 962 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); 963 } 964 } 965 966 static u32 osd_req_encode_op(struct ceph_osd_op *dst, 967 const struct ceph_osd_req_op *src) 968 { 969 switch (src->op) { 970 case CEPH_OSD_OP_STAT: 971 break; 972 case CEPH_OSD_OP_READ: 973 case CEPH_OSD_OP_SPARSE_READ: 974 case CEPH_OSD_OP_WRITE: 975 case CEPH_OSD_OP_WRITEFULL: 976 case CEPH_OSD_OP_ZERO: 977 case CEPH_OSD_OP_TRUNCATE: 978 dst->extent.offset = cpu_to_le64(src->extent.offset); 979 dst->extent.length = cpu_to_le64(src->extent.length); 980 dst->extent.truncate_size = 981 cpu_to_le64(src->extent.truncate_size); 982 dst->extent.truncate_seq = 983 cpu_to_le32(src->extent.truncate_seq); 984 break; 985 case CEPH_OSD_OP_CALL: 986 dst->cls.class_len = src->cls.class_len; 987 dst->cls.method_len = src->cls.method_len; 988 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); 989 break; 990 case CEPH_OSD_OP_WATCH: 991 dst->watch.cookie = cpu_to_le64(src->watch.cookie); 992 dst->watch.ver = cpu_to_le64(0); 993 dst->watch.op = src->watch.op; 994 dst->watch.gen = cpu_to_le32(src->watch.gen); 995 break; 996 case CEPH_OSD_OP_NOTIFY_ACK: 997 break; 998 case CEPH_OSD_OP_NOTIFY: 999 dst->notify.cookie = cpu_to_le64(src->notify.cookie); 1000 break; 1001 case CEPH_OSD_OP_LIST_WATCHERS: 1002 break; 1003 case CEPH_OSD_OP_SETALLOCHINT: 1004 dst->alloc_hint.expected_object_size = 1005 cpu_to_le64(src->alloc_hint.expected_object_size); 1006 dst->alloc_hint.expected_write_size = 1007 cpu_to_le64(src->alloc_hint.expected_write_size); 1008 dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags); 1009 break; 1010 case CEPH_OSD_OP_SETXATTR: 1011 case CEPH_OSD_OP_CMPXATTR: 1012 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len); 1013 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len); 1014 dst->xattr.cmp_op = src->xattr.cmp_op; 1015 dst->xattr.cmp_mode = src->xattr.cmp_mode; 1016 break; 1017 case CEPH_OSD_OP_CREATE: 1018 case CEPH_OSD_OP_DELETE: 1019 break; 1020 case CEPH_OSD_OP_COPY_FROM2: 1021 dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid); 1022 dst->copy_from.src_version = 1023 cpu_to_le64(src->copy_from.src_version); 1024 dst->copy_from.flags = src->copy_from.flags; 1025 dst->copy_from.src_fadvise_flags = 1026 cpu_to_le32(src->copy_from.src_fadvise_flags); 1027 break; 1028 case CEPH_OSD_OP_ASSERT_VER: 1029 dst->assert_ver.unused = cpu_to_le64(0); 1030 dst->assert_ver.ver = cpu_to_le64(src->assert_ver.ver); 1031 break; 1032 default: 1033 pr_err("unsupported osd opcode %s\n", 1034 ceph_osd_op_name(src->op)); 1035 WARN_ON(1); 1036 1037 return 0; 1038 } 1039 1040 dst->op = cpu_to_le16(src->op); 1041 dst->flags = cpu_to_le32(src->flags); 1042 dst->payload_len = cpu_to_le32(src->indata_len); 1043 1044 return src->indata_len; 1045 } 1046 1047 /* 1048 * build new request AND message, calculate layout, and adjust file 1049 * extent as needed. 1050 * 1051 * if the file was recently truncated, we include information about its 1052 * old and new size so that the object can be updated appropriately. (we 1053 * avoid synchronously deleting truncated objects because it's slow.) 1054 */ 1055 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, 1056 struct ceph_file_layout *layout, 1057 struct ceph_vino vino, 1058 u64 off, u64 *plen, 1059 unsigned int which, int num_ops, 1060 int opcode, int flags, 1061 struct ceph_snap_context *snapc, 1062 u32 truncate_seq, 1063 u64 truncate_size, 1064 bool use_mempool) 1065 { 1066 struct ceph_osd_request *req; 1067 u64 objnum = 0; 1068 u64 objoff = 0; 1069 u64 objlen = 0; 1070 int r; 1071 1072 BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && 1073 opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE && 1074 opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE && 1075 opcode != CEPH_OSD_OP_SPARSE_READ); 1076 1077 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool, 1078 GFP_NOFS); 1079 if (!req) { 1080 r = -ENOMEM; 1081 goto fail; 1082 } 1083 1084 /* calculate max write size */ 1085 r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen); 1086 if (r) 1087 goto fail; 1088 1089 if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) { 1090 osd_req_op_init(req, which, opcode, 0); 1091 } else { 1092 u32 object_size = layout->object_size; 1093 u32 object_base = off - objoff; 1094 if (!(truncate_seq == 1 && truncate_size == -1ULL)) { 1095 if (truncate_size <= object_base) { 1096 truncate_size = 0; 1097 } else { 1098 truncate_size -= object_base; 1099 if (truncate_size > object_size) 1100 truncate_size = object_size; 1101 } 1102 } 1103 osd_req_op_extent_init(req, which, opcode, objoff, objlen, 1104 truncate_size, truncate_seq); 1105 } 1106 1107 req->r_base_oloc.pool = layout->pool_id; 1108 req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns); 1109 ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum); 1110 req->r_flags = flags | osdc->client->options->read_from_replica; 1111 1112 req->r_snapid = vino.snap; 1113 if (flags & CEPH_OSD_FLAG_WRITE) 1114 req->r_data_offset = off; 1115 1116 if (num_ops > 1) { 1117 int num_req_ops, num_rep_ops; 1118 1119 /* 1120 * If this is a multi-op write request, assume that we'll need 1121 * request ops. If it's a multi-op read then assume we'll need 1122 * reply ops. Anything else and call it -EINVAL. 1123 */ 1124 if (flags & CEPH_OSD_FLAG_WRITE) { 1125 num_req_ops = num_ops; 1126 num_rep_ops = 0; 1127 } else if (flags & CEPH_OSD_FLAG_READ) { 1128 num_req_ops = 0; 1129 num_rep_ops = num_ops; 1130 } else { 1131 r = -EINVAL; 1132 goto fail; 1133 } 1134 1135 r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_req_ops, 1136 num_rep_ops); 1137 } else { 1138 r = ceph_osdc_alloc_messages(req, GFP_NOFS); 1139 } 1140 if (r) 1141 goto fail; 1142 1143 return req; 1144 1145 fail: 1146 ceph_osdc_put_request(req); 1147 return ERR_PTR(r); 1148 } 1149 EXPORT_SYMBOL(ceph_osdc_new_request); 1150 1151 int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt) 1152 { 1153 WARN_ON(op->op != CEPH_OSD_OP_SPARSE_READ); 1154 1155 op->extent.sparse_ext_cnt = cnt; 1156 op->extent.sparse_ext = kmalloc_array(cnt, 1157 sizeof(*op->extent.sparse_ext), 1158 GFP_NOFS); 1159 if (!op->extent.sparse_ext) 1160 return -ENOMEM; 1161 return 0; 1162 } 1163 EXPORT_SYMBOL(__ceph_alloc_sparse_ext_map); 1164 1165 /* 1166 * We keep osd requests in an rbtree, sorted by ->r_tid. 1167 */ 1168 DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node) 1169 DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node) 1170 1171 /* 1172 * Call @fn on each OSD request as long as @fn returns 0. 1173 */ 1174 static void for_each_request(struct ceph_osd_client *osdc, 1175 int (*fn)(struct ceph_osd_request *req, void *arg), 1176 void *arg) 1177 { 1178 struct rb_node *n, *p; 1179 1180 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 1181 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 1182 1183 for (p = rb_first(&osd->o_requests); p; ) { 1184 struct ceph_osd_request *req = 1185 rb_entry(p, struct ceph_osd_request, r_node); 1186 1187 p = rb_next(p); 1188 if (fn(req, arg)) 1189 return; 1190 } 1191 } 1192 1193 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { 1194 struct ceph_osd_request *req = 1195 rb_entry(p, struct ceph_osd_request, r_node); 1196 1197 p = rb_next(p); 1198 if (fn(req, arg)) 1199 return; 1200 } 1201 } 1202 1203 static bool osd_homeless(struct ceph_osd *osd) 1204 { 1205 return osd->o_osd == CEPH_HOMELESS_OSD; 1206 } 1207 1208 static bool osd_registered(struct ceph_osd *osd) 1209 { 1210 verify_osdc_locked(osd->o_osdc); 1211 1212 return !RB_EMPTY_NODE(&osd->o_node); 1213 } 1214 1215 /* 1216 * Assumes @osd is zero-initialized. 1217 */ 1218 static void osd_init(struct ceph_osd *osd) 1219 { 1220 refcount_set(&osd->o_ref, 1); 1221 RB_CLEAR_NODE(&osd->o_node); 1222 spin_lock_init(&osd->o_requests_lock); 1223 osd->o_requests = RB_ROOT; 1224 osd->o_linger_requests = RB_ROOT; 1225 osd->o_backoff_mappings = RB_ROOT; 1226 osd->o_backoffs_by_id = RB_ROOT; 1227 INIT_LIST_HEAD(&osd->o_osd_lru); 1228 INIT_LIST_HEAD(&osd->o_keepalive_item); 1229 osd->o_incarnation = 1; 1230 mutex_init(&osd->lock); 1231 } 1232 1233 static void ceph_init_sparse_read(struct ceph_sparse_read *sr) 1234 { 1235 kfree(sr->sr_extent); 1236 memset(sr, '\0', sizeof(*sr)); 1237 sr->sr_state = CEPH_SPARSE_READ_HDR; 1238 } 1239 1240 static void osd_cleanup(struct ceph_osd *osd) 1241 { 1242 WARN_ON(!RB_EMPTY_NODE(&osd->o_node)); 1243 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); 1244 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); 1245 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings)); 1246 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id)); 1247 WARN_ON(!list_empty(&osd->o_osd_lru)); 1248 WARN_ON(!list_empty(&osd->o_keepalive_item)); 1249 1250 ceph_init_sparse_read(&osd->o_sparse_read); 1251 1252 if (osd->o_auth.authorizer) { 1253 WARN_ON(osd_homeless(osd)); 1254 ceph_auth_destroy_authorizer(osd->o_auth.authorizer); 1255 } 1256 } 1257 1258 /* 1259 * Track open sessions with osds. 1260 */ 1261 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum) 1262 { 1263 struct ceph_osd *osd; 1264 1265 WARN_ON(onum == CEPH_HOMELESS_OSD); 1266 1267 osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL); 1268 osd_init(osd); 1269 osd->o_osdc = osdc; 1270 osd->o_osd = onum; 1271 osd->o_sparse_op_idx = -1; 1272 1273 ceph_init_sparse_read(&osd->o_sparse_read); 1274 1275 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr); 1276 1277 return osd; 1278 } 1279 1280 static struct ceph_osd *get_osd(struct ceph_osd *osd) 1281 { 1282 if (refcount_inc_not_zero(&osd->o_ref)) { 1283 dout("get_osd %p -> %d\n", osd, refcount_read(&osd->o_ref)); 1284 return osd; 1285 } else { 1286 dout("get_osd %p FAIL\n", osd); 1287 return NULL; 1288 } 1289 } 1290 1291 static void put_osd(struct ceph_osd *osd) 1292 { 1293 dout("put_osd %p -> %d\n", osd, refcount_read(&osd->o_ref) - 1); 1294 if (refcount_dec_and_test(&osd->o_ref)) { 1295 osd_cleanup(osd); 1296 kfree(osd); 1297 } 1298 } 1299 1300 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node) 1301 1302 static void __move_osd_to_lru(struct ceph_osd *osd) 1303 { 1304 struct ceph_osd_client *osdc = osd->o_osdc; 1305 1306 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 1307 BUG_ON(!list_empty(&osd->o_osd_lru)); 1308 1309 spin_lock(&osdc->osd_lru_lock); 1310 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); 1311 spin_unlock(&osdc->osd_lru_lock); 1312 1313 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl; 1314 } 1315 1316 static void maybe_move_osd_to_lru(struct ceph_osd *osd) 1317 { 1318 if (RB_EMPTY_ROOT(&osd->o_requests) && 1319 RB_EMPTY_ROOT(&osd->o_linger_requests)) 1320 __move_osd_to_lru(osd); 1321 } 1322 1323 static void __remove_osd_from_lru(struct ceph_osd *osd) 1324 { 1325 struct ceph_osd_client *osdc = osd->o_osdc; 1326 1327 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 1328 1329 spin_lock(&osdc->osd_lru_lock); 1330 if (!list_empty(&osd->o_osd_lru)) 1331 list_del_init(&osd->o_osd_lru); 1332 spin_unlock(&osdc->osd_lru_lock); 1333 } 1334 1335 /* 1336 * Close the connection and assign any leftover requests to the 1337 * homeless session. 1338 */ 1339 static void close_osd(struct ceph_osd *osd) 1340 { 1341 struct ceph_osd_client *osdc = osd->o_osdc; 1342 struct rb_node *n; 1343 1344 verify_osdc_wrlocked(osdc); 1345 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 1346 1347 ceph_con_close(&osd->o_con); 1348 1349 for (n = rb_first(&osd->o_requests); n; ) { 1350 struct ceph_osd_request *req = 1351 rb_entry(n, struct ceph_osd_request, r_node); 1352 1353 n = rb_next(n); /* unlink_request() */ 1354 1355 dout(" reassigning req %p tid %llu\n", req, req->r_tid); 1356 unlink_request(osd, req); 1357 link_request(&osdc->homeless_osd, req); 1358 } 1359 for (n = rb_first(&osd->o_linger_requests); n; ) { 1360 struct ceph_osd_linger_request *lreq = 1361 rb_entry(n, struct ceph_osd_linger_request, node); 1362 1363 n = rb_next(n); /* unlink_linger() */ 1364 1365 dout(" reassigning lreq %p linger_id %llu\n", lreq, 1366 lreq->linger_id); 1367 unlink_linger(osd, lreq); 1368 link_linger(&osdc->homeless_osd, lreq); 1369 } 1370 clear_backoffs(osd); 1371 1372 __remove_osd_from_lru(osd); 1373 erase_osd(&osdc->osds, osd); 1374 put_osd(osd); 1375 } 1376 1377 /* 1378 * reset osd connect 1379 */ 1380 static int reopen_osd(struct ceph_osd *osd) 1381 { 1382 struct ceph_entity_addr *peer_addr; 1383 1384 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 1385 1386 if (RB_EMPTY_ROOT(&osd->o_requests) && 1387 RB_EMPTY_ROOT(&osd->o_linger_requests)) { 1388 close_osd(osd); 1389 return -ENODEV; 1390 } 1391 1392 peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd]; 1393 if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) && 1394 !ceph_con_opened(&osd->o_con)) { 1395 struct rb_node *n; 1396 1397 dout("osd addr hasn't changed and connection never opened, " 1398 "letting msgr retry\n"); 1399 /* touch each r_stamp for handle_timeout()'s benfit */ 1400 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { 1401 struct ceph_osd_request *req = 1402 rb_entry(n, struct ceph_osd_request, r_node); 1403 req->r_stamp = jiffies; 1404 } 1405 1406 return -EAGAIN; 1407 } 1408 1409 ceph_con_close(&osd->o_con); 1410 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr); 1411 osd->o_incarnation++; 1412 1413 return 0; 1414 } 1415 1416 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o, 1417 bool wrlocked) 1418 { 1419 struct ceph_osd *osd; 1420 1421 if (wrlocked) 1422 verify_osdc_wrlocked(osdc); 1423 else 1424 verify_osdc_locked(osdc); 1425 1426 if (o != CEPH_HOMELESS_OSD) 1427 osd = lookup_osd(&osdc->osds, o); 1428 else 1429 osd = &osdc->homeless_osd; 1430 if (!osd) { 1431 if (!wrlocked) 1432 return ERR_PTR(-EAGAIN); 1433 1434 osd = create_osd(osdc, o); 1435 insert_osd(&osdc->osds, osd); 1436 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, 1437 &osdc->osdmap->osd_addr[osd->o_osd]); 1438 } 1439 1440 dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd); 1441 return osd; 1442 } 1443 1444 /* 1445 * Create request <-> OSD session relation. 1446 * 1447 * @req has to be assigned a tid, @osd may be homeless. 1448 */ 1449 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req) 1450 { 1451 verify_osd_locked(osd); 1452 WARN_ON(!req->r_tid || req->r_osd); 1453 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, 1454 req, req->r_tid); 1455 1456 if (!osd_homeless(osd)) 1457 __remove_osd_from_lru(osd); 1458 else 1459 atomic_inc(&osd->o_osdc->num_homeless); 1460 1461 get_osd(osd); 1462 spin_lock(&osd->o_requests_lock); 1463 insert_request(&osd->o_requests, req); 1464 spin_unlock(&osd->o_requests_lock); 1465 req->r_osd = osd; 1466 } 1467 1468 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req) 1469 { 1470 verify_osd_locked(osd); 1471 WARN_ON(req->r_osd != osd); 1472 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, 1473 req, req->r_tid); 1474 1475 req->r_osd = NULL; 1476 spin_lock(&osd->o_requests_lock); 1477 erase_request(&osd->o_requests, req); 1478 spin_unlock(&osd->o_requests_lock); 1479 put_osd(osd); 1480 1481 if (!osd_homeless(osd)) 1482 maybe_move_osd_to_lru(osd); 1483 else 1484 atomic_dec(&osd->o_osdc->num_homeless); 1485 } 1486 1487 static bool __pool_full(struct ceph_pg_pool_info *pi) 1488 { 1489 return pi->flags & CEPH_POOL_FLAG_FULL; 1490 } 1491 1492 static bool have_pool_full(struct ceph_osd_client *osdc) 1493 { 1494 struct rb_node *n; 1495 1496 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { 1497 struct ceph_pg_pool_info *pi = 1498 rb_entry(n, struct ceph_pg_pool_info, node); 1499 1500 if (__pool_full(pi)) 1501 return true; 1502 } 1503 1504 return false; 1505 } 1506 1507 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id) 1508 { 1509 struct ceph_pg_pool_info *pi; 1510 1511 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id); 1512 if (!pi) 1513 return false; 1514 1515 return __pool_full(pi); 1516 } 1517 1518 /* 1519 * Returns whether a request should be blocked from being sent 1520 * based on the current osdmap and osd_client settings. 1521 */ 1522 static bool target_should_be_paused(struct ceph_osd_client *osdc, 1523 const struct ceph_osd_request_target *t, 1524 struct ceph_pg_pool_info *pi) 1525 { 1526 bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); 1527 bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || 1528 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 1529 __pool_full(pi); 1530 1531 WARN_ON(pi->id != t->target_oloc.pool); 1532 return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) || 1533 ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) || 1534 (osdc->osdmap->epoch < osdc->epoch_barrier); 1535 } 1536 1537 static int pick_random_replica(const struct ceph_osds *acting) 1538 { 1539 int i = get_random_u32_below(acting->size); 1540 1541 dout("%s picked osd%d, primary osd%d\n", __func__, 1542 acting->osds[i], acting->primary); 1543 return i; 1544 } 1545 1546 /* 1547 * Picks the closest replica based on client's location given by 1548 * crush_location option. Prefers the primary if the locality is 1549 * the same. 1550 */ 1551 static int pick_closest_replica(struct ceph_osd_client *osdc, 1552 const struct ceph_osds *acting) 1553 { 1554 struct ceph_options *opt = osdc->client->options; 1555 int best_i, best_locality; 1556 int i = 0, locality; 1557 1558 do { 1559 locality = ceph_get_crush_locality(osdc->osdmap, 1560 acting->osds[i], 1561 &opt->crush_locs); 1562 if (i == 0 || 1563 (locality >= 0 && best_locality < 0) || 1564 (locality >= 0 && best_locality >= 0 && 1565 locality < best_locality)) { 1566 best_i = i; 1567 best_locality = locality; 1568 } 1569 } while (++i < acting->size); 1570 1571 dout("%s picked osd%d with locality %d, primary osd%d\n", __func__, 1572 acting->osds[best_i], best_locality, acting->primary); 1573 return best_i; 1574 } 1575 1576 enum calc_target_result { 1577 CALC_TARGET_NO_ACTION = 0, 1578 CALC_TARGET_NEED_RESEND, 1579 CALC_TARGET_POOL_DNE, 1580 }; 1581 1582 static enum calc_target_result calc_target(struct ceph_osd_client *osdc, 1583 struct ceph_osd_request_target *t, 1584 bool any_change) 1585 { 1586 struct ceph_pg_pool_info *pi; 1587 struct ceph_pg pgid, last_pgid; 1588 struct ceph_osds up, acting; 1589 bool is_read = t->flags & CEPH_OSD_FLAG_READ; 1590 bool is_write = t->flags & CEPH_OSD_FLAG_WRITE; 1591 bool force_resend = false; 1592 bool unpaused = false; 1593 bool legacy_change = false; 1594 bool split = false; 1595 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); 1596 bool recovery_deletes = ceph_osdmap_flag(osdc, 1597 CEPH_OSDMAP_RECOVERY_DELETES); 1598 enum calc_target_result ct_res; 1599 1600 t->epoch = osdc->osdmap->epoch; 1601 pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool); 1602 if (!pi) { 1603 t->osd = CEPH_HOMELESS_OSD; 1604 ct_res = CALC_TARGET_POOL_DNE; 1605 goto out; 1606 } 1607 1608 if (osdc->osdmap->epoch == pi->last_force_request_resend) { 1609 if (t->last_force_resend < pi->last_force_request_resend) { 1610 t->last_force_resend = pi->last_force_request_resend; 1611 force_resend = true; 1612 } else if (t->last_force_resend == 0) { 1613 force_resend = true; 1614 } 1615 } 1616 1617 /* apply tiering */ 1618 ceph_oid_copy(&t->target_oid, &t->base_oid); 1619 ceph_oloc_copy(&t->target_oloc, &t->base_oloc); 1620 if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) { 1621 if (is_read && pi->read_tier >= 0) 1622 t->target_oloc.pool = pi->read_tier; 1623 if (is_write && pi->write_tier >= 0) 1624 t->target_oloc.pool = pi->write_tier; 1625 1626 pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool); 1627 if (!pi) { 1628 t->osd = CEPH_HOMELESS_OSD; 1629 ct_res = CALC_TARGET_POOL_DNE; 1630 goto out; 1631 } 1632 } 1633 1634 __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid); 1635 last_pgid.pool = pgid.pool; 1636 last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask); 1637 1638 ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting); 1639 if (any_change && 1640 ceph_is_new_interval(&t->acting, 1641 &acting, 1642 &t->up, 1643 &up, 1644 t->size, 1645 pi->size, 1646 t->min_size, 1647 pi->min_size, 1648 t->pg_num, 1649 pi->pg_num, 1650 t->sort_bitwise, 1651 sort_bitwise, 1652 t->recovery_deletes, 1653 recovery_deletes, 1654 &last_pgid)) 1655 force_resend = true; 1656 1657 if (t->paused && !target_should_be_paused(osdc, t, pi)) { 1658 t->paused = false; 1659 unpaused = true; 1660 } 1661 legacy_change = ceph_pg_compare(&t->pgid, &pgid) || 1662 ceph_osds_changed(&t->acting, &acting, 1663 t->used_replica || any_change); 1664 if (t->pg_num) 1665 split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num); 1666 1667 if (legacy_change || force_resend || split) { 1668 t->pgid = pgid; /* struct */ 1669 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid); 1670 ceph_osds_copy(&t->acting, &acting); 1671 ceph_osds_copy(&t->up, &up); 1672 t->size = pi->size; 1673 t->min_size = pi->min_size; 1674 t->pg_num = pi->pg_num; 1675 t->pg_num_mask = pi->pg_num_mask; 1676 t->sort_bitwise = sort_bitwise; 1677 t->recovery_deletes = recovery_deletes; 1678 1679 if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS | 1680 CEPH_OSD_FLAG_LOCALIZE_READS)) && 1681 !is_write && pi->type == CEPH_POOL_TYPE_REP && 1682 acting.size > 1) { 1683 int pos; 1684 1685 WARN_ON(!is_read || acting.osds[0] != acting.primary); 1686 if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) { 1687 pos = pick_random_replica(&acting); 1688 } else { 1689 pos = pick_closest_replica(osdc, &acting); 1690 } 1691 t->osd = acting.osds[pos]; 1692 t->used_replica = pos > 0; 1693 } else { 1694 t->osd = acting.primary; 1695 t->used_replica = false; 1696 } 1697 } 1698 1699 if (unpaused || legacy_change || force_resend || split) 1700 ct_res = CALC_TARGET_NEED_RESEND; 1701 else 1702 ct_res = CALC_TARGET_NO_ACTION; 1703 1704 out: 1705 dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused, 1706 legacy_change, force_resend, split, ct_res, t->osd); 1707 return ct_res; 1708 } 1709 1710 static struct ceph_spg_mapping *alloc_spg_mapping(void) 1711 { 1712 struct ceph_spg_mapping *spg; 1713 1714 spg = kmalloc(sizeof(*spg), GFP_NOIO); 1715 if (!spg) 1716 return NULL; 1717 1718 RB_CLEAR_NODE(&spg->node); 1719 spg->backoffs = RB_ROOT; 1720 return spg; 1721 } 1722 1723 static void free_spg_mapping(struct ceph_spg_mapping *spg) 1724 { 1725 WARN_ON(!RB_EMPTY_NODE(&spg->node)); 1726 WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs)); 1727 1728 kfree(spg); 1729 } 1730 1731 /* 1732 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to 1733 * ceph_pg_mapping. Used to track OSD backoffs -- a backoff [range] is 1734 * defined only within a specific spgid; it does not pass anything to 1735 * children on split, or to another primary. 1736 */ 1737 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare, 1738 RB_BYPTR, const struct ceph_spg *, node) 1739 1740 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid) 1741 { 1742 return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits; 1743 } 1744 1745 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid, 1746 void **pkey, size_t *pkey_len) 1747 { 1748 if (hoid->key_len) { 1749 *pkey = hoid->key; 1750 *pkey_len = hoid->key_len; 1751 } else { 1752 *pkey = hoid->oid; 1753 *pkey_len = hoid->oid_len; 1754 } 1755 } 1756 1757 static int compare_names(const void *name1, size_t name1_len, 1758 const void *name2, size_t name2_len) 1759 { 1760 int ret; 1761 1762 ret = memcmp(name1, name2, min(name1_len, name2_len)); 1763 if (!ret) { 1764 if (name1_len < name2_len) 1765 ret = -1; 1766 else if (name1_len > name2_len) 1767 ret = 1; 1768 } 1769 return ret; 1770 } 1771 1772 static int hoid_compare(const struct ceph_hobject_id *lhs, 1773 const struct ceph_hobject_id *rhs) 1774 { 1775 void *effective_key1, *effective_key2; 1776 size_t effective_key1_len, effective_key2_len; 1777 int ret; 1778 1779 if (lhs->is_max < rhs->is_max) 1780 return -1; 1781 if (lhs->is_max > rhs->is_max) 1782 return 1; 1783 1784 if (lhs->pool < rhs->pool) 1785 return -1; 1786 if (lhs->pool > rhs->pool) 1787 return 1; 1788 1789 if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs)) 1790 return -1; 1791 if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs)) 1792 return 1; 1793 1794 ret = compare_names(lhs->nspace, lhs->nspace_len, 1795 rhs->nspace, rhs->nspace_len); 1796 if (ret) 1797 return ret; 1798 1799 hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len); 1800 hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len); 1801 ret = compare_names(effective_key1, effective_key1_len, 1802 effective_key2, effective_key2_len); 1803 if (ret) 1804 return ret; 1805 1806 ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len); 1807 if (ret) 1808 return ret; 1809 1810 if (lhs->snapid < rhs->snapid) 1811 return -1; 1812 if (lhs->snapid > rhs->snapid) 1813 return 1; 1814 1815 return 0; 1816 } 1817 1818 /* 1819 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX 1820 * compat stuff here. 1821 * 1822 * Assumes @hoid is zero-initialized. 1823 */ 1824 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid) 1825 { 1826 u8 struct_v; 1827 u32 struct_len; 1828 int ret; 1829 1830 ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v, 1831 &struct_len); 1832 if (ret) 1833 return ret; 1834 1835 if (struct_v < 4) { 1836 pr_err("got struct_v %d < 4 of hobject_t\n", struct_v); 1837 goto e_inval; 1838 } 1839 1840 hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len, 1841 GFP_NOIO); 1842 if (IS_ERR(hoid->key)) { 1843 ret = PTR_ERR(hoid->key); 1844 hoid->key = NULL; 1845 return ret; 1846 } 1847 1848 hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len, 1849 GFP_NOIO); 1850 if (IS_ERR(hoid->oid)) { 1851 ret = PTR_ERR(hoid->oid); 1852 hoid->oid = NULL; 1853 return ret; 1854 } 1855 1856 ceph_decode_64_safe(p, end, hoid->snapid, e_inval); 1857 ceph_decode_32_safe(p, end, hoid->hash, e_inval); 1858 ceph_decode_8_safe(p, end, hoid->is_max, e_inval); 1859 1860 hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len, 1861 GFP_NOIO); 1862 if (IS_ERR(hoid->nspace)) { 1863 ret = PTR_ERR(hoid->nspace); 1864 hoid->nspace = NULL; 1865 return ret; 1866 } 1867 1868 ceph_decode_64_safe(p, end, hoid->pool, e_inval); 1869 1870 ceph_hoid_build_hash_cache(hoid); 1871 return 0; 1872 1873 e_inval: 1874 return -EINVAL; 1875 } 1876 1877 static int hoid_encoding_size(const struct ceph_hobject_id *hoid) 1878 { 1879 return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */ 1880 4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len; 1881 } 1882 1883 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid) 1884 { 1885 ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid)); 1886 ceph_encode_string(p, end, hoid->key, hoid->key_len); 1887 ceph_encode_string(p, end, hoid->oid, hoid->oid_len); 1888 ceph_encode_64(p, hoid->snapid); 1889 ceph_encode_32(p, hoid->hash); 1890 ceph_encode_8(p, hoid->is_max); 1891 ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len); 1892 ceph_encode_64(p, hoid->pool); 1893 } 1894 1895 static void free_hoid(struct ceph_hobject_id *hoid) 1896 { 1897 if (hoid) { 1898 kfree(hoid->key); 1899 kfree(hoid->oid); 1900 kfree(hoid->nspace); 1901 kfree(hoid); 1902 } 1903 } 1904 1905 static struct ceph_osd_backoff *alloc_backoff(void) 1906 { 1907 struct ceph_osd_backoff *backoff; 1908 1909 backoff = kzalloc(sizeof(*backoff), GFP_NOIO); 1910 if (!backoff) 1911 return NULL; 1912 1913 RB_CLEAR_NODE(&backoff->spg_node); 1914 RB_CLEAR_NODE(&backoff->id_node); 1915 return backoff; 1916 } 1917 1918 static void free_backoff(struct ceph_osd_backoff *backoff) 1919 { 1920 WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node)); 1921 WARN_ON(!RB_EMPTY_NODE(&backoff->id_node)); 1922 1923 free_hoid(backoff->begin); 1924 free_hoid(backoff->end); 1925 kfree(backoff); 1926 } 1927 1928 /* 1929 * Within a specific spgid, backoffs are managed by ->begin hoid. 1930 */ 1931 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare, 1932 RB_BYVAL, spg_node); 1933 1934 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root, 1935 const struct ceph_hobject_id *hoid) 1936 { 1937 struct rb_node *n = root->rb_node; 1938 1939 while (n) { 1940 struct ceph_osd_backoff *cur = 1941 rb_entry(n, struct ceph_osd_backoff, spg_node); 1942 int cmp; 1943 1944 cmp = hoid_compare(hoid, cur->begin); 1945 if (cmp < 0) { 1946 n = n->rb_left; 1947 } else if (cmp > 0) { 1948 if (hoid_compare(hoid, cur->end) < 0) 1949 return cur; 1950 1951 n = n->rb_right; 1952 } else { 1953 return cur; 1954 } 1955 } 1956 1957 return NULL; 1958 } 1959 1960 /* 1961 * Each backoff has a unique id within its OSD session. 1962 */ 1963 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node) 1964 1965 static void clear_backoffs(struct ceph_osd *osd) 1966 { 1967 while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) { 1968 struct ceph_spg_mapping *spg = 1969 rb_entry(rb_first(&osd->o_backoff_mappings), 1970 struct ceph_spg_mapping, node); 1971 1972 while (!RB_EMPTY_ROOT(&spg->backoffs)) { 1973 struct ceph_osd_backoff *backoff = 1974 rb_entry(rb_first(&spg->backoffs), 1975 struct ceph_osd_backoff, spg_node); 1976 1977 erase_backoff(&spg->backoffs, backoff); 1978 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); 1979 free_backoff(backoff); 1980 } 1981 erase_spg_mapping(&osd->o_backoff_mappings, spg); 1982 free_spg_mapping(spg); 1983 } 1984 } 1985 1986 /* 1987 * Set up a temporary, non-owning view into @t. 1988 */ 1989 static void hoid_fill_from_target(struct ceph_hobject_id *hoid, 1990 const struct ceph_osd_request_target *t) 1991 { 1992 hoid->key = NULL; 1993 hoid->key_len = 0; 1994 hoid->oid = t->target_oid.name; 1995 hoid->oid_len = t->target_oid.name_len; 1996 hoid->snapid = CEPH_NOSNAP; 1997 hoid->hash = t->pgid.seed; 1998 hoid->is_max = false; 1999 if (t->target_oloc.pool_ns) { 2000 hoid->nspace = t->target_oloc.pool_ns->str; 2001 hoid->nspace_len = t->target_oloc.pool_ns->len; 2002 } else { 2003 hoid->nspace = NULL; 2004 hoid->nspace_len = 0; 2005 } 2006 hoid->pool = t->target_oloc.pool; 2007 ceph_hoid_build_hash_cache(hoid); 2008 } 2009 2010 static bool should_plug_request(struct ceph_osd_request *req) 2011 { 2012 struct ceph_osd *osd = req->r_osd; 2013 struct ceph_spg_mapping *spg; 2014 struct ceph_osd_backoff *backoff; 2015 struct ceph_hobject_id hoid; 2016 2017 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid); 2018 if (!spg) 2019 return false; 2020 2021 hoid_fill_from_target(&hoid, &req->r_t); 2022 backoff = lookup_containing_backoff(&spg->backoffs, &hoid); 2023 if (!backoff) 2024 return false; 2025 2026 dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n", 2027 __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool, 2028 backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id); 2029 return true; 2030 } 2031 2032 /* 2033 * Keep get_num_data_items() in sync with this function. 2034 */ 2035 static void setup_request_data(struct ceph_osd_request *req) 2036 { 2037 struct ceph_msg *request_msg = req->r_request; 2038 struct ceph_msg *reply_msg = req->r_reply; 2039 struct ceph_osd_req_op *op; 2040 2041 if (req->r_request->num_data_items || req->r_reply->num_data_items) 2042 return; 2043 2044 WARN_ON(request_msg->data_length || reply_msg->data_length); 2045 for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { 2046 switch (op->op) { 2047 /* request */ 2048 case CEPH_OSD_OP_WRITE: 2049 case CEPH_OSD_OP_WRITEFULL: 2050 WARN_ON(op->indata_len != op->extent.length); 2051 ceph_osdc_msg_data_add(request_msg, 2052 &op->extent.osd_data); 2053 break; 2054 case CEPH_OSD_OP_SETXATTR: 2055 case CEPH_OSD_OP_CMPXATTR: 2056 WARN_ON(op->indata_len != op->xattr.name_len + 2057 op->xattr.value_len); 2058 ceph_osdc_msg_data_add(request_msg, 2059 &op->xattr.osd_data); 2060 break; 2061 case CEPH_OSD_OP_NOTIFY_ACK: 2062 ceph_osdc_msg_data_add(request_msg, 2063 &op->notify_ack.request_data); 2064 break; 2065 case CEPH_OSD_OP_COPY_FROM2: 2066 ceph_osdc_msg_data_add(request_msg, 2067 &op->copy_from.osd_data); 2068 break; 2069 2070 /* reply */ 2071 case CEPH_OSD_OP_STAT: 2072 ceph_osdc_msg_data_add(reply_msg, 2073 &op->raw_data_in); 2074 break; 2075 case CEPH_OSD_OP_READ: 2076 case CEPH_OSD_OP_SPARSE_READ: 2077 ceph_osdc_msg_data_add(reply_msg, 2078 &op->extent.osd_data); 2079 break; 2080 case CEPH_OSD_OP_LIST_WATCHERS: 2081 ceph_osdc_msg_data_add(reply_msg, 2082 &op->list_watchers.response_data); 2083 break; 2084 2085 /* both */ 2086 case CEPH_OSD_OP_CALL: 2087 WARN_ON(op->indata_len != op->cls.class_len + 2088 op->cls.method_len + 2089 op->cls.indata_len); 2090 ceph_osdc_msg_data_add(request_msg, 2091 &op->cls.request_info); 2092 /* optional, can be NONE */ 2093 ceph_osdc_msg_data_add(request_msg, 2094 &op->cls.request_data); 2095 /* optional, can be NONE */ 2096 ceph_osdc_msg_data_add(reply_msg, 2097 &op->cls.response_data); 2098 break; 2099 case CEPH_OSD_OP_NOTIFY: 2100 ceph_osdc_msg_data_add(request_msg, 2101 &op->notify.request_data); 2102 ceph_osdc_msg_data_add(reply_msg, 2103 &op->notify.response_data); 2104 break; 2105 } 2106 } 2107 } 2108 2109 static void encode_pgid(void **p, const struct ceph_pg *pgid) 2110 { 2111 ceph_encode_8(p, 1); 2112 ceph_encode_64(p, pgid->pool); 2113 ceph_encode_32(p, pgid->seed); 2114 ceph_encode_32(p, -1); /* preferred */ 2115 } 2116 2117 static void encode_spgid(void **p, const struct ceph_spg *spgid) 2118 { 2119 ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1); 2120 encode_pgid(p, &spgid->pgid); 2121 ceph_encode_8(p, spgid->shard); 2122 } 2123 2124 static void encode_oloc(void **p, void *end, 2125 const struct ceph_object_locator *oloc) 2126 { 2127 ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc)); 2128 ceph_encode_64(p, oloc->pool); 2129 ceph_encode_32(p, -1); /* preferred */ 2130 ceph_encode_32(p, 0); /* key len */ 2131 if (oloc->pool_ns) 2132 ceph_encode_string(p, end, oloc->pool_ns->str, 2133 oloc->pool_ns->len); 2134 else 2135 ceph_encode_32(p, 0); 2136 } 2137 2138 static void encode_request_partial(struct ceph_osd_request *req, 2139 struct ceph_msg *msg) 2140 { 2141 void *p = msg->front.iov_base; 2142 void *const end = p + msg->front_alloc_len; 2143 u32 data_len = 0; 2144 int i; 2145 2146 if (req->r_flags & CEPH_OSD_FLAG_WRITE) { 2147 /* snapshots aren't writeable */ 2148 WARN_ON(req->r_snapid != CEPH_NOSNAP); 2149 } else { 2150 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec || 2151 req->r_data_offset || req->r_snapc); 2152 } 2153 2154 setup_request_data(req); 2155 2156 encode_spgid(&p, &req->r_t.spgid); /* actual spg */ 2157 ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */ 2158 ceph_encode_32(&p, req->r_osdc->osdmap->epoch); 2159 ceph_encode_32(&p, req->r_flags); 2160 2161 /* reqid */ 2162 ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid)); 2163 memset(p, 0, sizeof(struct ceph_osd_reqid)); 2164 p += sizeof(struct ceph_osd_reqid); 2165 2166 /* trace */ 2167 memset(p, 0, sizeof(struct ceph_blkin_trace_info)); 2168 p += sizeof(struct ceph_blkin_trace_info); 2169 2170 ceph_encode_32(&p, 0); /* client_inc, always 0 */ 2171 ceph_encode_timespec64(p, &req->r_mtime); 2172 p += sizeof(struct ceph_timespec); 2173 2174 encode_oloc(&p, end, &req->r_t.target_oloc); 2175 ceph_encode_string(&p, end, req->r_t.target_oid.name, 2176 req->r_t.target_oid.name_len); 2177 2178 /* ops, can imply data */ 2179 ceph_encode_16(&p, req->r_num_ops); 2180 for (i = 0; i < req->r_num_ops; i++) { 2181 data_len += osd_req_encode_op(p, &req->r_ops[i]); 2182 p += sizeof(struct ceph_osd_op); 2183 } 2184 2185 ceph_encode_64(&p, req->r_snapid); /* snapid */ 2186 if (req->r_snapc) { 2187 ceph_encode_64(&p, req->r_snapc->seq); 2188 ceph_encode_32(&p, req->r_snapc->num_snaps); 2189 for (i = 0; i < req->r_snapc->num_snaps; i++) 2190 ceph_encode_64(&p, req->r_snapc->snaps[i]); 2191 } else { 2192 ceph_encode_64(&p, 0); /* snap_seq */ 2193 ceph_encode_32(&p, 0); /* snaps len */ 2194 } 2195 2196 ceph_encode_32(&p, req->r_attempts); /* retry_attempt */ 2197 BUG_ON(p > end - 8); /* space for features */ 2198 2199 msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */ 2200 /* front_len is finalized in encode_request_finish() */ 2201 msg->front.iov_len = p - msg->front.iov_base; 2202 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2203 msg->hdr.data_len = cpu_to_le32(data_len); 2204 /* 2205 * The header "data_off" is a hint to the receiver allowing it 2206 * to align received data into its buffers such that there's no 2207 * need to re-copy it before writing it to disk (direct I/O). 2208 */ 2209 msg->hdr.data_off = cpu_to_le16(req->r_data_offset); 2210 2211 dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg, 2212 req->r_t.target_oid.name, req->r_t.target_oid.name_len); 2213 } 2214 2215 static void encode_request_finish(struct ceph_msg *msg) 2216 { 2217 void *p = msg->front.iov_base; 2218 void *const partial_end = p + msg->front.iov_len; 2219 void *const end = p + msg->front_alloc_len; 2220 2221 if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) { 2222 /* luminous OSD -- encode features and be done */ 2223 p = partial_end; 2224 ceph_encode_64(&p, msg->con->peer_features); 2225 } else { 2226 struct { 2227 char spgid[CEPH_ENCODING_START_BLK_LEN + 2228 CEPH_PGID_ENCODING_LEN + 1]; 2229 __le32 hash; 2230 __le32 epoch; 2231 __le32 flags; 2232 char reqid[CEPH_ENCODING_START_BLK_LEN + 2233 sizeof(struct ceph_osd_reqid)]; 2234 char trace[sizeof(struct ceph_blkin_trace_info)]; 2235 __le32 client_inc; 2236 struct ceph_timespec mtime; 2237 } __packed head; 2238 struct ceph_pg pgid; 2239 void *oloc, *oid, *tail; 2240 int oloc_len, oid_len, tail_len; 2241 int len; 2242 2243 /* 2244 * Pre-luminous OSD -- reencode v8 into v4 using @head 2245 * as a temporary buffer. Encode the raw PG; the rest 2246 * is just a matter of moving oloc, oid and tail blobs 2247 * around. 2248 */ 2249 memcpy(&head, p, sizeof(head)); 2250 p += sizeof(head); 2251 2252 oloc = p; 2253 p += CEPH_ENCODING_START_BLK_LEN; 2254 pgid.pool = ceph_decode_64(&p); 2255 p += 4 + 4; /* preferred, key len */ 2256 len = ceph_decode_32(&p); 2257 p += len; /* nspace */ 2258 oloc_len = p - oloc; 2259 2260 oid = p; 2261 len = ceph_decode_32(&p); 2262 p += len; 2263 oid_len = p - oid; 2264 2265 tail = p; 2266 tail_len = partial_end - p; 2267 2268 p = msg->front.iov_base; 2269 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc)); 2270 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch)); 2271 ceph_encode_copy(&p, &head.flags, sizeof(head.flags)); 2272 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime)); 2273 2274 /* reassert_version */ 2275 memset(p, 0, sizeof(struct ceph_eversion)); 2276 p += sizeof(struct ceph_eversion); 2277 2278 BUG_ON(p >= oloc); 2279 memmove(p, oloc, oloc_len); 2280 p += oloc_len; 2281 2282 pgid.seed = le32_to_cpu(head.hash); 2283 encode_pgid(&p, &pgid); /* raw pg */ 2284 2285 BUG_ON(p >= oid); 2286 memmove(p, oid, oid_len); 2287 p += oid_len; 2288 2289 /* tail -- ops, snapid, snapc, retry_attempt */ 2290 BUG_ON(p >= tail); 2291 memmove(p, tail, tail_len); 2292 p += tail_len; 2293 2294 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */ 2295 } 2296 2297 BUG_ON(p > end); 2298 msg->front.iov_len = p - msg->front.iov_base; 2299 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2300 2301 dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg, 2302 le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len), 2303 le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len), 2304 le16_to_cpu(msg->hdr.version)); 2305 } 2306 2307 /* 2308 * @req has to be assigned a tid and registered. 2309 */ 2310 static void send_request(struct ceph_osd_request *req) 2311 { 2312 struct ceph_osd *osd = req->r_osd; 2313 2314 verify_osd_locked(osd); 2315 WARN_ON(osd->o_osd != req->r_t.osd); 2316 2317 /* backoff? */ 2318 if (should_plug_request(req)) 2319 return; 2320 2321 /* 2322 * We may have a previously queued request message hanging 2323 * around. Cancel it to avoid corrupting the msgr. 2324 */ 2325 if (req->r_sent) 2326 ceph_msg_revoke(req->r_request); 2327 2328 req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR; 2329 if (req->r_attempts) 2330 req->r_flags |= CEPH_OSD_FLAG_RETRY; 2331 else 2332 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY); 2333 2334 encode_request_partial(req, req->r_request); 2335 2336 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n", 2337 __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed, 2338 req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed, 2339 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags, 2340 req->r_attempts); 2341 2342 req->r_t.paused = false; 2343 req->r_stamp = jiffies; 2344 req->r_attempts++; 2345 2346 req->r_sent = osd->o_incarnation; 2347 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 2348 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request)); 2349 } 2350 2351 static void maybe_request_map(struct ceph_osd_client *osdc) 2352 { 2353 bool continuous = false; 2354 2355 verify_osdc_locked(osdc); 2356 WARN_ON(!osdc->osdmap->epoch); 2357 2358 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2359 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) || 2360 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2361 dout("%s osdc %p continuous\n", __func__, osdc); 2362 continuous = true; 2363 } else { 2364 dout("%s osdc %p onetime\n", __func__, osdc); 2365 } 2366 2367 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP, 2368 osdc->osdmap->epoch + 1, continuous)) 2369 ceph_monc_renew_subs(&osdc->client->monc); 2370 } 2371 2372 static void complete_request(struct ceph_osd_request *req, int err); 2373 static void send_map_check(struct ceph_osd_request *req); 2374 2375 static void __submit_request(struct ceph_osd_request *req, bool wrlocked) 2376 { 2377 struct ceph_osd_client *osdc = req->r_osdc; 2378 struct ceph_osd *osd; 2379 enum calc_target_result ct_res; 2380 int err = 0; 2381 bool need_send = false; 2382 bool promoted = false; 2383 2384 WARN_ON(req->r_tid); 2385 dout("%s req %p wrlocked %d\n", __func__, req, wrlocked); 2386 2387 again: 2388 ct_res = calc_target(osdc, &req->r_t, false); 2389 if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked) 2390 goto promote; 2391 2392 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked); 2393 if (IS_ERR(osd)) { 2394 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked); 2395 goto promote; 2396 } 2397 2398 if (osdc->abort_err) { 2399 dout("req %p abort_err %d\n", req, osdc->abort_err); 2400 err = osdc->abort_err; 2401 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) { 2402 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch, 2403 osdc->epoch_barrier); 2404 req->r_t.paused = true; 2405 maybe_request_map(osdc); 2406 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2407 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2408 dout("req %p pausewr\n", req); 2409 req->r_t.paused = true; 2410 maybe_request_map(osdc); 2411 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && 2412 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 2413 dout("req %p pauserd\n", req); 2414 req->r_t.paused = true; 2415 maybe_request_map(osdc); 2416 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2417 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | 2418 CEPH_OSD_FLAG_FULL_FORCE)) && 2419 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2420 pool_full(osdc, req->r_t.base_oloc.pool))) { 2421 dout("req %p full/pool_full\n", req); 2422 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { 2423 err = -ENOSPC; 2424 } else { 2425 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) 2426 pr_warn_ratelimited("cluster is full (osdmap FULL)\n"); 2427 else 2428 pr_warn_ratelimited("pool %lld is full or reached quota\n", 2429 req->r_t.base_oloc.pool); 2430 req->r_t.paused = true; 2431 maybe_request_map(osdc); 2432 } 2433 } else if (!osd_homeless(osd)) { 2434 need_send = true; 2435 } else { 2436 maybe_request_map(osdc); 2437 } 2438 2439 mutex_lock(&osd->lock); 2440 /* 2441 * Assign the tid atomically with send_request() to protect 2442 * multiple writes to the same object from racing with each 2443 * other, resulting in out of order ops on the OSDs. 2444 */ 2445 req->r_tid = atomic64_inc_return(&osdc->last_tid); 2446 link_request(osd, req); 2447 if (need_send) 2448 send_request(req); 2449 else if (err) 2450 complete_request(req, err); 2451 mutex_unlock(&osd->lock); 2452 2453 if (!err && ct_res == CALC_TARGET_POOL_DNE) 2454 send_map_check(req); 2455 2456 if (promoted) 2457 downgrade_write(&osdc->lock); 2458 return; 2459 2460 promote: 2461 up_read(&osdc->lock); 2462 down_write(&osdc->lock); 2463 wrlocked = true; 2464 promoted = true; 2465 goto again; 2466 } 2467 2468 static void account_request(struct ceph_osd_request *req) 2469 { 2470 WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK)); 2471 WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE))); 2472 2473 req->r_flags |= CEPH_OSD_FLAG_ONDISK; 2474 atomic_inc(&req->r_osdc->num_requests); 2475 2476 req->r_start_stamp = jiffies; 2477 req->r_start_latency = ktime_get(); 2478 } 2479 2480 static void submit_request(struct ceph_osd_request *req, bool wrlocked) 2481 { 2482 ceph_osdc_get_request(req); 2483 account_request(req); 2484 __submit_request(req, wrlocked); 2485 } 2486 2487 static void finish_request(struct ceph_osd_request *req) 2488 { 2489 struct ceph_osd_client *osdc = req->r_osdc; 2490 2491 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid)); 2492 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2493 2494 req->r_end_latency = ktime_get(); 2495 2496 if (req->r_osd) { 2497 ceph_init_sparse_read(&req->r_osd->o_sparse_read); 2498 unlink_request(req->r_osd, req); 2499 } 2500 atomic_dec(&osdc->num_requests); 2501 2502 /* 2503 * If an OSD has failed or returned and a request has been sent 2504 * twice, it's possible to get a reply and end up here while the 2505 * request message is queued for delivery. We will ignore the 2506 * reply, so not a big deal, but better to try and catch it. 2507 */ 2508 ceph_msg_revoke(req->r_request); 2509 ceph_msg_revoke_incoming(req->r_reply); 2510 } 2511 2512 static void __complete_request(struct ceph_osd_request *req) 2513 { 2514 dout("%s req %p tid %llu cb %ps result %d\n", __func__, req, 2515 req->r_tid, req->r_callback, req->r_result); 2516 2517 if (req->r_callback) 2518 req->r_callback(req); 2519 complete_all(&req->r_completion); 2520 ceph_osdc_put_request(req); 2521 } 2522 2523 static void complete_request_workfn(struct work_struct *work) 2524 { 2525 struct ceph_osd_request *req = 2526 container_of(work, struct ceph_osd_request, r_complete_work); 2527 2528 __complete_request(req); 2529 } 2530 2531 /* 2532 * This is open-coded in handle_reply(). 2533 */ 2534 static void complete_request(struct ceph_osd_request *req, int err) 2535 { 2536 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2537 2538 req->r_result = err; 2539 finish_request(req); 2540 2541 INIT_WORK(&req->r_complete_work, complete_request_workfn); 2542 queue_work(req->r_osdc->completion_wq, &req->r_complete_work); 2543 } 2544 2545 static void cancel_map_check(struct ceph_osd_request *req) 2546 { 2547 struct ceph_osd_client *osdc = req->r_osdc; 2548 struct ceph_osd_request *lookup_req; 2549 2550 verify_osdc_wrlocked(osdc); 2551 2552 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2553 if (!lookup_req) 2554 return; 2555 2556 WARN_ON(lookup_req != req); 2557 erase_request_mc(&osdc->map_checks, req); 2558 ceph_osdc_put_request(req); 2559 } 2560 2561 static void cancel_request(struct ceph_osd_request *req) 2562 { 2563 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2564 2565 cancel_map_check(req); 2566 finish_request(req); 2567 complete_all(&req->r_completion); 2568 ceph_osdc_put_request(req); 2569 } 2570 2571 static void abort_request(struct ceph_osd_request *req, int err) 2572 { 2573 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2574 2575 cancel_map_check(req); 2576 complete_request(req, err); 2577 } 2578 2579 static int abort_fn(struct ceph_osd_request *req, void *arg) 2580 { 2581 int err = *(int *)arg; 2582 2583 abort_request(req, err); 2584 return 0; /* continue iteration */ 2585 } 2586 2587 /* 2588 * Abort all in-flight requests with @err and arrange for all future 2589 * requests to be failed immediately. 2590 */ 2591 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err) 2592 { 2593 dout("%s osdc %p err %d\n", __func__, osdc, err); 2594 down_write(&osdc->lock); 2595 for_each_request(osdc, abort_fn, &err); 2596 osdc->abort_err = err; 2597 up_write(&osdc->lock); 2598 } 2599 EXPORT_SYMBOL(ceph_osdc_abort_requests); 2600 2601 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc) 2602 { 2603 down_write(&osdc->lock); 2604 osdc->abort_err = 0; 2605 up_write(&osdc->lock); 2606 } 2607 EXPORT_SYMBOL(ceph_osdc_clear_abort_err); 2608 2609 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2610 { 2611 if (likely(eb > osdc->epoch_barrier)) { 2612 dout("updating epoch_barrier from %u to %u\n", 2613 osdc->epoch_barrier, eb); 2614 osdc->epoch_barrier = eb; 2615 /* Request map if we're not to the barrier yet */ 2616 if (eb > osdc->osdmap->epoch) 2617 maybe_request_map(osdc); 2618 } 2619 } 2620 2621 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2622 { 2623 down_read(&osdc->lock); 2624 if (unlikely(eb > osdc->epoch_barrier)) { 2625 up_read(&osdc->lock); 2626 down_write(&osdc->lock); 2627 update_epoch_barrier(osdc, eb); 2628 up_write(&osdc->lock); 2629 } else { 2630 up_read(&osdc->lock); 2631 } 2632 } 2633 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier); 2634 2635 /* 2636 * We can end up releasing caps as a result of abort_request(). 2637 * In that case, we probably want to ensure that the cap release message 2638 * has an updated epoch barrier in it, so set the epoch barrier prior to 2639 * aborting the first request. 2640 */ 2641 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg) 2642 { 2643 struct ceph_osd_client *osdc = req->r_osdc; 2644 bool *victims = arg; 2645 2646 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2647 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2648 pool_full(osdc, req->r_t.base_oloc.pool))) { 2649 if (!*victims) { 2650 update_epoch_barrier(osdc, osdc->osdmap->epoch); 2651 *victims = true; 2652 } 2653 abort_request(req, -ENOSPC); 2654 } 2655 2656 return 0; /* continue iteration */ 2657 } 2658 2659 /* 2660 * Drop all pending requests that are stalled waiting on a full condition to 2661 * clear, and complete them with ENOSPC as the return code. Set the 2662 * osdc->epoch_barrier to the latest map epoch that we've seen if any were 2663 * cancelled. 2664 */ 2665 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc) 2666 { 2667 bool victims = false; 2668 2669 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) && 2670 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) 2671 for_each_request(osdc, abort_on_full_fn, &victims); 2672 } 2673 2674 static void check_pool_dne(struct ceph_osd_request *req) 2675 { 2676 struct ceph_osd_client *osdc = req->r_osdc; 2677 struct ceph_osdmap *map = osdc->osdmap; 2678 2679 verify_osdc_wrlocked(osdc); 2680 WARN_ON(!map->epoch); 2681 2682 if (req->r_attempts) { 2683 /* 2684 * We sent a request earlier, which means that 2685 * previously the pool existed, and now it does not 2686 * (i.e., it was deleted). 2687 */ 2688 req->r_map_dne_bound = map->epoch; 2689 dout("%s req %p tid %llu pool disappeared\n", __func__, req, 2690 req->r_tid); 2691 } else { 2692 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__, 2693 req, req->r_tid, req->r_map_dne_bound, map->epoch); 2694 } 2695 2696 if (req->r_map_dne_bound) { 2697 if (map->epoch >= req->r_map_dne_bound) { 2698 /* we had a new enough map */ 2699 pr_info_ratelimited("tid %llu pool does not exist\n", 2700 req->r_tid); 2701 complete_request(req, -ENOENT); 2702 } 2703 } else { 2704 send_map_check(req); 2705 } 2706 } 2707 2708 static void map_check_cb(struct ceph_mon_generic_request *greq) 2709 { 2710 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 2711 struct ceph_osd_request *req; 2712 u64 tid = greq->private_data; 2713 2714 WARN_ON(greq->result || !greq->u.newest); 2715 2716 down_write(&osdc->lock); 2717 req = lookup_request_mc(&osdc->map_checks, tid); 2718 if (!req) { 2719 dout("%s tid %llu dne\n", __func__, tid); 2720 goto out_unlock; 2721 } 2722 2723 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__, 2724 req, req->r_tid, req->r_map_dne_bound, greq->u.newest); 2725 if (!req->r_map_dne_bound) 2726 req->r_map_dne_bound = greq->u.newest; 2727 erase_request_mc(&osdc->map_checks, req); 2728 check_pool_dne(req); 2729 2730 ceph_osdc_put_request(req); 2731 out_unlock: 2732 up_write(&osdc->lock); 2733 } 2734 2735 static void send_map_check(struct ceph_osd_request *req) 2736 { 2737 struct ceph_osd_client *osdc = req->r_osdc; 2738 struct ceph_osd_request *lookup_req; 2739 int ret; 2740 2741 verify_osdc_wrlocked(osdc); 2742 2743 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2744 if (lookup_req) { 2745 WARN_ON(lookup_req != req); 2746 return; 2747 } 2748 2749 ceph_osdc_get_request(req); 2750 insert_request_mc(&osdc->map_checks, req); 2751 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 2752 map_check_cb, req->r_tid); 2753 WARN_ON(ret); 2754 } 2755 2756 /* 2757 * lingering requests, watch/notify v2 infrastructure 2758 */ 2759 static void linger_release(struct kref *kref) 2760 { 2761 struct ceph_osd_linger_request *lreq = 2762 container_of(kref, struct ceph_osd_linger_request, kref); 2763 2764 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq, 2765 lreq->reg_req, lreq->ping_req); 2766 WARN_ON(!RB_EMPTY_NODE(&lreq->node)); 2767 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node)); 2768 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node)); 2769 WARN_ON(!list_empty(&lreq->scan_item)); 2770 WARN_ON(!list_empty(&lreq->pending_lworks)); 2771 WARN_ON(lreq->osd); 2772 2773 if (lreq->request_pl) 2774 ceph_pagelist_release(lreq->request_pl); 2775 if (lreq->notify_id_pages) 2776 ceph_release_page_vector(lreq->notify_id_pages, 1); 2777 2778 ceph_osdc_put_request(lreq->reg_req); 2779 ceph_osdc_put_request(lreq->ping_req); 2780 target_destroy(&lreq->t); 2781 kfree(lreq); 2782 } 2783 2784 static void linger_put(struct ceph_osd_linger_request *lreq) 2785 { 2786 if (lreq) 2787 kref_put(&lreq->kref, linger_release); 2788 } 2789 2790 static struct ceph_osd_linger_request * 2791 linger_get(struct ceph_osd_linger_request *lreq) 2792 { 2793 kref_get(&lreq->kref); 2794 return lreq; 2795 } 2796 2797 static struct ceph_osd_linger_request * 2798 linger_alloc(struct ceph_osd_client *osdc) 2799 { 2800 struct ceph_osd_linger_request *lreq; 2801 2802 lreq = kzalloc(sizeof(*lreq), GFP_NOIO); 2803 if (!lreq) 2804 return NULL; 2805 2806 kref_init(&lreq->kref); 2807 mutex_init(&lreq->lock); 2808 RB_CLEAR_NODE(&lreq->node); 2809 RB_CLEAR_NODE(&lreq->osdc_node); 2810 RB_CLEAR_NODE(&lreq->mc_node); 2811 INIT_LIST_HEAD(&lreq->scan_item); 2812 INIT_LIST_HEAD(&lreq->pending_lworks); 2813 init_completion(&lreq->reg_commit_wait); 2814 init_completion(&lreq->notify_finish_wait); 2815 2816 lreq->osdc = osdc; 2817 target_init(&lreq->t); 2818 2819 dout("%s lreq %p\n", __func__, lreq); 2820 return lreq; 2821 } 2822 2823 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node) 2824 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node) 2825 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node) 2826 2827 /* 2828 * Create linger request <-> OSD session relation. 2829 * 2830 * @lreq has to be registered, @osd may be homeless. 2831 */ 2832 static void link_linger(struct ceph_osd *osd, 2833 struct ceph_osd_linger_request *lreq) 2834 { 2835 verify_osd_locked(osd); 2836 WARN_ON(!lreq->linger_id || lreq->osd); 2837 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2838 osd->o_osd, lreq, lreq->linger_id); 2839 2840 if (!osd_homeless(osd)) 2841 __remove_osd_from_lru(osd); 2842 else 2843 atomic_inc(&osd->o_osdc->num_homeless); 2844 2845 get_osd(osd); 2846 insert_linger(&osd->o_linger_requests, lreq); 2847 lreq->osd = osd; 2848 } 2849 2850 static void unlink_linger(struct ceph_osd *osd, 2851 struct ceph_osd_linger_request *lreq) 2852 { 2853 verify_osd_locked(osd); 2854 WARN_ON(lreq->osd != osd); 2855 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2856 osd->o_osd, lreq, lreq->linger_id); 2857 2858 lreq->osd = NULL; 2859 erase_linger(&osd->o_linger_requests, lreq); 2860 put_osd(osd); 2861 2862 if (!osd_homeless(osd)) 2863 maybe_move_osd_to_lru(osd); 2864 else 2865 atomic_dec(&osd->o_osdc->num_homeless); 2866 } 2867 2868 static bool __linger_registered(struct ceph_osd_linger_request *lreq) 2869 { 2870 verify_osdc_locked(lreq->osdc); 2871 2872 return !RB_EMPTY_NODE(&lreq->osdc_node); 2873 } 2874 2875 static bool linger_registered(struct ceph_osd_linger_request *lreq) 2876 { 2877 struct ceph_osd_client *osdc = lreq->osdc; 2878 bool registered; 2879 2880 down_read(&osdc->lock); 2881 registered = __linger_registered(lreq); 2882 up_read(&osdc->lock); 2883 2884 return registered; 2885 } 2886 2887 static void linger_register(struct ceph_osd_linger_request *lreq) 2888 { 2889 struct ceph_osd_client *osdc = lreq->osdc; 2890 2891 verify_osdc_wrlocked(osdc); 2892 WARN_ON(lreq->linger_id); 2893 2894 linger_get(lreq); 2895 lreq->linger_id = ++osdc->last_linger_id; 2896 insert_linger_osdc(&osdc->linger_requests, lreq); 2897 } 2898 2899 static void linger_unregister(struct ceph_osd_linger_request *lreq) 2900 { 2901 struct ceph_osd_client *osdc = lreq->osdc; 2902 2903 verify_osdc_wrlocked(osdc); 2904 2905 erase_linger_osdc(&osdc->linger_requests, lreq); 2906 linger_put(lreq); 2907 } 2908 2909 static void cancel_linger_request(struct ceph_osd_request *req) 2910 { 2911 struct ceph_osd_linger_request *lreq = req->r_priv; 2912 2913 WARN_ON(!req->r_linger); 2914 cancel_request(req); 2915 linger_put(lreq); 2916 } 2917 2918 struct linger_work { 2919 struct work_struct work; 2920 struct ceph_osd_linger_request *lreq; 2921 struct list_head pending_item; 2922 unsigned long queued_stamp; 2923 2924 union { 2925 struct { 2926 u64 notify_id; 2927 u64 notifier_id; 2928 void *payload; /* points into @msg front */ 2929 size_t payload_len; 2930 2931 struct ceph_msg *msg; /* for ceph_msg_put() */ 2932 } notify; 2933 struct { 2934 int err; 2935 } error; 2936 }; 2937 }; 2938 2939 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq, 2940 work_func_t workfn) 2941 { 2942 struct linger_work *lwork; 2943 2944 lwork = kzalloc(sizeof(*lwork), GFP_NOIO); 2945 if (!lwork) 2946 return NULL; 2947 2948 INIT_WORK(&lwork->work, workfn); 2949 INIT_LIST_HEAD(&lwork->pending_item); 2950 lwork->lreq = linger_get(lreq); 2951 2952 return lwork; 2953 } 2954 2955 static void lwork_free(struct linger_work *lwork) 2956 { 2957 struct ceph_osd_linger_request *lreq = lwork->lreq; 2958 2959 mutex_lock(&lreq->lock); 2960 list_del(&lwork->pending_item); 2961 mutex_unlock(&lreq->lock); 2962 2963 linger_put(lreq); 2964 kfree(lwork); 2965 } 2966 2967 static void lwork_queue(struct linger_work *lwork) 2968 { 2969 struct ceph_osd_linger_request *lreq = lwork->lreq; 2970 struct ceph_osd_client *osdc = lreq->osdc; 2971 2972 verify_lreq_locked(lreq); 2973 WARN_ON(!list_empty(&lwork->pending_item)); 2974 2975 lwork->queued_stamp = jiffies; 2976 list_add_tail(&lwork->pending_item, &lreq->pending_lworks); 2977 queue_work(osdc->notify_wq, &lwork->work); 2978 } 2979 2980 static void do_watch_notify(struct work_struct *w) 2981 { 2982 struct linger_work *lwork = container_of(w, struct linger_work, work); 2983 struct ceph_osd_linger_request *lreq = lwork->lreq; 2984 2985 if (!linger_registered(lreq)) { 2986 dout("%s lreq %p not registered\n", __func__, lreq); 2987 goto out; 2988 } 2989 2990 WARN_ON(!lreq->is_watch); 2991 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n", 2992 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id, 2993 lwork->notify.payload_len); 2994 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id, 2995 lwork->notify.notifier_id, lwork->notify.payload, 2996 lwork->notify.payload_len); 2997 2998 out: 2999 ceph_msg_put(lwork->notify.msg); 3000 lwork_free(lwork); 3001 } 3002 3003 static void do_watch_error(struct work_struct *w) 3004 { 3005 struct linger_work *lwork = container_of(w, struct linger_work, work); 3006 struct ceph_osd_linger_request *lreq = lwork->lreq; 3007 3008 if (!linger_registered(lreq)) { 3009 dout("%s lreq %p not registered\n", __func__, lreq); 3010 goto out; 3011 } 3012 3013 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err); 3014 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err); 3015 3016 out: 3017 lwork_free(lwork); 3018 } 3019 3020 static void queue_watch_error(struct ceph_osd_linger_request *lreq) 3021 { 3022 struct linger_work *lwork; 3023 3024 lwork = lwork_alloc(lreq, do_watch_error); 3025 if (!lwork) { 3026 pr_err("failed to allocate error-lwork\n"); 3027 return; 3028 } 3029 3030 lwork->error.err = lreq->last_error; 3031 lwork_queue(lwork); 3032 } 3033 3034 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq, 3035 int result) 3036 { 3037 if (!completion_done(&lreq->reg_commit_wait)) { 3038 lreq->reg_commit_error = (result <= 0 ? result : 0); 3039 complete_all(&lreq->reg_commit_wait); 3040 } 3041 } 3042 3043 static void linger_commit_cb(struct ceph_osd_request *req) 3044 { 3045 struct ceph_osd_linger_request *lreq = req->r_priv; 3046 3047 mutex_lock(&lreq->lock); 3048 if (req != lreq->reg_req) { 3049 dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n", 3050 __func__, lreq, lreq->linger_id, req, lreq->reg_req); 3051 goto out; 3052 } 3053 3054 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq, 3055 lreq->linger_id, req->r_result); 3056 linger_reg_commit_complete(lreq, req->r_result); 3057 lreq->committed = true; 3058 3059 if (!lreq->is_watch) { 3060 struct ceph_osd_data *osd_data = 3061 osd_req_op_data(req, 0, notify, response_data); 3062 void *p = page_address(osd_data->pages[0]); 3063 3064 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY || 3065 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 3066 3067 /* make note of the notify_id */ 3068 if (req->r_ops[0].outdata_len >= sizeof(u64)) { 3069 lreq->notify_id = ceph_decode_64(&p); 3070 dout("lreq %p notify_id %llu\n", lreq, 3071 lreq->notify_id); 3072 } else { 3073 dout("lreq %p no notify_id\n", lreq); 3074 } 3075 } 3076 3077 out: 3078 mutex_unlock(&lreq->lock); 3079 linger_put(lreq); 3080 } 3081 3082 static int normalize_watch_error(int err) 3083 { 3084 /* 3085 * Translate ENOENT -> ENOTCONN so that a delete->disconnection 3086 * notification and a failure to reconnect because we raced with 3087 * the delete appear the same to the user. 3088 */ 3089 if (err == -ENOENT) 3090 err = -ENOTCONN; 3091 3092 return err; 3093 } 3094 3095 static void linger_reconnect_cb(struct ceph_osd_request *req) 3096 { 3097 struct ceph_osd_linger_request *lreq = req->r_priv; 3098 3099 mutex_lock(&lreq->lock); 3100 if (req != lreq->reg_req) { 3101 dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n", 3102 __func__, lreq, lreq->linger_id, req, lreq->reg_req); 3103 goto out; 3104 } 3105 3106 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__, 3107 lreq, lreq->linger_id, req->r_result, lreq->last_error); 3108 if (req->r_result < 0) { 3109 if (!lreq->last_error) { 3110 lreq->last_error = normalize_watch_error(req->r_result); 3111 queue_watch_error(lreq); 3112 } 3113 } 3114 3115 out: 3116 mutex_unlock(&lreq->lock); 3117 linger_put(lreq); 3118 } 3119 3120 static void send_linger(struct ceph_osd_linger_request *lreq) 3121 { 3122 struct ceph_osd_client *osdc = lreq->osdc; 3123 struct ceph_osd_request *req; 3124 int ret; 3125 3126 verify_osdc_wrlocked(osdc); 3127 mutex_lock(&lreq->lock); 3128 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3129 3130 if (lreq->reg_req) { 3131 if (lreq->reg_req->r_osd) 3132 cancel_linger_request(lreq->reg_req); 3133 ceph_osdc_put_request(lreq->reg_req); 3134 } 3135 3136 req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO); 3137 BUG_ON(!req); 3138 3139 target_copy(&req->r_t, &lreq->t); 3140 req->r_mtime = lreq->mtime; 3141 3142 if (lreq->is_watch && lreq->committed) { 3143 osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_RECONNECT, 3144 lreq->linger_id, ++lreq->register_gen); 3145 dout("lreq %p reconnect register_gen %u\n", lreq, 3146 req->r_ops[0].watch.gen); 3147 req->r_callback = linger_reconnect_cb; 3148 } else { 3149 if (lreq->is_watch) { 3150 osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_WATCH, 3151 lreq->linger_id, 0); 3152 } else { 3153 lreq->notify_id = 0; 3154 3155 refcount_inc(&lreq->request_pl->refcnt); 3156 osd_req_op_notify_init(req, 0, lreq->linger_id, 3157 lreq->request_pl); 3158 ceph_osd_data_pages_init( 3159 osd_req_op_data(req, 0, notify, response_data), 3160 lreq->notify_id_pages, PAGE_SIZE, 0, false, false); 3161 } 3162 dout("lreq %p register\n", lreq); 3163 req->r_callback = linger_commit_cb; 3164 } 3165 3166 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 3167 BUG_ON(ret); 3168 3169 req->r_priv = linger_get(lreq); 3170 req->r_linger = true; 3171 lreq->reg_req = req; 3172 mutex_unlock(&lreq->lock); 3173 3174 submit_request(req, true); 3175 } 3176 3177 static void linger_ping_cb(struct ceph_osd_request *req) 3178 { 3179 struct ceph_osd_linger_request *lreq = req->r_priv; 3180 3181 mutex_lock(&lreq->lock); 3182 if (req != lreq->ping_req) { 3183 dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n", 3184 __func__, lreq, lreq->linger_id, req, lreq->ping_req); 3185 goto out; 3186 } 3187 3188 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n", 3189 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent, 3190 lreq->last_error); 3191 if (lreq->register_gen == req->r_ops[0].watch.gen) { 3192 if (!req->r_result) { 3193 lreq->watch_valid_thru = lreq->ping_sent; 3194 } else if (!lreq->last_error) { 3195 lreq->last_error = normalize_watch_error(req->r_result); 3196 queue_watch_error(lreq); 3197 } 3198 } else { 3199 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq, 3200 lreq->register_gen, req->r_ops[0].watch.gen); 3201 } 3202 3203 out: 3204 mutex_unlock(&lreq->lock); 3205 linger_put(lreq); 3206 } 3207 3208 static void send_linger_ping(struct ceph_osd_linger_request *lreq) 3209 { 3210 struct ceph_osd_client *osdc = lreq->osdc; 3211 struct ceph_osd_request *req; 3212 int ret; 3213 3214 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 3215 dout("%s PAUSERD\n", __func__); 3216 return; 3217 } 3218 3219 lreq->ping_sent = jiffies; 3220 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n", 3221 __func__, lreq, lreq->linger_id, lreq->ping_sent, 3222 lreq->register_gen); 3223 3224 if (lreq->ping_req) { 3225 if (lreq->ping_req->r_osd) 3226 cancel_linger_request(lreq->ping_req); 3227 ceph_osdc_put_request(lreq->ping_req); 3228 } 3229 3230 req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO); 3231 BUG_ON(!req); 3232 3233 target_copy(&req->r_t, &lreq->t); 3234 osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_PING, lreq->linger_id, 3235 lreq->register_gen); 3236 req->r_callback = linger_ping_cb; 3237 3238 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 3239 BUG_ON(ret); 3240 3241 req->r_priv = linger_get(lreq); 3242 req->r_linger = true; 3243 lreq->ping_req = req; 3244 3245 ceph_osdc_get_request(req); 3246 account_request(req); 3247 req->r_tid = atomic64_inc_return(&osdc->last_tid); 3248 link_request(lreq->osd, req); 3249 send_request(req); 3250 } 3251 3252 static void linger_submit(struct ceph_osd_linger_request *lreq) 3253 { 3254 struct ceph_osd_client *osdc = lreq->osdc; 3255 struct ceph_osd *osd; 3256 3257 down_write(&osdc->lock); 3258 linger_register(lreq); 3259 3260 calc_target(osdc, &lreq->t, false); 3261 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3262 link_linger(osd, lreq); 3263 3264 send_linger(lreq); 3265 up_write(&osdc->lock); 3266 } 3267 3268 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq) 3269 { 3270 struct ceph_osd_client *osdc = lreq->osdc; 3271 struct ceph_osd_linger_request *lookup_lreq; 3272 3273 verify_osdc_wrlocked(osdc); 3274 3275 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3276 lreq->linger_id); 3277 if (!lookup_lreq) 3278 return; 3279 3280 WARN_ON(lookup_lreq != lreq); 3281 erase_linger_mc(&osdc->linger_map_checks, lreq); 3282 linger_put(lreq); 3283 } 3284 3285 /* 3286 * @lreq has to be both registered and linked. 3287 */ 3288 static void __linger_cancel(struct ceph_osd_linger_request *lreq) 3289 { 3290 if (lreq->ping_req && lreq->ping_req->r_osd) 3291 cancel_linger_request(lreq->ping_req); 3292 if (lreq->reg_req && lreq->reg_req->r_osd) 3293 cancel_linger_request(lreq->reg_req); 3294 cancel_linger_map_check(lreq); 3295 unlink_linger(lreq->osd, lreq); 3296 linger_unregister(lreq); 3297 } 3298 3299 static void linger_cancel(struct ceph_osd_linger_request *lreq) 3300 { 3301 struct ceph_osd_client *osdc = lreq->osdc; 3302 3303 down_write(&osdc->lock); 3304 if (__linger_registered(lreq)) 3305 __linger_cancel(lreq); 3306 up_write(&osdc->lock); 3307 } 3308 3309 static void send_linger_map_check(struct ceph_osd_linger_request *lreq); 3310 3311 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq) 3312 { 3313 struct ceph_osd_client *osdc = lreq->osdc; 3314 struct ceph_osdmap *map = osdc->osdmap; 3315 3316 verify_osdc_wrlocked(osdc); 3317 WARN_ON(!map->epoch); 3318 3319 if (lreq->register_gen) { 3320 lreq->map_dne_bound = map->epoch; 3321 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__, 3322 lreq, lreq->linger_id); 3323 } else { 3324 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n", 3325 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3326 map->epoch); 3327 } 3328 3329 if (lreq->map_dne_bound) { 3330 if (map->epoch >= lreq->map_dne_bound) { 3331 /* we had a new enough map */ 3332 pr_info("linger_id %llu pool does not exist\n", 3333 lreq->linger_id); 3334 linger_reg_commit_complete(lreq, -ENOENT); 3335 __linger_cancel(lreq); 3336 } 3337 } else { 3338 send_linger_map_check(lreq); 3339 } 3340 } 3341 3342 static void linger_map_check_cb(struct ceph_mon_generic_request *greq) 3343 { 3344 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 3345 struct ceph_osd_linger_request *lreq; 3346 u64 linger_id = greq->private_data; 3347 3348 WARN_ON(greq->result || !greq->u.newest); 3349 3350 down_write(&osdc->lock); 3351 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id); 3352 if (!lreq) { 3353 dout("%s linger_id %llu dne\n", __func__, linger_id); 3354 goto out_unlock; 3355 } 3356 3357 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n", 3358 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3359 greq->u.newest); 3360 if (!lreq->map_dne_bound) 3361 lreq->map_dne_bound = greq->u.newest; 3362 erase_linger_mc(&osdc->linger_map_checks, lreq); 3363 check_linger_pool_dne(lreq); 3364 3365 linger_put(lreq); 3366 out_unlock: 3367 up_write(&osdc->lock); 3368 } 3369 3370 static void send_linger_map_check(struct ceph_osd_linger_request *lreq) 3371 { 3372 struct ceph_osd_client *osdc = lreq->osdc; 3373 struct ceph_osd_linger_request *lookup_lreq; 3374 int ret; 3375 3376 verify_osdc_wrlocked(osdc); 3377 3378 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3379 lreq->linger_id); 3380 if (lookup_lreq) { 3381 WARN_ON(lookup_lreq != lreq); 3382 return; 3383 } 3384 3385 linger_get(lreq); 3386 insert_linger_mc(&osdc->linger_map_checks, lreq); 3387 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 3388 linger_map_check_cb, lreq->linger_id); 3389 WARN_ON(ret); 3390 } 3391 3392 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) 3393 { 3394 int ret; 3395 3396 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3397 ret = wait_for_completion_killable(&lreq->reg_commit_wait); 3398 return ret ?: lreq->reg_commit_error; 3399 } 3400 3401 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq, 3402 unsigned long timeout) 3403 { 3404 long left; 3405 3406 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3407 left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait, 3408 ceph_timeout_jiffies(timeout)); 3409 if (left <= 0) 3410 left = left ?: -ETIMEDOUT; 3411 else 3412 left = lreq->notify_finish_error; /* completed */ 3413 3414 return left; 3415 } 3416 3417 /* 3418 * Timeout callback, called every N seconds. When 1 or more OSD 3419 * requests has been active for more than N seconds, we send a keepalive 3420 * (tag + timestamp) to its OSD to ensure any communications channel 3421 * reset is detected. 3422 */ 3423 static void handle_timeout(struct work_struct *work) 3424 { 3425 struct ceph_osd_client *osdc = 3426 container_of(work, struct ceph_osd_client, timeout_work.work); 3427 struct ceph_options *opts = osdc->client->options; 3428 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; 3429 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout; 3430 LIST_HEAD(slow_osds); 3431 struct rb_node *n, *p; 3432 3433 dout("%s osdc %p\n", __func__, osdc); 3434 down_write(&osdc->lock); 3435 3436 /* 3437 * ping osds that are a bit slow. this ensures that if there 3438 * is a break in the TCP connection we will notice, and reopen 3439 * a connection with that osd (from the fault callback). 3440 */ 3441 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 3442 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 3443 bool found = false; 3444 3445 for (p = rb_first(&osd->o_requests); p; ) { 3446 struct ceph_osd_request *req = 3447 rb_entry(p, struct ceph_osd_request, r_node); 3448 3449 p = rb_next(p); /* abort_request() */ 3450 3451 if (time_before(req->r_stamp, cutoff)) { 3452 dout(" req %p tid %llu on osd%d is laggy\n", 3453 req, req->r_tid, osd->o_osd); 3454 found = true; 3455 } 3456 if (opts->osd_request_timeout && 3457 time_before(req->r_start_stamp, expiry_cutoff)) { 3458 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3459 req->r_tid, osd->o_osd); 3460 abort_request(req, -ETIMEDOUT); 3461 } 3462 } 3463 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { 3464 struct ceph_osd_linger_request *lreq = 3465 rb_entry(p, struct ceph_osd_linger_request, node); 3466 3467 dout(" lreq %p linger_id %llu is served by osd%d\n", 3468 lreq, lreq->linger_id, osd->o_osd); 3469 found = true; 3470 3471 mutex_lock(&lreq->lock); 3472 if (lreq->is_watch && lreq->committed && !lreq->last_error) 3473 send_linger_ping(lreq); 3474 mutex_unlock(&lreq->lock); 3475 } 3476 3477 if (found) 3478 list_move_tail(&osd->o_keepalive_item, &slow_osds); 3479 } 3480 3481 if (opts->osd_request_timeout) { 3482 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { 3483 struct ceph_osd_request *req = 3484 rb_entry(p, struct ceph_osd_request, r_node); 3485 3486 p = rb_next(p); /* abort_request() */ 3487 3488 if (time_before(req->r_start_stamp, expiry_cutoff)) { 3489 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3490 req->r_tid, osdc->homeless_osd.o_osd); 3491 abort_request(req, -ETIMEDOUT); 3492 } 3493 } 3494 } 3495 3496 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) 3497 maybe_request_map(osdc); 3498 3499 while (!list_empty(&slow_osds)) { 3500 struct ceph_osd *osd = list_first_entry(&slow_osds, 3501 struct ceph_osd, 3502 o_keepalive_item); 3503 list_del_init(&osd->o_keepalive_item); 3504 ceph_con_keepalive(&osd->o_con); 3505 } 3506 3507 up_write(&osdc->lock); 3508 schedule_delayed_work(&osdc->timeout_work, 3509 osdc->client->options->osd_keepalive_timeout); 3510 } 3511 3512 static void handle_osds_timeout(struct work_struct *work) 3513 { 3514 struct ceph_osd_client *osdc = 3515 container_of(work, struct ceph_osd_client, 3516 osds_timeout_work.work); 3517 unsigned long delay = osdc->client->options->osd_idle_ttl / 4; 3518 struct ceph_osd *osd, *nosd; 3519 3520 dout("%s osdc %p\n", __func__, osdc); 3521 down_write(&osdc->lock); 3522 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 3523 if (time_before(jiffies, osd->lru_ttl)) 3524 break; 3525 3526 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); 3527 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); 3528 close_osd(osd); 3529 } 3530 3531 up_write(&osdc->lock); 3532 schedule_delayed_work(&osdc->osds_timeout_work, 3533 round_jiffies_relative(delay)); 3534 } 3535 3536 static int ceph_oloc_decode(void **p, void *end, 3537 struct ceph_object_locator *oloc) 3538 { 3539 u8 struct_v, struct_cv; 3540 u32 len; 3541 void *struct_end; 3542 int ret = 0; 3543 3544 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3545 struct_v = ceph_decode_8(p); 3546 struct_cv = ceph_decode_8(p); 3547 if (struct_v < 3) { 3548 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n", 3549 struct_v, struct_cv); 3550 goto e_inval; 3551 } 3552 if (struct_cv > 6) { 3553 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n", 3554 struct_v, struct_cv); 3555 goto e_inval; 3556 } 3557 len = ceph_decode_32(p); 3558 ceph_decode_need(p, end, len, e_inval); 3559 struct_end = *p + len; 3560 3561 oloc->pool = ceph_decode_64(p); 3562 *p += 4; /* skip preferred */ 3563 3564 len = ceph_decode_32(p); 3565 if (len > 0) { 3566 pr_warn("ceph_object_locator::key is set\n"); 3567 goto e_inval; 3568 } 3569 3570 if (struct_v >= 5) { 3571 bool changed = false; 3572 3573 len = ceph_decode_32(p); 3574 if (len > 0) { 3575 ceph_decode_need(p, end, len, e_inval); 3576 if (!oloc->pool_ns || 3577 ceph_compare_string(oloc->pool_ns, *p, len)) 3578 changed = true; 3579 *p += len; 3580 } else { 3581 if (oloc->pool_ns) 3582 changed = true; 3583 } 3584 if (changed) { 3585 /* redirect changes namespace */ 3586 pr_warn("ceph_object_locator::nspace is changed\n"); 3587 goto e_inval; 3588 } 3589 } 3590 3591 if (struct_v >= 6) { 3592 s64 hash = ceph_decode_64(p); 3593 if (hash != -1) { 3594 pr_warn("ceph_object_locator::hash is set\n"); 3595 goto e_inval; 3596 } 3597 } 3598 3599 /* skip the rest */ 3600 *p = struct_end; 3601 out: 3602 return ret; 3603 3604 e_inval: 3605 ret = -EINVAL; 3606 goto out; 3607 } 3608 3609 static int ceph_redirect_decode(void **p, void *end, 3610 struct ceph_request_redirect *redir) 3611 { 3612 u8 struct_v, struct_cv; 3613 u32 len; 3614 void *struct_end; 3615 int ret; 3616 3617 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3618 struct_v = ceph_decode_8(p); 3619 struct_cv = ceph_decode_8(p); 3620 if (struct_cv > 1) { 3621 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n", 3622 struct_v, struct_cv); 3623 goto e_inval; 3624 } 3625 len = ceph_decode_32(p); 3626 ceph_decode_need(p, end, len, e_inval); 3627 struct_end = *p + len; 3628 3629 ret = ceph_oloc_decode(p, end, &redir->oloc); 3630 if (ret) 3631 goto out; 3632 3633 len = ceph_decode_32(p); 3634 if (len > 0) { 3635 pr_warn("ceph_request_redirect::object_name is set\n"); 3636 goto e_inval; 3637 } 3638 3639 /* skip the rest */ 3640 *p = struct_end; 3641 out: 3642 return ret; 3643 3644 e_inval: 3645 ret = -EINVAL; 3646 goto out; 3647 } 3648 3649 struct MOSDOpReply { 3650 struct ceph_pg pgid; 3651 u64 flags; 3652 int result; 3653 u32 epoch; 3654 int num_ops; 3655 u32 outdata_len[CEPH_OSD_MAX_OPS]; 3656 s32 rval[CEPH_OSD_MAX_OPS]; 3657 int retry_attempt; 3658 struct ceph_eversion replay_version; 3659 u64 user_version; 3660 struct ceph_request_redirect redirect; 3661 }; 3662 3663 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m) 3664 { 3665 void *p = msg->front.iov_base; 3666 void *const end = p + msg->front.iov_len; 3667 u16 version = le16_to_cpu(msg->hdr.version); 3668 struct ceph_eversion bad_replay_version; 3669 u8 decode_redir; 3670 u32 len; 3671 int ret; 3672 int i; 3673 3674 ceph_decode_32_safe(&p, end, len, e_inval); 3675 ceph_decode_need(&p, end, len, e_inval); 3676 p += len; /* skip oid */ 3677 3678 ret = ceph_decode_pgid(&p, end, &m->pgid); 3679 if (ret) 3680 return ret; 3681 3682 ceph_decode_64_safe(&p, end, m->flags, e_inval); 3683 ceph_decode_32_safe(&p, end, m->result, e_inval); 3684 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval); 3685 memcpy(&bad_replay_version, p, sizeof(bad_replay_version)); 3686 p += sizeof(bad_replay_version); 3687 ceph_decode_32_safe(&p, end, m->epoch, e_inval); 3688 3689 ceph_decode_32_safe(&p, end, m->num_ops, e_inval); 3690 if (m->num_ops > ARRAY_SIZE(m->outdata_len)) 3691 goto e_inval; 3692 3693 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op), 3694 e_inval); 3695 for (i = 0; i < m->num_ops; i++) { 3696 struct ceph_osd_op *op = p; 3697 3698 m->outdata_len[i] = le32_to_cpu(op->payload_len); 3699 p += sizeof(*op); 3700 } 3701 3702 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval); 3703 for (i = 0; i < m->num_ops; i++) 3704 ceph_decode_32_safe(&p, end, m->rval[i], e_inval); 3705 3706 if (version >= 5) { 3707 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval); 3708 memcpy(&m->replay_version, p, sizeof(m->replay_version)); 3709 p += sizeof(m->replay_version); 3710 ceph_decode_64_safe(&p, end, m->user_version, e_inval); 3711 } else { 3712 m->replay_version = bad_replay_version; /* struct */ 3713 m->user_version = le64_to_cpu(m->replay_version.version); 3714 } 3715 3716 if (version >= 6) { 3717 if (version >= 7) 3718 ceph_decode_8_safe(&p, end, decode_redir, e_inval); 3719 else 3720 decode_redir = 1; 3721 } else { 3722 decode_redir = 0; 3723 } 3724 3725 if (decode_redir) { 3726 ret = ceph_redirect_decode(&p, end, &m->redirect); 3727 if (ret) 3728 return ret; 3729 } else { 3730 ceph_oloc_init(&m->redirect.oloc); 3731 } 3732 3733 return 0; 3734 3735 e_inval: 3736 return -EINVAL; 3737 } 3738 3739 /* 3740 * Handle MOSDOpReply. Set ->r_result and call the callback if it is 3741 * specified. 3742 */ 3743 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) 3744 { 3745 struct ceph_osd_client *osdc = osd->o_osdc; 3746 struct ceph_osd_request *req; 3747 struct MOSDOpReply m; 3748 u64 tid = le64_to_cpu(msg->hdr.tid); 3749 u32 data_len = 0; 3750 int ret; 3751 int i; 3752 3753 dout("%s msg %p tid %llu\n", __func__, msg, tid); 3754 3755 down_read(&osdc->lock); 3756 if (!osd_registered(osd)) { 3757 dout("%s osd%d unknown\n", __func__, osd->o_osd); 3758 goto out_unlock_osdc; 3759 } 3760 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); 3761 3762 mutex_lock(&osd->lock); 3763 req = lookup_request(&osd->o_requests, tid); 3764 if (!req) { 3765 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid); 3766 goto out_unlock_session; 3767 } 3768 3769 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns; 3770 ret = decode_MOSDOpReply(msg, &m); 3771 m.redirect.oloc.pool_ns = NULL; 3772 if (ret) { 3773 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n", 3774 req->r_tid, ret); 3775 ceph_msg_dump(msg); 3776 goto fail_request; 3777 } 3778 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n", 3779 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed, 3780 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch), 3781 le64_to_cpu(m.replay_version.version), m.user_version); 3782 3783 if (m.retry_attempt >= 0) { 3784 if (m.retry_attempt != req->r_attempts - 1) { 3785 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n", 3786 req, req->r_tid, m.retry_attempt, 3787 req->r_attempts - 1); 3788 goto out_unlock_session; 3789 } 3790 } else { 3791 WARN_ON(1); /* MOSDOpReply v4 is assumed */ 3792 } 3793 3794 if (!ceph_oloc_empty(&m.redirect.oloc)) { 3795 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid, 3796 m.redirect.oloc.pool); 3797 unlink_request(osd, req); 3798 mutex_unlock(&osd->lock); 3799 3800 /* 3801 * Not ceph_oloc_copy() - changing pool_ns is not 3802 * supported. 3803 */ 3804 req->r_t.target_oloc.pool = m.redirect.oloc.pool; 3805 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | 3806 CEPH_OSD_FLAG_IGNORE_OVERLAY | 3807 CEPH_OSD_FLAG_IGNORE_CACHE; 3808 req->r_tid = 0; 3809 __submit_request(req, false); 3810 goto out_unlock_osdc; 3811 } 3812 3813 if (m.result == -EAGAIN) { 3814 dout("req %p tid %llu EAGAIN\n", req, req->r_tid); 3815 unlink_request(osd, req); 3816 mutex_unlock(&osd->lock); 3817 3818 /* 3819 * The object is missing on the replica or not (yet) 3820 * readable. Clear pgid to force a resend to the primary 3821 * via legacy_change. 3822 */ 3823 req->r_t.pgid.pool = 0; 3824 req->r_t.pgid.seed = 0; 3825 WARN_ON(!req->r_t.used_replica); 3826 req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS | 3827 CEPH_OSD_FLAG_LOCALIZE_READS); 3828 req->r_tid = 0; 3829 __submit_request(req, false); 3830 goto out_unlock_osdc; 3831 } 3832 3833 if (m.num_ops != req->r_num_ops) { 3834 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops, 3835 req->r_num_ops, req->r_tid); 3836 goto fail_request; 3837 } 3838 for (i = 0; i < req->r_num_ops; i++) { 3839 dout(" req %p tid %llu op %d rval %d len %u\n", req, 3840 req->r_tid, i, m.rval[i], m.outdata_len[i]); 3841 req->r_ops[i].rval = m.rval[i]; 3842 req->r_ops[i].outdata_len = m.outdata_len[i]; 3843 data_len += m.outdata_len[i]; 3844 } 3845 if (data_len != le32_to_cpu(msg->hdr.data_len)) { 3846 pr_err("sum of lens %u != %u for tid %llu\n", data_len, 3847 le32_to_cpu(msg->hdr.data_len), req->r_tid); 3848 goto fail_request; 3849 } 3850 dout("%s req %p tid %llu result %d data_len %u\n", __func__, 3851 req, req->r_tid, m.result, data_len); 3852 3853 /* 3854 * Since we only ever request ONDISK, we should only ever get 3855 * one (type of) reply back. 3856 */ 3857 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK)); 3858 req->r_version = m.user_version; 3859 req->r_result = m.result ?: data_len; 3860 finish_request(req); 3861 mutex_unlock(&osd->lock); 3862 up_read(&osdc->lock); 3863 3864 __complete_request(req); 3865 return; 3866 3867 fail_request: 3868 complete_request(req, -EIO); 3869 out_unlock_session: 3870 mutex_unlock(&osd->lock); 3871 out_unlock_osdc: 3872 up_read(&osdc->lock); 3873 } 3874 3875 static void set_pool_was_full(struct ceph_osd_client *osdc) 3876 { 3877 struct rb_node *n; 3878 3879 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { 3880 struct ceph_pg_pool_info *pi = 3881 rb_entry(n, struct ceph_pg_pool_info, node); 3882 3883 pi->was_full = __pool_full(pi); 3884 } 3885 } 3886 3887 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id) 3888 { 3889 struct ceph_pg_pool_info *pi; 3890 3891 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id); 3892 if (!pi) 3893 return false; 3894 3895 return pi->was_full && !__pool_full(pi); 3896 } 3897 3898 static enum calc_target_result 3899 recalc_linger_target(struct ceph_osd_linger_request *lreq) 3900 { 3901 struct ceph_osd_client *osdc = lreq->osdc; 3902 enum calc_target_result ct_res; 3903 3904 ct_res = calc_target(osdc, &lreq->t, true); 3905 if (ct_res == CALC_TARGET_NEED_RESEND) { 3906 struct ceph_osd *osd; 3907 3908 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3909 if (osd != lreq->osd) { 3910 unlink_linger(lreq->osd, lreq); 3911 link_linger(osd, lreq); 3912 } 3913 } 3914 3915 return ct_res; 3916 } 3917 3918 /* 3919 * Requeue requests whose mapping to an OSD has changed. 3920 */ 3921 static void scan_requests(struct ceph_osd *osd, 3922 bool force_resend, 3923 bool cleared_full, 3924 bool check_pool_cleared_full, 3925 struct rb_root *need_resend, 3926 struct list_head *need_resend_linger) 3927 { 3928 struct ceph_osd_client *osdc = osd->o_osdc; 3929 struct rb_node *n; 3930 bool force_resend_writes; 3931 3932 for (n = rb_first(&osd->o_linger_requests); n; ) { 3933 struct ceph_osd_linger_request *lreq = 3934 rb_entry(n, struct ceph_osd_linger_request, node); 3935 enum calc_target_result ct_res; 3936 3937 n = rb_next(n); /* recalc_linger_target() */ 3938 3939 dout("%s lreq %p linger_id %llu\n", __func__, lreq, 3940 lreq->linger_id); 3941 ct_res = recalc_linger_target(lreq); 3942 switch (ct_res) { 3943 case CALC_TARGET_NO_ACTION: 3944 force_resend_writes = cleared_full || 3945 (check_pool_cleared_full && 3946 pool_cleared_full(osdc, lreq->t.base_oloc.pool)); 3947 if (!force_resend && !force_resend_writes) 3948 break; 3949 3950 fallthrough; 3951 case CALC_TARGET_NEED_RESEND: 3952 cancel_linger_map_check(lreq); 3953 /* 3954 * scan_requests() for the previous epoch(s) 3955 * may have already added it to the list, since 3956 * it's not unlinked here. 3957 */ 3958 if (list_empty(&lreq->scan_item)) 3959 list_add_tail(&lreq->scan_item, need_resend_linger); 3960 break; 3961 case CALC_TARGET_POOL_DNE: 3962 list_del_init(&lreq->scan_item); 3963 check_linger_pool_dne(lreq); 3964 break; 3965 } 3966 } 3967 3968 for (n = rb_first(&osd->o_requests); n; ) { 3969 struct ceph_osd_request *req = 3970 rb_entry(n, struct ceph_osd_request, r_node); 3971 enum calc_target_result ct_res; 3972 3973 n = rb_next(n); /* unlink_request(), check_pool_dne() */ 3974 3975 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 3976 ct_res = calc_target(osdc, &req->r_t, false); 3977 switch (ct_res) { 3978 case CALC_TARGET_NO_ACTION: 3979 force_resend_writes = cleared_full || 3980 (check_pool_cleared_full && 3981 pool_cleared_full(osdc, req->r_t.base_oloc.pool)); 3982 if (!force_resend && 3983 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) || 3984 !force_resend_writes)) 3985 break; 3986 3987 fallthrough; 3988 case CALC_TARGET_NEED_RESEND: 3989 cancel_map_check(req); 3990 unlink_request(osd, req); 3991 insert_request(need_resend, req); 3992 break; 3993 case CALC_TARGET_POOL_DNE: 3994 check_pool_dne(req); 3995 break; 3996 } 3997 } 3998 } 3999 4000 static int handle_one_map(struct ceph_osd_client *osdc, 4001 void *p, void *end, bool incremental, 4002 struct rb_root *need_resend, 4003 struct list_head *need_resend_linger) 4004 { 4005 struct ceph_osdmap *newmap; 4006 struct rb_node *n; 4007 bool skipped_map = false; 4008 bool was_full; 4009 4010 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); 4011 set_pool_was_full(osdc); 4012 4013 if (incremental) 4014 newmap = osdmap_apply_incremental(&p, end, 4015 ceph_msgr2(osdc->client), 4016 osdc->osdmap); 4017 else 4018 newmap = ceph_osdmap_decode(&p, end, ceph_msgr2(osdc->client)); 4019 if (IS_ERR(newmap)) 4020 return PTR_ERR(newmap); 4021 4022 if (newmap != osdc->osdmap) { 4023 /* 4024 * Preserve ->was_full before destroying the old map. 4025 * For pools that weren't in the old map, ->was_full 4026 * should be false. 4027 */ 4028 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) { 4029 struct ceph_pg_pool_info *pi = 4030 rb_entry(n, struct ceph_pg_pool_info, node); 4031 struct ceph_pg_pool_info *old_pi; 4032 4033 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id); 4034 if (old_pi) 4035 pi->was_full = old_pi->was_full; 4036 else 4037 WARN_ON(pi->was_full); 4038 } 4039 4040 if (osdc->osdmap->epoch && 4041 osdc->osdmap->epoch + 1 < newmap->epoch) { 4042 WARN_ON(incremental); 4043 skipped_map = true; 4044 } 4045 4046 ceph_osdmap_destroy(osdc->osdmap); 4047 osdc->osdmap = newmap; 4048 } 4049 4050 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); 4051 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true, 4052 need_resend, need_resend_linger); 4053 4054 for (n = rb_first(&osdc->osds); n; ) { 4055 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 4056 4057 n = rb_next(n); /* close_osd() */ 4058 4059 scan_requests(osd, skipped_map, was_full, true, need_resend, 4060 need_resend_linger); 4061 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || 4062 memcmp(&osd->o_con.peer_addr, 4063 ceph_osd_addr(osdc->osdmap, osd->o_osd), 4064 sizeof(struct ceph_entity_addr))) 4065 close_osd(osd); 4066 } 4067 4068 return 0; 4069 } 4070 4071 static void kick_requests(struct ceph_osd_client *osdc, 4072 struct rb_root *need_resend, 4073 struct list_head *need_resend_linger) 4074 { 4075 struct ceph_osd_linger_request *lreq, *nlreq; 4076 enum calc_target_result ct_res; 4077 struct rb_node *n; 4078 4079 /* make sure need_resend targets reflect latest map */ 4080 for (n = rb_first(need_resend); n; ) { 4081 struct ceph_osd_request *req = 4082 rb_entry(n, struct ceph_osd_request, r_node); 4083 4084 n = rb_next(n); 4085 4086 if (req->r_t.epoch < osdc->osdmap->epoch) { 4087 ct_res = calc_target(osdc, &req->r_t, false); 4088 if (ct_res == CALC_TARGET_POOL_DNE) { 4089 erase_request(need_resend, req); 4090 check_pool_dne(req); 4091 } 4092 } 4093 } 4094 4095 for (n = rb_first(need_resend); n; ) { 4096 struct ceph_osd_request *req = 4097 rb_entry(n, struct ceph_osd_request, r_node); 4098 struct ceph_osd *osd; 4099 4100 n = rb_next(n); 4101 erase_request(need_resend, req); /* before link_request() */ 4102 4103 osd = lookup_create_osd(osdc, req->r_t.osd, true); 4104 link_request(osd, req); 4105 if (!req->r_linger) { 4106 if (!osd_homeless(osd) && !req->r_t.paused) 4107 send_request(req); 4108 } else { 4109 cancel_linger_request(req); 4110 } 4111 } 4112 4113 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) { 4114 if (!osd_homeless(lreq->osd)) 4115 send_linger(lreq); 4116 4117 list_del_init(&lreq->scan_item); 4118 } 4119 } 4120 4121 /* 4122 * Process updated osd map. 4123 * 4124 * The message contains any number of incremental and full maps, normally 4125 * indicating some sort of topology change in the cluster. Kick requests 4126 * off to different OSDs as needed. 4127 */ 4128 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) 4129 { 4130 void *p = msg->front.iov_base; 4131 void *const end = p + msg->front.iov_len; 4132 u32 nr_maps, maplen; 4133 u32 epoch; 4134 struct ceph_fsid fsid; 4135 struct rb_root need_resend = RB_ROOT; 4136 LIST_HEAD(need_resend_linger); 4137 bool handled_incremental = false; 4138 bool was_pauserd, was_pausewr; 4139 bool pauserd, pausewr; 4140 int err; 4141 4142 dout("%s have %u\n", __func__, osdc->osdmap->epoch); 4143 down_write(&osdc->lock); 4144 4145 /* verify fsid */ 4146 ceph_decode_need(&p, end, sizeof(fsid), bad); 4147 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 4148 if (ceph_check_fsid(osdc->client, &fsid) < 0) 4149 goto bad; 4150 4151 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); 4152 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || 4153 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 4154 have_pool_full(osdc); 4155 4156 /* incremental maps */ 4157 ceph_decode_32_safe(&p, end, nr_maps, bad); 4158 dout(" %d inc maps\n", nr_maps); 4159 while (nr_maps > 0) { 4160 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 4161 epoch = ceph_decode_32(&p); 4162 maplen = ceph_decode_32(&p); 4163 ceph_decode_need(&p, end, maplen, bad); 4164 if (osdc->osdmap->epoch && 4165 osdc->osdmap->epoch + 1 == epoch) { 4166 dout("applying incremental map %u len %d\n", 4167 epoch, maplen); 4168 err = handle_one_map(osdc, p, p + maplen, true, 4169 &need_resend, &need_resend_linger); 4170 if (err) 4171 goto bad; 4172 handled_incremental = true; 4173 } else { 4174 dout("ignoring incremental map %u len %d\n", 4175 epoch, maplen); 4176 } 4177 p += maplen; 4178 nr_maps--; 4179 } 4180 if (handled_incremental) 4181 goto done; 4182 4183 /* full maps */ 4184 ceph_decode_32_safe(&p, end, nr_maps, bad); 4185 dout(" %d full maps\n", nr_maps); 4186 while (nr_maps) { 4187 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 4188 epoch = ceph_decode_32(&p); 4189 maplen = ceph_decode_32(&p); 4190 ceph_decode_need(&p, end, maplen, bad); 4191 if (nr_maps > 1) { 4192 dout("skipping non-latest full map %u len %d\n", 4193 epoch, maplen); 4194 } else if (osdc->osdmap->epoch >= epoch) { 4195 dout("skipping full map %u len %d, " 4196 "older than our %u\n", epoch, maplen, 4197 osdc->osdmap->epoch); 4198 } else { 4199 dout("taking full map %u len %d\n", epoch, maplen); 4200 err = handle_one_map(osdc, p, p + maplen, false, 4201 &need_resend, &need_resend_linger); 4202 if (err) 4203 goto bad; 4204 } 4205 p += maplen; 4206 nr_maps--; 4207 } 4208 4209 done: 4210 /* 4211 * subscribe to subsequent osdmap updates if full to ensure 4212 * we find out when we are no longer full and stop returning 4213 * ENOSPC. 4214 */ 4215 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); 4216 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || 4217 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 4218 have_pool_full(osdc); 4219 if (was_pauserd || was_pausewr || pauserd || pausewr || 4220 osdc->osdmap->epoch < osdc->epoch_barrier) 4221 maybe_request_map(osdc); 4222 4223 kick_requests(osdc, &need_resend, &need_resend_linger); 4224 4225 ceph_osdc_abort_on_full(osdc); 4226 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP, 4227 osdc->osdmap->epoch); 4228 up_write(&osdc->lock); 4229 wake_up_all(&osdc->client->auth_wq); 4230 return; 4231 4232 bad: 4233 pr_err("osdc handle_map corrupt msg\n"); 4234 ceph_msg_dump(msg); 4235 up_write(&osdc->lock); 4236 } 4237 4238 /* 4239 * Resubmit requests pending on the given osd. 4240 */ 4241 static void kick_osd_requests(struct ceph_osd *osd) 4242 { 4243 struct rb_node *n; 4244 4245 clear_backoffs(osd); 4246 4247 for (n = rb_first(&osd->o_requests); n; ) { 4248 struct ceph_osd_request *req = 4249 rb_entry(n, struct ceph_osd_request, r_node); 4250 4251 n = rb_next(n); /* cancel_linger_request() */ 4252 4253 if (!req->r_linger) { 4254 if (!req->r_t.paused) 4255 send_request(req); 4256 } else { 4257 cancel_linger_request(req); 4258 } 4259 } 4260 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) { 4261 struct ceph_osd_linger_request *lreq = 4262 rb_entry(n, struct ceph_osd_linger_request, node); 4263 4264 send_linger(lreq); 4265 } 4266 } 4267 4268 /* 4269 * If the osd connection drops, we need to resubmit all requests. 4270 */ 4271 static void osd_fault(struct ceph_connection *con) 4272 { 4273 struct ceph_osd *osd = con->private; 4274 struct ceph_osd_client *osdc = osd->o_osdc; 4275 4276 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 4277 4278 down_write(&osdc->lock); 4279 if (!osd_registered(osd)) { 4280 dout("%s osd%d unknown\n", __func__, osd->o_osd); 4281 goto out_unlock; 4282 } 4283 4284 if (!reopen_osd(osd)) 4285 kick_osd_requests(osd); 4286 maybe_request_map(osdc); 4287 4288 out_unlock: 4289 up_write(&osdc->lock); 4290 } 4291 4292 struct MOSDBackoff { 4293 struct ceph_spg spgid; 4294 u32 map_epoch; 4295 u8 op; 4296 u64 id; 4297 struct ceph_hobject_id *begin; 4298 struct ceph_hobject_id *end; 4299 }; 4300 4301 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m) 4302 { 4303 void *p = msg->front.iov_base; 4304 void *const end = p + msg->front.iov_len; 4305 u8 struct_v; 4306 u32 struct_len; 4307 int ret; 4308 4309 ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len); 4310 if (ret) 4311 return ret; 4312 4313 ret = ceph_decode_pgid(&p, end, &m->spgid.pgid); 4314 if (ret) 4315 return ret; 4316 4317 ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval); 4318 ceph_decode_32_safe(&p, end, m->map_epoch, e_inval); 4319 ceph_decode_8_safe(&p, end, m->op, e_inval); 4320 ceph_decode_64_safe(&p, end, m->id, e_inval); 4321 4322 m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO); 4323 if (!m->begin) 4324 return -ENOMEM; 4325 4326 ret = decode_hoid(&p, end, m->begin); 4327 if (ret) { 4328 free_hoid(m->begin); 4329 return ret; 4330 } 4331 4332 m->end = kzalloc(sizeof(*m->end), GFP_NOIO); 4333 if (!m->end) { 4334 free_hoid(m->begin); 4335 return -ENOMEM; 4336 } 4337 4338 ret = decode_hoid(&p, end, m->end); 4339 if (ret) { 4340 free_hoid(m->begin); 4341 free_hoid(m->end); 4342 return ret; 4343 } 4344 4345 return 0; 4346 4347 e_inval: 4348 return -EINVAL; 4349 } 4350 4351 static struct ceph_msg *create_backoff_message( 4352 const struct ceph_osd_backoff *backoff, 4353 u32 map_epoch) 4354 { 4355 struct ceph_msg *msg; 4356 void *p, *end; 4357 int msg_size; 4358 4359 msg_size = CEPH_ENCODING_START_BLK_LEN + 4360 CEPH_PGID_ENCODING_LEN + 1; /* spgid */ 4361 msg_size += 4 + 1 + 8; /* map_epoch, op, id */ 4362 msg_size += CEPH_ENCODING_START_BLK_LEN + 4363 hoid_encoding_size(backoff->begin); 4364 msg_size += CEPH_ENCODING_START_BLK_LEN + 4365 hoid_encoding_size(backoff->end); 4366 4367 msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true); 4368 if (!msg) 4369 return NULL; 4370 4371 p = msg->front.iov_base; 4372 end = p + msg->front_alloc_len; 4373 4374 encode_spgid(&p, &backoff->spgid); 4375 ceph_encode_32(&p, map_epoch); 4376 ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK); 4377 ceph_encode_64(&p, backoff->id); 4378 encode_hoid(&p, end, backoff->begin); 4379 encode_hoid(&p, end, backoff->end); 4380 BUG_ON(p != end); 4381 4382 msg->front.iov_len = p - msg->front.iov_base; 4383 msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */ 4384 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 4385 4386 return msg; 4387 } 4388 4389 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m) 4390 { 4391 struct ceph_spg_mapping *spg; 4392 struct ceph_osd_backoff *backoff; 4393 struct ceph_msg *msg; 4394 4395 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, 4396 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id); 4397 4398 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid); 4399 if (!spg) { 4400 spg = alloc_spg_mapping(); 4401 if (!spg) { 4402 pr_err("%s failed to allocate spg\n", __func__); 4403 return; 4404 } 4405 spg->spgid = m->spgid; /* struct */ 4406 insert_spg_mapping(&osd->o_backoff_mappings, spg); 4407 } 4408 4409 backoff = alloc_backoff(); 4410 if (!backoff) { 4411 pr_err("%s failed to allocate backoff\n", __func__); 4412 return; 4413 } 4414 backoff->spgid = m->spgid; /* struct */ 4415 backoff->id = m->id; 4416 backoff->begin = m->begin; 4417 m->begin = NULL; /* backoff now owns this */ 4418 backoff->end = m->end; 4419 m->end = NULL; /* ditto */ 4420 4421 insert_backoff(&spg->backoffs, backoff); 4422 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff); 4423 4424 /* 4425 * Ack with original backoff's epoch so that the OSD can 4426 * discard this if there was a PG split. 4427 */ 4428 msg = create_backoff_message(backoff, m->map_epoch); 4429 if (!msg) { 4430 pr_err("%s failed to allocate msg\n", __func__); 4431 return; 4432 } 4433 ceph_con_send(&osd->o_con, msg); 4434 } 4435 4436 static bool target_contained_by(const struct ceph_osd_request_target *t, 4437 const struct ceph_hobject_id *begin, 4438 const struct ceph_hobject_id *end) 4439 { 4440 struct ceph_hobject_id hoid; 4441 int cmp; 4442 4443 hoid_fill_from_target(&hoid, t); 4444 cmp = hoid_compare(&hoid, begin); 4445 return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0); 4446 } 4447 4448 static void handle_backoff_unblock(struct ceph_osd *osd, 4449 const struct MOSDBackoff *m) 4450 { 4451 struct ceph_spg_mapping *spg; 4452 struct ceph_osd_backoff *backoff; 4453 struct rb_node *n; 4454 4455 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, 4456 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id); 4457 4458 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id); 4459 if (!backoff) { 4460 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n", 4461 __func__, osd->o_osd, m->spgid.pgid.pool, 4462 m->spgid.pgid.seed, m->spgid.shard, m->id); 4463 return; 4464 } 4465 4466 if (hoid_compare(backoff->begin, m->begin) && 4467 hoid_compare(backoff->end, m->end)) { 4468 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n", 4469 __func__, osd->o_osd, m->spgid.pgid.pool, 4470 m->spgid.pgid.seed, m->spgid.shard, m->id); 4471 /* unblock it anyway... */ 4472 } 4473 4474 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid); 4475 BUG_ON(!spg); 4476 4477 erase_backoff(&spg->backoffs, backoff); 4478 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); 4479 free_backoff(backoff); 4480 4481 if (RB_EMPTY_ROOT(&spg->backoffs)) { 4482 erase_spg_mapping(&osd->o_backoff_mappings, spg); 4483 free_spg_mapping(spg); 4484 } 4485 4486 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { 4487 struct ceph_osd_request *req = 4488 rb_entry(n, struct ceph_osd_request, r_node); 4489 4490 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) { 4491 /* 4492 * Match against @m, not @backoff -- the PG may 4493 * have split on the OSD. 4494 */ 4495 if (target_contained_by(&req->r_t, m->begin, m->end)) { 4496 /* 4497 * If no other installed backoff applies, 4498 * resend. 4499 */ 4500 send_request(req); 4501 } 4502 } 4503 } 4504 } 4505 4506 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg) 4507 { 4508 struct ceph_osd_client *osdc = osd->o_osdc; 4509 struct MOSDBackoff m; 4510 int ret; 4511 4512 down_read(&osdc->lock); 4513 if (!osd_registered(osd)) { 4514 dout("%s osd%d unknown\n", __func__, osd->o_osd); 4515 up_read(&osdc->lock); 4516 return; 4517 } 4518 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); 4519 4520 mutex_lock(&osd->lock); 4521 ret = decode_MOSDBackoff(msg, &m); 4522 if (ret) { 4523 pr_err("failed to decode MOSDBackoff: %d\n", ret); 4524 ceph_msg_dump(msg); 4525 goto out_unlock; 4526 } 4527 4528 switch (m.op) { 4529 case CEPH_OSD_BACKOFF_OP_BLOCK: 4530 handle_backoff_block(osd, &m); 4531 break; 4532 case CEPH_OSD_BACKOFF_OP_UNBLOCK: 4533 handle_backoff_unblock(osd, &m); 4534 break; 4535 default: 4536 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op); 4537 } 4538 4539 free_hoid(m.begin); 4540 free_hoid(m.end); 4541 4542 out_unlock: 4543 mutex_unlock(&osd->lock); 4544 up_read(&osdc->lock); 4545 } 4546 4547 /* 4548 * Process osd watch notifications 4549 */ 4550 static void handle_watch_notify(struct ceph_osd_client *osdc, 4551 struct ceph_msg *msg) 4552 { 4553 void *p = msg->front.iov_base; 4554 void *const end = p + msg->front.iov_len; 4555 struct ceph_osd_linger_request *lreq; 4556 struct linger_work *lwork; 4557 u8 proto_ver, opcode; 4558 u64 cookie, notify_id; 4559 u64 notifier_id = 0; 4560 s32 return_code = 0; 4561 void *payload = NULL; 4562 u32 payload_len = 0; 4563 4564 ceph_decode_8_safe(&p, end, proto_ver, bad); 4565 ceph_decode_8_safe(&p, end, opcode, bad); 4566 ceph_decode_64_safe(&p, end, cookie, bad); 4567 p += 8; /* skip ver */ 4568 ceph_decode_64_safe(&p, end, notify_id, bad); 4569 4570 if (proto_ver >= 1) { 4571 ceph_decode_32_safe(&p, end, payload_len, bad); 4572 ceph_decode_need(&p, end, payload_len, bad); 4573 payload = p; 4574 p += payload_len; 4575 } 4576 4577 if (le16_to_cpu(msg->hdr.version) >= 2) 4578 ceph_decode_32_safe(&p, end, return_code, bad); 4579 4580 if (le16_to_cpu(msg->hdr.version) >= 3) 4581 ceph_decode_64_safe(&p, end, notifier_id, bad); 4582 4583 down_read(&osdc->lock); 4584 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie); 4585 if (!lreq) { 4586 dout("%s opcode %d cookie %llu dne\n", __func__, opcode, 4587 cookie); 4588 goto out_unlock_osdc; 4589 } 4590 4591 mutex_lock(&lreq->lock); 4592 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__, 4593 opcode, cookie, lreq, lreq->is_watch); 4594 if (opcode == CEPH_WATCH_EVENT_DISCONNECT) { 4595 if (!lreq->last_error) { 4596 lreq->last_error = -ENOTCONN; 4597 queue_watch_error(lreq); 4598 } 4599 } else if (!lreq->is_watch) { 4600 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */ 4601 if (lreq->notify_id && lreq->notify_id != notify_id) { 4602 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq, 4603 lreq->notify_id, notify_id); 4604 } else if (!completion_done(&lreq->notify_finish_wait)) { 4605 struct ceph_msg_data *data = 4606 msg->num_data_items ? &msg->data[0] : NULL; 4607 4608 if (data) { 4609 if (lreq->preply_pages) { 4610 WARN_ON(data->type != 4611 CEPH_MSG_DATA_PAGES); 4612 *lreq->preply_pages = data->pages; 4613 *lreq->preply_len = data->length; 4614 data->own_pages = false; 4615 } 4616 } 4617 lreq->notify_finish_error = return_code; 4618 complete_all(&lreq->notify_finish_wait); 4619 } 4620 } else { 4621 /* CEPH_WATCH_EVENT_NOTIFY */ 4622 lwork = lwork_alloc(lreq, do_watch_notify); 4623 if (!lwork) { 4624 pr_err("failed to allocate notify-lwork\n"); 4625 goto out_unlock_lreq; 4626 } 4627 4628 lwork->notify.notify_id = notify_id; 4629 lwork->notify.notifier_id = notifier_id; 4630 lwork->notify.payload = payload; 4631 lwork->notify.payload_len = payload_len; 4632 lwork->notify.msg = ceph_msg_get(msg); 4633 lwork_queue(lwork); 4634 } 4635 4636 out_unlock_lreq: 4637 mutex_unlock(&lreq->lock); 4638 out_unlock_osdc: 4639 up_read(&osdc->lock); 4640 return; 4641 4642 bad: 4643 pr_err("osdc handle_watch_notify corrupt msg\n"); 4644 } 4645 4646 /* 4647 * Register request, send initial attempt. 4648 */ 4649 void ceph_osdc_start_request(struct ceph_osd_client *osdc, 4650 struct ceph_osd_request *req) 4651 { 4652 down_read(&osdc->lock); 4653 submit_request(req, false); 4654 up_read(&osdc->lock); 4655 } 4656 EXPORT_SYMBOL(ceph_osdc_start_request); 4657 4658 /* 4659 * Unregister request. If @req was registered, it isn't completed: 4660 * r_result isn't set and __complete_request() isn't invoked. 4661 * 4662 * If @req wasn't registered, this call may have raced with 4663 * handle_reply(), in which case r_result would already be set and 4664 * __complete_request() would be getting invoked, possibly even 4665 * concurrently with this call. 4666 */ 4667 void ceph_osdc_cancel_request(struct ceph_osd_request *req) 4668 { 4669 struct ceph_osd_client *osdc = req->r_osdc; 4670 4671 down_write(&osdc->lock); 4672 if (req->r_osd) 4673 cancel_request(req); 4674 up_write(&osdc->lock); 4675 } 4676 EXPORT_SYMBOL(ceph_osdc_cancel_request); 4677 4678 /* 4679 * @timeout: in jiffies, 0 means "wait forever" 4680 */ 4681 static int wait_request_timeout(struct ceph_osd_request *req, 4682 unsigned long timeout) 4683 { 4684 long left; 4685 4686 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 4687 left = wait_for_completion_killable_timeout(&req->r_completion, 4688 ceph_timeout_jiffies(timeout)); 4689 if (left <= 0) { 4690 left = left ?: -ETIMEDOUT; 4691 ceph_osdc_cancel_request(req); 4692 } else { 4693 left = req->r_result; /* completed */ 4694 } 4695 4696 return left; 4697 } 4698 4699 /* 4700 * wait for a request to complete 4701 */ 4702 int ceph_osdc_wait_request(struct ceph_osd_client *osdc, 4703 struct ceph_osd_request *req) 4704 { 4705 return wait_request_timeout(req, 0); 4706 } 4707 EXPORT_SYMBOL(ceph_osdc_wait_request); 4708 4709 /* 4710 * sync - wait for all in-flight requests to flush. avoid starvation. 4711 */ 4712 void ceph_osdc_sync(struct ceph_osd_client *osdc) 4713 { 4714 struct rb_node *n, *p; 4715 u64 last_tid = atomic64_read(&osdc->last_tid); 4716 4717 again: 4718 down_read(&osdc->lock); 4719 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 4720 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 4721 4722 mutex_lock(&osd->lock); 4723 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) { 4724 struct ceph_osd_request *req = 4725 rb_entry(p, struct ceph_osd_request, r_node); 4726 4727 if (req->r_tid > last_tid) 4728 break; 4729 4730 if (!(req->r_flags & CEPH_OSD_FLAG_WRITE)) 4731 continue; 4732 4733 ceph_osdc_get_request(req); 4734 mutex_unlock(&osd->lock); 4735 up_read(&osdc->lock); 4736 dout("%s waiting on req %p tid %llu last_tid %llu\n", 4737 __func__, req, req->r_tid, last_tid); 4738 wait_for_completion(&req->r_completion); 4739 ceph_osdc_put_request(req); 4740 goto again; 4741 } 4742 4743 mutex_unlock(&osd->lock); 4744 } 4745 4746 up_read(&osdc->lock); 4747 dout("%s done last_tid %llu\n", __func__, last_tid); 4748 } 4749 EXPORT_SYMBOL(ceph_osdc_sync); 4750 4751 /* 4752 * Returns a handle, caller owns a ref. 4753 */ 4754 struct ceph_osd_linger_request * 4755 ceph_osdc_watch(struct ceph_osd_client *osdc, 4756 struct ceph_object_id *oid, 4757 struct ceph_object_locator *oloc, 4758 rados_watchcb2_t wcb, 4759 rados_watcherrcb_t errcb, 4760 void *data) 4761 { 4762 struct ceph_osd_linger_request *lreq; 4763 int ret; 4764 4765 lreq = linger_alloc(osdc); 4766 if (!lreq) 4767 return ERR_PTR(-ENOMEM); 4768 4769 lreq->is_watch = true; 4770 lreq->wcb = wcb; 4771 lreq->errcb = errcb; 4772 lreq->data = data; 4773 lreq->watch_valid_thru = jiffies; 4774 4775 ceph_oid_copy(&lreq->t.base_oid, oid); 4776 ceph_oloc_copy(&lreq->t.base_oloc, oloc); 4777 lreq->t.flags = CEPH_OSD_FLAG_WRITE; 4778 ktime_get_real_ts64(&lreq->mtime); 4779 4780 linger_submit(lreq); 4781 ret = linger_reg_commit_wait(lreq); 4782 if (ret) { 4783 linger_cancel(lreq); 4784 goto err_put_lreq; 4785 } 4786 4787 return lreq; 4788 4789 err_put_lreq: 4790 linger_put(lreq); 4791 return ERR_PTR(ret); 4792 } 4793 EXPORT_SYMBOL(ceph_osdc_watch); 4794 4795 /* 4796 * Releases a ref. 4797 * 4798 * Times out after mount_timeout to preserve rbd unmap behaviour 4799 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap 4800 * with mount_timeout"). 4801 */ 4802 int ceph_osdc_unwatch(struct ceph_osd_client *osdc, 4803 struct ceph_osd_linger_request *lreq) 4804 { 4805 struct ceph_options *opts = osdc->client->options; 4806 struct ceph_osd_request *req; 4807 int ret; 4808 4809 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 4810 if (!req) 4811 return -ENOMEM; 4812 4813 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); 4814 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); 4815 req->r_flags = CEPH_OSD_FLAG_WRITE; 4816 ktime_get_real_ts64(&req->r_mtime); 4817 osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_UNWATCH, 4818 lreq->linger_id, 0); 4819 4820 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 4821 if (ret) 4822 goto out_put_req; 4823 4824 ceph_osdc_start_request(osdc, req); 4825 linger_cancel(lreq); 4826 linger_put(lreq); 4827 ret = wait_request_timeout(req, opts->mount_timeout); 4828 4829 out_put_req: 4830 ceph_osdc_put_request(req); 4831 return ret; 4832 } 4833 EXPORT_SYMBOL(ceph_osdc_unwatch); 4834 4835 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which, 4836 u64 notify_id, u64 cookie, void *payload, 4837 u32 payload_len) 4838 { 4839 struct ceph_osd_req_op *op; 4840 struct ceph_pagelist *pl; 4841 int ret; 4842 4843 op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0); 4844 4845 pl = ceph_pagelist_alloc(GFP_NOIO); 4846 if (!pl) 4847 return -ENOMEM; 4848 4849 ret = ceph_pagelist_encode_64(pl, notify_id); 4850 ret |= ceph_pagelist_encode_64(pl, cookie); 4851 if (payload) { 4852 ret |= ceph_pagelist_encode_32(pl, payload_len); 4853 ret |= ceph_pagelist_append(pl, payload, payload_len); 4854 } else { 4855 ret |= ceph_pagelist_encode_32(pl, 0); 4856 } 4857 if (ret) { 4858 ceph_pagelist_release(pl); 4859 return -ENOMEM; 4860 } 4861 4862 ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl); 4863 op->indata_len = pl->length; 4864 return 0; 4865 } 4866 4867 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc, 4868 struct ceph_object_id *oid, 4869 struct ceph_object_locator *oloc, 4870 u64 notify_id, 4871 u64 cookie, 4872 void *payload, 4873 u32 payload_len) 4874 { 4875 struct ceph_osd_request *req; 4876 int ret; 4877 4878 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 4879 if (!req) 4880 return -ENOMEM; 4881 4882 ceph_oid_copy(&req->r_base_oid, oid); 4883 ceph_oloc_copy(&req->r_base_oloc, oloc); 4884 req->r_flags = CEPH_OSD_FLAG_READ; 4885 4886 ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload, 4887 payload_len); 4888 if (ret) 4889 goto out_put_req; 4890 4891 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 4892 if (ret) 4893 goto out_put_req; 4894 4895 ceph_osdc_start_request(osdc, req); 4896 ret = ceph_osdc_wait_request(osdc, req); 4897 4898 out_put_req: 4899 ceph_osdc_put_request(req); 4900 return ret; 4901 } 4902 EXPORT_SYMBOL(ceph_osdc_notify_ack); 4903 4904 /* 4905 * @timeout: in seconds 4906 * 4907 * @preply_{pages,len} are initialized both on success and error. 4908 * The caller is responsible for: 4909 * 4910 * ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len)) 4911 */ 4912 int ceph_osdc_notify(struct ceph_osd_client *osdc, 4913 struct ceph_object_id *oid, 4914 struct ceph_object_locator *oloc, 4915 void *payload, 4916 u32 payload_len, 4917 u32 timeout, 4918 struct page ***preply_pages, 4919 size_t *preply_len) 4920 { 4921 struct ceph_osd_linger_request *lreq; 4922 int ret; 4923 4924 WARN_ON(!timeout); 4925 if (preply_pages) { 4926 *preply_pages = NULL; 4927 *preply_len = 0; 4928 } 4929 4930 lreq = linger_alloc(osdc); 4931 if (!lreq) 4932 return -ENOMEM; 4933 4934 lreq->request_pl = ceph_pagelist_alloc(GFP_NOIO); 4935 if (!lreq->request_pl) { 4936 ret = -ENOMEM; 4937 goto out_put_lreq; 4938 } 4939 4940 ret = ceph_pagelist_encode_32(lreq->request_pl, 1); /* prot_ver */ 4941 ret |= ceph_pagelist_encode_32(lreq->request_pl, timeout); 4942 ret |= ceph_pagelist_encode_32(lreq->request_pl, payload_len); 4943 ret |= ceph_pagelist_append(lreq->request_pl, payload, payload_len); 4944 if (ret) { 4945 ret = -ENOMEM; 4946 goto out_put_lreq; 4947 } 4948 4949 /* for notify_id */ 4950 lreq->notify_id_pages = ceph_alloc_page_vector(1, GFP_NOIO); 4951 if (IS_ERR(lreq->notify_id_pages)) { 4952 ret = PTR_ERR(lreq->notify_id_pages); 4953 lreq->notify_id_pages = NULL; 4954 goto out_put_lreq; 4955 } 4956 4957 lreq->preply_pages = preply_pages; 4958 lreq->preply_len = preply_len; 4959 4960 ceph_oid_copy(&lreq->t.base_oid, oid); 4961 ceph_oloc_copy(&lreq->t.base_oloc, oloc); 4962 lreq->t.flags = CEPH_OSD_FLAG_READ; 4963 4964 linger_submit(lreq); 4965 ret = linger_reg_commit_wait(lreq); 4966 if (!ret) 4967 ret = linger_notify_finish_wait(lreq, 4968 msecs_to_jiffies(2 * timeout * MSEC_PER_SEC)); 4969 else 4970 dout("lreq %p failed to initiate notify %d\n", lreq, ret); 4971 4972 linger_cancel(lreq); 4973 out_put_lreq: 4974 linger_put(lreq); 4975 return ret; 4976 } 4977 EXPORT_SYMBOL(ceph_osdc_notify); 4978 4979 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item) 4980 { 4981 u8 struct_v; 4982 u32 struct_len; 4983 int ret; 4984 4985 ret = ceph_start_decoding(p, end, 2, "watch_item_t", 4986 &struct_v, &struct_len); 4987 if (ret) 4988 goto bad; 4989 4990 ret = -EINVAL; 4991 ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad); 4992 ceph_decode_64_safe(p, end, item->cookie, bad); 4993 ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */ 4994 4995 if (struct_v >= 2) { 4996 ret = ceph_decode_entity_addr(p, end, &item->addr); 4997 if (ret) 4998 goto bad; 4999 } else { 5000 ret = 0; 5001 } 5002 5003 dout("%s %s%llu cookie %llu addr %s\n", __func__, 5004 ENTITY_NAME(item->name), item->cookie, 5005 ceph_pr_addr(&item->addr)); 5006 bad: 5007 return ret; 5008 } 5009 5010 static int decode_watchers(void **p, void *end, 5011 struct ceph_watch_item **watchers, 5012 u32 *num_watchers) 5013 { 5014 u8 struct_v; 5015 u32 struct_len; 5016 int i; 5017 int ret; 5018 5019 ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t", 5020 &struct_v, &struct_len); 5021 if (ret) 5022 return ret; 5023 5024 *num_watchers = ceph_decode_32(p); 5025 *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO); 5026 if (!*watchers) 5027 return -ENOMEM; 5028 5029 for (i = 0; i < *num_watchers; i++) { 5030 ret = decode_watcher(p, end, *watchers + i); 5031 if (ret) { 5032 kfree(*watchers); 5033 return ret; 5034 } 5035 } 5036 5037 return 0; 5038 } 5039 5040 /* 5041 * On success, the caller is responsible for: 5042 * 5043 * kfree(watchers); 5044 */ 5045 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, 5046 struct ceph_object_id *oid, 5047 struct ceph_object_locator *oloc, 5048 struct ceph_watch_item **watchers, 5049 u32 *num_watchers) 5050 { 5051 struct ceph_osd_request *req; 5052 struct page **pages; 5053 int ret; 5054 5055 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 5056 if (!req) 5057 return -ENOMEM; 5058 5059 ceph_oid_copy(&req->r_base_oid, oid); 5060 ceph_oloc_copy(&req->r_base_oloc, oloc); 5061 req->r_flags = CEPH_OSD_FLAG_READ; 5062 5063 pages = ceph_alloc_page_vector(1, GFP_NOIO); 5064 if (IS_ERR(pages)) { 5065 ret = PTR_ERR(pages); 5066 goto out_put_req; 5067 } 5068 5069 osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0); 5070 ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers, 5071 response_data), 5072 pages, PAGE_SIZE, 0, false, true); 5073 5074 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 5075 if (ret) 5076 goto out_put_req; 5077 5078 ceph_osdc_start_request(osdc, req); 5079 ret = ceph_osdc_wait_request(osdc, req); 5080 if (ret >= 0) { 5081 void *p = page_address(pages[0]); 5082 void *const end = p + req->r_ops[0].outdata_len; 5083 5084 ret = decode_watchers(&p, end, watchers, num_watchers); 5085 } 5086 5087 out_put_req: 5088 ceph_osdc_put_request(req); 5089 return ret; 5090 } 5091 EXPORT_SYMBOL(ceph_osdc_list_watchers); 5092 5093 /* 5094 * Call all pending notify callbacks - for use after a watch is 5095 * unregistered, to make sure no more callbacks for it will be invoked 5096 */ 5097 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc) 5098 { 5099 dout("%s osdc %p\n", __func__, osdc); 5100 flush_workqueue(osdc->notify_wq); 5101 } 5102 EXPORT_SYMBOL(ceph_osdc_flush_notifies); 5103 5104 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc) 5105 { 5106 down_read(&osdc->lock); 5107 maybe_request_map(osdc); 5108 up_read(&osdc->lock); 5109 } 5110 EXPORT_SYMBOL(ceph_osdc_maybe_request_map); 5111 5112 /* 5113 * Execute an OSD class method on an object. 5114 * 5115 * @flags: CEPH_OSD_FLAG_* 5116 * @resp_len: in/out param for reply length 5117 */ 5118 int ceph_osdc_call(struct ceph_osd_client *osdc, 5119 struct ceph_object_id *oid, 5120 struct ceph_object_locator *oloc, 5121 const char *class, const char *method, 5122 unsigned int flags, 5123 struct page *req_page, size_t req_len, 5124 struct page **resp_pages, size_t *resp_len) 5125 { 5126 struct ceph_osd_request *req; 5127 int ret; 5128 5129 if (req_len > PAGE_SIZE) 5130 return -E2BIG; 5131 5132 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 5133 if (!req) 5134 return -ENOMEM; 5135 5136 ceph_oid_copy(&req->r_base_oid, oid); 5137 ceph_oloc_copy(&req->r_base_oloc, oloc); 5138 req->r_flags = flags; 5139 5140 ret = osd_req_op_cls_init(req, 0, class, method); 5141 if (ret) 5142 goto out_put_req; 5143 5144 if (req_page) 5145 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len, 5146 0, false, false); 5147 if (resp_pages) 5148 osd_req_op_cls_response_data_pages(req, 0, resp_pages, 5149 *resp_len, 0, false, false); 5150 5151 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 5152 if (ret) 5153 goto out_put_req; 5154 5155 ceph_osdc_start_request(osdc, req); 5156 ret = ceph_osdc_wait_request(osdc, req); 5157 if (ret >= 0) { 5158 ret = req->r_ops[0].rval; 5159 if (resp_pages) 5160 *resp_len = req->r_ops[0].outdata_len; 5161 } 5162 5163 out_put_req: 5164 ceph_osdc_put_request(req); 5165 return ret; 5166 } 5167 EXPORT_SYMBOL(ceph_osdc_call); 5168 5169 /* 5170 * reset all osd connections 5171 */ 5172 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc) 5173 { 5174 struct rb_node *n; 5175 5176 down_write(&osdc->lock); 5177 for (n = rb_first(&osdc->osds); n; ) { 5178 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 5179 5180 n = rb_next(n); 5181 if (!reopen_osd(osd)) 5182 kick_osd_requests(osd); 5183 } 5184 up_write(&osdc->lock); 5185 } 5186 5187 /* 5188 * init, shutdown 5189 */ 5190 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) 5191 { 5192 int err; 5193 5194 dout("init\n"); 5195 osdc->client = client; 5196 init_rwsem(&osdc->lock); 5197 osdc->osds = RB_ROOT; 5198 INIT_LIST_HEAD(&osdc->osd_lru); 5199 spin_lock_init(&osdc->osd_lru_lock); 5200 osd_init(&osdc->homeless_osd); 5201 osdc->homeless_osd.o_osdc = osdc; 5202 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD; 5203 osdc->last_linger_id = CEPH_LINGER_ID_START; 5204 osdc->linger_requests = RB_ROOT; 5205 osdc->map_checks = RB_ROOT; 5206 osdc->linger_map_checks = RB_ROOT; 5207 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); 5208 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); 5209 5210 err = -ENOMEM; 5211 osdc->osdmap = ceph_osdmap_alloc(); 5212 if (!osdc->osdmap) 5213 goto out; 5214 5215 osdc->req_mempool = mempool_create_slab_pool(10, 5216 ceph_osd_request_cache); 5217 if (!osdc->req_mempool) 5218 goto out_map; 5219 5220 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP, 5221 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op"); 5222 if (err < 0) 5223 goto out_mempool; 5224 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY, 5225 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, 5226 "osd_op_reply"); 5227 if (err < 0) 5228 goto out_msgpool; 5229 5230 err = -ENOMEM; 5231 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); 5232 if (!osdc->notify_wq) 5233 goto out_msgpool_reply; 5234 5235 osdc->completion_wq = create_singlethread_workqueue("ceph-completion"); 5236 if (!osdc->completion_wq) 5237 goto out_notify_wq; 5238 5239 schedule_delayed_work(&osdc->timeout_work, 5240 osdc->client->options->osd_keepalive_timeout); 5241 schedule_delayed_work(&osdc->osds_timeout_work, 5242 round_jiffies_relative(osdc->client->options->osd_idle_ttl)); 5243 5244 return 0; 5245 5246 out_notify_wq: 5247 destroy_workqueue(osdc->notify_wq); 5248 out_msgpool_reply: 5249 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 5250 out_msgpool: 5251 ceph_msgpool_destroy(&osdc->msgpool_op); 5252 out_mempool: 5253 mempool_destroy(osdc->req_mempool); 5254 out_map: 5255 ceph_osdmap_destroy(osdc->osdmap); 5256 out: 5257 return err; 5258 } 5259 5260 void ceph_osdc_stop(struct ceph_osd_client *osdc) 5261 { 5262 destroy_workqueue(osdc->completion_wq); 5263 destroy_workqueue(osdc->notify_wq); 5264 cancel_delayed_work_sync(&osdc->timeout_work); 5265 cancel_delayed_work_sync(&osdc->osds_timeout_work); 5266 5267 down_write(&osdc->lock); 5268 while (!RB_EMPTY_ROOT(&osdc->osds)) { 5269 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), 5270 struct ceph_osd, o_node); 5271 close_osd(osd); 5272 } 5273 up_write(&osdc->lock); 5274 WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1); 5275 osd_cleanup(&osdc->homeless_osd); 5276 5277 WARN_ON(!list_empty(&osdc->osd_lru)); 5278 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests)); 5279 WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks)); 5280 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks)); 5281 WARN_ON(atomic_read(&osdc->num_requests)); 5282 WARN_ON(atomic_read(&osdc->num_homeless)); 5283 5284 ceph_osdmap_destroy(osdc->osdmap); 5285 mempool_destroy(osdc->req_mempool); 5286 ceph_msgpool_destroy(&osdc->msgpool_op); 5287 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 5288 } 5289 5290 int osd_req_op_copy_from_init(struct ceph_osd_request *req, 5291 u64 src_snapid, u64 src_version, 5292 struct ceph_object_id *src_oid, 5293 struct ceph_object_locator *src_oloc, 5294 u32 src_fadvise_flags, 5295 u32 dst_fadvise_flags, 5296 u32 truncate_seq, u64 truncate_size, 5297 u8 copy_from_flags) 5298 { 5299 struct ceph_osd_req_op *op; 5300 struct page **pages; 5301 void *p, *end; 5302 5303 pages = ceph_alloc_page_vector(1, GFP_KERNEL); 5304 if (IS_ERR(pages)) 5305 return PTR_ERR(pages); 5306 5307 op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2, 5308 dst_fadvise_flags); 5309 op->copy_from.snapid = src_snapid; 5310 op->copy_from.src_version = src_version; 5311 op->copy_from.flags = copy_from_flags; 5312 op->copy_from.src_fadvise_flags = src_fadvise_flags; 5313 5314 p = page_address(pages[0]); 5315 end = p + PAGE_SIZE; 5316 ceph_encode_string(&p, end, src_oid->name, src_oid->name_len); 5317 encode_oloc(&p, end, src_oloc); 5318 ceph_encode_32(&p, truncate_seq); 5319 ceph_encode_64(&p, truncate_size); 5320 op->indata_len = PAGE_SIZE - (end - p); 5321 5322 ceph_osd_data_pages_init(&op->copy_from.osd_data, pages, 5323 op->indata_len, 0, false, true); 5324 return 0; 5325 } 5326 EXPORT_SYMBOL(osd_req_op_copy_from_init); 5327 5328 int __init ceph_osdc_setup(void) 5329 { 5330 size_t size = sizeof(struct ceph_osd_request) + 5331 CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op); 5332 5333 BUG_ON(ceph_osd_request_cache); 5334 ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size, 5335 0, 0, NULL); 5336 5337 return ceph_osd_request_cache ? 0 : -ENOMEM; 5338 } 5339 5340 void ceph_osdc_cleanup(void) 5341 { 5342 BUG_ON(!ceph_osd_request_cache); 5343 kmem_cache_destroy(ceph_osd_request_cache); 5344 ceph_osd_request_cache = NULL; 5345 } 5346 5347 /* 5348 * handle incoming message 5349 */ 5350 static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg) 5351 { 5352 struct ceph_osd *osd = con->private; 5353 struct ceph_osd_client *osdc = osd->o_osdc; 5354 int type = le16_to_cpu(msg->hdr.type); 5355 5356 switch (type) { 5357 case CEPH_MSG_OSD_MAP: 5358 ceph_osdc_handle_map(osdc, msg); 5359 break; 5360 case CEPH_MSG_OSD_OPREPLY: 5361 handle_reply(osd, msg); 5362 break; 5363 case CEPH_MSG_OSD_BACKOFF: 5364 handle_backoff(osd, msg); 5365 break; 5366 case CEPH_MSG_WATCH_NOTIFY: 5367 handle_watch_notify(osdc, msg); 5368 break; 5369 5370 default: 5371 pr_err("received unknown message type %d %s\n", type, 5372 ceph_msg_type_name(type)); 5373 } 5374 5375 ceph_msg_put(msg); 5376 } 5377 5378 /* How much sparse data was requested? */ 5379 static u64 sparse_data_requested(struct ceph_osd_request *req) 5380 { 5381 u64 len = 0; 5382 5383 if (req->r_flags & CEPH_OSD_FLAG_READ) { 5384 int i; 5385 5386 for (i = 0; i < req->r_num_ops; ++i) { 5387 struct ceph_osd_req_op *op = &req->r_ops[i]; 5388 5389 if (op->op == CEPH_OSD_OP_SPARSE_READ) 5390 len += op->extent.length; 5391 } 5392 } 5393 return len; 5394 } 5395 5396 /* 5397 * Lookup and return message for incoming reply. Don't try to do 5398 * anything about a larger than preallocated data portion of the 5399 * message at the moment - for now, just skip the message. 5400 */ 5401 static struct ceph_msg *get_reply(struct ceph_connection *con, 5402 struct ceph_msg_header *hdr, 5403 int *skip) 5404 { 5405 struct ceph_osd *osd = con->private; 5406 struct ceph_osd_client *osdc = osd->o_osdc; 5407 struct ceph_msg *m = NULL; 5408 struct ceph_osd_request *req; 5409 int front_len = le32_to_cpu(hdr->front_len); 5410 int data_len = le32_to_cpu(hdr->data_len); 5411 u64 tid = le64_to_cpu(hdr->tid); 5412 u64 srlen; 5413 5414 down_read(&osdc->lock); 5415 if (!osd_registered(osd)) { 5416 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd); 5417 *skip = 1; 5418 goto out_unlock_osdc; 5419 } 5420 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num)); 5421 5422 mutex_lock(&osd->lock); 5423 req = lookup_request(&osd->o_requests, tid); 5424 if (!req) { 5425 dout("%s osd%d tid %llu unknown, skipping\n", __func__, 5426 osd->o_osd, tid); 5427 *skip = 1; 5428 goto out_unlock_session; 5429 } 5430 5431 ceph_msg_revoke_incoming(req->r_reply); 5432 5433 if (front_len > req->r_reply->front_alloc_len) { 5434 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n", 5435 __func__, osd->o_osd, req->r_tid, front_len, 5436 req->r_reply->front_alloc_len); 5437 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS, 5438 false); 5439 if (!m) 5440 goto out_unlock_session; 5441 ceph_msg_put(req->r_reply); 5442 req->r_reply = m; 5443 } 5444 5445 srlen = sparse_data_requested(req); 5446 if (!srlen && data_len > req->r_reply->data_length) { 5447 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n", 5448 __func__, osd->o_osd, req->r_tid, data_len, 5449 req->r_reply->data_length); 5450 m = NULL; 5451 *skip = 1; 5452 goto out_unlock_session; 5453 } 5454 5455 m = ceph_msg_get(req->r_reply); 5456 m->sparse_read_total = srlen; 5457 5458 dout("get_reply tid %lld %p\n", tid, m); 5459 5460 out_unlock_session: 5461 mutex_unlock(&osd->lock); 5462 out_unlock_osdc: 5463 up_read(&osdc->lock); 5464 return m; 5465 } 5466 5467 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr) 5468 { 5469 struct ceph_msg *m; 5470 int type = le16_to_cpu(hdr->type); 5471 u32 front_len = le32_to_cpu(hdr->front_len); 5472 u32 data_len = le32_to_cpu(hdr->data_len); 5473 5474 m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false); 5475 if (!m) 5476 return NULL; 5477 5478 if (data_len) { 5479 struct page **pages; 5480 5481 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), 5482 GFP_NOIO); 5483 if (IS_ERR(pages)) { 5484 ceph_msg_put(m); 5485 return NULL; 5486 } 5487 5488 ceph_msg_data_add_pages(m, pages, data_len, 0, true); 5489 } 5490 5491 return m; 5492 } 5493 5494 static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con, 5495 struct ceph_msg_header *hdr, 5496 int *skip) 5497 { 5498 struct ceph_osd *osd = con->private; 5499 int type = le16_to_cpu(hdr->type); 5500 5501 *skip = 0; 5502 switch (type) { 5503 case CEPH_MSG_OSD_MAP: 5504 case CEPH_MSG_OSD_BACKOFF: 5505 case CEPH_MSG_WATCH_NOTIFY: 5506 return alloc_msg_with_page_vector(hdr); 5507 case CEPH_MSG_OSD_OPREPLY: 5508 return get_reply(con, hdr, skip); 5509 default: 5510 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__, 5511 osd->o_osd, type); 5512 *skip = 1; 5513 return NULL; 5514 } 5515 } 5516 5517 /* 5518 * Wrappers to refcount containing ceph_osd struct 5519 */ 5520 static struct ceph_connection *osd_get_con(struct ceph_connection *con) 5521 { 5522 struct ceph_osd *osd = con->private; 5523 if (get_osd(osd)) 5524 return con; 5525 return NULL; 5526 } 5527 5528 static void osd_put_con(struct ceph_connection *con) 5529 { 5530 struct ceph_osd *osd = con->private; 5531 put_osd(osd); 5532 } 5533 5534 /* 5535 * authentication 5536 */ 5537 5538 /* 5539 * Note: returned pointer is the address of a structure that's 5540 * managed separately. Caller must *not* attempt to free it. 5541 */ 5542 static struct ceph_auth_handshake * 5543 osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new) 5544 { 5545 struct ceph_osd *o = con->private; 5546 struct ceph_osd_client *osdc = o->o_osdc; 5547 struct ceph_auth_client *ac = osdc->client->monc.auth; 5548 struct ceph_auth_handshake *auth = &o->o_auth; 5549 int ret; 5550 5551 ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD, 5552 force_new, proto, NULL, NULL); 5553 if (ret) 5554 return ERR_PTR(ret); 5555 5556 return auth; 5557 } 5558 5559 static int osd_add_authorizer_challenge(struct ceph_connection *con, 5560 void *challenge_buf, int challenge_buf_len) 5561 { 5562 struct ceph_osd *o = con->private; 5563 struct ceph_osd_client *osdc = o->o_osdc; 5564 struct ceph_auth_client *ac = osdc->client->monc.auth; 5565 5566 return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer, 5567 challenge_buf, challenge_buf_len); 5568 } 5569 5570 static int osd_verify_authorizer_reply(struct ceph_connection *con) 5571 { 5572 struct ceph_osd *o = con->private; 5573 struct ceph_osd_client *osdc = o->o_osdc; 5574 struct ceph_auth_client *ac = osdc->client->monc.auth; 5575 struct ceph_auth_handshake *auth = &o->o_auth; 5576 5577 return ceph_auth_verify_authorizer_reply(ac, auth->authorizer, 5578 auth->authorizer_reply_buf, auth->authorizer_reply_buf_len, 5579 NULL, NULL, NULL, NULL); 5580 } 5581 5582 static int osd_invalidate_authorizer(struct ceph_connection *con) 5583 { 5584 struct ceph_osd *o = con->private; 5585 struct ceph_osd_client *osdc = o->o_osdc; 5586 struct ceph_auth_client *ac = osdc->client->monc.auth; 5587 5588 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); 5589 return ceph_monc_validate_auth(&osdc->client->monc); 5590 } 5591 5592 static int osd_get_auth_request(struct ceph_connection *con, 5593 void *buf, int *buf_len, 5594 void **authorizer, int *authorizer_len) 5595 { 5596 struct ceph_osd *o = con->private; 5597 struct ceph_auth_client *ac = o->o_osdc->client->monc.auth; 5598 struct ceph_auth_handshake *auth = &o->o_auth; 5599 int ret; 5600 5601 ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD, 5602 buf, buf_len); 5603 if (ret) 5604 return ret; 5605 5606 *authorizer = auth->authorizer_buf; 5607 *authorizer_len = auth->authorizer_buf_len; 5608 return 0; 5609 } 5610 5611 static int osd_handle_auth_reply_more(struct ceph_connection *con, 5612 void *reply, int reply_len, 5613 void *buf, int *buf_len, 5614 void **authorizer, int *authorizer_len) 5615 { 5616 struct ceph_osd *o = con->private; 5617 struct ceph_auth_client *ac = o->o_osdc->client->monc.auth; 5618 struct ceph_auth_handshake *auth = &o->o_auth; 5619 int ret; 5620 5621 ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len, 5622 buf, buf_len); 5623 if (ret) 5624 return ret; 5625 5626 *authorizer = auth->authorizer_buf; 5627 *authorizer_len = auth->authorizer_buf_len; 5628 return 0; 5629 } 5630 5631 static int osd_handle_auth_done(struct ceph_connection *con, 5632 u64 global_id, void *reply, int reply_len, 5633 u8 *session_key, int *session_key_len, 5634 u8 *con_secret, int *con_secret_len) 5635 { 5636 struct ceph_osd *o = con->private; 5637 struct ceph_auth_client *ac = o->o_osdc->client->monc.auth; 5638 struct ceph_auth_handshake *auth = &o->o_auth; 5639 5640 return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len, 5641 session_key, session_key_len, 5642 con_secret, con_secret_len); 5643 } 5644 5645 static int osd_handle_auth_bad_method(struct ceph_connection *con, 5646 int used_proto, int result, 5647 const int *allowed_protos, int proto_cnt, 5648 const int *allowed_modes, int mode_cnt) 5649 { 5650 struct ceph_osd *o = con->private; 5651 struct ceph_mon_client *monc = &o->o_osdc->client->monc; 5652 int ret; 5653 5654 if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_OSD, 5655 used_proto, result, 5656 allowed_protos, proto_cnt, 5657 allowed_modes, mode_cnt)) { 5658 ret = ceph_monc_validate_auth(monc); 5659 if (ret) 5660 return ret; 5661 } 5662 5663 return -EACCES; 5664 } 5665 5666 static void osd_reencode_message(struct ceph_msg *msg) 5667 { 5668 int type = le16_to_cpu(msg->hdr.type); 5669 5670 if (type == CEPH_MSG_OSD_OP) 5671 encode_request_finish(msg); 5672 } 5673 5674 static int osd_sign_message(struct ceph_msg *msg) 5675 { 5676 struct ceph_osd *o = msg->con->private; 5677 struct ceph_auth_handshake *auth = &o->o_auth; 5678 5679 return ceph_auth_sign_message(auth, msg); 5680 } 5681 5682 static int osd_check_message_signature(struct ceph_msg *msg) 5683 { 5684 struct ceph_osd *o = msg->con->private; 5685 struct ceph_auth_handshake *auth = &o->o_auth; 5686 5687 return ceph_auth_check_message_signature(auth, msg); 5688 } 5689 5690 static void advance_cursor(struct ceph_msg_data_cursor *cursor, size_t len, 5691 bool zero) 5692 { 5693 while (len) { 5694 struct page *page; 5695 size_t poff, plen; 5696 5697 page = ceph_msg_data_next(cursor, &poff, &plen); 5698 if (plen > len) 5699 plen = len; 5700 if (zero) 5701 zero_user_segment(page, poff, poff + plen); 5702 len -= plen; 5703 ceph_msg_data_advance(cursor, plen); 5704 } 5705 } 5706 5707 static int prep_next_sparse_read(struct ceph_connection *con, 5708 struct ceph_msg_data_cursor *cursor) 5709 { 5710 struct ceph_osd *o = con->private; 5711 struct ceph_sparse_read *sr = &o->o_sparse_read; 5712 struct ceph_osd_request *req; 5713 struct ceph_osd_req_op *op; 5714 5715 spin_lock(&o->o_requests_lock); 5716 req = lookup_request(&o->o_requests, le64_to_cpu(con->in_msg->hdr.tid)); 5717 if (!req) { 5718 spin_unlock(&o->o_requests_lock); 5719 return -EBADR; 5720 } 5721 5722 if (o->o_sparse_op_idx < 0) { 5723 dout("%s: [%d] starting new sparse read req\n", 5724 __func__, o->o_osd); 5725 } else { 5726 u64 end; 5727 5728 op = &req->r_ops[o->o_sparse_op_idx]; 5729 5730 WARN_ON_ONCE(op->extent.sparse_ext); 5731 5732 /* hand back buffer we took earlier */ 5733 op->extent.sparse_ext = sr->sr_extent; 5734 sr->sr_extent = NULL; 5735 op->extent.sparse_ext_cnt = sr->sr_count; 5736 sr->sr_ext_len = 0; 5737 dout("%s: [%d] completed extent array len %d cursor->resid %zd\n", 5738 __func__, o->o_osd, op->extent.sparse_ext_cnt, cursor->resid); 5739 /* Advance to end of data for this operation */ 5740 end = ceph_sparse_ext_map_end(op); 5741 if (end < sr->sr_req_len) 5742 advance_cursor(cursor, sr->sr_req_len - end, false); 5743 } 5744 5745 ceph_init_sparse_read(sr); 5746 5747 /* find next op in this request (if any) */ 5748 while (++o->o_sparse_op_idx < req->r_num_ops) { 5749 op = &req->r_ops[o->o_sparse_op_idx]; 5750 if (op->op == CEPH_OSD_OP_SPARSE_READ) 5751 goto found; 5752 } 5753 5754 /* reset for next sparse read request */ 5755 spin_unlock(&o->o_requests_lock); 5756 o->o_sparse_op_idx = -1; 5757 return 0; 5758 found: 5759 sr->sr_req_off = op->extent.offset; 5760 sr->sr_req_len = op->extent.length; 5761 sr->sr_pos = sr->sr_req_off; 5762 dout("%s: [%d] new sparse read op at idx %d 0x%llx~0x%llx\n", __func__, 5763 o->o_osd, o->o_sparse_op_idx, sr->sr_req_off, sr->sr_req_len); 5764 5765 /* hand off request's sparse extent map buffer */ 5766 sr->sr_ext_len = op->extent.sparse_ext_cnt; 5767 op->extent.sparse_ext_cnt = 0; 5768 sr->sr_extent = op->extent.sparse_ext; 5769 op->extent.sparse_ext = NULL; 5770 5771 spin_unlock(&o->o_requests_lock); 5772 return 1; 5773 } 5774 5775 #ifdef __BIG_ENDIAN 5776 static inline void convert_extent_map(struct ceph_sparse_read *sr) 5777 { 5778 int i; 5779 5780 for (i = 0; i < sr->sr_count; i++) { 5781 struct ceph_sparse_extent *ext = &sr->sr_extent[i]; 5782 5783 ext->off = le64_to_cpu((__force __le64)ext->off); 5784 ext->len = le64_to_cpu((__force __le64)ext->len); 5785 } 5786 } 5787 #else 5788 static inline void convert_extent_map(struct ceph_sparse_read *sr) 5789 { 5790 } 5791 #endif 5792 5793 static int osd_sparse_read(struct ceph_connection *con, 5794 struct ceph_msg_data_cursor *cursor, 5795 char **pbuf) 5796 { 5797 struct ceph_osd *o = con->private; 5798 struct ceph_sparse_read *sr = &o->o_sparse_read; 5799 u32 count = sr->sr_count; 5800 u64 eoff, elen, len = 0; 5801 int i, ret; 5802 5803 switch (sr->sr_state) { 5804 case CEPH_SPARSE_READ_HDR: 5805 next_op: 5806 ret = prep_next_sparse_read(con, cursor); 5807 if (ret <= 0) 5808 return ret; 5809 5810 /* number of extents */ 5811 ret = sizeof(sr->sr_count); 5812 *pbuf = (char *)&sr->sr_count; 5813 sr->sr_state = CEPH_SPARSE_READ_EXTENTS; 5814 break; 5815 case CEPH_SPARSE_READ_EXTENTS: 5816 /* Convert sr_count to host-endian */ 5817 count = le32_to_cpu((__force __le32)sr->sr_count); 5818 sr->sr_count = count; 5819 dout("[%d] got %u extents\n", o->o_osd, count); 5820 5821 if (count > 0) { 5822 if (!sr->sr_extent || count > sr->sr_ext_len) { 5823 /* no extent array provided, or too short */ 5824 kfree(sr->sr_extent); 5825 sr->sr_extent = kmalloc_array(count, 5826 sizeof(*sr->sr_extent), 5827 GFP_NOIO); 5828 if (!sr->sr_extent) { 5829 pr_err("%s: failed to allocate %u extents\n", 5830 __func__, count); 5831 return -ENOMEM; 5832 } 5833 sr->sr_ext_len = count; 5834 } 5835 ret = count * sizeof(*sr->sr_extent); 5836 *pbuf = (char *)sr->sr_extent; 5837 sr->sr_state = CEPH_SPARSE_READ_DATA_LEN; 5838 break; 5839 } 5840 /* No extents? Read data len */ 5841 fallthrough; 5842 case CEPH_SPARSE_READ_DATA_LEN: 5843 convert_extent_map(sr); 5844 ret = sizeof(sr->sr_datalen); 5845 *pbuf = (char *)&sr->sr_datalen; 5846 sr->sr_state = CEPH_SPARSE_READ_DATA_PRE; 5847 break; 5848 case CEPH_SPARSE_READ_DATA_PRE: 5849 /* Convert sr_datalen to host-endian */ 5850 sr->sr_datalen = le32_to_cpu((__force __le32)sr->sr_datalen); 5851 for (i = 0; i < count; i++) 5852 len += sr->sr_extent[i].len; 5853 if (sr->sr_datalen != len) { 5854 pr_warn_ratelimited("data len %u != extent len %llu\n", 5855 sr->sr_datalen, len); 5856 return -EREMOTEIO; 5857 } 5858 sr->sr_state = CEPH_SPARSE_READ_DATA; 5859 fallthrough; 5860 case CEPH_SPARSE_READ_DATA: 5861 if (sr->sr_index >= count) { 5862 sr->sr_state = CEPH_SPARSE_READ_HDR; 5863 goto next_op; 5864 } 5865 5866 eoff = sr->sr_extent[sr->sr_index].off; 5867 elen = sr->sr_extent[sr->sr_index].len; 5868 5869 dout("[%d] ext %d off 0x%llx len 0x%llx\n", 5870 o->o_osd, sr->sr_index, eoff, elen); 5871 5872 if (elen > INT_MAX) { 5873 dout("Sparse read extent length too long (0x%llx)\n", 5874 elen); 5875 return -EREMOTEIO; 5876 } 5877 5878 /* zero out anything from sr_pos to start of extent */ 5879 if (sr->sr_pos < eoff) 5880 advance_cursor(cursor, eoff - sr->sr_pos, true); 5881 5882 /* Set position to end of extent */ 5883 sr->sr_pos = eoff + elen; 5884 5885 /* send back the new length and nullify the ptr */ 5886 cursor->sr_resid = elen; 5887 ret = elen; 5888 *pbuf = NULL; 5889 5890 /* Bump the array index */ 5891 ++sr->sr_index; 5892 break; 5893 } 5894 return ret; 5895 } 5896 5897 static const struct ceph_connection_operations osd_con_ops = { 5898 .get = osd_get_con, 5899 .put = osd_put_con, 5900 .sparse_read = osd_sparse_read, 5901 .alloc_msg = osd_alloc_msg, 5902 .dispatch = osd_dispatch, 5903 .fault = osd_fault, 5904 .reencode_message = osd_reencode_message, 5905 .get_authorizer = osd_get_authorizer, 5906 .add_authorizer_challenge = osd_add_authorizer_challenge, 5907 .verify_authorizer_reply = osd_verify_authorizer_reply, 5908 .invalidate_authorizer = osd_invalidate_authorizer, 5909 .sign_message = osd_sign_message, 5910 .check_message_signature = osd_check_message_signature, 5911 .get_auth_request = osd_get_auth_request, 5912 .handle_auth_reply_more = osd_handle_auth_reply_more, 5913 .handle_auth_done = osd_handle_auth_done, 5914 .handle_auth_bad_method = osd_handle_auth_bad_method, 5915 }; 5916