Lines Matching full:osd

36  * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
41 * are described by the osd map.
43 * We keep track of pending OSD requests (read, write), resubmit
46 * channel with an OSD is reset.
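
These header-comment hits (and the function hits that follow) come from the kernel's Ceph OSD client, net/ceph/osd_client.c. The comment summarizes the client's job: track every pending OSD request and resubmit or retry it when the osdmap changes or the channel to its OSD resets. The following is a minimal user-space sketch of that bookkeeping idea only; the names (fake_request, fake_map_to_osd, handle_new_map) are invented for illustration and stand in for the real tid/rbtree/CRUSH machinery.

    #include <stdio.h>
    #include <stdint.h>

    struct fake_request {
            uint64_t tid;   /* transaction id, unique per client */
            uint32_t pgid;  /* placement group the object hashes to */
            int osd;        /* OSD currently serving that PG */
    };

    /* Toy stand-in for CRUSH: map a PG to an OSD for a given map epoch. */
    static int fake_map_to_osd(uint32_t pgid, uint32_t epoch)
    {
            return (int)((pgid + epoch) % 3);
    }

    /* On a new map, recompute each request's target and "resend" the ones
     * whose target OSD changed -- the idea behind the scan_requests() and
     * kick_requests() hits further down in this listing. */
    static void handle_new_map(struct fake_request *reqs, int nr, uint32_t epoch)
    {
            for (int i = 0; i < nr; i++) {
                    int osd = fake_map_to_osd(reqs[i].pgid, epoch);

                    if (osd != reqs[i].osd) {
                            printf("tid %llu: resend, osd%d -> osd%d\n",
                                   (unsigned long long)reqs[i].tid,
                                   reqs[i].osd, osd);
                            reqs[i].osd = osd;
                    }
            }
    }

    int main(void)
    {
            struct fake_request reqs[] = {
                    { .tid = 1, .pgid = 7, .osd = fake_map_to_osd(7, 1) },
                    { .tid = 2, .pgid = 9, .osd = fake_map_to_osd(9, 1) },
            };

            handle_new_map(reqs, 2, 2);     /* osdmap epoch bumps from 1 to 2 */
            return 0;
    }
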
49 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
50 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
51 static void link_linger(struct ceph_osd *osd,
53 static void unlink_linger(struct ceph_osd *osd,
55 static void clear_backoffs(struct ceph_osd *osd);
77 static inline void verify_osd_locked(struct ceph_osd *osd) in verify_osd_locked() argument
79 struct ceph_osd_client *osdc = osd->o_osdc; in verify_osd_locked()
81 WARN_ON(!(mutex_is_locked(&osd->lock) && in verify_osd_locked()
92 static inline void verify_osd_locked(struct ceph_osd *osd) { } in verify_osd_locked() argument
101 * fill osd op in request message.
432 t->osd = CEPH_HOMELESS_OSD; in target_init()
461 dest->osd = src->osd; in target_copy()
699 * oid, oloc and OSD op opcode(s) must be filled in before this function
713 * This is an osd op init function for opcodes that have no data or
1033 pr_err("unsupported osd opcode %s\n", in osd_req_encode_op()
1166 * We keep osd requests in an rbtree, sorted by ->r_tid.
1172  * Call @fn on each OSD request as long as @fn returns 0.
1181 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in for_each_request() local
1183 for (p = rb_first(&osd->o_requests); p; ) { in for_each_request()
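
The comment at 1166 and the DEFINE_RB_FUNCS hits say each OSD session keeps its in-flight requests in an rbtree keyed by ->r_tid. Below is a hedged, kernel-style sketch of the insert helper such a macro would generate; it assumes the usual <linux/rbtree.h> primitives and a struct ceph_osd_request embedding an rb_node (called r_node here, which may not match the real field name), and it omits the duplicate-tid checks the real helpers carry.

    static void insert_request_sketch(struct rb_root *root,
                                      struct ceph_osd_request *req)
    {
            struct rb_node **n = &root->rb_node;
            struct rb_node *parent = NULL;

            while (*n) {
                    struct ceph_osd_request *cur =
                            rb_entry(*n, struct ceph_osd_request, r_node);

                    parent = *n;
                    if (req->r_tid < cur->r_tid)
                            n = &(*n)->rb_left;
                    else
                            n = &(*n)->rb_right;
            }

            /* link the new node, then let the rbtree code rebalance/recolor */
            rb_link_node(&req->r_node, parent, n);
            rb_insert_color(&req->r_node, root);
    }
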
1203 static bool osd_homeless(struct ceph_osd *osd) in osd_homeless() argument
1205 return osd->o_osd == CEPH_HOMELESS_OSD; in osd_homeless()
1208 static bool osd_registered(struct ceph_osd *osd) in osd_registered() argument
1210 verify_osdc_locked(osd->o_osdc); in osd_registered()
1212 return !RB_EMPTY_NODE(&osd->o_node); in osd_registered()
1216 * Assumes @osd is zero-initialized.
1218 static void osd_init(struct ceph_osd *osd) in osd_init() argument
1220 refcount_set(&osd->o_ref, 1); in osd_init()
1221 RB_CLEAR_NODE(&osd->o_node); in osd_init()
1222 spin_lock_init(&osd->o_requests_lock); in osd_init()
1223 osd->o_requests = RB_ROOT; in osd_init()
1224 osd->o_linger_requests = RB_ROOT; in osd_init()
1225 osd->o_backoff_mappings = RB_ROOT; in osd_init()
1226 osd->o_backoffs_by_id = RB_ROOT; in osd_init()
1227 INIT_LIST_HEAD(&osd->o_osd_lru); in osd_init()
1228 INIT_LIST_HEAD(&osd->o_keepalive_item); in osd_init()
1229 osd->o_incarnation = 1; in osd_init()
1230 mutex_init(&osd->lock); in osd_init()
1240 static void osd_cleanup(struct ceph_osd *osd) in osd_cleanup() argument
1242 WARN_ON(!RB_EMPTY_NODE(&osd->o_node)); in osd_cleanup()
1243 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); in osd_cleanup()
1244 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); in osd_cleanup()
1245 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings)); in osd_cleanup()
1246 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id)); in osd_cleanup()
1247 WARN_ON(!list_empty(&osd->o_osd_lru)); in osd_cleanup()
1248 WARN_ON(!list_empty(&osd->o_keepalive_item)); in osd_cleanup()
1250 ceph_init_sparse_read(&osd->o_sparse_read); in osd_cleanup()
1252 if (osd->o_auth.authorizer) { in osd_cleanup()
1253 WARN_ON(osd_homeless(osd)); in osd_cleanup()
1254 ceph_auth_destroy_authorizer(osd->o_auth.authorizer); in osd_cleanup()
1263 struct ceph_osd *osd; in create_osd() local
1267 osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL); in create_osd()
1268 osd_init(osd); in create_osd()
1269 osd->o_osdc = osdc; in create_osd()
1270 osd->o_osd = onum; in create_osd()
1271 osd->o_sparse_op_idx = -1; in create_osd()
1273 ceph_init_sparse_read(&osd->o_sparse_read); in create_osd()
1275 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr); in create_osd()
1277 return osd; in create_osd()
1280 static struct ceph_osd *get_osd(struct ceph_osd *osd) in get_osd() argument
1282 if (refcount_inc_not_zero(&osd->o_ref)) { in get_osd()
1283 dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1, in get_osd()
1284 refcount_read(&osd->o_ref)); in get_osd()
1285 return osd; in get_osd()
1287 dout("get_osd %p FAIL\n", osd); in get_osd()
1292 static void put_osd(struct ceph_osd *osd) in put_osd() argument
1294 dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref), in put_osd()
1295 refcount_read(&osd->o_ref) - 1); in put_osd()
1296 if (refcount_dec_and_test(&osd->o_ref)) { in put_osd()
1297 osd_cleanup(osd); in put_osd()
1298 kfree(osd); in put_osd()
1302 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node) in DEFINE_RB_FUNCS() argument
1304 static void __move_osd_to_lru(struct ceph_osd *osd) in __move_osd_to_lru() argument
1306 struct ceph_osd_client *osdc = osd->o_osdc; in __move_osd_to_lru()
1308 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in __move_osd_to_lru()
1309 BUG_ON(!list_empty(&osd->o_osd_lru)); in __move_osd_to_lru()
1312 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); in __move_osd_to_lru()
1315 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl; in __move_osd_to_lru()
1318 static void maybe_move_osd_to_lru(struct ceph_osd *osd) in maybe_move_osd_to_lru() argument
1320 if (RB_EMPTY_ROOT(&osd->o_requests) && in maybe_move_osd_to_lru()
1321 RB_EMPTY_ROOT(&osd->o_linger_requests)) in maybe_move_osd_to_lru()
1322 __move_osd_to_lru(osd); in maybe_move_osd_to_lru()
1325 static void __remove_osd_from_lru(struct ceph_osd *osd) in __remove_osd_from_lru() argument
1327 struct ceph_osd_client *osdc = osd->o_osdc; in __remove_osd_from_lru()
1329 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in __remove_osd_from_lru()
1332 if (!list_empty(&osd->o_osd_lru)) in __remove_osd_from_lru()
1333 list_del_init(&osd->o_osd_lru); in __remove_osd_from_lru()
1341 static void close_osd(struct ceph_osd *osd) in close_osd() argument
1343 struct ceph_osd_client *osdc = osd->o_osdc; in close_osd()
1347 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in close_osd()
1349 ceph_con_close(&osd->o_con); in close_osd()
1351 for (n = rb_first(&osd->o_requests); n; ) { in close_osd()
1358 unlink_request(osd, req); in close_osd()
1361 for (n = rb_first(&osd->o_linger_requests); n; ) { in close_osd()
1369 unlink_linger(osd, lreq); in close_osd()
1372 clear_backoffs(osd); in close_osd()
1374 __remove_osd_from_lru(osd); in close_osd()
1375 erase_osd(&osdc->osds, osd); in close_osd()
1376 put_osd(osd); in close_osd()
1380 * reset osd connect
1382 static int reopen_osd(struct ceph_osd *osd) in reopen_osd() argument
1386 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in reopen_osd()
1388 if (RB_EMPTY_ROOT(&osd->o_requests) && in reopen_osd()
1389 RB_EMPTY_ROOT(&osd->o_linger_requests)) { in reopen_osd()
1390 close_osd(osd); in reopen_osd()
1394 peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd]; in reopen_osd()
1395 if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) && in reopen_osd()
1396 !ceph_con_opened(&osd->o_con)) { in reopen_osd()
1399 dout("osd addr hasn't changed and connection never opened, " in reopen_osd()
1402 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { in reopen_osd()
1411 ceph_con_close(&osd->o_con); in reopen_osd()
1412 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr); in reopen_osd()
1413 osd->o_incarnation++; in reopen_osd()
1421 struct ceph_osd *osd; in lookup_create_osd() local
1429 osd = lookup_osd(&osdc->osds, o); in lookup_create_osd()
1431 osd = &osdc->homeless_osd; in lookup_create_osd()
1432 if (!osd) { in lookup_create_osd()
1436 osd = create_osd(osdc, o); in lookup_create_osd()
1437 insert_osd(&osdc->osds, osd); in lookup_create_osd()
1438 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, in lookup_create_osd()
1439 &osdc->osdmap->osd_addr[osd->o_osd]); in lookup_create_osd()
1442 dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd); in lookup_create_osd()
1443 return osd; in lookup_create_osd()
1447 * Create request <-> OSD session relation.
1449 * @req has to be assigned a tid, @osd may be homeless.
1451 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req) in link_request() argument
1453 verify_osd_locked(osd); in link_request()
1455 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, in link_request()
1458 if (!osd_homeless(osd)) in link_request()
1459 __remove_osd_from_lru(osd); in link_request()
1461 atomic_inc(&osd->o_osdc->num_homeless); in link_request()
1463 get_osd(osd); in link_request()
1464 spin_lock(&osd->o_requests_lock); in link_request()
1465 insert_request(&osd->o_requests, req); in link_request()
1466 spin_unlock(&osd->o_requests_lock); in link_request()
1467 req->r_osd = osd; in link_request()
1470 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req) in unlink_request() argument
1472 verify_osd_locked(osd); in unlink_request()
1473 WARN_ON(req->r_osd != osd); in unlink_request()
1474 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, in unlink_request()
1478 spin_lock(&osd->o_requests_lock); in unlink_request()
1479 erase_request(&osd->o_requests, req); in unlink_request()
1480 spin_unlock(&osd->o_requests_lock); in unlink_request()
1481 put_osd(osd); in unlink_request()
1483 if (!osd_homeless(osd)) in unlink_request()
1484 maybe_move_osd_to_lru(osd); in unlink_request()
1486 atomic_dec(&osd->o_osdc->num_homeless); in unlink_request()
1543 dout("%s picked osd%d, primary osd%d\n", __func__, in pick_random_replica()
1573 dout("%s picked osd%d with locality %d, primary osd%d\n", __func__, in pick_closest_replica()
1605 t->osd = CEPH_HOMELESS_OSD; in calc_target()
1630 t->osd = CEPH_HOMELESS_OSD; in calc_target()
1693 t->osd = acting.osds[pos]; in calc_target()
1696 t->osd = acting.primary; in calc_target()
1707 dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused, in calc_target()
1708 legacy_change, force_resend, split, ct_res, t->osd); in calc_target()
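
calc_target() fills t->osd from the PG's acting set, falling back to CEPH_HOMELESS_OSD when there is no usable mapping, and the pick_random_replica()/pick_closest_replica() hits show that balanced reads may go to a non-primary member of that set. A trivial user-space sketch of the random pick, with rand() standing in for the kernel's random helper:

    #include <stdlib.h>

    /* acting[] is the PG's acting set; any member may serve a balanced read. */
    static int pick_random_replica_sketch(const int *acting, int nr)
    {
            return acting[rand() % nr];
    }
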
1735 * ceph_pg_mapping. Used to track OSD backoffs -- a backoff [range] is
1963 * Each backoff has a unique id within its OSD session.
1967 static void clear_backoffs(struct ceph_osd *osd) in clear_backoffs() argument
1969 while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) { in clear_backoffs()
1971 rb_entry(rb_first(&osd->o_backoff_mappings), in clear_backoffs()
1980 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); in clear_backoffs()
1983 erase_spg_mapping(&osd->o_backoff_mappings, spg); in clear_backoffs()
2014 struct ceph_osd *osd = req->r_osd; in should_plug_request() local
2019 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid); in should_plug_request()
2028 dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n", in should_plug_request()
2029 __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool, in should_plug_request()
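
The hits around 1735-2029 describe per-session backoffs: each OSD session keeps them in two rbtrees (by spgid and by id), and should_plug_request() holds a request back when its object falls inside a backoff range for its PG. A simplified user-space sketch of that range check, with plain strings and strcmp() standing in for hobjects and the kernel's hoid comparison:

    #include <string.h>

    struct backoff_range {
            const char *begin;      /* inclusive */
            const char *end;        /* exclusive */
    };

    /* Return 1 if oid falls inside any backoff range recorded for its PG. */
    static int request_plugged(const char *oid,
                               const struct backoff_range *ranges, int nr)
    {
            for (int i = 0; i < nr; i++)
                    if (strcmp(oid, ranges[i].begin) >= 0 &&
                        strcmp(oid, ranges[i].end) < 0)
                            return 1;
            return 0;
    }
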
2224 /* luminous OSD -- encode features and be done */ in encode_request_finish()
2246 * Pre-luminous OSD -- reencode v8 into v4 using @head in encode_request_finish()
2314 struct ceph_osd *osd = req->r_osd; in send_request() local
2316 verify_osd_locked(osd); in send_request()
2317 WARN_ON(osd->o_osd != req->r_t.osd); in send_request()
2338 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n", in send_request()
2341 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags, in send_request()
2348 req->r_sent = osd->o_incarnation; in send_request()
2350 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request)); in send_request()
2380 struct ceph_osd *osd; in __submit_request() local
2394 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked); in __submit_request()
2395 if (IS_ERR(osd)) { in __submit_request()
2396 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked); in __submit_request()
2435 } else if (!osd_homeless(osd)) { in __submit_request()
2441 mutex_lock(&osd->lock); in __submit_request()
2448 link_request(osd, req); in __submit_request()
2453 mutex_unlock(&osd->lock); in __submit_request()
2505 * If an OSD has failed or returned and a request has been sent in finish_request()
2773 WARN_ON(lreq->osd); in linger_release()
2830  * Create linger request <-> OSD session relation.
2832  * @lreq has to be registered, @osd may be homeless.
2834 static void link_linger(struct ceph_osd *osd, in link_linger() argument
2837 verify_osd_locked(osd); in link_linger()
2838 WARN_ON(!lreq->linger_id || lreq->osd); in link_linger()
2839 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, in link_linger()
2840 osd->o_osd, lreq, lreq->linger_id); in link_linger()
2842 if (!osd_homeless(osd)) in link_linger()
2843 __remove_osd_from_lru(osd); in link_linger()
2845 atomic_inc(&osd->o_osdc->num_homeless); in link_linger()
2847 get_osd(osd); in link_linger()
2848 insert_linger(&osd->o_linger_requests, lreq); in link_linger()
2849 lreq->osd = osd; in link_linger()
2852 static void unlink_linger(struct ceph_osd *osd, in unlink_linger() argument
2855 verify_osd_locked(osd); in unlink_linger()
2856 WARN_ON(lreq->osd != osd); in unlink_linger()
2857 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, in unlink_linger()
2858 osd->o_osd, lreq, lreq->linger_id); in unlink_linger()
2860 lreq->osd = NULL; in unlink_linger()
2861 erase_linger(&osd->o_linger_requests, lreq); in unlink_linger()
2862 put_osd(osd); in unlink_linger()
2864 if (!osd_homeless(osd)) in unlink_linger()
2865 maybe_move_osd_to_lru(osd); in unlink_linger()
2867 atomic_dec(&osd->o_osdc->num_homeless); in unlink_linger()
3250 link_request(lreq->osd, req); in send_linger_ping()
3257 struct ceph_osd *osd; in linger_submit() local
3263 osd = lookup_create_osd(osdc, lreq->t.osd, true); in linger_submit()
3264 link_linger(osd, lreq); in linger_submit()
3297 unlink_linger(lreq->osd, lreq); in __linger_cancel()
3420 * Timeout callback, called every N seconds. When 1 or more OSD
3422 * (tag + timestamp) to its OSD to ensure any communications channel
3441 * a connection with that osd (from the fault callback). in handle_timeout()
3444 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in handle_timeout() local
3447 for (p = rb_first(&osd->o_requests); p; ) { in handle_timeout()
3454 dout(" req %p tid %llu on osd%d is laggy\n", in handle_timeout()
3455 req, req->r_tid, osd->o_osd); in handle_timeout()
3460 pr_err_ratelimited("tid %llu on osd%d timeout\n", in handle_timeout()
3461 req->r_tid, osd->o_osd); in handle_timeout()
3465 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { in handle_timeout()
3469 dout(" lreq %p linger_id %llu is served by osd%d\n", in handle_timeout()
3470 lreq, lreq->linger_id, osd->o_osd); in handle_timeout()
3480 list_move_tail(&osd->o_keepalive_item, &slow_osds); in handle_timeout()
3491 pr_err_ratelimited("tid %llu on osd%d timeout\n", in handle_timeout()
3502 struct ceph_osd *osd = list_first_entry(&slow_osds, in handle_timeout() local
3505 list_del_init(&osd->o_keepalive_item); in handle_timeout()
3506 ceph_con_keepalive(&osd->o_con); in handle_timeout()
3520 struct ceph_osd *osd, *nosd; in handle_osds_timeout() local
3524 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { in handle_osds_timeout()
3525 if (time_before(jiffies, osd->lru_ttl)) in handle_osds_timeout()
3528 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); in handle_osds_timeout()
3529 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); in handle_osds_timeout()
3530 close_osd(osd); in handle_osds_timeout()
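
handle_osds_timeout() pairs with __move_osd_to_lru() above: a session with no requests and no linger requests is stamped with lru_ttl = jiffies + osd_idle_ttl when it goes onto the LRU, and the periodic work closes any session whose ttl has passed. A tiny sketch of that expiry test, using a plain counter in place of jiffies and the same wrap-safe comparison idea as time_before():

    struct idle_osd {
            int id;
            unsigned long ttl;      /* "jiffies" at which the session may be closed */
    };

    static int idle_osd_expired(const struct idle_osd *o, unsigned long now)
    {
            /* wrap-safe: equivalent in spirit to !time_before(now, o->ttl) */
            return (long)(now - o->ttl) >= 0;
    }
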
3745 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) in handle_reply() argument
3747 struct ceph_osd_client *osdc = osd->o_osdc; in handle_reply()
3758 if (!osd_registered(osd)) { in handle_reply()
3759 dout("%s osd%d unknown\n", __func__, osd->o_osd); in handle_reply()
3762 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); in handle_reply()
3764 mutex_lock(&osd->lock); in handle_reply()
3765 req = lookup_request(&osd->o_requests, tid); in handle_reply()
3767 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid); in handle_reply()
3799 unlink_request(osd, req); in handle_reply()
3800 mutex_unlock(&osd->lock); in handle_reply()
3817 unlink_request(osd, req); in handle_reply()
3818 mutex_unlock(&osd->lock); in handle_reply()
3863 mutex_unlock(&osd->lock); in handle_reply()
3872 mutex_unlock(&osd->lock); in handle_reply()
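
handle_reply() takes osd->lock, looks the request up by the tid carried in the reply, and either completes it or drops the message if the tid is unknown (e.g. the request was already cancelled). The matching lookup helper generated by DEFINE_RB_FUNCS would look roughly like the sketch below; as with the insert sketch earlier, r_node is an assumed field name.

    static struct ceph_osd_request *
    lookup_request_sketch(struct rb_root *root, u64 tid)
    {
            struct rb_node *n = root->rb_node;

            while (n) {
                    struct ceph_osd_request *req =
                            rb_entry(n, struct ceph_osd_request, r_node);

                    if (tid < req->r_tid)
                            n = n->rb_left;
                    else if (tid > req->r_tid)
                            n = n->rb_right;
                    else
                            return req;
            }

            return NULL;
    }
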
3908 struct ceph_osd *osd; in recalc_linger_target() local
3910 osd = lookup_create_osd(osdc, lreq->t.osd, true); in recalc_linger_target()
3911 if (osd != lreq->osd) { in recalc_linger_target()
3912 unlink_linger(lreq->osd, lreq); in recalc_linger_target()
3913 link_linger(osd, lreq); in recalc_linger_target()
3921 * Requeue requests whose mapping to an OSD has changed.
3923 static void scan_requests(struct ceph_osd *osd, in scan_requests() argument
3930 struct ceph_osd_client *osdc = osd->o_osdc; in scan_requests()
3934 for (n = rb_first(&osd->o_linger_requests); n; ) { in scan_requests()
3970 for (n = rb_first(&osd->o_requests); n; ) { in scan_requests()
3992 unlink_request(osd, req); in scan_requests()
4057 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in handle_one_map() local
4061 scan_requests(osd, skipped_map, was_full, true, need_resend, in handle_one_map()
4063 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || in handle_one_map()
4064 memcmp(&osd->o_con.peer_addr, in handle_one_map()
4065 ceph_osd_addr(osdc->osdmap, osd->o_osd), in handle_one_map()
4067 close_osd(osd); in handle_one_map()
4100 struct ceph_osd *osd; in kick_requests() local
4105 osd = lookup_create_osd(osdc, req->r_t.osd, true); in kick_requests()
4106 link_request(osd, req); in kick_requests()
4108 if (!osd_homeless(osd) && !req->r_t.paused) in kick_requests()
4116 if (!osd_homeless(lreq->osd)) in kick_requests()
4124 * Process updated osd map.
4241 * Resubmit requests pending on the given osd.
4243 static void kick_osd_requests(struct ceph_osd *osd) in kick_osd_requests() argument
4247 clear_backoffs(osd); in kick_osd_requests()
4249 for (n = rb_first(&osd->o_requests); n; ) { in kick_osd_requests()
4262 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) { in kick_osd_requests()
4271 * If the osd connection drops, we need to resubmit all requests.
4275 struct ceph_osd *osd = con->private; in osd_fault() local
4276 struct ceph_osd_client *osdc = osd->o_osdc; in osd_fault()
4278 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in osd_fault()
4281 if (!osd_registered(osd)) { in osd_fault()
4282 dout("%s osd%d unknown\n", __func__, osd->o_osd); in osd_fault()
4286 if (!reopen_osd(osd)) in osd_fault()
4287 kick_osd_requests(osd); in osd_fault()
4391 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m) in handle_backoff_block() argument
4397 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, in handle_backoff_block()
4400 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid); in handle_backoff_block()
4408 insert_spg_mapping(&osd->o_backoff_mappings, spg); in handle_backoff_block()
4424 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff); in handle_backoff_block()
4427 * Ack with original backoff's epoch so that the OSD can in handle_backoff_block()
4435 ceph_con_send(&osd->o_con, msg); in handle_backoff_block()
4450 static void handle_backoff_unblock(struct ceph_osd *osd, in handle_backoff_unblock() argument
4457 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, in handle_backoff_unblock()
4460 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id); in handle_backoff_unblock()
4462 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n", in handle_backoff_unblock()
4463 __func__, osd->o_osd, m->spgid.pgid.pool, in handle_backoff_unblock()
4470 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n", in handle_backoff_unblock()
4471 __func__, osd->o_osd, m->spgid.pgid.pool, in handle_backoff_unblock()
4476 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid); in handle_backoff_unblock()
4480 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); in handle_backoff_unblock()
4484 erase_spg_mapping(&osd->o_backoff_mappings, spg); in handle_backoff_unblock()
4488 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { in handle_backoff_unblock()
4495 * have split on the OSD. in handle_backoff_unblock()
4508 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg) in handle_backoff() argument
4510 struct ceph_osd_client *osdc = osd->o_osdc; in handle_backoff()
4515 if (!osd_registered(osd)) { in handle_backoff()
4516 dout("%s osd%d unknown\n", __func__, osd->o_osd); in handle_backoff()
4520 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); in handle_backoff()
4522 mutex_lock(&osd->lock); in handle_backoff()
4532 handle_backoff_block(osd, &m); in handle_backoff()
4535 handle_backoff_unblock(osd, &m); in handle_backoff()
4538 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op); in handle_backoff()
4545 mutex_unlock(&osd->lock); in handle_backoff()
4550 * Process osd watch notifications
4722 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in ceph_osdc_sync() local
4724 mutex_lock(&osd->lock); in ceph_osdc_sync()
4725 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) { in ceph_osdc_sync()
4736 mutex_unlock(&osd->lock); in ceph_osdc_sync()
4745 mutex_unlock(&osd->lock); in ceph_osdc_sync()
5115 * Execute an OSD class method on an object.
5172 * reset all osd connections
5180 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in ceph_osdc_reopen_osds() local
5183 if (!reopen_osd(osd)) in ceph_osdc_reopen_osds()
5184 kick_osd_requests(osd); in ceph_osdc_reopen_osds()
5271 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), in ceph_osdc_stop() local
5273 close_osd(osd); in ceph_osdc_stop()
5354 struct ceph_osd *osd = con->private; in osd_dispatch() local
5355 struct ceph_osd_client *osdc = osd->o_osdc; in osd_dispatch()
5363 handle_reply(osd, msg); in osd_dispatch()
5366 handle_backoff(osd, msg); in osd_dispatch()
5407 struct ceph_osd *osd = con->private; in get_reply() local
5408 struct ceph_osd_client *osdc = osd->o_osdc; in get_reply()
5417 if (!osd_registered(osd)) { in get_reply()
5418 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd); in get_reply()
5422 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num)); in get_reply()
5424 mutex_lock(&osd->lock); in get_reply()
5425 req = lookup_request(&osd->o_requests, tid); in get_reply()
5427 dout("%s osd%d tid %llu unknown, skipping\n", __func__, in get_reply()
5428 osd->o_osd, tid); in get_reply()
5436 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n", in get_reply()
5437 __func__, osd->o_osd, req->r_tid, front_len, in get_reply()
5449 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n", in get_reply()
5450 __func__, osd->o_osd, req->r_tid, data_len, in get_reply()
5463 mutex_unlock(&osd->lock); in get_reply()
5500 struct ceph_osd *osd = con->private; in osd_alloc_msg() local
5512 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__, in osd_alloc_msg()
5513 osd->o_osd, type); in osd_alloc_msg()
5524 struct ceph_osd *osd = con->private; in osd_get_con() local
5525 if (get_osd(osd)) in osd_get_con()
5532 struct ceph_osd *osd = con->private; in osd_put_con() local
5533 put_osd(osd); in osd_put_con()