1 /* 2 drbd_nl.c 3 4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5 6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. 7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9 10 drbd is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation; either version 2, or (at your option) 13 any later version. 14 15 drbd is distributed in the hope that it will be useful, 16 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 GNU General Public License for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with drbd; see the file COPYING. If not, write to 22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 23 24 */ 25 26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 27 28 #include <linux/module.h> 29 #include <linux/drbd.h> 30 #include <linux/in.h> 31 #include <linux/fs.h> 32 #include <linux/file.h> 33 #include <linux/slab.h> 34 #include <linux/blkpg.h> 35 #include <linux/cpumask.h> 36 #include "drbd_int.h" 37 #include "drbd_protocol.h" 38 #include "drbd_req.h" 39 #include "drbd_state_change.h" 40 #include <asm/unaligned.h> 41 #include <linux/drbd_limits.h> 42 #include <linux/kthread.h> 43 44 #include <net/genetlink.h> 45 46 /* .doit */ 47 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info); 48 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info); 49 50 int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info); 51 int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info); 52 53 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info); 54 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info); 55 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info); 56 57 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info); 58 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info); 59 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info); 60 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info); 61 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info); 62 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info); 63 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info); 64 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info); 65 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info); 66 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info); 67 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info); 68 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info); 69 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info); 70 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info); 71 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info); 72 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info); 73 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info); 74 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info); 75 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info); 76 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info); 77 /* .dumpit */ 78 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb); 79 int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb); 80 int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb); 81 int drbd_adm_dump_devices_done(struct netlink_callback *cb); 82 int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb); 83 int drbd_adm_dump_connections_done(struct netlink_callback *cb); 84 int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb); 85 int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb); 86 int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb); 87 88 #include <linux/drbd_genl_api.h> 89 #include "drbd_nla.h" 90 #include <linux/genl_magic_func.h> 91 92 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */ 93 static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */ 94 95 DEFINE_MUTEX(notification_mutex); 96 97 /* used blkdev_get_by_path, to claim our meta data device(s) */ 98 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device."; 99 100 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) 101 { 102 genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb)))); 103 if (genlmsg_reply(skb, info)) 104 pr_err("error sending genl reply\n"); 105 } 106 107 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only 108 * reason it could fail was no space in skb, and there are 4k available. */ 109 static int drbd_msg_put_info(struct sk_buff *skb, const char *info) 110 { 111 struct nlattr *nla; 112 int err = -EMSGSIZE; 113 114 if (!info || !info[0]) 115 return 0; 116 117 nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY); 118 if (!nla) 119 return err; 120 121 err = nla_put_string(skb, T_info_text, info); 122 if (err) { 123 nla_nest_cancel(skb, nla); 124 return err; 125 } else 126 nla_nest_end(skb, nla); 127 return 0; 128 } 129 130 /* This would be a good candidate for a "pre_doit" hook, 131 * and per-family private info->pointers. 132 * But we need to stay compatible with older kernels. 133 * If it returns successfully, adm_ctx members are valid. 134 * 135 * At this point, we still rely on the global genl_lock(). 136 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need 137 * to add additional synchronization against object destruction/modification. 138 */ 139 #define DRBD_ADM_NEED_MINOR 1 140 #define DRBD_ADM_NEED_RESOURCE 2 141 #define DRBD_ADM_NEED_CONNECTION 4 142 static int drbd_adm_prepare(struct drbd_config_context *adm_ctx, 143 struct sk_buff *skb, struct genl_info *info, unsigned flags) 144 { 145 struct drbd_genlmsghdr *d_in = info->userhdr; 146 const u8 cmd = info->genlhdr->cmd; 147 int err; 148 149 memset(adm_ctx, 0, sizeof(*adm_ctx)); 150 151 /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */ 152 if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN)) 153 return -EPERM; 154 155 adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 156 if (!adm_ctx->reply_skb) { 157 err = -ENOMEM; 158 goto fail; 159 } 160 161 adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb, 162 info, &drbd_genl_family, 0, cmd); 163 /* put of a few bytes into a fresh skb of >= 4k will always succeed. 164 * but anyways */ 165 if (!adm_ctx->reply_dh) { 166 err = -ENOMEM; 167 goto fail; 168 } 169 170 adm_ctx->reply_dh->minor = d_in->minor; 171 adm_ctx->reply_dh->ret_code = NO_ERROR; 172 173 adm_ctx->volume = VOLUME_UNSPECIFIED; 174 if (info->attrs[DRBD_NLA_CFG_CONTEXT]) { 175 struct nlattr *nla; 176 /* parse and validate only */ 177 err = drbd_cfg_context_from_attrs(NULL, info); 178 if (err) 179 goto fail; 180 181 /* It was present, and valid, 182 * copy it over to the reply skb. */ 183 err = nla_put_nohdr(adm_ctx->reply_skb, 184 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len, 185 info->attrs[DRBD_NLA_CFG_CONTEXT]); 186 if (err) 187 goto fail; 188 189 /* and assign stuff to the adm_ctx */ 190 nla = nested_attr_tb[__nla_type(T_ctx_volume)]; 191 if (nla) 192 adm_ctx->volume = nla_get_u32(nla); 193 nla = nested_attr_tb[__nla_type(T_ctx_resource_name)]; 194 if (nla) 195 adm_ctx->resource_name = nla_data(nla); 196 adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)]; 197 adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)]; 198 if ((adm_ctx->my_addr && 199 nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) || 200 (adm_ctx->peer_addr && 201 nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) { 202 err = -EINVAL; 203 goto fail; 204 } 205 } 206 207 adm_ctx->minor = d_in->minor; 208 adm_ctx->device = minor_to_device(d_in->minor); 209 210 /* We are protected by the global genl_lock(). 211 * But we may explicitly drop it/retake it in drbd_adm_set_role(), 212 * so make sure this object stays around. */ 213 if (adm_ctx->device) 214 kref_get(&adm_ctx->device->kref); 215 216 if (adm_ctx->resource_name) { 217 adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name); 218 } 219 220 if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) { 221 drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor"); 222 return ERR_MINOR_INVALID; 223 } 224 if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) { 225 drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource"); 226 if (adm_ctx->resource_name) 227 return ERR_RES_NOT_KNOWN; 228 return ERR_INVALID_REQUEST; 229 } 230 231 if (flags & DRBD_ADM_NEED_CONNECTION) { 232 if (adm_ctx->resource) { 233 drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected"); 234 return ERR_INVALID_REQUEST; 235 } 236 if (adm_ctx->device) { 237 drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected"); 238 return ERR_INVALID_REQUEST; 239 } 240 if (adm_ctx->my_addr && adm_ctx->peer_addr) 241 adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr), 242 nla_len(adm_ctx->my_addr), 243 nla_data(adm_ctx->peer_addr), 244 nla_len(adm_ctx->peer_addr)); 245 if (!adm_ctx->connection) { 246 drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection"); 247 return ERR_INVALID_REQUEST; 248 } 249 } 250 251 /* some more paranoia, if the request was over-determined */ 252 if (adm_ctx->device && adm_ctx->resource && 253 adm_ctx->device->resource != adm_ctx->resource) { 254 pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n", 255 adm_ctx->minor, adm_ctx->resource->name, 256 adm_ctx->device->resource->name); 257 drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource"); 258 return ERR_INVALID_REQUEST; 259 } 260 if (adm_ctx->device && 261 adm_ctx->volume != VOLUME_UNSPECIFIED && 262 adm_ctx->volume != adm_ctx->device->vnr) { 263 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n", 264 adm_ctx->minor, adm_ctx->volume, 265 adm_ctx->device->vnr, 266 adm_ctx->device->resource->name); 267 drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume"); 268 return ERR_INVALID_REQUEST; 269 } 270 271 /* still, provide adm_ctx->resource always, if possible. */ 272 if (!adm_ctx->resource) { 273 adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource 274 : adm_ctx->connection ? adm_ctx->connection->resource : NULL; 275 if (adm_ctx->resource) 276 kref_get(&adm_ctx->resource->kref); 277 } 278 279 return NO_ERROR; 280 281 fail: 282 nlmsg_free(adm_ctx->reply_skb); 283 adm_ctx->reply_skb = NULL; 284 return err; 285 } 286 287 static int drbd_adm_finish(struct drbd_config_context *adm_ctx, 288 struct genl_info *info, int retcode) 289 { 290 if (adm_ctx->device) { 291 kref_put(&adm_ctx->device->kref, drbd_destroy_device); 292 adm_ctx->device = NULL; 293 } 294 if (adm_ctx->connection) { 295 kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection); 296 adm_ctx->connection = NULL; 297 } 298 if (adm_ctx->resource) { 299 kref_put(&adm_ctx->resource->kref, drbd_destroy_resource); 300 adm_ctx->resource = NULL; 301 } 302 303 if (!adm_ctx->reply_skb) 304 return -ENOMEM; 305 306 adm_ctx->reply_dh->ret_code = retcode; 307 drbd_adm_send_reply(adm_ctx->reply_skb, info); 308 return 0; 309 } 310 311 static void setup_khelper_env(struct drbd_connection *connection, char **envp) 312 { 313 char *afs; 314 315 /* FIXME: A future version will not allow this case. */ 316 if (connection->my_addr_len == 0 || connection->peer_addr_len == 0) 317 return; 318 319 switch (((struct sockaddr *)&connection->peer_addr)->sa_family) { 320 case AF_INET6: 321 afs = "ipv6"; 322 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6", 323 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr); 324 break; 325 case AF_INET: 326 afs = "ipv4"; 327 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4", 328 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr); 329 break; 330 default: 331 afs = "ssocks"; 332 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4", 333 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr); 334 } 335 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs); 336 } 337 338 int drbd_khelper(struct drbd_device *device, char *cmd) 339 { 340 char *envp[] = { "HOME=/", 341 "TERM=linux", 342 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", 343 (char[20]) { }, /* address family */ 344 (char[60]) { }, /* address */ 345 NULL }; 346 char mb[12]; 347 char *argv[] = {usermode_helper, cmd, mb, NULL }; 348 struct drbd_connection *connection = first_peer_device(device)->connection; 349 struct sib_info sib; 350 int ret; 351 352 if (current == connection->worker.task) 353 set_bit(CALLBACK_PENDING, &connection->flags); 354 355 snprintf(mb, 12, "minor-%d", device_to_minor(device)); 356 setup_khelper_env(connection, envp); 357 358 /* The helper may take some time. 359 * write out any unsynced meta data changes now */ 360 drbd_md_sync(device); 361 362 drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb); 363 sib.sib_reason = SIB_HELPER_PRE; 364 sib.helper_name = cmd; 365 drbd_bcast_event(device, &sib); 366 notify_helper(NOTIFY_CALL, device, connection, cmd, 0); 367 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); 368 if (ret) 369 drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n", 370 usermode_helper, cmd, mb, 371 (ret >> 8) & 0xff, ret); 372 else 373 drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n", 374 usermode_helper, cmd, mb, 375 (ret >> 8) & 0xff, ret); 376 sib.sib_reason = SIB_HELPER_POST; 377 sib.helper_exit_code = ret; 378 drbd_bcast_event(device, &sib); 379 notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret); 380 381 if (current == connection->worker.task) 382 clear_bit(CALLBACK_PENDING, &connection->flags); 383 384 if (ret < 0) /* Ignore any ERRNOs we got. */ 385 ret = 0; 386 387 return ret; 388 } 389 390 static int conn_khelper(struct drbd_connection *connection, char *cmd) 391 { 392 char *envp[] = { "HOME=/", 393 "TERM=linux", 394 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", 395 (char[20]) { }, /* address family */ 396 (char[60]) { }, /* address */ 397 NULL }; 398 char *resource_name = connection->resource->name; 399 char *argv[] = {usermode_helper, cmd, resource_name, NULL }; 400 int ret; 401 402 setup_khelper_env(connection, envp); 403 conn_md_sync(connection); 404 405 drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name); 406 /* TODO: conn_bcast_event() ?? */ 407 notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0); 408 409 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); 410 if (ret) 411 drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n", 412 usermode_helper, cmd, resource_name, 413 (ret >> 8) & 0xff, ret); 414 else 415 drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n", 416 usermode_helper, cmd, resource_name, 417 (ret >> 8) & 0xff, ret); 418 /* TODO: conn_bcast_event() ?? */ 419 notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret); 420 421 if (ret < 0) /* Ignore any ERRNOs we got. */ 422 ret = 0; 423 424 return ret; 425 } 426 427 static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection) 428 { 429 enum drbd_fencing_p fp = FP_NOT_AVAIL; 430 struct drbd_peer_device *peer_device; 431 int vnr; 432 433 rcu_read_lock(); 434 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 435 struct drbd_device *device = peer_device->device; 436 if (get_ldev_if_state(device, D_CONSISTENT)) { 437 struct disk_conf *disk_conf = 438 rcu_dereference(peer_device->device->ldev->disk_conf); 439 fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing); 440 put_ldev(device); 441 } 442 } 443 rcu_read_unlock(); 444 445 if (fp == FP_NOT_AVAIL) { 446 /* IO Suspending works on the whole resource. 447 Do it only for one device. */ 448 vnr = 0; 449 peer_device = idr_get_next(&connection->peer_devices, &vnr); 450 drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0)); 451 } 452 453 return fp; 454 } 455 456 bool conn_try_outdate_peer(struct drbd_connection *connection) 457 { 458 unsigned int connect_cnt; 459 union drbd_state mask = { }; 460 union drbd_state val = { }; 461 enum drbd_fencing_p fp; 462 char *ex_to_string; 463 int r; 464 465 spin_lock_irq(&connection->resource->req_lock); 466 if (connection->cstate >= C_WF_REPORT_PARAMS) { 467 drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n"); 468 spin_unlock_irq(&connection->resource->req_lock); 469 return false; 470 } 471 472 connect_cnt = connection->connect_cnt; 473 spin_unlock_irq(&connection->resource->req_lock); 474 475 fp = highest_fencing_policy(connection); 476 switch (fp) { 477 case FP_NOT_AVAIL: 478 drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n"); 479 goto out; 480 case FP_DONT_CARE: 481 return true; 482 default: ; 483 } 484 485 r = conn_khelper(connection, "fence-peer"); 486 487 switch ((r>>8) & 0xff) { 488 case 3: /* peer is inconsistent */ 489 ex_to_string = "peer is inconsistent or worse"; 490 mask.pdsk = D_MASK; 491 val.pdsk = D_INCONSISTENT; 492 break; 493 case 4: /* peer got outdated, or was already outdated */ 494 ex_to_string = "peer was fenced"; 495 mask.pdsk = D_MASK; 496 val.pdsk = D_OUTDATED; 497 break; 498 case 5: /* peer was down */ 499 if (conn_highest_disk(connection) == D_UP_TO_DATE) { 500 /* we will(have) create(d) a new UUID anyways... */ 501 ex_to_string = "peer is unreachable, assumed to be dead"; 502 mask.pdsk = D_MASK; 503 val.pdsk = D_OUTDATED; 504 } else { 505 ex_to_string = "peer unreachable, doing nothing since disk != UpToDate"; 506 } 507 break; 508 case 6: /* Peer is primary, voluntarily outdate myself. 509 * This is useful when an unconnected R_SECONDARY is asked to 510 * become R_PRIMARY, but finds the other peer being active. */ 511 ex_to_string = "peer is active"; 512 drbd_warn(connection, "Peer is primary, outdating myself.\n"); 513 mask.disk = D_MASK; 514 val.disk = D_OUTDATED; 515 break; 516 case 7: 517 if (fp != FP_STONITH) 518 drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n"); 519 ex_to_string = "peer was stonithed"; 520 mask.pdsk = D_MASK; 521 val.pdsk = D_OUTDATED; 522 break; 523 default: 524 /* The script is broken ... */ 525 drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); 526 return false; /* Eventually leave IO frozen */ 527 } 528 529 drbd_info(connection, "fence-peer helper returned %d (%s)\n", 530 (r>>8) & 0xff, ex_to_string); 531 532 out: 533 534 /* Not using 535 conn_request_state(connection, mask, val, CS_VERBOSE); 536 here, because we might were able to re-establish the connection in the 537 meantime. */ 538 spin_lock_irq(&connection->resource->req_lock); 539 if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) { 540 if (connection->connect_cnt != connect_cnt) 541 /* In case the connection was established and droped 542 while the fence-peer handler was running, ignore it */ 543 drbd_info(connection, "Ignoring fence-peer exit code\n"); 544 else 545 _conn_request_state(connection, mask, val, CS_VERBOSE); 546 } 547 spin_unlock_irq(&connection->resource->req_lock); 548 549 return conn_highest_pdsk(connection) <= D_OUTDATED; 550 } 551 552 static int _try_outdate_peer_async(void *data) 553 { 554 struct drbd_connection *connection = (struct drbd_connection *)data; 555 556 conn_try_outdate_peer(connection); 557 558 kref_put(&connection->kref, drbd_destroy_connection); 559 return 0; 560 } 561 562 void conn_try_outdate_peer_async(struct drbd_connection *connection) 563 { 564 struct task_struct *opa; 565 566 kref_get(&connection->kref); 567 /* We may just have force_sig()'ed this thread 568 * to get it out of some blocking network function. 569 * Clear signals; otherwise kthread_run(), which internally uses 570 * wait_on_completion_killable(), will mistake our pending signal 571 * for a new fatal signal and fail. */ 572 flush_signals(current); 573 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h"); 574 if (IS_ERR(opa)) { 575 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n"); 576 kref_put(&connection->kref, drbd_destroy_connection); 577 } 578 } 579 580 enum drbd_state_rv 581 drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force) 582 { 583 struct drbd_peer_device *const peer_device = first_peer_device(device); 584 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; 585 const int max_tries = 4; 586 enum drbd_state_rv rv = SS_UNKNOWN_ERROR; 587 struct net_conf *nc; 588 int try = 0; 589 int forced = 0; 590 union drbd_state mask, val; 591 592 if (new_role == R_PRIMARY) { 593 struct drbd_connection *connection; 594 595 /* Detect dead peers as soon as possible. */ 596 597 rcu_read_lock(); 598 for_each_connection(connection, device->resource) 599 request_ping(connection); 600 rcu_read_unlock(); 601 } 602 603 mutex_lock(device->state_mutex); 604 605 mask.i = 0; mask.role = R_MASK; 606 val.i = 0; val.role = new_role; 607 608 while (try++ < max_tries) { 609 rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE); 610 611 /* in case we first succeeded to outdate, 612 * but now suddenly could establish a connection */ 613 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { 614 val.pdsk = 0; 615 mask.pdsk = 0; 616 continue; 617 } 618 619 if (rv == SS_NO_UP_TO_DATE_DISK && force && 620 (device->state.disk < D_UP_TO_DATE && 621 device->state.disk >= D_INCONSISTENT)) { 622 mask.disk = D_MASK; 623 val.disk = D_UP_TO_DATE; 624 forced = 1; 625 continue; 626 } 627 628 if (rv == SS_NO_UP_TO_DATE_DISK && 629 device->state.disk == D_CONSISTENT && mask.pdsk == 0) { 630 D_ASSERT(device, device->state.pdsk == D_UNKNOWN); 631 632 if (conn_try_outdate_peer(connection)) { 633 val.disk = D_UP_TO_DATE; 634 mask.disk = D_MASK; 635 } 636 continue; 637 } 638 639 if (rv == SS_NOTHING_TO_DO) 640 goto out; 641 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { 642 if (!conn_try_outdate_peer(connection) && force) { 643 drbd_warn(device, "Forced into split brain situation!\n"); 644 mask.pdsk = D_MASK; 645 val.pdsk = D_OUTDATED; 646 647 } 648 continue; 649 } 650 if (rv == SS_TWO_PRIMARIES) { 651 /* Maybe the peer is detected as dead very soon... 652 retry at most once more in this case. */ 653 int timeo; 654 rcu_read_lock(); 655 nc = rcu_dereference(connection->net_conf); 656 timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1; 657 rcu_read_unlock(); 658 schedule_timeout_interruptible(timeo); 659 if (try < max_tries) 660 try = max_tries - 1; 661 continue; 662 } 663 if (rv < SS_SUCCESS) { 664 rv = _drbd_request_state(device, mask, val, 665 CS_VERBOSE + CS_WAIT_COMPLETE); 666 if (rv < SS_SUCCESS) 667 goto out; 668 } 669 break; 670 } 671 672 if (rv < SS_SUCCESS) 673 goto out; 674 675 if (forced) 676 drbd_warn(device, "Forced to consider local data as UpToDate!\n"); 677 678 /* Wait until nothing is on the fly :) */ 679 wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0); 680 681 /* FIXME also wait for all pending P_BARRIER_ACK? */ 682 683 if (new_role == R_SECONDARY) { 684 if (get_ldev(device)) { 685 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 686 put_ldev(device); 687 } 688 } else { 689 mutex_lock(&device->resource->conf_update); 690 nc = connection->net_conf; 691 if (nc) 692 nc->discard_my_data = 0; /* without copy; single bit op is atomic */ 693 mutex_unlock(&device->resource->conf_update); 694 695 if (get_ldev(device)) { 696 if (((device->state.conn < C_CONNECTED || 697 device->state.pdsk <= D_FAILED) 698 && device->ldev->md.uuid[UI_BITMAP] == 0) || forced) 699 drbd_uuid_new_current(device); 700 701 device->ldev->md.uuid[UI_CURRENT] |= (u64)1; 702 put_ldev(device); 703 } 704 } 705 706 /* writeout of activity log covered areas of the bitmap 707 * to stable storage done in after state change already */ 708 709 if (device->state.conn >= C_WF_REPORT_PARAMS) { 710 /* if this was forced, we should consider sync */ 711 if (forced) 712 drbd_send_uuids(peer_device); 713 drbd_send_current_state(peer_device); 714 } 715 716 drbd_md_sync(device); 717 set_disk_ro(device->vdisk, new_role == R_SECONDARY); 718 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); 719 out: 720 mutex_unlock(device->state_mutex); 721 return rv; 722 } 723 724 static const char *from_attrs_err_to_txt(int err) 725 { 726 return err == -ENOMSG ? "required attribute missing" : 727 err == -EOPNOTSUPP ? "unknown mandatory attribute" : 728 err == -EEXIST ? "can not change invariant setting" : 729 "invalid attribute value"; 730 } 731 732 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info) 733 { 734 struct drbd_config_context adm_ctx; 735 struct set_role_parms parms; 736 int err; 737 enum drbd_ret_code retcode; 738 739 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 740 if (!adm_ctx.reply_skb) 741 return retcode; 742 if (retcode != NO_ERROR) 743 goto out; 744 745 memset(&parms, 0, sizeof(parms)); 746 if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) { 747 err = set_role_parms_from_attrs(&parms, info); 748 if (err) { 749 retcode = ERR_MANDATORY_TAG; 750 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 751 goto out; 752 } 753 } 754 genl_unlock(); 755 mutex_lock(&adm_ctx.resource->adm_mutex); 756 757 if (info->genlhdr->cmd == DRBD_ADM_PRIMARY) 758 retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate); 759 else 760 retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0); 761 762 mutex_unlock(&adm_ctx.resource->adm_mutex); 763 genl_lock(); 764 out: 765 drbd_adm_finish(&adm_ctx, info, retcode); 766 return 0; 767 } 768 769 /* Initializes the md.*_offset members, so we are able to find 770 * the on disk meta data. 771 * 772 * We currently have two possible layouts: 773 * external: 774 * |----------- md_size_sect ------------------| 775 * [ 4k superblock ][ activity log ][ Bitmap ] 776 * | al_offset == 8 | 777 * | bm_offset = al_offset + X | 778 * ==> bitmap sectors = md_size_sect - bm_offset 779 * 780 * internal: 781 * |----------- md_size_sect ------------------| 782 * [data.....][ Bitmap ][ activity log ][ 4k superblock ] 783 * | al_offset < 0 | 784 * | bm_offset = al_offset - Y | 785 * ==> bitmap sectors = Y = al_offset - bm_offset 786 * 787 * Activity log size used to be fixed 32kB, 788 * but is about to become configurable. 789 */ 790 static void drbd_md_set_sector_offsets(struct drbd_device *device, 791 struct drbd_backing_dev *bdev) 792 { 793 sector_t md_size_sect = 0; 794 unsigned int al_size_sect = bdev->md.al_size_4k * 8; 795 796 bdev->md.md_offset = drbd_md_ss(bdev); 797 798 switch (bdev->md.meta_dev_idx) { 799 default: 800 /* v07 style fixed size indexed meta data */ 801 bdev->md.md_size_sect = MD_128MB_SECT; 802 bdev->md.al_offset = MD_4kB_SECT; 803 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect; 804 break; 805 case DRBD_MD_INDEX_FLEX_EXT: 806 /* just occupy the full device; unit: sectors */ 807 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev); 808 bdev->md.al_offset = MD_4kB_SECT; 809 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect; 810 break; 811 case DRBD_MD_INDEX_INTERNAL: 812 case DRBD_MD_INDEX_FLEX_INT: 813 /* al size is still fixed */ 814 bdev->md.al_offset = -al_size_sect; 815 /* we need (slightly less than) ~ this much bitmap sectors: */ 816 md_size_sect = drbd_get_capacity(bdev->backing_bdev); 817 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT); 818 md_size_sect = BM_SECT_TO_EXT(md_size_sect); 819 md_size_sect = ALIGN(md_size_sect, 8); 820 821 /* plus the "drbd meta data super block", 822 * and the activity log; */ 823 md_size_sect += MD_4kB_SECT + al_size_sect; 824 825 bdev->md.md_size_sect = md_size_sect; 826 /* bitmap offset is adjusted by 'super' block size */ 827 bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT; 828 break; 829 } 830 } 831 832 /* input size is expected to be in KB */ 833 char *ppsize(char *buf, unsigned long long size) 834 { 835 /* Needs 9 bytes at max including trailing NUL: 836 * -1ULL ==> "16384 EB" */ 837 static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; 838 int base = 0; 839 while (size >= 10000 && base < sizeof(units)-1) { 840 /* shift + round */ 841 size = (size >> 10) + !!(size & (1<<9)); 842 base++; 843 } 844 sprintf(buf, "%u %cB", (unsigned)size, units[base]); 845 846 return buf; 847 } 848 849 /* there is still a theoretical deadlock when called from receiver 850 * on an D_INCONSISTENT R_PRIMARY: 851 * remote READ does inc_ap_bio, receiver would need to receive answer 852 * packet from remote to dec_ap_bio again. 853 * receiver receive_sizes(), comes here, 854 * waits for ap_bio_cnt == 0. -> deadlock. 855 * but this cannot happen, actually, because: 856 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable 857 * (not connected, or bad/no disk on peer): 858 * see drbd_fail_request_early, ap_bio_cnt is zero. 859 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET: 860 * peer may not initiate a resize. 861 */ 862 /* Note these are not to be confused with 863 * drbd_adm_suspend_io/drbd_adm_resume_io, 864 * which are (sub) state changes triggered by admin (drbdsetup), 865 * and can be long lived. 866 * This changes an device->flag, is triggered by drbd internals, 867 * and should be short-lived. */ 868 /* It needs to be a counter, since multiple threads might 869 independently suspend and resume IO. */ 870 void drbd_suspend_io(struct drbd_device *device) 871 { 872 atomic_inc(&device->suspend_cnt); 873 if (drbd_suspended(device)) 874 return; 875 wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt)); 876 } 877 878 void drbd_resume_io(struct drbd_device *device) 879 { 880 if (atomic_dec_and_test(&device->suspend_cnt)) 881 wake_up(&device->misc_wait); 882 } 883 884 /** 885 * drbd_determine_dev_size() - Sets the right device size obeying all constraints 886 * @device: DRBD device. 887 * 888 * Returns 0 on success, negative return values indicate errors. 889 * You should call drbd_md_sync() after calling this function. 890 */ 891 enum determine_dev_size 892 drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local) 893 { 894 struct md_offsets_and_sizes { 895 u64 last_agreed_sect; 896 u64 md_offset; 897 s32 al_offset; 898 s32 bm_offset; 899 u32 md_size_sect; 900 901 u32 al_stripes; 902 u32 al_stripe_size_4k; 903 } prev; 904 sector_t u_size, size; 905 struct drbd_md *md = &device->ldev->md; 906 char ppb[10]; 907 void *buffer; 908 909 int md_moved, la_size_changed; 910 enum determine_dev_size rv = DS_UNCHANGED; 911 912 /* We may change the on-disk offsets of our meta data below. Lock out 913 * anything that may cause meta data IO, to avoid acting on incomplete 914 * layout changes or scribbling over meta data that is in the process 915 * of being moved. 916 * 917 * Move is not exactly correct, btw, currently we have all our meta 918 * data in core memory, to "move" it we just write it all out, there 919 * are no reads. */ 920 drbd_suspend_io(device); 921 buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */ 922 if (!buffer) { 923 drbd_resume_io(device); 924 return DS_ERROR; 925 } 926 927 /* remember current offset and sizes */ 928 prev.last_agreed_sect = md->la_size_sect; 929 prev.md_offset = md->md_offset; 930 prev.al_offset = md->al_offset; 931 prev.bm_offset = md->bm_offset; 932 prev.md_size_sect = md->md_size_sect; 933 prev.al_stripes = md->al_stripes; 934 prev.al_stripe_size_4k = md->al_stripe_size_4k; 935 936 if (rs) { 937 /* rs is non NULL if we should change the AL layout only */ 938 md->al_stripes = rs->al_stripes; 939 md->al_stripe_size_4k = rs->al_stripe_size / 4; 940 md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4; 941 } 942 943 drbd_md_set_sector_offsets(device, device->ldev); 944 945 rcu_read_lock(); 946 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size; 947 rcu_read_unlock(); 948 size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED); 949 950 if (size < prev.last_agreed_sect) { 951 if (rs && u_size == 0) { 952 /* Remove "rs &&" later. This check should always be active, but 953 right now the receiver expects the permissive behavior */ 954 drbd_warn(device, "Implicit shrink not allowed. " 955 "Use --size=%llus for explicit shrink.\n", 956 (unsigned long long)size); 957 rv = DS_ERROR_SHRINK; 958 } 959 if (u_size > size) 960 rv = DS_ERROR_SPACE_MD; 961 if (rv != DS_UNCHANGED) 962 goto err_out; 963 } 964 965 if (drbd_get_capacity(device->this_bdev) != size || 966 drbd_bm_capacity(device) != size) { 967 int err; 968 err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC)); 969 if (unlikely(err)) { 970 /* currently there is only one error: ENOMEM! */ 971 size = drbd_bm_capacity(device); 972 if (size == 0) { 973 drbd_err(device, "OUT OF MEMORY! " 974 "Could not allocate bitmap!\n"); 975 } else { 976 drbd_err(device, "BM resizing failed. " 977 "Leaving size unchanged\n"); 978 } 979 rv = DS_ERROR; 980 } 981 /* racy, see comments above. */ 982 drbd_set_my_capacity(device, size); 983 md->la_size_sect = size; 984 drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), 985 (unsigned long long)size>>1); 986 } 987 if (rv <= DS_ERROR) 988 goto err_out; 989 990 la_size_changed = (prev.last_agreed_sect != md->la_size_sect); 991 992 md_moved = prev.md_offset != md->md_offset 993 || prev.md_size_sect != md->md_size_sect; 994 995 if (la_size_changed || md_moved || rs) { 996 u32 prev_flags; 997 998 /* We do some synchronous IO below, which may take some time. 999 * Clear the timer, to avoid scary "timer expired!" messages, 1000 * "Superblock" is written out at least twice below, anyways. */ 1001 del_timer(&device->md_sync_timer); 1002 1003 /* We won't change the "al-extents" setting, we just may need 1004 * to move the on-disk location of the activity log ringbuffer. 1005 * Lock for transaction is good enough, it may well be "dirty" 1006 * or even "starving". */ 1007 wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log)); 1008 1009 /* mark current on-disk bitmap and activity log as unreliable */ 1010 prev_flags = md->flags; 1011 md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED; 1012 drbd_md_write(device, buffer); 1013 1014 drbd_al_initialize(device, buffer); 1015 1016 drbd_info(device, "Writing the whole bitmap, %s\n", 1017 la_size_changed && md_moved ? "size changed and md moved" : 1018 la_size_changed ? "size changed" : "md moved"); 1019 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ 1020 drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write, 1021 "size changed", BM_LOCKED_MASK); 1022 1023 /* on-disk bitmap and activity log is authoritative again 1024 * (unless there was an IO error meanwhile...) */ 1025 md->flags = prev_flags; 1026 drbd_md_write(device, buffer); 1027 1028 if (rs) 1029 drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n", 1030 md->al_stripes, md->al_stripe_size_4k * 4); 1031 } 1032 1033 if (size > prev.last_agreed_sect) 1034 rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO; 1035 if (size < prev.last_agreed_sect) 1036 rv = DS_SHRUNK; 1037 1038 if (0) { 1039 err_out: 1040 /* restore previous offset and sizes */ 1041 md->la_size_sect = prev.last_agreed_sect; 1042 md->md_offset = prev.md_offset; 1043 md->al_offset = prev.al_offset; 1044 md->bm_offset = prev.bm_offset; 1045 md->md_size_sect = prev.md_size_sect; 1046 md->al_stripes = prev.al_stripes; 1047 md->al_stripe_size_4k = prev.al_stripe_size_4k; 1048 md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k; 1049 } 1050 lc_unlock(device->act_log); 1051 wake_up(&device->al_wait); 1052 drbd_md_put_buffer(device); 1053 drbd_resume_io(device); 1054 1055 return rv; 1056 } 1057 1058 sector_t 1059 drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev, 1060 sector_t u_size, int assume_peer_has_space) 1061 { 1062 sector_t p_size = device->p_size; /* partner's disk size. */ 1063 sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */ 1064 sector_t m_size; /* my size */ 1065 sector_t size = 0; 1066 1067 m_size = drbd_get_max_capacity(bdev); 1068 1069 if (device->state.conn < C_CONNECTED && assume_peer_has_space) { 1070 drbd_warn(device, "Resize while not connected was forced by the user!\n"); 1071 p_size = m_size; 1072 } 1073 1074 if (p_size && m_size) { 1075 size = min_t(sector_t, p_size, m_size); 1076 } else { 1077 if (la_size_sect) { 1078 size = la_size_sect; 1079 if (m_size && m_size < size) 1080 size = m_size; 1081 if (p_size && p_size < size) 1082 size = p_size; 1083 } else { 1084 if (m_size) 1085 size = m_size; 1086 if (p_size) 1087 size = p_size; 1088 } 1089 } 1090 1091 if (size == 0) 1092 drbd_err(device, "Both nodes diskless!\n"); 1093 1094 if (u_size) { 1095 if (u_size > size) 1096 drbd_err(device, "Requested disk size is too big (%lu > %lu)\n", 1097 (unsigned long)u_size>>1, (unsigned long)size>>1); 1098 else 1099 size = u_size; 1100 } 1101 1102 return size; 1103 } 1104 1105 /** 1106 * drbd_check_al_size() - Ensures that the AL is of the right size 1107 * @device: DRBD device. 1108 * 1109 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation 1110 * failed, and 0 on success. You should call drbd_md_sync() after you called 1111 * this function. 1112 */ 1113 static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc) 1114 { 1115 struct lru_cache *n, *t; 1116 struct lc_element *e; 1117 unsigned int in_use; 1118 int i; 1119 1120 if (device->act_log && 1121 device->act_log->nr_elements == dc->al_extents) 1122 return 0; 1123 1124 in_use = 0; 1125 t = device->act_log; 1126 n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION, 1127 dc->al_extents, sizeof(struct lc_element), 0); 1128 1129 if (n == NULL) { 1130 drbd_err(device, "Cannot allocate act_log lru!\n"); 1131 return -ENOMEM; 1132 } 1133 spin_lock_irq(&device->al_lock); 1134 if (t) { 1135 for (i = 0; i < t->nr_elements; i++) { 1136 e = lc_element_by_index(t, i); 1137 if (e->refcnt) 1138 drbd_err(device, "refcnt(%d)==%d\n", 1139 e->lc_number, e->refcnt); 1140 in_use += e->refcnt; 1141 } 1142 } 1143 if (!in_use) 1144 device->act_log = n; 1145 spin_unlock_irq(&device->al_lock); 1146 if (in_use) { 1147 drbd_err(device, "Activity log still in use!\n"); 1148 lc_destroy(n); 1149 return -EBUSY; 1150 } else { 1151 lc_destroy(t); 1152 } 1153 drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elemens */ 1154 return 0; 1155 } 1156 1157 static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev, 1158 unsigned int max_bio_size) 1159 { 1160 struct request_queue * const q = device->rq_queue; 1161 unsigned int max_hw_sectors = max_bio_size >> 9; 1162 unsigned int max_segments = 0; 1163 struct request_queue *b = NULL; 1164 1165 if (bdev) { 1166 b = bdev->backing_bdev->bd_disk->queue; 1167 1168 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); 1169 rcu_read_lock(); 1170 max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs; 1171 rcu_read_unlock(); 1172 1173 blk_set_stacking_limits(&q->limits); 1174 blk_queue_max_write_same_sectors(q, 0); 1175 } 1176 1177 blk_queue_logical_block_size(q, 512); 1178 blk_queue_max_hw_sectors(q, max_hw_sectors); 1179 /* This is the workaround for "bio would need to, but cannot, be split" */ 1180 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 1181 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); 1182 1183 if (b) { 1184 struct drbd_connection *connection = first_peer_device(device)->connection; 1185 1186 blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS); 1187 1188 if (blk_queue_discard(b) && 1189 (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) { 1190 /* We don't care, stacking below should fix it for the local device. 1191 * Whether or not it is a suitable granularity on the remote device 1192 * is not our problem, really. If you care, you need to 1193 * use devices with similar topology on all peers. */ 1194 q->limits.discard_granularity = 512; 1195 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 1196 } else { 1197 blk_queue_max_discard_sectors(q, 0); 1198 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); 1199 q->limits.discard_granularity = 0; 1200 } 1201 1202 blk_queue_stack_limits(q, b); 1203 1204 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 1205 drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", 1206 q->backing_dev_info.ra_pages, 1207 b->backing_dev_info.ra_pages); 1208 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; 1209 } 1210 } 1211 /* To avoid confusion, if this queue does not support discard, clear 1212 * max_discard_sectors, which is what lsblk -D reports to the user. */ 1213 if (!blk_queue_discard(q)) { 1214 blk_queue_max_discard_sectors(q, 0); 1215 q->limits.discard_granularity = 0; 1216 } 1217 } 1218 1219 void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev) 1220 { 1221 unsigned int now, new, local, peer; 1222 1223 now = queue_max_hw_sectors(device->rq_queue) << 9; 1224 local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */ 1225 peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */ 1226 1227 if (bdev) { 1228 local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9; 1229 device->local_max_bio_size = local; 1230 } 1231 local = min(local, DRBD_MAX_BIO_SIZE); 1232 1233 /* We may ignore peer limits if the peer is modern enough. 1234 Because new from 8.3.8 onwards the peer can use multiple 1235 BIOs for a single peer_request */ 1236 if (device->state.conn >= C_WF_REPORT_PARAMS) { 1237 if (first_peer_device(device)->connection->agreed_pro_version < 94) 1238 peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); 1239 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ 1240 else if (first_peer_device(device)->connection->agreed_pro_version == 94) 1241 peer = DRBD_MAX_SIZE_H80_PACKET; 1242 else if (first_peer_device(device)->connection->agreed_pro_version < 100) 1243 peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */ 1244 else 1245 peer = DRBD_MAX_BIO_SIZE; 1246 1247 /* We may later detach and re-attach on a disconnected Primary. 1248 * Avoid this setting to jump back in that case. 1249 * We want to store what we know the peer DRBD can handle, 1250 * not what the peer IO backend can handle. */ 1251 if (peer > device->peer_max_bio_size) 1252 device->peer_max_bio_size = peer; 1253 } 1254 new = min(local, peer); 1255 1256 if (device->state.role == R_PRIMARY && new < now) 1257 drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now); 1258 1259 if (new != now) 1260 drbd_info(device, "max BIO size = %u\n", new); 1261 1262 drbd_setup_queue_param(device, bdev, new); 1263 } 1264 1265 /* Starts the worker thread */ 1266 static void conn_reconfig_start(struct drbd_connection *connection) 1267 { 1268 drbd_thread_start(&connection->worker); 1269 drbd_flush_workqueue(&connection->sender_work); 1270 } 1271 1272 /* if still unconfigured, stops worker again. */ 1273 static void conn_reconfig_done(struct drbd_connection *connection) 1274 { 1275 bool stop_threads; 1276 spin_lock_irq(&connection->resource->req_lock); 1277 stop_threads = conn_all_vols_unconf(connection) && 1278 connection->cstate == C_STANDALONE; 1279 spin_unlock_irq(&connection->resource->req_lock); 1280 if (stop_threads) { 1281 /* ack_receiver thread and ack_sender workqueue are implicitly 1282 * stopped by receiver in conn_disconnect() */ 1283 drbd_thread_stop(&connection->receiver); 1284 drbd_thread_stop(&connection->worker); 1285 } 1286 } 1287 1288 /* Make sure IO is suspended before calling this function(). */ 1289 static void drbd_suspend_al(struct drbd_device *device) 1290 { 1291 int s = 0; 1292 1293 if (!lc_try_lock(device->act_log)) { 1294 drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n"); 1295 return; 1296 } 1297 1298 drbd_al_shrink(device); 1299 spin_lock_irq(&device->resource->req_lock); 1300 if (device->state.conn < C_CONNECTED) 1301 s = !test_and_set_bit(AL_SUSPENDED, &device->flags); 1302 spin_unlock_irq(&device->resource->req_lock); 1303 lc_unlock(device->act_log); 1304 1305 if (s) 1306 drbd_info(device, "Suspended AL updates\n"); 1307 } 1308 1309 1310 static bool should_set_defaults(struct genl_info *info) 1311 { 1312 unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags; 1313 return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS); 1314 } 1315 1316 static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev) 1317 { 1318 /* This is limited by 16 bit "slot" numbers, 1319 * and by available on-disk context storage. 1320 * 1321 * Also (u16)~0 is special (denotes a "free" extent). 1322 * 1323 * One transaction occupies one 4kB on-disk block, 1324 * we have n such blocks in the on disk ring buffer, 1325 * the "current" transaction may fail (n-1), 1326 * and there is 919 slot numbers context information per transaction. 1327 * 1328 * 72 transaction blocks amounts to more than 2**16 context slots, 1329 * so cap there first. 1330 */ 1331 const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX; 1332 const unsigned int sufficient_on_disk = 1333 (max_al_nr + AL_CONTEXT_PER_TRANSACTION -1) 1334 /AL_CONTEXT_PER_TRANSACTION; 1335 1336 unsigned int al_size_4k = bdev->md.al_size_4k; 1337 1338 if (al_size_4k > sufficient_on_disk) 1339 return max_al_nr; 1340 1341 return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION; 1342 } 1343 1344 static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b) 1345 { 1346 return a->disk_barrier != b->disk_barrier || 1347 a->disk_flushes != b->disk_flushes || 1348 a->disk_drain != b->disk_drain; 1349 } 1350 1351 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) 1352 { 1353 struct drbd_config_context adm_ctx; 1354 enum drbd_ret_code retcode; 1355 struct drbd_device *device; 1356 struct disk_conf *new_disk_conf, *old_disk_conf; 1357 struct fifo_buffer *old_plan = NULL, *new_plan = NULL; 1358 int err, fifo_size; 1359 1360 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 1361 if (!adm_ctx.reply_skb) 1362 return retcode; 1363 if (retcode != NO_ERROR) 1364 goto finish; 1365 1366 device = adm_ctx.device; 1367 mutex_lock(&adm_ctx.resource->adm_mutex); 1368 1369 /* we also need a disk 1370 * to change the options on */ 1371 if (!get_ldev(device)) { 1372 retcode = ERR_NO_DISK; 1373 goto out; 1374 } 1375 1376 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL); 1377 if (!new_disk_conf) { 1378 retcode = ERR_NOMEM; 1379 goto fail; 1380 } 1381 1382 mutex_lock(&device->resource->conf_update); 1383 old_disk_conf = device->ldev->disk_conf; 1384 *new_disk_conf = *old_disk_conf; 1385 if (should_set_defaults(info)) 1386 set_disk_conf_defaults(new_disk_conf); 1387 1388 err = disk_conf_from_attrs_for_change(new_disk_conf, info); 1389 if (err && err != -ENOMSG) { 1390 retcode = ERR_MANDATORY_TAG; 1391 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 1392 goto fail_unlock; 1393 } 1394 1395 if (!expect(new_disk_conf->resync_rate >= 1)) 1396 new_disk_conf->resync_rate = 1; 1397 1398 if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN) 1399 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN; 1400 if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev)) 1401 new_disk_conf->al_extents = drbd_al_extents_max(device->ldev); 1402 1403 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX) 1404 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX; 1405 1406 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ; 1407 if (fifo_size != device->rs_plan_s->size) { 1408 new_plan = fifo_alloc(fifo_size); 1409 if (!new_plan) { 1410 drbd_err(device, "kmalloc of fifo_buffer failed"); 1411 retcode = ERR_NOMEM; 1412 goto fail_unlock; 1413 } 1414 } 1415 1416 drbd_suspend_io(device); 1417 wait_event(device->al_wait, lc_try_lock(device->act_log)); 1418 drbd_al_shrink(device); 1419 err = drbd_check_al_size(device, new_disk_conf); 1420 lc_unlock(device->act_log); 1421 wake_up(&device->al_wait); 1422 drbd_resume_io(device); 1423 1424 if (err) { 1425 retcode = ERR_NOMEM; 1426 goto fail_unlock; 1427 } 1428 1429 lock_all_resources(); 1430 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after); 1431 if (retcode == NO_ERROR) { 1432 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 1433 drbd_resync_after_changed(device); 1434 } 1435 unlock_all_resources(); 1436 1437 if (retcode != NO_ERROR) 1438 goto fail_unlock; 1439 1440 if (new_plan) { 1441 old_plan = device->rs_plan_s; 1442 rcu_assign_pointer(device->rs_plan_s, new_plan); 1443 } 1444 1445 mutex_unlock(&device->resource->conf_update); 1446 1447 if (new_disk_conf->al_updates) 1448 device->ldev->md.flags &= ~MDF_AL_DISABLED; 1449 else 1450 device->ldev->md.flags |= MDF_AL_DISABLED; 1451 1452 if (new_disk_conf->md_flushes) 1453 clear_bit(MD_NO_FUA, &device->flags); 1454 else 1455 set_bit(MD_NO_FUA, &device->flags); 1456 1457 if (write_ordering_changed(old_disk_conf, new_disk_conf)) 1458 drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH); 1459 1460 drbd_md_sync(device); 1461 1462 if (device->state.conn >= C_CONNECTED) { 1463 struct drbd_peer_device *peer_device; 1464 1465 for_each_peer_device(peer_device, device) 1466 drbd_send_sync_param(peer_device); 1467 } 1468 1469 synchronize_rcu(); 1470 kfree(old_disk_conf); 1471 kfree(old_plan); 1472 mod_timer(&device->request_timer, jiffies + HZ); 1473 goto success; 1474 1475 fail_unlock: 1476 mutex_unlock(&device->resource->conf_update); 1477 fail: 1478 kfree(new_disk_conf); 1479 kfree(new_plan); 1480 success: 1481 put_ldev(device); 1482 out: 1483 mutex_unlock(&adm_ctx.resource->adm_mutex); 1484 finish: 1485 drbd_adm_finish(&adm_ctx, info, retcode); 1486 return 0; 1487 } 1488 1489 static struct block_device *open_backing_dev(struct drbd_device *device, 1490 const char *bdev_path, void *claim_ptr, bool do_bd_link) 1491 { 1492 struct block_device *bdev; 1493 int err = 0; 1494 1495 bdev = blkdev_get_by_path(bdev_path, 1496 FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr); 1497 if (IS_ERR(bdev)) { 1498 drbd_err(device, "open(\"%s\") failed with %ld\n", 1499 bdev_path, PTR_ERR(bdev)); 1500 return bdev; 1501 } 1502 1503 if (!do_bd_link) 1504 return bdev; 1505 1506 err = bd_link_disk_holder(bdev, device->vdisk); 1507 if (err) { 1508 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 1509 drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n", 1510 bdev_path, err); 1511 bdev = ERR_PTR(err); 1512 } 1513 return bdev; 1514 } 1515 1516 static int open_backing_devices(struct drbd_device *device, 1517 struct disk_conf *new_disk_conf, 1518 struct drbd_backing_dev *nbc) 1519 { 1520 struct block_device *bdev; 1521 1522 bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true); 1523 if (IS_ERR(bdev)) 1524 return ERR_OPEN_DISK; 1525 nbc->backing_bdev = bdev; 1526 1527 /* 1528 * meta_dev_idx >= 0: external fixed size, possibly multiple 1529 * drbd sharing one meta device. TODO in that case, paranoia 1530 * check that [md_bdev, meta_dev_idx] is not yet used by some 1531 * other drbd minor! (if you use drbd.conf + drbdadm, that 1532 * should check it for you already; but if you don't, or 1533 * someone fooled it, we need to double check here) 1534 */ 1535 bdev = open_backing_dev(device, new_disk_conf->meta_dev, 1536 /* claim ptr: device, if claimed exclusively; shared drbd_m_holder, 1537 * if potentially shared with other drbd minors */ 1538 (new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder, 1539 /* avoid double bd_claim_by_disk() for the same (source,target) tuple, 1540 * as would happen with internal metadata. */ 1541 (new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT && 1542 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL)); 1543 if (IS_ERR(bdev)) 1544 return ERR_OPEN_MD_DISK; 1545 nbc->md_bdev = bdev; 1546 return NO_ERROR; 1547 } 1548 1549 static void close_backing_dev(struct drbd_device *device, struct block_device *bdev, 1550 bool do_bd_unlink) 1551 { 1552 if (!bdev) 1553 return; 1554 if (do_bd_unlink) 1555 bd_unlink_disk_holder(bdev, device->vdisk); 1556 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 1557 } 1558 1559 void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev) 1560 { 1561 if (ldev == NULL) 1562 return; 1563 1564 close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev); 1565 close_backing_dev(device, ldev->backing_bdev, true); 1566 1567 kfree(ldev->disk_conf); 1568 kfree(ldev); 1569 } 1570 1571 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) 1572 { 1573 struct drbd_config_context adm_ctx; 1574 struct drbd_device *device; 1575 struct drbd_peer_device *peer_device; 1576 struct drbd_connection *connection; 1577 int err; 1578 enum drbd_ret_code retcode; 1579 enum determine_dev_size dd; 1580 sector_t max_possible_sectors; 1581 sector_t min_md_device_sectors; 1582 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */ 1583 struct disk_conf *new_disk_conf = NULL; 1584 struct lru_cache *resync_lru = NULL; 1585 struct fifo_buffer *new_plan = NULL; 1586 union drbd_state ns, os; 1587 enum drbd_state_rv rv; 1588 struct net_conf *nc; 1589 1590 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 1591 if (!adm_ctx.reply_skb) 1592 return retcode; 1593 if (retcode != NO_ERROR) 1594 goto finish; 1595 1596 device = adm_ctx.device; 1597 mutex_lock(&adm_ctx.resource->adm_mutex); 1598 peer_device = first_peer_device(device); 1599 connection = peer_device->connection; 1600 conn_reconfig_start(connection); 1601 1602 /* if you want to reconfigure, please tear down first */ 1603 if (device->state.disk > D_DISKLESS) { 1604 retcode = ERR_DISK_CONFIGURED; 1605 goto fail; 1606 } 1607 /* It may just now have detached because of IO error. Make sure 1608 * drbd_ldev_destroy is done already, we may end up here very fast, 1609 * e.g. if someone calls attach from the on-io-error handler, 1610 * to realize a "hot spare" feature (not that I'd recommend that) */ 1611 wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags)); 1612 1613 /* make sure there is no leftover from previous force-detach attempts */ 1614 clear_bit(FORCE_DETACH, &device->flags); 1615 clear_bit(WAS_IO_ERROR, &device->flags); 1616 clear_bit(WAS_READ_ERROR, &device->flags); 1617 1618 /* and no leftover from previously aborted resync or verify, either */ 1619 device->rs_total = 0; 1620 device->rs_failed = 0; 1621 atomic_set(&device->rs_pending_cnt, 0); 1622 1623 /* allocation not in the IO path, drbdsetup context */ 1624 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); 1625 if (!nbc) { 1626 retcode = ERR_NOMEM; 1627 goto fail; 1628 } 1629 spin_lock_init(&nbc->md.uuid_lock); 1630 1631 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); 1632 if (!new_disk_conf) { 1633 retcode = ERR_NOMEM; 1634 goto fail; 1635 } 1636 nbc->disk_conf = new_disk_conf; 1637 1638 set_disk_conf_defaults(new_disk_conf); 1639 err = disk_conf_from_attrs(new_disk_conf, info); 1640 if (err) { 1641 retcode = ERR_MANDATORY_TAG; 1642 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 1643 goto fail; 1644 } 1645 1646 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX) 1647 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX; 1648 1649 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ); 1650 if (!new_plan) { 1651 retcode = ERR_NOMEM; 1652 goto fail; 1653 } 1654 1655 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) { 1656 retcode = ERR_MD_IDX_INVALID; 1657 goto fail; 1658 } 1659 1660 rcu_read_lock(); 1661 nc = rcu_dereference(connection->net_conf); 1662 if (nc) { 1663 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) { 1664 rcu_read_unlock(); 1665 retcode = ERR_STONITH_AND_PROT_A; 1666 goto fail; 1667 } 1668 } 1669 rcu_read_unlock(); 1670 1671 retcode = open_backing_devices(device, new_disk_conf, nbc); 1672 if (retcode != NO_ERROR) 1673 goto fail; 1674 1675 if ((nbc->backing_bdev == nbc->md_bdev) != 1676 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL || 1677 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) { 1678 retcode = ERR_MD_IDX_INVALID; 1679 goto fail; 1680 } 1681 1682 resync_lru = lc_create("resync", drbd_bm_ext_cache, 1683 1, 61, sizeof(struct bm_extent), 1684 offsetof(struct bm_extent, lce)); 1685 if (!resync_lru) { 1686 retcode = ERR_NOMEM; 1687 goto fail; 1688 } 1689 1690 /* Read our meta data super block early. 1691 * This also sets other on-disk offsets. */ 1692 retcode = drbd_md_read(device, nbc); 1693 if (retcode != NO_ERROR) 1694 goto fail; 1695 1696 if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN) 1697 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN; 1698 if (new_disk_conf->al_extents > drbd_al_extents_max(nbc)) 1699 new_disk_conf->al_extents = drbd_al_extents_max(nbc); 1700 1701 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) { 1702 drbd_err(device, "max capacity %llu smaller than disk size %llu\n", 1703 (unsigned long long) drbd_get_max_capacity(nbc), 1704 (unsigned long long) new_disk_conf->disk_size); 1705 retcode = ERR_DISK_TOO_SMALL; 1706 goto fail; 1707 } 1708 1709 if (new_disk_conf->meta_dev_idx < 0) { 1710 max_possible_sectors = DRBD_MAX_SECTORS_FLEX; 1711 /* at least one MB, otherwise it does not make sense */ 1712 min_md_device_sectors = (2<<10); 1713 } else { 1714 max_possible_sectors = DRBD_MAX_SECTORS; 1715 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1); 1716 } 1717 1718 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) { 1719 retcode = ERR_MD_DISK_TOO_SMALL; 1720 drbd_warn(device, "refusing attach: md-device too small, " 1721 "at least %llu sectors needed for this meta-disk type\n", 1722 (unsigned long long) min_md_device_sectors); 1723 goto fail; 1724 } 1725 1726 /* Make sure the new disk is big enough 1727 * (we may currently be R_PRIMARY with no local disk...) */ 1728 if (drbd_get_max_capacity(nbc) < 1729 drbd_get_capacity(device->this_bdev)) { 1730 retcode = ERR_DISK_TOO_SMALL; 1731 goto fail; 1732 } 1733 1734 nbc->known_size = drbd_get_capacity(nbc->backing_bdev); 1735 1736 if (nbc->known_size > max_possible_sectors) { 1737 drbd_warn(device, "==> truncating very big lower level device " 1738 "to currently maximum possible %llu sectors <==\n", 1739 (unsigned long long) max_possible_sectors); 1740 if (new_disk_conf->meta_dev_idx >= 0) 1741 drbd_warn(device, "==>> using internal or flexible " 1742 "meta data may help <<==\n"); 1743 } 1744 1745 drbd_suspend_io(device); 1746 /* also wait for the last barrier ack. */ 1747 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171 1748 * We need a way to either ignore barrier acks for barriers sent before a device 1749 * was attached, or a way to wait for all pending barrier acks to come in. 1750 * As barriers are counted per resource, 1751 * we'd need to suspend io on all devices of a resource. 1752 */ 1753 wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device)); 1754 /* and for any other previously queued work */ 1755 drbd_flush_workqueue(&connection->sender_work); 1756 1757 rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE); 1758 retcode = rv; /* FIXME: Type mismatch. */ 1759 drbd_resume_io(device); 1760 if (rv < SS_SUCCESS) 1761 goto fail; 1762 1763 if (!get_ldev_if_state(device, D_ATTACHING)) 1764 goto force_diskless; 1765 1766 if (!device->bitmap) { 1767 if (drbd_bm_init(device)) { 1768 retcode = ERR_NOMEM; 1769 goto force_diskless_dec; 1770 } 1771 } 1772 1773 if (device->state.conn < C_CONNECTED && 1774 device->state.role == R_PRIMARY && device->ed_uuid && 1775 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { 1776 drbd_err(device, "Can only attach to data with current UUID=%016llX\n", 1777 (unsigned long long)device->ed_uuid); 1778 retcode = ERR_DATA_NOT_CURRENT; 1779 goto force_diskless_dec; 1780 } 1781 1782 /* Since we are diskless, fix the activity log first... */ 1783 if (drbd_check_al_size(device, new_disk_conf)) { 1784 retcode = ERR_NOMEM; 1785 goto force_diskless_dec; 1786 } 1787 1788 /* Prevent shrinking of consistent devices ! */ 1789 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && 1790 drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) { 1791 drbd_warn(device, "refusing to truncate a consistent device\n"); 1792 retcode = ERR_DISK_TOO_SMALL; 1793 goto force_diskless_dec; 1794 } 1795 1796 lock_all_resources(); 1797 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after); 1798 if (retcode != NO_ERROR) { 1799 unlock_all_resources(); 1800 goto force_diskless_dec; 1801 } 1802 1803 /* Reset the "barriers don't work" bits here, then force meta data to 1804 * be written, to ensure we determine if barriers are supported. */ 1805 if (new_disk_conf->md_flushes) 1806 clear_bit(MD_NO_FUA, &device->flags); 1807 else 1808 set_bit(MD_NO_FUA, &device->flags); 1809 1810 /* Point of no return reached. 1811 * Devices and memory are no longer released by error cleanup below. 1812 * now device takes over responsibility, and the state engine should 1813 * clean it up somewhere. */ 1814 D_ASSERT(device, device->ldev == NULL); 1815 device->ldev = nbc; 1816 device->resync = resync_lru; 1817 device->rs_plan_s = new_plan; 1818 nbc = NULL; 1819 resync_lru = NULL; 1820 new_disk_conf = NULL; 1821 new_plan = NULL; 1822 1823 drbd_resync_after_changed(device); 1824 drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH); 1825 unlock_all_resources(); 1826 1827 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY)) 1828 set_bit(CRASHED_PRIMARY, &device->flags); 1829 else 1830 clear_bit(CRASHED_PRIMARY, &device->flags); 1831 1832 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) && 1833 !(device->state.role == R_PRIMARY && device->resource->susp_nod)) 1834 set_bit(CRASHED_PRIMARY, &device->flags); 1835 1836 device->send_cnt = 0; 1837 device->recv_cnt = 0; 1838 device->read_cnt = 0; 1839 device->writ_cnt = 0; 1840 1841 drbd_reconsider_max_bio_size(device, device->ldev); 1842 1843 /* If I am currently not R_PRIMARY, 1844 * but meta data primary indicator is set, 1845 * I just now recover from a hard crash, 1846 * and have been R_PRIMARY before that crash. 1847 * 1848 * Now, if I had no connection before that crash 1849 * (have been degraded R_PRIMARY), chances are that 1850 * I won't find my peer now either. 1851 * 1852 * In that case, and _only_ in that case, 1853 * we use the degr-wfc-timeout instead of the default, 1854 * so we can automatically recover from a crash of a 1855 * degraded but active "cluster" after a certain timeout. 1856 */ 1857 clear_bit(USE_DEGR_WFC_T, &device->flags); 1858 if (device->state.role != R_PRIMARY && 1859 drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) && 1860 !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND)) 1861 set_bit(USE_DEGR_WFC_T, &device->flags); 1862 1863 dd = drbd_determine_dev_size(device, 0, NULL); 1864 if (dd <= DS_ERROR) { 1865 retcode = ERR_NOMEM_BITMAP; 1866 goto force_diskless_dec; 1867 } else if (dd == DS_GREW) 1868 set_bit(RESYNC_AFTER_NEG, &device->flags); 1869 1870 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) || 1871 (test_bit(CRASHED_PRIMARY, &device->flags) && 1872 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) { 1873 drbd_info(device, "Assuming that all blocks are out of sync " 1874 "(aka FullSync)\n"); 1875 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, 1876 "set_n_write from attaching", BM_LOCKED_MASK)) { 1877 retcode = ERR_IO_MD_DISK; 1878 goto force_diskless_dec; 1879 } 1880 } else { 1881 if (drbd_bitmap_io(device, &drbd_bm_read, 1882 "read from attaching", BM_LOCKED_MASK)) { 1883 retcode = ERR_IO_MD_DISK; 1884 goto force_diskless_dec; 1885 } 1886 } 1887 1888 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device)) 1889 drbd_suspend_al(device); /* IO is still suspended here... */ 1890 1891 spin_lock_irq(&device->resource->req_lock); 1892 os = drbd_read_state(device); 1893 ns = os; 1894 /* If MDF_CONSISTENT is not set go into inconsistent state, 1895 otherwise investigate MDF_WasUpToDate... 1896 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state, 1897 otherwise into D_CONSISTENT state. 1898 */ 1899 if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) { 1900 if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE)) 1901 ns.disk = D_CONSISTENT; 1902 else 1903 ns.disk = D_OUTDATED; 1904 } else { 1905 ns.disk = D_INCONSISTENT; 1906 } 1907 1908 if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED)) 1909 ns.pdsk = D_OUTDATED; 1910 1911 rcu_read_lock(); 1912 if (ns.disk == D_CONSISTENT && 1913 (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE)) 1914 ns.disk = D_UP_TO_DATE; 1915 1916 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND, 1917 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before 1918 this point, because drbd_request_state() modifies these 1919 flags. */ 1920 1921 if (rcu_dereference(device->ldev->disk_conf)->al_updates) 1922 device->ldev->md.flags &= ~MDF_AL_DISABLED; 1923 else 1924 device->ldev->md.flags |= MDF_AL_DISABLED; 1925 1926 rcu_read_unlock(); 1927 1928 /* In case we are C_CONNECTED postpone any decision on the new disk 1929 state after the negotiation phase. */ 1930 if (device->state.conn == C_CONNECTED) { 1931 device->new_state_tmp.i = ns.i; 1932 ns.i = os.i; 1933 ns.disk = D_NEGOTIATING; 1934 1935 /* We expect to receive up-to-date UUIDs soon. 1936 To avoid a race in receive_state, free p_uuid while 1937 holding req_lock. I.e. atomic with the state change */ 1938 kfree(device->p_uuid); 1939 device->p_uuid = NULL; 1940 } 1941 1942 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL); 1943 spin_unlock_irq(&device->resource->req_lock); 1944 1945 if (rv < SS_SUCCESS) 1946 goto force_diskless_dec; 1947 1948 mod_timer(&device->request_timer, jiffies + HZ); 1949 1950 if (device->state.role == R_PRIMARY) 1951 device->ldev->md.uuid[UI_CURRENT] |= (u64)1; 1952 else 1953 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 1954 1955 drbd_md_mark_dirty(device); 1956 drbd_md_sync(device); 1957 1958 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); 1959 put_ldev(device); 1960 conn_reconfig_done(connection); 1961 mutex_unlock(&adm_ctx.resource->adm_mutex); 1962 drbd_adm_finish(&adm_ctx, info, retcode); 1963 return 0; 1964 1965 force_diskless_dec: 1966 put_ldev(device); 1967 force_diskless: 1968 drbd_force_state(device, NS(disk, D_DISKLESS)); 1969 drbd_md_sync(device); 1970 fail: 1971 conn_reconfig_done(connection); 1972 if (nbc) { 1973 close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev); 1974 close_backing_dev(device, nbc->backing_bdev, true); 1975 kfree(nbc); 1976 } 1977 kfree(new_disk_conf); 1978 lc_destroy(resync_lru); 1979 kfree(new_plan); 1980 mutex_unlock(&adm_ctx.resource->adm_mutex); 1981 finish: 1982 drbd_adm_finish(&adm_ctx, info, retcode); 1983 return 0; 1984 } 1985 1986 static int adm_detach(struct drbd_device *device, int force) 1987 { 1988 enum drbd_state_rv retcode; 1989 void *buffer; 1990 int ret; 1991 1992 if (force) { 1993 set_bit(FORCE_DETACH, &device->flags); 1994 drbd_force_state(device, NS(disk, D_FAILED)); 1995 retcode = SS_SUCCESS; 1996 goto out; 1997 } 1998 1999 drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */ 2000 buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */ 2001 if (buffer) { 2002 retcode = drbd_request_state(device, NS(disk, D_FAILED)); 2003 drbd_md_put_buffer(device); 2004 } else /* already <= D_FAILED */ 2005 retcode = SS_NOTHING_TO_DO; 2006 /* D_FAILED will transition to DISKLESS. */ 2007 drbd_resume_io(device); 2008 ret = wait_event_interruptible(device->misc_wait, 2009 device->state.disk != D_FAILED); 2010 if ((int)retcode == (int)SS_IS_DISKLESS) 2011 retcode = SS_NOTHING_TO_DO; 2012 if (ret) 2013 retcode = ERR_INTR; 2014 out: 2015 return retcode; 2016 } 2017 2018 /* Detaching the disk is a process in multiple stages. First we need to lock 2019 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io. 2020 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all 2021 * internal references as well. 2022 * Only then we have finally detached. */ 2023 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info) 2024 { 2025 struct drbd_config_context adm_ctx; 2026 enum drbd_ret_code retcode; 2027 struct detach_parms parms = { }; 2028 int err; 2029 2030 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2031 if (!adm_ctx.reply_skb) 2032 return retcode; 2033 if (retcode != NO_ERROR) 2034 goto out; 2035 2036 if (info->attrs[DRBD_NLA_DETACH_PARMS]) { 2037 err = detach_parms_from_attrs(&parms, info); 2038 if (err) { 2039 retcode = ERR_MANDATORY_TAG; 2040 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2041 goto out; 2042 } 2043 } 2044 2045 mutex_lock(&adm_ctx.resource->adm_mutex); 2046 retcode = adm_detach(adm_ctx.device, parms.force_detach); 2047 mutex_unlock(&adm_ctx.resource->adm_mutex); 2048 out: 2049 drbd_adm_finish(&adm_ctx, info, retcode); 2050 return 0; 2051 } 2052 2053 static bool conn_resync_running(struct drbd_connection *connection) 2054 { 2055 struct drbd_peer_device *peer_device; 2056 bool rv = false; 2057 int vnr; 2058 2059 rcu_read_lock(); 2060 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 2061 struct drbd_device *device = peer_device->device; 2062 if (device->state.conn == C_SYNC_SOURCE || 2063 device->state.conn == C_SYNC_TARGET || 2064 device->state.conn == C_PAUSED_SYNC_S || 2065 device->state.conn == C_PAUSED_SYNC_T) { 2066 rv = true; 2067 break; 2068 } 2069 } 2070 rcu_read_unlock(); 2071 2072 return rv; 2073 } 2074 2075 static bool conn_ov_running(struct drbd_connection *connection) 2076 { 2077 struct drbd_peer_device *peer_device; 2078 bool rv = false; 2079 int vnr; 2080 2081 rcu_read_lock(); 2082 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 2083 struct drbd_device *device = peer_device->device; 2084 if (device->state.conn == C_VERIFY_S || 2085 device->state.conn == C_VERIFY_T) { 2086 rv = true; 2087 break; 2088 } 2089 } 2090 rcu_read_unlock(); 2091 2092 return rv; 2093 } 2094 2095 static enum drbd_ret_code 2096 _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf) 2097 { 2098 struct drbd_peer_device *peer_device; 2099 int i; 2100 2101 if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) { 2102 if (new_net_conf->wire_protocol != old_net_conf->wire_protocol) 2103 return ERR_NEED_APV_100; 2104 2105 if (new_net_conf->two_primaries != old_net_conf->two_primaries) 2106 return ERR_NEED_APV_100; 2107 2108 if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg)) 2109 return ERR_NEED_APV_100; 2110 } 2111 2112 if (!new_net_conf->two_primaries && 2113 conn_highest_role(connection) == R_PRIMARY && 2114 conn_highest_peer(connection) == R_PRIMARY) 2115 return ERR_NEED_ALLOW_TWO_PRI; 2116 2117 if (new_net_conf->two_primaries && 2118 (new_net_conf->wire_protocol != DRBD_PROT_C)) 2119 return ERR_NOT_PROTO_C; 2120 2121 idr_for_each_entry(&connection->peer_devices, peer_device, i) { 2122 struct drbd_device *device = peer_device->device; 2123 if (get_ldev(device)) { 2124 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing; 2125 put_ldev(device); 2126 if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) 2127 return ERR_STONITH_AND_PROT_A; 2128 } 2129 if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data) 2130 return ERR_DISCARD_IMPOSSIBLE; 2131 } 2132 2133 if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A) 2134 return ERR_CONG_NOT_PROTO_A; 2135 2136 return NO_ERROR; 2137 } 2138 2139 static enum drbd_ret_code 2140 check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf) 2141 { 2142 static enum drbd_ret_code rv; 2143 struct drbd_peer_device *peer_device; 2144 int i; 2145 2146 rcu_read_lock(); 2147 rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf); 2148 rcu_read_unlock(); 2149 2150 /* connection->peer_devices protected by genl_lock() here */ 2151 idr_for_each_entry(&connection->peer_devices, peer_device, i) { 2152 struct drbd_device *device = peer_device->device; 2153 if (!device->bitmap) { 2154 if (drbd_bm_init(device)) 2155 return ERR_NOMEM; 2156 } 2157 } 2158 2159 return rv; 2160 } 2161 2162 struct crypto { 2163 struct crypto_hash *verify_tfm; 2164 struct crypto_hash *csums_tfm; 2165 struct crypto_hash *cram_hmac_tfm; 2166 struct crypto_hash *integrity_tfm; 2167 }; 2168 2169 static int 2170 alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg) 2171 { 2172 if (!tfm_name[0]) 2173 return NO_ERROR; 2174 2175 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC); 2176 if (IS_ERR(*tfm)) { 2177 *tfm = NULL; 2178 return err_alg; 2179 } 2180 2181 return NO_ERROR; 2182 } 2183 2184 static enum drbd_ret_code 2185 alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf) 2186 { 2187 char hmac_name[CRYPTO_MAX_ALG_NAME]; 2188 enum drbd_ret_code rv; 2189 2190 rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg, 2191 ERR_CSUMS_ALG); 2192 if (rv != NO_ERROR) 2193 return rv; 2194 rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg, 2195 ERR_VERIFY_ALG); 2196 if (rv != NO_ERROR) 2197 return rv; 2198 rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg, 2199 ERR_INTEGRITY_ALG); 2200 if (rv != NO_ERROR) 2201 return rv; 2202 if (new_net_conf->cram_hmac_alg[0] != 0) { 2203 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", 2204 new_net_conf->cram_hmac_alg); 2205 2206 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name, 2207 ERR_AUTH_ALG); 2208 } 2209 2210 return rv; 2211 } 2212 2213 static void free_crypto(struct crypto *crypto) 2214 { 2215 crypto_free_hash(crypto->cram_hmac_tfm); 2216 crypto_free_hash(crypto->integrity_tfm); 2217 crypto_free_hash(crypto->csums_tfm); 2218 crypto_free_hash(crypto->verify_tfm); 2219 } 2220 2221 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) 2222 { 2223 struct drbd_config_context adm_ctx; 2224 enum drbd_ret_code retcode; 2225 struct drbd_connection *connection; 2226 struct net_conf *old_net_conf, *new_net_conf = NULL; 2227 int err; 2228 int ovr; /* online verify running */ 2229 int rsr; /* re-sync running */ 2230 struct crypto crypto = { }; 2231 2232 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); 2233 if (!adm_ctx.reply_skb) 2234 return retcode; 2235 if (retcode != NO_ERROR) 2236 goto finish; 2237 2238 connection = adm_ctx.connection; 2239 mutex_lock(&adm_ctx.resource->adm_mutex); 2240 2241 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 2242 if (!new_net_conf) { 2243 retcode = ERR_NOMEM; 2244 goto out; 2245 } 2246 2247 conn_reconfig_start(connection); 2248 2249 mutex_lock(&connection->data.mutex); 2250 mutex_lock(&connection->resource->conf_update); 2251 old_net_conf = connection->net_conf; 2252 2253 if (!old_net_conf) { 2254 drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect"); 2255 retcode = ERR_INVALID_REQUEST; 2256 goto fail; 2257 } 2258 2259 *new_net_conf = *old_net_conf; 2260 if (should_set_defaults(info)) 2261 set_net_conf_defaults(new_net_conf); 2262 2263 err = net_conf_from_attrs_for_change(new_net_conf, info); 2264 if (err && err != -ENOMSG) { 2265 retcode = ERR_MANDATORY_TAG; 2266 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2267 goto fail; 2268 } 2269 2270 retcode = check_net_options(connection, new_net_conf); 2271 if (retcode != NO_ERROR) 2272 goto fail; 2273 2274 /* re-sync running */ 2275 rsr = conn_resync_running(connection); 2276 if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) { 2277 retcode = ERR_CSUMS_RESYNC_RUNNING; 2278 goto fail; 2279 } 2280 2281 /* online verify running */ 2282 ovr = conn_ov_running(connection); 2283 if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) { 2284 retcode = ERR_VERIFY_RUNNING; 2285 goto fail; 2286 } 2287 2288 retcode = alloc_crypto(&crypto, new_net_conf); 2289 if (retcode != NO_ERROR) 2290 goto fail; 2291 2292 rcu_assign_pointer(connection->net_conf, new_net_conf); 2293 2294 if (!rsr) { 2295 crypto_free_hash(connection->csums_tfm); 2296 connection->csums_tfm = crypto.csums_tfm; 2297 crypto.csums_tfm = NULL; 2298 } 2299 if (!ovr) { 2300 crypto_free_hash(connection->verify_tfm); 2301 connection->verify_tfm = crypto.verify_tfm; 2302 crypto.verify_tfm = NULL; 2303 } 2304 2305 crypto_free_hash(connection->integrity_tfm); 2306 connection->integrity_tfm = crypto.integrity_tfm; 2307 if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100) 2308 /* Do this without trying to take connection->data.mutex again. */ 2309 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE); 2310 2311 crypto_free_hash(connection->cram_hmac_tfm); 2312 connection->cram_hmac_tfm = crypto.cram_hmac_tfm; 2313 2314 mutex_unlock(&connection->resource->conf_update); 2315 mutex_unlock(&connection->data.mutex); 2316 synchronize_rcu(); 2317 kfree(old_net_conf); 2318 2319 if (connection->cstate >= C_WF_REPORT_PARAMS) { 2320 struct drbd_peer_device *peer_device; 2321 int vnr; 2322 2323 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) 2324 drbd_send_sync_param(peer_device); 2325 } 2326 2327 goto done; 2328 2329 fail: 2330 mutex_unlock(&connection->resource->conf_update); 2331 mutex_unlock(&connection->data.mutex); 2332 free_crypto(&crypto); 2333 kfree(new_net_conf); 2334 done: 2335 conn_reconfig_done(connection); 2336 out: 2337 mutex_unlock(&adm_ctx.resource->adm_mutex); 2338 finish: 2339 drbd_adm_finish(&adm_ctx, info, retcode); 2340 return 0; 2341 } 2342 2343 static void connection_to_info(struct connection_info *info, 2344 struct drbd_connection *connection) 2345 { 2346 info->conn_connection_state = connection->cstate; 2347 info->conn_role = conn_highest_peer(connection); 2348 } 2349 2350 static void peer_device_to_info(struct peer_device_info *info, 2351 struct drbd_peer_device *peer_device) 2352 { 2353 struct drbd_device *device = peer_device->device; 2354 2355 info->peer_repl_state = 2356 max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn); 2357 info->peer_disk_state = device->state.pdsk; 2358 info->peer_resync_susp_user = device->state.user_isp; 2359 info->peer_resync_susp_peer = device->state.peer_isp; 2360 info->peer_resync_susp_dependency = device->state.aftr_isp; 2361 } 2362 2363 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) 2364 { 2365 struct connection_info connection_info; 2366 enum drbd_notification_type flags; 2367 unsigned int peer_devices = 0; 2368 struct drbd_config_context adm_ctx; 2369 struct drbd_peer_device *peer_device; 2370 struct net_conf *old_net_conf, *new_net_conf = NULL; 2371 struct crypto crypto = { }; 2372 struct drbd_resource *resource; 2373 struct drbd_connection *connection; 2374 enum drbd_ret_code retcode; 2375 int i; 2376 int err; 2377 2378 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 2379 2380 if (!adm_ctx.reply_skb) 2381 return retcode; 2382 if (retcode != NO_ERROR) 2383 goto out; 2384 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) { 2385 drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing"); 2386 retcode = ERR_INVALID_REQUEST; 2387 goto out; 2388 } 2389 2390 /* No need for _rcu here. All reconfiguration is 2391 * strictly serialized on genl_lock(). We are protected against 2392 * concurrent reconfiguration/addition/deletion */ 2393 for_each_resource(resource, &drbd_resources) { 2394 for_each_connection(connection, resource) { 2395 if (nla_len(adm_ctx.my_addr) == connection->my_addr_len && 2396 !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr, 2397 connection->my_addr_len)) { 2398 retcode = ERR_LOCAL_ADDR; 2399 goto out; 2400 } 2401 2402 if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len && 2403 !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr, 2404 connection->peer_addr_len)) { 2405 retcode = ERR_PEER_ADDR; 2406 goto out; 2407 } 2408 } 2409 } 2410 2411 mutex_lock(&adm_ctx.resource->adm_mutex); 2412 connection = first_connection(adm_ctx.resource); 2413 conn_reconfig_start(connection); 2414 2415 if (connection->cstate > C_STANDALONE) { 2416 retcode = ERR_NET_CONFIGURED; 2417 goto fail; 2418 } 2419 2420 /* allocation not in the IO path, drbdsetup / netlink process context */ 2421 new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL); 2422 if (!new_net_conf) { 2423 retcode = ERR_NOMEM; 2424 goto fail; 2425 } 2426 2427 set_net_conf_defaults(new_net_conf); 2428 2429 err = net_conf_from_attrs(new_net_conf, info); 2430 if (err && err != -ENOMSG) { 2431 retcode = ERR_MANDATORY_TAG; 2432 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2433 goto fail; 2434 } 2435 2436 retcode = check_net_options(connection, new_net_conf); 2437 if (retcode != NO_ERROR) 2438 goto fail; 2439 2440 retcode = alloc_crypto(&crypto, new_net_conf); 2441 if (retcode != NO_ERROR) 2442 goto fail; 2443 2444 ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; 2445 2446 drbd_flush_workqueue(&connection->sender_work); 2447 2448 mutex_lock(&adm_ctx.resource->conf_update); 2449 old_net_conf = connection->net_conf; 2450 if (old_net_conf) { 2451 retcode = ERR_NET_CONFIGURED; 2452 mutex_unlock(&adm_ctx.resource->conf_update); 2453 goto fail; 2454 } 2455 rcu_assign_pointer(connection->net_conf, new_net_conf); 2456 2457 conn_free_crypto(connection); 2458 connection->cram_hmac_tfm = crypto.cram_hmac_tfm; 2459 connection->integrity_tfm = crypto.integrity_tfm; 2460 connection->csums_tfm = crypto.csums_tfm; 2461 connection->verify_tfm = crypto.verify_tfm; 2462 2463 connection->my_addr_len = nla_len(adm_ctx.my_addr); 2464 memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len); 2465 connection->peer_addr_len = nla_len(adm_ctx.peer_addr); 2466 memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len); 2467 2468 idr_for_each_entry(&connection->peer_devices, peer_device, i) { 2469 peer_devices++; 2470 } 2471 2472 connection_to_info(&connection_info, connection); 2473 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0; 2474 mutex_lock(¬ification_mutex); 2475 notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags); 2476 idr_for_each_entry(&connection->peer_devices, peer_device, i) { 2477 struct peer_device_info peer_device_info; 2478 2479 peer_device_to_info(&peer_device_info, peer_device); 2480 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0; 2481 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags); 2482 } 2483 mutex_unlock(¬ification_mutex); 2484 mutex_unlock(&adm_ctx.resource->conf_update); 2485 2486 rcu_read_lock(); 2487 idr_for_each_entry(&connection->peer_devices, peer_device, i) { 2488 struct drbd_device *device = peer_device->device; 2489 device->send_cnt = 0; 2490 device->recv_cnt = 0; 2491 } 2492 rcu_read_unlock(); 2493 2494 retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); 2495 2496 conn_reconfig_done(connection); 2497 mutex_unlock(&adm_ctx.resource->adm_mutex); 2498 drbd_adm_finish(&adm_ctx, info, retcode); 2499 return 0; 2500 2501 fail: 2502 free_crypto(&crypto); 2503 kfree(new_net_conf); 2504 2505 conn_reconfig_done(connection); 2506 mutex_unlock(&adm_ctx.resource->adm_mutex); 2507 out: 2508 drbd_adm_finish(&adm_ctx, info, retcode); 2509 return 0; 2510 } 2511 2512 static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force) 2513 { 2514 enum drbd_state_rv rv; 2515 2516 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), 2517 force ? CS_HARD : 0); 2518 2519 switch (rv) { 2520 case SS_NOTHING_TO_DO: 2521 break; 2522 case SS_ALREADY_STANDALONE: 2523 return SS_SUCCESS; 2524 case SS_PRIMARY_NOP: 2525 /* Our state checking code wants to see the peer outdated. */ 2526 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0); 2527 2528 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */ 2529 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE); 2530 2531 break; 2532 case SS_CW_FAILED_BY_PEER: 2533 /* The peer probably wants to see us outdated. */ 2534 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, 2535 disk, D_OUTDATED), 0); 2536 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) { 2537 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), 2538 CS_HARD); 2539 } 2540 break; 2541 default:; 2542 /* no special handling necessary */ 2543 } 2544 2545 if (rv >= SS_SUCCESS) { 2546 enum drbd_state_rv rv2; 2547 /* No one else can reconfigure the network while I am here. 2548 * The state handling only uses drbd_thread_stop_nowait(), 2549 * we want to really wait here until the receiver is no more. 2550 */ 2551 drbd_thread_stop(&connection->receiver); 2552 2553 /* Race breaker. This additional state change request may be 2554 * necessary, if this was a forced disconnect during a receiver 2555 * restart. We may have "killed" the receiver thread just 2556 * after drbd_receiver() returned. Typically, we should be 2557 * C_STANDALONE already, now, and this becomes a no-op. 2558 */ 2559 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE), 2560 CS_VERBOSE | CS_HARD); 2561 if (rv2 < SS_SUCCESS) 2562 drbd_err(connection, 2563 "unexpected rv2=%d in conn_try_disconnect()\n", 2564 rv2); 2565 /* Unlike in DRBD 9, the state engine has generated 2566 * NOTIFY_DESTROY events before clearing connection->net_conf. */ 2567 } 2568 return rv; 2569 } 2570 2571 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) 2572 { 2573 struct drbd_config_context adm_ctx; 2574 struct disconnect_parms parms; 2575 struct drbd_connection *connection; 2576 enum drbd_state_rv rv; 2577 enum drbd_ret_code retcode; 2578 int err; 2579 2580 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); 2581 if (!adm_ctx.reply_skb) 2582 return retcode; 2583 if (retcode != NO_ERROR) 2584 goto fail; 2585 2586 connection = adm_ctx.connection; 2587 memset(&parms, 0, sizeof(parms)); 2588 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) { 2589 err = disconnect_parms_from_attrs(&parms, info); 2590 if (err) { 2591 retcode = ERR_MANDATORY_TAG; 2592 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2593 goto fail; 2594 } 2595 } 2596 2597 mutex_lock(&adm_ctx.resource->adm_mutex); 2598 rv = conn_try_disconnect(connection, parms.force_disconnect); 2599 if (rv < SS_SUCCESS) 2600 retcode = rv; /* FIXME: Type mismatch. */ 2601 else 2602 retcode = NO_ERROR; 2603 mutex_unlock(&adm_ctx.resource->adm_mutex); 2604 fail: 2605 drbd_adm_finish(&adm_ctx, info, retcode); 2606 return 0; 2607 } 2608 2609 void resync_after_online_grow(struct drbd_device *device) 2610 { 2611 int iass; /* I am sync source */ 2612 2613 drbd_info(device, "Resync of new storage after online grow\n"); 2614 if (device->state.role != device->state.peer) 2615 iass = (device->state.role == R_PRIMARY); 2616 else 2617 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags); 2618 2619 if (iass) 2620 drbd_start_resync(device, C_SYNC_SOURCE); 2621 else 2622 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE); 2623 } 2624 2625 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) 2626 { 2627 struct drbd_config_context adm_ctx; 2628 struct disk_conf *old_disk_conf, *new_disk_conf = NULL; 2629 struct resize_parms rs; 2630 struct drbd_device *device; 2631 enum drbd_ret_code retcode; 2632 enum determine_dev_size dd; 2633 bool change_al_layout = false; 2634 enum dds_flags ddsf; 2635 sector_t u_size; 2636 int err; 2637 2638 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2639 if (!adm_ctx.reply_skb) 2640 return retcode; 2641 if (retcode != NO_ERROR) 2642 goto finish; 2643 2644 mutex_lock(&adm_ctx.resource->adm_mutex); 2645 device = adm_ctx.device; 2646 if (!get_ldev(device)) { 2647 retcode = ERR_NO_DISK; 2648 goto fail; 2649 } 2650 2651 memset(&rs, 0, sizeof(struct resize_parms)); 2652 rs.al_stripes = device->ldev->md.al_stripes; 2653 rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4; 2654 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) { 2655 err = resize_parms_from_attrs(&rs, info); 2656 if (err) { 2657 retcode = ERR_MANDATORY_TAG; 2658 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2659 goto fail_ldev; 2660 } 2661 } 2662 2663 if (device->state.conn > C_CONNECTED) { 2664 retcode = ERR_RESIZE_RESYNC; 2665 goto fail_ldev; 2666 } 2667 2668 if (device->state.role == R_SECONDARY && 2669 device->state.peer == R_SECONDARY) { 2670 retcode = ERR_NO_PRIMARY; 2671 goto fail_ldev; 2672 } 2673 2674 if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) { 2675 retcode = ERR_NEED_APV_93; 2676 goto fail_ldev; 2677 } 2678 2679 rcu_read_lock(); 2680 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size; 2681 rcu_read_unlock(); 2682 if (u_size != (sector_t)rs.resize_size) { 2683 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL); 2684 if (!new_disk_conf) { 2685 retcode = ERR_NOMEM; 2686 goto fail_ldev; 2687 } 2688 } 2689 2690 if (device->ldev->md.al_stripes != rs.al_stripes || 2691 device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) { 2692 u32 al_size_k = rs.al_stripes * rs.al_stripe_size; 2693 2694 if (al_size_k > (16 * 1024 * 1024)) { 2695 retcode = ERR_MD_LAYOUT_TOO_BIG; 2696 goto fail_ldev; 2697 } 2698 2699 if (al_size_k < MD_32kB_SECT/2) { 2700 retcode = ERR_MD_LAYOUT_TOO_SMALL; 2701 goto fail_ldev; 2702 } 2703 2704 if (device->state.conn != C_CONNECTED && !rs.resize_force) { 2705 retcode = ERR_MD_LAYOUT_CONNECTED; 2706 goto fail_ldev; 2707 } 2708 2709 change_al_layout = true; 2710 } 2711 2712 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) 2713 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); 2714 2715 if (new_disk_conf) { 2716 mutex_lock(&device->resource->conf_update); 2717 old_disk_conf = device->ldev->disk_conf; 2718 *new_disk_conf = *old_disk_conf; 2719 new_disk_conf->disk_size = (sector_t)rs.resize_size; 2720 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 2721 mutex_unlock(&device->resource->conf_update); 2722 synchronize_rcu(); 2723 kfree(old_disk_conf); 2724 new_disk_conf = NULL; 2725 } 2726 2727 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); 2728 dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL); 2729 drbd_md_sync(device); 2730 put_ldev(device); 2731 if (dd == DS_ERROR) { 2732 retcode = ERR_NOMEM_BITMAP; 2733 goto fail; 2734 } else if (dd == DS_ERROR_SPACE_MD) { 2735 retcode = ERR_MD_LAYOUT_NO_FIT; 2736 goto fail; 2737 } else if (dd == DS_ERROR_SHRINK) { 2738 retcode = ERR_IMPLICIT_SHRINK; 2739 goto fail; 2740 } 2741 2742 if (device->state.conn == C_CONNECTED) { 2743 if (dd == DS_GREW) 2744 set_bit(RESIZE_PENDING, &device->flags); 2745 2746 drbd_send_uuids(first_peer_device(device)); 2747 drbd_send_sizes(first_peer_device(device), 1, ddsf); 2748 } 2749 2750 fail: 2751 mutex_unlock(&adm_ctx.resource->adm_mutex); 2752 finish: 2753 drbd_adm_finish(&adm_ctx, info, retcode); 2754 return 0; 2755 2756 fail_ldev: 2757 put_ldev(device); 2758 kfree(new_disk_conf); 2759 goto fail; 2760 } 2761 2762 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) 2763 { 2764 struct drbd_config_context adm_ctx; 2765 enum drbd_ret_code retcode; 2766 struct res_opts res_opts; 2767 int err; 2768 2769 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 2770 if (!adm_ctx.reply_skb) 2771 return retcode; 2772 if (retcode != NO_ERROR) 2773 goto fail; 2774 2775 res_opts = adm_ctx.resource->res_opts; 2776 if (should_set_defaults(info)) 2777 set_res_opts_defaults(&res_opts); 2778 2779 err = res_opts_from_attrs(&res_opts, info); 2780 if (err && err != -ENOMSG) { 2781 retcode = ERR_MANDATORY_TAG; 2782 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2783 goto fail; 2784 } 2785 2786 mutex_lock(&adm_ctx.resource->adm_mutex); 2787 err = set_resource_options(adm_ctx.resource, &res_opts); 2788 if (err) { 2789 retcode = ERR_INVALID_REQUEST; 2790 if (err == -ENOMEM) 2791 retcode = ERR_NOMEM; 2792 } 2793 mutex_unlock(&adm_ctx.resource->adm_mutex); 2794 2795 fail: 2796 drbd_adm_finish(&adm_ctx, info, retcode); 2797 return 0; 2798 } 2799 2800 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) 2801 { 2802 struct drbd_config_context adm_ctx; 2803 struct drbd_device *device; 2804 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */ 2805 2806 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2807 if (!adm_ctx.reply_skb) 2808 return retcode; 2809 if (retcode != NO_ERROR) 2810 goto out; 2811 2812 device = adm_ctx.device; 2813 if (!get_ldev(device)) { 2814 retcode = ERR_NO_DISK; 2815 goto out; 2816 } 2817 2818 mutex_lock(&adm_ctx.resource->adm_mutex); 2819 2820 /* If there is still bitmap IO pending, probably because of a previous 2821 * resync just being finished, wait for it before requesting a new resync. 2822 * Also wait for it's after_state_ch(). */ 2823 drbd_suspend_io(device); 2824 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); 2825 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work); 2826 2827 /* If we happen to be C_STANDALONE R_SECONDARY, just change to 2828 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise, 2829 * try to start a resync handshake as sync target for full sync. 2830 */ 2831 if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) { 2832 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT)); 2833 if (retcode >= SS_SUCCESS) { 2834 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, 2835 "set_n_write from invalidate", BM_LOCKED_MASK)) 2836 retcode = ERR_IO_MD_DISK; 2837 } 2838 } else 2839 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T)); 2840 drbd_resume_io(device); 2841 mutex_unlock(&adm_ctx.resource->adm_mutex); 2842 put_ldev(device); 2843 out: 2844 drbd_adm_finish(&adm_ctx, info, retcode); 2845 return 0; 2846 } 2847 2848 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info, 2849 union drbd_state mask, union drbd_state val) 2850 { 2851 struct drbd_config_context adm_ctx; 2852 enum drbd_ret_code retcode; 2853 2854 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2855 if (!adm_ctx.reply_skb) 2856 return retcode; 2857 if (retcode != NO_ERROR) 2858 goto out; 2859 2860 mutex_lock(&adm_ctx.resource->adm_mutex); 2861 retcode = drbd_request_state(adm_ctx.device, mask, val); 2862 mutex_unlock(&adm_ctx.resource->adm_mutex); 2863 out: 2864 drbd_adm_finish(&adm_ctx, info, retcode); 2865 return 0; 2866 } 2867 2868 static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local) 2869 { 2870 int rv; 2871 2872 rv = drbd_bmio_set_n_write(device); 2873 drbd_suspend_al(device); 2874 return rv; 2875 } 2876 2877 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) 2878 { 2879 struct drbd_config_context adm_ctx; 2880 int retcode; /* drbd_ret_code, drbd_state_rv */ 2881 struct drbd_device *device; 2882 2883 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2884 if (!adm_ctx.reply_skb) 2885 return retcode; 2886 if (retcode != NO_ERROR) 2887 goto out; 2888 2889 device = adm_ctx.device; 2890 if (!get_ldev(device)) { 2891 retcode = ERR_NO_DISK; 2892 goto out; 2893 } 2894 2895 mutex_lock(&adm_ctx.resource->adm_mutex); 2896 2897 /* If there is still bitmap IO pending, probably because of a previous 2898 * resync just being finished, wait for it before requesting a new resync. 2899 * Also wait for it's after_state_ch(). */ 2900 drbd_suspend_io(device); 2901 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); 2902 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work); 2903 2904 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits 2905 * in the bitmap. Otherwise, try to start a resync handshake 2906 * as sync source for full sync. 2907 */ 2908 if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) { 2909 /* The peer will get a resync upon connect anyways. Just make that 2910 into a full resync. */ 2911 retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT)); 2912 if (retcode >= SS_SUCCESS) { 2913 if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al, 2914 "set_n_write from invalidate_peer", 2915 BM_LOCKED_SET_ALLOWED)) 2916 retcode = ERR_IO_MD_DISK; 2917 } 2918 } else 2919 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S)); 2920 drbd_resume_io(device); 2921 mutex_unlock(&adm_ctx.resource->adm_mutex); 2922 put_ldev(device); 2923 out: 2924 drbd_adm_finish(&adm_ctx, info, retcode); 2925 return 0; 2926 } 2927 2928 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info) 2929 { 2930 struct drbd_config_context adm_ctx; 2931 enum drbd_ret_code retcode; 2932 2933 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2934 if (!adm_ctx.reply_skb) 2935 return retcode; 2936 if (retcode != NO_ERROR) 2937 goto out; 2938 2939 mutex_lock(&adm_ctx.resource->adm_mutex); 2940 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO) 2941 retcode = ERR_PAUSE_IS_SET; 2942 mutex_unlock(&adm_ctx.resource->adm_mutex); 2943 out: 2944 drbd_adm_finish(&adm_ctx, info, retcode); 2945 return 0; 2946 } 2947 2948 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info) 2949 { 2950 struct drbd_config_context adm_ctx; 2951 union drbd_dev_state s; 2952 enum drbd_ret_code retcode; 2953 2954 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2955 if (!adm_ctx.reply_skb) 2956 return retcode; 2957 if (retcode != NO_ERROR) 2958 goto out; 2959 2960 mutex_lock(&adm_ctx.resource->adm_mutex); 2961 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { 2962 s = adm_ctx.device->state; 2963 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { 2964 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP : 2965 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR; 2966 } else { 2967 retcode = ERR_PAUSE_IS_CLEAR; 2968 } 2969 } 2970 mutex_unlock(&adm_ctx.resource->adm_mutex); 2971 out: 2972 drbd_adm_finish(&adm_ctx, info, retcode); 2973 return 0; 2974 } 2975 2976 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info) 2977 { 2978 return drbd_adm_simple_request_state(skb, info, NS(susp, 1)); 2979 } 2980 2981 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) 2982 { 2983 struct drbd_config_context adm_ctx; 2984 struct drbd_device *device; 2985 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */ 2986 2987 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2988 if (!adm_ctx.reply_skb) 2989 return retcode; 2990 if (retcode != NO_ERROR) 2991 goto out; 2992 2993 mutex_lock(&adm_ctx.resource->adm_mutex); 2994 device = adm_ctx.device; 2995 if (test_bit(NEW_CUR_UUID, &device->flags)) { 2996 if (get_ldev_if_state(device, D_ATTACHING)) { 2997 drbd_uuid_new_current(device); 2998 put_ldev(device); 2999 } else { 3000 /* This is effectively a multi-stage "forced down". 3001 * The NEW_CUR_UUID bit is supposedly only set, if we 3002 * lost the replication connection, and are configured 3003 * to freeze IO and wait for some fence-peer handler. 3004 * So we still don't have a replication connection. 3005 * And now we don't have a local disk either. After 3006 * resume, we will fail all pending and new IO, because 3007 * we don't have any data anymore. Which means we will 3008 * eventually be able to terminate all users of this 3009 * device, and then take it down. By bumping the 3010 * "effective" data uuid, we make sure that you really 3011 * need to tear down before you reconfigure, we will 3012 * the refuse to re-connect or re-attach (because no 3013 * matching real data uuid exists). 3014 */ 3015 u64 val; 3016 get_random_bytes(&val, sizeof(u64)); 3017 drbd_set_ed_uuid(device, val); 3018 drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n"); 3019 } 3020 clear_bit(NEW_CUR_UUID, &device->flags); 3021 } 3022 drbd_suspend_io(device); 3023 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); 3024 if (retcode == SS_SUCCESS) { 3025 if (device->state.conn < C_CONNECTED) 3026 tl_clear(first_peer_device(device)->connection); 3027 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED) 3028 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO); 3029 } 3030 drbd_resume_io(device); 3031 mutex_unlock(&adm_ctx.resource->adm_mutex); 3032 out: 3033 drbd_adm_finish(&adm_ctx, info, retcode); 3034 return 0; 3035 } 3036 3037 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info) 3038 { 3039 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED)); 3040 } 3041 3042 static int nla_put_drbd_cfg_context(struct sk_buff *skb, 3043 struct drbd_resource *resource, 3044 struct drbd_connection *connection, 3045 struct drbd_device *device) 3046 { 3047 struct nlattr *nla; 3048 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT); 3049 if (!nla) 3050 goto nla_put_failure; 3051 if (device && 3052 nla_put_u32(skb, T_ctx_volume, device->vnr)) 3053 goto nla_put_failure; 3054 if (nla_put_string(skb, T_ctx_resource_name, resource->name)) 3055 goto nla_put_failure; 3056 if (connection) { 3057 if (connection->my_addr_len && 3058 nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr)) 3059 goto nla_put_failure; 3060 if (connection->peer_addr_len && 3061 nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr)) 3062 goto nla_put_failure; 3063 } 3064 nla_nest_end(skb, nla); 3065 return 0; 3066 3067 nla_put_failure: 3068 if (nla) 3069 nla_nest_cancel(skb, nla); 3070 return -EMSGSIZE; 3071 } 3072 3073 /* 3074 * The generic netlink dump callbacks are called outside the genl_lock(), so 3075 * they cannot use the simple attribute parsing code which uses global 3076 * attribute tables. 3077 */ 3078 static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr) 3079 { 3080 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ; 3081 const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1; 3082 struct nlattr *nla; 3083 3084 nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), 3085 DRBD_NLA_CFG_CONTEXT); 3086 if (!nla) 3087 return NULL; 3088 return drbd_nla_find_nested(maxtype, nla, __nla_type(attr)); 3089 } 3090 3091 static void resource_to_info(struct resource_info *, struct drbd_resource *); 3092 3093 int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb) 3094 { 3095 struct drbd_genlmsghdr *dh; 3096 struct drbd_resource *resource; 3097 struct resource_info resource_info; 3098 struct resource_statistics resource_statistics; 3099 int err; 3100 3101 rcu_read_lock(); 3102 if (cb->args[0]) { 3103 for_each_resource_rcu(resource, &drbd_resources) 3104 if (resource == (struct drbd_resource *)cb->args[0]) 3105 goto found_resource; 3106 err = 0; /* resource was probably deleted */ 3107 goto out; 3108 } 3109 resource = list_entry(&drbd_resources, 3110 struct drbd_resource, resources); 3111 3112 found_resource: 3113 list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) { 3114 goto put_result; 3115 } 3116 err = 0; 3117 goto out; 3118 3119 put_result: 3120 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3121 cb->nlh->nlmsg_seq, &drbd_genl_family, 3122 NLM_F_MULTI, DRBD_ADM_GET_RESOURCES); 3123 err = -ENOMEM; 3124 if (!dh) 3125 goto out; 3126 dh->minor = -1U; 3127 dh->ret_code = NO_ERROR; 3128 err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL); 3129 if (err) 3130 goto out; 3131 err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN)); 3132 if (err) 3133 goto out; 3134 resource_to_info(&resource_info, resource); 3135 err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN)); 3136 if (err) 3137 goto out; 3138 resource_statistics.res_stat_write_ordering = resource->write_ordering; 3139 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN)); 3140 if (err) 3141 goto out; 3142 cb->args[0] = (long)resource; 3143 genlmsg_end(skb, dh); 3144 err = 0; 3145 3146 out: 3147 rcu_read_unlock(); 3148 if (err) 3149 return err; 3150 return skb->len; 3151 } 3152 3153 static void device_to_statistics(struct device_statistics *s, 3154 struct drbd_device *device) 3155 { 3156 memset(s, 0, sizeof(*s)); 3157 s->dev_upper_blocked = !may_inc_ap_bio(device); 3158 if (get_ldev(device)) { 3159 struct drbd_md *md = &device->ldev->md; 3160 u64 *history_uuids = (u64 *)s->history_uuids; 3161 struct request_queue *q; 3162 int n; 3163 3164 spin_lock_irq(&md->uuid_lock); 3165 s->dev_current_uuid = md->uuid[UI_CURRENT]; 3166 BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1); 3167 for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++) 3168 history_uuids[n] = md->uuid[UI_HISTORY_START + n]; 3169 for (; n < HISTORY_UUIDS; n++) 3170 history_uuids[n] = 0; 3171 s->history_uuids_len = HISTORY_UUIDS; 3172 spin_unlock_irq(&md->uuid_lock); 3173 3174 s->dev_disk_flags = md->flags; 3175 q = bdev_get_queue(device->ldev->backing_bdev); 3176 s->dev_lower_blocked = 3177 bdi_congested(&q->backing_dev_info, 3178 (1 << WB_async_congested) | 3179 (1 << WB_sync_congested)); 3180 put_ldev(device); 3181 } 3182 s->dev_size = drbd_get_capacity(device->this_bdev); 3183 s->dev_read = device->read_cnt; 3184 s->dev_write = device->writ_cnt; 3185 s->dev_al_writes = device->al_writ_cnt; 3186 s->dev_bm_writes = device->bm_writ_cnt; 3187 s->dev_upper_pending = atomic_read(&device->ap_bio_cnt); 3188 s->dev_lower_pending = atomic_read(&device->local_cnt); 3189 s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags); 3190 s->dev_exposed_data_uuid = device->ed_uuid; 3191 } 3192 3193 static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr) 3194 { 3195 if (cb->args[0]) { 3196 struct drbd_resource *resource = 3197 (struct drbd_resource *)cb->args[0]; 3198 kref_put(&resource->kref, drbd_destroy_resource); 3199 } 3200 3201 return 0; 3202 } 3203 3204 int drbd_adm_dump_devices_done(struct netlink_callback *cb) { 3205 return put_resource_in_arg0(cb, 7); 3206 } 3207 3208 static void device_to_info(struct device_info *, struct drbd_device *); 3209 3210 int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb) 3211 { 3212 struct nlattr *resource_filter; 3213 struct drbd_resource *resource; 3214 struct drbd_device *uninitialized_var(device); 3215 int minor, err, retcode; 3216 struct drbd_genlmsghdr *dh; 3217 struct device_info device_info; 3218 struct device_statistics device_statistics; 3219 struct idr *idr_to_search; 3220 3221 resource = (struct drbd_resource *)cb->args[0]; 3222 if (!cb->args[0] && !cb->args[1]) { 3223 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name); 3224 if (resource_filter) { 3225 retcode = ERR_RES_NOT_KNOWN; 3226 resource = drbd_find_resource(nla_data(resource_filter)); 3227 if (!resource) 3228 goto put_result; 3229 cb->args[0] = (long)resource; 3230 } 3231 } 3232 3233 rcu_read_lock(); 3234 minor = cb->args[1]; 3235 idr_to_search = resource ? &resource->devices : &drbd_devices; 3236 device = idr_get_next(idr_to_search, &minor); 3237 if (!device) { 3238 err = 0; 3239 goto out; 3240 } 3241 idr_for_each_entry_continue(idr_to_search, device, minor) { 3242 retcode = NO_ERROR; 3243 goto put_result; /* only one iteration */ 3244 } 3245 err = 0; 3246 goto out; /* no more devices */ 3247 3248 put_result: 3249 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3250 cb->nlh->nlmsg_seq, &drbd_genl_family, 3251 NLM_F_MULTI, DRBD_ADM_GET_DEVICES); 3252 err = -ENOMEM; 3253 if (!dh) 3254 goto out; 3255 dh->ret_code = retcode; 3256 dh->minor = -1U; 3257 if (retcode == NO_ERROR) { 3258 dh->minor = device->minor; 3259 err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device); 3260 if (err) 3261 goto out; 3262 if (get_ldev(device)) { 3263 struct disk_conf *disk_conf = 3264 rcu_dereference(device->ldev->disk_conf); 3265 3266 err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN)); 3267 put_ldev(device); 3268 if (err) 3269 goto out; 3270 } 3271 device_to_info(&device_info, device); 3272 err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN)); 3273 if (err) 3274 goto out; 3275 3276 device_to_statistics(&device_statistics, device); 3277 err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN)); 3278 if (err) 3279 goto out; 3280 cb->args[1] = minor + 1; 3281 } 3282 genlmsg_end(skb, dh); 3283 err = 0; 3284 3285 out: 3286 rcu_read_unlock(); 3287 if (err) 3288 return err; 3289 return skb->len; 3290 } 3291 3292 int drbd_adm_dump_connections_done(struct netlink_callback *cb) 3293 { 3294 return put_resource_in_arg0(cb, 6); 3295 } 3296 3297 enum { SINGLE_RESOURCE, ITERATE_RESOURCES }; 3298 3299 int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb) 3300 { 3301 struct nlattr *resource_filter; 3302 struct drbd_resource *resource = NULL, *next_resource; 3303 struct drbd_connection *uninitialized_var(connection); 3304 int err = 0, retcode; 3305 struct drbd_genlmsghdr *dh; 3306 struct connection_info connection_info; 3307 struct connection_statistics connection_statistics; 3308 3309 rcu_read_lock(); 3310 resource = (struct drbd_resource *)cb->args[0]; 3311 if (!cb->args[0]) { 3312 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name); 3313 if (resource_filter) { 3314 retcode = ERR_RES_NOT_KNOWN; 3315 resource = drbd_find_resource(nla_data(resource_filter)); 3316 if (!resource) 3317 goto put_result; 3318 cb->args[0] = (long)resource; 3319 cb->args[1] = SINGLE_RESOURCE; 3320 } 3321 } 3322 if (!resource) { 3323 if (list_empty(&drbd_resources)) 3324 goto out; 3325 resource = list_first_entry(&drbd_resources, struct drbd_resource, resources); 3326 kref_get(&resource->kref); 3327 cb->args[0] = (long)resource; 3328 cb->args[1] = ITERATE_RESOURCES; 3329 } 3330 3331 next_resource: 3332 rcu_read_unlock(); 3333 mutex_lock(&resource->conf_update); 3334 rcu_read_lock(); 3335 if (cb->args[2]) { 3336 for_each_connection_rcu(connection, resource) 3337 if (connection == (struct drbd_connection *)cb->args[2]) 3338 goto found_connection; 3339 /* connection was probably deleted */ 3340 goto no_more_connections; 3341 } 3342 connection = list_entry(&resource->connections, struct drbd_connection, connections); 3343 3344 found_connection: 3345 list_for_each_entry_continue_rcu(connection, &resource->connections, connections) { 3346 if (!has_net_conf(connection)) 3347 continue; 3348 retcode = NO_ERROR; 3349 goto put_result; /* only one iteration */ 3350 } 3351 3352 no_more_connections: 3353 if (cb->args[1] == ITERATE_RESOURCES) { 3354 for_each_resource_rcu(next_resource, &drbd_resources) { 3355 if (next_resource == resource) 3356 goto found_resource; 3357 } 3358 /* resource was probably deleted */ 3359 } 3360 goto out; 3361 3362 found_resource: 3363 list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) { 3364 mutex_unlock(&resource->conf_update); 3365 kref_put(&resource->kref, drbd_destroy_resource); 3366 resource = next_resource; 3367 kref_get(&resource->kref); 3368 cb->args[0] = (long)resource; 3369 cb->args[2] = 0; 3370 goto next_resource; 3371 } 3372 goto out; /* no more resources */ 3373 3374 put_result: 3375 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3376 cb->nlh->nlmsg_seq, &drbd_genl_family, 3377 NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS); 3378 err = -ENOMEM; 3379 if (!dh) 3380 goto out; 3381 dh->ret_code = retcode; 3382 dh->minor = -1U; 3383 if (retcode == NO_ERROR) { 3384 struct net_conf *net_conf; 3385 3386 err = nla_put_drbd_cfg_context(skb, resource, connection, NULL); 3387 if (err) 3388 goto out; 3389 net_conf = rcu_dereference(connection->net_conf); 3390 if (net_conf) { 3391 err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN)); 3392 if (err) 3393 goto out; 3394 } 3395 connection_to_info(&connection_info, connection); 3396 err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN)); 3397 if (err) 3398 goto out; 3399 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags); 3400 err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN)); 3401 if (err) 3402 goto out; 3403 cb->args[2] = (long)connection; 3404 } 3405 genlmsg_end(skb, dh); 3406 err = 0; 3407 3408 out: 3409 rcu_read_unlock(); 3410 if (resource) 3411 mutex_unlock(&resource->conf_update); 3412 if (err) 3413 return err; 3414 return skb->len; 3415 } 3416 3417 enum mdf_peer_flag { 3418 MDF_PEER_CONNECTED = 1 << 0, 3419 MDF_PEER_OUTDATED = 1 << 1, 3420 MDF_PEER_FENCING = 1 << 2, 3421 MDF_PEER_FULL_SYNC = 1 << 3, 3422 }; 3423 3424 static void peer_device_to_statistics(struct peer_device_statistics *s, 3425 struct drbd_peer_device *peer_device) 3426 { 3427 struct drbd_device *device = peer_device->device; 3428 3429 memset(s, 0, sizeof(*s)); 3430 s->peer_dev_received = device->recv_cnt; 3431 s->peer_dev_sent = device->send_cnt; 3432 s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) + 3433 atomic_read(&device->rs_pending_cnt); 3434 s->peer_dev_unacked = atomic_read(&device->unacked_cnt); 3435 s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9); 3436 s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9); 3437 if (get_ldev(device)) { 3438 struct drbd_md *md = &device->ldev->md; 3439 3440 spin_lock_irq(&md->uuid_lock); 3441 s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP]; 3442 spin_unlock_irq(&md->uuid_lock); 3443 s->peer_dev_flags = 3444 (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ? 3445 MDF_PEER_CONNECTED : 0) + 3446 (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) && 3447 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ? 3448 MDF_PEER_OUTDATED : 0) + 3449 /* FIXME: MDF_PEER_FENCING? */ 3450 (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ? 3451 MDF_PEER_FULL_SYNC : 0); 3452 put_ldev(device); 3453 } 3454 } 3455 3456 int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb) 3457 { 3458 return put_resource_in_arg0(cb, 9); 3459 } 3460 3461 int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb) 3462 { 3463 struct nlattr *resource_filter; 3464 struct drbd_resource *resource; 3465 struct drbd_device *uninitialized_var(device); 3466 struct drbd_peer_device *peer_device = NULL; 3467 int minor, err, retcode; 3468 struct drbd_genlmsghdr *dh; 3469 struct idr *idr_to_search; 3470 3471 resource = (struct drbd_resource *)cb->args[0]; 3472 if (!cb->args[0] && !cb->args[1]) { 3473 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name); 3474 if (resource_filter) { 3475 retcode = ERR_RES_NOT_KNOWN; 3476 resource = drbd_find_resource(nla_data(resource_filter)); 3477 if (!resource) 3478 goto put_result; 3479 } 3480 cb->args[0] = (long)resource; 3481 } 3482 3483 rcu_read_lock(); 3484 minor = cb->args[1]; 3485 idr_to_search = resource ? &resource->devices : &drbd_devices; 3486 device = idr_find(idr_to_search, minor); 3487 if (!device) { 3488 next_device: 3489 minor++; 3490 cb->args[2] = 0; 3491 device = idr_get_next(idr_to_search, &minor); 3492 if (!device) { 3493 err = 0; 3494 goto out; 3495 } 3496 } 3497 if (cb->args[2]) { 3498 for_each_peer_device(peer_device, device) 3499 if (peer_device == (struct drbd_peer_device *)cb->args[2]) 3500 goto found_peer_device; 3501 /* peer device was probably deleted */ 3502 goto next_device; 3503 } 3504 /* Make peer_device point to the list head (not the first entry). */ 3505 peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices); 3506 3507 found_peer_device: 3508 list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) { 3509 if (!has_net_conf(peer_device->connection)) 3510 continue; 3511 retcode = NO_ERROR; 3512 goto put_result; /* only one iteration */ 3513 } 3514 goto next_device; 3515 3516 put_result: 3517 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3518 cb->nlh->nlmsg_seq, &drbd_genl_family, 3519 NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES); 3520 err = -ENOMEM; 3521 if (!dh) 3522 goto out; 3523 dh->ret_code = retcode; 3524 dh->minor = -1U; 3525 if (retcode == NO_ERROR) { 3526 struct peer_device_info peer_device_info; 3527 struct peer_device_statistics peer_device_statistics; 3528 3529 dh->minor = minor; 3530 err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device); 3531 if (err) 3532 goto out; 3533 peer_device_to_info(&peer_device_info, peer_device); 3534 err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN)); 3535 if (err) 3536 goto out; 3537 peer_device_to_statistics(&peer_device_statistics, peer_device); 3538 err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN)); 3539 if (err) 3540 goto out; 3541 cb->args[1] = minor; 3542 cb->args[2] = (long)peer_device; 3543 } 3544 genlmsg_end(skb, dh); 3545 err = 0; 3546 3547 out: 3548 rcu_read_unlock(); 3549 if (err) 3550 return err; 3551 return skb->len; 3552 } 3553 /* 3554 * Return the connection of @resource if @resource has exactly one connection. 3555 */ 3556 static struct drbd_connection *the_only_connection(struct drbd_resource *resource) 3557 { 3558 struct list_head *connections = &resource->connections; 3559 3560 if (list_empty(connections) || connections->next->next != connections) 3561 return NULL; 3562 return list_first_entry(&resource->connections, struct drbd_connection, connections); 3563 } 3564 3565 static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, 3566 const struct sib_info *sib) 3567 { 3568 struct drbd_resource *resource = device->resource; 3569 struct state_info *si = NULL; /* for sizeof(si->member); */ 3570 struct nlattr *nla; 3571 int got_ldev; 3572 int err = 0; 3573 int exclude_sensitive; 3574 3575 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen 3576 * to. So we better exclude_sensitive information. 3577 * 3578 * If sib == NULL, this is drbd_adm_get_status, executed synchronously 3579 * in the context of the requesting user process. Exclude sensitive 3580 * information, unless current has superuser. 3581 * 3582 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and 3583 * relies on the current implementation of netlink_dump(), which 3584 * executes the dump callback successively from netlink_recvmsg(), 3585 * always in the context of the receiving process */ 3586 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN); 3587 3588 got_ldev = get_ldev(device); 3589 3590 /* We need to add connection name and volume number information still. 3591 * Minor number is in drbd_genlmsghdr. */ 3592 if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device)) 3593 goto nla_put_failure; 3594 3595 if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive)) 3596 goto nla_put_failure; 3597 3598 rcu_read_lock(); 3599 if (got_ldev) { 3600 struct disk_conf *disk_conf; 3601 3602 disk_conf = rcu_dereference(device->ldev->disk_conf); 3603 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive); 3604 } 3605 if (!err) { 3606 struct net_conf *nc; 3607 3608 nc = rcu_dereference(first_peer_device(device)->connection->net_conf); 3609 if (nc) 3610 err = net_conf_to_skb(skb, nc, exclude_sensitive); 3611 } 3612 rcu_read_unlock(); 3613 if (err) 3614 goto nla_put_failure; 3615 3616 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO); 3617 if (!nla) 3618 goto nla_put_failure; 3619 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) || 3620 nla_put_u32(skb, T_current_state, device->state.i) || 3621 nla_put_u64(skb, T_ed_uuid, device->ed_uuid) || 3622 nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) || 3623 nla_put_u64(skb, T_send_cnt, device->send_cnt) || 3624 nla_put_u64(skb, T_recv_cnt, device->recv_cnt) || 3625 nla_put_u64(skb, T_read_cnt, device->read_cnt) || 3626 nla_put_u64(skb, T_writ_cnt, device->writ_cnt) || 3627 nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) || 3628 nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) || 3629 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) || 3630 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) || 3631 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt))) 3632 goto nla_put_failure; 3633 3634 if (got_ldev) { 3635 int err; 3636 3637 spin_lock_irq(&device->ldev->md.uuid_lock); 3638 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid); 3639 spin_unlock_irq(&device->ldev->md.uuid_lock); 3640 3641 if (err) 3642 goto nla_put_failure; 3643 3644 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) || 3645 nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) || 3646 nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device))) 3647 goto nla_put_failure; 3648 if (C_SYNC_SOURCE <= device->state.conn && 3649 C_PAUSED_SYNC_T >= device->state.conn) { 3650 if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) || 3651 nla_put_u64(skb, T_bits_rs_failed, device->rs_failed)) 3652 goto nla_put_failure; 3653 } 3654 } 3655 3656 if (sib) { 3657 switch(sib->sib_reason) { 3658 case SIB_SYNC_PROGRESS: 3659 case SIB_GET_STATUS_REPLY: 3660 break; 3661 case SIB_STATE_CHANGE: 3662 if (nla_put_u32(skb, T_prev_state, sib->os.i) || 3663 nla_put_u32(skb, T_new_state, sib->ns.i)) 3664 goto nla_put_failure; 3665 break; 3666 case SIB_HELPER_POST: 3667 if (nla_put_u32(skb, T_helper_exit_code, 3668 sib->helper_exit_code)) 3669 goto nla_put_failure; 3670 /* fall through */ 3671 case SIB_HELPER_PRE: 3672 if (nla_put_string(skb, T_helper, sib->helper_name)) 3673 goto nla_put_failure; 3674 break; 3675 } 3676 } 3677 nla_nest_end(skb, nla); 3678 3679 if (0) 3680 nla_put_failure: 3681 err = -EMSGSIZE; 3682 if (got_ldev) 3683 put_ldev(device); 3684 return err; 3685 } 3686 3687 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info) 3688 { 3689 struct drbd_config_context adm_ctx; 3690 enum drbd_ret_code retcode; 3691 int err; 3692 3693 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3694 if (!adm_ctx.reply_skb) 3695 return retcode; 3696 if (retcode != NO_ERROR) 3697 goto out; 3698 3699 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL); 3700 if (err) { 3701 nlmsg_free(adm_ctx.reply_skb); 3702 return err; 3703 } 3704 out: 3705 drbd_adm_finish(&adm_ctx, info, retcode); 3706 return 0; 3707 } 3708 3709 static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb) 3710 { 3711 struct drbd_device *device; 3712 struct drbd_genlmsghdr *dh; 3713 struct drbd_resource *pos = (struct drbd_resource *)cb->args[0]; 3714 struct drbd_resource *resource = NULL; 3715 struct drbd_resource *tmp; 3716 unsigned volume = cb->args[1]; 3717 3718 /* Open coded, deferred, iteration: 3719 * for_each_resource_safe(resource, tmp, &drbd_resources) { 3720 * connection = "first connection of resource or undefined"; 3721 * idr_for_each_entry(&resource->devices, device, i) { 3722 * ... 3723 * } 3724 * } 3725 * where resource is cb->args[0]; 3726 * and i is cb->args[1]; 3727 * 3728 * cb->args[2] indicates if we shall loop over all resources, 3729 * or just dump all volumes of a single resource. 3730 * 3731 * This may miss entries inserted after this dump started, 3732 * or entries deleted before they are reached. 3733 * 3734 * We need to make sure the device won't disappear while 3735 * we are looking at it, and revalidate our iterators 3736 * on each iteration. 3737 */ 3738 3739 /* synchronize with conn_create()/drbd_destroy_connection() */ 3740 rcu_read_lock(); 3741 /* revalidate iterator position */ 3742 for_each_resource_rcu(tmp, &drbd_resources) { 3743 if (pos == NULL) { 3744 /* first iteration */ 3745 pos = tmp; 3746 resource = pos; 3747 break; 3748 } 3749 if (tmp == pos) { 3750 resource = pos; 3751 break; 3752 } 3753 } 3754 if (resource) { 3755 next_resource: 3756 device = idr_get_next(&resource->devices, &volume); 3757 if (!device) { 3758 /* No more volumes to dump on this resource. 3759 * Advance resource iterator. */ 3760 pos = list_entry_rcu(resource->resources.next, 3761 struct drbd_resource, resources); 3762 /* Did we dump any volume of this resource yet? */ 3763 if (volume != 0) { 3764 /* If we reached the end of the list, 3765 * or only a single resource dump was requested, 3766 * we are done. */ 3767 if (&pos->resources == &drbd_resources || cb->args[2]) 3768 goto out; 3769 volume = 0; 3770 resource = pos; 3771 goto next_resource; 3772 } 3773 } 3774 3775 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3776 cb->nlh->nlmsg_seq, &drbd_genl_family, 3777 NLM_F_MULTI, DRBD_ADM_GET_STATUS); 3778 if (!dh) 3779 goto out; 3780 3781 if (!device) { 3782 /* This is a connection without a single volume. 3783 * Suprisingly enough, it may have a network 3784 * configuration. */ 3785 struct drbd_connection *connection; 3786 3787 dh->minor = -1U; 3788 dh->ret_code = NO_ERROR; 3789 connection = the_only_connection(resource); 3790 if (nla_put_drbd_cfg_context(skb, resource, connection, NULL)) 3791 goto cancel; 3792 if (connection) { 3793 struct net_conf *nc; 3794 3795 nc = rcu_dereference(connection->net_conf); 3796 if (nc && net_conf_to_skb(skb, nc, 1) != 0) 3797 goto cancel; 3798 } 3799 goto done; 3800 } 3801 3802 D_ASSERT(device, device->vnr == volume); 3803 D_ASSERT(device, device->resource == resource); 3804 3805 dh->minor = device_to_minor(device); 3806 dh->ret_code = NO_ERROR; 3807 3808 if (nla_put_status_info(skb, device, NULL)) { 3809 cancel: 3810 genlmsg_cancel(skb, dh); 3811 goto out; 3812 } 3813 done: 3814 genlmsg_end(skb, dh); 3815 } 3816 3817 out: 3818 rcu_read_unlock(); 3819 /* where to start the next iteration */ 3820 cb->args[0] = (long)pos; 3821 cb->args[1] = (pos == resource) ? volume + 1 : 0; 3822 3823 /* No more resources/volumes/minors found results in an empty skb. 3824 * Which will terminate the dump. */ 3825 return skb->len; 3826 } 3827 3828 /* 3829 * Request status of all resources, or of all volumes within a single resource. 3830 * 3831 * This is a dump, as the answer may not fit in a single reply skb otherwise. 3832 * Which means we cannot use the family->attrbuf or other such members, because 3833 * dump is NOT protected by the genl_lock(). During dump, we only have access 3834 * to the incoming skb, and need to opencode "parsing" of the nlattr payload. 3835 * 3836 * Once things are setup properly, we call into get_one_status(). 3837 */ 3838 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb) 3839 { 3840 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ; 3841 struct nlattr *nla; 3842 const char *resource_name; 3843 struct drbd_resource *resource; 3844 int maxtype; 3845 3846 /* Is this a followup call? */ 3847 if (cb->args[0]) { 3848 /* ... of a single resource dump, 3849 * and the resource iterator has been advanced already? */ 3850 if (cb->args[2] && cb->args[2] != cb->args[0]) 3851 return 0; /* DONE. */ 3852 goto dump; 3853 } 3854 3855 /* First call (from netlink_dump_start). We need to figure out 3856 * which resource(s) the user wants us to dump. */ 3857 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen), 3858 nlmsg_attrlen(cb->nlh, hdrlen), 3859 DRBD_NLA_CFG_CONTEXT); 3860 3861 /* No explicit context given. Dump all. */ 3862 if (!nla) 3863 goto dump; 3864 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1; 3865 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name)); 3866 if (IS_ERR(nla)) 3867 return PTR_ERR(nla); 3868 /* context given, but no name present? */ 3869 if (!nla) 3870 return -EINVAL; 3871 resource_name = nla_data(nla); 3872 if (!*resource_name) 3873 return -ENODEV; 3874 resource = drbd_find_resource(resource_name); 3875 if (!resource) 3876 return -ENODEV; 3877 3878 kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */ 3879 3880 /* prime iterators, and set "filter" mode mark: 3881 * only dump this connection. */ 3882 cb->args[0] = (long)resource; 3883 /* cb->args[1] = 0; passed in this way. */ 3884 cb->args[2] = (long)resource; 3885 3886 dump: 3887 return get_one_status(skb, cb); 3888 } 3889 3890 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info) 3891 { 3892 struct drbd_config_context adm_ctx; 3893 enum drbd_ret_code retcode; 3894 struct timeout_parms tp; 3895 int err; 3896 3897 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3898 if (!adm_ctx.reply_skb) 3899 return retcode; 3900 if (retcode != NO_ERROR) 3901 goto out; 3902 3903 tp.timeout_type = 3904 adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : 3905 test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED : 3906 UT_DEFAULT; 3907 3908 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp); 3909 if (err) { 3910 nlmsg_free(adm_ctx.reply_skb); 3911 return err; 3912 } 3913 out: 3914 drbd_adm_finish(&adm_ctx, info, retcode); 3915 return 0; 3916 } 3917 3918 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) 3919 { 3920 struct drbd_config_context adm_ctx; 3921 struct drbd_device *device; 3922 enum drbd_ret_code retcode; 3923 struct start_ov_parms parms; 3924 3925 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3926 if (!adm_ctx.reply_skb) 3927 return retcode; 3928 if (retcode != NO_ERROR) 3929 goto out; 3930 3931 device = adm_ctx.device; 3932 3933 /* resume from last known position, if possible */ 3934 parms.ov_start_sector = device->ov_start_sector; 3935 parms.ov_stop_sector = ULLONG_MAX; 3936 if (info->attrs[DRBD_NLA_START_OV_PARMS]) { 3937 int err = start_ov_parms_from_attrs(&parms, info); 3938 if (err) { 3939 retcode = ERR_MANDATORY_TAG; 3940 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 3941 goto out; 3942 } 3943 } 3944 mutex_lock(&adm_ctx.resource->adm_mutex); 3945 3946 /* w_make_ov_request expects position to be aligned */ 3947 device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1); 3948 device->ov_stop_sector = parms.ov_stop_sector; 3949 3950 /* If there is still bitmap IO pending, e.g. previous resync or verify 3951 * just being finished, wait for it before requesting a new resync. */ 3952 drbd_suspend_io(device); 3953 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); 3954 retcode = drbd_request_state(device, NS(conn, C_VERIFY_S)); 3955 drbd_resume_io(device); 3956 3957 mutex_unlock(&adm_ctx.resource->adm_mutex); 3958 out: 3959 drbd_adm_finish(&adm_ctx, info, retcode); 3960 return 0; 3961 } 3962 3963 3964 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) 3965 { 3966 struct drbd_config_context adm_ctx; 3967 struct drbd_device *device; 3968 enum drbd_ret_code retcode; 3969 int skip_initial_sync = 0; 3970 int err; 3971 struct new_c_uuid_parms args; 3972 3973 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3974 if (!adm_ctx.reply_skb) 3975 return retcode; 3976 if (retcode != NO_ERROR) 3977 goto out_nolock; 3978 3979 device = adm_ctx.device; 3980 memset(&args, 0, sizeof(args)); 3981 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) { 3982 err = new_c_uuid_parms_from_attrs(&args, info); 3983 if (err) { 3984 retcode = ERR_MANDATORY_TAG; 3985 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 3986 goto out_nolock; 3987 } 3988 } 3989 3990 mutex_lock(&adm_ctx.resource->adm_mutex); 3991 mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */ 3992 3993 if (!get_ldev(device)) { 3994 retcode = ERR_NO_DISK; 3995 goto out; 3996 } 3997 3998 /* this is "skip initial sync", assume to be clean */ 3999 if (device->state.conn == C_CONNECTED && 4000 first_peer_device(device)->connection->agreed_pro_version >= 90 && 4001 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { 4002 drbd_info(device, "Preparing to skip initial sync\n"); 4003 skip_initial_sync = 1; 4004 } else if (device->state.conn != C_STANDALONE) { 4005 retcode = ERR_CONNECTED; 4006 goto out_dec; 4007 } 4008 4009 drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */ 4010 drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */ 4011 4012 if (args.clear_bm) { 4013 err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write, 4014 "clear_n_write from new_c_uuid", BM_LOCKED_MASK); 4015 if (err) { 4016 drbd_err(device, "Writing bitmap failed with %d\n", err); 4017 retcode = ERR_IO_MD_DISK; 4018 } 4019 if (skip_initial_sync) { 4020 drbd_send_uuids_skip_initial_sync(first_peer_device(device)); 4021 _drbd_uuid_set(device, UI_BITMAP, 0); 4022 drbd_print_uuids(device, "cleared bitmap UUID"); 4023 spin_lock_irq(&device->resource->req_lock); 4024 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 4025 CS_VERBOSE, NULL); 4026 spin_unlock_irq(&device->resource->req_lock); 4027 } 4028 } 4029 4030 drbd_md_sync(device); 4031 out_dec: 4032 put_ldev(device); 4033 out: 4034 mutex_unlock(device->state_mutex); 4035 mutex_unlock(&adm_ctx.resource->adm_mutex); 4036 out_nolock: 4037 drbd_adm_finish(&adm_ctx, info, retcode); 4038 return 0; 4039 } 4040 4041 static enum drbd_ret_code 4042 drbd_check_resource_name(struct drbd_config_context *adm_ctx) 4043 { 4044 const char *name = adm_ctx->resource_name; 4045 if (!name || !name[0]) { 4046 drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing"); 4047 return ERR_MANDATORY_TAG; 4048 } 4049 /* if we want to use these in sysfs/configfs/debugfs some day, 4050 * we must not allow slashes */ 4051 if (strchr(name, '/')) { 4052 drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name"); 4053 return ERR_INVALID_REQUEST; 4054 } 4055 return NO_ERROR; 4056 } 4057 4058 static void resource_to_info(struct resource_info *info, 4059 struct drbd_resource *resource) 4060 { 4061 info->res_role = conn_highest_role(first_connection(resource)); 4062 info->res_susp = resource->susp; 4063 info->res_susp_nod = resource->susp_nod; 4064 info->res_susp_fen = resource->susp_fen; 4065 } 4066 4067 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info) 4068 { 4069 struct drbd_connection *connection; 4070 struct drbd_config_context adm_ctx; 4071 enum drbd_ret_code retcode; 4072 struct res_opts res_opts; 4073 int err; 4074 4075 retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0); 4076 if (!adm_ctx.reply_skb) 4077 return retcode; 4078 if (retcode != NO_ERROR) 4079 goto out; 4080 4081 set_res_opts_defaults(&res_opts); 4082 err = res_opts_from_attrs(&res_opts, info); 4083 if (err && err != -ENOMSG) { 4084 retcode = ERR_MANDATORY_TAG; 4085 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 4086 goto out; 4087 } 4088 4089 retcode = drbd_check_resource_name(&adm_ctx); 4090 if (retcode != NO_ERROR) 4091 goto out; 4092 4093 if (adm_ctx.resource) { 4094 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) { 4095 retcode = ERR_INVALID_REQUEST; 4096 drbd_msg_put_info(adm_ctx.reply_skb, "resource exists"); 4097 } 4098 /* else: still NO_ERROR */ 4099 goto out; 4100 } 4101 4102 /* not yet safe for genl_family.parallel_ops */ 4103 mutex_lock(&resources_mutex); 4104 connection = conn_create(adm_ctx.resource_name, &res_opts); 4105 mutex_unlock(&resources_mutex); 4106 4107 if (connection) { 4108 struct resource_info resource_info; 4109 4110 mutex_lock(¬ification_mutex); 4111 resource_to_info(&resource_info, connection->resource); 4112 notify_resource_state(NULL, 0, connection->resource, 4113 &resource_info, NOTIFY_CREATE); 4114 mutex_unlock(¬ification_mutex); 4115 } else 4116 retcode = ERR_NOMEM; 4117 4118 out: 4119 drbd_adm_finish(&adm_ctx, info, retcode); 4120 return 0; 4121 } 4122 4123 static void device_to_info(struct device_info *info, 4124 struct drbd_device *device) 4125 { 4126 info->dev_disk_state = device->state.disk; 4127 } 4128 4129 4130 int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info) 4131 { 4132 struct drbd_config_context adm_ctx; 4133 struct drbd_genlmsghdr *dh = info->userhdr; 4134 enum drbd_ret_code retcode; 4135 4136 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 4137 if (!adm_ctx.reply_skb) 4138 return retcode; 4139 if (retcode != NO_ERROR) 4140 goto out; 4141 4142 if (dh->minor > MINORMASK) { 4143 drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range"); 4144 retcode = ERR_INVALID_REQUEST; 4145 goto out; 4146 } 4147 if (adm_ctx.volume > DRBD_VOLUME_MAX) { 4148 drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range"); 4149 retcode = ERR_INVALID_REQUEST; 4150 goto out; 4151 } 4152 4153 /* drbd_adm_prepare made sure already 4154 * that first_peer_device(device)->connection and device->vnr match the request. */ 4155 if (adm_ctx.device) { 4156 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) 4157 retcode = ERR_MINOR_OR_VOLUME_EXISTS; 4158 /* else: still NO_ERROR */ 4159 goto out; 4160 } 4161 4162 mutex_lock(&adm_ctx.resource->adm_mutex); 4163 retcode = drbd_create_device(&adm_ctx, dh->minor); 4164 if (retcode == NO_ERROR) { 4165 struct drbd_device *device; 4166 struct drbd_peer_device *peer_device; 4167 struct device_info info; 4168 unsigned int peer_devices = 0; 4169 enum drbd_notification_type flags; 4170 4171 device = minor_to_device(dh->minor); 4172 for_each_peer_device(peer_device, device) { 4173 if (!has_net_conf(peer_device->connection)) 4174 continue; 4175 peer_devices++; 4176 } 4177 4178 device_to_info(&info, device); 4179 mutex_lock(¬ification_mutex); 4180 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0; 4181 notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags); 4182 for_each_peer_device(peer_device, device) { 4183 struct peer_device_info peer_device_info; 4184 4185 if (!has_net_conf(peer_device->connection)) 4186 continue; 4187 peer_device_to_info(&peer_device_info, peer_device); 4188 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0; 4189 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, 4190 NOTIFY_CREATE | flags); 4191 } 4192 mutex_unlock(¬ification_mutex); 4193 } 4194 mutex_unlock(&adm_ctx.resource->adm_mutex); 4195 out: 4196 drbd_adm_finish(&adm_ctx, info, retcode); 4197 return 0; 4198 } 4199 4200 static enum drbd_ret_code adm_del_minor(struct drbd_device *device) 4201 { 4202 struct drbd_peer_device *peer_device; 4203 4204 if (device->state.disk == D_DISKLESS && 4205 /* no need to be device->state.conn == C_STANDALONE && 4206 * we may want to delete a minor from a live replication group. 4207 */ 4208 device->state.role == R_SECONDARY) { 4209 struct drbd_connection *connection = 4210 first_connection(device->resource); 4211 4212 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS), 4213 CS_VERBOSE + CS_WAIT_COMPLETE); 4214 4215 /* If the state engine hasn't stopped the sender thread yet, we 4216 * need to flush the sender work queue before generating the 4217 * DESTROY events here. */ 4218 if (get_t_state(&connection->worker) == RUNNING) 4219 drbd_flush_workqueue(&connection->sender_work); 4220 4221 mutex_lock(¬ification_mutex); 4222 for_each_peer_device(peer_device, device) { 4223 if (!has_net_conf(peer_device->connection)) 4224 continue; 4225 notify_peer_device_state(NULL, 0, peer_device, NULL, 4226 NOTIFY_DESTROY | NOTIFY_CONTINUES); 4227 } 4228 notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY); 4229 mutex_unlock(¬ification_mutex); 4230 4231 drbd_delete_device(device); 4232 return NO_ERROR; 4233 } else 4234 return ERR_MINOR_CONFIGURED; 4235 } 4236 4237 int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info) 4238 { 4239 struct drbd_config_context adm_ctx; 4240 enum drbd_ret_code retcode; 4241 4242 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 4243 if (!adm_ctx.reply_skb) 4244 return retcode; 4245 if (retcode != NO_ERROR) 4246 goto out; 4247 4248 mutex_lock(&adm_ctx.resource->adm_mutex); 4249 retcode = adm_del_minor(adm_ctx.device); 4250 mutex_unlock(&adm_ctx.resource->adm_mutex); 4251 out: 4252 drbd_adm_finish(&adm_ctx, info, retcode); 4253 return 0; 4254 } 4255 4256 static int adm_del_resource(struct drbd_resource *resource) 4257 { 4258 struct drbd_connection *connection; 4259 4260 for_each_connection(connection, resource) { 4261 if (connection->cstate > C_STANDALONE) 4262 return ERR_NET_CONFIGURED; 4263 } 4264 if (!idr_is_empty(&resource->devices)) 4265 return ERR_RES_IN_USE; 4266 4267 /* The state engine has stopped the sender thread, so we don't 4268 * need to flush the sender work queue before generating the 4269 * DESTROY event here. */ 4270 mutex_lock(¬ification_mutex); 4271 notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY); 4272 mutex_unlock(¬ification_mutex); 4273 4274 mutex_lock(&resources_mutex); 4275 list_del_rcu(&resource->resources); 4276 mutex_unlock(&resources_mutex); 4277 /* Make sure all threads have actually stopped: state handling only 4278 * does drbd_thread_stop_nowait(). */ 4279 list_for_each_entry(connection, &resource->connections, connections) 4280 drbd_thread_stop(&connection->worker); 4281 synchronize_rcu(); 4282 drbd_free_resource(resource); 4283 return NO_ERROR; 4284 } 4285 4286 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) 4287 { 4288 struct drbd_config_context adm_ctx; 4289 struct drbd_resource *resource; 4290 struct drbd_connection *connection; 4291 struct drbd_device *device; 4292 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */ 4293 unsigned i; 4294 4295 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 4296 if (!adm_ctx.reply_skb) 4297 return retcode; 4298 if (retcode != NO_ERROR) 4299 goto finish; 4300 4301 resource = adm_ctx.resource; 4302 mutex_lock(&resource->adm_mutex); 4303 /* demote */ 4304 for_each_connection(connection, resource) { 4305 struct drbd_peer_device *peer_device; 4306 4307 idr_for_each_entry(&connection->peer_devices, peer_device, i) { 4308 retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0); 4309 if (retcode < SS_SUCCESS) { 4310 drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote"); 4311 goto out; 4312 } 4313 } 4314 4315 retcode = conn_try_disconnect(connection, 0); 4316 if (retcode < SS_SUCCESS) { 4317 drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect"); 4318 goto out; 4319 } 4320 } 4321 4322 /* detach */ 4323 idr_for_each_entry(&resource->devices, device, i) { 4324 retcode = adm_detach(device, 0); 4325 if (retcode < SS_SUCCESS || retcode > NO_ERROR) { 4326 drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach"); 4327 goto out; 4328 } 4329 } 4330 4331 /* delete volumes */ 4332 idr_for_each_entry(&resource->devices, device, i) { 4333 retcode = adm_del_minor(device); 4334 if (retcode != NO_ERROR) { 4335 /* "can not happen" */ 4336 drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume"); 4337 goto out; 4338 } 4339 } 4340 4341 retcode = adm_del_resource(resource); 4342 out: 4343 mutex_unlock(&resource->adm_mutex); 4344 finish: 4345 drbd_adm_finish(&adm_ctx, info, retcode); 4346 return 0; 4347 } 4348 4349 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) 4350 { 4351 struct drbd_config_context adm_ctx; 4352 struct drbd_resource *resource; 4353 enum drbd_ret_code retcode; 4354 4355 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 4356 if (!adm_ctx.reply_skb) 4357 return retcode; 4358 if (retcode != NO_ERROR) 4359 goto finish; 4360 resource = adm_ctx.resource; 4361 4362 mutex_lock(&resource->adm_mutex); 4363 retcode = adm_del_resource(resource); 4364 mutex_unlock(&resource->adm_mutex); 4365 finish: 4366 drbd_adm_finish(&adm_ctx, info, retcode); 4367 return 0; 4368 } 4369 4370 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib) 4371 { 4372 struct sk_buff *msg; 4373 struct drbd_genlmsghdr *d_out; 4374 unsigned seq; 4375 int err = -ENOMEM; 4376 4377 seq = atomic_inc_return(&drbd_genl_seq); 4378 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); 4379 if (!msg) 4380 goto failed; 4381 4382 err = -EMSGSIZE; 4383 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT); 4384 if (!d_out) /* cannot happen, but anyways. */ 4385 goto nla_put_failure; 4386 d_out->minor = device_to_minor(device); 4387 d_out->ret_code = NO_ERROR; 4388 4389 if (nla_put_status_info(msg, device, sib)) 4390 goto nla_put_failure; 4391 genlmsg_end(msg, d_out); 4392 err = drbd_genl_multicast_events(msg, GFP_NOWAIT); 4393 /* msg has been consumed or freed in netlink_broadcast() */ 4394 if (err && err != -ESRCH) 4395 goto failed; 4396 4397 return; 4398 4399 nla_put_failure: 4400 nlmsg_free(msg); 4401 failed: 4402 drbd_err(device, "Error %d while broadcasting event. " 4403 "Event seq:%u sib_reason:%u\n", 4404 err, seq, sib->sib_reason); 4405 } 4406 4407 static int nla_put_notification_header(struct sk_buff *msg, 4408 enum drbd_notification_type type) 4409 { 4410 struct drbd_notification_header nh = { 4411 .nh_type = type, 4412 }; 4413 4414 return drbd_notification_header_to_skb(msg, &nh, true); 4415 } 4416 4417 void notify_resource_state(struct sk_buff *skb, 4418 unsigned int seq, 4419 struct drbd_resource *resource, 4420 struct resource_info *resource_info, 4421 enum drbd_notification_type type) 4422 { 4423 struct resource_statistics resource_statistics; 4424 struct drbd_genlmsghdr *dh; 4425 bool multicast = false; 4426 int err; 4427 4428 if (!skb) { 4429 seq = atomic_inc_return(¬ify_genl_seq); 4430 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); 4431 err = -ENOMEM; 4432 if (!skb) 4433 goto failed; 4434 multicast = true; 4435 } 4436 4437 err = -EMSGSIZE; 4438 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE); 4439 if (!dh) 4440 goto nla_put_failure; 4441 dh->minor = -1U; 4442 dh->ret_code = NO_ERROR; 4443 if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) || 4444 nla_put_notification_header(skb, type) || 4445 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY && 4446 resource_info_to_skb(skb, resource_info, true))) 4447 goto nla_put_failure; 4448 resource_statistics.res_stat_write_ordering = resource->write_ordering; 4449 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN)); 4450 if (err) 4451 goto nla_put_failure; 4452 genlmsg_end(skb, dh); 4453 if (multicast) { 4454 err = drbd_genl_multicast_events(skb, GFP_NOWAIT); 4455 /* skb has been consumed or freed in netlink_broadcast() */ 4456 if (err && err != -ESRCH) 4457 goto failed; 4458 } 4459 return; 4460 4461 nla_put_failure: 4462 nlmsg_free(skb); 4463 failed: 4464 drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n", 4465 err, seq); 4466 } 4467 4468 void notify_device_state(struct sk_buff *skb, 4469 unsigned int seq, 4470 struct drbd_device *device, 4471 struct device_info *device_info, 4472 enum drbd_notification_type type) 4473 { 4474 struct device_statistics device_statistics; 4475 struct drbd_genlmsghdr *dh; 4476 bool multicast = false; 4477 int err; 4478 4479 if (!skb) { 4480 seq = atomic_inc_return(¬ify_genl_seq); 4481 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); 4482 err = -ENOMEM; 4483 if (!skb) 4484 goto failed; 4485 multicast = true; 4486 } 4487 4488 err = -EMSGSIZE; 4489 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE); 4490 if (!dh) 4491 goto nla_put_failure; 4492 dh->minor = device->minor; 4493 dh->ret_code = NO_ERROR; 4494 if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) || 4495 nla_put_notification_header(skb, type) || 4496 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY && 4497 device_info_to_skb(skb, device_info, true))) 4498 goto nla_put_failure; 4499 device_to_statistics(&device_statistics, device); 4500 device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN)); 4501 genlmsg_end(skb, dh); 4502 if (multicast) { 4503 err = drbd_genl_multicast_events(skb, GFP_NOWAIT); 4504 /* skb has been consumed or freed in netlink_broadcast() */ 4505 if (err && err != -ESRCH) 4506 goto failed; 4507 } 4508 return; 4509 4510 nla_put_failure: 4511 nlmsg_free(skb); 4512 failed: 4513 drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n", 4514 err, seq); 4515 } 4516 4517 void notify_connection_state(struct sk_buff *skb, 4518 unsigned int seq, 4519 struct drbd_connection *connection, 4520 struct connection_info *connection_info, 4521 enum drbd_notification_type type) 4522 { 4523 struct connection_statistics connection_statistics; 4524 struct drbd_genlmsghdr *dh; 4525 bool multicast = false; 4526 int err; 4527 4528 if (!skb) { 4529 seq = atomic_inc_return(¬ify_genl_seq); 4530 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); 4531 err = -ENOMEM; 4532 if (!skb) 4533 goto failed; 4534 multicast = true; 4535 } 4536 4537 err = -EMSGSIZE; 4538 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE); 4539 if (!dh) 4540 goto nla_put_failure; 4541 dh->minor = -1U; 4542 dh->ret_code = NO_ERROR; 4543 if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) || 4544 nla_put_notification_header(skb, type) || 4545 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY && 4546 connection_info_to_skb(skb, connection_info, true))) 4547 goto nla_put_failure; 4548 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags); 4549 connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN)); 4550 genlmsg_end(skb, dh); 4551 if (multicast) { 4552 err = drbd_genl_multicast_events(skb, GFP_NOWAIT); 4553 /* skb has been consumed or freed in netlink_broadcast() */ 4554 if (err && err != -ESRCH) 4555 goto failed; 4556 } 4557 return; 4558 4559 nla_put_failure: 4560 nlmsg_free(skb); 4561 failed: 4562 drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n", 4563 err, seq); 4564 } 4565 4566 void notify_peer_device_state(struct sk_buff *skb, 4567 unsigned int seq, 4568 struct drbd_peer_device *peer_device, 4569 struct peer_device_info *peer_device_info, 4570 enum drbd_notification_type type) 4571 { 4572 struct peer_device_statistics peer_device_statistics; 4573 struct drbd_resource *resource = peer_device->device->resource; 4574 struct drbd_genlmsghdr *dh; 4575 bool multicast = false; 4576 int err; 4577 4578 if (!skb) { 4579 seq = atomic_inc_return(¬ify_genl_seq); 4580 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); 4581 err = -ENOMEM; 4582 if (!skb) 4583 goto failed; 4584 multicast = true; 4585 } 4586 4587 err = -EMSGSIZE; 4588 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE); 4589 if (!dh) 4590 goto nla_put_failure; 4591 dh->minor = -1U; 4592 dh->ret_code = NO_ERROR; 4593 if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) || 4594 nla_put_notification_header(skb, type) || 4595 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY && 4596 peer_device_info_to_skb(skb, peer_device_info, true))) 4597 goto nla_put_failure; 4598 peer_device_to_statistics(&peer_device_statistics, peer_device); 4599 peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN)); 4600 genlmsg_end(skb, dh); 4601 if (multicast) { 4602 err = drbd_genl_multicast_events(skb, GFP_NOWAIT); 4603 /* skb has been consumed or freed in netlink_broadcast() */ 4604 if (err && err != -ESRCH) 4605 goto failed; 4606 } 4607 return; 4608 4609 nla_put_failure: 4610 nlmsg_free(skb); 4611 failed: 4612 drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n", 4613 err, seq); 4614 } 4615 4616 void notify_helper(enum drbd_notification_type type, 4617 struct drbd_device *device, struct drbd_connection *connection, 4618 const char *name, int status) 4619 { 4620 struct drbd_resource *resource = device ? device->resource : connection->resource; 4621 struct drbd_helper_info helper_info; 4622 unsigned int seq = atomic_inc_return(¬ify_genl_seq); 4623 struct sk_buff *skb = NULL; 4624 struct drbd_genlmsghdr *dh; 4625 int err; 4626 4627 strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name)); 4628 helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name)); 4629 helper_info.helper_status = status; 4630 4631 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); 4632 err = -ENOMEM; 4633 if (!skb) 4634 goto fail; 4635 4636 err = -EMSGSIZE; 4637 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER); 4638 if (!dh) 4639 goto fail; 4640 dh->minor = device ? device->minor : -1; 4641 dh->ret_code = NO_ERROR; 4642 mutex_lock(¬ification_mutex); 4643 if (nla_put_drbd_cfg_context(skb, resource, connection, device) || 4644 nla_put_notification_header(skb, type) || 4645 drbd_helper_info_to_skb(skb, &helper_info, true)) 4646 goto unlock_fail; 4647 genlmsg_end(skb, dh); 4648 err = drbd_genl_multicast_events(skb, GFP_NOWAIT); 4649 skb = NULL; 4650 /* skb has been consumed or freed in netlink_broadcast() */ 4651 if (err && err != -ESRCH) 4652 goto unlock_fail; 4653 mutex_unlock(¬ification_mutex); 4654 return; 4655 4656 unlock_fail: 4657 mutex_unlock(¬ification_mutex); 4658 fail: 4659 nlmsg_free(skb); 4660 drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n", 4661 err, seq); 4662 } 4663 4664 static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) 4665 { 4666 struct drbd_genlmsghdr *dh; 4667 int err; 4668 4669 err = -EMSGSIZE; 4670 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE); 4671 if (!dh) 4672 goto nla_put_failure; 4673 dh->minor = -1U; 4674 dh->ret_code = NO_ERROR; 4675 if (nla_put_notification_header(skb, NOTIFY_EXISTS)) 4676 goto nla_put_failure; 4677 genlmsg_end(skb, dh); 4678 return; 4679 4680 nla_put_failure: 4681 nlmsg_free(skb); 4682 pr_err("Error %d sending event. Event seq:%u\n", err, seq); 4683 } 4684 4685 static void free_state_changes(struct list_head *list) 4686 { 4687 while (!list_empty(list)) { 4688 struct drbd_state_change *state_change = 4689 list_first_entry(list, struct drbd_state_change, list); 4690 list_del(&state_change->list); 4691 forget_state_change(state_change); 4692 } 4693 } 4694 4695 static unsigned int notifications_for_state_change(struct drbd_state_change *state_change) 4696 { 4697 return 1 + 4698 state_change->n_connections + 4699 state_change->n_devices + 4700 state_change->n_devices * state_change->n_connections; 4701 } 4702 4703 static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) 4704 { 4705 struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0]; 4706 unsigned int seq = cb->args[2]; 4707 unsigned int n; 4708 enum drbd_notification_type flags = 0; 4709 4710 /* There is no need for taking notification_mutex here: it doesn't 4711 matter if the initial state events mix with later state chage 4712 events; we can always tell the events apart by the NOTIFY_EXISTS 4713 flag. */ 4714 4715 cb->args[5]--; 4716 if (cb->args[5] == 1) { 4717 notify_initial_state_done(skb, seq); 4718 goto out; 4719 } 4720 n = cb->args[4]++; 4721 if (cb->args[4] < cb->args[3]) 4722 flags |= NOTIFY_CONTINUES; 4723 if (n < 1) { 4724 notify_resource_state_change(skb, seq, state_change->resource, 4725 NOTIFY_EXISTS | flags); 4726 goto next; 4727 } 4728 n--; 4729 if (n < state_change->n_connections) { 4730 notify_connection_state_change(skb, seq, &state_change->connections[n], 4731 NOTIFY_EXISTS | flags); 4732 goto next; 4733 } 4734 n -= state_change->n_connections; 4735 if (n < state_change->n_devices) { 4736 notify_device_state_change(skb, seq, &state_change->devices[n], 4737 NOTIFY_EXISTS | flags); 4738 goto next; 4739 } 4740 n -= state_change->n_devices; 4741 if (n < state_change->n_devices * state_change->n_connections) { 4742 notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], 4743 NOTIFY_EXISTS | flags); 4744 goto next; 4745 } 4746 4747 next: 4748 if (cb->args[4] == cb->args[3]) { 4749 struct drbd_state_change *next_state_change = 4750 list_entry(state_change->list.next, 4751 struct drbd_state_change, list); 4752 cb->args[0] = (long)next_state_change; 4753 cb->args[3] = notifications_for_state_change(next_state_change); 4754 cb->args[4] = 0; 4755 } 4756 out: 4757 return skb->len; 4758 } 4759 4760 int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) 4761 { 4762 struct drbd_resource *resource; 4763 LIST_HEAD(head); 4764 4765 if (cb->args[5] >= 1) { 4766 if (cb->args[5] > 1) 4767 return get_initial_state(skb, cb); 4768 if (cb->args[0]) { 4769 struct drbd_state_change *state_change = 4770 (struct drbd_state_change *)cb->args[0]; 4771 4772 /* connect list to head */ 4773 list_add(&head, &state_change->list); 4774 free_state_changes(&head); 4775 } 4776 return 0; 4777 } 4778 4779 cb->args[5] = 2; /* number of iterations */ 4780 mutex_lock(&resources_mutex); 4781 for_each_resource(resource, &drbd_resources) { 4782 struct drbd_state_change *state_change; 4783 4784 state_change = remember_old_state(resource, GFP_KERNEL); 4785 if (!state_change) { 4786 if (!list_empty(&head)) 4787 free_state_changes(&head); 4788 mutex_unlock(&resources_mutex); 4789 return -ENOMEM; 4790 } 4791 copy_old_to_new_state_change(state_change); 4792 list_add_tail(&state_change->list, &head); 4793 cb->args[5] += notifications_for_state_change(state_change); 4794 } 4795 mutex_unlock(&resources_mutex); 4796 4797 if (!list_empty(&head)) { 4798 struct drbd_state_change *state_change = 4799 list_entry(head.next, struct drbd_state_change, list); 4800 cb->args[0] = (long)state_change; 4801 cb->args[3] = notifications_for_state_change(state_change); 4802 list_del(&head); /* detach list from head */ 4803 } 4804 4805 cb->args[2] = cb->nlh->nlmsg_seq; 4806 return get_initial_state(skb, cb); 4807 } 4808