189b17223SAlexander Motin /*- 289b17223SAlexander Motin * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org> 389b17223SAlexander Motin * All rights reserved. 489b17223SAlexander Motin * 589b17223SAlexander Motin * Redistribution and use in source and binary forms, with or without 689b17223SAlexander Motin * modification, are permitted provided that the following conditions 789b17223SAlexander Motin * are met: 889b17223SAlexander Motin * 1. Redistributions of source code must retain the above copyright 989b17223SAlexander Motin * notice, this list of conditions and the following disclaimer. 1089b17223SAlexander Motin * 2. Redistributions in binary form must reproduce the above copyright 1189b17223SAlexander Motin * notice, this list of conditions and the following disclaimer in the 1289b17223SAlexander Motin * documentation and/or other materials provided with the distribution. 1389b17223SAlexander Motin * 1489b17223SAlexander Motin * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 1589b17223SAlexander Motin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1689b17223SAlexander Motin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1789b17223SAlexander Motin * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 1889b17223SAlexander Motin * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1989b17223SAlexander Motin * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2089b17223SAlexander Motin * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2189b17223SAlexander Motin * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2289b17223SAlexander Motin * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2389b17223SAlexander Motin * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2489b17223SAlexander Motin * SUCH DAMAGE. 2589b17223SAlexander Motin */ 2689b17223SAlexander Motin 2789b17223SAlexander Motin #include <sys/cdefs.h> 2889b17223SAlexander Motin __FBSDID("$FreeBSD$"); 2989b17223SAlexander Motin 3089b17223SAlexander Motin #include <sys/param.h> 3189b17223SAlexander Motin #include <sys/systm.h> 3289b17223SAlexander Motin #include <sys/kernel.h> 3389b17223SAlexander Motin #include <sys/module.h> 3489b17223SAlexander Motin #include <sys/limits.h> 3589b17223SAlexander Motin #include <sys/lock.h> 3689b17223SAlexander Motin #include <sys/mutex.h> 3789b17223SAlexander Motin #include <sys/bio.h> 385d807a0eSAndrey V. Elsukov #include <sys/sbuf.h> 3989b17223SAlexander Motin #include <sys/sysctl.h> 4089b17223SAlexander Motin #include <sys/malloc.h> 4189b17223SAlexander Motin #include <sys/eventhandler.h> 4289b17223SAlexander Motin #include <vm/uma.h> 4389b17223SAlexander Motin #include <geom/geom.h> 4489b17223SAlexander Motin #include <sys/proc.h> 4589b17223SAlexander Motin #include <sys/kthread.h> 4689b17223SAlexander Motin #include <sys/sched.h> 4789b17223SAlexander Motin #include <geom/raid/g_raid.h> 4889b17223SAlexander Motin #include "g_raid_md_if.h" 4989b17223SAlexander Motin #include "g_raid_tr_if.h" 5089b17223SAlexander Motin 5189b17223SAlexander Motin static MALLOC_DEFINE(M_RAID, "raid_data", "GEOM_RAID Data"); 5289b17223SAlexander Motin 5389b17223SAlexander Motin SYSCTL_DECL(_kern_geom); 5489b17223SAlexander Motin SYSCTL_NODE(_kern_geom, OID_AUTO, raid, CTLFLAG_RW, 0, "GEOM_RAID stuff"); 55c89d2fbeSAlexander Motin int g_raid_enable = 1; 56c89d2fbeSAlexander Motin TUNABLE_INT("kern.geom.raid.enable", &g_raid_enable); 57c89d2fbeSAlexander Motin SYSCTL_INT(_kern_geom_raid, OID_AUTO, enable, CTLFLAG_RW, 58c89d2fbeSAlexander Motin &g_raid_enable, 0, "Enable on-disk metadata taste"); 5989b17223SAlexander Motin u_int g_raid_aggressive_spare = 0; 6089b17223SAlexander Motin TUNABLE_INT("kern.geom.raid.aggressive_spare", &g_raid_aggressive_spare); 6189b17223SAlexander Motin SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RW, 6289b17223SAlexander Motin &g_raid_aggressive_spare, 0, "Use disks without metadata as spare"); 63fe51d6c1SAlexander Motin u_int g_raid_debug = 0; 6489b17223SAlexander Motin TUNABLE_INT("kern.geom.raid.debug", &g_raid_debug); 6589b17223SAlexander Motin SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RW, &g_raid_debug, 0, 6689b17223SAlexander Motin "Debug level"); 6789b17223SAlexander Motin int g_raid_read_err_thresh = 10; 6889b17223SAlexander Motin TUNABLE_INT("kern.geom.raid.read_err_thresh", &g_raid_read_err_thresh); 6989b17223SAlexander Motin SYSCTL_UINT(_kern_geom_raid, OID_AUTO, read_err_thresh, CTLFLAG_RW, 7089b17223SAlexander Motin &g_raid_read_err_thresh, 0, 7189b17223SAlexander Motin "Number of read errors equated to disk failure"); 7289b17223SAlexander Motin u_int g_raid_start_timeout = 30; 7389b17223SAlexander Motin TUNABLE_INT("kern.geom.raid.start_timeout", &g_raid_start_timeout); 7489b17223SAlexander Motin SYSCTL_UINT(_kern_geom_raid, OID_AUTO, start_timeout, CTLFLAG_RW, 7589b17223SAlexander Motin &g_raid_start_timeout, 0, 7689b17223SAlexander Motin "Time to wait for all array components"); 7789b17223SAlexander Motin static u_int g_raid_clean_time = 5; 7889b17223SAlexander Motin TUNABLE_INT("kern.geom.raid.clean_time", &g_raid_clean_time); 7989b17223SAlexander Motin SYSCTL_UINT(_kern_geom_raid, OID_AUTO, clean_time, CTLFLAG_RW, 8089b17223SAlexander Motin &g_raid_clean_time, 0, "Mark volume as clean when idling"); 8189b17223SAlexander Motin static u_int g_raid_disconnect_on_failure = 1; 8289b17223SAlexander Motin TUNABLE_INT("kern.geom.raid.disconnect_on_failure", 8389b17223SAlexander Motin &g_raid_disconnect_on_failure); 8489b17223SAlexander Motin SYSCTL_UINT(_kern_geom_raid, OID_AUTO, disconnect_on_failure, CTLFLAG_RW, 8589b17223SAlexander Motin &g_raid_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 8689b17223SAlexander Motin static u_int g_raid_name_format = 0; 8789b17223SAlexander Motin TUNABLE_INT("kern.geom.raid.name_format", &g_raid_name_format); 8889b17223SAlexander Motin SYSCTL_UINT(_kern_geom_raid, OID_AUTO, name_format, CTLFLAG_RW, 8989b17223SAlexander Motin &g_raid_name_format, 0, "Providers name format."); 9089b17223SAlexander Motin static u_int g_raid_idle_threshold = 1000000; 9189b17223SAlexander Motin TUNABLE_INT("kern.geom.raid.idle_threshold", &g_raid_idle_threshold); 9289b17223SAlexander Motin SYSCTL_UINT(_kern_geom_raid, OID_AUTO, idle_threshold, CTLFLAG_RW, 9389b17223SAlexander Motin &g_raid_idle_threshold, 1000000, 9489b17223SAlexander Motin "Time in microseconds to consider a volume idle."); 95bd9fba0cSSean Bruno static u_int ar_legacy_aliases = 1; 96bd9fba0cSSean Bruno SYSCTL_INT(_kern_geom_raid, OID_AUTO, legacy_aliases, CTLFLAG_RW, 97bd9fba0cSSean Bruno &ar_legacy_aliases, 0, "Create aliases named as the legacy ataraid style."); 98bd9fba0cSSean Bruno TUNABLE_INT("kern.geom_raid.legacy_aliases", &ar_legacy_aliases); 99bd9fba0cSSean Bruno 10089b17223SAlexander Motin 10189b17223SAlexander Motin #define MSLEEP(rv, ident, mtx, priority, wmesg, timeout) do { \ 10289b17223SAlexander Motin G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 10389b17223SAlexander Motin rv = msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 10489b17223SAlexander Motin G_RAID_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 10589b17223SAlexander Motin } while (0) 10689b17223SAlexander Motin 10789b17223SAlexander Motin LIST_HEAD(, g_raid_md_class) g_raid_md_classes = 10889b17223SAlexander Motin LIST_HEAD_INITIALIZER(g_raid_md_classes); 10989b17223SAlexander Motin 11089b17223SAlexander Motin LIST_HEAD(, g_raid_tr_class) g_raid_tr_classes = 11189b17223SAlexander Motin LIST_HEAD_INITIALIZER(g_raid_tr_classes); 11289b17223SAlexander Motin 11389b17223SAlexander Motin LIST_HEAD(, g_raid_volume) g_raid_volumes = 11489b17223SAlexander Motin LIST_HEAD_INITIALIZER(g_raid_volumes); 11589b17223SAlexander Motin 116a479c51bSAlexander Motin static eventhandler_tag g_raid_post_sync = NULL; 11789b17223SAlexander Motin static int g_raid_started = 0; 118a479c51bSAlexander Motin static int g_raid_shutdown = 0; 11989b17223SAlexander Motin 12089b17223SAlexander Motin static int g_raid_destroy_geom(struct gctl_req *req, struct g_class *mp, 12189b17223SAlexander Motin struct g_geom *gp); 12289b17223SAlexander Motin static g_taste_t g_raid_taste; 12389b17223SAlexander Motin static void g_raid_init(struct g_class *mp); 12489b17223SAlexander Motin static void g_raid_fini(struct g_class *mp); 12589b17223SAlexander Motin 12689b17223SAlexander Motin struct g_class g_raid_class = { 12789b17223SAlexander Motin .name = G_RAID_CLASS_NAME, 12889b17223SAlexander Motin .version = G_VERSION, 12989b17223SAlexander Motin .ctlreq = g_raid_ctl, 13089b17223SAlexander Motin .taste = g_raid_taste, 13189b17223SAlexander Motin .destroy_geom = g_raid_destroy_geom, 13289b17223SAlexander Motin .init = g_raid_init, 13389b17223SAlexander Motin .fini = g_raid_fini 13489b17223SAlexander Motin }; 13589b17223SAlexander Motin 13689b17223SAlexander Motin static void g_raid_destroy_provider(struct g_raid_volume *vol); 13789b17223SAlexander Motin static int g_raid_update_disk(struct g_raid_disk *disk, u_int event); 13889b17223SAlexander Motin static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int event); 13989b17223SAlexander Motin static int g_raid_update_volume(struct g_raid_volume *vol, u_int event); 14089b17223SAlexander Motin static int g_raid_update_node(struct g_raid_softc *sc, u_int event); 14189b17223SAlexander Motin static void g_raid_dumpconf(struct sbuf *sb, const char *indent, 14289b17223SAlexander Motin struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 14389b17223SAlexander Motin static void g_raid_start(struct bio *bp); 14489b17223SAlexander Motin static void g_raid_start_request(struct bio *bp); 14589b17223SAlexander Motin static void g_raid_disk_done(struct bio *bp); 14689b17223SAlexander Motin static void g_raid_poll(struct g_raid_softc *sc); 14789b17223SAlexander Motin 14889b17223SAlexander Motin static const char * 14989b17223SAlexander Motin g_raid_node_event2str(int event) 15089b17223SAlexander Motin { 15189b17223SAlexander Motin 15289b17223SAlexander Motin switch (event) { 15389b17223SAlexander Motin case G_RAID_NODE_E_WAKE: 15489b17223SAlexander Motin return ("WAKE"); 15589b17223SAlexander Motin case G_RAID_NODE_E_START: 15689b17223SAlexander Motin return ("START"); 15789b17223SAlexander Motin default: 15889b17223SAlexander Motin return ("INVALID"); 15989b17223SAlexander Motin } 16089b17223SAlexander Motin } 16189b17223SAlexander Motin 16289b17223SAlexander Motin const char * 16389b17223SAlexander Motin g_raid_disk_state2str(int state) 16489b17223SAlexander Motin { 16589b17223SAlexander Motin 16689b17223SAlexander Motin switch (state) { 16789b17223SAlexander Motin case G_RAID_DISK_S_NONE: 16889b17223SAlexander Motin return ("NONE"); 16989b17223SAlexander Motin case G_RAID_DISK_S_OFFLINE: 17089b17223SAlexander Motin return ("OFFLINE"); 17126c538bcSAlexander Motin case G_RAID_DISK_S_DISABLED: 17226c538bcSAlexander Motin return ("DISABLED"); 17389b17223SAlexander Motin case G_RAID_DISK_S_FAILED: 17489b17223SAlexander Motin return ("FAILED"); 17589b17223SAlexander Motin case G_RAID_DISK_S_STALE_FAILED: 17689b17223SAlexander Motin return ("STALE_FAILED"); 17789b17223SAlexander Motin case G_RAID_DISK_S_SPARE: 17889b17223SAlexander Motin return ("SPARE"); 17989b17223SAlexander Motin case G_RAID_DISK_S_STALE: 18089b17223SAlexander Motin return ("STALE"); 18189b17223SAlexander Motin case G_RAID_DISK_S_ACTIVE: 18289b17223SAlexander Motin return ("ACTIVE"); 18389b17223SAlexander Motin default: 18489b17223SAlexander Motin return ("INVALID"); 18589b17223SAlexander Motin } 18689b17223SAlexander Motin } 18789b17223SAlexander Motin 18889b17223SAlexander Motin static const char * 18989b17223SAlexander Motin g_raid_disk_event2str(int event) 19089b17223SAlexander Motin { 19189b17223SAlexander Motin 19289b17223SAlexander Motin switch (event) { 19389b17223SAlexander Motin case G_RAID_DISK_E_DISCONNECTED: 19489b17223SAlexander Motin return ("DISCONNECTED"); 19589b17223SAlexander Motin default: 19689b17223SAlexander Motin return ("INVALID"); 19789b17223SAlexander Motin } 19889b17223SAlexander Motin } 19989b17223SAlexander Motin 20089b17223SAlexander Motin const char * 20189b17223SAlexander Motin g_raid_subdisk_state2str(int state) 20289b17223SAlexander Motin { 20389b17223SAlexander Motin 20489b17223SAlexander Motin switch (state) { 20589b17223SAlexander Motin case G_RAID_SUBDISK_S_NONE: 20689b17223SAlexander Motin return ("NONE"); 20789b17223SAlexander Motin case G_RAID_SUBDISK_S_FAILED: 20889b17223SAlexander Motin return ("FAILED"); 20989b17223SAlexander Motin case G_RAID_SUBDISK_S_NEW: 21089b17223SAlexander Motin return ("NEW"); 21189b17223SAlexander Motin case G_RAID_SUBDISK_S_REBUILD: 21289b17223SAlexander Motin return ("REBUILD"); 21389b17223SAlexander Motin case G_RAID_SUBDISK_S_UNINITIALIZED: 21489b17223SAlexander Motin return ("UNINITIALIZED"); 21589b17223SAlexander Motin case G_RAID_SUBDISK_S_STALE: 21689b17223SAlexander Motin return ("STALE"); 21789b17223SAlexander Motin case G_RAID_SUBDISK_S_RESYNC: 21889b17223SAlexander Motin return ("RESYNC"); 21989b17223SAlexander Motin case G_RAID_SUBDISK_S_ACTIVE: 22089b17223SAlexander Motin return ("ACTIVE"); 22189b17223SAlexander Motin default: 22289b17223SAlexander Motin return ("INVALID"); 22389b17223SAlexander Motin } 22489b17223SAlexander Motin } 22589b17223SAlexander Motin 22689b17223SAlexander Motin static const char * 22789b17223SAlexander Motin g_raid_subdisk_event2str(int event) 22889b17223SAlexander Motin { 22989b17223SAlexander Motin 23089b17223SAlexander Motin switch (event) { 23189b17223SAlexander Motin case G_RAID_SUBDISK_E_NEW: 23289b17223SAlexander Motin return ("NEW"); 233d9d68496SAlexander Motin case G_RAID_SUBDISK_E_FAILED: 234d9d68496SAlexander Motin return ("FAILED"); 23589b17223SAlexander Motin case G_RAID_SUBDISK_E_DISCONNECTED: 23689b17223SAlexander Motin return ("DISCONNECTED"); 23789b17223SAlexander Motin default: 23889b17223SAlexander Motin return ("INVALID"); 23989b17223SAlexander Motin } 24089b17223SAlexander Motin } 24189b17223SAlexander Motin 24289b17223SAlexander Motin const char * 24389b17223SAlexander Motin g_raid_volume_state2str(int state) 24489b17223SAlexander Motin { 24589b17223SAlexander Motin 24689b17223SAlexander Motin switch (state) { 24789b17223SAlexander Motin case G_RAID_VOLUME_S_STARTING: 24889b17223SAlexander Motin return ("STARTING"); 24989b17223SAlexander Motin case G_RAID_VOLUME_S_BROKEN: 25089b17223SAlexander Motin return ("BROKEN"); 25189b17223SAlexander Motin case G_RAID_VOLUME_S_DEGRADED: 25289b17223SAlexander Motin return ("DEGRADED"); 25389b17223SAlexander Motin case G_RAID_VOLUME_S_SUBOPTIMAL: 25489b17223SAlexander Motin return ("SUBOPTIMAL"); 25589b17223SAlexander Motin case G_RAID_VOLUME_S_OPTIMAL: 25689b17223SAlexander Motin return ("OPTIMAL"); 25789b17223SAlexander Motin case G_RAID_VOLUME_S_UNSUPPORTED: 25889b17223SAlexander Motin return ("UNSUPPORTED"); 25989b17223SAlexander Motin case G_RAID_VOLUME_S_STOPPED: 26089b17223SAlexander Motin return ("STOPPED"); 26189b17223SAlexander Motin default: 26289b17223SAlexander Motin return ("INVALID"); 26389b17223SAlexander Motin } 26489b17223SAlexander Motin } 26589b17223SAlexander Motin 26689b17223SAlexander Motin static const char * 26789b17223SAlexander Motin g_raid_volume_event2str(int event) 26889b17223SAlexander Motin { 26989b17223SAlexander Motin 27089b17223SAlexander Motin switch (event) { 27189b17223SAlexander Motin case G_RAID_VOLUME_E_UP: 27289b17223SAlexander Motin return ("UP"); 27389b17223SAlexander Motin case G_RAID_VOLUME_E_DOWN: 27489b17223SAlexander Motin return ("DOWN"); 27589b17223SAlexander Motin case G_RAID_VOLUME_E_START: 27689b17223SAlexander Motin return ("START"); 27789b17223SAlexander Motin case G_RAID_VOLUME_E_STARTMD: 27889b17223SAlexander Motin return ("STARTMD"); 27989b17223SAlexander Motin default: 28089b17223SAlexander Motin return ("INVALID"); 28189b17223SAlexander Motin } 28289b17223SAlexander Motin } 28389b17223SAlexander Motin 28489b17223SAlexander Motin const char * 28589b17223SAlexander Motin g_raid_volume_level2str(int level, int qual) 28689b17223SAlexander Motin { 28789b17223SAlexander Motin 28889b17223SAlexander Motin switch (level) { 28989b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID0: 29089b17223SAlexander Motin return ("RAID0"); 29189b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID1: 29289b17223SAlexander Motin return ("RAID1"); 29389b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID3: 294dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R3P0) 295dbb2e755SAlexander Motin return ("RAID3-P0"); 296dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R3PN) 297dbb2e755SAlexander Motin return ("RAID3-PN"); 29889b17223SAlexander Motin return ("RAID3"); 29989b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID4: 300dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R4P0) 301bafd0b5bSAlexander Motin return ("RAID4-P0"); 302dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R4PN) 303bafd0b5bSAlexander Motin return ("RAID4-PN"); 30489b17223SAlexander Motin return ("RAID4"); 30589b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID5: 306fc1de960SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RA) 307dbb2e755SAlexander Motin return ("RAID5-RA"); 308fc1de960SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RS) 309dbb2e755SAlexander Motin return ("RAID5-RS"); 310fc1de960SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5LA) 311dbb2e755SAlexander Motin return ("RAID5-LA"); 312fc1de960SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5LS) 313dbb2e755SAlexander Motin return ("RAID5-LS"); 31489b17223SAlexander Motin return ("RAID5"); 31589b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID6: 316dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R6RA) 317dbb2e755SAlexander Motin return ("RAID6-RA"); 318dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R6RS) 319dbb2e755SAlexander Motin return ("RAID6-RS"); 320dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R6LA) 321dbb2e755SAlexander Motin return ("RAID6-LA"); 322dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R6LS) 323dbb2e755SAlexander Motin return ("RAID6-LS"); 32489b17223SAlexander Motin return ("RAID6"); 325dbb2e755SAlexander Motin case G_RAID_VOLUME_RL_RAIDMDF: 326dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_RMDFRA) 327dbb2e755SAlexander Motin return ("RAIDMDF-RA"); 328dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_RMDFRS) 329dbb2e755SAlexander Motin return ("RAIDMDF-RS"); 330dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_RMDFLA) 331dbb2e755SAlexander Motin return ("RAIDMDF-LA"); 332dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_RMDFLS) 333dbb2e755SAlexander Motin return ("RAIDMDF-LS"); 334dbb2e755SAlexander Motin return ("RAIDMDF"); 33589b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID1E: 336dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R1EA) 337dbb2e755SAlexander Motin return ("RAID1E-A"); 338dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R1EO) 339dbb2e755SAlexander Motin return ("RAID1E-O"); 34089b17223SAlexander Motin return ("RAID1E"); 34189b17223SAlexander Motin case G_RAID_VOLUME_RL_SINGLE: 34289b17223SAlexander Motin return ("SINGLE"); 34389b17223SAlexander Motin case G_RAID_VOLUME_RL_CONCAT: 34489b17223SAlexander Motin return ("CONCAT"); 34589b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID5E: 346dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5ERA) 347dbb2e755SAlexander Motin return ("RAID5E-RA"); 348dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5ERS) 349dbb2e755SAlexander Motin return ("RAID5E-RS"); 350dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5ELA) 351dbb2e755SAlexander Motin return ("RAID5E-LA"); 352dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5ELS) 353dbb2e755SAlexander Motin return ("RAID5E-LS"); 35489b17223SAlexander Motin return ("RAID5E"); 35589b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID5EE: 356dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5EERA) 357dbb2e755SAlexander Motin return ("RAID5EE-RA"); 358dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5EERS) 359dbb2e755SAlexander Motin return ("RAID5EE-RS"); 360dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5EELA) 361dbb2e755SAlexander Motin return ("RAID5EE-LA"); 362dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5EELS) 363dbb2e755SAlexander Motin return ("RAID5EE-LS"); 36489b17223SAlexander Motin return ("RAID5EE"); 365dbb2e755SAlexander Motin case G_RAID_VOLUME_RL_RAID5R: 366dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RRA) 367dbb2e755SAlexander Motin return ("RAID5R-RA"); 368dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RRS) 369dbb2e755SAlexander Motin return ("RAID5R-RS"); 370dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RLA) 371dbb2e755SAlexander Motin return ("RAID5R-LA"); 372dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RLS) 373dbb2e755SAlexander Motin return ("RAID5R-LS"); 374dbb2e755SAlexander Motin return ("RAID5E"); 37589b17223SAlexander Motin default: 37689b17223SAlexander Motin return ("UNKNOWN"); 37789b17223SAlexander Motin } 37889b17223SAlexander Motin } 37989b17223SAlexander Motin 38089b17223SAlexander Motin int 38189b17223SAlexander Motin g_raid_volume_str2level(const char *str, int *level, int *qual) 38289b17223SAlexander Motin { 38389b17223SAlexander Motin 38489b17223SAlexander Motin *level = G_RAID_VOLUME_RL_UNKNOWN; 38589b17223SAlexander Motin *qual = G_RAID_VOLUME_RLQ_NONE; 38689b17223SAlexander Motin if (strcasecmp(str, "RAID0") == 0) 38789b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID0; 38889b17223SAlexander Motin else if (strcasecmp(str, "RAID1") == 0) 38989b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID1; 390dbb2e755SAlexander Motin else if (strcasecmp(str, "RAID3-P0") == 0) { 39189b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID3; 392dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R3P0; 3934b97ff61SAlexander Motin } else if (strcasecmp(str, "RAID3-PN") == 0 || 394dbb2e755SAlexander Motin strcasecmp(str, "RAID3") == 0) { 395dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID3; 3964b97ff61SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R3PN; 397dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID4-P0") == 0) { 39889b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID4; 399dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R4P0; 4004b97ff61SAlexander Motin } else if (strcasecmp(str, "RAID4-PN") == 0 || 401dbb2e755SAlexander Motin strcasecmp(str, "RAID4") == 0) { 402dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID4; 4034b97ff61SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R4PN; 404dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5-RA") == 0) { 40589b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5; 406fc1de960SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RA; 407dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5-RS") == 0) { 408fc1de960SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5; 409fc1de960SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RS; 410fc1de960SAlexander Motin } else if (strcasecmp(str, "RAID5") == 0 || 411dbb2e755SAlexander Motin strcasecmp(str, "RAID5-LA") == 0) { 412fc1de960SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5; 413fc1de960SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5LA; 414dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5-LS") == 0) { 415fc1de960SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5; 416fc1de960SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5LS; 417dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID6-RA") == 0) { 41889b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID6; 419dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R6RA; 420dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID6-RS") == 0) { 421dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID6; 422dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R6RS; 423dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID6") == 0 || 424dbb2e755SAlexander Motin strcasecmp(str, "RAID6-LA") == 0) { 425dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID6; 426dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R6LA; 427dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID6-LS") == 0) { 428dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID6; 429dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R6LS; 430dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAIDMDF-RA") == 0) { 431dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAIDMDF; 432dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_RMDFRA; 433dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAIDMDF-RS") == 0) { 434dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAIDMDF; 435dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_RMDFRS; 436dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAIDMDF") == 0 || 437dbb2e755SAlexander Motin strcasecmp(str, "RAIDMDF-LA") == 0) { 438dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAIDMDF; 439dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_RMDFLA; 440dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAIDMDF-LS") == 0) { 441dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAIDMDF; 442dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_RMDFLS; 443dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID10") == 0 || 444dbb2e755SAlexander Motin strcasecmp(str, "RAID1E") == 0 || 445dbb2e755SAlexander Motin strcasecmp(str, "RAID1E-A") == 0) { 44689b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID1E; 447dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R1EA; 448dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID1E-O") == 0) { 449dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID1E; 450dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R1EO; 451dbb2e755SAlexander Motin } else if (strcasecmp(str, "SINGLE") == 0) 45289b17223SAlexander Motin *level = G_RAID_VOLUME_RL_SINGLE; 45389b17223SAlexander Motin else if (strcasecmp(str, "CONCAT") == 0) 45489b17223SAlexander Motin *level = G_RAID_VOLUME_RL_CONCAT; 455dbb2e755SAlexander Motin else if (strcasecmp(str, "RAID5E-RA") == 0) { 45689b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5E; 457dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5ERA; 458dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5E-RS") == 0) { 459dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5E; 460dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5ERS; 461dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5E") == 0 || 462dbb2e755SAlexander Motin strcasecmp(str, "RAID5E-LA") == 0) { 463dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5E; 464dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5ELA; 465dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5E-LS") == 0) { 466dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5E; 467dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5ELS; 468dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5EE-RA") == 0) { 46989b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5EE; 470dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5EERA; 471dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5EE-RS") == 0) { 472dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5EE; 473dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5EERS; 474dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5EE") == 0 || 475dbb2e755SAlexander Motin strcasecmp(str, "RAID5EE-LA") == 0) { 476dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5EE; 477dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5EELA; 478dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5EE-LS") == 0) { 479dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5EE; 480dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5EELS; 481dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5R-RA") == 0) { 482dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5R; 483dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RRA; 484dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5R-RS") == 0) { 485dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5R; 486dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RRS; 487dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5R") == 0 || 488dbb2e755SAlexander Motin strcasecmp(str, "RAID5R-LA") == 0) { 489dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5R; 490dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RLA; 491dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5R-LS") == 0) { 492dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5R; 493dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RLS; 494dbb2e755SAlexander Motin } else 49589b17223SAlexander Motin return (-1); 49689b17223SAlexander Motin return (0); 49789b17223SAlexander Motin } 49889b17223SAlexander Motin 49989b17223SAlexander Motin const char * 50089b17223SAlexander Motin g_raid_get_diskname(struct g_raid_disk *disk) 50189b17223SAlexander Motin { 50289b17223SAlexander Motin 50389b17223SAlexander Motin if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 50489b17223SAlexander Motin return ("[unknown]"); 50589b17223SAlexander Motin return (disk->d_consumer->provider->name); 50689b17223SAlexander Motin } 50789b17223SAlexander Motin 50889b17223SAlexander Motin void 509609a7474SAlexander Motin g_raid_get_disk_info(struct g_raid_disk *disk) 510609a7474SAlexander Motin { 511609a7474SAlexander Motin struct g_consumer *cp = disk->d_consumer; 512609a7474SAlexander Motin int error, len; 513609a7474SAlexander Motin 514609a7474SAlexander Motin /* Read kernel dumping information. */ 515609a7474SAlexander Motin disk->d_kd.offset = 0; 516609a7474SAlexander Motin disk->d_kd.length = OFF_MAX; 517609a7474SAlexander Motin len = sizeof(disk->d_kd); 518609a7474SAlexander Motin error = g_io_getattr("GEOM::kerneldump", cp, &len, &disk->d_kd); 519609a7474SAlexander Motin if (error) 520609a7474SAlexander Motin disk->d_kd.di.dumper = NULL; 521609a7474SAlexander Motin if (disk->d_kd.di.dumper == NULL) 522609a7474SAlexander Motin G_RAID_DEBUG1(2, disk->d_softc, 523609a7474SAlexander Motin "Dumping not supported by %s: %d.", 524609a7474SAlexander Motin cp->provider->name, error); 525609a7474SAlexander Motin 526609a7474SAlexander Motin /* Read BIO_DELETE support. */ 527609a7474SAlexander Motin error = g_getattr("GEOM::candelete", cp, &disk->d_candelete); 528609a7474SAlexander Motin if (error) 529609a7474SAlexander Motin disk->d_candelete = 0; 530609a7474SAlexander Motin if (!disk->d_candelete) 531609a7474SAlexander Motin G_RAID_DEBUG1(2, disk->d_softc, 532609a7474SAlexander Motin "BIO_DELETE not supported by %s: %d.", 533609a7474SAlexander Motin cp->provider->name, error); 534609a7474SAlexander Motin } 535609a7474SAlexander Motin 536609a7474SAlexander Motin void 53789b17223SAlexander Motin g_raid_report_disk_state(struct g_raid_disk *disk) 53889b17223SAlexander Motin { 53989b17223SAlexander Motin struct g_raid_subdisk *sd; 54089b17223SAlexander Motin int len, state; 54189b17223SAlexander Motin uint32_t s; 54289b17223SAlexander Motin 54389b17223SAlexander Motin if (disk->d_consumer == NULL) 54489b17223SAlexander Motin return; 54526c538bcSAlexander Motin if (disk->d_state == G_RAID_DISK_S_DISABLED) { 546b99586c2SAlexander Motin s = G_STATE_ACTIVE; /* XXX */ 54726c538bcSAlexander Motin } else if (disk->d_state == G_RAID_DISK_S_FAILED || 54889b17223SAlexander Motin disk->d_state == G_RAID_DISK_S_STALE_FAILED) { 54989b17223SAlexander Motin s = G_STATE_FAILED; 55089b17223SAlexander Motin } else { 55189b17223SAlexander Motin state = G_RAID_SUBDISK_S_ACTIVE; 55289b17223SAlexander Motin TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) { 55389b17223SAlexander Motin if (sd->sd_state < state) 55489b17223SAlexander Motin state = sd->sd_state; 55589b17223SAlexander Motin } 55689b17223SAlexander Motin if (state == G_RAID_SUBDISK_S_FAILED) 55789b17223SAlexander Motin s = G_STATE_FAILED; 55889b17223SAlexander Motin else if (state == G_RAID_SUBDISK_S_NEW || 55989b17223SAlexander Motin state == G_RAID_SUBDISK_S_REBUILD) 56089b17223SAlexander Motin s = G_STATE_REBUILD; 56189b17223SAlexander Motin else if (state == G_RAID_SUBDISK_S_STALE || 56289b17223SAlexander Motin state == G_RAID_SUBDISK_S_RESYNC) 56389b17223SAlexander Motin s = G_STATE_RESYNC; 56489b17223SAlexander Motin else 56589b17223SAlexander Motin s = G_STATE_ACTIVE; 56689b17223SAlexander Motin } 56789b17223SAlexander Motin len = sizeof(s); 56889b17223SAlexander Motin g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s); 56989b17223SAlexander Motin G_RAID_DEBUG1(2, disk->d_softc, "Disk %s state reported as %d.", 57089b17223SAlexander Motin g_raid_get_diskname(disk), s); 57189b17223SAlexander Motin } 57289b17223SAlexander Motin 57389b17223SAlexander Motin void 57489b17223SAlexander Motin g_raid_change_disk_state(struct g_raid_disk *disk, int state) 57589b17223SAlexander Motin { 57689b17223SAlexander Motin 57789b17223SAlexander Motin G_RAID_DEBUG1(0, disk->d_softc, "Disk %s state changed from %s to %s.", 57889b17223SAlexander Motin g_raid_get_diskname(disk), 57989b17223SAlexander Motin g_raid_disk_state2str(disk->d_state), 58089b17223SAlexander Motin g_raid_disk_state2str(state)); 58189b17223SAlexander Motin disk->d_state = state; 58289b17223SAlexander Motin g_raid_report_disk_state(disk); 58389b17223SAlexander Motin } 58489b17223SAlexander Motin 58589b17223SAlexander Motin void 58689b17223SAlexander Motin g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state) 58789b17223SAlexander Motin { 58889b17223SAlexander Motin 58989b17223SAlexander Motin G_RAID_DEBUG1(0, sd->sd_softc, 59089b17223SAlexander Motin "Subdisk %s:%d-%s state changed from %s to %s.", 59189b17223SAlexander Motin sd->sd_volume->v_name, sd->sd_pos, 59289b17223SAlexander Motin sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]", 59389b17223SAlexander Motin g_raid_subdisk_state2str(sd->sd_state), 59489b17223SAlexander Motin g_raid_subdisk_state2str(state)); 59589b17223SAlexander Motin sd->sd_state = state; 59689b17223SAlexander Motin if (sd->sd_disk) 59789b17223SAlexander Motin g_raid_report_disk_state(sd->sd_disk); 59889b17223SAlexander Motin } 59989b17223SAlexander Motin 60089b17223SAlexander Motin void 60189b17223SAlexander Motin g_raid_change_volume_state(struct g_raid_volume *vol, int state) 60289b17223SAlexander Motin { 60389b17223SAlexander Motin 60489b17223SAlexander Motin G_RAID_DEBUG1(0, vol->v_softc, 60589b17223SAlexander Motin "Volume %s state changed from %s to %s.", 60689b17223SAlexander Motin vol->v_name, 60789b17223SAlexander Motin g_raid_volume_state2str(vol->v_state), 60889b17223SAlexander Motin g_raid_volume_state2str(state)); 60989b17223SAlexander Motin vol->v_state = state; 61089b17223SAlexander Motin } 61189b17223SAlexander Motin 61289b17223SAlexander Motin /* 61389b17223SAlexander Motin * --- Events handling functions --- 61489b17223SAlexander Motin * Events in geom_raid are used to maintain subdisks and volumes status 61589b17223SAlexander Motin * from one thread to simplify locking. 61689b17223SAlexander Motin */ 61789b17223SAlexander Motin static void 61889b17223SAlexander Motin g_raid_event_free(struct g_raid_event *ep) 61989b17223SAlexander Motin { 62089b17223SAlexander Motin 62189b17223SAlexander Motin free(ep, M_RAID); 62289b17223SAlexander Motin } 62389b17223SAlexander Motin 62489b17223SAlexander Motin int 62589b17223SAlexander Motin g_raid_event_send(void *arg, int event, int flags) 62689b17223SAlexander Motin { 62789b17223SAlexander Motin struct g_raid_softc *sc; 62889b17223SAlexander Motin struct g_raid_event *ep; 62989b17223SAlexander Motin int error; 63089b17223SAlexander Motin 63189b17223SAlexander Motin if ((flags & G_RAID_EVENT_VOLUME) != 0) { 63289b17223SAlexander Motin sc = ((struct g_raid_volume *)arg)->v_softc; 63389b17223SAlexander Motin } else if ((flags & G_RAID_EVENT_DISK) != 0) { 63489b17223SAlexander Motin sc = ((struct g_raid_disk *)arg)->d_softc; 63589b17223SAlexander Motin } else if ((flags & G_RAID_EVENT_SUBDISK) != 0) { 63689b17223SAlexander Motin sc = ((struct g_raid_subdisk *)arg)->sd_softc; 63789b17223SAlexander Motin } else { 63889b17223SAlexander Motin sc = arg; 63989b17223SAlexander Motin } 64089b17223SAlexander Motin ep = malloc(sizeof(*ep), M_RAID, 64189b17223SAlexander Motin sx_xlocked(&sc->sc_lock) ? M_WAITOK : M_NOWAIT); 64289b17223SAlexander Motin if (ep == NULL) 64389b17223SAlexander Motin return (ENOMEM); 64489b17223SAlexander Motin ep->e_tgt = arg; 64589b17223SAlexander Motin ep->e_event = event; 64689b17223SAlexander Motin ep->e_flags = flags; 64789b17223SAlexander Motin ep->e_error = 0; 64889b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Sending event %p. Waking up %p.", ep, sc); 64989b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 65089b17223SAlexander Motin TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 65189b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 65289b17223SAlexander Motin wakeup(sc); 65389b17223SAlexander Motin 65489b17223SAlexander Motin if ((flags & G_RAID_EVENT_WAIT) == 0) 65589b17223SAlexander Motin return (0); 65689b17223SAlexander Motin 65789b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 65889b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Sleeping on %p.", ep); 65989b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 66089b17223SAlexander Motin while ((ep->e_flags & G_RAID_EVENT_DONE) == 0) { 66189b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 66289b17223SAlexander Motin MSLEEP(error, ep, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:event", 66389b17223SAlexander Motin hz * 5); 66489b17223SAlexander Motin } 66589b17223SAlexander Motin error = ep->e_error; 66689b17223SAlexander Motin g_raid_event_free(ep); 66789b17223SAlexander Motin sx_xlock(&sc->sc_lock); 66889b17223SAlexander Motin return (error); 66989b17223SAlexander Motin } 67089b17223SAlexander Motin 67189b17223SAlexander Motin static void 67289b17223SAlexander Motin g_raid_event_cancel(struct g_raid_softc *sc, void *tgt) 67389b17223SAlexander Motin { 67489b17223SAlexander Motin struct g_raid_event *ep, *tmpep; 67589b17223SAlexander Motin 67689b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 67789b17223SAlexander Motin 67889b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 67989b17223SAlexander Motin TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 68089b17223SAlexander Motin if (ep->e_tgt != tgt) 68189b17223SAlexander Motin continue; 68289b17223SAlexander Motin TAILQ_REMOVE(&sc->sc_events, ep, e_next); 68389b17223SAlexander Motin if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) 68489b17223SAlexander Motin g_raid_event_free(ep); 68589b17223SAlexander Motin else { 68689b17223SAlexander Motin ep->e_error = ECANCELED; 68789b17223SAlexander Motin wakeup(ep); 68889b17223SAlexander Motin } 68989b17223SAlexander Motin } 69089b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 69189b17223SAlexander Motin } 69289b17223SAlexander Motin 69389b17223SAlexander Motin static int 69489b17223SAlexander Motin g_raid_event_check(struct g_raid_softc *sc, void *tgt) 69589b17223SAlexander Motin { 69689b17223SAlexander Motin struct g_raid_event *ep; 69789b17223SAlexander Motin int res = 0; 69889b17223SAlexander Motin 69989b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 70089b17223SAlexander Motin 70189b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 70289b17223SAlexander Motin TAILQ_FOREACH(ep, &sc->sc_events, e_next) { 70389b17223SAlexander Motin if (ep->e_tgt != tgt) 70489b17223SAlexander Motin continue; 70589b17223SAlexander Motin res = 1; 70689b17223SAlexander Motin break; 70789b17223SAlexander Motin } 70889b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 70989b17223SAlexander Motin return (res); 71089b17223SAlexander Motin } 71189b17223SAlexander Motin 71289b17223SAlexander Motin /* 71389b17223SAlexander Motin * Return the number of disks in given state. 71489b17223SAlexander Motin * If state is equal to -1, count all connected disks. 71589b17223SAlexander Motin */ 71689b17223SAlexander Motin u_int 71789b17223SAlexander Motin g_raid_ndisks(struct g_raid_softc *sc, int state) 71889b17223SAlexander Motin { 71989b17223SAlexander Motin struct g_raid_disk *disk; 72089b17223SAlexander Motin u_int n; 72189b17223SAlexander Motin 72289b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 72389b17223SAlexander Motin 72489b17223SAlexander Motin n = 0; 72589b17223SAlexander Motin TAILQ_FOREACH(disk, &sc->sc_disks, d_next) { 72689b17223SAlexander Motin if (disk->d_state == state || state == -1) 72789b17223SAlexander Motin n++; 72889b17223SAlexander Motin } 72989b17223SAlexander Motin return (n); 73089b17223SAlexander Motin } 73189b17223SAlexander Motin 73289b17223SAlexander Motin /* 73389b17223SAlexander Motin * Return the number of subdisks in given state. 73489b17223SAlexander Motin * If state is equal to -1, count all connected disks. 73589b17223SAlexander Motin */ 73689b17223SAlexander Motin u_int 73789b17223SAlexander Motin g_raid_nsubdisks(struct g_raid_volume *vol, int state) 73889b17223SAlexander Motin { 73989b17223SAlexander Motin struct g_raid_subdisk *subdisk; 74089b17223SAlexander Motin struct g_raid_softc *sc; 74189b17223SAlexander Motin u_int i, n ; 74289b17223SAlexander Motin 74389b17223SAlexander Motin sc = vol->v_softc; 74489b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 74589b17223SAlexander Motin 74689b17223SAlexander Motin n = 0; 74789b17223SAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 74889b17223SAlexander Motin subdisk = &vol->v_subdisks[i]; 74989b17223SAlexander Motin if ((state == -1 && 75089b17223SAlexander Motin subdisk->sd_state != G_RAID_SUBDISK_S_NONE) || 75189b17223SAlexander Motin subdisk->sd_state == state) 75289b17223SAlexander Motin n++; 75389b17223SAlexander Motin } 75489b17223SAlexander Motin return (n); 75589b17223SAlexander Motin } 75689b17223SAlexander Motin 75789b17223SAlexander Motin /* 75889b17223SAlexander Motin * Return the first subdisk in given state. 75989b17223SAlexander Motin * If state is equal to -1, then the first connected disks. 76089b17223SAlexander Motin */ 76189b17223SAlexander Motin struct g_raid_subdisk * 76289b17223SAlexander Motin g_raid_get_subdisk(struct g_raid_volume *vol, int state) 76389b17223SAlexander Motin { 76489b17223SAlexander Motin struct g_raid_subdisk *sd; 76589b17223SAlexander Motin struct g_raid_softc *sc; 76689b17223SAlexander Motin u_int i; 76789b17223SAlexander Motin 76889b17223SAlexander Motin sc = vol->v_softc; 76989b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 77089b17223SAlexander Motin 77189b17223SAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 77289b17223SAlexander Motin sd = &vol->v_subdisks[i]; 77389b17223SAlexander Motin if ((state == -1 && 77489b17223SAlexander Motin sd->sd_state != G_RAID_SUBDISK_S_NONE) || 77589b17223SAlexander Motin sd->sd_state == state) 77689b17223SAlexander Motin return (sd); 77789b17223SAlexander Motin } 77889b17223SAlexander Motin return (NULL); 77989b17223SAlexander Motin } 78089b17223SAlexander Motin 78189b17223SAlexander Motin struct g_consumer * 78289b17223SAlexander Motin g_raid_open_consumer(struct g_raid_softc *sc, const char *name) 78389b17223SAlexander Motin { 78489b17223SAlexander Motin struct g_consumer *cp; 78589b17223SAlexander Motin struct g_provider *pp; 78689b17223SAlexander Motin 78789b17223SAlexander Motin g_topology_assert(); 78889b17223SAlexander Motin 78989b17223SAlexander Motin if (strncmp(name, "/dev/", 5) == 0) 79089b17223SAlexander Motin name += 5; 79189b17223SAlexander Motin pp = g_provider_by_name(name); 79289b17223SAlexander Motin if (pp == NULL) 79389b17223SAlexander Motin return (NULL); 79489b17223SAlexander Motin cp = g_new_consumer(sc->sc_geom); 79589b17223SAlexander Motin if (g_attach(cp, pp) != 0) { 79689b17223SAlexander Motin g_destroy_consumer(cp); 79789b17223SAlexander Motin return (NULL); 79889b17223SAlexander Motin } 79989b17223SAlexander Motin if (g_access(cp, 1, 1, 1) != 0) { 80089b17223SAlexander Motin g_detach(cp); 80189b17223SAlexander Motin g_destroy_consumer(cp); 80289b17223SAlexander Motin return (NULL); 80389b17223SAlexander Motin } 80489b17223SAlexander Motin return (cp); 80589b17223SAlexander Motin } 80689b17223SAlexander Motin 80789b17223SAlexander Motin static u_int 80889b17223SAlexander Motin g_raid_nrequests(struct g_raid_softc *sc, struct g_consumer *cp) 80989b17223SAlexander Motin { 81089b17223SAlexander Motin struct bio *bp; 81189b17223SAlexander Motin u_int nreqs = 0; 81289b17223SAlexander Motin 81389b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 81489b17223SAlexander Motin TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 81589b17223SAlexander Motin if (bp->bio_from == cp) 81689b17223SAlexander Motin nreqs++; 81789b17223SAlexander Motin } 81889b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 81989b17223SAlexander Motin return (nreqs); 82089b17223SAlexander Motin } 82189b17223SAlexander Motin 82289b17223SAlexander Motin u_int 82389b17223SAlexander Motin g_raid_nopens(struct g_raid_softc *sc) 82489b17223SAlexander Motin { 82589b17223SAlexander Motin struct g_raid_volume *vol; 82689b17223SAlexander Motin u_int opens; 82789b17223SAlexander Motin 82889b17223SAlexander Motin opens = 0; 82989b17223SAlexander Motin TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) { 83089b17223SAlexander Motin if (vol->v_provider_open != 0) 83189b17223SAlexander Motin opens++; 83289b17223SAlexander Motin } 83389b17223SAlexander Motin return (opens); 83489b17223SAlexander Motin } 83589b17223SAlexander Motin 83689b17223SAlexander Motin static int 83789b17223SAlexander Motin g_raid_consumer_is_busy(struct g_raid_softc *sc, struct g_consumer *cp) 83889b17223SAlexander Motin { 83989b17223SAlexander Motin 84089b17223SAlexander Motin if (cp->index > 0) { 84189b17223SAlexander Motin G_RAID_DEBUG1(2, sc, 84289b17223SAlexander Motin "I/O requests for %s exist, can't destroy it now.", 84389b17223SAlexander Motin cp->provider->name); 84489b17223SAlexander Motin return (1); 84589b17223SAlexander Motin } 84689b17223SAlexander Motin if (g_raid_nrequests(sc, cp) > 0) { 84789b17223SAlexander Motin G_RAID_DEBUG1(2, sc, 84889b17223SAlexander Motin "I/O requests for %s in queue, can't destroy it now.", 84989b17223SAlexander Motin cp->provider->name); 85089b17223SAlexander Motin return (1); 85189b17223SAlexander Motin } 85289b17223SAlexander Motin return (0); 85389b17223SAlexander Motin } 85489b17223SAlexander Motin 85589b17223SAlexander Motin static void 85689b17223SAlexander Motin g_raid_destroy_consumer(void *arg, int flags __unused) 85789b17223SAlexander Motin { 85889b17223SAlexander Motin struct g_consumer *cp; 85989b17223SAlexander Motin 86089b17223SAlexander Motin g_topology_assert(); 86189b17223SAlexander Motin 86289b17223SAlexander Motin cp = arg; 86389b17223SAlexander Motin G_RAID_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 86489b17223SAlexander Motin g_detach(cp); 86589b17223SAlexander Motin g_destroy_consumer(cp); 86689b17223SAlexander Motin } 86789b17223SAlexander Motin 86889b17223SAlexander Motin void 86989b17223SAlexander Motin g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp) 87089b17223SAlexander Motin { 87189b17223SAlexander Motin struct g_provider *pp; 87289b17223SAlexander Motin int retaste_wait; 87389b17223SAlexander Motin 87489b17223SAlexander Motin g_topology_assert_not(); 87589b17223SAlexander Motin 87689b17223SAlexander Motin g_topology_lock(); 87789b17223SAlexander Motin cp->private = NULL; 87889b17223SAlexander Motin if (g_raid_consumer_is_busy(sc, cp)) 87989b17223SAlexander Motin goto out; 88089b17223SAlexander Motin pp = cp->provider; 88189b17223SAlexander Motin retaste_wait = 0; 88289b17223SAlexander Motin if (cp->acw == 1) { 88389b17223SAlexander Motin if ((pp->geom->flags & G_GEOM_WITHER) == 0) 88489b17223SAlexander Motin retaste_wait = 1; 88589b17223SAlexander Motin } 88689b17223SAlexander Motin if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 88789b17223SAlexander Motin g_access(cp, -cp->acr, -cp->acw, -cp->ace); 88889b17223SAlexander Motin if (retaste_wait) { 88989b17223SAlexander Motin /* 89089b17223SAlexander Motin * After retaste event was send (inside g_access()), we can send 89189b17223SAlexander Motin * event to detach and destroy consumer. 89289b17223SAlexander Motin * A class, which has consumer to the given provider connected 89389b17223SAlexander Motin * will not receive retaste event for the provider. 89489b17223SAlexander Motin * This is the way how I ignore retaste events when I close 89589b17223SAlexander Motin * consumers opened for write: I detach and destroy consumer 89689b17223SAlexander Motin * after retaste event is sent. 89789b17223SAlexander Motin */ 89889b17223SAlexander Motin g_post_event(g_raid_destroy_consumer, cp, M_WAITOK, NULL); 89989b17223SAlexander Motin goto out; 90089b17223SAlexander Motin } 90189b17223SAlexander Motin G_RAID_DEBUG(1, "Consumer %s destroyed.", pp->name); 90289b17223SAlexander Motin g_detach(cp); 90389b17223SAlexander Motin g_destroy_consumer(cp); 90489b17223SAlexander Motin out: 90589b17223SAlexander Motin g_topology_unlock(); 90689b17223SAlexander Motin } 90789b17223SAlexander Motin 90889b17223SAlexander Motin static void 90989b17223SAlexander Motin g_raid_orphan(struct g_consumer *cp) 91089b17223SAlexander Motin { 91189b17223SAlexander Motin struct g_raid_disk *disk; 91289b17223SAlexander Motin 91389b17223SAlexander Motin g_topology_assert(); 91489b17223SAlexander Motin 91589b17223SAlexander Motin disk = cp->private; 91689b17223SAlexander Motin if (disk == NULL) 91789b17223SAlexander Motin return; 91889b17223SAlexander Motin g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED, 91989b17223SAlexander Motin G_RAID_EVENT_DISK); 92089b17223SAlexander Motin } 92189b17223SAlexander Motin 922a479c51bSAlexander Motin static void 92389b17223SAlexander Motin g_raid_clean(struct g_raid_volume *vol, int acw) 92489b17223SAlexander Motin { 92589b17223SAlexander Motin struct g_raid_softc *sc; 92689b17223SAlexander Motin int timeout; 92789b17223SAlexander Motin 92889b17223SAlexander Motin sc = vol->v_softc; 92989b17223SAlexander Motin g_topology_assert_not(); 93089b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 93189b17223SAlexander Motin 93289b17223SAlexander Motin // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0) 933a479c51bSAlexander Motin // return; 93489b17223SAlexander Motin if (!vol->v_dirty) 935a479c51bSAlexander Motin return; 93689b17223SAlexander Motin if (vol->v_writes > 0) 937a479c51bSAlexander Motin return; 93889b17223SAlexander Motin if (acw > 0 || (acw == -1 && 93989b17223SAlexander Motin vol->v_provider != NULL && vol->v_provider->acw > 0)) { 94089b17223SAlexander Motin timeout = g_raid_clean_time - (time_uptime - vol->v_last_write); 941a479c51bSAlexander Motin if (!g_raid_shutdown && timeout > 0) 942a479c51bSAlexander Motin return; 94389b17223SAlexander Motin } 94489b17223SAlexander Motin vol->v_dirty = 0; 94589b17223SAlexander Motin G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.", 94689b17223SAlexander Motin vol->v_name); 94789b17223SAlexander Motin g_raid_write_metadata(sc, vol, NULL, NULL); 94889b17223SAlexander Motin } 94989b17223SAlexander Motin 95089b17223SAlexander Motin static void 95189b17223SAlexander Motin g_raid_dirty(struct g_raid_volume *vol) 95289b17223SAlexander Motin { 95389b17223SAlexander Motin struct g_raid_softc *sc; 95489b17223SAlexander Motin 95589b17223SAlexander Motin sc = vol->v_softc; 95689b17223SAlexander Motin g_topology_assert_not(); 95789b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 95889b17223SAlexander Motin 95989b17223SAlexander Motin // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0) 96089b17223SAlexander Motin // return; 96189b17223SAlexander Motin vol->v_dirty = 1; 96289b17223SAlexander Motin G_RAID_DEBUG1(1, sc, "Volume %s marked as dirty.", 96389b17223SAlexander Motin vol->v_name); 96489b17223SAlexander Motin g_raid_write_metadata(sc, vol, NULL, NULL); 96589b17223SAlexander Motin } 96689b17223SAlexander Motin 96789b17223SAlexander Motin void 96889b17223SAlexander Motin g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp) 96989b17223SAlexander Motin { 97089b17223SAlexander Motin struct g_raid_softc *sc; 97189b17223SAlexander Motin struct g_raid_volume *vol; 97289b17223SAlexander Motin struct g_raid_subdisk *sd; 97389b17223SAlexander Motin struct bio_queue_head queue; 97489b17223SAlexander Motin struct bio *cbp; 97589b17223SAlexander Motin int i; 97689b17223SAlexander Motin 97789b17223SAlexander Motin vol = tr->tro_volume; 97889b17223SAlexander Motin sc = vol->v_softc; 97989b17223SAlexander Motin 98089b17223SAlexander Motin /* 98189b17223SAlexander Motin * Allocate all bios before sending any request, so we can return 98289b17223SAlexander Motin * ENOMEM in nice and clean way. 98389b17223SAlexander Motin */ 98489b17223SAlexander Motin bioq_init(&queue); 98589b17223SAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 98689b17223SAlexander Motin sd = &vol->v_subdisks[i]; 98789b17223SAlexander Motin if (sd->sd_state == G_RAID_SUBDISK_S_NONE || 98889b17223SAlexander Motin sd->sd_state == G_RAID_SUBDISK_S_FAILED) 98989b17223SAlexander Motin continue; 99089b17223SAlexander Motin cbp = g_clone_bio(bp); 99189b17223SAlexander Motin if (cbp == NULL) 99289b17223SAlexander Motin goto failure; 99389b17223SAlexander Motin cbp->bio_caller1 = sd; 99489b17223SAlexander Motin bioq_insert_tail(&queue, cbp); 99589b17223SAlexander Motin } 99689b17223SAlexander Motin for (cbp = bioq_first(&queue); cbp != NULL; 99789b17223SAlexander Motin cbp = bioq_first(&queue)) { 99889b17223SAlexander Motin bioq_remove(&queue, cbp); 99989b17223SAlexander Motin sd = cbp->bio_caller1; 100089b17223SAlexander Motin cbp->bio_caller1 = NULL; 100189b17223SAlexander Motin g_raid_subdisk_iostart(sd, cbp); 100289b17223SAlexander Motin } 100389b17223SAlexander Motin return; 100489b17223SAlexander Motin failure: 100589b17223SAlexander Motin for (cbp = bioq_first(&queue); cbp != NULL; 100689b17223SAlexander Motin cbp = bioq_first(&queue)) { 100789b17223SAlexander Motin bioq_remove(&queue, cbp); 100889b17223SAlexander Motin g_destroy_bio(cbp); 100989b17223SAlexander Motin } 101089b17223SAlexander Motin if (bp->bio_error == 0) 101189b17223SAlexander Motin bp->bio_error = ENOMEM; 101289b17223SAlexander Motin g_raid_iodone(bp, bp->bio_error); 101389b17223SAlexander Motin } 101489b17223SAlexander Motin 101589b17223SAlexander Motin static void 101689b17223SAlexander Motin g_raid_tr_kerneldump_common_done(struct bio *bp) 101789b17223SAlexander Motin { 101889b17223SAlexander Motin 101989b17223SAlexander Motin bp->bio_flags |= BIO_DONE; 102089b17223SAlexander Motin } 102189b17223SAlexander Motin 102289b17223SAlexander Motin int 102389b17223SAlexander Motin g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr, 102489b17223SAlexander Motin void *virtual, vm_offset_t physical, off_t offset, size_t length) 102589b17223SAlexander Motin { 102689b17223SAlexander Motin struct g_raid_softc *sc; 102789b17223SAlexander Motin struct g_raid_volume *vol; 102889b17223SAlexander Motin struct bio bp; 102989b17223SAlexander Motin 103089b17223SAlexander Motin vol = tr->tro_volume; 103189b17223SAlexander Motin sc = vol->v_softc; 103289b17223SAlexander Motin 103389b17223SAlexander Motin bzero(&bp, sizeof(bp)); 103489b17223SAlexander Motin bp.bio_cmd = BIO_WRITE; 103589b17223SAlexander Motin bp.bio_done = g_raid_tr_kerneldump_common_done; 103689b17223SAlexander Motin bp.bio_attribute = NULL; 103789b17223SAlexander Motin bp.bio_offset = offset; 103889b17223SAlexander Motin bp.bio_length = length; 103989b17223SAlexander Motin bp.bio_data = virtual; 104089b17223SAlexander Motin bp.bio_to = vol->v_provider; 104189b17223SAlexander Motin 104289b17223SAlexander Motin g_raid_start(&bp); 104389b17223SAlexander Motin while (!(bp.bio_flags & BIO_DONE)) { 104489b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Poll..."); 104589b17223SAlexander Motin g_raid_poll(sc); 104689b17223SAlexander Motin DELAY(10); 104789b17223SAlexander Motin } 104889b17223SAlexander Motin 104989b17223SAlexander Motin return (bp.bio_error != 0 ? EIO : 0); 105089b17223SAlexander Motin } 105189b17223SAlexander Motin 105289b17223SAlexander Motin static int 105389b17223SAlexander Motin g_raid_dump(void *arg, 105489b17223SAlexander Motin void *virtual, vm_offset_t physical, off_t offset, size_t length) 105589b17223SAlexander Motin { 105689b17223SAlexander Motin struct g_raid_volume *vol; 105789b17223SAlexander Motin int error; 105889b17223SAlexander Motin 105989b17223SAlexander Motin vol = (struct g_raid_volume *)arg; 106089b17223SAlexander Motin G_RAID_DEBUG1(3, vol->v_softc, "Dumping at off %llu len %llu.", 106189b17223SAlexander Motin (long long unsigned)offset, (long long unsigned)length); 106289b17223SAlexander Motin 106389b17223SAlexander Motin error = G_RAID_TR_KERNELDUMP(vol->v_tr, 106489b17223SAlexander Motin virtual, physical, offset, length); 106589b17223SAlexander Motin return (error); 106689b17223SAlexander Motin } 106789b17223SAlexander Motin 106889b17223SAlexander Motin static void 106989b17223SAlexander Motin g_raid_kerneldump(struct g_raid_softc *sc, struct bio *bp) 107089b17223SAlexander Motin { 107189b17223SAlexander Motin struct g_kerneldump *gkd; 107289b17223SAlexander Motin struct g_provider *pp; 107389b17223SAlexander Motin struct g_raid_volume *vol; 107489b17223SAlexander Motin 107589b17223SAlexander Motin gkd = (struct g_kerneldump*)bp->bio_data; 107689b17223SAlexander Motin pp = bp->bio_to; 107789b17223SAlexander Motin vol = pp->private; 107889b17223SAlexander Motin g_trace(G_T_TOPOLOGY, "g_raid_kerneldump(%s, %jd, %jd)", 107989b17223SAlexander Motin pp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length); 108089b17223SAlexander Motin gkd->di.dumper = g_raid_dump; 108189b17223SAlexander Motin gkd->di.priv = vol; 108289b17223SAlexander Motin gkd->di.blocksize = vol->v_sectorsize; 108389b17223SAlexander Motin gkd->di.maxiosize = DFLTPHYS; 108489b17223SAlexander Motin gkd->di.mediaoffset = gkd->offset; 108589b17223SAlexander Motin if ((gkd->offset + gkd->length) > vol->v_mediasize) 108689b17223SAlexander Motin gkd->length = vol->v_mediasize - gkd->offset; 108789b17223SAlexander Motin gkd->di.mediasize = gkd->length; 108889b17223SAlexander Motin g_io_deliver(bp, 0); 108989b17223SAlexander Motin } 109089b17223SAlexander Motin 109189b17223SAlexander Motin static void 1092609a7474SAlexander Motin g_raid_candelete(struct g_raid_softc *sc, struct bio *bp) 1093609a7474SAlexander Motin { 1094609a7474SAlexander Motin struct g_provider *pp; 1095609a7474SAlexander Motin struct g_raid_volume *vol; 1096609a7474SAlexander Motin struct g_raid_subdisk *sd; 1097609a7474SAlexander Motin int *val; 1098609a7474SAlexander Motin int i; 1099609a7474SAlexander Motin 1100609a7474SAlexander Motin val = (int *)bp->bio_data; 1101609a7474SAlexander Motin pp = bp->bio_to; 1102609a7474SAlexander Motin vol = pp->private; 1103609a7474SAlexander Motin *val = 0; 1104609a7474SAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 1105609a7474SAlexander Motin sd = &vol->v_subdisks[i]; 1106609a7474SAlexander Motin if (sd->sd_state == G_RAID_SUBDISK_S_NONE) 1107609a7474SAlexander Motin continue; 1108609a7474SAlexander Motin if (sd->sd_disk->d_candelete) { 1109609a7474SAlexander Motin *val = 1; 1110609a7474SAlexander Motin break; 1111609a7474SAlexander Motin } 1112609a7474SAlexander Motin } 1113609a7474SAlexander Motin g_io_deliver(bp, 0); 1114609a7474SAlexander Motin } 1115609a7474SAlexander Motin 1116609a7474SAlexander Motin static void 111789b17223SAlexander Motin g_raid_start(struct bio *bp) 111889b17223SAlexander Motin { 111989b17223SAlexander Motin struct g_raid_softc *sc; 112089b17223SAlexander Motin 112189b17223SAlexander Motin sc = bp->bio_to->geom->softc; 112289b17223SAlexander Motin /* 112389b17223SAlexander Motin * If sc == NULL or there are no valid disks, provider's error 112489b17223SAlexander Motin * should be set and g_raid_start() should not be called at all. 112589b17223SAlexander Motin */ 112689b17223SAlexander Motin // KASSERT(sc != NULL && sc->sc_state == G_RAID_VOLUME_S_RUNNING, 112789b17223SAlexander Motin // ("Provider's error should be set (error=%d)(mirror=%s).", 112889b17223SAlexander Motin // bp->bio_to->error, bp->bio_to->name)); 112989b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Request received."); 113089b17223SAlexander Motin 113189b17223SAlexander Motin switch (bp->bio_cmd) { 113289b17223SAlexander Motin case BIO_READ: 113389b17223SAlexander Motin case BIO_WRITE: 113489b17223SAlexander Motin case BIO_DELETE: 113589b17223SAlexander Motin case BIO_FLUSH: 113689b17223SAlexander Motin break; 113789b17223SAlexander Motin case BIO_GETATTR: 1138609a7474SAlexander Motin if (!strcmp(bp->bio_attribute, "GEOM::candelete")) 1139609a7474SAlexander Motin g_raid_candelete(sc, bp); 1140609a7474SAlexander Motin else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump")) 114189b17223SAlexander Motin g_raid_kerneldump(sc, bp); 114289b17223SAlexander Motin else 114389b17223SAlexander Motin g_io_deliver(bp, EOPNOTSUPP); 114489b17223SAlexander Motin return; 114589b17223SAlexander Motin default: 114689b17223SAlexander Motin g_io_deliver(bp, EOPNOTSUPP); 114789b17223SAlexander Motin return; 114889b17223SAlexander Motin } 114989b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 115089b17223SAlexander Motin bioq_disksort(&sc->sc_queue, bp); 115189b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 115289b17223SAlexander Motin if (!dumping) { 115389b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Waking up %p.", sc); 115489b17223SAlexander Motin wakeup(sc); 115589b17223SAlexander Motin } 115689b17223SAlexander Motin } 115789b17223SAlexander Motin 115889b17223SAlexander Motin static int 115989b17223SAlexander Motin g_raid_bio_overlaps(const struct bio *bp, off_t lstart, off_t len) 116089b17223SAlexander Motin { 116189b17223SAlexander Motin /* 116289b17223SAlexander Motin * 5 cases: 116389b17223SAlexander Motin * (1) bp entirely below NO 116489b17223SAlexander Motin * (2) bp entirely above NO 116589b17223SAlexander Motin * (3) bp start below, but end in range YES 116689b17223SAlexander Motin * (4) bp entirely within YES 116789b17223SAlexander Motin * (5) bp starts within, ends above YES 116889b17223SAlexander Motin * 116989b17223SAlexander Motin * lock range 10-19 (offset 10 length 10) 117089b17223SAlexander Motin * (1) 1-5: first if kicks it out 117189b17223SAlexander Motin * (2) 30-35: second if kicks it out 117289b17223SAlexander Motin * (3) 5-15: passes both ifs 117389b17223SAlexander Motin * (4) 12-14: passes both ifs 117489b17223SAlexander Motin * (5) 19-20: passes both 117589b17223SAlexander Motin */ 117689b17223SAlexander Motin off_t lend = lstart + len - 1; 117789b17223SAlexander Motin off_t bstart = bp->bio_offset; 117889b17223SAlexander Motin off_t bend = bp->bio_offset + bp->bio_length - 1; 117989b17223SAlexander Motin 118089b17223SAlexander Motin if (bend < lstart) 118189b17223SAlexander Motin return (0); 118289b17223SAlexander Motin if (lend < bstart) 118389b17223SAlexander Motin return (0); 118489b17223SAlexander Motin return (1); 118589b17223SAlexander Motin } 118689b17223SAlexander Motin 118789b17223SAlexander Motin static int 118889b17223SAlexander Motin g_raid_is_in_locked_range(struct g_raid_volume *vol, const struct bio *bp) 118989b17223SAlexander Motin { 119089b17223SAlexander Motin struct g_raid_lock *lp; 119189b17223SAlexander Motin 119289b17223SAlexander Motin sx_assert(&vol->v_softc->sc_lock, SX_LOCKED); 119389b17223SAlexander Motin 119489b17223SAlexander Motin LIST_FOREACH(lp, &vol->v_locks, l_next) { 119589b17223SAlexander Motin if (g_raid_bio_overlaps(bp, lp->l_offset, lp->l_length)) 119689b17223SAlexander Motin return (1); 119789b17223SAlexander Motin } 119889b17223SAlexander Motin return (0); 119989b17223SAlexander Motin } 120089b17223SAlexander Motin 120189b17223SAlexander Motin static void 120289b17223SAlexander Motin g_raid_start_request(struct bio *bp) 120389b17223SAlexander Motin { 120489b17223SAlexander Motin struct g_raid_softc *sc; 120589b17223SAlexander Motin struct g_raid_volume *vol; 120689b17223SAlexander Motin 120789b17223SAlexander Motin sc = bp->bio_to->geom->softc; 120889b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 120989b17223SAlexander Motin vol = bp->bio_to->private; 121089b17223SAlexander Motin 121189b17223SAlexander Motin /* 121289b17223SAlexander Motin * Check to see if this item is in a locked range. If so, 121389b17223SAlexander Motin * queue it to our locked queue and return. We'll requeue 121489b17223SAlexander Motin * it when the range is unlocked. Internal I/O for the 121589b17223SAlexander Motin * rebuild/rescan/recovery process is excluded from this 121689b17223SAlexander Motin * check so we can actually do the recovery. 121789b17223SAlexander Motin */ 121889b17223SAlexander Motin if (!(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL) && 121989b17223SAlexander Motin g_raid_is_in_locked_range(vol, bp)) { 122089b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Defer request."); 122189b17223SAlexander Motin bioq_insert_tail(&vol->v_locked, bp); 122289b17223SAlexander Motin return; 122389b17223SAlexander Motin } 122489b17223SAlexander Motin 122589b17223SAlexander Motin /* 122689b17223SAlexander Motin * If we're actually going to do the write/delete, then 122789b17223SAlexander Motin * update the idle stats for the volume. 122889b17223SAlexander Motin */ 122989b17223SAlexander Motin if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) { 123089b17223SAlexander Motin if (!vol->v_dirty) 123189b17223SAlexander Motin g_raid_dirty(vol); 123289b17223SAlexander Motin vol->v_writes++; 123389b17223SAlexander Motin } 123489b17223SAlexander Motin 123589b17223SAlexander Motin /* 123689b17223SAlexander Motin * Put request onto inflight queue, so we can check if new 123789b17223SAlexander Motin * synchronization requests don't collide with it. Then tell 123889b17223SAlexander Motin * the transformation layer to start the I/O. 123989b17223SAlexander Motin */ 124089b17223SAlexander Motin bioq_insert_tail(&vol->v_inflight, bp); 124189b17223SAlexander Motin G_RAID_LOGREQ(4, bp, "Request started"); 124289b17223SAlexander Motin G_RAID_TR_IOSTART(vol->v_tr, bp); 124389b17223SAlexander Motin } 124489b17223SAlexander Motin 124589b17223SAlexander Motin static void 124689b17223SAlexander Motin g_raid_finish_with_locked_ranges(struct g_raid_volume *vol, struct bio *bp) 124789b17223SAlexander Motin { 124889b17223SAlexander Motin off_t off, len; 124989b17223SAlexander Motin struct bio *nbp; 125089b17223SAlexander Motin struct g_raid_lock *lp; 125189b17223SAlexander Motin 125289b17223SAlexander Motin vol->v_pending_lock = 0; 125389b17223SAlexander Motin LIST_FOREACH(lp, &vol->v_locks, l_next) { 125489b17223SAlexander Motin if (lp->l_pending) { 125589b17223SAlexander Motin off = lp->l_offset; 125689b17223SAlexander Motin len = lp->l_length; 125789b17223SAlexander Motin lp->l_pending = 0; 125889b17223SAlexander Motin TAILQ_FOREACH(nbp, &vol->v_inflight.queue, bio_queue) { 125989b17223SAlexander Motin if (g_raid_bio_overlaps(nbp, off, len)) 126089b17223SAlexander Motin lp->l_pending++; 126189b17223SAlexander Motin } 126289b17223SAlexander Motin if (lp->l_pending) { 126389b17223SAlexander Motin vol->v_pending_lock = 1; 126489b17223SAlexander Motin G_RAID_DEBUG1(4, vol->v_softc, 126589b17223SAlexander Motin "Deferred lock(%jd, %jd) has %d pending", 126689b17223SAlexander Motin (intmax_t)off, (intmax_t)(off + len), 126789b17223SAlexander Motin lp->l_pending); 126889b17223SAlexander Motin continue; 126989b17223SAlexander Motin } 127089b17223SAlexander Motin G_RAID_DEBUG1(4, vol->v_softc, 127189b17223SAlexander Motin "Deferred lock of %jd to %jd completed", 127289b17223SAlexander Motin (intmax_t)off, (intmax_t)(off + len)); 127389b17223SAlexander Motin G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg); 127489b17223SAlexander Motin } 127589b17223SAlexander Motin } 127689b17223SAlexander Motin } 127789b17223SAlexander Motin 127889b17223SAlexander Motin void 127989b17223SAlexander Motin g_raid_iodone(struct bio *bp, int error) 128089b17223SAlexander Motin { 128189b17223SAlexander Motin struct g_raid_softc *sc; 128289b17223SAlexander Motin struct g_raid_volume *vol; 128389b17223SAlexander Motin 128489b17223SAlexander Motin sc = bp->bio_to->geom->softc; 128589b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 128689b17223SAlexander Motin vol = bp->bio_to->private; 128789b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Request done: %d.", error); 128889b17223SAlexander Motin 128989b17223SAlexander Motin /* Update stats if we done write/delete. */ 129089b17223SAlexander Motin if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) { 129189b17223SAlexander Motin vol->v_writes--; 129289b17223SAlexander Motin vol->v_last_write = time_uptime; 129389b17223SAlexander Motin } 129489b17223SAlexander Motin 129589b17223SAlexander Motin bioq_remove(&vol->v_inflight, bp); 129689b17223SAlexander Motin if (vol->v_pending_lock && g_raid_is_in_locked_range(vol, bp)) 129789b17223SAlexander Motin g_raid_finish_with_locked_ranges(vol, bp); 129889b17223SAlexander Motin getmicrouptime(&vol->v_last_done); 129989b17223SAlexander Motin g_io_deliver(bp, error); 130089b17223SAlexander Motin } 130189b17223SAlexander Motin 130289b17223SAlexander Motin int 130389b17223SAlexander Motin g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len, 130489b17223SAlexander Motin struct bio *ignore, void *argp) 130589b17223SAlexander Motin { 130689b17223SAlexander Motin struct g_raid_softc *sc; 130789b17223SAlexander Motin struct g_raid_lock *lp; 130889b17223SAlexander Motin struct bio *bp; 130989b17223SAlexander Motin 131089b17223SAlexander Motin sc = vol->v_softc; 131189b17223SAlexander Motin lp = malloc(sizeof(*lp), M_RAID, M_WAITOK | M_ZERO); 131289b17223SAlexander Motin LIST_INSERT_HEAD(&vol->v_locks, lp, l_next); 131389b17223SAlexander Motin lp->l_offset = off; 131489b17223SAlexander Motin lp->l_length = len; 131589b17223SAlexander Motin lp->l_callback_arg = argp; 131689b17223SAlexander Motin 131789b17223SAlexander Motin lp->l_pending = 0; 131889b17223SAlexander Motin TAILQ_FOREACH(bp, &vol->v_inflight.queue, bio_queue) { 131989b17223SAlexander Motin if (bp != ignore && g_raid_bio_overlaps(bp, off, len)) 132089b17223SAlexander Motin lp->l_pending++; 132189b17223SAlexander Motin } 132289b17223SAlexander Motin 132389b17223SAlexander Motin /* 132489b17223SAlexander Motin * If there are any writes that are pending, we return EBUSY. All 132589b17223SAlexander Motin * callers will have to wait until all pending writes clear. 132689b17223SAlexander Motin */ 132789b17223SAlexander Motin if (lp->l_pending > 0) { 132889b17223SAlexander Motin vol->v_pending_lock = 1; 132989b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd deferred %d pend", 133089b17223SAlexander Motin (intmax_t)off, (intmax_t)(off+len), lp->l_pending); 133189b17223SAlexander Motin return (EBUSY); 133289b17223SAlexander Motin } 133389b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd", 133489b17223SAlexander Motin (intmax_t)off, (intmax_t)(off+len)); 133589b17223SAlexander Motin G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg); 133689b17223SAlexander Motin return (0); 133789b17223SAlexander Motin } 133889b17223SAlexander Motin 133989b17223SAlexander Motin int 134089b17223SAlexander Motin g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len) 134189b17223SAlexander Motin { 134289b17223SAlexander Motin struct g_raid_lock *lp; 134389b17223SAlexander Motin struct g_raid_softc *sc; 134489b17223SAlexander Motin struct bio *bp; 134589b17223SAlexander Motin 134689b17223SAlexander Motin sc = vol->v_softc; 134789b17223SAlexander Motin LIST_FOREACH(lp, &vol->v_locks, l_next) { 134889b17223SAlexander Motin if (lp->l_offset == off && lp->l_length == len) { 134989b17223SAlexander Motin LIST_REMOVE(lp, l_next); 135089b17223SAlexander Motin /* XXX 135189b17223SAlexander Motin * Right now we just put them all back on the queue 135289b17223SAlexander Motin * and hope for the best. We hope this because any 135389b17223SAlexander Motin * locked ranges will go right back on this list 135489b17223SAlexander Motin * when the worker thread runs. 135589b17223SAlexander Motin * XXX 135689b17223SAlexander Motin */ 135789b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Unlocked %jd to %jd", 135889b17223SAlexander Motin (intmax_t)lp->l_offset, 135989b17223SAlexander Motin (intmax_t)(lp->l_offset+lp->l_length)); 136089b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 136189b17223SAlexander Motin while ((bp = bioq_takefirst(&vol->v_locked)) != NULL) 136289b17223SAlexander Motin bioq_disksort(&sc->sc_queue, bp); 136389b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 136489b17223SAlexander Motin free(lp, M_RAID); 136589b17223SAlexander Motin return (0); 136689b17223SAlexander Motin } 136789b17223SAlexander Motin } 136889b17223SAlexander Motin return (EINVAL); 136989b17223SAlexander Motin } 137089b17223SAlexander Motin 137189b17223SAlexander Motin void 137289b17223SAlexander Motin g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp) 137389b17223SAlexander Motin { 137489b17223SAlexander Motin struct g_consumer *cp; 137589b17223SAlexander Motin struct g_raid_disk *disk, *tdisk; 137689b17223SAlexander Motin 137789b17223SAlexander Motin bp->bio_caller1 = sd; 137889b17223SAlexander Motin 137989b17223SAlexander Motin /* 138089b17223SAlexander Motin * Make sure that the disk is present. Generally it is a task of 138189b17223SAlexander Motin * transformation layers to not send requests to absent disks, but 138289b17223SAlexander Motin * it is better to be safe and report situation then sorry. 138389b17223SAlexander Motin */ 138489b17223SAlexander Motin if (sd->sd_disk == NULL) { 138589b17223SAlexander Motin G_RAID_LOGREQ(0, bp, "Warning! I/O request to an absent disk!"); 138689b17223SAlexander Motin nodisk: 138789b17223SAlexander Motin bp->bio_from = NULL; 138889b17223SAlexander Motin bp->bio_to = NULL; 138989b17223SAlexander Motin bp->bio_error = ENXIO; 139089b17223SAlexander Motin g_raid_disk_done(bp); 139189b17223SAlexander Motin return; 139289b17223SAlexander Motin } 139389b17223SAlexander Motin disk = sd->sd_disk; 139489b17223SAlexander Motin if (disk->d_state != G_RAID_DISK_S_ACTIVE && 139589b17223SAlexander Motin disk->d_state != G_RAID_DISK_S_FAILED) { 139689b17223SAlexander Motin G_RAID_LOGREQ(0, bp, "Warning! I/O request to a disk in a " 139789b17223SAlexander Motin "wrong state (%s)!", g_raid_disk_state2str(disk->d_state)); 139889b17223SAlexander Motin goto nodisk; 139989b17223SAlexander Motin } 140089b17223SAlexander Motin 140189b17223SAlexander Motin cp = disk->d_consumer; 140289b17223SAlexander Motin bp->bio_from = cp; 140389b17223SAlexander Motin bp->bio_to = cp->provider; 140489b17223SAlexander Motin cp->index++; 140589b17223SAlexander Motin 140689b17223SAlexander Motin /* Update average disks load. */ 140789b17223SAlexander Motin TAILQ_FOREACH(tdisk, &sd->sd_softc->sc_disks, d_next) { 140889b17223SAlexander Motin if (tdisk->d_consumer == NULL) 140989b17223SAlexander Motin tdisk->d_load = 0; 141089b17223SAlexander Motin else 141189b17223SAlexander Motin tdisk->d_load = (tdisk->d_consumer->index * 141289b17223SAlexander Motin G_RAID_SUBDISK_LOAD_SCALE + tdisk->d_load * 7) / 8; 141389b17223SAlexander Motin } 141489b17223SAlexander Motin 141589b17223SAlexander Motin disk->d_last_offset = bp->bio_offset + bp->bio_length; 141689b17223SAlexander Motin if (dumping) { 141789b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Sending dumping request."); 141889b17223SAlexander Motin if (bp->bio_cmd == BIO_WRITE) { 141989b17223SAlexander Motin bp->bio_error = g_raid_subdisk_kerneldump(sd, 142089b17223SAlexander Motin bp->bio_data, 0, bp->bio_offset, bp->bio_length); 142189b17223SAlexander Motin } else 142289b17223SAlexander Motin bp->bio_error = EOPNOTSUPP; 142389b17223SAlexander Motin g_raid_disk_done(bp); 142489b17223SAlexander Motin } else { 142589b17223SAlexander Motin bp->bio_done = g_raid_disk_done; 142689b17223SAlexander Motin bp->bio_offset += sd->sd_offset; 142789b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Sending request."); 142889b17223SAlexander Motin g_io_request(bp, cp); 142989b17223SAlexander Motin } 143089b17223SAlexander Motin } 143189b17223SAlexander Motin 143289b17223SAlexander Motin int 143389b17223SAlexander Motin g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd, 143489b17223SAlexander Motin void *virtual, vm_offset_t physical, off_t offset, size_t length) 143589b17223SAlexander Motin { 143689b17223SAlexander Motin 143789b17223SAlexander Motin if (sd->sd_disk == NULL) 143889b17223SAlexander Motin return (ENXIO); 143989b17223SAlexander Motin if (sd->sd_disk->d_kd.di.dumper == NULL) 144089b17223SAlexander Motin return (EOPNOTSUPP); 144189b17223SAlexander Motin return (dump_write(&sd->sd_disk->d_kd.di, 144289b17223SAlexander Motin virtual, physical, 144389b17223SAlexander Motin sd->sd_disk->d_kd.di.mediaoffset + sd->sd_offset + offset, 144489b17223SAlexander Motin length)); 144589b17223SAlexander Motin } 144689b17223SAlexander Motin 144789b17223SAlexander Motin static void 144889b17223SAlexander Motin g_raid_disk_done(struct bio *bp) 144989b17223SAlexander Motin { 145089b17223SAlexander Motin struct g_raid_softc *sc; 145189b17223SAlexander Motin struct g_raid_subdisk *sd; 145289b17223SAlexander Motin 145389b17223SAlexander Motin sd = bp->bio_caller1; 145489b17223SAlexander Motin sc = sd->sd_softc; 145589b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 145689b17223SAlexander Motin bioq_disksort(&sc->sc_queue, bp); 145789b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 145889b17223SAlexander Motin if (!dumping) 145989b17223SAlexander Motin wakeup(sc); 146089b17223SAlexander Motin } 146189b17223SAlexander Motin 146289b17223SAlexander Motin static void 146389b17223SAlexander Motin g_raid_disk_done_request(struct bio *bp) 146489b17223SAlexander Motin { 146589b17223SAlexander Motin struct g_raid_softc *sc; 146689b17223SAlexander Motin struct g_raid_disk *disk; 146789b17223SAlexander Motin struct g_raid_subdisk *sd; 146889b17223SAlexander Motin struct g_raid_volume *vol; 146989b17223SAlexander Motin 147089b17223SAlexander Motin g_topology_assert_not(); 147189b17223SAlexander Motin 147289b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Disk request done: %d.", bp->bio_error); 147389b17223SAlexander Motin sd = bp->bio_caller1; 147489b17223SAlexander Motin sc = sd->sd_softc; 147589b17223SAlexander Motin vol = sd->sd_volume; 147689b17223SAlexander Motin if (bp->bio_from != NULL) { 147789b17223SAlexander Motin bp->bio_from->index--; 147889b17223SAlexander Motin disk = bp->bio_from->private; 147989b17223SAlexander Motin if (disk == NULL) 148089b17223SAlexander Motin g_raid_kill_consumer(sc, bp->bio_from); 148189b17223SAlexander Motin } 148289b17223SAlexander Motin bp->bio_offset -= sd->sd_offset; 148389b17223SAlexander Motin 148489b17223SAlexander Motin G_RAID_TR_IODONE(vol->v_tr, sd, bp); 148589b17223SAlexander Motin } 148689b17223SAlexander Motin 148789b17223SAlexander Motin static void 148889b17223SAlexander Motin g_raid_handle_event(struct g_raid_softc *sc, struct g_raid_event *ep) 148989b17223SAlexander Motin { 149089b17223SAlexander Motin 149189b17223SAlexander Motin if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0) 149289b17223SAlexander Motin ep->e_error = g_raid_update_volume(ep->e_tgt, ep->e_event); 149389b17223SAlexander Motin else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0) 149489b17223SAlexander Motin ep->e_error = g_raid_update_disk(ep->e_tgt, ep->e_event); 149589b17223SAlexander Motin else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0) 149689b17223SAlexander Motin ep->e_error = g_raid_update_subdisk(ep->e_tgt, ep->e_event); 149789b17223SAlexander Motin else 149889b17223SAlexander Motin ep->e_error = g_raid_update_node(ep->e_tgt, ep->e_event); 149989b17223SAlexander Motin if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) { 150089b17223SAlexander Motin KASSERT(ep->e_error == 0, 150189b17223SAlexander Motin ("Error cannot be handled.")); 150289b17223SAlexander Motin g_raid_event_free(ep); 150389b17223SAlexander Motin } else { 150489b17223SAlexander Motin ep->e_flags |= G_RAID_EVENT_DONE; 150589b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Waking up %p.", ep); 150689b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 150789b17223SAlexander Motin wakeup(ep); 150889b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 150989b17223SAlexander Motin } 151089b17223SAlexander Motin } 151189b17223SAlexander Motin 151289b17223SAlexander Motin /* 151389b17223SAlexander Motin * Worker thread. 151489b17223SAlexander Motin */ 151589b17223SAlexander Motin static void 151689b17223SAlexander Motin g_raid_worker(void *arg) 151789b17223SAlexander Motin { 151889b17223SAlexander Motin struct g_raid_softc *sc; 151989b17223SAlexander Motin struct g_raid_event *ep; 152089b17223SAlexander Motin struct g_raid_volume *vol; 152189b17223SAlexander Motin struct bio *bp; 152289b17223SAlexander Motin struct timeval now, t; 152389b17223SAlexander Motin int timeout, rv; 152489b17223SAlexander Motin 152589b17223SAlexander Motin sc = arg; 152689b17223SAlexander Motin thread_lock(curthread); 152789b17223SAlexander Motin sched_prio(curthread, PRIBIO); 152889b17223SAlexander Motin thread_unlock(curthread); 152989b17223SAlexander Motin 153089b17223SAlexander Motin sx_xlock(&sc->sc_lock); 153189b17223SAlexander Motin for (;;) { 153289b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 153389b17223SAlexander Motin /* 153489b17223SAlexander Motin * First take a look at events. 153589b17223SAlexander Motin * This is important to handle events before any I/O requests. 153689b17223SAlexander Motin */ 153789b17223SAlexander Motin bp = NULL; 153889b17223SAlexander Motin vol = NULL; 153989b17223SAlexander Motin rv = 0; 154089b17223SAlexander Motin ep = TAILQ_FIRST(&sc->sc_events); 154189b17223SAlexander Motin if (ep != NULL) 154289b17223SAlexander Motin TAILQ_REMOVE(&sc->sc_events, ep, e_next); 154389b17223SAlexander Motin else if ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) 154489b17223SAlexander Motin ; 154589b17223SAlexander Motin else { 154689b17223SAlexander Motin getmicrouptime(&now); 154789b17223SAlexander Motin t = now; 154889b17223SAlexander Motin TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) { 154989b17223SAlexander Motin if (bioq_first(&vol->v_inflight) == NULL && 155089b17223SAlexander Motin vol->v_tr && 155189b17223SAlexander Motin timevalcmp(&vol->v_last_done, &t, < )) 155289b17223SAlexander Motin t = vol->v_last_done; 155389b17223SAlexander Motin } 155489b17223SAlexander Motin timevalsub(&t, &now); 155589b17223SAlexander Motin timeout = g_raid_idle_threshold + 155689b17223SAlexander Motin t.tv_sec * 1000000 + t.tv_usec; 155789b17223SAlexander Motin if (timeout > 0) { 155889b17223SAlexander Motin /* 155989b17223SAlexander Motin * Two steps to avoid overflows at HZ=1000 156089b17223SAlexander Motin * and idle timeouts > 2.1s. Some rounding 156189b17223SAlexander Motin * errors can occur, but they are < 1tick, 156289b17223SAlexander Motin * which is deemed to be close enough for 156389b17223SAlexander Motin * this purpose. 156489b17223SAlexander Motin */ 156589b17223SAlexander Motin int micpertic = 1000000 / hz; 156689b17223SAlexander Motin timeout = (timeout + micpertic - 1) / micpertic; 156789b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 156889b17223SAlexander Motin MSLEEP(rv, sc, &sc->sc_queue_mtx, 156989b17223SAlexander Motin PRIBIO | PDROP, "-", timeout); 157089b17223SAlexander Motin sx_xlock(&sc->sc_lock); 157189b17223SAlexander Motin goto process; 157289b17223SAlexander Motin } else 157389b17223SAlexander Motin rv = EWOULDBLOCK; 157489b17223SAlexander Motin } 157589b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 157689b17223SAlexander Motin process: 157789b17223SAlexander Motin if (ep != NULL) { 157889b17223SAlexander Motin g_raid_handle_event(sc, ep); 157989b17223SAlexander Motin } else if (bp != NULL) { 158089b17223SAlexander Motin if (bp->bio_to != NULL && 158189b17223SAlexander Motin bp->bio_to->geom == sc->sc_geom) 158289b17223SAlexander Motin g_raid_start_request(bp); 158389b17223SAlexander Motin else 158489b17223SAlexander Motin g_raid_disk_done_request(bp); 158589b17223SAlexander Motin } else if (rv == EWOULDBLOCK) { 158689b17223SAlexander Motin TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) { 158789b17223SAlexander Motin g_raid_clean(vol, -1); 158889b17223SAlexander Motin if (bioq_first(&vol->v_inflight) == NULL && 158989b17223SAlexander Motin vol->v_tr) { 159089b17223SAlexander Motin t.tv_sec = g_raid_idle_threshold / 1000000; 159189b17223SAlexander Motin t.tv_usec = g_raid_idle_threshold % 1000000; 159289b17223SAlexander Motin timevaladd(&t, &vol->v_last_done); 159389b17223SAlexander Motin getmicrouptime(&now); 159489b17223SAlexander Motin if (timevalcmp(&t, &now, <= )) { 159589b17223SAlexander Motin G_RAID_TR_IDLE(vol->v_tr); 159689b17223SAlexander Motin vol->v_last_done = now; 159789b17223SAlexander Motin } 159889b17223SAlexander Motin } 159989b17223SAlexander Motin } 160089b17223SAlexander Motin } 160189b17223SAlexander Motin if (sc->sc_stopping == G_RAID_DESTROY_HARD) 160289b17223SAlexander Motin g_raid_destroy_node(sc, 1); /* May not return. */ 160389b17223SAlexander Motin } 160489b17223SAlexander Motin } 160589b17223SAlexander Motin 160689b17223SAlexander Motin static void 160789b17223SAlexander Motin g_raid_poll(struct g_raid_softc *sc) 160889b17223SAlexander Motin { 160989b17223SAlexander Motin struct g_raid_event *ep; 161089b17223SAlexander Motin struct bio *bp; 161189b17223SAlexander Motin 161289b17223SAlexander Motin sx_xlock(&sc->sc_lock); 161389b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 161489b17223SAlexander Motin /* 161589b17223SAlexander Motin * First take a look at events. 161689b17223SAlexander Motin * This is important to handle events before any I/O requests. 161789b17223SAlexander Motin */ 161889b17223SAlexander Motin ep = TAILQ_FIRST(&sc->sc_events); 161989b17223SAlexander Motin if (ep != NULL) { 162089b17223SAlexander Motin TAILQ_REMOVE(&sc->sc_events, ep, e_next); 162189b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 162289b17223SAlexander Motin g_raid_handle_event(sc, ep); 162389b17223SAlexander Motin goto out; 162489b17223SAlexander Motin } 162589b17223SAlexander Motin bp = bioq_takefirst(&sc->sc_queue); 162689b17223SAlexander Motin if (bp != NULL) { 162789b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 162889b17223SAlexander Motin if (bp->bio_from == NULL || 162989b17223SAlexander Motin bp->bio_from->geom != sc->sc_geom) 163089b17223SAlexander Motin g_raid_start_request(bp); 163189b17223SAlexander Motin else 163289b17223SAlexander Motin g_raid_disk_done_request(bp); 163389b17223SAlexander Motin } 163489b17223SAlexander Motin out: 163589b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 163689b17223SAlexander Motin } 163789b17223SAlexander Motin 163889b17223SAlexander Motin static void 163989b17223SAlexander Motin g_raid_launch_provider(struct g_raid_volume *vol) 164089b17223SAlexander Motin { 164189b17223SAlexander Motin struct g_raid_disk *disk; 164289b17223SAlexander Motin struct g_raid_softc *sc; 164389b17223SAlexander Motin struct g_provider *pp; 164489b17223SAlexander Motin char name[G_RAID_MAX_VOLUMENAME]; 1645bd9fba0cSSean Bruno char announce_buf[80], buf1[32]; 164689b17223SAlexander Motin off_t off; 164789b17223SAlexander Motin 164889b17223SAlexander Motin sc = vol->v_softc; 164989b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 165089b17223SAlexander Motin 165189b17223SAlexander Motin g_topology_lock(); 165289b17223SAlexander Motin /* Try to name provider with volume name. */ 165389b17223SAlexander Motin snprintf(name, sizeof(name), "raid/%s", vol->v_name); 165489b17223SAlexander Motin if (g_raid_name_format == 0 || vol->v_name[0] == 0 || 165589b17223SAlexander Motin g_provider_by_name(name) != NULL) { 165689b17223SAlexander Motin /* Otherwise use sequential volume number. */ 165789b17223SAlexander Motin snprintf(name, sizeof(name), "raid/r%d", vol->v_global_id); 165889b17223SAlexander Motin } 1659bd9fba0cSSean Bruno 1660bd9fba0cSSean Bruno /* 1661bd9fba0cSSean Bruno * Create a /dev/ar%d that the old ataraid(4) stack once 1662bd9fba0cSSean Bruno * created as an alias for /dev/raid/r%d if requested. 1663bd9fba0cSSean Bruno * This helps going from stable/7 ataraid devices to newer 1664bd9fba0cSSean Bruno * FreeBSD releases. sbruno 07 MAY 2013 1665bd9fba0cSSean Bruno */ 1666bd9fba0cSSean Bruno 1667bd9fba0cSSean Bruno if (ar_legacy_aliases) { 1668bd9fba0cSSean Bruno snprintf(announce_buf, sizeof(announce_buf), 1669bd9fba0cSSean Bruno "kern.devalias.%s", name); 1670bd9fba0cSSean Bruno snprintf(buf1, sizeof(buf1), 1671bd9fba0cSSean Bruno "ar%d", vol->v_global_id); 1672bd9fba0cSSean Bruno setenv(announce_buf, buf1); 1673bd9fba0cSSean Bruno } 1674bd9fba0cSSean Bruno 167589b17223SAlexander Motin pp = g_new_providerf(sc->sc_geom, "%s", name); 167689b17223SAlexander Motin pp->private = vol; 167789b17223SAlexander Motin pp->mediasize = vol->v_mediasize; 167889b17223SAlexander Motin pp->sectorsize = vol->v_sectorsize; 167989b17223SAlexander Motin pp->stripesize = 0; 168089b17223SAlexander Motin pp->stripeoffset = 0; 168189b17223SAlexander Motin if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 || 168289b17223SAlexander Motin vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 || 168389b17223SAlexander Motin vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE || 168489b17223SAlexander Motin vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT) { 168589b17223SAlexander Motin if ((disk = vol->v_subdisks[0].sd_disk) != NULL && 168689b17223SAlexander Motin disk->d_consumer != NULL && 168789b17223SAlexander Motin disk->d_consumer->provider != NULL) { 168889b17223SAlexander Motin pp->stripesize = disk->d_consumer->provider->stripesize; 168989b17223SAlexander Motin off = disk->d_consumer->provider->stripeoffset; 169089b17223SAlexander Motin pp->stripeoffset = off + vol->v_subdisks[0].sd_offset; 169189b17223SAlexander Motin if (off > 0) 169289b17223SAlexander Motin pp->stripeoffset %= off; 169389b17223SAlexander Motin } 169489b17223SAlexander Motin if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3) { 169589b17223SAlexander Motin pp->stripesize *= (vol->v_disks_count - 1); 169689b17223SAlexander Motin pp->stripeoffset *= (vol->v_disks_count - 1); 169789b17223SAlexander Motin } 169889b17223SAlexander Motin } else 169989b17223SAlexander Motin pp->stripesize = vol->v_strip_size; 170089b17223SAlexander Motin vol->v_provider = pp; 170189b17223SAlexander Motin g_error_provider(pp, 0); 170289b17223SAlexander Motin g_topology_unlock(); 170389b17223SAlexander Motin G_RAID_DEBUG1(0, sc, "Provider %s for volume %s created.", 170489b17223SAlexander Motin pp->name, vol->v_name); 170589b17223SAlexander Motin } 170689b17223SAlexander Motin 170789b17223SAlexander Motin static void 170889b17223SAlexander Motin g_raid_destroy_provider(struct g_raid_volume *vol) 170989b17223SAlexander Motin { 171089b17223SAlexander Motin struct g_raid_softc *sc; 171189b17223SAlexander Motin struct g_provider *pp; 171289b17223SAlexander Motin struct bio *bp, *tmp; 171389b17223SAlexander Motin 171489b17223SAlexander Motin g_topology_assert_not(); 171589b17223SAlexander Motin sc = vol->v_softc; 171689b17223SAlexander Motin pp = vol->v_provider; 171789b17223SAlexander Motin KASSERT(pp != NULL, ("NULL provider (volume=%s).", vol->v_name)); 171889b17223SAlexander Motin 171989b17223SAlexander Motin g_topology_lock(); 172089b17223SAlexander Motin g_error_provider(pp, ENXIO); 172189b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 172289b17223SAlexander Motin TAILQ_FOREACH_SAFE(bp, &sc->sc_queue.queue, bio_queue, tmp) { 172389b17223SAlexander Motin if (bp->bio_to != pp) 172489b17223SAlexander Motin continue; 172589b17223SAlexander Motin bioq_remove(&sc->sc_queue, bp); 172689b17223SAlexander Motin g_io_deliver(bp, ENXIO); 172789b17223SAlexander Motin } 172889b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 172989b17223SAlexander Motin G_RAID_DEBUG1(0, sc, "Provider %s for volume %s destroyed.", 173089b17223SAlexander Motin pp->name, vol->v_name); 173189b17223SAlexander Motin g_wither_provider(pp, ENXIO); 173289b17223SAlexander Motin g_topology_unlock(); 173389b17223SAlexander Motin vol->v_provider = NULL; 173489b17223SAlexander Motin } 173589b17223SAlexander Motin 173689b17223SAlexander Motin /* 173789b17223SAlexander Motin * Update device state. 173889b17223SAlexander Motin */ 173989b17223SAlexander Motin static int 174089b17223SAlexander Motin g_raid_update_volume(struct g_raid_volume *vol, u_int event) 174189b17223SAlexander Motin { 174289b17223SAlexander Motin struct g_raid_softc *sc; 174389b17223SAlexander Motin 174489b17223SAlexander Motin sc = vol->v_softc; 174589b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 174689b17223SAlexander Motin 174789b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Event %s for volume %s.", 174889b17223SAlexander Motin g_raid_volume_event2str(event), 174989b17223SAlexander Motin vol->v_name); 175089b17223SAlexander Motin switch (event) { 175189b17223SAlexander Motin case G_RAID_VOLUME_E_DOWN: 175289b17223SAlexander Motin if (vol->v_provider != NULL) 175389b17223SAlexander Motin g_raid_destroy_provider(vol); 175489b17223SAlexander Motin break; 175589b17223SAlexander Motin case G_RAID_VOLUME_E_UP: 175689b17223SAlexander Motin if (vol->v_provider == NULL) 175789b17223SAlexander Motin g_raid_launch_provider(vol); 175889b17223SAlexander Motin break; 175989b17223SAlexander Motin case G_RAID_VOLUME_E_START: 176089b17223SAlexander Motin if (vol->v_tr) 176189b17223SAlexander Motin G_RAID_TR_START(vol->v_tr); 176289b17223SAlexander Motin return (0); 176389b17223SAlexander Motin default: 176489b17223SAlexander Motin if (sc->sc_md) 176589b17223SAlexander Motin G_RAID_MD_VOLUME_EVENT(sc->sc_md, vol, event); 176689b17223SAlexander Motin return (0); 176789b17223SAlexander Motin } 176889b17223SAlexander Motin 176989b17223SAlexander Motin /* Manage root mount release. */ 177089b17223SAlexander Motin if (vol->v_starting) { 177189b17223SAlexander Motin vol->v_starting = 0; 177289b17223SAlexander Motin G_RAID_DEBUG1(1, sc, "root_mount_rel %p", vol->v_rootmount); 177389b17223SAlexander Motin root_mount_rel(vol->v_rootmount); 177489b17223SAlexander Motin vol->v_rootmount = NULL; 177589b17223SAlexander Motin } 177689b17223SAlexander Motin if (vol->v_stopping && vol->v_provider_open == 0) 177789b17223SAlexander Motin g_raid_destroy_volume(vol); 177889b17223SAlexander Motin return (0); 177989b17223SAlexander Motin } 178089b17223SAlexander Motin 178189b17223SAlexander Motin /* 178289b17223SAlexander Motin * Update subdisk state. 178389b17223SAlexander Motin */ 178489b17223SAlexander Motin static int 178589b17223SAlexander Motin g_raid_update_subdisk(struct g_raid_subdisk *sd, u_int event) 178689b17223SAlexander Motin { 178789b17223SAlexander Motin struct g_raid_softc *sc; 178889b17223SAlexander Motin struct g_raid_volume *vol; 178989b17223SAlexander Motin 179089b17223SAlexander Motin sc = sd->sd_softc; 179189b17223SAlexander Motin vol = sd->sd_volume; 179289b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 179389b17223SAlexander Motin 179489b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Event %s for subdisk %s:%d-%s.", 179589b17223SAlexander Motin g_raid_subdisk_event2str(event), 179689b17223SAlexander Motin vol->v_name, sd->sd_pos, 179789b17223SAlexander Motin sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]"); 179889b17223SAlexander Motin if (vol->v_tr) 179989b17223SAlexander Motin G_RAID_TR_EVENT(vol->v_tr, sd, event); 180089b17223SAlexander Motin 180189b17223SAlexander Motin return (0); 180289b17223SAlexander Motin } 180389b17223SAlexander Motin 180489b17223SAlexander Motin /* 180589b17223SAlexander Motin * Update disk state. 180689b17223SAlexander Motin */ 180789b17223SAlexander Motin static int 180889b17223SAlexander Motin g_raid_update_disk(struct g_raid_disk *disk, u_int event) 180989b17223SAlexander Motin { 181089b17223SAlexander Motin struct g_raid_softc *sc; 181189b17223SAlexander Motin 181289b17223SAlexander Motin sc = disk->d_softc; 181389b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 181489b17223SAlexander Motin 181589b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Event %s for disk %s.", 181689b17223SAlexander Motin g_raid_disk_event2str(event), 181789b17223SAlexander Motin g_raid_get_diskname(disk)); 181889b17223SAlexander Motin 181989b17223SAlexander Motin if (sc->sc_md) 182089b17223SAlexander Motin G_RAID_MD_EVENT(sc->sc_md, disk, event); 182189b17223SAlexander Motin return (0); 182289b17223SAlexander Motin } 182389b17223SAlexander Motin 182489b17223SAlexander Motin /* 182589b17223SAlexander Motin * Node event. 182689b17223SAlexander Motin */ 182789b17223SAlexander Motin static int 182889b17223SAlexander Motin g_raid_update_node(struct g_raid_softc *sc, u_int event) 182989b17223SAlexander Motin { 183089b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 183189b17223SAlexander Motin 183289b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Event %s for the array.", 183389b17223SAlexander Motin g_raid_node_event2str(event)); 183489b17223SAlexander Motin 183589b17223SAlexander Motin if (event == G_RAID_NODE_E_WAKE) 183689b17223SAlexander Motin return (0); 183789b17223SAlexander Motin if (sc->sc_md) 183889b17223SAlexander Motin G_RAID_MD_EVENT(sc->sc_md, NULL, event); 183989b17223SAlexander Motin return (0); 184089b17223SAlexander Motin } 184189b17223SAlexander Motin 184289b17223SAlexander Motin static int 184389b17223SAlexander Motin g_raid_access(struct g_provider *pp, int acr, int acw, int ace) 184489b17223SAlexander Motin { 184589b17223SAlexander Motin struct g_raid_volume *vol; 184689b17223SAlexander Motin struct g_raid_softc *sc; 184714e2cd0aSAlexander Motin int dcw, opens, error = 0; 184889b17223SAlexander Motin 184989b17223SAlexander Motin g_topology_assert(); 185089b17223SAlexander Motin sc = pp->geom->softc; 185189b17223SAlexander Motin vol = pp->private; 185289b17223SAlexander Motin KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 185389b17223SAlexander Motin KASSERT(vol != NULL, ("NULL volume (provider=%s).", pp->name)); 185489b17223SAlexander Motin 185589b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name, 185689b17223SAlexander Motin acr, acw, ace); 185789b17223SAlexander Motin dcw = pp->acw + acw; 185889b17223SAlexander Motin 185989b17223SAlexander Motin g_topology_unlock(); 186089b17223SAlexander Motin sx_xlock(&sc->sc_lock); 186189b17223SAlexander Motin /* Deny new opens while dying. */ 186289b17223SAlexander Motin if (sc->sc_stopping != 0 && (acr > 0 || acw > 0 || ace > 0)) { 186389b17223SAlexander Motin error = ENXIO; 186489b17223SAlexander Motin goto out; 186589b17223SAlexander Motin } 1866a479c51bSAlexander Motin if (dcw == 0) 186789b17223SAlexander Motin g_raid_clean(vol, dcw); 186889b17223SAlexander Motin vol->v_provider_open += acr + acw + ace; 186989b17223SAlexander Motin /* Handle delayed node destruction. */ 187089b17223SAlexander Motin if (sc->sc_stopping == G_RAID_DESTROY_DELAYED && 187189b17223SAlexander Motin vol->v_provider_open == 0) { 187289b17223SAlexander Motin /* Count open volumes. */ 187389b17223SAlexander Motin opens = g_raid_nopens(sc); 187489b17223SAlexander Motin if (opens == 0) { 187589b17223SAlexander Motin sc->sc_stopping = G_RAID_DESTROY_HARD; 187689b17223SAlexander Motin /* Wake up worker to make it selfdestruct. */ 187789b17223SAlexander Motin g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0); 187889b17223SAlexander Motin } 187989b17223SAlexander Motin } 188089b17223SAlexander Motin /* Handle open volume destruction. */ 188189b17223SAlexander Motin if (vol->v_stopping && vol->v_provider_open == 0) 188289b17223SAlexander Motin g_raid_destroy_volume(vol); 188389b17223SAlexander Motin out: 188489b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 188589b17223SAlexander Motin g_topology_lock(); 188689b17223SAlexander Motin return (error); 188789b17223SAlexander Motin } 188889b17223SAlexander Motin 188989b17223SAlexander Motin struct g_raid_softc * 189089b17223SAlexander Motin g_raid_create_node(struct g_class *mp, 189189b17223SAlexander Motin const char *name, struct g_raid_md_object *md) 189289b17223SAlexander Motin { 189389b17223SAlexander Motin struct g_raid_softc *sc; 189489b17223SAlexander Motin struct g_geom *gp; 189589b17223SAlexander Motin int error; 189689b17223SAlexander Motin 189789b17223SAlexander Motin g_topology_assert(); 189889b17223SAlexander Motin G_RAID_DEBUG(1, "Creating array %s.", name); 189989b17223SAlexander Motin 190089b17223SAlexander Motin gp = g_new_geomf(mp, "%s", name); 190189b17223SAlexander Motin sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO); 190289b17223SAlexander Motin gp->start = g_raid_start; 190389b17223SAlexander Motin gp->orphan = g_raid_orphan; 190489b17223SAlexander Motin gp->access = g_raid_access; 190589b17223SAlexander Motin gp->dumpconf = g_raid_dumpconf; 190689b17223SAlexander Motin 190789b17223SAlexander Motin sc->sc_md = md; 190889b17223SAlexander Motin sc->sc_geom = gp; 190989b17223SAlexander Motin sc->sc_flags = 0; 191089b17223SAlexander Motin TAILQ_INIT(&sc->sc_volumes); 191189b17223SAlexander Motin TAILQ_INIT(&sc->sc_disks); 1912c9f545e5SAlexander Motin sx_init(&sc->sc_lock, "graid:lock"); 1913c9f545e5SAlexander Motin mtx_init(&sc->sc_queue_mtx, "graid:queue", NULL, MTX_DEF); 191489b17223SAlexander Motin TAILQ_INIT(&sc->sc_events); 191589b17223SAlexander Motin bioq_init(&sc->sc_queue); 191689b17223SAlexander Motin gp->softc = sc; 191789b17223SAlexander Motin error = kproc_create(g_raid_worker, sc, &sc->sc_worker, 0, 0, 191889b17223SAlexander Motin "g_raid %s", name); 191989b17223SAlexander Motin if (error != 0) { 192089b17223SAlexander Motin G_RAID_DEBUG(0, "Cannot create kernel thread for %s.", name); 192189b17223SAlexander Motin mtx_destroy(&sc->sc_queue_mtx); 192289b17223SAlexander Motin sx_destroy(&sc->sc_lock); 192389b17223SAlexander Motin g_destroy_geom(sc->sc_geom); 192489b17223SAlexander Motin free(sc, M_RAID); 192589b17223SAlexander Motin return (NULL); 192689b17223SAlexander Motin } 192789b17223SAlexander Motin 192889b17223SAlexander Motin G_RAID_DEBUG1(0, sc, "Array %s created.", name); 192989b17223SAlexander Motin return (sc); 193089b17223SAlexander Motin } 193189b17223SAlexander Motin 193289b17223SAlexander Motin struct g_raid_volume * 193389b17223SAlexander Motin g_raid_create_volume(struct g_raid_softc *sc, const char *name, int id) 193489b17223SAlexander Motin { 193589b17223SAlexander Motin struct g_raid_volume *vol, *vol1; 193689b17223SAlexander Motin int i; 193789b17223SAlexander Motin 193889b17223SAlexander Motin G_RAID_DEBUG1(1, sc, "Creating volume %s.", name); 193989b17223SAlexander Motin vol = malloc(sizeof(*vol), M_RAID, M_WAITOK | M_ZERO); 194089b17223SAlexander Motin vol->v_softc = sc; 194189b17223SAlexander Motin strlcpy(vol->v_name, name, G_RAID_MAX_VOLUMENAME); 194289b17223SAlexander Motin vol->v_state = G_RAID_VOLUME_S_STARTING; 194389b17223SAlexander Motin vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN; 194489b17223SAlexander Motin vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_UNKNOWN; 19458f12ca2eSAlexander Motin vol->v_rotate_parity = 1; 194689b17223SAlexander Motin bioq_init(&vol->v_inflight); 194789b17223SAlexander Motin bioq_init(&vol->v_locked); 194889b17223SAlexander Motin LIST_INIT(&vol->v_locks); 194989b17223SAlexander Motin for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) { 195089b17223SAlexander Motin vol->v_subdisks[i].sd_softc = sc; 195189b17223SAlexander Motin vol->v_subdisks[i].sd_volume = vol; 195289b17223SAlexander Motin vol->v_subdisks[i].sd_pos = i; 195389b17223SAlexander Motin vol->v_subdisks[i].sd_state = G_RAID_DISK_S_NONE; 195489b17223SAlexander Motin } 195589b17223SAlexander Motin 195689b17223SAlexander Motin /* Find free ID for this volume. */ 195789b17223SAlexander Motin g_topology_lock(); 195889b17223SAlexander Motin vol1 = vol; 195989b17223SAlexander Motin if (id >= 0) { 196089b17223SAlexander Motin LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) { 196189b17223SAlexander Motin if (vol1->v_global_id == id) 196289b17223SAlexander Motin break; 196389b17223SAlexander Motin } 196489b17223SAlexander Motin } 196589b17223SAlexander Motin if (vol1 != NULL) { 196689b17223SAlexander Motin for (id = 0; ; id++) { 196789b17223SAlexander Motin LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) { 196889b17223SAlexander Motin if (vol1->v_global_id == id) 196989b17223SAlexander Motin break; 197089b17223SAlexander Motin } 197189b17223SAlexander Motin if (vol1 == NULL) 197289b17223SAlexander Motin break; 197389b17223SAlexander Motin } 197489b17223SAlexander Motin } 197589b17223SAlexander Motin vol->v_global_id = id; 197689b17223SAlexander Motin LIST_INSERT_HEAD(&g_raid_volumes, vol, v_global_next); 197789b17223SAlexander Motin g_topology_unlock(); 197889b17223SAlexander Motin 197989b17223SAlexander Motin /* Delay root mounting. */ 198089b17223SAlexander Motin vol->v_rootmount = root_mount_hold("GRAID"); 198189b17223SAlexander Motin G_RAID_DEBUG1(1, sc, "root_mount_hold %p", vol->v_rootmount); 198289b17223SAlexander Motin vol->v_starting = 1; 198389b17223SAlexander Motin TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next); 198489b17223SAlexander Motin return (vol); 198589b17223SAlexander Motin } 198689b17223SAlexander Motin 198789b17223SAlexander Motin struct g_raid_disk * 198889b17223SAlexander Motin g_raid_create_disk(struct g_raid_softc *sc) 198989b17223SAlexander Motin { 199089b17223SAlexander Motin struct g_raid_disk *disk; 199189b17223SAlexander Motin 199289b17223SAlexander Motin G_RAID_DEBUG1(1, sc, "Creating disk."); 199389b17223SAlexander Motin disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO); 199489b17223SAlexander Motin disk->d_softc = sc; 199589b17223SAlexander Motin disk->d_state = G_RAID_DISK_S_NONE; 199689b17223SAlexander Motin TAILQ_INIT(&disk->d_subdisks); 199789b17223SAlexander Motin TAILQ_INSERT_TAIL(&sc->sc_disks, disk, d_next); 199889b17223SAlexander Motin return (disk); 199989b17223SAlexander Motin } 200089b17223SAlexander Motin 200189b17223SAlexander Motin int g_raid_start_volume(struct g_raid_volume *vol) 200289b17223SAlexander Motin { 200389b17223SAlexander Motin struct g_raid_tr_class *class; 200489b17223SAlexander Motin struct g_raid_tr_object *obj; 200589b17223SAlexander Motin int status; 200689b17223SAlexander Motin 200789b17223SAlexander Motin G_RAID_DEBUG1(2, vol->v_softc, "Starting volume %s.", vol->v_name); 200889b17223SAlexander Motin LIST_FOREACH(class, &g_raid_tr_classes, trc_list) { 2009c89d2fbeSAlexander Motin if (!class->trc_enable) 2010c89d2fbeSAlexander Motin continue; 201189b17223SAlexander Motin G_RAID_DEBUG1(2, vol->v_softc, 201289b17223SAlexander Motin "Tasting volume %s for %s transformation.", 201389b17223SAlexander Motin vol->v_name, class->name); 201489b17223SAlexander Motin obj = (void *)kobj_create((kobj_class_t)class, M_RAID, 201589b17223SAlexander Motin M_WAITOK); 201689b17223SAlexander Motin obj->tro_class = class; 201789b17223SAlexander Motin obj->tro_volume = vol; 201889b17223SAlexander Motin status = G_RAID_TR_TASTE(obj, vol); 201989b17223SAlexander Motin if (status != G_RAID_TR_TASTE_FAIL) 202089b17223SAlexander Motin break; 202189b17223SAlexander Motin kobj_delete((kobj_t)obj, M_RAID); 202289b17223SAlexander Motin } 202389b17223SAlexander Motin if (class == NULL) { 202489b17223SAlexander Motin G_RAID_DEBUG1(0, vol->v_softc, 202589b17223SAlexander Motin "No transformation module found for %s.", 202689b17223SAlexander Motin vol->v_name); 202789b17223SAlexander Motin vol->v_tr = NULL; 202889b17223SAlexander Motin g_raid_change_volume_state(vol, G_RAID_VOLUME_S_UNSUPPORTED); 202989b17223SAlexander Motin g_raid_event_send(vol, G_RAID_VOLUME_E_DOWN, 203089b17223SAlexander Motin G_RAID_EVENT_VOLUME); 203189b17223SAlexander Motin return (-1); 203289b17223SAlexander Motin } 203389b17223SAlexander Motin G_RAID_DEBUG1(2, vol->v_softc, 203489b17223SAlexander Motin "Transformation module %s chosen for %s.", 203589b17223SAlexander Motin class->name, vol->v_name); 203689b17223SAlexander Motin vol->v_tr = obj; 203789b17223SAlexander Motin return (0); 203889b17223SAlexander Motin } 203989b17223SAlexander Motin 204089b17223SAlexander Motin int 204189b17223SAlexander Motin g_raid_destroy_node(struct g_raid_softc *sc, int worker) 204289b17223SAlexander Motin { 204389b17223SAlexander Motin struct g_raid_volume *vol, *tmpv; 204489b17223SAlexander Motin struct g_raid_disk *disk, *tmpd; 204589b17223SAlexander Motin int error = 0; 204689b17223SAlexander Motin 204789b17223SAlexander Motin sc->sc_stopping = G_RAID_DESTROY_HARD; 204889b17223SAlexander Motin TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) { 204989b17223SAlexander Motin if (g_raid_destroy_volume(vol)) 205089b17223SAlexander Motin error = EBUSY; 205189b17223SAlexander Motin } 205289b17223SAlexander Motin if (error) 205389b17223SAlexander Motin return (error); 205489b17223SAlexander Motin TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) { 205589b17223SAlexander Motin if (g_raid_destroy_disk(disk)) 205689b17223SAlexander Motin error = EBUSY; 205789b17223SAlexander Motin } 205889b17223SAlexander Motin if (error) 205989b17223SAlexander Motin return (error); 206089b17223SAlexander Motin if (sc->sc_md) { 206189b17223SAlexander Motin G_RAID_MD_FREE(sc->sc_md); 206289b17223SAlexander Motin kobj_delete((kobj_t)sc->sc_md, M_RAID); 206389b17223SAlexander Motin sc->sc_md = NULL; 206489b17223SAlexander Motin } 206589b17223SAlexander Motin if (sc->sc_geom != NULL) { 206689b17223SAlexander Motin G_RAID_DEBUG1(0, sc, "Array %s destroyed.", sc->sc_name); 206789b17223SAlexander Motin g_topology_lock(); 206889b17223SAlexander Motin sc->sc_geom->softc = NULL; 206989b17223SAlexander Motin g_wither_geom(sc->sc_geom, ENXIO); 207089b17223SAlexander Motin g_topology_unlock(); 207189b17223SAlexander Motin sc->sc_geom = NULL; 207289b17223SAlexander Motin } else 207389b17223SAlexander Motin G_RAID_DEBUG(1, "Array destroyed."); 207489b17223SAlexander Motin if (worker) { 207589b17223SAlexander Motin g_raid_event_cancel(sc, sc); 207689b17223SAlexander Motin mtx_destroy(&sc->sc_queue_mtx); 207789b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 207889b17223SAlexander Motin sx_destroy(&sc->sc_lock); 207989b17223SAlexander Motin wakeup(&sc->sc_stopping); 208089b17223SAlexander Motin free(sc, M_RAID); 208189b17223SAlexander Motin curthread->td_pflags &= ~TDP_GEOM; 208289b17223SAlexander Motin G_RAID_DEBUG(1, "Thread exiting."); 208389b17223SAlexander Motin kproc_exit(0); 208489b17223SAlexander Motin } else { 208589b17223SAlexander Motin /* Wake up worker to make it selfdestruct. */ 208689b17223SAlexander Motin g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0); 208789b17223SAlexander Motin } 208889b17223SAlexander Motin return (0); 208989b17223SAlexander Motin } 209089b17223SAlexander Motin 209189b17223SAlexander Motin int 209289b17223SAlexander Motin g_raid_destroy_volume(struct g_raid_volume *vol) 209389b17223SAlexander Motin { 209489b17223SAlexander Motin struct g_raid_softc *sc; 209589b17223SAlexander Motin struct g_raid_disk *disk; 209689b17223SAlexander Motin int i; 209789b17223SAlexander Motin 209889b17223SAlexander Motin sc = vol->v_softc; 209989b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Destroying volume %s.", vol->v_name); 210089b17223SAlexander Motin vol->v_stopping = 1; 210189b17223SAlexander Motin if (vol->v_state != G_RAID_VOLUME_S_STOPPED) { 210289b17223SAlexander Motin if (vol->v_tr) { 210389b17223SAlexander Motin G_RAID_TR_STOP(vol->v_tr); 210489b17223SAlexander Motin return (EBUSY); 210589b17223SAlexander Motin } else 210689b17223SAlexander Motin vol->v_state = G_RAID_VOLUME_S_STOPPED; 210789b17223SAlexander Motin } 210889b17223SAlexander Motin if (g_raid_event_check(sc, vol) != 0) 210989b17223SAlexander Motin return (EBUSY); 211089b17223SAlexander Motin if (vol->v_provider != NULL) 211189b17223SAlexander Motin return (EBUSY); 211289b17223SAlexander Motin if (vol->v_provider_open != 0) 211389b17223SAlexander Motin return (EBUSY); 211489b17223SAlexander Motin if (vol->v_tr) { 211589b17223SAlexander Motin G_RAID_TR_FREE(vol->v_tr); 211689b17223SAlexander Motin kobj_delete((kobj_t)vol->v_tr, M_RAID); 211789b17223SAlexander Motin vol->v_tr = NULL; 211889b17223SAlexander Motin } 211989b17223SAlexander Motin if (vol->v_rootmount) 212089b17223SAlexander Motin root_mount_rel(vol->v_rootmount); 212189b17223SAlexander Motin g_topology_lock(); 212289b17223SAlexander Motin LIST_REMOVE(vol, v_global_next); 212389b17223SAlexander Motin g_topology_unlock(); 212489b17223SAlexander Motin TAILQ_REMOVE(&sc->sc_volumes, vol, v_next); 212589b17223SAlexander Motin for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) { 212689b17223SAlexander Motin g_raid_event_cancel(sc, &vol->v_subdisks[i]); 212789b17223SAlexander Motin disk = vol->v_subdisks[i].sd_disk; 212889b17223SAlexander Motin if (disk == NULL) 212989b17223SAlexander Motin continue; 213089b17223SAlexander Motin TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next); 213189b17223SAlexander Motin } 213289b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Volume %s destroyed.", vol->v_name); 213389b17223SAlexander Motin if (sc->sc_md) 213489b17223SAlexander Motin G_RAID_MD_FREE_VOLUME(sc->sc_md, vol); 213589b17223SAlexander Motin g_raid_event_cancel(sc, vol); 213689b17223SAlexander Motin free(vol, M_RAID); 213789b17223SAlexander Motin if (sc->sc_stopping == G_RAID_DESTROY_HARD) { 213889b17223SAlexander Motin /* Wake up worker to let it selfdestruct. */ 213989b17223SAlexander Motin g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0); 214089b17223SAlexander Motin } 214189b17223SAlexander Motin return (0); 214289b17223SAlexander Motin } 214389b17223SAlexander Motin 214489b17223SAlexander Motin int 214589b17223SAlexander Motin g_raid_destroy_disk(struct g_raid_disk *disk) 214689b17223SAlexander Motin { 214789b17223SAlexander Motin struct g_raid_softc *sc; 214889b17223SAlexander Motin struct g_raid_subdisk *sd, *tmp; 214989b17223SAlexander Motin 215089b17223SAlexander Motin sc = disk->d_softc; 215189b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Destroying disk."); 215289b17223SAlexander Motin if (disk->d_consumer) { 215389b17223SAlexander Motin g_raid_kill_consumer(sc, disk->d_consumer); 215489b17223SAlexander Motin disk->d_consumer = NULL; 215589b17223SAlexander Motin } 215689b17223SAlexander Motin TAILQ_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) { 215789b17223SAlexander Motin g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE); 215889b17223SAlexander Motin g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED, 215989b17223SAlexander Motin G_RAID_EVENT_SUBDISK); 216089b17223SAlexander Motin TAILQ_REMOVE(&disk->d_subdisks, sd, sd_next); 216189b17223SAlexander Motin sd->sd_disk = NULL; 216289b17223SAlexander Motin } 216389b17223SAlexander Motin TAILQ_REMOVE(&sc->sc_disks, disk, d_next); 216489b17223SAlexander Motin if (sc->sc_md) 216589b17223SAlexander Motin G_RAID_MD_FREE_DISK(sc->sc_md, disk); 216689b17223SAlexander Motin g_raid_event_cancel(sc, disk); 216789b17223SAlexander Motin free(disk, M_RAID); 216889b17223SAlexander Motin return (0); 216989b17223SAlexander Motin } 217089b17223SAlexander Motin 217189b17223SAlexander Motin int 217289b17223SAlexander Motin g_raid_destroy(struct g_raid_softc *sc, int how) 217389b17223SAlexander Motin { 217489b17223SAlexander Motin int opens; 217589b17223SAlexander Motin 217689b17223SAlexander Motin g_topology_assert_not(); 217789b17223SAlexander Motin if (sc == NULL) 217889b17223SAlexander Motin return (ENXIO); 217989b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 218089b17223SAlexander Motin 218189b17223SAlexander Motin /* Count open volumes. */ 218289b17223SAlexander Motin opens = g_raid_nopens(sc); 218389b17223SAlexander Motin 218489b17223SAlexander Motin /* React on some opened volumes. */ 218589b17223SAlexander Motin if (opens > 0) { 218689b17223SAlexander Motin switch (how) { 218789b17223SAlexander Motin case G_RAID_DESTROY_SOFT: 218889b17223SAlexander Motin G_RAID_DEBUG1(1, sc, 218989b17223SAlexander Motin "%d volumes are still open.", 219089b17223SAlexander Motin opens); 219189b17223SAlexander Motin return (EBUSY); 219289b17223SAlexander Motin case G_RAID_DESTROY_DELAYED: 219389b17223SAlexander Motin G_RAID_DEBUG1(1, sc, 219489b17223SAlexander Motin "Array will be destroyed on last close."); 219589b17223SAlexander Motin sc->sc_stopping = G_RAID_DESTROY_DELAYED; 219689b17223SAlexander Motin return (EBUSY); 219789b17223SAlexander Motin case G_RAID_DESTROY_HARD: 219889b17223SAlexander Motin G_RAID_DEBUG1(1, sc, 219989b17223SAlexander Motin "%d volumes are still open.", 220089b17223SAlexander Motin opens); 220189b17223SAlexander Motin } 220289b17223SAlexander Motin } 220389b17223SAlexander Motin 220489b17223SAlexander Motin /* Mark node for destruction. */ 220589b17223SAlexander Motin sc->sc_stopping = G_RAID_DESTROY_HARD; 220689b17223SAlexander Motin /* Wake up worker to let it selfdestruct. */ 220789b17223SAlexander Motin g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0); 220889b17223SAlexander Motin /* Sleep until node destroyed. */ 220989b17223SAlexander Motin sx_sleep(&sc->sc_stopping, &sc->sc_lock, 221089b17223SAlexander Motin PRIBIO | PDROP, "r:destroy", 0); 221189b17223SAlexander Motin return (0); 221289b17223SAlexander Motin } 221389b17223SAlexander Motin 221489b17223SAlexander Motin static void 221589b17223SAlexander Motin g_raid_taste_orphan(struct g_consumer *cp) 221689b17223SAlexander Motin { 221789b17223SAlexander Motin 221889b17223SAlexander Motin KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 221989b17223SAlexander Motin cp->provider->name)); 222089b17223SAlexander Motin } 222189b17223SAlexander Motin 222289b17223SAlexander Motin static struct g_geom * 222389b17223SAlexander Motin g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 222489b17223SAlexander Motin { 222589b17223SAlexander Motin struct g_consumer *cp; 222689b17223SAlexander Motin struct g_geom *gp, *geom; 222789b17223SAlexander Motin struct g_raid_md_class *class; 222889b17223SAlexander Motin struct g_raid_md_object *obj; 222989b17223SAlexander Motin int status; 223089b17223SAlexander Motin 223189b17223SAlexander Motin g_topology_assert(); 223289b17223SAlexander Motin g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 2233c89d2fbeSAlexander Motin if (!g_raid_enable) 2234c89d2fbeSAlexander Motin return (NULL); 223589b17223SAlexander Motin G_RAID_DEBUG(2, "Tasting provider %s.", pp->name); 223689b17223SAlexander Motin 22378df8e26aSAlexander Motin gp = g_new_geomf(mp, "raid:taste"); 223889b17223SAlexander Motin /* 223989b17223SAlexander Motin * This orphan function should be never called. 224089b17223SAlexander Motin */ 224189b17223SAlexander Motin gp->orphan = g_raid_taste_orphan; 224289b17223SAlexander Motin cp = g_new_consumer(gp); 224389b17223SAlexander Motin g_attach(cp, pp); 224489b17223SAlexander Motin 224589b17223SAlexander Motin geom = NULL; 224689b17223SAlexander Motin LIST_FOREACH(class, &g_raid_md_classes, mdc_list) { 2247c89d2fbeSAlexander Motin if (!class->mdc_enable) 2248c89d2fbeSAlexander Motin continue; 224989b17223SAlexander Motin G_RAID_DEBUG(2, "Tasting provider %s for %s metadata.", 225089b17223SAlexander Motin pp->name, class->name); 225189b17223SAlexander Motin obj = (void *)kobj_create((kobj_class_t)class, M_RAID, 225289b17223SAlexander Motin M_WAITOK); 225389b17223SAlexander Motin obj->mdo_class = class; 225489b17223SAlexander Motin status = G_RAID_MD_TASTE(obj, mp, cp, &geom); 225589b17223SAlexander Motin if (status != G_RAID_MD_TASTE_NEW) 225689b17223SAlexander Motin kobj_delete((kobj_t)obj, M_RAID); 225789b17223SAlexander Motin if (status != G_RAID_MD_TASTE_FAIL) 225889b17223SAlexander Motin break; 225989b17223SAlexander Motin } 226089b17223SAlexander Motin 226189b17223SAlexander Motin g_detach(cp); 226289b17223SAlexander Motin g_destroy_consumer(cp); 226389b17223SAlexander Motin g_destroy_geom(gp); 226489b17223SAlexander Motin G_RAID_DEBUG(2, "Tasting provider %s done.", pp->name); 226589b17223SAlexander Motin return (geom); 226689b17223SAlexander Motin } 226789b17223SAlexander Motin 226889b17223SAlexander Motin int 22698df8e26aSAlexander Motin g_raid_create_node_format(const char *format, struct gctl_req *req, 22708df8e26aSAlexander Motin struct g_geom **gp) 227189b17223SAlexander Motin { 227289b17223SAlexander Motin struct g_raid_md_class *class; 227389b17223SAlexander Motin struct g_raid_md_object *obj; 227489b17223SAlexander Motin int status; 227589b17223SAlexander Motin 227689b17223SAlexander Motin G_RAID_DEBUG(2, "Creating array for %s metadata.", format); 227789b17223SAlexander Motin LIST_FOREACH(class, &g_raid_md_classes, mdc_list) { 227889b17223SAlexander Motin if (strcasecmp(class->name, format) == 0) 227989b17223SAlexander Motin break; 228089b17223SAlexander Motin } 228189b17223SAlexander Motin if (class == NULL) { 228289b17223SAlexander Motin G_RAID_DEBUG(1, "No support for %s metadata.", format); 228389b17223SAlexander Motin return (G_RAID_MD_TASTE_FAIL); 228489b17223SAlexander Motin } 228589b17223SAlexander Motin obj = (void *)kobj_create((kobj_class_t)class, M_RAID, 228689b17223SAlexander Motin M_WAITOK); 228789b17223SAlexander Motin obj->mdo_class = class; 22888df8e26aSAlexander Motin status = G_RAID_MD_CREATE_REQ(obj, &g_raid_class, req, gp); 228989b17223SAlexander Motin if (status != G_RAID_MD_TASTE_NEW) 229089b17223SAlexander Motin kobj_delete((kobj_t)obj, M_RAID); 229189b17223SAlexander Motin return (status); 229289b17223SAlexander Motin } 229389b17223SAlexander Motin 229489b17223SAlexander Motin static int 229589b17223SAlexander Motin g_raid_destroy_geom(struct gctl_req *req __unused, 229689b17223SAlexander Motin struct g_class *mp __unused, struct g_geom *gp) 229789b17223SAlexander Motin { 229889b17223SAlexander Motin struct g_raid_softc *sc; 229989b17223SAlexander Motin int error; 230089b17223SAlexander Motin 230189b17223SAlexander Motin g_topology_unlock(); 230289b17223SAlexander Motin sc = gp->softc; 230389b17223SAlexander Motin sx_xlock(&sc->sc_lock); 230489b17223SAlexander Motin g_cancel_event(sc); 230589b17223SAlexander Motin error = g_raid_destroy(gp->softc, G_RAID_DESTROY_SOFT); 230689b17223SAlexander Motin if (error != 0) 230789b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 230889b17223SAlexander Motin g_topology_lock(); 230989b17223SAlexander Motin return (error); 231089b17223SAlexander Motin } 231189b17223SAlexander Motin 231289b17223SAlexander Motin void g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol, 231389b17223SAlexander Motin struct g_raid_subdisk *sd, struct g_raid_disk *disk) 231489b17223SAlexander Motin { 231589b17223SAlexander Motin 231689b17223SAlexander Motin if (sc->sc_stopping == G_RAID_DESTROY_HARD) 231789b17223SAlexander Motin return; 231889b17223SAlexander Motin if (sc->sc_md) 231989b17223SAlexander Motin G_RAID_MD_WRITE(sc->sc_md, vol, sd, disk); 232089b17223SAlexander Motin } 232189b17223SAlexander Motin 232289b17223SAlexander Motin void g_raid_fail_disk(struct g_raid_softc *sc, 232389b17223SAlexander Motin struct g_raid_subdisk *sd, struct g_raid_disk *disk) 232489b17223SAlexander Motin { 232589b17223SAlexander Motin 232689b17223SAlexander Motin if (disk == NULL) 232789b17223SAlexander Motin disk = sd->sd_disk; 232889b17223SAlexander Motin if (disk == NULL) { 232989b17223SAlexander Motin G_RAID_DEBUG1(0, sc, "Warning! Fail request to an absent disk!"); 233089b17223SAlexander Motin return; 233189b17223SAlexander Motin } 233289b17223SAlexander Motin if (disk->d_state != G_RAID_DISK_S_ACTIVE) { 233389b17223SAlexander Motin G_RAID_DEBUG1(0, sc, "Warning! Fail request to a disk in a " 233489b17223SAlexander Motin "wrong state (%s)!", g_raid_disk_state2str(disk->d_state)); 233589b17223SAlexander Motin return; 233689b17223SAlexander Motin } 233789b17223SAlexander Motin if (sc->sc_md) 233889b17223SAlexander Motin G_RAID_MD_FAIL_DISK(sc->sc_md, sd, disk); 233989b17223SAlexander Motin } 234089b17223SAlexander Motin 234189b17223SAlexander Motin static void 234289b17223SAlexander Motin g_raid_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 234389b17223SAlexander Motin struct g_consumer *cp, struct g_provider *pp) 234489b17223SAlexander Motin { 234589b17223SAlexander Motin struct g_raid_softc *sc; 234689b17223SAlexander Motin struct g_raid_volume *vol; 234789b17223SAlexander Motin struct g_raid_subdisk *sd; 234889b17223SAlexander Motin struct g_raid_disk *disk; 234989b17223SAlexander Motin int i, s; 235089b17223SAlexander Motin 235189b17223SAlexander Motin g_topology_assert(); 235289b17223SAlexander Motin 235389b17223SAlexander Motin sc = gp->softc; 235489b17223SAlexander Motin if (sc == NULL) 235589b17223SAlexander Motin return; 235689b17223SAlexander Motin if (pp != NULL) { 235789b17223SAlexander Motin vol = pp->private; 235889b17223SAlexander Motin g_topology_unlock(); 235989b17223SAlexander Motin sx_xlock(&sc->sc_lock); 2360*bcb6ad36SAlexander Motin sbuf_printf(sb, "%s<descr>%s %s volume</descr>\n", indent, 2361*bcb6ad36SAlexander Motin sc->sc_md->mdo_class->name, 2362*bcb6ad36SAlexander Motin g_raid_volume_level2str(vol->v_raid_level, 2363*bcb6ad36SAlexander Motin vol->v_raid_level_qualifier)); 236489b17223SAlexander Motin sbuf_printf(sb, "%s<Label>%s</Label>\n", indent, 236589b17223SAlexander Motin vol->v_name); 236689b17223SAlexander Motin sbuf_printf(sb, "%s<RAIDLevel>%s</RAIDLevel>\n", indent, 236789b17223SAlexander Motin g_raid_volume_level2str(vol->v_raid_level, 236889b17223SAlexander Motin vol->v_raid_level_qualifier)); 236989b17223SAlexander Motin sbuf_printf(sb, 237089b17223SAlexander Motin "%s<Transformation>%s</Transformation>\n", indent, 237189b17223SAlexander Motin vol->v_tr ? vol->v_tr->tro_class->name : "NONE"); 237289b17223SAlexander Motin sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 237389b17223SAlexander Motin vol->v_disks_count); 237489b17223SAlexander Motin sbuf_printf(sb, "%s<Strip>%u</Strip>\n", indent, 237589b17223SAlexander Motin vol->v_strip_size); 237689b17223SAlexander Motin sbuf_printf(sb, "%s<State>%s</State>\n", indent, 237789b17223SAlexander Motin g_raid_volume_state2str(vol->v_state)); 237889b17223SAlexander Motin sbuf_printf(sb, "%s<Dirty>%s</Dirty>\n", indent, 237989b17223SAlexander Motin vol->v_dirty ? "Yes" : "No"); 238089b17223SAlexander Motin sbuf_printf(sb, "%s<Subdisks>", indent); 238189b17223SAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 238289b17223SAlexander Motin sd = &vol->v_subdisks[i]; 238389b17223SAlexander Motin if (sd->sd_disk != NULL && 238489b17223SAlexander Motin sd->sd_disk->d_consumer != NULL) { 238589b17223SAlexander Motin sbuf_printf(sb, "%s ", 238689b17223SAlexander Motin g_raid_get_diskname(sd->sd_disk)); 238789b17223SAlexander Motin } else { 238889b17223SAlexander Motin sbuf_printf(sb, "NONE "); 238989b17223SAlexander Motin } 239089b17223SAlexander Motin sbuf_printf(sb, "(%s", 239189b17223SAlexander Motin g_raid_subdisk_state2str(sd->sd_state)); 239289b17223SAlexander Motin if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD || 239389b17223SAlexander Motin sd->sd_state == G_RAID_SUBDISK_S_RESYNC) { 239489b17223SAlexander Motin sbuf_printf(sb, " %d%%", 239589b17223SAlexander Motin (int)(sd->sd_rebuild_pos * 100 / 239689b17223SAlexander Motin sd->sd_size)); 239789b17223SAlexander Motin } 239889b17223SAlexander Motin sbuf_printf(sb, ")"); 239989b17223SAlexander Motin if (i + 1 < vol->v_disks_count) 240089b17223SAlexander Motin sbuf_printf(sb, ", "); 240189b17223SAlexander Motin } 240289b17223SAlexander Motin sbuf_printf(sb, "</Subdisks>\n"); 240389b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 240489b17223SAlexander Motin g_topology_lock(); 240589b17223SAlexander Motin } else if (cp != NULL) { 240689b17223SAlexander Motin disk = cp->private; 240789b17223SAlexander Motin if (disk == NULL) 240889b17223SAlexander Motin return; 240989b17223SAlexander Motin g_topology_unlock(); 241089b17223SAlexander Motin sx_xlock(&sc->sc_lock); 241189b17223SAlexander Motin sbuf_printf(sb, "%s<State>%s", indent, 241289b17223SAlexander Motin g_raid_disk_state2str(disk->d_state)); 241389b17223SAlexander Motin if (!TAILQ_EMPTY(&disk->d_subdisks)) { 241489b17223SAlexander Motin sbuf_printf(sb, " ("); 241589b17223SAlexander Motin TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) { 241689b17223SAlexander Motin sbuf_printf(sb, "%s", 241789b17223SAlexander Motin g_raid_subdisk_state2str(sd->sd_state)); 241889b17223SAlexander Motin if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD || 241989b17223SAlexander Motin sd->sd_state == G_RAID_SUBDISK_S_RESYNC) { 242089b17223SAlexander Motin sbuf_printf(sb, " %d%%", 242189b17223SAlexander Motin (int)(sd->sd_rebuild_pos * 100 / 242289b17223SAlexander Motin sd->sd_size)); 242389b17223SAlexander Motin } 242489b17223SAlexander Motin if (TAILQ_NEXT(sd, sd_next)) 242589b17223SAlexander Motin sbuf_printf(sb, ", "); 242689b17223SAlexander Motin } 242789b17223SAlexander Motin sbuf_printf(sb, ")"); 242889b17223SAlexander Motin } 242989b17223SAlexander Motin sbuf_printf(sb, "</State>\n"); 243089b17223SAlexander Motin sbuf_printf(sb, "%s<Subdisks>", indent); 243189b17223SAlexander Motin TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) { 243289b17223SAlexander Motin sbuf_printf(sb, "r%d(%s):%d@%ju", 243389b17223SAlexander Motin sd->sd_volume->v_global_id, 243489b17223SAlexander Motin sd->sd_volume->v_name, 243589b17223SAlexander Motin sd->sd_pos, sd->sd_offset); 243689b17223SAlexander Motin if (TAILQ_NEXT(sd, sd_next)) 243789b17223SAlexander Motin sbuf_printf(sb, ", "); 243889b17223SAlexander Motin } 243989b17223SAlexander Motin sbuf_printf(sb, "</Subdisks>\n"); 244089b17223SAlexander Motin sbuf_printf(sb, "%s<ReadErrors>%d</ReadErrors>\n", indent, 244189b17223SAlexander Motin disk->d_read_errs); 244289b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 244389b17223SAlexander Motin g_topology_lock(); 244489b17223SAlexander Motin } else { 244589b17223SAlexander Motin g_topology_unlock(); 244689b17223SAlexander Motin sx_xlock(&sc->sc_lock); 244789b17223SAlexander Motin if (sc->sc_md) { 244889b17223SAlexander Motin sbuf_printf(sb, "%s<Metadata>%s</Metadata>\n", indent, 244989b17223SAlexander Motin sc->sc_md->mdo_class->name); 245089b17223SAlexander Motin } 245189b17223SAlexander Motin if (!TAILQ_EMPTY(&sc->sc_volumes)) { 245289b17223SAlexander Motin s = 0xff; 245389b17223SAlexander Motin TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) { 245489b17223SAlexander Motin if (vol->v_state < s) 245589b17223SAlexander Motin s = vol->v_state; 245689b17223SAlexander Motin } 245789b17223SAlexander Motin sbuf_printf(sb, "%s<State>%s</State>\n", indent, 245889b17223SAlexander Motin g_raid_volume_state2str(s)); 245989b17223SAlexander Motin } 246089b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 246189b17223SAlexander Motin g_topology_lock(); 246289b17223SAlexander Motin } 246389b17223SAlexander Motin } 246489b17223SAlexander Motin 246589b17223SAlexander Motin static void 2466a479c51bSAlexander Motin g_raid_shutdown_post_sync(void *arg, int howto) 246789b17223SAlexander Motin { 246889b17223SAlexander Motin struct g_class *mp; 246989b17223SAlexander Motin struct g_geom *gp, *gp2; 247089b17223SAlexander Motin struct g_raid_softc *sc; 2471a479c51bSAlexander Motin struct g_raid_volume *vol; 247289b17223SAlexander Motin int error; 247389b17223SAlexander Motin 247489b17223SAlexander Motin mp = arg; 247589b17223SAlexander Motin DROP_GIANT(); 247689b17223SAlexander Motin g_topology_lock(); 2477a479c51bSAlexander Motin g_raid_shutdown = 1; 247889b17223SAlexander Motin LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 247989b17223SAlexander Motin if ((sc = gp->softc) == NULL) 248089b17223SAlexander Motin continue; 248189b17223SAlexander Motin g_topology_unlock(); 248289b17223SAlexander Motin sx_xlock(&sc->sc_lock); 2483a479c51bSAlexander Motin TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) 2484a479c51bSAlexander Motin g_raid_clean(vol, -1); 248589b17223SAlexander Motin g_cancel_event(sc); 248689b17223SAlexander Motin error = g_raid_destroy(sc, G_RAID_DESTROY_DELAYED); 248789b17223SAlexander Motin if (error != 0) 248889b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 248989b17223SAlexander Motin g_topology_lock(); 249089b17223SAlexander Motin } 249189b17223SAlexander Motin g_topology_unlock(); 249289b17223SAlexander Motin PICKUP_GIANT(); 249389b17223SAlexander Motin } 249489b17223SAlexander Motin 249589b17223SAlexander Motin static void 249689b17223SAlexander Motin g_raid_init(struct g_class *mp) 249789b17223SAlexander Motin { 249889b17223SAlexander Motin 2499a479c51bSAlexander Motin g_raid_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 2500a479c51bSAlexander Motin g_raid_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 2501a479c51bSAlexander Motin if (g_raid_post_sync == NULL) 250289b17223SAlexander Motin G_RAID_DEBUG(0, "Warning! Cannot register shutdown event."); 250389b17223SAlexander Motin g_raid_started = 1; 250489b17223SAlexander Motin } 250589b17223SAlexander Motin 250689b17223SAlexander Motin static void 250789b17223SAlexander Motin g_raid_fini(struct g_class *mp) 250889b17223SAlexander Motin { 250989b17223SAlexander Motin 2510a479c51bSAlexander Motin if (g_raid_post_sync != NULL) 2511a479c51bSAlexander Motin EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid_post_sync); 251289b17223SAlexander Motin g_raid_started = 0; 251389b17223SAlexander Motin } 251489b17223SAlexander Motin 251589b17223SAlexander Motin int 251689b17223SAlexander Motin g_raid_md_modevent(module_t mod, int type, void *arg) 251789b17223SAlexander Motin { 251889b17223SAlexander Motin struct g_raid_md_class *class, *c, *nc; 251989b17223SAlexander Motin int error; 252089b17223SAlexander Motin 252189b17223SAlexander Motin error = 0; 252289b17223SAlexander Motin class = arg; 252389b17223SAlexander Motin switch (type) { 252489b17223SAlexander Motin case MOD_LOAD: 252589b17223SAlexander Motin c = LIST_FIRST(&g_raid_md_classes); 252689b17223SAlexander Motin if (c == NULL || c->mdc_priority > class->mdc_priority) 252789b17223SAlexander Motin LIST_INSERT_HEAD(&g_raid_md_classes, class, mdc_list); 252889b17223SAlexander Motin else { 252989b17223SAlexander Motin while ((nc = LIST_NEXT(c, mdc_list)) != NULL && 253089b17223SAlexander Motin nc->mdc_priority < class->mdc_priority) 253189b17223SAlexander Motin c = nc; 253289b17223SAlexander Motin LIST_INSERT_AFTER(c, class, mdc_list); 253389b17223SAlexander Motin } 253489b17223SAlexander Motin if (g_raid_started) 253589b17223SAlexander Motin g_retaste(&g_raid_class); 253689b17223SAlexander Motin break; 253789b17223SAlexander Motin case MOD_UNLOAD: 253889b17223SAlexander Motin LIST_REMOVE(class, mdc_list); 253989b17223SAlexander Motin break; 254089b17223SAlexander Motin default: 254189b17223SAlexander Motin error = EOPNOTSUPP; 254289b17223SAlexander Motin break; 254389b17223SAlexander Motin } 254489b17223SAlexander Motin 254589b17223SAlexander Motin return (error); 254689b17223SAlexander Motin } 254789b17223SAlexander Motin 254889b17223SAlexander Motin int 254989b17223SAlexander Motin g_raid_tr_modevent(module_t mod, int type, void *arg) 255089b17223SAlexander Motin { 255189b17223SAlexander Motin struct g_raid_tr_class *class, *c, *nc; 255289b17223SAlexander Motin int error; 255389b17223SAlexander Motin 255489b17223SAlexander Motin error = 0; 255589b17223SAlexander Motin class = arg; 255689b17223SAlexander Motin switch (type) { 255789b17223SAlexander Motin case MOD_LOAD: 255889b17223SAlexander Motin c = LIST_FIRST(&g_raid_tr_classes); 255989b17223SAlexander Motin if (c == NULL || c->trc_priority > class->trc_priority) 256089b17223SAlexander Motin LIST_INSERT_HEAD(&g_raid_tr_classes, class, trc_list); 256189b17223SAlexander Motin else { 256289b17223SAlexander Motin while ((nc = LIST_NEXT(c, trc_list)) != NULL && 256389b17223SAlexander Motin nc->trc_priority < class->trc_priority) 256489b17223SAlexander Motin c = nc; 256589b17223SAlexander Motin LIST_INSERT_AFTER(c, class, trc_list); 256689b17223SAlexander Motin } 256789b17223SAlexander Motin break; 256889b17223SAlexander Motin case MOD_UNLOAD: 256989b17223SAlexander Motin LIST_REMOVE(class, trc_list); 257089b17223SAlexander Motin break; 257189b17223SAlexander Motin default: 257289b17223SAlexander Motin error = EOPNOTSUPP; 257389b17223SAlexander Motin break; 257489b17223SAlexander Motin } 257589b17223SAlexander Motin 257689b17223SAlexander Motin return (error); 257789b17223SAlexander Motin } 257889b17223SAlexander Motin 257989b17223SAlexander Motin /* 258089b17223SAlexander Motin * Use local implementation of DECLARE_GEOM_CLASS(g_raid_class, g_raid) 258189b17223SAlexander Motin * to reduce module priority, allowing submodules to register them first. 258289b17223SAlexander Motin */ 258389b17223SAlexander Motin static moduledata_t g_raid_mod = { 258489b17223SAlexander Motin "g_raid", 258589b17223SAlexander Motin g_modevent, 258689b17223SAlexander Motin &g_raid_class 258789b17223SAlexander Motin }; 258889b17223SAlexander Motin DECLARE_MODULE(g_raid, g_raid_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD); 258989b17223SAlexander Motin MODULE_VERSION(geom_raid, 0); 2590