12d1661a5SPawel Jakub Dawidek /*- 24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause 33728855aSPedro F. Giffuni * 4e6757059SPawel Jakub Dawidek * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 52d1661a5SPawel Jakub Dawidek * All rights reserved. 62d1661a5SPawel Jakub Dawidek * 72d1661a5SPawel Jakub Dawidek * Redistribution and use in source and binary forms, with or without 82d1661a5SPawel Jakub Dawidek * modification, are permitted provided that the following conditions 92d1661a5SPawel Jakub Dawidek * are met: 102d1661a5SPawel Jakub Dawidek * 1. Redistributions of source code must retain the above copyright 112d1661a5SPawel Jakub Dawidek * notice, this list of conditions and the following disclaimer. 122d1661a5SPawel Jakub Dawidek * 2. Redistributions in binary form must reproduce the above copyright 132d1661a5SPawel Jakub Dawidek * notice, this list of conditions and the following disclaimer in the 142d1661a5SPawel Jakub Dawidek * documentation and/or other materials provided with the distribution. 152d1661a5SPawel Jakub Dawidek * 162d1661a5SPawel Jakub Dawidek * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 172d1661a5SPawel Jakub Dawidek * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 182d1661a5SPawel Jakub Dawidek * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 192d1661a5SPawel Jakub Dawidek * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 202d1661a5SPawel Jakub Dawidek * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 212d1661a5SPawel Jakub Dawidek * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 222d1661a5SPawel Jakub Dawidek * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 232d1661a5SPawel Jakub Dawidek * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 242d1661a5SPawel Jakub Dawidek * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 252d1661a5SPawel Jakub Dawidek * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 262d1661a5SPawel Jakub Dawidek * SUCH DAMAGE. 272d1661a5SPawel Jakub Dawidek */ 282d1661a5SPawel Jakub Dawidek 292d1661a5SPawel Jakub Dawidek #include <sys/param.h> 302d1661a5SPawel Jakub Dawidek #include <sys/systm.h> 31f3dc1727SMitchell Horne #include <sys/bio.h> 32f3dc1727SMitchell Horne #include <sys/eventhandler.h> 332d1661a5SPawel Jakub Dawidek #include <sys/kernel.h> 34f3dc1727SMitchell Horne #include <sys/kthread.h> 352d1661a5SPawel Jakub Dawidek #include <sys/limits.h> 362d1661a5SPawel Jakub Dawidek #include <sys/lock.h> 372d1661a5SPawel Jakub Dawidek #include <sys/malloc.h> 38f3dc1727SMitchell Horne #include <sys/module.h> 39f3dc1727SMitchell Horne #include <sys/mutex.h> 40f3dc1727SMitchell Horne #include <sys/proc.h> 414eb861d3SMitchell Horne #include <sys/reboot.h> 42f3dc1727SMitchell Horne #include <sys/sbuf.h> 43f3dc1727SMitchell Horne #include <sys/sched.h> 44f3dc1727SMitchell Horne #include <sys/sysctl.h> 45f3dc1727SMitchell Horne 462d1661a5SPawel Jakub Dawidek #include <vm/uma.h> 47f3dc1727SMitchell Horne 482d1661a5SPawel Jakub Dawidek #include <geom/geom.h> 49ac03832eSConrad Meyer #include <geom/geom_dbg.h> 502d1661a5SPawel Jakub Dawidek #include <geom/raid3/g_raid3.h> 512d1661a5SPawel Jakub Dawidek 52cb08c2ccSAlexander Leidinger FEATURE(geom_raid3, "GEOM RAID-3 
functionality"); 532d1661a5SPawel Jakub Dawidek 545bb84bc8SRobert Watson static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data"); 552d1661a5SPawel Jakub Dawidek 562d1661a5SPawel Jakub Dawidek SYSCTL_DECL(_kern_geom); 577029da5cSPawel Biernacki static SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 586472ac3dSEd Schouten "GEOM_RAID3 stuff"); 59809a9dc6SPawel Jakub Dawidek u_int g_raid3_debug = 0; 60af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid3_debug, 0, 612d1661a5SPawel Jakub Dawidek "Debug level"); 62e5e7825cSPawel Jakub Dawidek static u_int g_raid3_timeout = 4; 63af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_raid3_timeout, 642d1661a5SPawel Jakub Dawidek 0, "Time to wait on all raid3 components"); 654d006a98SPawel Jakub Dawidek static u_int g_raid3_idletime = 5; 66af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RWTUN, 674d006a98SPawel Jakub Dawidek &g_raid3_idletime, 0, "Mark components as clean when idling"); 683aae74ecSPawel Jakub Dawidek static u_int g_raid3_disconnect_on_failure = 1; 69af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN, 703aae74ecSPawel Jakub Dawidek &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 71e6757059SPawel Jakub Dawidek static u_int g_raid3_syncreqs = 2; 723650be51SPawel Jakub Dawidek SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN, 733650be51SPawel Jakub Dawidek &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests."); 74ed940a82SPawel Jakub Dawidek static u_int g_raid3_use_malloc = 0; 75ed940a82SPawel Jakub Dawidek SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN, 76ed940a82SPawel Jakub Dawidek &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9)."); 772d1661a5SPawel Jakub Dawidek 782d1661a5SPawel Jakub Dawidek static u_int 
g_raid3_n64k = 50; 79af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RDTUN, &g_raid3_n64k, 0, 802d1661a5SPawel Jakub Dawidek "Maximum number of 64kB allocations"); 812d1661a5SPawel Jakub Dawidek static u_int g_raid3_n16k = 200; 82af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RDTUN, &g_raid3_n16k, 0, 832d1661a5SPawel Jakub Dawidek "Maximum number of 16kB allocations"); 842d1661a5SPawel Jakub Dawidek static u_int g_raid3_n4k = 1200; 85af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RDTUN, &g_raid3_n4k, 0, 862d1661a5SPawel Jakub Dawidek "Maximum number of 4kB allocations"); 872d1661a5SPawel Jakub Dawidek 887029da5cSPawel Biernacki static SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, 897029da5cSPawel Biernacki CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 902d1661a5SPawel Jakub Dawidek "GEOM_RAID3 statistics"); 91dba915cfSPawel Jakub Dawidek static u_int g_raid3_parity_mismatch = 0; 92dba915cfSPawel Jakub Dawidek SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD, 93dba915cfSPawel Jakub Dawidek &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode"); 942d1661a5SPawel Jakub Dawidek 952d1661a5SPawel Jakub Dawidek #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \ 962d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 972d1661a5SPawel Jakub Dawidek msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 982d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 992d1661a5SPawel Jakub Dawidek } while (0) 1002d1661a5SPawel Jakub Dawidek 101f62c1a47SAlexander Motin static eventhandler_tag g_raid3_post_sync = NULL; 102f62c1a47SAlexander Motin static int g_raid3_shutdown = 0; 1032d1661a5SPawel Jakub Dawidek 1042d1661a5SPawel Jakub Dawidek static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp, 1052d1661a5SPawel Jakub Dawidek struct g_geom *gp); 1062d1661a5SPawel 
Jakub Dawidek static g_taste_t g_raid3_taste; 1079da3072cSPawel Jakub Dawidek static void g_raid3_init(struct g_class *mp); 1089da3072cSPawel Jakub Dawidek static void g_raid3_fini(struct g_class *mp); 10933cb9b3cSMark Johnston static void g_raid3_providergone(struct g_provider *pp); 1102d1661a5SPawel Jakub Dawidek 1112d1661a5SPawel Jakub Dawidek struct g_class g_raid3_class = { 1122d1661a5SPawel Jakub Dawidek .name = G_RAID3_CLASS_NAME, 1132d1661a5SPawel Jakub Dawidek .version = G_VERSION, 1142d1661a5SPawel Jakub Dawidek .ctlreq = g_raid3_config, 1152d1661a5SPawel Jakub Dawidek .taste = g_raid3_taste, 1169da3072cSPawel Jakub Dawidek .destroy_geom = g_raid3_destroy_geom, 1179da3072cSPawel Jakub Dawidek .init = g_raid3_init, 11833cb9b3cSMark Johnston .fini = g_raid3_fini, 11933cb9b3cSMark Johnston .providergone = g_raid3_providergone, 1202d1661a5SPawel Jakub Dawidek }; 1212d1661a5SPawel Jakub Dawidek 1222d1661a5SPawel Jakub Dawidek static void g_raid3_destroy_provider(struct g_raid3_softc *sc); 123d97d5ee9SPawel Jakub Dawidek static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state); 124d97d5ee9SPawel Jakub Dawidek static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force); 1252d1661a5SPawel Jakub Dawidek static void g_raid3_dumpconf(struct sbuf *sb, const char *indent, 1262d1661a5SPawel Jakub Dawidek struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 1272d1661a5SPawel Jakub Dawidek static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type); 1283650be51SPawel Jakub Dawidek static int g_raid3_register_request(struct bio *pbp); 1293650be51SPawel Jakub Dawidek static void g_raid3_sync_release(struct g_raid3_softc *sc); 130fd02d0bcSMark Johnston static void g_raid3_timeout_drain(struct g_raid3_softc *sc); 1312d1661a5SPawel Jakub Dawidek 1322d1661a5SPawel Jakub Dawidek static const char * 1332d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(int state) 1342d1661a5SPawel Jakub Dawidek { 1352d1661a5SPawel Jakub Dawidek 
1362d1661a5SPawel Jakub Dawidek switch (state) { 1372d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NODISK: 1382d1661a5SPawel Jakub Dawidek return ("NODISK"); 1392d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NONE: 1402d1661a5SPawel Jakub Dawidek return ("NONE"); 1412d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NEW: 1422d1661a5SPawel Jakub Dawidek return ("NEW"); 1432d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_ACTIVE: 1442d1661a5SPawel Jakub Dawidek return ("ACTIVE"); 1452d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_STALE: 1462d1661a5SPawel Jakub Dawidek return ("STALE"); 1472d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_SYNCHRONIZING: 1482d1661a5SPawel Jakub Dawidek return ("SYNCHRONIZING"); 1492d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_DISCONNECTED: 1502d1661a5SPawel Jakub Dawidek return ("DISCONNECTED"); 1512d1661a5SPawel Jakub Dawidek default: 1522d1661a5SPawel Jakub Dawidek return ("INVALID"); 1532d1661a5SPawel Jakub Dawidek } 1542d1661a5SPawel Jakub Dawidek } 1552d1661a5SPawel Jakub Dawidek 1562d1661a5SPawel Jakub Dawidek static const char * 1572d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(int state) 1582d1661a5SPawel Jakub Dawidek { 1592d1661a5SPawel Jakub Dawidek 1602d1661a5SPawel Jakub Dawidek switch (state) { 1612d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_STARTING: 1622d1661a5SPawel Jakub Dawidek return ("STARTING"); 1632d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_DEGRADED: 1642d1661a5SPawel Jakub Dawidek return ("DEGRADED"); 1652d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_COMPLETE: 1662d1661a5SPawel Jakub Dawidek return ("COMPLETE"); 1672d1661a5SPawel Jakub Dawidek default: 1682d1661a5SPawel Jakub Dawidek return ("INVALID"); 1692d1661a5SPawel Jakub Dawidek } 1702d1661a5SPawel Jakub Dawidek } 1712d1661a5SPawel Jakub Dawidek 1722d1661a5SPawel Jakub Dawidek const char * 1732d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(struct g_raid3_disk *disk) 1742d1661a5SPawel 
Jakub Dawidek { 1752d1661a5SPawel Jakub Dawidek 1762d1661a5SPawel Jakub Dawidek if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 1772d1661a5SPawel Jakub Dawidek return ("[unknown]"); 1782d1661a5SPawel Jakub Dawidek return (disk->d_name); 1792d1661a5SPawel Jakub Dawidek } 1802d1661a5SPawel Jakub Dawidek 181ed940a82SPawel Jakub Dawidek static void * 182ed940a82SPawel Jakub Dawidek g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags) 183ed940a82SPawel Jakub Dawidek { 184ed940a82SPawel Jakub Dawidek void *ptr; 185d4060fa6SAlexander Motin enum g_raid3_zones zone; 186ed940a82SPawel Jakub Dawidek 187d4060fa6SAlexander Motin if (g_raid3_use_malloc || 188d4060fa6SAlexander Motin (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES) 189ed940a82SPawel Jakub Dawidek ptr = malloc(size, M_RAID3, flags); 190ed940a82SPawel Jakub Dawidek else { 191d4060fa6SAlexander Motin ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone, 192d4060fa6SAlexander Motin &sc->sc_zones[zone], flags); 193d4060fa6SAlexander Motin sc->sc_zones[zone].sz_requested++; 194ed940a82SPawel Jakub Dawidek if (ptr == NULL) 195d4060fa6SAlexander Motin sc->sc_zones[zone].sz_failed++; 196ed940a82SPawel Jakub Dawidek } 197ed940a82SPawel Jakub Dawidek return (ptr); 198ed940a82SPawel Jakub Dawidek } 199ed940a82SPawel Jakub Dawidek 200ed940a82SPawel Jakub Dawidek static void 201ed940a82SPawel Jakub Dawidek g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size) 202ed940a82SPawel Jakub Dawidek { 203d4060fa6SAlexander Motin enum g_raid3_zones zone; 204ed940a82SPawel Jakub Dawidek 205d4060fa6SAlexander Motin if (g_raid3_use_malloc || 206d4060fa6SAlexander Motin (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES) 207ed940a82SPawel Jakub Dawidek free(ptr, M_RAID3); 208ed940a82SPawel Jakub Dawidek else { 209d4060fa6SAlexander Motin uma_zfree_arg(sc->sc_zones[zone].sz_zone, 210d4060fa6SAlexander Motin ptr, &sc->sc_zones[zone]); 211ed940a82SPawel Jakub Dawidek } 212ed940a82SPawel Jakub Dawidek } 
213ed940a82SPawel Jakub Dawidek 2143650be51SPawel Jakub Dawidek static int 2153650be51SPawel Jakub Dawidek g_raid3_uma_ctor(void *mem, int size, void *arg, int flags) 2163650be51SPawel Jakub Dawidek { 2173650be51SPawel Jakub Dawidek struct g_raid3_zone *sz = arg; 2183650be51SPawel Jakub Dawidek 2190d14fae5SPawel Jakub Dawidek if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max) 2203650be51SPawel Jakub Dawidek return (ENOMEM); 2213650be51SPawel Jakub Dawidek sz->sz_inuse++; 2223650be51SPawel Jakub Dawidek return (0); 2233650be51SPawel Jakub Dawidek } 2243650be51SPawel Jakub Dawidek 2253650be51SPawel Jakub Dawidek static void 2263650be51SPawel Jakub Dawidek g_raid3_uma_dtor(void *mem, int size, void *arg) 2273650be51SPawel Jakub Dawidek { 2283650be51SPawel Jakub Dawidek struct g_raid3_zone *sz = arg; 2293650be51SPawel Jakub Dawidek 2303650be51SPawel Jakub Dawidek sz->sz_inuse--; 2313650be51SPawel Jakub Dawidek } 2323650be51SPawel Jakub Dawidek 23306b215fdSAlexander Motin #define g_raid3_xor(src, dst, size) \ 23406b215fdSAlexander Motin _g_raid3_xor((uint64_t *)(src), \ 2352d1661a5SPawel Jakub Dawidek (uint64_t *)(dst), (size_t)size) 2362d1661a5SPawel Jakub Dawidek static void 23706b215fdSAlexander Motin _g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size) 2382d1661a5SPawel Jakub Dawidek { 2392d1661a5SPawel Jakub Dawidek 2402d1661a5SPawel Jakub Dawidek KASSERT((size % 128) == 0, ("Invalid size: %zu.", size)); 2412d1661a5SPawel Jakub Dawidek for (; size > 0; size -= 128) { 24206b215fdSAlexander Motin *dst++ ^= (*src++); 24306b215fdSAlexander Motin *dst++ ^= (*src++); 24406b215fdSAlexander Motin *dst++ ^= (*src++); 24506b215fdSAlexander Motin *dst++ ^= (*src++); 24606b215fdSAlexander Motin *dst++ ^= (*src++); 24706b215fdSAlexander Motin *dst++ ^= (*src++); 24806b215fdSAlexander Motin *dst++ ^= (*src++); 24906b215fdSAlexander Motin *dst++ ^= (*src++); 25006b215fdSAlexander Motin *dst++ ^= (*src++); 25106b215fdSAlexander Motin *dst++ ^= (*src++); 25206b215fdSAlexander 
Motin *dst++ ^= (*src++); 25306b215fdSAlexander Motin *dst++ ^= (*src++); 25406b215fdSAlexander Motin *dst++ ^= (*src++); 25506b215fdSAlexander Motin *dst++ ^= (*src++); 25606b215fdSAlexander Motin *dst++ ^= (*src++); 25706b215fdSAlexander Motin *dst++ ^= (*src++); 2582d1661a5SPawel Jakub Dawidek } 2592d1661a5SPawel Jakub Dawidek } 2602d1661a5SPawel Jakub Dawidek 261dba915cfSPawel Jakub Dawidek static int 262dba915cfSPawel Jakub Dawidek g_raid3_is_zero(struct bio *bp) 263dba915cfSPawel Jakub Dawidek { 264dba915cfSPawel Jakub Dawidek static const uint64_t zeros[] = { 265dba915cfSPawel Jakub Dawidek 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 266dba915cfSPawel Jakub Dawidek }; 267dba915cfSPawel Jakub Dawidek u_char *addr; 268dba915cfSPawel Jakub Dawidek ssize_t size; 269dba915cfSPawel Jakub Dawidek 270dba915cfSPawel Jakub Dawidek size = bp->bio_length; 271dba915cfSPawel Jakub Dawidek addr = (u_char *)bp->bio_data; 272dba915cfSPawel Jakub Dawidek for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) { 273dba915cfSPawel Jakub Dawidek if (bcmp(addr, zeros, sizeof(zeros)) != 0) 274dba915cfSPawel Jakub Dawidek return (0); 275dba915cfSPawel Jakub Dawidek } 276dba915cfSPawel Jakub Dawidek return (1); 277dba915cfSPawel Jakub Dawidek } 278dba915cfSPawel Jakub Dawidek 2792d1661a5SPawel Jakub Dawidek /* 2802d1661a5SPawel Jakub Dawidek * --- Events handling functions --- 2812d1661a5SPawel Jakub Dawidek * Events in geom_raid3 are used to maintain disks and device status 2822d1661a5SPawel Jakub Dawidek * from one thread to simplify locking. 
2832d1661a5SPawel Jakub Dawidek */ 2842d1661a5SPawel Jakub Dawidek static void 2852d1661a5SPawel Jakub Dawidek g_raid3_event_free(struct g_raid3_event *ep) 2862d1661a5SPawel Jakub Dawidek { 2872d1661a5SPawel Jakub Dawidek 2882d1661a5SPawel Jakub Dawidek free(ep, M_RAID3); 2892d1661a5SPawel Jakub Dawidek } 2902d1661a5SPawel Jakub Dawidek 291fd02d0bcSMark Johnston static int 292fd02d0bcSMark Johnston g_raid3_event_dispatch(struct g_raid3_event *ep, void *arg, int state, 293fd02d0bcSMark Johnston int flags) 2942d1661a5SPawel Jakub Dawidek { 2952d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 2962d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 2972d1661a5SPawel Jakub Dawidek int error; 2982d1661a5SPawel Jakub Dawidek 2992d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep); 3002d1661a5SPawel Jakub Dawidek if ((flags & G_RAID3_EVENT_DEVICE) != 0) { 3012d1661a5SPawel Jakub Dawidek disk = NULL; 3022d1661a5SPawel Jakub Dawidek sc = arg; 3032d1661a5SPawel Jakub Dawidek } else { 3042d1661a5SPawel Jakub Dawidek disk = arg; 3052d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 3062d1661a5SPawel Jakub Dawidek } 3072d1661a5SPawel Jakub Dawidek ep->e_disk = disk; 3082d1661a5SPawel Jakub Dawidek ep->e_state = state; 3092d1661a5SPawel Jakub Dawidek ep->e_flags = flags; 3102d1661a5SPawel Jakub Dawidek ep->e_error = 0; 3112d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3122d1661a5SPawel Jakub Dawidek TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 3132d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 3142d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3152d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 3162d1661a5SPawel Jakub Dawidek wakeup(sc); 3172d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 3182d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 3192d1661a5SPawel Jakub Dawidek if ((flags & G_RAID3_EVENT_DONTWAIT) != 0) 3202d1661a5SPawel Jakub Dawidek return (0); 
3213650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 3222d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 3233650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 3242d1661a5SPawel Jakub Dawidek while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) { 3252d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3262d1661a5SPawel Jakub Dawidek MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event", 3272d1661a5SPawel Jakub Dawidek hz * 5); 3282d1661a5SPawel Jakub Dawidek } 3292d1661a5SPawel Jakub Dawidek error = ep->e_error; 3302d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 3313650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3322d1661a5SPawel Jakub Dawidek return (error); 3332d1661a5SPawel Jakub Dawidek } 3342d1661a5SPawel Jakub Dawidek 335fd02d0bcSMark Johnston int 336fd02d0bcSMark Johnston g_raid3_event_send(void *arg, int state, int flags) 337fd02d0bcSMark Johnston { 338fd02d0bcSMark Johnston struct g_raid3_event *ep; 339fd02d0bcSMark Johnston 340fd02d0bcSMark Johnston ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK); 341fd02d0bcSMark Johnston return (g_raid3_event_dispatch(ep, arg, state, flags)); 342fd02d0bcSMark Johnston } 343fd02d0bcSMark Johnston 3442d1661a5SPawel Jakub Dawidek static struct g_raid3_event * 3452d1661a5SPawel Jakub Dawidek g_raid3_event_get(struct g_raid3_softc *sc) 3462d1661a5SPawel Jakub Dawidek { 3472d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 3482d1661a5SPawel Jakub Dawidek 3492d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3502d1661a5SPawel Jakub Dawidek ep = TAILQ_FIRST(&sc->sc_events); 3512d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 3522d1661a5SPawel Jakub Dawidek return (ep); 3532d1661a5SPawel Jakub Dawidek } 3542d1661a5SPawel Jakub Dawidek 3552d1661a5SPawel Jakub Dawidek static void 356d97d5ee9SPawel Jakub Dawidek g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep) 357d97d5ee9SPawel Jakub Dawidek { 358d97d5ee9SPawel Jakub 
Dawidek 359d97d5ee9SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 360d97d5ee9SPawel Jakub Dawidek TAILQ_REMOVE(&sc->sc_events, ep, e_next); 361d97d5ee9SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 362d97d5ee9SPawel Jakub Dawidek } 363d97d5ee9SPawel Jakub Dawidek 364d97d5ee9SPawel Jakub Dawidek static void 3652d1661a5SPawel Jakub Dawidek g_raid3_event_cancel(struct g_raid3_disk *disk) 3662d1661a5SPawel Jakub Dawidek { 3672d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 3682d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep, *tmpep; 3692d1661a5SPawel Jakub Dawidek 3702d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 3713650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 3723650be51SPawel Jakub Dawidek 3732d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3742d1661a5SPawel Jakub Dawidek TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 3752d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) 3762d1661a5SPawel Jakub Dawidek continue; 3772d1661a5SPawel Jakub Dawidek if (ep->e_disk != disk) 3782d1661a5SPawel Jakub Dawidek continue; 3792d1661a5SPawel Jakub Dawidek TAILQ_REMOVE(&sc->sc_events, ep, e_next); 3802d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) 3812d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 3822d1661a5SPawel Jakub Dawidek else { 3832d1661a5SPawel Jakub Dawidek ep->e_error = ECANCELED; 3842d1661a5SPawel Jakub Dawidek wakeup(ep); 3852d1661a5SPawel Jakub Dawidek } 3862d1661a5SPawel Jakub Dawidek } 3872d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 3882d1661a5SPawel Jakub Dawidek } 3892d1661a5SPawel Jakub Dawidek 3902d1661a5SPawel Jakub Dawidek /* 3912d1661a5SPawel Jakub Dawidek * Return the number of disks in the given state. 3922d1661a5SPawel Jakub Dawidek * If state is equal to -1, count all connected disks. 
3932d1661a5SPawel Jakub Dawidek */ 3942d1661a5SPawel Jakub Dawidek u_int 3952d1661a5SPawel Jakub Dawidek g_raid3_ndisks(struct g_raid3_softc *sc, int state) 3962d1661a5SPawel Jakub Dawidek { 3972d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 398fa6a7837SDavid E. O'Brien u_int n, ndisks; 3992d1661a5SPawel Jakub Dawidek 4003650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 4013650be51SPawel Jakub Dawidek 402fa6a7837SDavid E. O'Brien for (n = ndisks = 0; n < sc->sc_ndisks; n++) { 4032d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 4042d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 4052d1661a5SPawel Jakub Dawidek continue; 4062d1661a5SPawel Jakub Dawidek if (state == -1 || disk->d_state == state) 4072d1661a5SPawel Jakub Dawidek ndisks++; 4082d1661a5SPawel Jakub Dawidek } 4092d1661a5SPawel Jakub Dawidek return (ndisks); 4102d1661a5SPawel Jakub Dawidek } 4112d1661a5SPawel Jakub Dawidek 4122d1661a5SPawel Jakub Dawidek static u_int 4132d1661a5SPawel Jakub Dawidek g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp) 4142d1661a5SPawel Jakub Dawidek { 4152d1661a5SPawel Jakub Dawidek struct bio *bp; 4162d1661a5SPawel Jakub Dawidek u_int nreqs = 0; 4172d1661a5SPawel Jakub Dawidek 4182d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 4192d1661a5SPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 4202d1661a5SPawel Jakub Dawidek if (bp->bio_from == cp) 4212d1661a5SPawel Jakub Dawidek nreqs++; 4222d1661a5SPawel Jakub Dawidek } 4232d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 4242d1661a5SPawel Jakub Dawidek return (nreqs); 4252d1661a5SPawel Jakub Dawidek } 4262d1661a5SPawel Jakub Dawidek 4272d1661a5SPawel Jakub Dawidek static int 4282d1661a5SPawel Jakub Dawidek g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp) 4292d1661a5SPawel Jakub Dawidek { 4302d1661a5SPawel Jakub Dawidek 43179e61493SPawel Jakub Dawidek if (cp->index > 0) { 4322d1661a5SPawel Jakub Dawidek 
G_RAID3_DEBUG(2, 4332d1661a5SPawel Jakub Dawidek "I/O requests for %s exist, can't destroy it now.", 4342d1661a5SPawel Jakub Dawidek cp->provider->name); 4352d1661a5SPawel Jakub Dawidek return (1); 4362d1661a5SPawel Jakub Dawidek } 4372d1661a5SPawel Jakub Dawidek if (g_raid3_nrequests(sc, cp) > 0) { 4382d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, 4392d1661a5SPawel Jakub Dawidek "I/O requests for %s in queue, can't destroy it now.", 4402d1661a5SPawel Jakub Dawidek cp->provider->name); 4412d1661a5SPawel Jakub Dawidek return (1); 4422d1661a5SPawel Jakub Dawidek } 4432d1661a5SPawel Jakub Dawidek return (0); 4442d1661a5SPawel Jakub Dawidek } 4452d1661a5SPawel Jakub Dawidek 4462d1661a5SPawel Jakub Dawidek static void 447d97d5ee9SPawel Jakub Dawidek g_raid3_destroy_consumer(void *arg, int flags __unused) 448d97d5ee9SPawel Jakub Dawidek { 449d97d5ee9SPawel Jakub Dawidek struct g_consumer *cp; 450d97d5ee9SPawel Jakub Dawidek 4513650be51SPawel Jakub Dawidek g_topology_assert(); 4523650be51SPawel Jakub Dawidek 453d97d5ee9SPawel Jakub Dawidek cp = arg; 454d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 455d97d5ee9SPawel Jakub Dawidek g_detach(cp); 456d97d5ee9SPawel Jakub Dawidek g_destroy_consumer(cp); 457d97d5ee9SPawel Jakub Dawidek } 458d97d5ee9SPawel Jakub Dawidek 459d97d5ee9SPawel Jakub Dawidek static void 4602d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp) 4612d1661a5SPawel Jakub Dawidek { 462d97d5ee9SPawel Jakub Dawidek struct g_provider *pp; 463d97d5ee9SPawel Jakub Dawidek int retaste_wait; 4642d1661a5SPawel Jakub Dawidek 4652d1661a5SPawel Jakub Dawidek g_topology_assert(); 4662d1661a5SPawel Jakub Dawidek 4672d1661a5SPawel Jakub Dawidek cp->private = NULL; 4682d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 4692d1661a5SPawel Jakub Dawidek return; 4702d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name); 471d97d5ee9SPawel Jakub 
Dawidek pp = cp->provider; 472d97d5ee9SPawel Jakub Dawidek retaste_wait = 0; 473d97d5ee9SPawel Jakub Dawidek if (cp->acw == 1) { 474d97d5ee9SPawel Jakub Dawidek if ((pp->geom->flags & G_GEOM_WITHER) == 0) 475d97d5ee9SPawel Jakub Dawidek retaste_wait = 1; 476d97d5ee9SPawel Jakub Dawidek } 477d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr, 478d97d5ee9SPawel Jakub Dawidek -cp->acw, -cp->ace, 0); 479d97d5ee9SPawel Jakub Dawidek if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 480d97d5ee9SPawel Jakub Dawidek g_access(cp, -cp->acr, -cp->acw, -cp->ace); 481d97d5ee9SPawel Jakub Dawidek if (retaste_wait) { 482d97d5ee9SPawel Jakub Dawidek /* 483d97d5ee9SPawel Jakub Dawidek * After retaste event was send (inside g_access()), we can send 484d97d5ee9SPawel Jakub Dawidek * event to detach and destroy consumer. 485d97d5ee9SPawel Jakub Dawidek * A class, which has consumer to the given provider connected 486d97d5ee9SPawel Jakub Dawidek * will not receive retaste event for the provider. 487d97d5ee9SPawel Jakub Dawidek * This is the way how I ignore retaste events when I close 488d97d5ee9SPawel Jakub Dawidek * consumers opened for write: I detach and destroy consumer 489d97d5ee9SPawel Jakub Dawidek * after retaste event is sent. 
490d97d5ee9SPawel Jakub Dawidek */ 491d97d5ee9SPawel Jakub Dawidek g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL); 492d97d5ee9SPawel Jakub Dawidek return; 493d97d5ee9SPawel Jakub Dawidek } 494d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name); 4952d1661a5SPawel Jakub Dawidek g_detach(cp); 4962d1661a5SPawel Jakub Dawidek g_destroy_consumer(cp); 4972d1661a5SPawel Jakub Dawidek } 4982d1661a5SPawel Jakub Dawidek 4992d1661a5SPawel Jakub Dawidek static int 5002d1661a5SPawel Jakub Dawidek g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp) 5012d1661a5SPawel Jakub Dawidek { 50234cb1517SPawel Jakub Dawidek struct g_consumer *cp; 5032d1661a5SPawel Jakub Dawidek int error; 5042d1661a5SPawel Jakub Dawidek 5053650be51SPawel Jakub Dawidek g_topology_assert_not(); 5062d1661a5SPawel Jakub Dawidek KASSERT(disk->d_consumer == NULL, 5072d1661a5SPawel Jakub Dawidek ("Disk already connected (device %s).", disk->d_softc->sc_name)); 5082d1661a5SPawel Jakub Dawidek 5093650be51SPawel Jakub Dawidek g_topology_lock(); 51034cb1517SPawel Jakub Dawidek cp = g_new_consumer(disk->d_softc->sc_geom); 51134cb1517SPawel Jakub Dawidek error = g_attach(cp, pp); 512d97d5ee9SPawel Jakub Dawidek if (error != 0) { 51334cb1517SPawel Jakub Dawidek g_destroy_consumer(cp); 5143650be51SPawel Jakub Dawidek g_topology_unlock(); 51534cb1517SPawel Jakub Dawidek return (error); 51634cb1517SPawel Jakub Dawidek } 51734cb1517SPawel Jakub Dawidek error = g_access(cp, 1, 1, 1); 5183650be51SPawel Jakub Dawidek g_topology_unlock(); 51934cb1517SPawel Jakub Dawidek if (error != 0) { 52034cb1517SPawel Jakub Dawidek g_detach(cp); 52134cb1517SPawel Jakub Dawidek g_destroy_consumer(cp); 522d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).", 523d97d5ee9SPawel Jakub Dawidek pp->name, error); 524d97d5ee9SPawel Jakub Dawidek return (error); 525d97d5ee9SPawel Jakub Dawidek } 52634cb1517SPawel Jakub Dawidek disk->d_consumer = cp; 
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;
	G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
	return (0);
}

/*
 * Release a consumer: an attached consumer (cp->provider != NULL) is
 * closed and detached via g_raid3_kill_consumer(); one that was never
 * attached is simply destroyed.  A NULL consumer is a no-op.
 * Requires the GEOM topology lock.
 */
static void
g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_raid3_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize disk. This means allocate memory, create consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 *
 * The disk slot is selected by md->md_no.  On failure, NULL is returned
 * and the error is stored in *errorp (when errorp is not NULL); on
 * success, *errorp is set to 0 and the initialized disk is returned.
 */
static struct g_raid3_disk *
g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
    struct g_raid3_metadata *md, int *errorp)
{
	struct g_raid3_disk *disk;
	int error;

	disk = &sc->sc_disks[md->md_no];
	error = g_raid3_connect_disk(disk, pp);
	if (error != 0) {
		if (errorp != NULL)
			*errorp = error;
		return (NULL);
	}
	disk->d_state = G_RAID3_DISK_STATE_NONE;
	disk->d_flags = md->md_dflags;
	/* A non-empty provider name in the metadata means it was hardcoded. */
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
}

/*
 * Tear down a disk: cancel its pending events, stop synchronization if it
 * was being synchronized, drop its consumer and mark the slot NODISK.
 * Called with sc_lock held exclusively; the topology lock is taken
 * locally around the consumer disconnect.
 */
static void
g_raid3_destroy_disk(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
		return;
	g_raid3_event_cancel(disk);
	switch (disk->d_state) {
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		if (sc->sc_syncdisk != NULL)
			g_raid3_sync_stop(sc, 1);
		/* FALLTHROUGH */
	case G_RAID3_DISK_STATE_NEW:
	case G_RAID3_DISK_STATE_STALE:
	case G_RAID3_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_raid3_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		disk->d_consumer = NULL;
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_raid3_get_diskname(disk),
		    g_raid3_disk_state2str(disk->d_state)));
	}
	disk->d_state = G_RAID3_DISK_STATE_NODISK;
}

/*
 * Final teardown of the softc, performed once the reference count has
 * dropped to zero.  The caller holds sc_lock exclusively; it is unlocked
 * and destroyed here.  The UMA zones exist only when the malloc-based
 * allocator is not in use.
 */
static void
g_raid3_free_device(struct g_raid3_softc *sc)
{
	KASSERT(sc->sc_refcnt == 0,
	    ("%s: non-zero refcount %u", __func__, sc->sc_refcnt));

	if (!g_raid3_use_malloc) {
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
	free(sc->sc_disks, M_RAID3);
	free(sc, M_RAID3);
}

/*
 * GEOM providergone method: the provider held a reference on the softc
 * (stored in pp->private); drop it and free the device on last release.
 */
static void
g_raid3_providergone(struct g_provider *pp)
{
	struct g_raid3_softc *sc = pp->private;

	if (--sc->sc_refcnt == 0)
		g_raid3_free_device(sc);
}

/*
 * Destroy the device: flush final metadata (with the DIRTY flag cleared)
 * to every remaining disk and tear the disks down, cancel or complete all
 * queued events (waking up any waiters with ECANCELED), drain the timeout
 * callout, disconnect the synchronization geom's consumer and wither both
 * geoms.  Drops the reference taken at creation; the softc is freed here
 * if that was the last one.  Called with sc_lock held exclusively.
 */
static void
g_raid3_destroy_device(struct g_raid3_softc *sc)
{
	struct g_raid3_event *ep;
	struct g_raid3_disk *disk;
	struct g_geom *gp;
	struct g_consumer *cp;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_raid3_destroy_provider(sc);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
			disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
			g_raid3_update_metadata(disk);
			g_raid3_destroy_disk(disk);
		}
	}
	while ((ep = g_raid3_event_get(sc)) != NULL) {
		g_raid3_event_remove(sc, ep);
		if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
			g_raid3_event_free(ep);
		else {
			/*
			 * Someone is sleeping on this event; mark it done
			 * with ECANCELED and wake the waiter, who frees it.
			 */
			ep->e_error = ECANCELED;
			ep->e_flags |= G_RAID3_EVENT_DONE;
			G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	g_raid3_timeout_drain(sc);
	cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
	g_topology_lock();
	if (cp != NULL)
		g_raid3_disconnect_consumer(sc, cp);
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	if (--sc->sc_refcnt == 0)
		g_raid3_free_device(sc);
	g_topology_unlock();
}

/*
 * GEOM orphan method: the provider under one of our consumers went away.
 */
static void
g_raid3_orphan(struct g_consumer *cp)
{
	struct g_raid3_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	/* Request a syncid bump and disconnect the disk via the event queue. */
	disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
	g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
	    G_RAID3_EVENT_DONTWAIT);
}

/*
 * Write the metadata sector (the last sector of the disk's provider).
 * A NULL md writes an all-zero sector, i.e. clears the metadata.
 * On write failure the disk is marked BROKEN (logged loudly only on the
 * first occurrence) and, if configured and the device is COMPLETE, the
 * disk is disconnected with a genid bump requested.  Returns the
 * g_write_data() error, or 0 on success.  Requires sc_lock (shared or
 * exclusive) and an open consumer (at least r1w1e1).
 */
static int
g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
	if (md != NULL)
		raid3_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_RAID3);
	if (error != 0) {
		if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
			G_RAID3_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
			disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
		} else {
			G_RAID3_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
		}
		if (g_raid3_disconnect_on_failure &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
		}
	}
	return (error);
}

/*
 * Clear the metadata on the given disk by writing an all-zero metadata
 * sector.  Returns 0 on success or the write error.
 */
int
g_raid3_clear_metadata(struct g_raid3_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_raid3_write_metadata(disk, NULL);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s cleared.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
	return (error);
}

/*
 * Populate *md from the current device and disk state.  The
 * synchronization offset is recorded per component disk, i.e. divided by
 * the number of data disks (sc_ndisks - 1).  The provider name is stored
 * only for hardcoded disks; the provider size is stored whenever the
 * disk's consumer is attached.
 */
void
g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_provider *pp;

	bzero(md, sizeof(*md));
	sc = disk->d_softc;
	strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
	md->md_version = G_RAID3_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_id = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
	md->md_no = disk->d_no;
	md->md_syncid = disk->d_sync.ds_syncid;
	md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
	if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
		md->md_sync_offset =
		    disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
	}
	if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
		pp = disk->d_consumer->provider;
	else
		pp = NULL;
	if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
		strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
	if (pp != NULL)
		md->md_provsize = pp->mediasize;
}

/*
 * Fill in and write fresh metadata for the given disk, logging the
 * outcome.  Requires sc_lock (shared or exclusive).
 */
void
g_raid3_update_metadata(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc __diagused;
	struct g_raid3_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_raid3_fill_metadata(disk, &md);
	error = g_raid3_write_metadata(disk, &md);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s updated.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
}

/*
 * Increment the device's synchronization ID and store it, with updated
 * metadata, on every ACTIVE or SYNCHRONIZING disk.  Requires sc_lock
 * held exclusively and at least one active disk.
 */
static void
g_raid3_bump_syncid(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_raid3_update_metadata(disk);
		}
	}
}

/*
 * Increment the device's generation ID and store it, with updated
 * metadata, on every ACTIVE or SYNCHRONIZING disk.  Requires sc_lock
 * held exclusively and at least one active disk.
 */
static void
g_raid3_bump_genid(struct g_raid3_softc *sc)
{
	struct
g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_raid3_update_metadata(disk);
		}
	}
}

/*
 * Try to mark the device idle: clear the DIRTY flag on all active disks
 * and rewrite their metadata.  Does nothing (returns 0) if there is no
 * provider, fail-sync is disabled, the device is already idle, or writes
 * are still in flight.  If the provider is open for writing and the idle
 * time since the last write has not yet elapsed (and we are not shutting
 * down), returns the number of seconds remaining instead of idling.
 * 'acw' is the pending write access count, or -1 to use the provider's
 * current one.  Requires sc_lock held exclusively.
 */
static int
g_raid3_idle(struct g_raid3_softc *sc, int acw)
{
	struct g_raid3_disk *disk;
	u_int i;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
		if (!g_raid3_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
	return (0);
}

/*
 * Leave the idle state: record the write time and set the DIRTY flag
 * (with a metadata update) on all active disks.  No-op when fail-sync is
 * disabled.  Requires sc_lock held exclusively.
 */
static void
g_raid3_unidle(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int i;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
}

/*
 * Treat bio_driver1 field in parent bio as list head and field bio_caller1
 * in child bio as pointer to the next element on the list.
 */
#define	G_RAID3_HEAD_BIO(pbp)	(pbp)->bio_driver1

#define	G_RAID3_NEXT_BIO(cbp)	(cbp)->bio_caller1

#define	G_RAID3_FOREACH_BIO(pbp, bp)					\
	for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL;		\
	    (bp) = G_RAID3_NEXT_BIO(bp))

/* Safe variant: (tmpbp) caches the successor, so (bp) may be removed. */
#define	G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp)			\
	for ((bp) = G_RAID3_HEAD_BIO(pbp);				\
	    (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1);	\
	    (bp) = (tmpbp))

/* Start a parent bio with an empty child list. */
static void
g_raid3_init_bio(struct bio *pbp)
{

	G_RAID3_HEAD_BIO(pbp) = NULL;
}

static void
g_raid3_remove_bio(struct bio *cbp)
{
	struct bio *pbp, *bp;

	/*
	 * Unlink cbp from its parent's child list (head or interior) and
	 * clear its next pointer.  The bio itself is not destroyed.
	 */
	pbp = cbp->bio_parent;
	if (G_RAID3_HEAD_BIO(pbp) == cbp)
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp) {
				G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
}

/*
 * Remove sbp from its own parent's list and splice it into dbp's place
 * on dbp's parent's list; dbp is unlinked but not destroyed.
 */
static void
g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
{
	struct bio *pbp, *bp;

	g_raid3_remove_bio(sbp);
	pbp = dbp->bio_parent;
	G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
	if (G_RAID3_HEAD_BIO(pbp) == dbp)
		G_RAID3_HEAD_BIO(pbp) = sbp;
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == dbp) {
				G_RAID3_NEXT_BIO(bp) = sbp;
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(dbp) = NULL;
}

/*
 * Free a child bio: release its data buffer (one data-disk's share of the
 * parent's length), unlink it from the parent's child list, drop the
 * parent's child count and destroy the bio.
 */
static void
g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
{
	struct bio *bp, *pbp;
	size_t size;

	pbp = cbp->bio_parent;
	pbp->bio_children--;
	KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	g_raid3_free(sc, cbp->bio_data, size);
	if (G_RAID3_HEAD_BIO(pbp) == cbp) {
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
		G_RAID3_NEXT_BIO(cbp) = NULL;
		g_destroy_bio(cbp);
	} else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp)
				break;
		}
		if (bp != NULL) {
			KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
			    ("NULL bp->bio_driver1"));
			G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
			G_RAID3_NEXT_BIO(cbp) = NULL;
		}
		g_destroy_bio(cbp);
	}
}

/*
 * Clone the parent bio, allocate a per-disk data buffer for the clone
 * (sleeping only for REGULAR requests) and append it to the parent's
 * child list.  Returns NULL if the clone or the buffer cannot be
 * allocated; in the latter case the parent's child count is restored.
 */
static struct bio *
g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
{
	struct bio *bp, *cbp;
	size_t size;
	int memflag;

	cbp = g_clone_bio(pbp);
	if (cbp == NULL)
		return (NULL);
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
		memflag = M_WAITOK;
	else
		memflag = M_NOWAIT;
	cbp->bio_data = g_raid3_alloc(sc, size, memflag);
	if (cbp->bio_data == NULL) {
		pbp->bio_children--;
		g_destroy_bio(cbp);
		return (NULL);
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
	if (G_RAID3_HEAD_BIO(pbp) == NULL)
		G_RAID3_HEAD_BIO(pbp) = cbp;
	else {
		/* Append at the tail to keep child order stable. */
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == NULL) {
				G_RAID3_NEXT_BIO(bp) = cbp;
				break;
			}
		}
	}
	return (cbp);
}

/*
 * Scatter a write: interleave the parent's data across the child bios,
 * one 'atom' (sectorsize / data disks) per child per stripe, compute the
 * parity child's buffer as the XOR of all data children (unless NOPARITY
 * is set), drop children flagged NODISK, and dispatch the remaining
 * children to their consumers.
 */
static void
g_raid3_scatter(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *bp, *cbp, *tmpbp;
	off_t atom, cadd, padd, left;
	int first;

	sc = pbp->bio_to->private;
	bp = NULL;
	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Find bio for which we should calculate data.
		 */
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
				bp = cbp;
				break;
			}
		}
		KASSERT(bp != NULL, ("NULL parity bio."));
	}
	atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
	cadd = padd = 0;
	for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if (cbp == bp)
				continue;
			bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
			padd += atom;
		}
		cadd += atom;
	}
	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Calculate parity.
		 */
		first = 1;
		G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
			if (cbp == bp)
				continue;
			if (first) {
				/* Seed the parity buffer with the first child. */
				bcopy(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
				first = 0;
			} else {
				g_raid3_xor(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
			}
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
				g_raid3_destroy_bio(sc, cbp);
		}
	}
	G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
		struct g_consumer *cp;

		disk = cbp->bio_caller2;
		cp = disk->d_consumer;
		cbp->bio_to = cp->provider;
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		sc->sc_writes++;
		g_io_request(cbp, cp);
	}
}

static void
g_raid3_gather(struct bio *pbp)
{
struct g_raid3_disk *disk; 1143f5a2f7feSPawel Jakub Dawidek struct bio *xbp, *fbp, *cbp; 11442d1661a5SPawel Jakub Dawidek off_t atom, cadd, padd, left; 11452d1661a5SPawel Jakub Dawidek 114633cb9b3cSMark Johnston sc = pbp->bio_to->private; 11472d1661a5SPawel Jakub Dawidek /* 1148f5a2f7feSPawel Jakub Dawidek * Find bio for which we have to calculate data. 11492d1661a5SPawel Jakub Dawidek * While going through this path, check if all requests 11502d1661a5SPawel Jakub Dawidek * succeeded, if not, deny whole request. 1151f5a2f7feSPawel Jakub Dawidek * If we're in COMPLETE mode, we allow one request to fail, 1152f5a2f7feSPawel Jakub Dawidek * so if we find one, we're sending it to the parity consumer. 1153f5a2f7feSPawel Jakub Dawidek * If there are more failed requests, we deny whole request. 11542d1661a5SPawel Jakub Dawidek */ 1155f5a2f7feSPawel Jakub Dawidek xbp = fbp = NULL; 11562d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 11572d1661a5SPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) { 1158f5a2f7feSPawel Jakub Dawidek KASSERT(xbp == NULL, ("More than one parity bio.")); 1159f5a2f7feSPawel Jakub Dawidek xbp = cbp; 11602d1661a5SPawel Jakub Dawidek } 11612d1661a5SPawel Jakub Dawidek if (cbp->bio_error == 0) 11622d1661a5SPawel Jakub Dawidek continue; 11632d1661a5SPawel Jakub Dawidek /* 11642d1661a5SPawel Jakub Dawidek * Found failed request. 11652d1661a5SPawel Jakub Dawidek */ 1166f5a2f7feSPawel Jakub Dawidek if (fbp == NULL) { 1167f5a2f7feSPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) { 11682d1661a5SPawel Jakub Dawidek /* 1169f5a2f7feSPawel Jakub Dawidek * We are already in degraded mode, so we can't 1170f5a2f7feSPawel Jakub Dawidek * accept any failures. 
11712d1661a5SPawel Jakub Dawidek */ 1172f5a2f7feSPawel Jakub Dawidek if (pbp->bio_error == 0) 117317fec17eSPawel Jakub Dawidek pbp->bio_error = cbp->bio_error; 11742d1661a5SPawel Jakub Dawidek } else { 1175f5a2f7feSPawel Jakub Dawidek fbp = cbp; 11762d1661a5SPawel Jakub Dawidek } 1177f5a2f7feSPawel Jakub Dawidek } else { 11782d1661a5SPawel Jakub Dawidek /* 11792d1661a5SPawel Jakub Dawidek * Next failed request, that's too many. 11802d1661a5SPawel Jakub Dawidek */ 11812d1661a5SPawel Jakub Dawidek if (pbp->bio_error == 0) 1182f5a2f7feSPawel Jakub Dawidek pbp->bio_error = fbp->bio_error; 11832d1661a5SPawel Jakub Dawidek } 11843aae74ecSPawel Jakub Dawidek disk = cbp->bio_caller2; 11853aae74ecSPawel Jakub Dawidek if (disk == NULL) 11863aae74ecSPawel Jakub Dawidek continue; 11873aae74ecSPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 11883aae74ecSPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 11893aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).", 11903aae74ecSPawel Jakub Dawidek cbp->bio_error); 11913aae74ecSPawel Jakub Dawidek } else { 11923aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).", 11933aae74ecSPawel Jakub Dawidek cbp->bio_error); 11943aae74ecSPawel Jakub Dawidek } 11953aae74ecSPawel Jakub Dawidek if (g_raid3_disconnect_on_failure && 11963aae74ecSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 11973aae74ecSPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 11983aae74ecSPawel Jakub Dawidek g_raid3_event_send(disk, 11993aae74ecSPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 12003aae74ecSPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 12013aae74ecSPawel Jakub Dawidek } 12022d1661a5SPawel Jakub Dawidek } 12032d1661a5SPawel Jakub Dawidek if (pbp->bio_error != 0) 12042d1661a5SPawel Jakub Dawidek goto finish; 1205dba915cfSPawel Jakub Dawidek if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) { 1206dba915cfSPawel 
Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY; 1207dba915cfSPawel Jakub Dawidek if (xbp != fbp) 1208dba915cfSPawel Jakub Dawidek g_raid3_replace_bio(xbp, fbp); 1209dba915cfSPawel Jakub Dawidek g_raid3_destroy_bio(sc, fbp); 1210dba915cfSPawel Jakub Dawidek } else if (fbp != NULL) { 12112d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 12122d1661a5SPawel Jakub Dawidek 12132d1661a5SPawel Jakub Dawidek /* 12142d1661a5SPawel Jakub Dawidek * One request failed, so send the same request to 12152d1661a5SPawel Jakub Dawidek * the parity consumer. 12162d1661a5SPawel Jakub Dawidek */ 1217f5a2f7feSPawel Jakub Dawidek disk = pbp->bio_driver2; 12182d1661a5SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) { 1219f5a2f7feSPawel Jakub Dawidek pbp->bio_error = fbp->bio_error; 12202d1661a5SPawel Jakub Dawidek goto finish; 12212d1661a5SPawel Jakub Dawidek } 12222d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 12232d1661a5SPawel Jakub Dawidek pbp->bio_inbed--; 1224f5a2f7feSPawel Jakub Dawidek fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR); 1225f5a2f7feSPawel Jakub Dawidek if (disk->d_no == sc->sc_ndisks - 1) 1226f5a2f7feSPawel Jakub Dawidek fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1227f5a2f7feSPawel Jakub Dawidek fbp->bio_error = 0; 1228f5a2f7feSPawel Jakub Dawidek fbp->bio_completed = 0; 1229f5a2f7feSPawel Jakub Dawidek fbp->bio_children = 0; 1230f5a2f7feSPawel Jakub Dawidek fbp->bio_inbed = 0; 12312d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 1232f5a2f7feSPawel Jakub Dawidek fbp->bio_caller2 = disk; 1233f5a2f7feSPawel Jakub Dawidek fbp->bio_to = cp->provider; 1234f5a2f7feSPawel Jakub Dawidek G_RAID3_LOGREQ(3, fbp, "Sending request (recover)."); 12353650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 12362d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 12372d1661a5SPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 123879e61493SPawel Jakub Dawidek 
cp->index++; 1239f5a2f7feSPawel Jakub Dawidek g_io_request(fbp, cp); 12402d1661a5SPawel Jakub Dawidek return; 12412d1661a5SPawel Jakub Dawidek } 1242f5a2f7feSPawel Jakub Dawidek if (xbp != NULL) { 1243f5a2f7feSPawel Jakub Dawidek /* 1244f5a2f7feSPawel Jakub Dawidek * Calculate parity. 1245f5a2f7feSPawel Jakub Dawidek */ 1246f5a2f7feSPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 1247f5a2f7feSPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) 1248f5a2f7feSPawel Jakub Dawidek continue; 124906b215fdSAlexander Motin g_raid3_xor(cbp->bio_data, xbp->bio_data, 1250f5a2f7feSPawel Jakub Dawidek xbp->bio_length); 1251f5a2f7feSPawel Jakub Dawidek } 1252f5a2f7feSPawel Jakub Dawidek xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY; 1253dba915cfSPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) { 1254dba915cfSPawel Jakub Dawidek if (!g_raid3_is_zero(xbp)) { 1255dba915cfSPawel Jakub Dawidek g_raid3_parity_mismatch++; 1256dba915cfSPawel Jakub Dawidek pbp->bio_error = EIO; 1257dba915cfSPawel Jakub Dawidek goto finish; 1258dba915cfSPawel Jakub Dawidek } 1259dba915cfSPawel Jakub Dawidek g_raid3_destroy_bio(sc, xbp); 1260dba915cfSPawel Jakub Dawidek } 12612d1661a5SPawel Jakub Dawidek } 12622d1661a5SPawel Jakub Dawidek atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 12632d1661a5SPawel Jakub Dawidek cadd = padd = 0; 12642d1661a5SPawel Jakub Dawidek for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) { 12652d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 12662d1661a5SPawel Jakub Dawidek bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom); 12672d1661a5SPawel Jakub Dawidek pbp->bio_completed += atom; 12682d1661a5SPawel Jakub Dawidek padd += atom; 12692d1661a5SPawel Jakub Dawidek } 12702d1661a5SPawel Jakub Dawidek cadd += atom; 12712d1661a5SPawel Jakub Dawidek } 12722d1661a5SPawel Jakub Dawidek finish: 12732d1661a5SPawel Jakub Dawidek if (pbp->bio_error == 0) 12742d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, 
pbp, "Request finished."); 12754cf67afeSPawel Jakub Dawidek else { 12764cf67afeSPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) 12774cf67afeSPawel Jakub Dawidek G_RAID3_LOGREQ(1, pbp, "Verification error."); 12782d1661a5SPawel Jakub Dawidek else 12792d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, pbp, "Request failed."); 12804cf67afeSPawel Jakub Dawidek } 1281dba915cfSPawel Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK; 12822d1661a5SPawel Jakub Dawidek while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) 12832d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 1284290c6161SPawel Jakub Dawidek g_io_deliver(pbp, pbp->bio_error); 12852d1661a5SPawel Jakub Dawidek } 12862d1661a5SPawel Jakub Dawidek 12872d1661a5SPawel Jakub Dawidek static void 12882d1661a5SPawel Jakub Dawidek g_raid3_done(struct bio *bp) 12892d1661a5SPawel Jakub Dawidek { 12902d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 12912d1661a5SPawel Jakub Dawidek 12922d1661a5SPawel Jakub Dawidek sc = bp->bio_from->geom->softc; 12932d1661a5SPawel Jakub Dawidek bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR; 12942d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error); 12952d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 12962d1661a5SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 12978de58113SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 12982d1661a5SPawel Jakub Dawidek wakeup(sc); 12992d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 13002d1661a5SPawel Jakub Dawidek } 13012d1661a5SPawel Jakub Dawidek 13022d1661a5SPawel Jakub Dawidek static void 13032d1661a5SPawel Jakub Dawidek g_raid3_regular_request(struct bio *cbp) 13042d1661a5SPawel Jakub Dawidek { 13052d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 13062d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 13072d1661a5SPawel Jakub Dawidek struct bio *pbp; 13082d1661a5SPawel Jakub Dawidek 13092d1661a5SPawel Jakub Dawidek g_topology_assert_not(); 

	pbp = cbp->bio_parent;
	sc = pbp->bio_to->private;
	cbp->bio_from->index--;
	if (cbp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = cbp->bio_from->private;
	/* A NULL disk means the consumer was orphaned; tear it down. */
	if (disk == NULL) {
		g_topology_lock();
		g_raid3_kill_consumer(sc, cbp->bio_from);
		g_topology_unlock();
	}

	G_RAID3_LOGREQ(3, cbp, "Request finished.");
	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	/* Wait until every child of the parent request has completed. */
	if (pbp->bio_inbed != pbp->bio_children)
		return;
	switch (pbp->bio_cmd) {
	case BIO_READ:
		g_raid3_gather(pbp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		int error = 0;

		pbp->bio_completed = pbp->bio_length;
		/*
		 * Walk all children: destroy the successful ones, and for
		 * failures mark the disk broken and optionally disconnect
		 * it.  A single failure is tolerated; the second one fails
		 * the parent request.
		 */
		while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
			if (cbp->bio_error == 0) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			if (error == 0)
				error = cbp->bio_error;
			else if (pbp->bio_error == 0) {
				/*
				 * Next failed request, that's too many.
				 */
				pbp->bio_error = error;
			}

			disk = cbp->bio_caller2;
			if (disk == NULL) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			/* Log the first failure loudly, repeats quietly. */
			if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
				G_RAID3_LOGREQ(0, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			} else {
				G_RAID3_LOGREQ(1, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			}
			if (g_raid3_disconnect_on_failure &&
			    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
				sc->sc_bump_id |= G_RAID3_BUMP_GENID;
				g_raid3_event_send(disk,
				    G_RAID3_DISK_STATE_DISCONNECTED,
				    G_RAID3_EVENT_DONTWAIT);
			}
			g_raid3_destroy_bio(sc, cbp);
		}
		if (pbp->bio_error == 0)
			G_RAID3_LOGREQ(3, pbp, "Request finished.");
		else
			G_RAID3_LOGREQ(0, pbp, "Request failed.");
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_raid3_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	    }
	}
}

/*
 * Completion callback for synchronization requests: mark the bio as
 * SYNC, queue it for the worker thread and wake the worker up.
 */
static void
g_raid3_sync_done(struct bio *bp)
{
	struct g_raid3_softc *sc;

	G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_head(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
}

/*
 * Broadcast a BIO_FLUSH/BIO_SPEEDUP request to all active components.
 * All clones are allocated up front so the operation either reaches
 * every active disk or fails as a whole with ENOMEM.
 */
static void
g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_raid3_disk *disk;
	struct g_consumer *cp __diagused;
	struct bio *cbp;
	u_int i;

	bioq_init(&queue);
	/* First pass: clone the request for every active disk. */
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			/* Allocation failed: undo all clones and give up. */
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	/* Second pass: all clones exist, dispatch them. */
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

/*
 * GEOM start routine: entry point for all requests arriving at the
 * RAID3 provider.  Data requests are queued for the worker thread,
 * flushes are broadcast immediately, everything else is rejected.
 */
static void
g_raid3_start(struct bio *bp)
{
	struct g_raid3_softc *sc;

	sc = bp->bio_to->private;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_raid3_start() should not be called at all.
	 */
	KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
	    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_RAID3_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_SPEEDUP:
	case BIO_FLUSH:
		g_raid3_flush(sc, bp);
		return;
	case BIO_GETATTR:
		/* FALLTHROUGH - attributes are deliberately unsupported. */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
{
	struct g_raid3_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	disk = sc->sc_syncdisk;
	if (disk == NULL)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	for (i = 0; i < g_raid3_syncreqs; i++) {
		sbp = disk->d_sync.ds_bios[i];
		if (sbp == NULL)
			continue;
		sstart = sbp->bio_offset;
		send = sbp->bio_length;
		/*
		 * Sync WRITEs carry component-space offsets; scale them
		 * by (ndisks - 1) back into provider address space before
		 * comparing.
		 */
		if (sbp->bio_cmd == BIO_WRITE) {
			sstart *= sc->sc_ndisks - 1;
			send *= sc->sc_ndisks - 1;
		}
		send += sstart;
		/* Standard half-open interval overlap test. */
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}
/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_syncdisk == NULL)
		return (0);
	sstart = sbp->bio_offset;
	send = sstart + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		/* Standard half-open interval overlap test. */
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Puts request onto delayed queue.
 */
static void
g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Puts synchronization request onto delayed queue.
 */
static void
g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Releases delayed regular requests which don't collide anymore with sync
 * requests.  Released requests are re-queued at the head of the main
 * queue so the worker thread picks them up first.
 */
static void
g_raid3_regular_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_raid3_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Releases delayed sync requests which don't collide anymore with regular
 * requests.  Released requests are re-issued directly to their consumer.
 */
static void
g_raid3_sync_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_raid3_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_RAID3_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ
 * request is sent to the active provider and then a WRITE request (with the
 * read data) to the provider being synchronized.  When the WRITE is
 * finished, a new synchronization request is sent.
16223650be51SPawel Jakub Dawidek */ 16232d1661a5SPawel Jakub Dawidek static void 16242d1661a5SPawel Jakub Dawidek g_raid3_sync_request(struct bio *bp) 16252d1661a5SPawel Jakub Dawidek { 16262d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 16272d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 16282d1661a5SPawel Jakub Dawidek 162979e61493SPawel Jakub Dawidek bp->bio_from->index--; 16302d1661a5SPawel Jakub Dawidek sc = bp->bio_from->geom->softc; 16312d1661a5SPawel Jakub Dawidek disk = bp->bio_from->private; 16322d1661a5SPawel Jakub Dawidek if (disk == NULL) { 16333650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 16342d1661a5SPawel Jakub Dawidek g_topology_lock(); 16352d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(sc, bp->bio_from); 16362d1661a5SPawel Jakub Dawidek g_topology_unlock(); 16373650be51SPawel Jakub Dawidek free(bp->bio_data, M_RAID3); 16382d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 16393650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 16402d1661a5SPawel Jakub Dawidek return; 16412d1661a5SPawel Jakub Dawidek } 16422d1661a5SPawel Jakub Dawidek 16432d1661a5SPawel Jakub Dawidek /* 16442d1661a5SPawel Jakub Dawidek * Synchronization request. 
16452d1661a5SPawel Jakub Dawidek */ 16462d1661a5SPawel Jakub Dawidek switch (bp->bio_cmd) { 16472d1661a5SPawel Jakub Dawidek case BIO_READ: 16482d1661a5SPawel Jakub Dawidek { 16492d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 16502d1661a5SPawel Jakub Dawidek u_char *dst, *src; 16512d1661a5SPawel Jakub Dawidek off_t left; 16522d1661a5SPawel Jakub Dawidek u_int atom; 16532d1661a5SPawel Jakub Dawidek 16542d1661a5SPawel Jakub Dawidek if (bp->bio_error != 0) { 16552d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, bp, 16562d1661a5SPawel Jakub Dawidek "Synchronization request failed (error=%d).", 16572d1661a5SPawel Jakub Dawidek bp->bio_error); 16582d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 16592d1661a5SPawel Jakub Dawidek return; 16602d1661a5SPawel Jakub Dawidek } 16612d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Synchronization request finished."); 16622d1661a5SPawel Jakub Dawidek atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 16632d1661a5SPawel Jakub Dawidek dst = src = bp->bio_data; 16642d1661a5SPawel Jakub Dawidek if (disk->d_no == sc->sc_ndisks - 1) { 16652d1661a5SPawel Jakub Dawidek u_int n; 16662d1661a5SPawel Jakub Dawidek 16672d1661a5SPawel Jakub Dawidek /* Parity component. */ 16682d1661a5SPawel Jakub Dawidek for (left = bp->bio_length; left > 0; 16692d1661a5SPawel Jakub Dawidek left -= sc->sc_sectorsize) { 16702d1661a5SPawel Jakub Dawidek bcopy(src, dst, atom); 16712d1661a5SPawel Jakub Dawidek src += atom; 16722d1661a5SPawel Jakub Dawidek for (n = 1; n < sc->sc_ndisks - 1; n++) { 167306b215fdSAlexander Motin g_raid3_xor(src, dst, atom); 16742d1661a5SPawel Jakub Dawidek src += atom; 16752d1661a5SPawel Jakub Dawidek } 16762d1661a5SPawel Jakub Dawidek dst += atom; 16772d1661a5SPawel Jakub Dawidek } 16782d1661a5SPawel Jakub Dawidek } else { 16792d1661a5SPawel Jakub Dawidek /* Regular component. 
*/ 16802d1661a5SPawel Jakub Dawidek src += atom * disk->d_no; 16812d1661a5SPawel Jakub Dawidek for (left = bp->bio_length; left > 0; 16822d1661a5SPawel Jakub Dawidek left -= sc->sc_sectorsize) { 16832d1661a5SPawel Jakub Dawidek bcopy(src, dst, atom); 16842d1661a5SPawel Jakub Dawidek src += sc->sc_sectorsize; 16852d1661a5SPawel Jakub Dawidek dst += atom; 16862d1661a5SPawel Jakub Dawidek } 16872d1661a5SPawel Jakub Dawidek } 16883650be51SPawel Jakub Dawidek bp->bio_driver1 = bp->bio_driver2 = NULL; 16893650be51SPawel Jakub Dawidek bp->bio_pflags = 0; 16902d1661a5SPawel Jakub Dawidek bp->bio_offset /= sc->sc_ndisks - 1; 16912d1661a5SPawel Jakub Dawidek bp->bio_length /= sc->sc_ndisks - 1; 16922d1661a5SPawel Jakub Dawidek bp->bio_cmd = BIO_WRITE; 16932d1661a5SPawel Jakub Dawidek bp->bio_cflags = 0; 16942d1661a5SPawel Jakub Dawidek bp->bio_children = bp->bio_inbed = 0; 16952d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 16963650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 16972d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 16982d1661a5SPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 169979e61493SPawel Jakub Dawidek cp->index++; 17002d1661a5SPawel Jakub Dawidek g_io_request(bp, cp); 17012d1661a5SPawel Jakub Dawidek return; 17022d1661a5SPawel Jakub Dawidek } 17032d1661a5SPawel Jakub Dawidek case BIO_WRITE: 1704d2fb9c62SPawel Jakub Dawidek { 1705d2fb9c62SPawel Jakub Dawidek struct g_raid3_disk_sync *sync; 17063650be51SPawel Jakub Dawidek off_t boffset, moffset; 17073650be51SPawel Jakub Dawidek void *data; 17083650be51SPawel Jakub Dawidek int i; 1709d2fb9c62SPawel Jakub Dawidek 17102d1661a5SPawel Jakub Dawidek if (bp->bio_error != 0) { 17112d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, bp, 17122d1661a5SPawel Jakub Dawidek "Synchronization request failed (error=%d).", 17132d1661a5SPawel Jakub Dawidek bp->bio_error); 17142d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 1715ea973705SPawel Jakub 
Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 17162d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, 17172d1661a5SPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 17182d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 17192d1661a5SPawel Jakub Dawidek return; 17202d1661a5SPawel Jakub Dawidek } 17212d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Synchronization request finished."); 1722d2fb9c62SPawel Jakub Dawidek sync = &disk->d_sync; 17233650be51SPawel Jakub Dawidek if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) || 17243650be51SPawel Jakub Dawidek sync->ds_consumer == NULL || 17253650be51SPawel Jakub Dawidek (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 17263650be51SPawel Jakub Dawidek /* Don't send more synchronization requests. */ 17273650be51SPawel Jakub Dawidek sync->ds_inflight--; 17283650be51SPawel Jakub Dawidek if (sync->ds_bios != NULL) { 1729ef25813dSRuslan Ermilov i = (int)(uintptr_t)bp->bio_caller1; 17303650be51SPawel Jakub Dawidek sync->ds_bios[i] = NULL; 17313650be51SPawel Jakub Dawidek } 17323650be51SPawel Jakub Dawidek free(bp->bio_data, M_RAID3); 17332d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 17343650be51SPawel Jakub Dawidek if (sync->ds_inflight > 0) 1735d2fb9c62SPawel Jakub Dawidek return; 17363650be51SPawel Jakub Dawidek if (sync->ds_consumer == NULL || 17373650be51SPawel Jakub Dawidek (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 17383650be51SPawel Jakub Dawidek return; 17393650be51SPawel Jakub Dawidek } 17402d1661a5SPawel Jakub Dawidek /* 17412d1661a5SPawel Jakub Dawidek * Disk up-to-date, activate it. 17422d1661a5SPawel Jakub Dawidek */ 17432d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE, 17442d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 17452d1661a5SPawel Jakub Dawidek return; 17463650be51SPawel Jakub Dawidek } 17473650be51SPawel Jakub Dawidek 17483650be51SPawel Jakub Dawidek /* Send next synchronization request. 
*/ 17493650be51SPawel Jakub Dawidek data = bp->bio_data; 1750c55f5707SWarner Losh g_reset_bio(bp); 17513650be51SPawel Jakub Dawidek bp->bio_cmd = BIO_READ; 17523650be51SPawel Jakub Dawidek bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1); 1753cd853791SKonstantin Belousov bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset); 17543650be51SPawel Jakub Dawidek sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 17553650be51SPawel Jakub Dawidek bp->bio_done = g_raid3_sync_done; 17563650be51SPawel Jakub Dawidek bp->bio_data = data; 17573650be51SPawel Jakub Dawidek bp->bio_from = sync->ds_consumer; 17583650be51SPawel Jakub Dawidek bp->bio_to = sc->sc_provider; 17593650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Sending synchronization request."); 17603650be51SPawel Jakub Dawidek sync->ds_consumer->index++; 17612d1661a5SPawel Jakub Dawidek /* 17623650be51SPawel Jakub Dawidek * Delay the request if it is colliding with a regular request. 17632d1661a5SPawel Jakub Dawidek */ 17643650be51SPawel Jakub Dawidek if (g_raid3_regular_collision(sc, bp)) 17653650be51SPawel Jakub Dawidek g_raid3_sync_delay(sc, bp); 17663650be51SPawel Jakub Dawidek else 17673650be51SPawel Jakub Dawidek g_io_request(bp, sync->ds_consumer); 17683650be51SPawel Jakub Dawidek 17693650be51SPawel Jakub Dawidek /* Release delayed requests if possible. */ 17703650be51SPawel Jakub Dawidek g_raid3_regular_release(sc); 17713650be51SPawel Jakub Dawidek 17723650be51SPawel Jakub Dawidek /* Find the smallest offset. 
*/ 17733650be51SPawel Jakub Dawidek moffset = sc->sc_mediasize; 17743650be51SPawel Jakub Dawidek for (i = 0; i < g_raid3_syncreqs; i++) { 17753650be51SPawel Jakub Dawidek bp = sync->ds_bios[i]; 17763650be51SPawel Jakub Dawidek boffset = bp->bio_offset; 17773650be51SPawel Jakub Dawidek if (bp->bio_cmd == BIO_WRITE) 17783650be51SPawel Jakub Dawidek boffset *= sc->sc_ndisks - 1; 17793650be51SPawel Jakub Dawidek if (boffset < moffset) 17803650be51SPawel Jakub Dawidek moffset = boffset; 17813650be51SPawel Jakub Dawidek } 1782cd853791SKonstantin Belousov if (sync->ds_offset_done + maxphys * 100 < moffset) { 17833650be51SPawel Jakub Dawidek /* Update offset_done on every 100 blocks. */ 17843650be51SPawel Jakub Dawidek sync->ds_offset_done = moffset; 17852d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 17862d1661a5SPawel Jakub Dawidek } 17872d1661a5SPawel Jakub Dawidek return; 1788d2fb9c62SPawel Jakub Dawidek } 17892d1661a5SPawel Jakub Dawidek default: 17902d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 17912d1661a5SPawel Jakub Dawidek bp->bio_cmd, sc->sc_name)); 17922d1661a5SPawel Jakub Dawidek break; 17932d1661a5SPawel Jakub Dawidek } 17942d1661a5SPawel Jakub Dawidek } 17952d1661a5SPawel Jakub Dawidek 17962d1661a5SPawel Jakub Dawidek static int 17972d1661a5SPawel Jakub Dawidek g_raid3_register_request(struct bio *pbp) 17982d1661a5SPawel Jakub Dawidek { 17992d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 18002d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 18012d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 1802ee40c7aaSPawel Jakub Dawidek struct bio *cbp, *tmpbp; 18032d1661a5SPawel Jakub Dawidek off_t offset, length; 1804fa6a7837SDavid E. O'Brien u_int n, ndisks; 1805dba915cfSPawel Jakub Dawidek int round_robin, verify; 18062d1661a5SPawel Jakub Dawidek 1807fa6a7837SDavid E. 
O'Brien ndisks = 0; 180833cb9b3cSMark Johnston sc = pbp->bio_to->private; 18092d1661a5SPawel Jakub Dawidek if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 && 18102d1661a5SPawel Jakub Dawidek sc->sc_syncdisk == NULL) { 18112d1661a5SPawel Jakub Dawidek g_io_deliver(pbp, EIO); 18122d1661a5SPawel Jakub Dawidek return (0); 18132d1661a5SPawel Jakub Dawidek } 18142d1661a5SPawel Jakub Dawidek g_raid3_init_bio(pbp); 18152d1661a5SPawel Jakub Dawidek length = pbp->bio_length / (sc->sc_ndisks - 1); 18162d1661a5SPawel Jakub Dawidek offset = pbp->bio_offset / (sc->sc_ndisks - 1); 1817dba915cfSPawel Jakub Dawidek round_robin = verify = 0; 18182d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 18192d1661a5SPawel Jakub Dawidek case BIO_READ: 1820dba915cfSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 1821dba915cfSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1822dba915cfSPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY; 1823dba915cfSPawel Jakub Dawidek verify = 1; 1824dba915cfSPawel Jakub Dawidek ndisks = sc->sc_ndisks; 1825dba915cfSPawel Jakub Dawidek } else { 1826dba915cfSPawel Jakub Dawidek verify = 0; 18272d1661a5SPawel Jakub Dawidek ndisks = sc->sc_ndisks - 1; 1828dba915cfSPawel Jakub Dawidek } 1829dba915cfSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 && 1830dba915cfSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1831dba915cfSPawel Jakub Dawidek round_robin = 1; 1832dba915cfSPawel Jakub Dawidek } else { 1833dba915cfSPawel Jakub Dawidek round_robin = 0; 1834dba915cfSPawel Jakub Dawidek } 1835dba915cfSPawel Jakub Dawidek KASSERT(!round_robin || !verify, 1836dba915cfSPawel Jakub Dawidek ("ROUND-ROBIN and VERIFY are mutually exclusive.")); 1837f5a2f7feSPawel Jakub Dawidek pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1]; 18382d1661a5SPawel Jakub Dawidek break; 18392d1661a5SPawel Jakub Dawidek case BIO_WRITE: 18402d1661a5SPawel Jakub Dawidek case 
BIO_DELETE: 18413650be51SPawel Jakub Dawidek /* 18423650be51SPawel Jakub Dawidek * Delay the request if it is colliding with a synchronization 18433650be51SPawel Jakub Dawidek * request. 18443650be51SPawel Jakub Dawidek */ 18453650be51SPawel Jakub Dawidek if (g_raid3_sync_collision(sc, pbp)) { 18463650be51SPawel Jakub Dawidek g_raid3_regular_delay(sc, pbp); 18473650be51SPawel Jakub Dawidek return (0); 18483650be51SPawel Jakub Dawidek } 1849d2fb9c62SPawel Jakub Dawidek 18504d006a98SPawel Jakub Dawidek if (sc->sc_idle) 18514d006a98SPawel Jakub Dawidek g_raid3_unidle(sc); 18520962f942SPawel Jakub Dawidek else 185301f1f41cSPawel Jakub Dawidek sc->sc_last_write = time_uptime; 18544d006a98SPawel Jakub Dawidek 18552d1661a5SPawel Jakub Dawidek ndisks = sc->sc_ndisks; 18562d1661a5SPawel Jakub Dawidek break; 18572d1661a5SPawel Jakub Dawidek } 18582d1661a5SPawel Jakub Dawidek for (n = 0; n < ndisks; n++) { 18592d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 18602d1661a5SPawel Jakub Dawidek cbp = g_raid3_clone_bio(sc, pbp); 18612d1661a5SPawel Jakub Dawidek if (cbp == NULL) { 18622d1661a5SPawel Jakub Dawidek while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) 18632d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 1864a65a0da2SPawel Jakub Dawidek /* 1865a65a0da2SPawel Jakub Dawidek * To prevent deadlock, we must run back up 1866a65a0da2SPawel Jakub Dawidek * with the ENOMEM for failed requests of any 1867a65a0da2SPawel Jakub Dawidek * of our consumers. Our own sync requests 1868a65a0da2SPawel Jakub Dawidek * can stick around, as they are finite. 
1869a65a0da2SPawel Jakub Dawidek */ 1870a65a0da2SPawel Jakub Dawidek if ((pbp->bio_cflags & 1871a65a0da2SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_REGULAR) != 0) { 1872a65a0da2SPawel Jakub Dawidek g_io_deliver(pbp, ENOMEM); 1873a65a0da2SPawel Jakub Dawidek return (0); 1874a65a0da2SPawel Jakub Dawidek } 18752d1661a5SPawel Jakub Dawidek return (ENOMEM); 18762d1661a5SPawel Jakub Dawidek } 18772d1661a5SPawel Jakub Dawidek cbp->bio_offset = offset; 18782d1661a5SPawel Jakub Dawidek cbp->bio_length = length; 18792d1661a5SPawel Jakub Dawidek cbp->bio_done = g_raid3_done; 18802d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 18812d1661a5SPawel Jakub Dawidek case BIO_READ: 18822d1661a5SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) { 18832d1661a5SPawel Jakub Dawidek /* 18842d1661a5SPawel Jakub Dawidek * Replace invalid component with the parity 18852d1661a5SPawel Jakub Dawidek * component. 18862d1661a5SPawel Jakub Dawidek */ 18872d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 18882d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 18892d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 1890f5a2f7feSPawel Jakub Dawidek } else if (round_robin && 1891f5a2f7feSPawel Jakub Dawidek disk->d_no == sc->sc_round_robin) { 1892f5a2f7feSPawel Jakub Dawidek /* 1893f5a2f7feSPawel Jakub Dawidek * In round-robin mode skip one data component 1894f5a2f7feSPawel Jakub Dawidek * and use parity component when reading. 
1895f5a2f7feSPawel Jakub Dawidek */ 1896f5a2f7feSPawel Jakub Dawidek pbp->bio_driver2 = disk; 1897f5a2f7feSPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 1898f5a2f7feSPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1899f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin++; 1900f5a2f7feSPawel Jakub Dawidek round_robin = 0; 1901dba915cfSPawel Jakub Dawidek } else if (verify && disk->d_no == sc->sc_ndisks - 1) { 1902dba915cfSPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 19032d1661a5SPawel Jakub Dawidek } 19042d1661a5SPawel Jakub Dawidek break; 19052d1661a5SPawel Jakub Dawidek case BIO_WRITE: 19062d1661a5SPawel Jakub Dawidek case BIO_DELETE: 19072d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 19082d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 19092d1661a5SPawel Jakub Dawidek if (n == ndisks - 1) { 19102d1661a5SPawel Jakub Dawidek /* 19112d1661a5SPawel Jakub Dawidek * Active parity component, mark it as such. 19122d1661a5SPawel Jakub Dawidek */ 19132d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= 19142d1661a5SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_PARITY; 19152d1661a5SPawel Jakub Dawidek } 19162d1661a5SPawel Jakub Dawidek } else { 19172d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 19182d1661a5SPawel Jakub Dawidek if (n == ndisks - 1) { 19192d1661a5SPawel Jakub Dawidek /* 19202d1661a5SPawel Jakub Dawidek * Parity component is not connected, 19212d1661a5SPawel Jakub Dawidek * so destroy its request. 
19222d1661a5SPawel Jakub Dawidek */ 19232d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= 19242d1661a5SPawel Jakub Dawidek G_RAID3_BIO_PFLAG_NOPARITY; 19252d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 19262d1661a5SPawel Jakub Dawidek cbp = NULL; 19272d1661a5SPawel Jakub Dawidek } else { 19282d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= 19292d1661a5SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_NODISK; 19302d1661a5SPawel Jakub Dawidek disk = NULL; 19312d1661a5SPawel Jakub Dawidek } 19322d1661a5SPawel Jakub Dawidek } 19332d1661a5SPawel Jakub Dawidek break; 19342d1661a5SPawel Jakub Dawidek } 19352d1661a5SPawel Jakub Dawidek if (cbp != NULL) 19362d1661a5SPawel Jakub Dawidek cbp->bio_caller2 = disk; 19372d1661a5SPawel Jakub Dawidek } 19382d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 19392d1661a5SPawel Jakub Dawidek case BIO_READ: 1940f5a2f7feSPawel Jakub Dawidek if (round_robin) { 1941f5a2f7feSPawel Jakub Dawidek /* 1942f5a2f7feSPawel Jakub Dawidek * If we are in round-robin mode and 'round_robin' is 1943f5a2f7feSPawel Jakub Dawidek * still 1, it means, that we skipped parity component 1944f5a2f7feSPawel Jakub Dawidek * for this read and must reset sc_round_robin field. 
1945f5a2f7feSPawel Jakub Dawidek */ 1946f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin = 0; 1947f5a2f7feSPawel Jakub Dawidek } 1948ee40c7aaSPawel Jakub Dawidek G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 19492d1661a5SPawel Jakub Dawidek disk = cbp->bio_caller2; 19502d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 19512d1661a5SPawel Jakub Dawidek cbp->bio_to = cp->provider; 19522d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Sending request."); 19533650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 19542d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", 19552d1661a5SPawel Jakub Dawidek cp->provider->name, cp->acr, cp->acw, cp->ace)); 195679e61493SPawel Jakub Dawidek cp->index++; 19572d1661a5SPawel Jakub Dawidek g_io_request(cbp, cp); 19582d1661a5SPawel Jakub Dawidek } 19592d1661a5SPawel Jakub Dawidek break; 19602d1661a5SPawel Jakub Dawidek case BIO_WRITE: 19612d1661a5SPawel Jakub Dawidek case BIO_DELETE: 19622d1661a5SPawel Jakub Dawidek /* 19633650be51SPawel Jakub Dawidek * Put request onto inflight queue, so we can check if new 19643650be51SPawel Jakub Dawidek * synchronization requests don't collide with it. 19653650be51SPawel Jakub Dawidek */ 19663650be51SPawel Jakub Dawidek bioq_insert_tail(&sc->sc_inflight, pbp); 19673650be51SPawel Jakub Dawidek 19683650be51SPawel Jakub Dawidek /* 19692d1661a5SPawel Jakub Dawidek * Bump syncid on first write. 
19702d1661a5SPawel Jakub Dawidek */ 1971ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) { 1972a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 1973d97d5ee9SPawel Jakub Dawidek g_raid3_bump_syncid(sc); 19742d1661a5SPawel Jakub Dawidek } 19752d1661a5SPawel Jakub Dawidek g_raid3_scatter(pbp); 19762d1661a5SPawel Jakub Dawidek break; 19772d1661a5SPawel Jakub Dawidek } 19782d1661a5SPawel Jakub Dawidek return (0); 19792d1661a5SPawel Jakub Dawidek } 19802d1661a5SPawel Jakub Dawidek 19812d1661a5SPawel Jakub Dawidek static int 19822d1661a5SPawel Jakub Dawidek g_raid3_can_destroy(struct g_raid3_softc *sc) 19832d1661a5SPawel Jakub Dawidek { 19842d1661a5SPawel Jakub Dawidek struct g_geom *gp; 19852d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 19862d1661a5SPawel Jakub Dawidek 19872d1661a5SPawel Jakub Dawidek g_topology_assert(); 19882d1661a5SPawel Jakub Dawidek gp = sc->sc_geom; 198918486a5eSPawel Jakub Dawidek if (gp->softc == NULL) 199018486a5eSPawel Jakub Dawidek return (1); 19912d1661a5SPawel Jakub Dawidek LIST_FOREACH(cp, &gp->consumer, consumer) { 19922d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 19932d1661a5SPawel Jakub Dawidek return (0); 19942d1661a5SPawel Jakub Dawidek } 19952d1661a5SPawel Jakub Dawidek gp = sc->sc_sync.ds_geom; 19962d1661a5SPawel Jakub Dawidek LIST_FOREACH(cp, &gp->consumer, consumer) { 19972d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 19982d1661a5SPawel Jakub Dawidek return (0); 19992d1661a5SPawel Jakub Dawidek } 20002d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 20012d1661a5SPawel Jakub Dawidek sc->sc_name); 20022d1661a5SPawel Jakub Dawidek return (1); 20032d1661a5SPawel Jakub Dawidek } 20042d1661a5SPawel Jakub Dawidek 20052d1661a5SPawel Jakub Dawidek static int 20062d1661a5SPawel Jakub Dawidek g_raid3_try_destroy(struct g_raid3_softc *sc) 20072d1661a5SPawel Jakub Dawidek { 20082d1661a5SPawel Jakub Dawidek 20093650be51SPawel 
Jakub Dawidek g_topology_assert_not(); 20103650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 20113650be51SPawel Jakub Dawidek 20124ed854e8SPawel Jakub Dawidek if (sc->sc_rootmount != NULL) { 20134ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 20144ed854e8SPawel Jakub Dawidek sc->sc_rootmount); 20154ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 20164ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 20174ed854e8SPawel Jakub Dawidek } 20184ed854e8SPawel Jakub Dawidek 20192d1661a5SPawel Jakub Dawidek g_topology_lock(); 20202d1661a5SPawel Jakub Dawidek if (!g_raid3_can_destroy(sc)) { 20212d1661a5SPawel Jakub Dawidek g_topology_unlock(); 20222d1661a5SPawel Jakub Dawidek return (0); 20232d1661a5SPawel Jakub Dawidek } 202418486a5eSPawel Jakub Dawidek sc->sc_geom->softc = NULL; 202518486a5eSPawel Jakub Dawidek sc->sc_sync.ds_geom->softc = NULL; 2026a245a548SPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) { 20272d1661a5SPawel Jakub Dawidek g_topology_unlock(); 20282d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 20292d1661a5SPawel Jakub Dawidek &sc->sc_worker); 20303650be51SPawel Jakub Dawidek /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 20313650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 20322d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_worker); 20332d1661a5SPawel Jakub Dawidek sc->sc_worker = NULL; 20342d1661a5SPawel Jakub Dawidek } else { 20352d1661a5SPawel Jakub Dawidek g_topology_unlock(); 2036*955f213fSMark Johnston g_raid3_destroy_device(sc); 20372d1661a5SPawel Jakub Dawidek } 20382d1661a5SPawel Jakub Dawidek return (1); 20392d1661a5SPawel Jakub Dawidek } 20402d1661a5SPawel Jakub Dawidek 20412d1661a5SPawel Jakub Dawidek /* 20422d1661a5SPawel Jakub Dawidek * Worker thread. 
20432d1661a5SPawel Jakub Dawidek */ 20442d1661a5SPawel Jakub Dawidek static void 20452d1661a5SPawel Jakub Dawidek g_raid3_worker(void *arg) 20462d1661a5SPawel Jakub Dawidek { 20472d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 20482d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 20492d1661a5SPawel Jakub Dawidek struct bio *bp; 20500962f942SPawel Jakub Dawidek int timeout; 20512d1661a5SPawel Jakub Dawidek 20522d1661a5SPawel Jakub Dawidek sc = arg; 2053982d11f8SJeff Roberson thread_lock(curthread); 205463710c4dSJohn Baldwin sched_prio(curthread, PRIBIO); 2055982d11f8SJeff Roberson thread_unlock(curthread); 20562d1661a5SPawel Jakub Dawidek 20573650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 20582d1661a5SPawel Jakub Dawidek for (;;) { 20592d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: Let's see...", __func__); 20602d1661a5SPawel Jakub Dawidek /* 20612d1661a5SPawel Jakub Dawidek * First take a look at events. 20622d1661a5SPawel Jakub Dawidek * This is important to handle events before any I/O requests. 20632d1661a5SPawel Jakub Dawidek */ 20642d1661a5SPawel Jakub Dawidek ep = g_raid3_event_get(sc); 20653650be51SPawel Jakub Dawidek if (ep != NULL) { 2066d97d5ee9SPawel Jakub Dawidek g_raid3_event_remove(sc, ep); 20672d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) { 20682d1661a5SPawel Jakub Dawidek /* Update only device status. */ 20692d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, 20702d1661a5SPawel Jakub Dawidek "Running event for device %s.", 20712d1661a5SPawel Jakub Dawidek sc->sc_name); 20722d1661a5SPawel Jakub Dawidek ep->e_error = 0; 2073d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(sc, 1); 20742d1661a5SPawel Jakub Dawidek } else { 20752d1661a5SPawel Jakub Dawidek /* Update disk status. 
*/ 20762d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "Running event for disk %s.", 20772d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(ep->e_disk)); 20782d1661a5SPawel Jakub Dawidek ep->e_error = g_raid3_update_disk(ep->e_disk, 2079d97d5ee9SPawel Jakub Dawidek ep->e_state); 20802d1661a5SPawel Jakub Dawidek if (ep->e_error == 0) 2081d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(sc, 0); 20822d1661a5SPawel Jakub Dawidek } 20832d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) { 20842d1661a5SPawel Jakub Dawidek KASSERT(ep->e_error == 0, 20852d1661a5SPawel Jakub Dawidek ("Error cannot be handled.")); 20862d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 20872d1661a5SPawel Jakub Dawidek } else { 20882d1661a5SPawel Jakub Dawidek ep->e_flags |= G_RAID3_EVENT_DONE; 20892d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 20902d1661a5SPawel Jakub Dawidek ep); 20912d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 20922d1661a5SPawel Jakub Dawidek wakeup(ep); 20932d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 20942d1661a5SPawel Jakub Dawidek } 20952d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & 20962d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 20973650be51SPawel Jakub Dawidek if (g_raid3_try_destroy(sc)) { 20983650be51SPawel Jakub Dawidek curthread->td_pflags &= ~TDP_GEOM; 20993650be51SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Thread exiting."); 21003745c395SJulian Elischer kproc_exit(0); 21012d1661a5SPawel Jakub Dawidek } 21023650be51SPawel Jakub Dawidek } 21032d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__); 21042d1661a5SPawel Jakub Dawidek continue; 21052d1661a5SPawel Jakub Dawidek } 21062d1661a5SPawel Jakub Dawidek /* 21070962f942SPawel Jakub Dawidek * Check if we can mark array as CLEAN and if we can't take 21080962f942SPawel Jakub Dawidek * how much seconds should we wait. 
21090962f942SPawel Jakub Dawidek */ 21103650be51SPawel Jakub Dawidek timeout = g_raid3_idle(sc, -1); 21110962f942SPawel Jakub Dawidek /* 21122d1661a5SPawel Jakub Dawidek * Now I/O requests. 21132d1661a5SPawel Jakub Dawidek */ 21142d1661a5SPawel Jakub Dawidek /* Get first request from the queue. */ 21152d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 21162d1661a5SPawel Jakub Dawidek bp = bioq_first(&sc->sc_queue); 21172d1661a5SPawel Jakub Dawidek if (bp == NULL) { 21182d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & 21192d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 21202d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 21213650be51SPawel Jakub Dawidek if (g_raid3_try_destroy(sc)) { 21223650be51SPawel Jakub Dawidek curthread->td_pflags &= ~TDP_GEOM; 2123d7fad9f6SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Thread exiting."); 21243745c395SJulian Elischer kproc_exit(0); 21253650be51SPawel Jakub Dawidek } 21262d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 21272d1661a5SPawel Jakub Dawidek } 21283650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 2129a2fe5c66SPawel Jakub Dawidek /* 2130a2fe5c66SPawel Jakub Dawidek * XXX: We can miss an event here, because an event 2131a2fe5c66SPawel Jakub Dawidek * can be added without sx-device-lock and without 2132a2fe5c66SPawel Jakub Dawidek * mtx-queue-lock. Maybe I should just stop using 2133a2fe5c66SPawel Jakub Dawidek * dedicated mutex for events synchronization and 2134a2fe5c66SPawel Jakub Dawidek * stick with the queue lock? 2135a2fe5c66SPawel Jakub Dawidek * The event will hang here until next I/O request 2136a2fe5c66SPawel Jakub Dawidek * or next event is received. 
2137a2fe5c66SPawel Jakub Dawidek */ 21380962f942SPawel Jakub Dawidek MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1", 21390962f942SPawel Jakub Dawidek timeout * hz); 21403650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 21419bb09163SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__); 21422d1661a5SPawel Jakub Dawidek continue; 21432d1661a5SPawel Jakub Dawidek } 214484edb86dSPawel Jakub Dawidek process: 21452d1661a5SPawel Jakub Dawidek bioq_remove(&sc->sc_queue, bp); 21462d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 21472d1661a5SPawel Jakub Dawidek 21488e007c52SPawel Jakub Dawidek if (bp->bio_from->geom == sc->sc_sync.ds_geom && 21498e007c52SPawel Jakub Dawidek (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) { 21508e007c52SPawel Jakub Dawidek g_raid3_sync_request(bp); /* READ */ 21518e007c52SPawel Jakub Dawidek } else if (bp->bio_to != sc->sc_provider) { 21523650be51SPawel Jakub Dawidek if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0) 21532d1661a5SPawel Jakub Dawidek g_raid3_regular_request(bp); 21543650be51SPawel Jakub Dawidek else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) 21558e007c52SPawel Jakub Dawidek g_raid3_sync_request(bp); /* WRITE */ 2156de6f1c7cSPawel Jakub Dawidek else { 2157de6f1c7cSPawel Jakub Dawidek KASSERT(0, 21589a8fa125SWarner Losh ("Invalid request cflags=0x%hx to=%s.", 2159de6f1c7cSPawel Jakub Dawidek bp->bio_cflags, bp->bio_to->name)); 2160de6f1c7cSPawel Jakub Dawidek } 2161de6f1c7cSPawel Jakub Dawidek } else if (g_raid3_register_request(bp) != 0) { 21622d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 21633650be51SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 216484edb86dSPawel Jakub Dawidek /* 216584edb86dSPawel Jakub Dawidek * We are short in memory, let see if there are finished 216684edb86dSPawel Jakub Dawidek * request we can free. 
216784edb86dSPawel Jakub Dawidek */ 216884edb86dSPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 216984edb86dSPawel Jakub Dawidek if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) 217084edb86dSPawel Jakub Dawidek goto process; 21712d1661a5SPawel Jakub Dawidek } 217284edb86dSPawel Jakub Dawidek /* 217384edb86dSPawel Jakub Dawidek * No finished regular request, so at least keep 217484edb86dSPawel Jakub Dawidek * synchronization running. 217584edb86dSPawel Jakub Dawidek */ 217684edb86dSPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 217784edb86dSPawel Jakub Dawidek if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) 217884edb86dSPawel Jakub Dawidek goto process; 217984edb86dSPawel Jakub Dawidek } 218084edb86dSPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 218184edb86dSPawel Jakub Dawidek MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP, 218284edb86dSPawel Jakub Dawidek "r3:lowmem", hz / 10); 218384edb86dSPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 21842d1661a5SPawel Jakub Dawidek } 2185d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__); 21862d1661a5SPawel Jakub Dawidek } 21872d1661a5SPawel Jakub Dawidek } 21882d1661a5SPawel Jakub Dawidek 21892d1661a5SPawel Jakub Dawidek static void 21900962f942SPawel Jakub Dawidek g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk) 21912d1661a5SPawel Jakub Dawidek { 21922d1661a5SPawel Jakub Dawidek 21933650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 2194501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 2195501250baSPawel Jakub Dawidek return; 21960962f942SPawel Jakub Dawidek if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) { 21972d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.", 21983650be51SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 21992d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 
22000962f942SPawel Jakub Dawidek } else if (sc->sc_idle && 22010962f942SPawel Jakub Dawidek (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) { 22022d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.", 22033650be51SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 22042d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 22052d1661a5SPawel Jakub Dawidek } 22062d1661a5SPawel Jakub Dawidek } 22072d1661a5SPawel Jakub Dawidek 22082d1661a5SPawel Jakub Dawidek static void 22092d1661a5SPawel Jakub Dawidek g_raid3_sync_start(struct g_raid3_softc *sc) 22102d1661a5SPawel Jakub Dawidek { 22112d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 22123650be51SPawel Jakub Dawidek struct g_consumer *cp; 22133650be51SPawel Jakub Dawidek struct bio *bp; 22142cc5a480SMateusz Guzik int error __diagused; 22152d1661a5SPawel Jakub Dawidek u_int n; 22162d1661a5SPawel Jakub Dawidek 22173650be51SPawel Jakub Dawidek g_topology_assert_not(); 22183650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 22192d1661a5SPawel Jakub Dawidek 22202d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 22212d1661a5SPawel Jakub Dawidek ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 22222d1661a5SPawel Jakub Dawidek sc->sc_state)); 22232d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).", 22242d1661a5SPawel Jakub Dawidek sc->sc_name, sc->sc_state)); 22252d1661a5SPawel Jakub Dawidek disk = NULL; 22262d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 22272d1661a5SPawel Jakub Dawidek if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING) 22282d1661a5SPawel Jakub Dawidek continue; 22292d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 22302d1661a5SPawel Jakub Dawidek break; 22312d1661a5SPawel Jakub Dawidek } 22322d1661a5SPawel Jakub Dawidek if (disk == NULL) 22332d1661a5SPawel Jakub Dawidek return; 22342d1661a5SPawel Jakub Dawidek 
22353650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 22363650be51SPawel Jakub Dawidek g_topology_lock(); 22373650be51SPawel Jakub Dawidek cp = g_new_consumer(sc->sc_sync.ds_geom); 22383650be51SPawel Jakub Dawidek error = g_attach(cp, sc->sc_provider); 22393650be51SPawel Jakub Dawidek KASSERT(error == 0, 22403650be51SPawel Jakub Dawidek ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 22413650be51SPawel Jakub Dawidek error = g_access(cp, 1, 0, 0); 22423650be51SPawel Jakub Dawidek KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 22433650be51SPawel Jakub Dawidek g_topology_unlock(); 22443650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 22453650be51SPawel Jakub Dawidek 22462d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 22472d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 2248501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0) 22492d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 22502d1661a5SPawel Jakub Dawidek KASSERT(disk->d_sync.ds_consumer == NULL, 22512d1661a5SPawel Jakub Dawidek ("Sync consumer already exists (device=%s, disk=%s).", 22522d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk))); 22533650be51SPawel Jakub Dawidek 22543650be51SPawel Jakub Dawidek disk->d_sync.ds_consumer = cp; 22552d1661a5SPawel Jakub Dawidek disk->d_sync.ds_consumer->private = disk; 225679e61493SPawel Jakub Dawidek disk->d_sync.ds_consumer->index = 0; 22572d1661a5SPawel Jakub Dawidek sc->sc_syncdisk = disk; 22583650be51SPawel Jakub Dawidek 22593650be51SPawel Jakub Dawidek /* 22603650be51SPawel Jakub Dawidek * Allocate memory for synchronization bios and initialize them. 
22613650be51SPawel Jakub Dawidek */ 22623650be51SPawel Jakub Dawidek disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs, 22633650be51SPawel Jakub Dawidek M_RAID3, M_WAITOK); 22643650be51SPawel Jakub Dawidek for (n = 0; n < g_raid3_syncreqs; n++) { 22653650be51SPawel Jakub Dawidek bp = g_alloc_bio(); 22663650be51SPawel Jakub Dawidek disk->d_sync.ds_bios[n] = bp; 22673650be51SPawel Jakub Dawidek bp->bio_parent = NULL; 22683650be51SPawel Jakub Dawidek bp->bio_cmd = BIO_READ; 2269cd853791SKonstantin Belousov bp->bio_data = malloc(maxphys, M_RAID3, M_WAITOK); 22703650be51SPawel Jakub Dawidek bp->bio_cflags = 0; 22713650be51SPawel Jakub Dawidek bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1); 2272cd853791SKonstantin Belousov bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset); 22733650be51SPawel Jakub Dawidek disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 22743650be51SPawel Jakub Dawidek bp->bio_done = g_raid3_sync_done; 22753650be51SPawel Jakub Dawidek bp->bio_from = disk->d_sync.ds_consumer; 22763650be51SPawel Jakub Dawidek bp->bio_to = sc->sc_provider; 2277ef25813dSRuslan Ermilov bp->bio_caller1 = (void *)(uintptr_t)n; 22783650be51SPawel Jakub Dawidek } 22793650be51SPawel Jakub Dawidek 22803650be51SPawel Jakub Dawidek /* Set the number of in-flight synchronization requests. */ 22813650be51SPawel Jakub Dawidek disk->d_sync.ds_inflight = g_raid3_syncreqs; 22823650be51SPawel Jakub Dawidek 22833650be51SPawel Jakub Dawidek /* 22843650be51SPawel Jakub Dawidek * Fire off first synchronization requests. 
 */
	for (n = 0; n < g_raid3_syncreqs; n++) {
		bp = disk->d_sync.ds_bios[n];
		G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
		disk->d_sync.ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_raid3_regular_collision(sc, bp))
			g_raid3_sync_delay(sc, bp);
		else
			g_io_request(bp, disk->d_sync.ds_consumer);
	}
}

/*
 * Stop synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 *
 * Tears down the per-disk synchronization state created by
 * g_raid3_sync_start(): frees the preallocated sync bios and detaches the
 * dedicated sync consumer.  Must be called with sc_lock held and without
 * the topology lock.
 */
static void
g_raid3_sync_stop(struct g_raid3_softc *sc, int type)
{
	struct g_raid3_disk *disk;
	struct g_consumer *cp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_LOCKED);

	/* Synchronization only ever runs while the device is DEGRADED. */
	KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
	    ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
	    sc->sc_state));
	disk = sc->sc_syncdisk;
	sc->sc_syncdisk = NULL;
	KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name));
	KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
	    g_raid3_disk_state2str(disk->d_state)));
	/* Nothing to tear down if the sync consumer was never attached. */
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_raid3_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_raid3_get_diskname(disk));
	}
	/*
	 * NOTE(review): only the ds_bios pointer array is freed here; the
	 * per-bio data buffers appear to be released elsewhere (presumably
	 * by the sync completion path) — confirm against g_raid3_sync_done().
	 */
	free(disk->d_sync.ds_bios, M_RAID3);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
	/*
	 * Killing the consumer requires the topology lock; drop sc_lock
	 * first so lock order (topology before sc_lock) is preserved.
	 */
	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
	g_topology_lock();
	g_raid3_kill_consumer(sc, cp);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
}

/*
 * Create and announce the "raid3/<name>" provider for a device that has
 * become DEGRADED or COMPLETE.  Stripe geometry is derived from the widest
 * component stripe, scaled by the number of data (non-parity) disks.
 * Starts synchronization if the device comes up DEGRADED.
 */
static void
g_raid3_launch_provider(struct g_raid3_softc *sc)
{
	struct g_provider *pp;
	struct g_raid3_disk *disk;
	int n;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_topology_lock();
	pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name);
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;
	pp->stripesize = 0;
	pp->stripeoffset = 0;
	/* Adopt the largest stripesize reported by any component. */
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_consumer && disk->d_consumer->provider &&
		    disk->d_consumer->provider->stripesize > pp->stripesize) {
			pp->stripesize = disk->d_consumer->provider->stripesize;
			pp->stripeoffset = disk->d_consumer->provider->stripeoffset;
		}
	}
	/* Scale by the data-disk count (one component holds parity). */
	pp->stripesize *= sc->sc_ndisks - 1;
	pp->stripeoffset *= sc->sc_ndisks - 1;
	pp->private = sc;
	sc->sc_refcnt++;	/* Provider holds a reference on the softc. */
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
	    g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks);

	if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED)
		g_raid3_sync_start(sc);
}

/*
 * Destroy the device's provider: fail it with ENXIO, complete all queued
 * bios with ENXIO, and wither the provider.  Also aborts any in-progress
 * synchronization.  Must be called without the topology lock.
 */
static void
g_raid3_destroy_provider(struct g_raid3_softc *sc)
{
	struct bio *bp;

	g_topology_assert_not();
	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
	    sc->sc_name));

	g_topology_lock();
	g_error_provider(sc->sc_provider, ENXIO);
	/* Drain the pending request queue; nothing can be served anymore. */
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
		bioq_remove(&sc->sc_queue, bp);
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
	    sc->sc_provider->name);
	g_wither_provider(sc->sc_provider, ENXIO);
	g_topology_unlock();
	sc->sc_provider = NULL;
	/* type 1: synchronization stopped (not finished). */
	if (sc->sc_syncdisk != NULL)
		g_raid3_sync_stop(sc, 1);
}
/*
 * Startup-timeout handler: force the device to start even though not all
 * components have appeared.  Dispatches the preallocated timeout event
 * (sc_timeout_event) so g_raid3_update_device() runs with force == 1.
 * NOTE(review): presumably scheduled via sc->sc_callout — confirm against
 * the code that arms the callout.
 */
static void
g_raid3_go(void *arg)
{
	struct g_raid3_softc *sc;
	struct g_raid3_event *ep;

	sc = arg;
	G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
	/* Consume the preallocated event; it is dispatched exactly once. */
	ep = sc->sc_timeout_event;
	sc->sc_timeout_event = NULL;
	g_raid3_event_dispatch(ep, sc, 0,
	    G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE);
}

/*
 * Cancel the startup timeout and release its preallocated event.
 * Safe to call when the callout already fired: g_raid3_go() nulls
 * sc_timeout_event, and g_raid3_event_free() is then given NULL —
 * NOTE(review): assumes g_raid3_event_free(NULL) is a no-op; verify.
 */
static void
g_raid3_timeout_drain(struct g_raid3_softc *sc)
{
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	callout_drain(&sc->sc_callout);
	g_raid3_event_free(sc->sc_timeout_event);
	sc->sc_timeout_event = NULL;
}

/*
 * Decide the target state for a newly arrived disk by comparing its
 * recorded syncid with the device's syncid.  Returns the chosen
 * G_RAID3_DISK_STATE_* value; returns G_RAID3_DISK_STATE_NONE after
 * destroying the disk when the disk is fresher than the running device.
 */
static u_int
g_raid3_determine_state(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	u_int state;

	sc = disk->d_softc;
	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
		if ((disk->d_flags &
		    G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) {
			/* Disk does not need synchronization. */
			state = G_RAID3_DISK_STATE_ACTIVE;
		} else {
			if ((sc->sc_flags &
			    G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
			    (disk->d_flags &
			    G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
				/*
				 * We can start synchronization from
				 * the stored offset.
				 */
				state = G_RAID3_DISK_STATE_SYNCHRONIZING;
			} else {
				state = G_RAID3_DISK_STATE_STALE;
			}
		}
	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
		/*
		 * Reset all synchronization data for this disk,
		 * because if it even was synchronized, it was
		 * synchronized to disks with different syncid.
		 */
		disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		disk->d_sync.ds_syncid = sc->sc_syncid;
		if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
		    (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
			state = G_RAID3_DISK_STATE_SYNCHRONIZING;
		} else {
			state = G_RAID3_DISK_STATE_STALE;
		}
	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
		/*
		 * Not good, NOT GOOD!
		 * It means that device was started on stale disks
		 * and more fresh disk just arrive.
		 * If there were writes, device is broken, sorry.
		 * I think the best choice here is don't touch
		 * this disk and inform the user loudly.
		 */
		G_RAID3_DEBUG(0, "Device %s was started before the freshest "
		    "disk (%s) arrives!! It will not be connected to the "
		    "running device.", sc->sc_name,
		    g_raid3_get_diskname(disk));
		g_raid3_destroy_disk(disk);
		state = G_RAID3_DISK_STATE_NONE;
		/* Return immediately, because disk was destroyed. */
		return (state);
	}
	G_RAID3_DEBUG(3, "State for %s disk: %s.",
	    g_raid3_get_diskname(disk), g_raid3_disk_state2str(state));
	return (state);
}

/*
 * Update device state.
 *
 * Device-level state machine, run from the worker with sc_lock held
 * exclusively.  'force' is non-zero when the startup timeout expired,
 * which allows the device to start with one component missing.
 */
static void
g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force)
{
	struct g_raid3_disk *disk;
	u_int state;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	switch (sc->sc_state) {
	case G_RAID3_DEVICE_STATE_STARTING:
	    {
		u_int n, ndirty, ndisks, genid, syncid;

		KASSERT(sc->sc_provider == NULL,
		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
		/*
		 * Are we ready? We are, if all disks are connected or
		 * one disk is missing and 'force' is true.
		 */
		if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) {
			if (!force)
				g_raid3_timeout_drain(sc);
		} else {
			if (force) {
				/*
				 * Timeout expired, so destroy device.
				 */
				sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
				G_RAID3_DEBUG(1, "root_mount_rel[%u] %p",
				    __LINE__, sc->sc_rootmount);
				root_mount_rel(sc->sc_rootmount);
				sc->sc_rootmount = NULL;
			}
			return;
		}

		/*
		 * Find the biggest genid.
		 */
		genid = 0;
		for (n = 0; n < sc->sc_ndisks; n++) {
			disk = &sc->sc_disks[n];
			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
				continue;
			if (disk->d_genid > genid)
				genid = disk->d_genid;
		}
		sc->sc_genid = genid;
		/*
		 * Remove all disks without the biggest genid.
		 */
		for (n = 0; n < sc->sc_ndisks; n++) {
			disk = &sc->sc_disks[n];
			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
				continue;
			if (disk->d_genid < genid) {
				G_RAID3_DEBUG(0,
				    "Component %s (device %s) broken, skipping.",
				    g_raid3_get_diskname(disk), sc->sc_name);
				g_raid3_destroy_disk(disk);
			}
		}

		/*
		 * There must be at least 'sc->sc_ndisks - 1' components
		 * with the same syncid and without SYNCHRONIZING flag.
		 */

		/*
		 * Find the biggest syncid, number of valid components and
		 * number of dirty components.
		 */
		ndirty = ndisks = syncid = 0;
		for (n = 0; n < sc->sc_ndisks; n++) {
			disk = &sc->sc_disks[n];
			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
				continue;
			if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0)
				ndirty++;
			/* A bigger syncid restarts the valid-disk count. */
			if (disk->d_sync.ds_syncid > syncid) {
				syncid = disk->d_sync.ds_syncid;
				ndisks = 0;
			} else if (disk->d_sync.ds_syncid < syncid) {
				continue;
			}
			if ((disk->d_flags &
			    G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) {
				continue;
			}
			ndisks++;
		}
		/*
		 * Do we have enough valid components?
		 */
		if (ndisks + 1 < sc->sc_ndisks) {
			G_RAID3_DEBUG(0,
			    "Device %s is broken, too few valid components.",
			    sc->sc_name);
			sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
			return;
		}
		/*
		 * If there is one DIRTY component and all disks are present,
		 * mark it for synchronization. If there is more than one DIRTY
		 * component, mark parity component for synchronization.
		 */
		if (ndisks == sc->sc_ndisks && ndirty == 1) {
			for (n = 0; n < sc->sc_ndisks; n++) {
				disk = &sc->sc_disks[n];
				if ((disk->d_flags &
				    G_RAID3_DISK_FLAG_DIRTY) == 0) {
					continue;
				}
				disk->d_flags |=
				    G_RAID3_DISK_FLAG_SYNCHRONIZING;
			}
		} else if (ndisks == sc->sc_ndisks && ndirty > 1) {
			/* Parity is the highest-numbered component. */
			disk = &sc->sc_disks[sc->sc_ndisks - 1];
			disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
		}

		sc->sc_syncid = syncid;
		if (force) {
			/* Remember to bump syncid on first write. */
			sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
		}
		if (ndisks == sc->sc_ndisks)
			state = G_RAID3_DEVICE_STATE_COMPLETE;
		else /* if (ndisks == sc->sc_ndisks - 1) */
			state = G_RAID3_DEVICE_STATE_DEGRADED;
		G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.",
		    sc->sc_name, g_raid3_device_state2str(sc->sc_state),
		    g_raid3_device_state2str(state));
		sc->sc_state = state;
		/* Hand each remaining disk its own state-change event. */
		for (n = 0; n < sc->sc_ndisks; n++) {
			disk = &sc->sc_disks[n];
			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
				continue;
			state = g_raid3_determine_state(disk);
			g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT);
			if (state == G_RAID3_DISK_STATE_STALE)
				sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
		}
		break;
	    }
	case G_RAID3_DEVICE_STATE_DEGRADED:
		/*
		 * Genid need to be bumped immediately, so do it here.
		 */
		if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
			sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
			g_raid3_bump_genid(sc);
		}

		/* Wait until every NEW disk has been classified. */
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
			return;
		/* Fewer than ndisks-1 ACTIVE components: device is dead. */
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) <
		    sc->sc_ndisks - 1) {
			if (sc->sc_provider != NULL)
				g_raid3_destroy_provider(sc);
			sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
			return;
		}
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
		    sc->sc_ndisks) {
			state = G_RAID3_DEVICE_STATE_COMPLETE;
			G_RAID3_DEBUG(1,
			    "Device %s state changed from %s to %s.",
			    sc->sc_name, g_raid3_device_state2str(sc->sc_state),
			    g_raid3_device_state2str(state));
			sc->sc_state = state;
		}
		if (sc->sc_provider == NULL)
			g_raid3_launch_provider(sc);
		/* Device is usable; release the root-mount hold. */
		if (sc->sc_rootmount != NULL) {
			G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
			    sc->sc_rootmount);
			root_mount_rel(sc->sc_rootmount);
			sc->sc_rootmount = NULL;
		}
		break;
	case G_RAID3_DEVICE_STATE_COMPLETE:
		/*
		 * Genid need to be bumped immediately, so do it here.
		 */
		if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
			sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
			g_raid3_bump_genid(sc);
		}

		/* Wait until every NEW disk has been classified. */
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
			return;
		KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >=
		    sc->sc_ndisks - 1,
		    ("Too few ACTIVE components in COMPLETE state (device %s).",
		    sc->sc_name));
		/* Losing one component demotes COMPLETE to DEGRADED. */
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
		    sc->sc_ndisks - 1) {
			state = G_RAID3_DEVICE_STATE_DEGRADED;
			G_RAID3_DEBUG(1,
			    "Device %s state changed from %s to %s.",
			    sc->sc_name, g_raid3_device_state2str(sc->sc_state),
			    g_raid3_device_state2str(state));
			sc->sc_state = state;
		}
		if (sc->sc_provider == NULL)
			g_raid3_launch_provider(sc);
		/* Device is usable; release the root-mount hold. */
		if (sc->sc_rootmount != NULL) {
			G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
			    sc->sc_rootmount);
			root_mount_rel(sc->sc_rootmount);
			sc->sc_rootmount = NULL;
		}
		break;
27142d1661a5SPawel Jakub Dawidek default: 27152d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name, 27162d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state))); 27172d1661a5SPawel Jakub Dawidek break; 27182d1661a5SPawel Jakub Dawidek } 27192d1661a5SPawel Jakub Dawidek } 27202d1661a5SPawel Jakub Dawidek 27212d1661a5SPawel Jakub Dawidek /* 27222d1661a5SPawel Jakub Dawidek * Update disk state and device state if needed. 27232d1661a5SPawel Jakub Dawidek */ 27242d1661a5SPawel Jakub Dawidek #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \ 27252d1661a5SPawel Jakub Dawidek "Disk %s state changed from %s to %s (device %s).", \ 27262d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), \ 27272d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state), \ 27282d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(state), sc->sc_name) 27292d1661a5SPawel Jakub Dawidek static int 2730d97d5ee9SPawel Jakub Dawidek g_raid3_update_disk(struct g_raid3_disk *disk, u_int state) 27312d1661a5SPawel Jakub Dawidek { 27322d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 27332d1661a5SPawel Jakub Dawidek 27342d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 27353650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 27363650be51SPawel Jakub Dawidek 27372d1661a5SPawel Jakub Dawidek again: 27382d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.", 27392d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state), 27402d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(state)); 27412d1661a5SPawel Jakub Dawidek switch (state) { 27422d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NEW: 27432d1661a5SPawel Jakub Dawidek /* 27442d1661a5SPawel Jakub Dawidek * Possible scenarios: 27452d1661a5SPawel Jakub Dawidek * 1. New disk arrive. 27462d1661a5SPawel Jakub Dawidek */ 27472d1661a5SPawel Jakub Dawidek /* Previous state should be NONE. 
*/ 27482d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE, 27492d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27502d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27512d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27522d1661a5SPawel Jakub Dawidek 27532d1661a5SPawel Jakub Dawidek disk->d_state = state; 27540cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s: provider %s detected.", 27552d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27562d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) 27572d1661a5SPawel Jakub Dawidek break; 27582d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27592d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27602d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27612d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27622d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27632d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27642d1661a5SPawel Jakub Dawidek state = g_raid3_determine_state(disk); 27652d1661a5SPawel Jakub Dawidek if (state != G_RAID3_DISK_STATE_NONE) 27662d1661a5SPawel Jakub Dawidek goto again; 27672d1661a5SPawel Jakub Dawidek break; 27682d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_ACTIVE: 27692d1661a5SPawel Jakub Dawidek /* 27702d1661a5SPawel Jakub Dawidek * Possible scenarios: 27712d1661a5SPawel Jakub Dawidek * 1. New disk does not need synchronization. 27722d1661a5SPawel Jakub Dawidek * 2. Synchronization process finished successfully. 
27732d1661a5SPawel Jakub Dawidek */ 27742d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27752d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27762d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27772d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27782d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27792d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27802d1661a5SPawel Jakub Dawidek /* Previous state should be NEW or SYNCHRONIZING. */ 27812d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW || 27822d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 27832d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27842d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27852d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27862d1661a5SPawel Jakub Dawidek 2787bf31327cSPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 27882d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING; 27892d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC; 27902d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(sc, 0); 27912d1661a5SPawel Jakub Dawidek } 27922d1661a5SPawel Jakub Dawidek disk->d_state = state; 27932d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset = 0; 27942d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset_done = 0; 27950962f942SPawel Jakub Dawidek g_raid3_update_idle(sc, disk); 2796bf31327cSPawel Jakub Dawidek g_raid3_update_metadata(disk); 27970cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s: provider %s activated.", 27982d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27992d1661a5SPawel Jakub Dawidek break; 28002d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_STALE: 28012d1661a5SPawel Jakub Dawidek /* 28022d1661a5SPawel Jakub 
Dawidek * Possible scenarios: 28032d1661a5SPawel Jakub Dawidek * 1. Stale disk was connected. 28042d1661a5SPawel Jakub Dawidek */ 28052d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 28062d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 28072d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 28082d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28092d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 28102d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 28112d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 28122d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28132d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28142d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28152d1661a5SPawel Jakub Dawidek /* 28162d1661a5SPawel Jakub Dawidek * STALE state is only possible if device is marked 28172d1661a5SPawel Jakub Dawidek * NOAUTOSYNC. 
28182d1661a5SPawel Jakub Dawidek */ 28192d1661a5SPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0, 28202d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 28212d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28222d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28232d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28242d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 28252d1661a5SPawel Jakub Dawidek 28262d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 28272d1661a5SPawel Jakub Dawidek disk->d_state = state; 28282d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 28292d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s is stale.", 28302d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 28312d1661a5SPawel Jakub Dawidek break; 28322d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_SYNCHRONIZING: 28332d1661a5SPawel Jakub Dawidek /* 28342d1661a5SPawel Jakub Dawidek * Possible scenarios: 28352d1661a5SPawel Jakub Dawidek * 1. Disk which needs synchronization was connected. 28362d1661a5SPawel Jakub Dawidek */ 28372d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. 
*/ 28382d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 28392d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 28402d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28412d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 28422d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 28432d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 28442d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28452d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28462d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28472d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 28482d1661a5SPawel Jakub Dawidek 28492d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NEW) 28502d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 28512d1661a5SPawel Jakub Dawidek disk->d_state = state; 28522d1661a5SPawel Jakub Dawidek if (sc->sc_provider != NULL) { 28532d1661a5SPawel Jakub Dawidek g_raid3_sync_start(sc); 28542d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 28552d1661a5SPawel Jakub Dawidek } 28562d1661a5SPawel Jakub Dawidek break; 28572d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_DISCONNECTED: 28582d1661a5SPawel Jakub Dawidek /* 28592d1661a5SPawel Jakub Dawidek * Possible scenarios: 28602d1661a5SPawel Jakub Dawidek * 1. Device wasn't running yet, but disk disappear. 28612d1661a5SPawel Jakub Dawidek * 2. Disk was active and disapppear. 28622d1661a5SPawel Jakub Dawidek * 3. Disk disappear during synchronization process. 
28632d1661a5SPawel Jakub Dawidek */ 28642d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 28652d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 28662d1661a5SPawel Jakub Dawidek /* 28672d1661a5SPawel Jakub Dawidek * Previous state should be ACTIVE, STALE or 28682d1661a5SPawel Jakub Dawidek * SYNCHRONIZING. 28692d1661a5SPawel Jakub Dawidek */ 28702d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 28712d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_STALE || 28722d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 28732d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", 28742d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28752d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28762d1661a5SPawel Jakub Dawidek } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) { 28772d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 28782d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 28792d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", 28802d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28812d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28822d1661a5SPawel Jakub Dawidek /* 28832d1661a5SPawel Jakub Dawidek * Reset bumping syncid if disk disappeared in STARTING 28842d1661a5SPawel Jakub Dawidek * state. 
28852d1661a5SPawel Jakub Dawidek */ 2886ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) 2887a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 28882d1661a5SPawel Jakub Dawidek #ifdef INVARIANTS 28892d1661a5SPawel Jakub Dawidek } else { 28902d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 28912d1661a5SPawel Jakub Dawidek sc->sc_name, 28922d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28932d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28942d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28952d1661a5SPawel Jakub Dawidek #endif 28962d1661a5SPawel Jakub Dawidek } 28972d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 28982d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.", 28992d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 29002d1661a5SPawel Jakub Dawidek 29012d1661a5SPawel Jakub Dawidek g_raid3_destroy_disk(disk); 29022d1661a5SPawel Jakub Dawidek break; 29032d1661a5SPawel Jakub Dawidek default: 29042d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Unknown state (%u).", state)); 29052d1661a5SPawel Jakub Dawidek break; 29062d1661a5SPawel Jakub Dawidek } 29072d1661a5SPawel Jakub Dawidek return (0); 29082d1661a5SPawel Jakub Dawidek } 29092d1661a5SPawel Jakub Dawidek #undef DISK_STATE_CHANGED 29102d1661a5SPawel Jakub Dawidek 2911ea973705SPawel Jakub Dawidek int 29122d1661a5SPawel Jakub Dawidek g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md) 29132d1661a5SPawel Jakub Dawidek { 29142d1661a5SPawel Jakub Dawidek struct g_provider *pp; 29152d1661a5SPawel Jakub Dawidek u_char *buf; 29162d1661a5SPawel Jakub Dawidek int error; 29172d1661a5SPawel Jakub Dawidek 29182d1661a5SPawel Jakub Dawidek g_topology_assert(); 29192d1661a5SPawel Jakub Dawidek 29202d1661a5SPawel Jakub Dawidek error = g_access(cp, 1, 0, 0); 29212d1661a5SPawel Jakub Dawidek if (error != 0) 
29222d1661a5SPawel Jakub Dawidek return (error); 29232d1661a5SPawel Jakub Dawidek pp = cp->provider; 29242d1661a5SPawel Jakub Dawidek g_topology_unlock(); 29252d1661a5SPawel Jakub Dawidek /* Metadata are stored on last sector. */ 29262d1661a5SPawel Jakub Dawidek buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 29272d1661a5SPawel Jakub Dawidek &error); 29282d1661a5SPawel Jakub Dawidek g_topology_lock(); 29292d1661a5SPawel Jakub Dawidek g_access(cp, -1, 0, 0); 29308a4a44b5SMaxim Sobolev if (buf == NULL) { 2931a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2932a245a548SPawel Jakub Dawidek cp->provider->name, error); 29332d1661a5SPawel Jakub Dawidek return (error); 29342d1661a5SPawel Jakub Dawidek } 29352d1661a5SPawel Jakub Dawidek 29362d1661a5SPawel Jakub Dawidek /* Decode metadata. */ 29372d1661a5SPawel Jakub Dawidek error = raid3_metadata_decode(buf, md); 29382d1661a5SPawel Jakub Dawidek g_free(buf); 29392d1661a5SPawel Jakub Dawidek if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0) 29402d1661a5SPawel Jakub Dawidek return (EINVAL); 2941a245a548SPawel Jakub Dawidek if (md->md_version > G_RAID3_VERSION) { 2942a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, 2943a245a548SPawel Jakub Dawidek "Kernel module is too old to handle metadata from %s.", 2944a245a548SPawel Jakub Dawidek cp->provider->name); 2945a245a548SPawel Jakub Dawidek return (EINVAL); 2946a245a548SPawel Jakub Dawidek } 29472d1661a5SPawel Jakub Dawidek if (error != 0) { 29482d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 29492d1661a5SPawel Jakub Dawidek cp->provider->name); 29502d1661a5SPawel Jakub Dawidek return (error); 29512d1661a5SPawel Jakub Dawidek } 2952cd853791SKonstantin Belousov if (md->md_sectorsize > maxphys) { 295395959703SAndrey V. Elsukov G_RAID3_DEBUG(0, "The blocksize is too big."); 295495959703SAndrey V. Elsukov return (EINVAL); 295595959703SAndrey V. 
/*
 * Sanity-check metadata read from component pp against the already
 * configured device sc.  Returns 0 if the component may be attached,
 * EEXIST if its slot is occupied, or EINVAL for any inconsistency.
 * Each rejected check logs its own reason at debug level 1.
 */
static int
g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp,
    struct g_raid3_metadata *md)
{

	/* Component number must fit within the configured disk array. */
	if (md->md_no >= sc->sc_ndisks) {
		G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.",
		    pp->name, md->md_no);
		return (EINVAL);
	}
	/* The slot must still be empty. */
	if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) {
		G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.",
		    pp->name, md->md_no);
		return (EEXIST);
	}
	/* Component count, media size and sector size must all agree. */
	if (md->md_all != sc->sc_ndisks) {
		G_RAID3_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_all", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_mediasize % md->md_sectorsize) != 0) {
		G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != "
		    "0) on disk %s (device %s), skipping.", pp->name,
		    sc->sc_name);
		return (EINVAL);
	}
	if (md->md_mediasize != sc->sc_mediasize) {
		G_RAID3_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_mediasize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	/* Media size must split evenly across the data (non-parity) disks. */
	if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) {
		G_RAID3_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_mediasize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	/* The component must be large enough for its share of the array. */
	if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) {
		G_RAID3_DEBUG(1,
		    "Invalid size of disk %s (device %s), skipping.", pp->name,
		    sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) {
		G_RAID3_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_sectorsize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_sectorsize != sc->sc_sectorsize) {
		G_RAID3_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_sectorsize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
		G_RAID3_DEBUG(1,
		    "Invalid sector size of disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	/* No flag bits outside the known masks may be set. */
	if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) {
		G_RAID3_DEBUG(1,
		    "Invalid device flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
	    (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) {
		/*
		 * VERIFY and ROUND-ROBIN options are mutally exclusive.
		 */
		G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on "
		    "disk %s (device %s), skipping.", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) {
		G_RAID3_DEBUG(1,
		    "Invalid disk flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	return (0);
}
sc->sc_name); 30212d1661a5SPawel Jakub Dawidek return (EINVAL); 30222d1661a5SPawel Jakub Dawidek } 30232d1661a5SPawel Jakub Dawidek if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) { 30242d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 30252d1661a5SPawel Jakub Dawidek "Invalid device flags on disk %s (device %s), skipping.", 30262d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 30272d1661a5SPawel Jakub Dawidek return (EINVAL); 30282d1661a5SPawel Jakub Dawidek } 3029dba915cfSPawel Jakub Dawidek if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 3030dba915cfSPawel Jakub Dawidek (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) { 3031dba915cfSPawel Jakub Dawidek /* 3032dba915cfSPawel Jakub Dawidek * VERIFY and ROUND-ROBIN options are mutally exclusive. 3033dba915cfSPawel Jakub Dawidek */ 3034dba915cfSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on " 3035dba915cfSPawel Jakub Dawidek "disk %s (device %s), skipping.", pp->name, sc->sc_name); 3036dba915cfSPawel Jakub Dawidek return (EINVAL); 3037dba915cfSPawel Jakub Dawidek } 30382d1661a5SPawel Jakub Dawidek if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) { 30392d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 30402d1661a5SPawel Jakub Dawidek "Invalid disk flags on disk %s (device %s), skipping.", 30412d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 30422d1661a5SPawel Jakub Dawidek return (EINVAL); 30432d1661a5SPawel Jakub Dawidek } 30442d1661a5SPawel Jakub Dawidek return (0); 30452d1661a5SPawel Jakub Dawidek } 30462d1661a5SPawel Jakub Dawidek 3047ea973705SPawel Jakub Dawidek int 30482d1661a5SPawel Jakub Dawidek g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp, 30492d1661a5SPawel Jakub Dawidek struct g_raid3_metadata *md) 30502d1661a5SPawel Jakub Dawidek { 30512d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 30522d1661a5SPawel Jakub Dawidek int error; 30532d1661a5SPawel Jakub Dawidek 30543650be51SPawel Jakub Dawidek g_topology_assert_not(); 
30552d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Adding disk %s.", pp->name); 30562d1661a5SPawel Jakub Dawidek 30572d1661a5SPawel Jakub Dawidek error = g_raid3_check_metadata(sc, pp, md); 30582d1661a5SPawel Jakub Dawidek if (error != 0) 30592d1661a5SPawel Jakub Dawidek return (error); 3060a245a548SPawel Jakub Dawidek if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING && 3061a245a548SPawel Jakub Dawidek md->md_genid < sc->sc_genid) { 3062a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.", 3063a245a548SPawel Jakub Dawidek pp->name, sc->sc_name); 3064a245a548SPawel Jakub Dawidek return (EINVAL); 3065a245a548SPawel Jakub Dawidek } 30662d1661a5SPawel Jakub Dawidek disk = g_raid3_init_disk(sc, pp, md, &error); 30672d1661a5SPawel Jakub Dawidek if (disk == NULL) 30682d1661a5SPawel Jakub Dawidek return (error); 30692d1661a5SPawel Jakub Dawidek error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW, 30702d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_WAIT); 3071a245a548SPawel Jakub Dawidek if (error != 0) 30722d1661a5SPawel Jakub Dawidek return (error); 3073a245a548SPawel Jakub Dawidek if (md->md_version < G_RAID3_VERSION) { 3074a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 3075a245a548SPawel Jakub Dawidek pp->name, md->md_version, G_RAID3_VERSION); 3076a245a548SPawel Jakub Dawidek g_raid3_update_metadata(disk); 3077a245a548SPawel Jakub Dawidek } 3078a245a548SPawel Jakub Dawidek return (0); 30792d1661a5SPawel Jakub Dawidek } 30802d1661a5SPawel Jakub Dawidek 3081712fe9bdSPawel Jakub Dawidek static void 3082712fe9bdSPawel Jakub Dawidek g_raid3_destroy_delayed(void *arg, int flag) 3083712fe9bdSPawel Jakub Dawidek { 3084712fe9bdSPawel Jakub Dawidek struct g_raid3_softc *sc; 3085712fe9bdSPawel Jakub Dawidek int error; 3086712fe9bdSPawel Jakub Dawidek 3087712fe9bdSPawel Jakub Dawidek if (flag == EV_CANCEL) { 3088712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Destroying canceled."); 3089712fe9bdSPawel 
Jakub Dawidek return; 3090712fe9bdSPawel Jakub Dawidek } 3091712fe9bdSPawel Jakub Dawidek sc = arg; 3092712fe9bdSPawel Jakub Dawidek g_topology_unlock(); 3093712fe9bdSPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3094712fe9bdSPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0, 3095712fe9bdSPawel Jakub Dawidek ("DESTROY flag set on %s.", sc->sc_name)); 3096712fe9bdSPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0, 3097712fe9bdSPawel Jakub Dawidek ("DESTROYING flag not set on %s.", sc->sc_name)); 3098712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name); 3099712fe9bdSPawel Jakub Dawidek error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT); 3100712fe9bdSPawel Jakub Dawidek if (error != 0) { 3101712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 3102712fe9bdSPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 3103712fe9bdSPawel Jakub Dawidek } 3104712fe9bdSPawel Jakub Dawidek g_topology_lock(); 3105712fe9bdSPawel Jakub Dawidek } 3106712fe9bdSPawel Jakub Dawidek 31072d1661a5SPawel Jakub Dawidek static int 31082d1661a5SPawel Jakub Dawidek g_raid3_access(struct g_provider *pp, int acr, int acw, int ace) 31092d1661a5SPawel Jakub Dawidek { 31102d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 3111712fe9bdSPawel Jakub Dawidek int dcr, dcw, dce, error = 0; 31122d1661a5SPawel Jakub Dawidek 31132d1661a5SPawel Jakub Dawidek g_topology_assert(); 31142d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 31152d1661a5SPawel Jakub Dawidek acw, ace); 31162d1661a5SPawel Jakub Dawidek 311733cb9b3cSMark Johnston sc = pp->private; 31181f7fec3cSPawel Jakub Dawidek KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 31191f7fec3cSPawel Jakub Dawidek 31202d1661a5SPawel Jakub Dawidek dcr = pp->acr + acr; 31212d1661a5SPawel Jakub Dawidek dcw = pp->acw + acw; 31222d1661a5SPawel Jakub Dawidek dce = pp->ace + ace; 
31232d1661a5SPawel Jakub Dawidek 31243650be51SPawel Jakub Dawidek g_topology_unlock(); 31253650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3126712fe9bdSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 || 31273650be51SPawel Jakub Dawidek g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) { 31283650be51SPawel Jakub Dawidek if (acr > 0 || acw > 0 || ace > 0) 31293650be51SPawel Jakub Dawidek error = ENXIO; 31303650be51SPawel Jakub Dawidek goto end; 31312d1661a5SPawel Jakub Dawidek } 3132f62c1a47SAlexander Motin if (dcw == 0) 31333650be51SPawel Jakub Dawidek g_raid3_idle(sc, dcw); 3134712fe9bdSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) { 3135712fe9bdSPawel Jakub Dawidek if (acr > 0 || acw > 0 || ace > 0) { 3136712fe9bdSPawel Jakub Dawidek error = ENXIO; 3137712fe9bdSPawel Jakub Dawidek goto end; 3138712fe9bdSPawel Jakub Dawidek } 3139712fe9bdSPawel Jakub Dawidek if (dcr == 0 && dcw == 0 && dce == 0) { 3140712fe9bdSPawel Jakub Dawidek g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK, 3141712fe9bdSPawel Jakub Dawidek sc, NULL); 3142712fe9bdSPawel Jakub Dawidek } 3143712fe9bdSPawel Jakub Dawidek } 31443650be51SPawel Jakub Dawidek end: 31453650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 31463650be51SPawel Jakub Dawidek g_topology_lock(); 31473650be51SPawel Jakub Dawidek return (error); 31482d1661a5SPawel Jakub Dawidek } 31492d1661a5SPawel Jakub Dawidek 31502d1661a5SPawel Jakub Dawidek static struct g_geom * 31512d1661a5SPawel Jakub Dawidek g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md) 31522d1661a5SPawel Jakub Dawidek { 31532d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 31542d1661a5SPawel Jakub Dawidek struct g_geom *gp; 31552d1661a5SPawel Jakub Dawidek int error, timeout; 31562d1661a5SPawel Jakub Dawidek u_int n; 31572d1661a5SPawel Jakub Dawidek 31582d1661a5SPawel Jakub Dawidek g_topology_assert(); 31592d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 
/*
 * Allocate and initialize a new RAID3 device from metadata *md: the softc,
 * the action geom (I/O path), the synchronization geom, the per-size UMA
 * zones (unless g_raid3_use_malloc), and the worker thread.  Returns the
 * action geom, or NULL on failure.  Called with the topology lock held.
 */
static struct g_geom *
g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_geom *gp;
	int error, timeout;
	u_int n;

	g_topology_assert();
	G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);

	/* One disk is minimum. */
	if (md->md_all < 1)
		return (NULL);
	/*
	 * Action geom.
	 */
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO);
	sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3,
	    M_WAITOK | M_ZERO);
	gp->start = g_raid3_start;
	gp->orphan = g_raid3_orphan;
	gp->access = g_raid3_access;
	gp->dumpconf = g_raid3_dumpconf;

	sc->sc_id = md->md_id;
	sc->sc_mediasize = md->md_mediasize;
	sc->sc_sectorsize = md->md_sectorsize;
	sc->sc_ndisks = md->md_all;
	sc->sc_round_robin = 0;
	sc->sc_flags = md->md_mflags;
	sc->sc_bump_id = 0;
	sc->sc_idle = 1;
	sc->sc_last_write = time_uptime;
	sc->sc_writes = 0;
	sc->sc_refcnt = 1;
	/* All slots start empty; components attach via g_raid3_add_disk(). */
	for (n = 0; n < sc->sc_ndisks; n++) {
		sc->sc_disks[n].d_softc = sc;
		sc->sc_disks[n].d_no = n;
		sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK;
	}
	sx_init(&sc->sc_lock, "graid3:lock");
	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF);
	bioq_init(&sc->sc_regular_delayed);
	bioq_init(&sc->sc_inflight);
	bioq_init(&sc->sc_sync_delayed);
	TAILQ_INIT(&sc->sc_events);
	mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF);
	callout_init(&sc->sc_callout, 1);
	sc->sc_state = G_RAID3_DEVICE_STATE_STARTING;
	gp->softc = sc;
	sc->sc_geom = gp;
	sc->sc_provider = NULL;
	/*
	 * Synchronization geom.
	 */
	gp = g_new_geomf(mp, "%s.sync", md->md_name);
	gp->softc = sc;
	gp->orphan = g_raid3_orphan;
	sc->sc_sync.ds_geom = gp;

	/* Pre-create fixed-size buffer zones for the I/O path. */
	if (!g_raid3_use_malloc) {
		sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k",
		    65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0;
		sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k;
		sc->sc_zones[G_RAID3_ZONE_64K].sz_requested =
		    sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0;
		sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k",
		    16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0;
		sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k;
		sc->sc_zones[G_RAID3_ZONE_16K].sz_requested =
		    sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0;
		sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k",
		    4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0;
		sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k;
		sc->sc_zones[G_RAID3_ZONE_4K].sz_requested =
		    sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0;
	}

	error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0,
	    "g_raid3 %s", md->md_name);
	if (error != 0) {
		G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.",
		    sc->sc_name);
		/*
		 * NOTE(review): only the action geom is destroyed explicitly;
		 * presumably g_raid3_free_device() releases the sync geom and
		 * the rest of the softc — confirm in its definition.
		 */
		g_destroy_geom(sc->sc_geom);
		g_raid3_free_device(sc);
		return (NULL);
	}

	G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).",
	    sc->sc_name, sc->sc_ndisks, sc->sc_id);

	/* Hold root mount until the array is usable (or times out). */
	sc->sc_rootmount = root_mount_hold("GRAID3");
	G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);

	/*
	 * Schedule startup timeout.
	 */
	timeout = atomic_load_acq_int(&g_raid3_timeout);
	sc->sc_timeout_event = malloc(sizeof(struct g_raid3_event), M_RAID3,
	    M_WAITOK);
	callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc);
	return (sc->sc_geom);
}
/*
 * Destroy device sc.  'how' selects the policy when the provider is still
 * open: SOFT fails with EBUSY, DELAYED marks the device for destruction
 * on last close (also EBUSY), HARD proceeds anyway.  Returns 0 on success.
 * Called with sc_lock exclusively held and without the topology lock.
 */
int
g_raid3_destroy(struct g_raid3_softc *sc, int how)
{
	struct g_provider *pp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		switch (how) {
		case G_RAID3_DESTROY_SOFT:
			G_RAID3_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		case G_RAID3_DESTROY_DELAYED:
			G_RAID3_DEBUG(1,
			    "Device %s will be destroyed on last close.",
			    pp->name);
			if (sc->sc_syncdisk != NULL)
				g_raid3_sync_stop(sc, 1);
			/* g_raid3_access() finishes the job on last close. */
			sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING;
			return (EBUSY);
		case G_RAID3_DESTROY_HARD:
			G_RAID3_DEBUG(1, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
			break;
		}
	}

	/* Detach the softc from both geoms; NULL softc means already done. */
	g_topology_lock();
	if (sc->sc_geom->softc == NULL) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	g_topology_unlock();

	/* Ask the worker thread to exit and wait until it is gone. */
	sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
	sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT;
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	sx_xunlock(&sc->sc_lock);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
	while (sc->sc_worker != NULL)
		tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
	G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
	sx_xlock(&sc->sc_lock);
	g_raid3_destroy_device(sc);
	return (0);
}
g_raid3_taste_orphan(struct g_consumer *cp) 33222d1661a5SPawel Jakub Dawidek { 33232d1661a5SPawel Jakub Dawidek 33242d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 33252d1661a5SPawel Jakub Dawidek cp->provider->name)); 33262d1661a5SPawel Jakub Dawidek } 33272d1661a5SPawel Jakub Dawidek 33282d1661a5SPawel Jakub Dawidek static struct g_geom * 33292d1661a5SPawel Jakub Dawidek g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 33302d1661a5SPawel Jakub Dawidek { 33312d1661a5SPawel Jakub Dawidek struct g_raid3_metadata md; 33322d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 33332d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 33342d1661a5SPawel Jakub Dawidek struct g_geom *gp; 33352d1661a5SPawel Jakub Dawidek int error; 33362d1661a5SPawel Jakub Dawidek 33372d1661a5SPawel Jakub Dawidek g_topology_assert(); 33382d1661a5SPawel Jakub Dawidek g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 33392d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Tasting %s.", pp->name); 33402d1661a5SPawel Jakub Dawidek 33412d1661a5SPawel Jakub Dawidek gp = g_new_geomf(mp, "raid3:taste"); 33422d1661a5SPawel Jakub Dawidek /* This orphan function should be never called. 
/*
 * GEOM taste method: probe provider pp for RAID3 metadata using a
 * throw-away geom/consumer, then either attach the component to an
 * existing device with the same name/id or create a new one.  Returns
 * the device's action geom, or NULL if pp is not (or cannot become)
 * part of a RAID3 device.  Called with the topology lock held.
 */
static struct g_geom *
g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_raid3_metadata md;
	struct g_raid3_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_RAID3_DEBUG(2, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "raid3:taste");
	/* This orphan function should be never called. */
	gp->orphan = g_raid3_taste_orphan;
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0) {
		error = g_raid3_read_metadata(cp, &md);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	/* If metadata pins a provider name/size, it must match pp. */
	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if (g_raid3_debug >= 2)
		raid3_metadata_dump(&md);

	/*
	 * Let's check if device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		/* Skip each device's synchronization geom. */
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		/* Same name but different id: a conflicting device exists. */
		if (md.md_id != sc->sc_id) {
			G_RAID3_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		gp = g_raid3_create(mp, &md);
		if (gp == NULL) {
			G_RAID3_DEBUG(0, "Cannot create device %s.",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	error = g_raid3_add_disk(sc, pp, &md);
	if (error != 0) {
		G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		/* If no component attached at all, tear the device down. */
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
		    sc->sc_ndisks) {
			g_cancel_event(sc);
			g_raid3_destroy(sc, G_RAID3_DESTROY_HARD);
			g_topology_lock();
			return (NULL);
		}
		gp = NULL;
	}
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (gp);
}
34033525bb6bSPawel Jakub Dawidek g_raid3_destroy(sc, G_RAID3_DESTROY_HARD); 34043650be51SPawel Jakub Dawidek g_topology_lock(); 34052d1661a5SPawel Jakub Dawidek return (NULL); 34062d1661a5SPawel Jakub Dawidek } 34073650be51SPawel Jakub Dawidek gp = NULL; 34083650be51SPawel Jakub Dawidek } 34093650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 34103650be51SPawel Jakub Dawidek g_topology_lock(); 34112d1661a5SPawel Jakub Dawidek return (gp); 34122d1661a5SPawel Jakub Dawidek } 34132d1661a5SPawel Jakub Dawidek 34142d1661a5SPawel Jakub Dawidek static int 34152d1661a5SPawel Jakub Dawidek g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, 34162d1661a5SPawel Jakub Dawidek struct g_geom *gp) 34172d1661a5SPawel Jakub Dawidek { 34183650be51SPawel Jakub Dawidek struct g_raid3_softc *sc; 34193650be51SPawel Jakub Dawidek int error; 34202d1661a5SPawel Jakub Dawidek 34213650be51SPawel Jakub Dawidek g_topology_unlock(); 34223650be51SPawel Jakub Dawidek sc = gp->softc; 34233650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3424712fe9bdSPawel Jakub Dawidek g_cancel_event(sc); 34253525bb6bSPawel Jakub Dawidek error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT); 34263650be51SPawel Jakub Dawidek if (error != 0) 34273650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 34283650be51SPawel Jakub Dawidek g_topology_lock(); 34293650be51SPawel Jakub Dawidek return (error); 34302d1661a5SPawel Jakub Dawidek } 34312d1661a5SPawel Jakub Dawidek 34322d1661a5SPawel Jakub Dawidek static void 34332d1661a5SPawel Jakub Dawidek g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 34342d1661a5SPawel Jakub Dawidek struct g_consumer *cp, struct g_provider *pp) 34352d1661a5SPawel Jakub Dawidek { 34362d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 34372d1661a5SPawel Jakub Dawidek 34382d1661a5SPawel Jakub Dawidek g_topology_assert(); 34392d1661a5SPawel Jakub Dawidek 34402d1661a5SPawel Jakub Dawidek sc = gp->softc; 34412d1661a5SPawel Jakub 
Dawidek if (sc == NULL) 34422d1661a5SPawel Jakub Dawidek return; 34432d1661a5SPawel Jakub Dawidek /* Skip synchronization geom. */ 34442d1661a5SPawel Jakub Dawidek if (gp == sc->sc_sync.ds_geom) 34452d1661a5SPawel Jakub Dawidek return; 34462d1661a5SPawel Jakub Dawidek if (pp != NULL) { 34472d1661a5SPawel Jakub Dawidek /* Nothing here. */ 34482d1661a5SPawel Jakub Dawidek } else if (cp != NULL) { 34492d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 34502d1661a5SPawel Jakub Dawidek 34512d1661a5SPawel Jakub Dawidek disk = cp->private; 34522d1661a5SPawel Jakub Dawidek if (disk == NULL) 34532d1661a5SPawel Jakub Dawidek return; 34543650be51SPawel Jakub Dawidek g_topology_unlock(); 34553650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 34562d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Type>", indent); 34572d1661a5SPawel Jakub Dawidek if (disk->d_no == sc->sc_ndisks - 1) 345849ee0fceSAlexander Motin sbuf_cat(sb, "PARITY"); 34592d1661a5SPawel Jakub Dawidek else 346049ee0fceSAlexander Motin sbuf_cat(sb, "DATA"); 346149ee0fceSAlexander Motin sbuf_cat(sb, "</Type>\n"); 34622d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Number>%u</Number>\n", indent, 34632d1661a5SPawel Jakub Dawidek (u_int)disk->d_no); 34642d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 34652d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Synchronized>", indent); 34663650be51SPawel Jakub Dawidek if (disk->d_sync.ds_offset == 0) 346749ee0fceSAlexander Motin sbuf_cat(sb, "0%"); 34682d1661a5SPawel Jakub Dawidek else { 34692d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%u%%", 34703650be51SPawel Jakub Dawidek (u_int)((disk->d_sync.ds_offset * 100) / 3471c0d68b6eSPawel Jakub Dawidek (sc->sc_mediasize / (sc->sc_ndisks - 1)))); 34722d1661a5SPawel Jakub Dawidek } 347349ee0fceSAlexander Motin sbuf_cat(sb, "</Synchronized>\n"); 34744a7f7b10SGleb Smirnoff if (disk->d_sync.ds_offset > 0) { 34754a7f7b10SGleb Smirnoff sbuf_printf(sb, "%s<BytesSynced>%jd" 34764a7f7b10SGleb 
Smirnoff "</BytesSynced>\n", indent, 34774a7f7b10SGleb Smirnoff (intmax_t)disk->d_sync.ds_offset); 34784a7f7b10SGleb Smirnoff } 34792d1661a5SPawel Jakub Dawidek } 34802d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 34812d1661a5SPawel Jakub Dawidek disk->d_sync.ds_syncid); 3482a245a548SPawel Jakub Dawidek sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid); 34832d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Flags>", indent); 34842d1661a5SPawel Jakub Dawidek if (disk->d_flags == 0) 348549ee0fceSAlexander Motin sbuf_cat(sb, "NONE"); 34862d1661a5SPawel Jakub Dawidek else { 34872d1661a5SPawel Jakub Dawidek int first = 1; 34882d1661a5SPawel Jakub Dawidek 34892d1661a5SPawel Jakub Dawidek #define ADD_FLAG(flag, name) do { \ 34902d1661a5SPawel Jakub Dawidek if ((disk->d_flags & (flag)) != 0) { \ 34912d1661a5SPawel Jakub Dawidek if (!first) \ 349249ee0fceSAlexander Motin sbuf_cat(sb, ", "); \ 34932d1661a5SPawel Jakub Dawidek else \ 34942d1661a5SPawel Jakub Dawidek first = 0; \ 349549ee0fceSAlexander Motin sbuf_cat(sb, name); \ 34962d1661a5SPawel Jakub Dawidek } \ 34972d1661a5SPawel Jakub Dawidek } while (0) 34982d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY"); 34992d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED"); 35002d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING, 35012d1661a5SPawel Jakub Dawidek "SYNCHRONIZING"); 35022d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 35033aae74ecSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN"); 35042d1661a5SPawel Jakub Dawidek #undef ADD_FLAG 35052d1661a5SPawel Jakub Dawidek } 350649ee0fceSAlexander Motin sbuf_cat(sb, "</Flags>\n"); 35072d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<State>%s</State>\n", indent, 35082d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state)); 35093650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 35103650be51SPawel Jakub 
Dawidek g_topology_lock(); 35112d1661a5SPawel Jakub Dawidek } else { 35123650be51SPawel Jakub Dawidek g_topology_unlock(); 35133650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3514ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 3515ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3516ed940a82SPawel Jakub Dawidek "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent, 3517ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_requested); 3518ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3519ed940a82SPawel Jakub Dawidek "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent, 3520ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_failed); 3521ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3522ed940a82SPawel Jakub Dawidek "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent, 3523ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_requested); 3524ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3525ed940a82SPawel Jakub Dawidek "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent, 3526ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_failed); 3527ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3528ed940a82SPawel Jakub Dawidek "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent, 3529ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_requested); 3530ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3531ed940a82SPawel Jakub Dawidek "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent, 3532ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_failed); 3533ed940a82SPawel Jakub Dawidek } 35342d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 35352d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3536a245a548SPawel Jakub Dawidek sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 35372d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Flags>", indent); 35382d1661a5SPawel Jakub Dawidek if (sc->sc_flags == 0) 353949ee0fceSAlexander Motin sbuf_cat(sb, 
"NONE"); 35402d1661a5SPawel Jakub Dawidek else { 35412d1661a5SPawel Jakub Dawidek int first = 1; 35422d1661a5SPawel Jakub Dawidek 35432d1661a5SPawel Jakub Dawidek #define ADD_FLAG(flag, name) do { \ 35442d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & (flag)) != 0) { \ 35452d1661a5SPawel Jakub Dawidek if (!first) \ 354649ee0fceSAlexander Motin sbuf_cat(sb, ", "); \ 35472d1661a5SPawel Jakub Dawidek else \ 35482d1661a5SPawel Jakub Dawidek first = 0; \ 354949ee0fceSAlexander Motin sbuf_cat(sb, name); \ 35502d1661a5SPawel Jakub Dawidek } \ 35512d1661a5SPawel Jakub Dawidek } while (0) 3552501250baSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 35532d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3554f5a2f7feSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN, 3555f5a2f7feSPawel Jakub Dawidek "ROUND-ROBIN"); 3556dba915cfSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY"); 35572d1661a5SPawel Jakub Dawidek #undef ADD_FLAG 35582d1661a5SPawel Jakub Dawidek } 355949ee0fceSAlexander Motin sbuf_cat(sb, "</Flags>\n"); 35602d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 35612d1661a5SPawel Jakub Dawidek sc->sc_ndisks); 356228b31df7SPawel Jakub Dawidek sbuf_printf(sb, "%s<State>%s</State>\n", indent, 356328b31df7SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state)); 35643650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 35653650be51SPawel Jakub Dawidek g_topology_lock(); 35662d1661a5SPawel Jakub Dawidek } 35672d1661a5SPawel Jakub Dawidek } 35682d1661a5SPawel Jakub Dawidek 35699da3072cSPawel Jakub Dawidek static void 3570f62c1a47SAlexander Motin g_raid3_shutdown_post_sync(void *arg, int howto) 35719da3072cSPawel Jakub Dawidek { 35729da3072cSPawel Jakub Dawidek struct g_class *mp; 35739da3072cSPawel Jakub Dawidek struct g_geom *gp, *gp2; 35743650be51SPawel Jakub Dawidek struct g_raid3_softc *sc; 3575712fe9bdSPawel Jakub Dawidek int error; 
35769da3072cSPawel Jakub Dawidek 35774eb861d3SMitchell Horne if ((howto & RB_NOSYNC) != 0) 35784eb861d3SMitchell Horne return; 35794eb861d3SMitchell Horne 35809da3072cSPawel Jakub Dawidek mp = arg; 35819da3072cSPawel Jakub Dawidek g_topology_lock(); 3582f62c1a47SAlexander Motin g_raid3_shutdown = 1; 35839da3072cSPawel Jakub Dawidek LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 35843650be51SPawel Jakub Dawidek if ((sc = gp->softc) == NULL) 35859da3072cSPawel Jakub Dawidek continue; 3586712fe9bdSPawel Jakub Dawidek /* Skip synchronization geom. */ 3587712fe9bdSPawel Jakub Dawidek if (gp == sc->sc_sync.ds_geom) 3588712fe9bdSPawel Jakub Dawidek continue; 35893650be51SPawel Jakub Dawidek g_topology_unlock(); 35903650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3591f62c1a47SAlexander Motin g_raid3_idle(sc, -1); 3592712fe9bdSPawel Jakub Dawidek g_cancel_event(sc); 3593712fe9bdSPawel Jakub Dawidek error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED); 3594712fe9bdSPawel Jakub Dawidek if (error != 0) 35953650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 35963650be51SPawel Jakub Dawidek g_topology_lock(); 35973650be51SPawel Jakub Dawidek } 35983650be51SPawel Jakub Dawidek g_topology_unlock(); 35993650be51SPawel Jakub Dawidek } 36003650be51SPawel Jakub Dawidek 36013650be51SPawel Jakub Dawidek static void 36029da3072cSPawel Jakub Dawidek g_raid3_init(struct g_class *mp) 36039da3072cSPawel Jakub Dawidek { 36049da3072cSPawel Jakub Dawidek 3605f62c1a47SAlexander Motin g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3606f62c1a47SAlexander Motin g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3607f62c1a47SAlexander Motin if (g_raid3_post_sync == NULL) 36089da3072cSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Warning! 
Cannot register shutdown event."); 36099da3072cSPawel Jakub Dawidek } 36109da3072cSPawel Jakub Dawidek 36119da3072cSPawel Jakub Dawidek static void 36129da3072cSPawel Jakub Dawidek g_raid3_fini(struct g_class *mp) 36139da3072cSPawel Jakub Dawidek { 36149da3072cSPawel Jakub Dawidek 3615f62c1a47SAlexander Motin if (g_raid3_post_sync != NULL) 3616f62c1a47SAlexander Motin EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync); 36179da3072cSPawel Jakub Dawidek } 36189da3072cSPawel Jakub Dawidek 36192d1661a5SPawel Jakub Dawidek DECLARE_GEOM_CLASS(g_raid3_class, g_raid3); 362074d6c131SKyle Evans MODULE_VERSION(geom_raid3, 0); 3621