12d1661a5SPawel Jakub Dawidek /*- 23728855aSPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 33728855aSPedro F. Giffuni * 4e6757059SPawel Jakub Dawidek * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 52d1661a5SPawel Jakub Dawidek * All rights reserved. 62d1661a5SPawel Jakub Dawidek * 72d1661a5SPawel Jakub Dawidek * Redistribution and use in source and binary forms, with or without 82d1661a5SPawel Jakub Dawidek * modification, are permitted provided that the following conditions 92d1661a5SPawel Jakub Dawidek * are met: 102d1661a5SPawel Jakub Dawidek * 1. Redistributions of source code must retain the above copyright 112d1661a5SPawel Jakub Dawidek * notice, this list of conditions and the following disclaimer. 122d1661a5SPawel Jakub Dawidek * 2. Redistributions in binary form must reproduce the above copyright 132d1661a5SPawel Jakub Dawidek * notice, this list of conditions and the following disclaimer in the 142d1661a5SPawel Jakub Dawidek * documentation and/or other materials provided with the distribution. 152d1661a5SPawel Jakub Dawidek * 162d1661a5SPawel Jakub Dawidek * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 172d1661a5SPawel Jakub Dawidek * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 182d1661a5SPawel Jakub Dawidek * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 192d1661a5SPawel Jakub Dawidek * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 202d1661a5SPawel Jakub Dawidek * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 212d1661a5SPawel Jakub Dawidek * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 222d1661a5SPawel Jakub Dawidek * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 232d1661a5SPawel Jakub Dawidek * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 242d1661a5SPawel Jakub Dawidek * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 252d1661a5SPawel Jakub Dawidek * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 262d1661a5SPawel Jakub Dawidek * SUCH DAMAGE. 272d1661a5SPawel Jakub Dawidek */ 282d1661a5SPawel Jakub Dawidek 292d1661a5SPawel Jakub Dawidek #include <sys/cdefs.h> 302d1661a5SPawel Jakub Dawidek __FBSDID("$FreeBSD$"); 312d1661a5SPawel Jakub Dawidek 322d1661a5SPawel Jakub Dawidek #include <sys/param.h> 332d1661a5SPawel Jakub Dawidek #include <sys/systm.h> 342d1661a5SPawel Jakub Dawidek #include <sys/kernel.h> 352d1661a5SPawel Jakub Dawidek #include <sys/module.h> 362d1661a5SPawel Jakub Dawidek #include <sys/limits.h> 372d1661a5SPawel Jakub Dawidek #include <sys/lock.h> 382d1661a5SPawel Jakub Dawidek #include <sys/mutex.h> 392d1661a5SPawel Jakub Dawidek #include <sys/bio.h> 405d807a0eSAndrey V. Elsukov #include <sys/sbuf.h> 412d1661a5SPawel Jakub Dawidek #include <sys/sysctl.h> 422d1661a5SPawel Jakub Dawidek #include <sys/malloc.h> 439da3072cSPawel Jakub Dawidek #include <sys/eventhandler.h> 442d1661a5SPawel Jakub Dawidek #include <vm/uma.h> 452d1661a5SPawel Jakub Dawidek #include <geom/geom.h> 46ac03832eSConrad Meyer #include <geom/geom_dbg.h> 472d1661a5SPawel Jakub Dawidek #include <sys/proc.h> 482d1661a5SPawel Jakub Dawidek #include <sys/kthread.h> 4963710c4dSJohn Baldwin #include <sys/sched.h> 502d1661a5SPawel Jakub Dawidek #include <geom/raid3/g_raid3.h> 512d1661a5SPawel Jakub Dawidek 52cb08c2ccSAlexander Leidinger FEATURE(geom_raid3, "GEOM RAID-3 functionality"); 532d1661a5SPawel Jakub Dawidek 545bb84bc8SRobert Watson static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data"); 552d1661a5SPawel Jakub Dawidek 562d1661a5SPawel Jakub Dawidek SYSCTL_DECL(_kern_geom); 577029da5cSPawel Biernacki static SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 586472ac3dSEd Schouten "GEOM_RAID3 stuff"); 59809a9dc6SPawel Jakub Dawidek u_int g_raid3_debug = 0; 60af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid3_debug, 0, 612d1661a5SPawel Jakub Dawidek "Debug level"); 62e5e7825cSPawel Jakub Dawidek static u_int g_raid3_timeout = 4; 63af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_raid3_timeout, 642d1661a5SPawel Jakub Dawidek 0, "Time to wait on all raid3 components"); 654d006a98SPawel Jakub Dawidek static u_int g_raid3_idletime = 5; 66af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RWTUN, 674d006a98SPawel Jakub Dawidek &g_raid3_idletime, 0, "Mark components as clean when idling"); 683aae74ecSPawel Jakub Dawidek static u_int g_raid3_disconnect_on_failure = 1; 69af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN, 703aae74ecSPawel Jakub Dawidek &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 71e6757059SPawel Jakub Dawidek static u_int g_raid3_syncreqs = 2; 723650be51SPawel Jakub Dawidek SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN, 733650be51SPawel Jakub Dawidek &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests."); 74ed940a82SPawel Jakub Dawidek static u_int g_raid3_use_malloc = 0; 75ed940a82SPawel Jakub Dawidek SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN, 76ed940a82SPawel Jakub Dawidek &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9)."); 772d1661a5SPawel Jakub Dawidek 782d1661a5SPawel Jakub Dawidek static u_int g_raid3_n64k = 50; 79af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RDTUN, &g_raid3_n64k, 0, 802d1661a5SPawel Jakub Dawidek "Maximum number of 64kB allocations"); 812d1661a5SPawel Jakub Dawidek static u_int g_raid3_n16k = 200; 82af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RDTUN, &g_raid3_n16k, 0, 832d1661a5SPawel Jakub Dawidek "Maximum number of 16kB allocations"); 842d1661a5SPawel Jakub Dawidek static u_int g_raid3_n4k = 1200; 85af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RDTUN, &g_raid3_n4k, 0, 862d1661a5SPawel Jakub Dawidek "Maximum number of 4kB allocations"); 872d1661a5SPawel Jakub Dawidek 887029da5cSPawel Biernacki static SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, 897029da5cSPawel Biernacki CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 902d1661a5SPawel Jakub Dawidek "GEOM_RAID3 statistics"); 91dba915cfSPawel Jakub Dawidek static u_int g_raid3_parity_mismatch = 0; 92dba915cfSPawel Jakub Dawidek SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD, 93dba915cfSPawel Jakub Dawidek &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode"); 942d1661a5SPawel Jakub Dawidek 952d1661a5SPawel Jakub Dawidek #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \ 962d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 972d1661a5SPawel Jakub Dawidek msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 982d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 992d1661a5SPawel Jakub Dawidek } while (0) 1002d1661a5SPawel Jakub Dawidek 101f62c1a47SAlexander Motin static eventhandler_tag g_raid3_post_sync = NULL; 102f62c1a47SAlexander Motin static int g_raid3_shutdown = 0; 1032d1661a5SPawel Jakub Dawidek 1042d1661a5SPawel Jakub Dawidek static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp, 1052d1661a5SPawel Jakub Dawidek struct g_geom *gp); 1062d1661a5SPawel Jakub Dawidek static g_taste_t g_raid3_taste; 1079da3072cSPawel Jakub Dawidek static void g_raid3_init(struct g_class *mp); 1089da3072cSPawel Jakub Dawidek static void g_raid3_fini(struct g_class *mp); 1092d1661a5SPawel Jakub Dawidek 1102d1661a5SPawel Jakub Dawidek struct g_class g_raid3_class = { 1112d1661a5SPawel Jakub Dawidek .name = G_RAID3_CLASS_NAME, 1122d1661a5SPawel Jakub Dawidek .version = G_VERSION, 1132d1661a5SPawel Jakub Dawidek .ctlreq = g_raid3_config, 1142d1661a5SPawel Jakub Dawidek .taste = g_raid3_taste, 1159da3072cSPawel Jakub Dawidek .destroy_geom = g_raid3_destroy_geom, 1169da3072cSPawel Jakub Dawidek .init = g_raid3_init, 1179da3072cSPawel Jakub Dawidek .fini = g_raid3_fini 1182d1661a5SPawel Jakub Dawidek }; 1192d1661a5SPawel Jakub Dawidek 1202d1661a5SPawel Jakub Dawidek static void g_raid3_destroy_provider(struct g_raid3_softc *sc); 121d97d5ee9SPawel Jakub Dawidek static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state); 122d97d5ee9SPawel Jakub Dawidek static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force); 1232d1661a5SPawel Jakub Dawidek static void g_raid3_dumpconf(struct sbuf *sb, const char *indent, 1242d1661a5SPawel Jakub Dawidek struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 1252d1661a5SPawel Jakub Dawidek static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type); 1263650be51SPawel Jakub Dawidek static int g_raid3_register_request(struct bio *pbp); 1273650be51SPawel Jakub Dawidek static void g_raid3_sync_release(struct g_raid3_softc *sc); 1282d1661a5SPawel Jakub Dawidek 1292d1661a5SPawel Jakub Dawidek static const char * 1302d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(int state) 1312d1661a5SPawel Jakub Dawidek { 1322d1661a5SPawel Jakub Dawidek 1332d1661a5SPawel Jakub Dawidek switch (state) { 1342d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NODISK: 1352d1661a5SPawel Jakub Dawidek return ("NODISK"); 1362d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NONE: 1372d1661a5SPawel Jakub Dawidek return ("NONE"); 1382d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NEW: 1392d1661a5SPawel Jakub Dawidek return ("NEW"); 1402d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_ACTIVE: 1412d1661a5SPawel Jakub Dawidek return ("ACTIVE"); 1422d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_STALE: 1432d1661a5SPawel Jakub Dawidek return ("STALE"); 1442d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_SYNCHRONIZING: 1452d1661a5SPawel Jakub Dawidek return ("SYNCHRONIZING"); 1462d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_DISCONNECTED: 1472d1661a5SPawel Jakub Dawidek return ("DISCONNECTED"); 1482d1661a5SPawel Jakub Dawidek default: 1492d1661a5SPawel Jakub Dawidek return ("INVALID"); 1502d1661a5SPawel Jakub Dawidek } 1512d1661a5SPawel Jakub Dawidek } 1522d1661a5SPawel Jakub Dawidek 1532d1661a5SPawel Jakub Dawidek static const char * 1542d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(int state) 1552d1661a5SPawel Jakub Dawidek { 1562d1661a5SPawel Jakub Dawidek 1572d1661a5SPawel Jakub Dawidek switch (state) { 1582d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_STARTING: 1592d1661a5SPawel Jakub Dawidek return ("STARTING"); 1602d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_DEGRADED: 1612d1661a5SPawel Jakub Dawidek return ("DEGRADED"); 1622d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_COMPLETE: 1632d1661a5SPawel Jakub Dawidek return ("COMPLETE"); 1642d1661a5SPawel Jakub Dawidek default: 1652d1661a5SPawel Jakub Dawidek return ("INVALID"); 1662d1661a5SPawel Jakub Dawidek } 1672d1661a5SPawel Jakub Dawidek } 1682d1661a5SPawel Jakub Dawidek 1692d1661a5SPawel Jakub Dawidek const char * 1702d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(struct g_raid3_disk *disk) 1712d1661a5SPawel Jakub Dawidek { 1722d1661a5SPawel Jakub Dawidek 1732d1661a5SPawel Jakub Dawidek if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 1742d1661a5SPawel Jakub Dawidek return ("[unknown]"); 1752d1661a5SPawel Jakub Dawidek return (disk->d_name); 1762d1661a5SPawel Jakub Dawidek } 1772d1661a5SPawel Jakub Dawidek 178ed940a82SPawel Jakub Dawidek static void * 179ed940a82SPawel Jakub Dawidek g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags) 180ed940a82SPawel Jakub Dawidek { 181ed940a82SPawel Jakub Dawidek void *ptr; 182d4060fa6SAlexander Motin enum g_raid3_zones zone; 183ed940a82SPawel Jakub Dawidek 184d4060fa6SAlexander Motin if (g_raid3_use_malloc || 185d4060fa6SAlexander Motin (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES) 186ed940a82SPawel Jakub Dawidek ptr = malloc(size, M_RAID3, flags); 187ed940a82SPawel Jakub Dawidek else { 188d4060fa6SAlexander Motin ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone, 189d4060fa6SAlexander Motin &sc->sc_zones[zone], flags); 190d4060fa6SAlexander Motin sc->sc_zones[zone].sz_requested++; 191ed940a82SPawel Jakub Dawidek if (ptr == NULL) 192d4060fa6SAlexander Motin sc->sc_zones[zone].sz_failed++; 193ed940a82SPawel Jakub Dawidek } 194ed940a82SPawel Jakub Dawidek return (ptr); 195ed940a82SPawel Jakub Dawidek } 196ed940a82SPawel Jakub Dawidek 197ed940a82SPawel Jakub Dawidek static void 198ed940a82SPawel Jakub Dawidek g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size) 199ed940a82SPawel Jakub Dawidek { 200d4060fa6SAlexander Motin enum g_raid3_zones zone; 201ed940a82SPawel Jakub Dawidek 202d4060fa6SAlexander Motin if (g_raid3_use_malloc || 203d4060fa6SAlexander Motin (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES) 204ed940a82SPawel Jakub Dawidek free(ptr, M_RAID3); 205ed940a82SPawel Jakub Dawidek else { 206d4060fa6SAlexander Motin uma_zfree_arg(sc->sc_zones[zone].sz_zone, 207d4060fa6SAlexander Motin ptr, &sc->sc_zones[zone]); 208ed940a82SPawel Jakub Dawidek } 209ed940a82SPawel Jakub Dawidek } 210ed940a82SPawel Jakub Dawidek 2113650be51SPawel Jakub Dawidek static int 2123650be51SPawel Jakub Dawidek g_raid3_uma_ctor(void *mem, int size, void *arg, int flags) 2133650be51SPawel Jakub Dawidek { 2143650be51SPawel Jakub Dawidek struct g_raid3_zone *sz = arg; 2153650be51SPawel Jakub Dawidek 2160d14fae5SPawel Jakub Dawidek if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max) 2173650be51SPawel Jakub Dawidek return (ENOMEM); 2183650be51SPawel Jakub Dawidek sz->sz_inuse++; 2193650be51SPawel Jakub Dawidek return (0); 2203650be51SPawel Jakub Dawidek } 2213650be51SPawel Jakub Dawidek 2223650be51SPawel Jakub Dawidek static void 2233650be51SPawel Jakub Dawidek g_raid3_uma_dtor(void *mem, int size, void *arg) 2243650be51SPawel Jakub Dawidek { 2253650be51SPawel Jakub Dawidek struct g_raid3_zone *sz = arg; 2263650be51SPawel Jakub Dawidek 2273650be51SPawel Jakub Dawidek sz->sz_inuse--; 2283650be51SPawel Jakub Dawidek } 2293650be51SPawel Jakub Dawidek 23006b215fdSAlexander Motin #define g_raid3_xor(src, dst, size) \ 23106b215fdSAlexander Motin _g_raid3_xor((uint64_t *)(src), \ 2322d1661a5SPawel Jakub Dawidek (uint64_t *)(dst), (size_t)size) 2332d1661a5SPawel Jakub Dawidek static void 23406b215fdSAlexander Motin _g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size) 2352d1661a5SPawel Jakub Dawidek { 2362d1661a5SPawel Jakub Dawidek 2372d1661a5SPawel Jakub Dawidek KASSERT((size % 128) == 0, ("Invalid size: %zu.", size)); 2382d1661a5SPawel Jakub Dawidek for (; size > 0; size -= 128) { 23906b215fdSAlexander Motin *dst++ ^= (*src++); 24006b215fdSAlexander Motin *dst++ ^= (*src++); 24106b215fdSAlexander Motin *dst++ ^= (*src++); 24206b215fdSAlexander Motin *dst++ ^= (*src++); 24306b215fdSAlexander Motin *dst++ ^= (*src++); 24406b215fdSAlexander Motin *dst++ ^= (*src++); 24506b215fdSAlexander Motin *dst++ ^= (*src++); 24606b215fdSAlexander Motin *dst++ ^= (*src++); 24706b215fdSAlexander Motin *dst++ ^= (*src++); 24806b215fdSAlexander Motin *dst++ ^= (*src++); 24906b215fdSAlexander Motin *dst++ ^= (*src++); 25006b215fdSAlexander Motin *dst++ ^= (*src++); 25106b215fdSAlexander Motin *dst++ ^= (*src++); 25206b215fdSAlexander Motin *dst++ ^= (*src++); 25306b215fdSAlexander Motin *dst++ ^= (*src++); 25406b215fdSAlexander Motin *dst++ ^= (*src++); 2552d1661a5SPawel Jakub Dawidek } 2562d1661a5SPawel Jakub Dawidek } 2572d1661a5SPawel Jakub Dawidek 258dba915cfSPawel Jakub Dawidek static int 259dba915cfSPawel Jakub Dawidek g_raid3_is_zero(struct bio *bp) 260dba915cfSPawel Jakub Dawidek { 261dba915cfSPawel Jakub Dawidek static const uint64_t zeros[] = { 262dba915cfSPawel Jakub Dawidek 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 263dba915cfSPawel Jakub Dawidek }; 264dba915cfSPawel Jakub Dawidek u_char *addr; 265dba915cfSPawel Jakub Dawidek ssize_t size; 266dba915cfSPawel Jakub Dawidek 267dba915cfSPawel Jakub Dawidek size = bp->bio_length; 268dba915cfSPawel Jakub Dawidek addr = (u_char *)bp->bio_data; 269dba915cfSPawel Jakub Dawidek for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) { 270dba915cfSPawel Jakub Dawidek if (bcmp(addr, zeros, sizeof(zeros)) != 0) 271dba915cfSPawel Jakub Dawidek return (0); 272dba915cfSPawel Jakub Dawidek } 273dba915cfSPawel Jakub Dawidek return (1); 274dba915cfSPawel Jakub Dawidek } 275dba915cfSPawel Jakub Dawidek 2762d1661a5SPawel Jakub Dawidek /* 2772d1661a5SPawel Jakub Dawidek * --- Events handling functions --- 2782d1661a5SPawel Jakub Dawidek * Events in geom_raid3 are used to maintain disks and device status 2792d1661a5SPawel Jakub Dawidek * from one thread to simplify locking. 2802d1661a5SPawel Jakub Dawidek */ 2812d1661a5SPawel Jakub Dawidek static void 2822d1661a5SPawel Jakub Dawidek g_raid3_event_free(struct g_raid3_event *ep) 2832d1661a5SPawel Jakub Dawidek { 2842d1661a5SPawel Jakub Dawidek 2852d1661a5SPawel Jakub Dawidek free(ep, M_RAID3); 2862d1661a5SPawel Jakub Dawidek } 2872d1661a5SPawel Jakub Dawidek 2882d1661a5SPawel Jakub Dawidek int 2892d1661a5SPawel Jakub Dawidek g_raid3_event_send(void *arg, int state, int flags) 2902d1661a5SPawel Jakub Dawidek { 2912d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 2922d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 2932d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 2942d1661a5SPawel Jakub Dawidek int error; 2952d1661a5SPawel Jakub Dawidek 2962d1661a5SPawel Jakub Dawidek ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK); 2972d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep); 2982d1661a5SPawel Jakub Dawidek if ((flags & G_RAID3_EVENT_DEVICE) != 0) { 2992d1661a5SPawel Jakub Dawidek disk = NULL; 3002d1661a5SPawel Jakub Dawidek sc = arg; 3012d1661a5SPawel Jakub Dawidek } else { 3022d1661a5SPawel Jakub Dawidek disk = arg; 3032d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 3042d1661a5SPawel Jakub Dawidek } 3052d1661a5SPawel Jakub Dawidek ep->e_disk = disk; 3062d1661a5SPawel Jakub Dawidek ep->e_state = state; 3072d1661a5SPawel Jakub Dawidek ep->e_flags = flags; 3082d1661a5SPawel Jakub Dawidek ep->e_error = 0; 3092d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3102d1661a5SPawel Jakub Dawidek TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 3112d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 3122d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3132d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 3142d1661a5SPawel Jakub Dawidek wakeup(sc); 3152d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 3162d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 3172d1661a5SPawel Jakub Dawidek if ((flags & G_RAID3_EVENT_DONTWAIT) != 0) 3182d1661a5SPawel Jakub Dawidek return (0); 3193650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 3202d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 3213650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 3222d1661a5SPawel Jakub Dawidek while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) { 3232d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3242d1661a5SPawel Jakub Dawidek MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event", 3252d1661a5SPawel Jakub Dawidek hz * 5); 3262d1661a5SPawel Jakub Dawidek } 3272d1661a5SPawel Jakub Dawidek error = ep->e_error; 3282d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 3293650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3302d1661a5SPawel Jakub Dawidek return (error); 3312d1661a5SPawel Jakub Dawidek } 3322d1661a5SPawel Jakub Dawidek 3332d1661a5SPawel Jakub Dawidek static struct g_raid3_event * 3342d1661a5SPawel Jakub Dawidek g_raid3_event_get(struct g_raid3_softc *sc) 3352d1661a5SPawel Jakub Dawidek { 3362d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 3372d1661a5SPawel Jakub Dawidek 3382d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3392d1661a5SPawel Jakub Dawidek ep = TAILQ_FIRST(&sc->sc_events); 3402d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 3412d1661a5SPawel Jakub Dawidek return (ep); 3422d1661a5SPawel Jakub Dawidek } 3432d1661a5SPawel Jakub Dawidek 3442d1661a5SPawel Jakub Dawidek static void 345d97d5ee9SPawel Jakub Dawidek g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep) 346d97d5ee9SPawel Jakub Dawidek { 347d97d5ee9SPawel Jakub Dawidek 348d97d5ee9SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 349d97d5ee9SPawel Jakub Dawidek TAILQ_REMOVE(&sc->sc_events, ep, e_next); 350d97d5ee9SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 351d97d5ee9SPawel Jakub Dawidek } 352d97d5ee9SPawel Jakub Dawidek 353d97d5ee9SPawel Jakub Dawidek static void 3542d1661a5SPawel Jakub Dawidek g_raid3_event_cancel(struct g_raid3_disk *disk) 3552d1661a5SPawel Jakub Dawidek { 3562d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 3572d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep, *tmpep; 3582d1661a5SPawel Jakub Dawidek 3592d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 3603650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 3613650be51SPawel Jakub Dawidek 3622d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3632d1661a5SPawel Jakub Dawidek TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 3642d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) 3652d1661a5SPawel Jakub Dawidek continue; 3662d1661a5SPawel Jakub Dawidek if (ep->e_disk != disk) 3672d1661a5SPawel Jakub Dawidek continue; 3682d1661a5SPawel Jakub Dawidek TAILQ_REMOVE(&sc->sc_events, ep, e_next); 3692d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) 3702d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 3712d1661a5SPawel Jakub Dawidek else { 3722d1661a5SPawel Jakub Dawidek ep->e_error = ECANCELED; 3732d1661a5SPawel Jakub Dawidek wakeup(ep); 3742d1661a5SPawel Jakub Dawidek } 3752d1661a5SPawel Jakub Dawidek } 3762d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 3772d1661a5SPawel Jakub Dawidek } 3782d1661a5SPawel Jakub Dawidek 3792d1661a5SPawel Jakub Dawidek /* 3802d1661a5SPawel Jakub Dawidek * Return the number of disks in the given state. 3812d1661a5SPawel Jakub Dawidek * If state is equal to -1, count all connected disks. 3822d1661a5SPawel Jakub Dawidek */ 3832d1661a5SPawel Jakub Dawidek u_int 3842d1661a5SPawel Jakub Dawidek g_raid3_ndisks(struct g_raid3_softc *sc, int state) 3852d1661a5SPawel Jakub Dawidek { 3862d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 387fa6a7837SDavid E. O'Brien u_int n, ndisks; 3882d1661a5SPawel Jakub Dawidek 3893650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 3903650be51SPawel Jakub Dawidek 391fa6a7837SDavid E. O'Brien for (n = ndisks = 0; n < sc->sc_ndisks; n++) { 3922d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 3932d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 3942d1661a5SPawel Jakub Dawidek continue; 3952d1661a5SPawel Jakub Dawidek if (state == -1 || disk->d_state == state) 3962d1661a5SPawel Jakub Dawidek ndisks++; 3972d1661a5SPawel Jakub Dawidek } 3982d1661a5SPawel Jakub Dawidek return (ndisks); 3992d1661a5SPawel Jakub Dawidek } 4002d1661a5SPawel Jakub Dawidek 4012d1661a5SPawel Jakub Dawidek static u_int 4022d1661a5SPawel Jakub Dawidek g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp) 4032d1661a5SPawel Jakub Dawidek { 4042d1661a5SPawel Jakub Dawidek struct bio *bp; 4052d1661a5SPawel Jakub Dawidek u_int nreqs = 0; 4062d1661a5SPawel Jakub Dawidek 4072d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 4082d1661a5SPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 4092d1661a5SPawel Jakub Dawidek if (bp->bio_from == cp) 4102d1661a5SPawel Jakub Dawidek nreqs++; 4112d1661a5SPawel Jakub Dawidek } 4122d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 4132d1661a5SPawel Jakub Dawidek return (nreqs); 4142d1661a5SPawel Jakub Dawidek } 4152d1661a5SPawel Jakub Dawidek 4162d1661a5SPawel Jakub Dawidek static int 4172d1661a5SPawel Jakub Dawidek g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp) 4182d1661a5SPawel Jakub Dawidek { 4192d1661a5SPawel Jakub Dawidek 42079e61493SPawel Jakub Dawidek if (cp->index > 0) { 4212d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, 4222d1661a5SPawel Jakub Dawidek "I/O requests for %s exist, can't destroy it now.", 4232d1661a5SPawel Jakub Dawidek cp->provider->name); 4242d1661a5SPawel Jakub Dawidek return (1); 4252d1661a5SPawel Jakub Dawidek } 4262d1661a5SPawel Jakub Dawidek if (g_raid3_nrequests(sc, cp) > 0) { 4272d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, 4282d1661a5SPawel Jakub Dawidek "I/O requests for %s in queue, can't destroy it now.", 4292d1661a5SPawel Jakub Dawidek cp->provider->name); 4302d1661a5SPawel Jakub Dawidek return (1); 4312d1661a5SPawel Jakub Dawidek } 4322d1661a5SPawel Jakub Dawidek return (0); 4332d1661a5SPawel Jakub Dawidek } 4342d1661a5SPawel Jakub Dawidek 4352d1661a5SPawel Jakub Dawidek static void 436d97d5ee9SPawel Jakub Dawidek g_raid3_destroy_consumer(void *arg, int flags __unused) 437d97d5ee9SPawel Jakub Dawidek { 438d97d5ee9SPawel Jakub Dawidek struct g_consumer *cp; 439d97d5ee9SPawel Jakub Dawidek 4403650be51SPawel Jakub Dawidek g_topology_assert(); 4413650be51SPawel Jakub Dawidek 442d97d5ee9SPawel Jakub Dawidek cp = arg; 443d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 444d97d5ee9SPawel Jakub Dawidek g_detach(cp); 445d97d5ee9SPawel Jakub Dawidek g_destroy_consumer(cp); 446d97d5ee9SPawel Jakub Dawidek } 447d97d5ee9SPawel Jakub Dawidek 448d97d5ee9SPawel Jakub Dawidek static void 4492d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp) 4502d1661a5SPawel Jakub Dawidek { 451d97d5ee9SPawel Jakub Dawidek struct g_provider *pp; 452d97d5ee9SPawel Jakub Dawidek int retaste_wait; 4532d1661a5SPawel Jakub Dawidek 4542d1661a5SPawel Jakub Dawidek g_topology_assert(); 4552d1661a5SPawel Jakub Dawidek 4562d1661a5SPawel Jakub Dawidek cp->private = NULL; 4572d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 4582d1661a5SPawel Jakub Dawidek return; 4592d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name); 460d97d5ee9SPawel Jakub Dawidek pp = cp->provider; 461d97d5ee9SPawel Jakub Dawidek retaste_wait = 0; 462d97d5ee9SPawel Jakub Dawidek if (cp->acw == 1) { 463d97d5ee9SPawel Jakub Dawidek if ((pp->geom->flags & G_GEOM_WITHER) == 0) 464d97d5ee9SPawel Jakub Dawidek retaste_wait = 1; 465d97d5ee9SPawel Jakub Dawidek } 466d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr, 467d97d5ee9SPawel Jakub Dawidek -cp->acw, -cp->ace, 0); 468d97d5ee9SPawel Jakub Dawidek if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 469d97d5ee9SPawel Jakub Dawidek g_access(cp, -cp->acr, -cp->acw, -cp->ace); 470d97d5ee9SPawel Jakub Dawidek if (retaste_wait) { 471d97d5ee9SPawel Jakub Dawidek /* 472d97d5ee9SPawel Jakub Dawidek * After retaste event was send (inside g_access()), we can send 473d97d5ee9SPawel Jakub Dawidek * event to detach and destroy consumer. 474d97d5ee9SPawel Jakub Dawidek * A class, which has consumer to the given provider connected 475d97d5ee9SPawel Jakub Dawidek * will not receive retaste event for the provider. 476d97d5ee9SPawel Jakub Dawidek * This is the way how I ignore retaste events when I close 477d97d5ee9SPawel Jakub Dawidek * consumers opened for write: I detach and destroy consumer 478d97d5ee9SPawel Jakub Dawidek * after retaste event is sent. 479d97d5ee9SPawel Jakub Dawidek */ 480d97d5ee9SPawel Jakub Dawidek g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL); 481d97d5ee9SPawel Jakub Dawidek return; 482d97d5ee9SPawel Jakub Dawidek } 483d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name); 4842d1661a5SPawel Jakub Dawidek g_detach(cp); 4852d1661a5SPawel Jakub Dawidek g_destroy_consumer(cp); 4862d1661a5SPawel Jakub Dawidek } 4872d1661a5SPawel Jakub Dawidek 4882d1661a5SPawel Jakub Dawidek static int 4892d1661a5SPawel Jakub Dawidek g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp) 4902d1661a5SPawel Jakub Dawidek { 49134cb1517SPawel Jakub Dawidek struct g_consumer *cp; 4922d1661a5SPawel Jakub Dawidek int error; 4932d1661a5SPawel Jakub Dawidek 4943650be51SPawel Jakub Dawidek g_topology_assert_not(); 4952d1661a5SPawel Jakub Dawidek KASSERT(disk->d_consumer == NULL, 4962d1661a5SPawel Jakub Dawidek ("Disk already connected (device %s).", disk->d_softc->sc_name)); 4972d1661a5SPawel Jakub Dawidek 4983650be51SPawel Jakub Dawidek g_topology_lock(); 49934cb1517SPawel Jakub Dawidek cp = g_new_consumer(disk->d_softc->sc_geom); 50034cb1517SPawel Jakub Dawidek error = g_attach(cp, pp); 501d97d5ee9SPawel Jakub Dawidek if (error != 0) { 50234cb1517SPawel Jakub Dawidek g_destroy_consumer(cp); 5033650be51SPawel Jakub Dawidek g_topology_unlock(); 50434cb1517SPawel Jakub Dawidek return (error); 50534cb1517SPawel Jakub Dawidek } 50634cb1517SPawel Jakub Dawidek error = g_access(cp, 1, 1, 1); 5073650be51SPawel Jakub Dawidek g_topology_unlock(); 50834cb1517SPawel Jakub Dawidek if (error != 0) { 50934cb1517SPawel Jakub Dawidek g_detach(cp); 51034cb1517SPawel Jakub Dawidek g_destroy_consumer(cp); 511d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).", 512d97d5ee9SPawel Jakub Dawidek pp->name, error); 513d97d5ee9SPawel Jakub Dawidek return (error); 514d97d5ee9SPawel Jakub Dawidek } 51534cb1517SPawel Jakub Dawidek disk->d_consumer = cp; 51634cb1517SPawel Jakub Dawidek disk->d_consumer->private = disk; 51734cb1517SPawel Jakub Dawidek disk->d_consumer->index = 0; 5182d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk)); 5192d1661a5SPawel Jakub Dawidek return (0); 5202d1661a5SPawel Jakub Dawidek } 5212d1661a5SPawel Jakub Dawidek 5222d1661a5SPawel Jakub Dawidek static void 5232d1661a5SPawel Jakub Dawidek g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp) 5242d1661a5SPawel Jakub Dawidek { 5252d1661a5SPawel Jakub Dawidek 5262d1661a5SPawel Jakub Dawidek g_topology_assert(); 5272d1661a5SPawel Jakub Dawidek 5282d1661a5SPawel Jakub Dawidek if (cp == NULL) 5292d1661a5SPawel Jakub Dawidek return; 530d97d5ee9SPawel Jakub Dawidek if (cp->provider != NULL) 5312d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(sc, cp); 532d97d5ee9SPawel Jakub Dawidek else 5332d1661a5SPawel Jakub Dawidek g_destroy_consumer(cp); 5342d1661a5SPawel Jakub Dawidek } 5352d1661a5SPawel Jakub Dawidek 5362d1661a5SPawel Jakub Dawidek /* 5372d1661a5SPawel Jakub Dawidek * Initialize disk. This means allocate memory, create consumer, attach it 5382d1661a5SPawel Jakub Dawidek * to the provider and open access (r1w1e1) to it. 5392d1661a5SPawel Jakub Dawidek */ 5402d1661a5SPawel Jakub Dawidek static struct g_raid3_disk * 5412d1661a5SPawel Jakub Dawidek g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp, 5422d1661a5SPawel Jakub Dawidek struct g_raid3_metadata *md, int *errorp) 5432d1661a5SPawel Jakub Dawidek { 5442d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 5452d1661a5SPawel Jakub Dawidek int error; 5462d1661a5SPawel Jakub Dawidek 5472d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[md->md_no]; 5482d1661a5SPawel Jakub Dawidek error = g_raid3_connect_disk(disk, pp); 54934cb1517SPawel Jakub Dawidek if (error != 0) { 55034cb1517SPawel Jakub Dawidek if (errorp != NULL) 55134cb1517SPawel Jakub Dawidek *errorp = error; 55234cb1517SPawel Jakub Dawidek return (NULL); 55334cb1517SPawel Jakub Dawidek } 5542d1661a5SPawel Jakub Dawidek disk->d_state = G_RAID3_DISK_STATE_NONE; 5552d1661a5SPawel Jakub Dawidek disk->d_flags = md->md_dflags; 5562d1661a5SPawel Jakub Dawidek if (md->md_provider[0] != '\0') 5572d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED; 5582d1661a5SPawel Jakub Dawidek disk->d_sync.ds_consumer = NULL; 5592d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset = md->md_sync_offset; 5602d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset_done = md->md_sync_offset; 561a245a548SPawel Jakub Dawidek disk->d_genid = md->md_genid; 5622d1661a5SPawel Jakub Dawidek disk->d_sync.ds_syncid = md->md_syncid; 5632d1661a5SPawel Jakub Dawidek if (errorp != NULL) 5642d1661a5SPawel Jakub Dawidek *errorp = 0; 5652d1661a5SPawel Jakub Dawidek return (disk); 5662d1661a5SPawel Jakub Dawidek } 5672d1661a5SPawel Jakub Dawidek 5682d1661a5SPawel Jakub Dawidek static void 5692d1661a5SPawel Jakub Dawidek g_raid3_destroy_disk(struct g_raid3_disk *disk) 5702d1661a5SPawel Jakub Dawidek { 5712d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 5722d1661a5SPawel Jakub Dawidek 5733650be51SPawel Jakub Dawidek g_topology_assert_not(); 5743650be51SPawel Jakub Dawidek sc = disk->d_softc; 5753650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 5762d1661a5SPawel Jakub Dawidek 5772d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 5782d1661a5SPawel Jakub Dawidek return; 5792d1661a5SPawel Jakub Dawidek g_raid3_event_cancel(disk); 5802d1661a5SPawel Jakub Dawidek switch (disk->d_state) { 5812d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_SYNCHRONIZING: 5822d1661a5SPawel Jakub Dawidek if (sc->sc_syncdisk != NULL) 5832d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(sc, 1); 5842d1661a5SPawel Jakub Dawidek /* FALLTHROUGH */ 5852d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NEW: 5862d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_STALE: 5872d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_ACTIVE: 5883650be51SPawel Jakub Dawidek g_topology_lock(); 5892d1661a5SPawel Jakub Dawidek g_raid3_disconnect_consumer(sc, disk->d_consumer); 5903650be51SPawel Jakub Dawidek g_topology_unlock(); 5912d1661a5SPawel Jakub Dawidek disk->d_consumer = NULL; 5922d1661a5SPawel Jakub Dawidek break; 5932d1661a5SPawel Jakub Dawidek default: 5942d1661a5SPawel Jakub Dawidek KASSERT(0 == 1, ("Wrong disk state (%s, %s).", 5952d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 5962d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 5972d1661a5SPawel Jakub Dawidek } 5982d1661a5SPawel Jakub Dawidek disk->d_state = G_RAID3_DISK_STATE_NODISK; 5992d1661a5SPawel Jakub Dawidek } 6002d1661a5SPawel Jakub Dawidek 6012d1661a5SPawel Jakub Dawidek static void 6022d1661a5SPawel Jakub Dawidek g_raid3_destroy_device(struct g_raid3_softc *sc) 6032d1661a5SPawel Jakub Dawidek { 6042d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 6059da3072cSPawel Jakub Dawidek struct g_raid3_disk *disk; 6062d1661a5SPawel Jakub Dawidek struct g_geom *gp; 6072d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 6082d1661a5SPawel Jakub Dawidek u_int n; 6092d1661a5SPawel Jakub Dawidek 6103650be51SPawel Jakub Dawidek g_topology_assert_not(); 6113650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 6122d1661a5SPawel Jakub Dawidek 6132d1661a5SPawel Jakub Dawidek gp = sc->sc_geom; 6142d1661a5SPawel Jakub Dawidek if (sc->sc_provider != NULL) 6152d1661a5SPawel Jakub Dawidek g_raid3_destroy_provider(sc); 6169da3072cSPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 6179da3072cSPawel Jakub Dawidek disk = &sc->sc_disks[n]; 618d97d5ee9SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_NODISK) { 6199da3072cSPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 6209da3072cSPawel Jakub Dawidek g_raid3_update_metadata(disk); 6219da3072cSPawel Jakub Dawidek g_raid3_destroy_disk(disk); 6229da3072cSPawel Jakub Dawidek } 623d97d5ee9SPawel Jakub Dawidek } 6242d1661a5SPawel Jakub Dawidek while ((ep = g_raid3_event_get(sc)) != NULL) { 625d97d5ee9SPawel Jakub Dawidek g_raid3_event_remove(sc, ep); 6262d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) 6272d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 6282d1661a5SPawel Jakub Dawidek else { 6292d1661a5SPawel Jakub Dawidek ep->e_error = ECANCELED; 6302d1661a5SPawel Jakub Dawidek ep->e_flags |= G_RAID3_EVENT_DONE; 6312d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep); 6322d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 6332d1661a5SPawel Jakub Dawidek wakeup(ep); 6342d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 6352d1661a5SPawel Jakub Dawidek } 6362d1661a5SPawel Jakub Dawidek } 6372d1661a5SPawel Jakub Dawidek callout_drain(&sc->sc_callout); 6382d1661a5SPawel Jakub Dawidek cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer); 6393650be51SPawel Jakub Dawidek g_topology_lock(); 6402d1661a5SPawel Jakub Dawidek if (cp != NULL) 6412d1661a5SPawel Jakub Dawidek g_raid3_disconnect_consumer(sc, cp); 6422d1661a5SPawel Jakub Dawidek g_wither_geom(sc->sc_sync.ds_geom, ENXIO); 6432d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name); 6442d1661a5SPawel Jakub Dawidek g_wither_geom(gp, ENXIO); 6453650be51SPawel Jakub Dawidek g_topology_unlock(); 646ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 6473650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone); 6483650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone); 6493650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone); 650ed940a82SPawel Jakub Dawidek } 6513650be51SPawel Jakub Dawidek mtx_destroy(&sc->sc_queue_mtx); 6523650be51SPawel Jakub Dawidek mtx_destroy(&sc->sc_events_mtx); 6533650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 6543650be51SPawel Jakub Dawidek sx_destroy(&sc->sc_lock); 6552d1661a5SPawel Jakub Dawidek } 6562d1661a5SPawel Jakub Dawidek 6572d1661a5SPawel Jakub Dawidek static void 6582d1661a5SPawel Jakub Dawidek g_raid3_orphan(struct g_consumer *cp) 6592d1661a5SPawel Jakub Dawidek { 6602d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 6612d1661a5SPawel Jakub Dawidek 6622d1661a5SPawel Jakub Dawidek g_topology_assert(); 6632d1661a5SPawel Jakub Dawidek 6642d1661a5SPawel Jakub Dawidek disk = cp->private; 6652d1661a5SPawel Jakub Dawidek if (disk == NULL) 6662d1661a5SPawel Jakub Dawidek return; 667ea973705SPawel Jakub Dawidek disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID; 6682d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED, 6692d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 6702d1661a5SPawel Jakub Dawidek } 6712d1661a5SPawel Jakub Dawidek 6722d1661a5SPawel Jakub Dawidek static int 6732d1661a5SPawel Jakub Dawidek g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md) 6742d1661a5SPawel Jakub Dawidek { 6752d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 6762d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 6772d1661a5SPawel Jakub Dawidek off_t offset, length; 6782d1661a5SPawel Jakub Dawidek u_char *sector; 679d97d5ee9SPawel Jakub Dawidek int error = 0; 6802d1661a5SPawel Jakub Dawidek 6813650be51SPawel Jakub Dawidek g_topology_assert_not(); 6822d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 6833650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 6843650be51SPawel Jakub Dawidek 6852d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 6862d1661a5SPawel Jakub Dawidek KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name)); 6872d1661a5SPawel Jakub Dawidek KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name)); 6883650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 689d97d5ee9SPawel Jakub Dawidek ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr, 690d97d5ee9SPawel Jakub Dawidek cp->acw, cp->ace)); 6912d1661a5SPawel Jakub Dawidek length = cp->provider->sectorsize; 6922d1661a5SPawel Jakub Dawidek offset = cp->provider->mediasize - length; 6932d1661a5SPawel Jakub Dawidek sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO); 6942d1661a5SPawel Jakub Dawidek if (md != NULL) 6952d1661a5SPawel Jakub Dawidek raid3_metadata_encode(md, sector); 6962d1661a5SPawel Jakub Dawidek error = g_write_data(cp, offset, sector, length); 6972d1661a5SPawel Jakub Dawidek free(sector, M_RAID3); 6982d1661a5SPawel Jakub Dawidek if (error != 0) { 6993aae74ecSPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 7003aae74ecSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot write metadata on %s " 7013aae74ecSPawel Jakub Dawidek "(device=%s, error=%d).", 7023aae74ecSPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name, error); 7033aae74ecSPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 7043aae74ecSPawel Jakub Dawidek } else { 7053aae74ecSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Cannot write metadata on %s " 7063aae74ecSPawel Jakub Dawidek "(device=%s, error=%d).", 7073aae74ecSPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name, error); 7083aae74ecSPawel Jakub Dawidek } 7093aae74ecSPawel Jakub Dawidek if (g_raid3_disconnect_on_failure && 7103aae74ecSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 7113aae74ecSPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 7123aae74ecSPawel Jakub Dawidek g_raid3_event_send(disk, 7133aae74ecSPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 7142d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 7152d1661a5SPawel Jakub Dawidek } 7163aae74ecSPawel Jakub Dawidek } 7172d1661a5SPawel Jakub Dawidek return (error); 7182d1661a5SPawel Jakub Dawidek } 7192d1661a5SPawel Jakub Dawidek 7202d1661a5SPawel Jakub Dawidek int 7212d1661a5SPawel Jakub Dawidek g_raid3_clear_metadata(struct g_raid3_disk *disk) 7222d1661a5SPawel Jakub Dawidek { 7232d1661a5SPawel Jakub Dawidek int error; 7242d1661a5SPawel Jakub Dawidek 7253650be51SPawel Jakub Dawidek g_topology_assert_not(); 7263650be51SPawel Jakub Dawidek sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 7273650be51SPawel Jakub Dawidek 7282d1661a5SPawel Jakub Dawidek error = g_raid3_write_metadata(disk, NULL); 7292d1661a5SPawel Jakub Dawidek if (error == 0) { 7302d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Metadata on %s cleared.", 7312d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 7322d1661a5SPawel Jakub Dawidek } else { 7332d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, 7342d1661a5SPawel Jakub Dawidek "Cannot clear metadata on disk %s (error=%d).", 7352d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), error); 7362d1661a5SPawel Jakub Dawidek } 7372d1661a5SPawel Jakub Dawidek return (error); 7382d1661a5SPawel Jakub Dawidek } 7392d1661a5SPawel Jakub Dawidek 7402d1661a5SPawel Jakub Dawidek void 7412d1661a5SPawel Jakub Dawidek g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md) 7422d1661a5SPawel Jakub Dawidek { 7432d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 744e6890985SPawel Jakub Dawidek struct g_provider *pp; 7452d1661a5SPawel Jakub Dawidek 74639552dffSMark Johnston bzero(md, sizeof(*md)); 7472d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 7482d1661a5SPawel Jakub Dawidek strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic)); 7492d1661a5SPawel Jakub Dawidek md->md_version = G_RAID3_VERSION; 7502d1661a5SPawel Jakub Dawidek strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 7512d1661a5SPawel Jakub Dawidek md->md_id = sc->sc_id; 7522d1661a5SPawel Jakub Dawidek md->md_all = sc->sc_ndisks; 753a245a548SPawel Jakub Dawidek md->md_genid = sc->sc_genid; 7542d1661a5SPawel Jakub Dawidek md->md_mediasize = sc->sc_mediasize; 7552d1661a5SPawel Jakub Dawidek md->md_sectorsize = sc->sc_sectorsize; 7562d1661a5SPawel Jakub Dawidek md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK); 7572d1661a5SPawel Jakub Dawidek md->md_no = disk->d_no; 7582d1661a5SPawel Jakub Dawidek md->md_syncid = disk->d_sync.ds_syncid; 7592d1661a5SPawel Jakub Dawidek md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK); 76039552dffSMark Johnston if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 761c082905bSPawel Jakub Dawidek md->md_sync_offset = 762c082905bSPawel Jakub Dawidek disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1); 763c082905bSPawel Jakub Dawidek } 764e6890985SPawel Jakub Dawidek if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL) 765e6890985SPawel Jakub Dawidek pp = disk->d_consumer->provider; 766e6890985SPawel Jakub Dawidek else 767e6890985SPawel Jakub Dawidek pp = NULL; 768e6890985SPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL) 769e6890985SPawel Jakub Dawidek strlcpy(md->md_provider, pp->name, sizeof(md->md_provider)); 770e6890985SPawel Jakub Dawidek if (pp != NULL) 771e6890985SPawel Jakub Dawidek md->md_provsize = pp->mediasize; 7722d1661a5SPawel Jakub Dawidek } 7732d1661a5SPawel Jakub Dawidek 7742d1661a5SPawel Jakub Dawidek void 7752d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(struct g_raid3_disk *disk) 7762d1661a5SPawel Jakub Dawidek { 777*2cc5a480SMateusz Guzik struct g_raid3_softc *sc __diagused; 7782d1661a5SPawel Jakub Dawidek struct g_raid3_metadata md; 7792d1661a5SPawel Jakub Dawidek int error; 7802d1661a5SPawel Jakub Dawidek 7813650be51SPawel Jakub Dawidek g_topology_assert_not(); 7823650be51SPawel Jakub Dawidek sc = disk->d_softc; 7833650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 7843650be51SPawel Jakub Dawidek 7852d1661a5SPawel Jakub Dawidek g_raid3_fill_metadata(disk, &md); 7862d1661a5SPawel Jakub Dawidek error = g_raid3_write_metadata(disk, &md); 7872d1661a5SPawel Jakub Dawidek if (error == 0) { 7882d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Metadata on %s updated.", 7892d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 7902d1661a5SPawel Jakub Dawidek } else { 7912d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, 7922d1661a5SPawel Jakub Dawidek "Cannot update metadata on disk %s (error=%d).", 7932d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), error); 7942d1661a5SPawel Jakub Dawidek } 7952d1661a5SPawel Jakub Dawidek } 7962d1661a5SPawel Jakub Dawidek 7972d1661a5SPawel Jakub Dawidek static void 798d97d5ee9SPawel Jakub Dawidek g_raid3_bump_syncid(struct g_raid3_softc *sc) 7992d1661a5SPawel Jakub Dawidek { 8002d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 8012d1661a5SPawel Jakub Dawidek u_int n; 8022d1661a5SPawel Jakub Dawidek 8033650be51SPawel Jakub Dawidek g_topology_assert_not(); 8043650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 8052d1661a5SPawel Jakub Dawidek KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0, 8062d1661a5SPawel Jakub Dawidek ("%s called with no active disks (device=%s).", __func__, 8072d1661a5SPawel Jakub Dawidek sc->sc_name)); 8082d1661a5SPawel Jakub Dawidek 8092d1661a5SPawel Jakub Dawidek sc->sc_syncid++; 810a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name, 811a245a548SPawel Jakub Dawidek sc->sc_syncid); 8122d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 8132d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 8142d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 8152d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 8162d1661a5SPawel Jakub Dawidek disk->d_sync.ds_syncid = sc->sc_syncid; 8172d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 8182d1661a5SPawel Jakub Dawidek } 8192d1661a5SPawel Jakub Dawidek } 8202d1661a5SPawel Jakub Dawidek } 8212d1661a5SPawel Jakub Dawidek 8224d006a98SPawel Jakub Dawidek static void 823a245a548SPawel Jakub Dawidek g_raid3_bump_genid(struct g_raid3_softc *sc) 824a245a548SPawel Jakub Dawidek { 825a245a548SPawel Jakub Dawidek struct g_raid3_disk *disk; 826a245a548SPawel Jakub Dawidek u_int n; 827a245a548SPawel Jakub Dawidek 8283650be51SPawel Jakub Dawidek g_topology_assert_not(); 8293650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 830a245a548SPawel Jakub Dawidek KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0, 831a245a548SPawel Jakub Dawidek ("%s called with no active disks (device=%s).", __func__, 832a245a548SPawel Jakub Dawidek sc->sc_name)); 833a245a548SPawel Jakub Dawidek 834a245a548SPawel Jakub Dawidek sc->sc_genid++; 835a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name, 836a245a548SPawel Jakub Dawidek sc->sc_genid); 837a245a548SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 838a245a548SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 839a245a548SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 840a245a548SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 841a245a548SPawel Jakub Dawidek disk->d_genid = sc->sc_genid; 842a245a548SPawel Jakub Dawidek g_raid3_update_metadata(disk); 843a245a548SPawel Jakub Dawidek } 844a245a548SPawel Jakub Dawidek } 845a245a548SPawel Jakub Dawidek } 846a245a548SPawel Jakub Dawidek 8470962f942SPawel Jakub Dawidek static int 8483650be51SPawel Jakub Dawidek g_raid3_idle(struct g_raid3_softc *sc, int acw) 8494d006a98SPawel Jakub Dawidek { 8504d006a98SPawel Jakub Dawidek struct g_raid3_disk *disk; 8514d006a98SPawel Jakub Dawidek u_int i; 8520962f942SPawel Jakub Dawidek int timeout; 8534d006a98SPawel Jakub Dawidek 8543650be51SPawel Jakub Dawidek g_topology_assert_not(); 8553650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 8563650be51SPawel Jakub Dawidek 8570962f942SPawel Jakub Dawidek if (sc->sc_provider == NULL) 8580962f942SPawel Jakub Dawidek return (0); 859501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 860501250baSPawel Jakub Dawidek return (0); 8610962f942SPawel Jakub Dawidek if (sc->sc_idle) 8620962f942SPawel Jakub Dawidek return (0); 8630962f942SPawel Jakub Dawidek if (sc->sc_writes > 0) 8640962f942SPawel Jakub Dawidek return (0); 8653650be51SPawel Jakub Dawidek if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) { 86601f1f41cSPawel Jakub Dawidek timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write); 867f62c1a47SAlexander Motin if (!g_raid3_shutdown && timeout > 0) 8680962f942SPawel Jakub Dawidek return (timeout); 8690962f942SPawel Jakub Dawidek } 8704d006a98SPawel Jakub Dawidek sc->sc_idle = 1; 8714d006a98SPawel Jakub Dawidek for (i = 0; i < sc->sc_ndisks; i++) { 8724d006a98SPawel Jakub Dawidek disk = &sc->sc_disks[i]; 8734d006a98SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) 8744d006a98SPawel Jakub Dawidek continue; 8754d006a98SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.", 8764d006a98SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 8774d006a98SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 8784d006a98SPawel Jakub Dawidek g_raid3_update_metadata(disk); 8794d006a98SPawel Jakub Dawidek } 8800962f942SPawel Jakub Dawidek return (0); 8814d006a98SPawel Jakub Dawidek } 8824d006a98SPawel Jakub Dawidek 8834d006a98SPawel Jakub Dawidek static void 8844d006a98SPawel Jakub Dawidek g_raid3_unidle(struct g_raid3_softc *sc) 8854d006a98SPawel Jakub Dawidek { 8864d006a98SPawel Jakub Dawidek struct g_raid3_disk *disk; 8874d006a98SPawel Jakub Dawidek u_int i; 8884d006a98SPawel Jakub Dawidek 8893650be51SPawel Jakub Dawidek g_topology_assert_not(); 8903650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 8913650be51SPawel Jakub Dawidek 892501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 893501250baSPawel Jakub Dawidek return; 8944d006a98SPawel Jakub Dawidek sc->sc_idle = 0; 89501f1f41cSPawel Jakub Dawidek sc->sc_last_write = time_uptime; 8964d006a98SPawel Jakub Dawidek for (i = 0; i < sc->sc_ndisks; i++) { 8974d006a98SPawel Jakub Dawidek disk = &sc->sc_disks[i]; 8984d006a98SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) 8994d006a98SPawel Jakub Dawidek continue; 9004d006a98SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.", 9014d006a98SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 9024d006a98SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 9034d006a98SPawel Jakub Dawidek g_raid3_update_metadata(disk); 9044d006a98SPawel Jakub Dawidek } 9054d006a98SPawel Jakub Dawidek } 9064d006a98SPawel Jakub Dawidek 9072d1661a5SPawel Jakub Dawidek /* 9082d1661a5SPawel Jakub Dawidek * Treat bio_driver1 field in parent bio as list head and field bio_caller1 9092d1661a5SPawel Jakub Dawidek * in child bio as pointer to the next element on the list. 9102d1661a5SPawel Jakub Dawidek */ 9112d1661a5SPawel Jakub Dawidek #define G_RAID3_HEAD_BIO(pbp) (pbp)->bio_driver1 9122d1661a5SPawel Jakub Dawidek 9132d1661a5SPawel Jakub Dawidek #define G_RAID3_NEXT_BIO(cbp) (cbp)->bio_caller1 9142d1661a5SPawel Jakub Dawidek 9152d1661a5SPawel Jakub Dawidek #define G_RAID3_FOREACH_BIO(pbp, bp) \ 9162d1661a5SPawel Jakub Dawidek for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL; \ 9172d1661a5SPawel Jakub Dawidek (bp) = G_RAID3_NEXT_BIO(bp)) 9182d1661a5SPawel Jakub Dawidek 9192d1661a5SPawel Jakub Dawidek #define G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp) \ 9202d1661a5SPawel Jakub Dawidek for ((bp) = G_RAID3_HEAD_BIO(pbp); \ 9212d1661a5SPawel Jakub Dawidek (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1); \ 9222d1661a5SPawel Jakub Dawidek (bp) = (tmpbp)) 9232d1661a5SPawel Jakub Dawidek 9242d1661a5SPawel Jakub Dawidek static void 9252d1661a5SPawel Jakub Dawidek g_raid3_init_bio(struct bio *pbp) 9262d1661a5SPawel Jakub Dawidek { 9272d1661a5SPawel Jakub Dawidek 9282d1661a5SPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = NULL; 9292d1661a5SPawel Jakub Dawidek } 9302d1661a5SPawel Jakub Dawidek 9312d1661a5SPawel Jakub Dawidek static void 932dba915cfSPawel Jakub Dawidek g_raid3_remove_bio(struct bio *cbp) 933dba915cfSPawel Jakub Dawidek { 934dba915cfSPawel Jakub Dawidek struct bio *pbp, *bp; 935dba915cfSPawel Jakub Dawidek 936dba915cfSPawel Jakub Dawidek pbp = cbp->bio_parent; 937dba915cfSPawel Jakub Dawidek if (G_RAID3_HEAD_BIO(pbp) == cbp) 938dba915cfSPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp); 939dba915cfSPawel Jakub Dawidek else { 940dba915cfSPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, bp) { 941dba915cfSPawel Jakub Dawidek if (G_RAID3_NEXT_BIO(bp) == cbp) { 942dba915cfSPawel Jakub Dawidek G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp); 943dba915cfSPawel Jakub Dawidek break; 944dba915cfSPawel Jakub Dawidek } 945dba915cfSPawel Jakub Dawidek } 946dba915cfSPawel Jakub Dawidek } 947dba915cfSPawel Jakub Dawidek G_RAID3_NEXT_BIO(cbp) = NULL; 948dba915cfSPawel Jakub Dawidek } 949dba915cfSPawel Jakub Dawidek 950dba915cfSPawel Jakub Dawidek static void 951dba915cfSPawel Jakub Dawidek g_raid3_replace_bio(struct bio *sbp, struct bio *dbp) 952dba915cfSPawel Jakub Dawidek { 953dba915cfSPawel Jakub Dawidek struct bio *pbp, *bp; 954dba915cfSPawel Jakub Dawidek 955dba915cfSPawel Jakub Dawidek g_raid3_remove_bio(sbp); 956dba915cfSPawel Jakub Dawidek pbp = dbp->bio_parent; 957dba915cfSPawel Jakub Dawidek G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp); 958dba915cfSPawel Jakub Dawidek if (G_RAID3_HEAD_BIO(pbp) == dbp) 959dba915cfSPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = sbp; 960dba915cfSPawel Jakub Dawidek else { 961dba915cfSPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, bp) { 962dba915cfSPawel Jakub Dawidek if (G_RAID3_NEXT_BIO(bp) == dbp) { 963dba915cfSPawel Jakub Dawidek G_RAID3_NEXT_BIO(bp) = sbp; 964dba915cfSPawel Jakub Dawidek break; 965dba915cfSPawel Jakub Dawidek } 966dba915cfSPawel Jakub Dawidek } 967dba915cfSPawel Jakub Dawidek } 968dba915cfSPawel Jakub Dawidek G_RAID3_NEXT_BIO(dbp) = NULL; 969dba915cfSPawel Jakub Dawidek } 970dba915cfSPawel Jakub Dawidek 971dba915cfSPawel Jakub Dawidek static void 9722d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp) 9732d1661a5SPawel Jakub Dawidek { 9742d1661a5SPawel Jakub Dawidek struct bio *bp, *pbp; 9752d1661a5SPawel Jakub Dawidek size_t size; 9762d1661a5SPawel Jakub Dawidek 9772d1661a5SPawel Jakub Dawidek pbp = cbp->bio_parent; 9782d1661a5SPawel Jakub Dawidek pbp->bio_children--; 9792d1661a5SPawel Jakub Dawidek KASSERT(cbp->bio_data != NULL, ("NULL bio_data")); 9802d1661a5SPawel Jakub Dawidek size = pbp->bio_length / (sc->sc_ndisks - 1); 981ed940a82SPawel Jakub Dawidek g_raid3_free(sc, cbp->bio_data, size); 9822d1661a5SPawel Jakub Dawidek if (G_RAID3_HEAD_BIO(pbp) == cbp) { 9832d1661a5SPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp); 9842d1661a5SPawel Jakub Dawidek G_RAID3_NEXT_BIO(cbp) = NULL; 9852d1661a5SPawel Jakub Dawidek g_destroy_bio(cbp); 9862d1661a5SPawel Jakub Dawidek } else { 9872d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, bp) { 9882d1661a5SPawel Jakub Dawidek if (G_RAID3_NEXT_BIO(bp) == cbp) 9892d1661a5SPawel Jakub Dawidek break; 9902d1661a5SPawel Jakub Dawidek } 991dba915cfSPawel Jakub Dawidek if (bp != NULL) { 992dba915cfSPawel Jakub Dawidek KASSERT(G_RAID3_NEXT_BIO(bp) != NULL, 993dba915cfSPawel Jakub Dawidek ("NULL bp->bio_driver1")); 9942d1661a5SPawel Jakub Dawidek G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp); 9952d1661a5SPawel Jakub Dawidek G_RAID3_NEXT_BIO(cbp) = NULL; 996dba915cfSPawel Jakub Dawidek } 9972d1661a5SPawel Jakub Dawidek g_destroy_bio(cbp); 9982d1661a5SPawel Jakub Dawidek } 9992d1661a5SPawel Jakub Dawidek } 10002d1661a5SPawel Jakub Dawidek 10012d1661a5SPawel Jakub Dawidek static struct bio * 10022d1661a5SPawel Jakub Dawidek g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp) 10032d1661a5SPawel Jakub Dawidek { 10042d1661a5SPawel Jakub Dawidek struct bio *bp, *cbp; 10052d1661a5SPawel Jakub Dawidek size_t size; 10063650be51SPawel Jakub Dawidek int memflag; 10072d1661a5SPawel Jakub Dawidek 10082d1661a5SPawel Jakub Dawidek cbp = g_clone_bio(pbp); 10092d1661a5SPawel Jakub Dawidek if (cbp == NULL) 10102d1661a5SPawel Jakub Dawidek return (NULL); 10112d1661a5SPawel Jakub Dawidek size = pbp->bio_length / (sc->sc_ndisks - 1); 10123650be51SPawel Jakub Dawidek if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0) 10133650be51SPawel Jakub Dawidek memflag = M_WAITOK; 10142d1661a5SPawel Jakub Dawidek else 10153650be51SPawel Jakub Dawidek memflag = M_NOWAIT; 1016ed940a82SPawel Jakub Dawidek cbp->bio_data = g_raid3_alloc(sc, size, memflag); 10173650be51SPawel Jakub Dawidek if (cbp->bio_data == NULL) { 10182d1661a5SPawel Jakub Dawidek pbp->bio_children--; 10192d1661a5SPawel Jakub Dawidek g_destroy_bio(cbp); 10202d1661a5SPawel Jakub Dawidek return (NULL); 10212d1661a5SPawel Jakub Dawidek } 10222d1661a5SPawel Jakub Dawidek G_RAID3_NEXT_BIO(cbp) = NULL; 10232d1661a5SPawel Jakub Dawidek if (G_RAID3_HEAD_BIO(pbp) == NULL) 10242d1661a5SPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = cbp; 10252d1661a5SPawel Jakub Dawidek else { 10262d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, bp) { 10272d1661a5SPawel Jakub Dawidek if (G_RAID3_NEXT_BIO(bp) == NULL) { 10282d1661a5SPawel Jakub Dawidek G_RAID3_NEXT_BIO(bp) = cbp; 10292d1661a5SPawel Jakub Dawidek break; 10302d1661a5SPawel Jakub Dawidek } 10312d1661a5SPawel Jakub Dawidek } 10322d1661a5SPawel Jakub Dawidek } 10332d1661a5SPawel Jakub Dawidek return (cbp); 10342d1661a5SPawel Jakub Dawidek } 10352d1661a5SPawel Jakub Dawidek 10362d1661a5SPawel Jakub Dawidek static void 10372d1661a5SPawel Jakub Dawidek g_raid3_scatter(struct bio *pbp) 10382d1661a5SPawel Jakub Dawidek { 10392d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 10402d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 1041ee40c7aaSPawel Jakub Dawidek struct bio *bp, *cbp, *tmpbp; 10422d1661a5SPawel Jakub Dawidek off_t atom, cadd, padd, left; 104306b215fdSAlexander Motin int first; 10442d1661a5SPawel Jakub Dawidek 10452d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc; 10462d1661a5SPawel Jakub Dawidek bp = NULL; 10472d1661a5SPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) { 10482d1661a5SPawel Jakub Dawidek /* 10492d1661a5SPawel Jakub Dawidek * Find bio for which we should calculate data. 10502d1661a5SPawel Jakub Dawidek */ 10512d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 10522d1661a5SPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) { 10532d1661a5SPawel Jakub Dawidek bp = cbp; 10542d1661a5SPawel Jakub Dawidek break; 10552d1661a5SPawel Jakub Dawidek } 10562d1661a5SPawel Jakub Dawidek } 10572d1661a5SPawel Jakub Dawidek KASSERT(bp != NULL, ("NULL parity bio.")); 10582d1661a5SPawel Jakub Dawidek } 10592d1661a5SPawel Jakub Dawidek atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 10602d1661a5SPawel Jakub Dawidek cadd = padd = 0; 10612d1661a5SPawel Jakub Dawidek for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) { 10622d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 10632d1661a5SPawel Jakub Dawidek if (cbp == bp) 10642d1661a5SPawel Jakub Dawidek continue; 10652d1661a5SPawel Jakub Dawidek bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom); 10662d1661a5SPawel Jakub Dawidek padd += atom; 10672d1661a5SPawel Jakub Dawidek } 10682d1661a5SPawel Jakub Dawidek cadd += atom; 10692d1661a5SPawel Jakub Dawidek } 10702d1661a5SPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) { 10712d1661a5SPawel Jakub Dawidek /* 10722d1661a5SPawel Jakub Dawidek * Calculate parity. 10732d1661a5SPawel Jakub Dawidek */ 107406b215fdSAlexander Motin first = 1; 10752d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 10762d1661a5SPawel Jakub Dawidek if (cbp == bp) 10772d1661a5SPawel Jakub Dawidek continue; 107806b215fdSAlexander Motin if (first) { 107906b215fdSAlexander Motin bcopy(cbp->bio_data, bp->bio_data, 10802d1661a5SPawel Jakub Dawidek bp->bio_length); 108106b215fdSAlexander Motin first = 0; 108206b215fdSAlexander Motin } else { 108306b215fdSAlexander Motin g_raid3_xor(cbp->bio_data, bp->bio_data, 108406b215fdSAlexander Motin bp->bio_length); 108506b215fdSAlexander Motin } 10862d1661a5SPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0) 10872d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 10882d1661a5SPawel Jakub Dawidek } 10892d1661a5SPawel Jakub Dawidek } 1090ee40c7aaSPawel Jakub Dawidek G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 10912d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 10922d1661a5SPawel Jakub Dawidek 10932d1661a5SPawel Jakub Dawidek disk = cbp->bio_caller2; 10942d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 10952d1661a5SPawel Jakub Dawidek cbp->bio_to = cp->provider; 10962d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Sending request."); 10973650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1098d97d5ee9SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1099d97d5ee9SPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 110079e61493SPawel Jakub Dawidek cp->index++; 11010962f942SPawel Jakub Dawidek sc->sc_writes++; 11022d1661a5SPawel Jakub Dawidek g_io_request(cbp, cp); 11032d1661a5SPawel Jakub Dawidek } 11042d1661a5SPawel Jakub Dawidek } 11052d1661a5SPawel Jakub Dawidek 11062d1661a5SPawel Jakub Dawidek static void 11072d1661a5SPawel Jakub Dawidek g_raid3_gather(struct bio *pbp) 11082d1661a5SPawel Jakub Dawidek { 11092d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 11102d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 1111f5a2f7feSPawel Jakub Dawidek struct bio *xbp, *fbp, *cbp; 11122d1661a5SPawel Jakub Dawidek off_t atom, cadd, padd, left; 11132d1661a5SPawel Jakub Dawidek 11142d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc; 11152d1661a5SPawel Jakub Dawidek /* 1116f5a2f7feSPawel Jakub Dawidek * Find bio for which we have to calculate data. 11172d1661a5SPawel Jakub Dawidek * While going through this path, check if all requests 11182d1661a5SPawel Jakub Dawidek * succeeded, if not, deny whole request. 1119f5a2f7feSPawel Jakub Dawidek * If we're in COMPLETE mode, we allow one request to fail, 1120f5a2f7feSPawel Jakub Dawidek * so if we find one, we're sending it to the parity consumer. 1121f5a2f7feSPawel Jakub Dawidek * If there are more failed requests, we deny whole request. 11222d1661a5SPawel Jakub Dawidek */ 1123f5a2f7feSPawel Jakub Dawidek xbp = fbp = NULL; 11242d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 11252d1661a5SPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) { 1126f5a2f7feSPawel Jakub Dawidek KASSERT(xbp == NULL, ("More than one parity bio.")); 1127f5a2f7feSPawel Jakub Dawidek xbp = cbp; 11282d1661a5SPawel Jakub Dawidek } 11292d1661a5SPawel Jakub Dawidek if (cbp->bio_error == 0) 11302d1661a5SPawel Jakub Dawidek continue; 11312d1661a5SPawel Jakub Dawidek /* 11322d1661a5SPawel Jakub Dawidek * Found failed request. 11332d1661a5SPawel Jakub Dawidek */ 1134f5a2f7feSPawel Jakub Dawidek if (fbp == NULL) { 1135f5a2f7feSPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) { 11362d1661a5SPawel Jakub Dawidek /* 1137f5a2f7feSPawel Jakub Dawidek * We are already in degraded mode, so we can't 1138f5a2f7feSPawel Jakub Dawidek * accept any failures. 11392d1661a5SPawel Jakub Dawidek */ 1140f5a2f7feSPawel Jakub Dawidek if (pbp->bio_error == 0) 114117fec17eSPawel Jakub Dawidek pbp->bio_error = cbp->bio_error; 11422d1661a5SPawel Jakub Dawidek } else { 1143f5a2f7feSPawel Jakub Dawidek fbp = cbp; 11442d1661a5SPawel Jakub Dawidek } 1145f5a2f7feSPawel Jakub Dawidek } else { 11462d1661a5SPawel Jakub Dawidek /* 11472d1661a5SPawel Jakub Dawidek * Next failed request, that's too many. 11482d1661a5SPawel Jakub Dawidek */ 11492d1661a5SPawel Jakub Dawidek if (pbp->bio_error == 0) 1150f5a2f7feSPawel Jakub Dawidek pbp->bio_error = fbp->bio_error; 11512d1661a5SPawel Jakub Dawidek } 11523aae74ecSPawel Jakub Dawidek disk = cbp->bio_caller2; 11533aae74ecSPawel Jakub Dawidek if (disk == NULL) 11543aae74ecSPawel Jakub Dawidek continue; 11553aae74ecSPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 11563aae74ecSPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 11573aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).", 11583aae74ecSPawel Jakub Dawidek cbp->bio_error); 11593aae74ecSPawel Jakub Dawidek } else { 11603aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).", 11613aae74ecSPawel Jakub Dawidek cbp->bio_error); 11623aae74ecSPawel Jakub Dawidek } 11633aae74ecSPawel Jakub Dawidek if (g_raid3_disconnect_on_failure && 11643aae74ecSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 11653aae74ecSPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 11663aae74ecSPawel Jakub Dawidek g_raid3_event_send(disk, 11673aae74ecSPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 11683aae74ecSPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 11693aae74ecSPawel Jakub Dawidek } 11702d1661a5SPawel Jakub Dawidek } 11712d1661a5SPawel Jakub Dawidek if (pbp->bio_error != 0) 11722d1661a5SPawel Jakub Dawidek goto finish; 1173dba915cfSPawel Jakub Dawidek if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) { 1174dba915cfSPawel Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY; 1175dba915cfSPawel Jakub Dawidek if (xbp != fbp) 1176dba915cfSPawel Jakub Dawidek g_raid3_replace_bio(xbp, fbp); 1177dba915cfSPawel Jakub Dawidek g_raid3_destroy_bio(sc, fbp); 1178dba915cfSPawel Jakub Dawidek } else if (fbp != NULL) { 11792d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 11802d1661a5SPawel Jakub Dawidek 11812d1661a5SPawel Jakub Dawidek /* 11822d1661a5SPawel Jakub Dawidek * One request failed, so send the same request to 11832d1661a5SPawel Jakub Dawidek * the parity consumer. 11842d1661a5SPawel Jakub Dawidek */ 1185f5a2f7feSPawel Jakub Dawidek disk = pbp->bio_driver2; 11862d1661a5SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) { 1187f5a2f7feSPawel Jakub Dawidek pbp->bio_error = fbp->bio_error; 11882d1661a5SPawel Jakub Dawidek goto finish; 11892d1661a5SPawel Jakub Dawidek } 11902d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 11912d1661a5SPawel Jakub Dawidek pbp->bio_inbed--; 1192f5a2f7feSPawel Jakub Dawidek fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR); 1193f5a2f7feSPawel Jakub Dawidek if (disk->d_no == sc->sc_ndisks - 1) 1194f5a2f7feSPawel Jakub Dawidek fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1195f5a2f7feSPawel Jakub Dawidek fbp->bio_error = 0; 1196f5a2f7feSPawel Jakub Dawidek fbp->bio_completed = 0; 1197f5a2f7feSPawel Jakub Dawidek fbp->bio_children = 0; 1198f5a2f7feSPawel Jakub Dawidek fbp->bio_inbed = 0; 11992d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 1200f5a2f7feSPawel Jakub Dawidek fbp->bio_caller2 = disk; 1201f5a2f7feSPawel Jakub Dawidek fbp->bio_to = cp->provider; 1202f5a2f7feSPawel Jakub Dawidek G_RAID3_LOGREQ(3, fbp, "Sending request (recover)."); 12033650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 12042d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 12052d1661a5SPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 120679e61493SPawel Jakub Dawidek cp->index++; 1207f5a2f7feSPawel Jakub Dawidek g_io_request(fbp, cp); 12082d1661a5SPawel Jakub Dawidek return; 12092d1661a5SPawel Jakub Dawidek } 1210f5a2f7feSPawel Jakub Dawidek if (xbp != NULL) { 1211f5a2f7feSPawel Jakub Dawidek /* 1212f5a2f7feSPawel Jakub Dawidek * Calculate parity. 1213f5a2f7feSPawel Jakub Dawidek */ 1214f5a2f7feSPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 1215f5a2f7feSPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) 1216f5a2f7feSPawel Jakub Dawidek continue; 121706b215fdSAlexander Motin g_raid3_xor(cbp->bio_data, xbp->bio_data, 1218f5a2f7feSPawel Jakub Dawidek xbp->bio_length); 1219f5a2f7feSPawel Jakub Dawidek } 1220f5a2f7feSPawel Jakub Dawidek xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY; 1221dba915cfSPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) { 1222dba915cfSPawel Jakub Dawidek if (!g_raid3_is_zero(xbp)) { 1223dba915cfSPawel Jakub Dawidek g_raid3_parity_mismatch++; 1224dba915cfSPawel Jakub Dawidek pbp->bio_error = EIO; 1225dba915cfSPawel Jakub Dawidek goto finish; 1226dba915cfSPawel Jakub Dawidek } 1227dba915cfSPawel Jakub Dawidek g_raid3_destroy_bio(sc, xbp); 1228dba915cfSPawel Jakub Dawidek } 12292d1661a5SPawel Jakub Dawidek } 12302d1661a5SPawel Jakub Dawidek atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 12312d1661a5SPawel Jakub Dawidek cadd = padd = 0; 12322d1661a5SPawel Jakub Dawidek for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) { 12332d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 12342d1661a5SPawel Jakub Dawidek bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom); 12352d1661a5SPawel Jakub Dawidek pbp->bio_completed += atom; 12362d1661a5SPawel Jakub Dawidek padd += atom; 12372d1661a5SPawel Jakub Dawidek } 12382d1661a5SPawel Jakub Dawidek cadd += atom; 12392d1661a5SPawel Jakub Dawidek } 12402d1661a5SPawel Jakub Dawidek finish: 12412d1661a5SPawel Jakub Dawidek if (pbp->bio_error == 0) 12422d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, pbp, "Request finished."); 12434cf67afeSPawel Jakub Dawidek else { 12444cf67afeSPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) 12454cf67afeSPawel Jakub Dawidek G_RAID3_LOGREQ(1, pbp, "Verification error."); 12462d1661a5SPawel Jakub Dawidek else 12472d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, pbp, "Request failed."); 12484cf67afeSPawel Jakub Dawidek } 1249dba915cfSPawel Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK; 12502d1661a5SPawel Jakub Dawidek while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) 12512d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 1252290c6161SPawel Jakub Dawidek g_io_deliver(pbp, pbp->bio_error); 12532d1661a5SPawel Jakub Dawidek } 12542d1661a5SPawel Jakub Dawidek 12552d1661a5SPawel Jakub Dawidek static void 12562d1661a5SPawel Jakub Dawidek g_raid3_done(struct bio *bp) 12572d1661a5SPawel Jakub Dawidek { 12582d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 12592d1661a5SPawel Jakub Dawidek 12602d1661a5SPawel Jakub Dawidek sc = bp->bio_from->geom->softc; 12612d1661a5SPawel Jakub Dawidek bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR; 12622d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error); 12632d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 12642d1661a5SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 12658de58113SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 12662d1661a5SPawel Jakub Dawidek wakeup(sc); 12672d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 12682d1661a5SPawel Jakub Dawidek } 12692d1661a5SPawel Jakub Dawidek 12702d1661a5SPawel Jakub Dawidek static void 12712d1661a5SPawel Jakub Dawidek g_raid3_regular_request(struct bio *cbp) 12722d1661a5SPawel Jakub Dawidek { 12732d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 12742d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 12752d1661a5SPawel Jakub Dawidek struct bio *pbp; 12762d1661a5SPawel Jakub Dawidek 12772d1661a5SPawel Jakub Dawidek g_topology_assert_not(); 12782d1661a5SPawel Jakub Dawidek 12792d1661a5SPawel Jakub Dawidek pbp = cbp->bio_parent; 12802d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc; 12810962f942SPawel Jakub Dawidek cbp->bio_from->index--; 12820962f942SPawel Jakub Dawidek if (cbp->bio_cmd == BIO_WRITE) 12830962f942SPawel Jakub Dawidek sc->sc_writes--; 12842d1661a5SPawel Jakub Dawidek disk = cbp->bio_from->private; 12852d1661a5SPawel Jakub Dawidek if (disk == NULL) { 12862d1661a5SPawel Jakub Dawidek g_topology_lock(); 12872d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(sc, cbp->bio_from); 12882d1661a5SPawel Jakub Dawidek g_topology_unlock(); 12892d1661a5SPawel Jakub Dawidek } 12902d1661a5SPawel Jakub Dawidek 12912d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Request finished."); 12922d1661a5SPawel Jakub Dawidek pbp->bio_inbed++; 12932d1661a5SPawel Jakub Dawidek KASSERT(pbp->bio_inbed <= pbp->bio_children, 12942d1661a5SPawel Jakub Dawidek ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed, 12952d1661a5SPawel Jakub Dawidek pbp->bio_children)); 12962d1661a5SPawel Jakub Dawidek if (pbp->bio_inbed != pbp->bio_children) 12972d1661a5SPawel Jakub Dawidek return; 12982d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 12992d1661a5SPawel Jakub Dawidek case BIO_READ: 13002d1661a5SPawel Jakub Dawidek g_raid3_gather(pbp); 13012d1661a5SPawel Jakub Dawidek break; 13022d1661a5SPawel Jakub Dawidek case BIO_WRITE: 13032d1661a5SPawel Jakub Dawidek case BIO_DELETE: 13042d1661a5SPawel Jakub Dawidek { 13052d1661a5SPawel Jakub Dawidek int error = 0; 13062d1661a5SPawel Jakub Dawidek 13072d1661a5SPawel Jakub Dawidek pbp->bio_completed = pbp->bio_length; 13082d1661a5SPawel Jakub Dawidek while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) { 13093aae74ecSPawel Jakub Dawidek if (cbp->bio_error == 0) { 13103aae74ecSPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 13113aae74ecSPawel Jakub Dawidek continue; 13122d1661a5SPawel Jakub Dawidek } 13133aae74ecSPawel Jakub Dawidek 13142d1661a5SPawel Jakub Dawidek if (error == 0) 13152d1661a5SPawel Jakub Dawidek error = cbp->bio_error; 13162d1661a5SPawel Jakub Dawidek else if (pbp->bio_error == 0) { 13172d1661a5SPawel Jakub Dawidek /* 13182d1661a5SPawel Jakub Dawidek * Next failed request, that's too many. 13192d1661a5SPawel Jakub Dawidek */ 13202d1661a5SPawel Jakub Dawidek pbp->bio_error = error; 13212d1661a5SPawel Jakub Dawidek } 13223aae74ecSPawel Jakub Dawidek 13233aae74ecSPawel Jakub Dawidek disk = cbp->bio_caller2; 13243aae74ecSPawel Jakub Dawidek if (disk == NULL) { 13253aae74ecSPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 13263aae74ecSPawel Jakub Dawidek continue; 13273aae74ecSPawel Jakub Dawidek } 13283aae74ecSPawel Jakub Dawidek 13293aae74ecSPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 13303aae74ecSPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 13313aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(0, cbp, 13323aae74ecSPawel Jakub Dawidek "Request failed (error=%d).", 13333aae74ecSPawel Jakub Dawidek cbp->bio_error); 13343aae74ecSPawel Jakub Dawidek } else { 13353aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(1, cbp, 13363aae74ecSPawel Jakub Dawidek "Request failed (error=%d).", 13373aae74ecSPawel Jakub Dawidek cbp->bio_error); 13383aae74ecSPawel Jakub Dawidek } 13393aae74ecSPawel Jakub Dawidek if (g_raid3_disconnect_on_failure && 13403aae74ecSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 13413aae74ecSPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 13423aae74ecSPawel Jakub Dawidek g_raid3_event_send(disk, 13433aae74ecSPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 13443aae74ecSPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 13452d1661a5SPawel Jakub Dawidek } 13462d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 13472d1661a5SPawel Jakub Dawidek } 13482d1661a5SPawel Jakub Dawidek if (pbp->bio_error == 0) 13492d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, pbp, "Request finished."); 13502d1661a5SPawel Jakub Dawidek else 13512d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, pbp, "Request failed."); 13522d1661a5SPawel Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED; 13532d1661a5SPawel Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY; 13543650be51SPawel Jakub Dawidek bioq_remove(&sc->sc_inflight, pbp); 13553650be51SPawel Jakub Dawidek /* Release delayed sync requests if possible. */ 13563650be51SPawel Jakub Dawidek g_raid3_sync_release(sc); 13572d1661a5SPawel Jakub Dawidek g_io_deliver(pbp, pbp->bio_error); 13582d1661a5SPawel Jakub Dawidek break; 13592d1661a5SPawel Jakub Dawidek } 13602d1661a5SPawel Jakub Dawidek } 13612d1661a5SPawel Jakub Dawidek } 13622d1661a5SPawel Jakub Dawidek 13632d1661a5SPawel Jakub Dawidek static void 13642d1661a5SPawel Jakub Dawidek g_raid3_sync_done(struct bio *bp) 13652d1661a5SPawel Jakub Dawidek { 13662d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 13672d1661a5SPawel Jakub Dawidek 13682d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Synchronization request delivered."); 13692d1661a5SPawel Jakub Dawidek sc = bp->bio_from->geom->softc; 13702d1661a5SPawel Jakub Dawidek bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC; 13712d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 13722d1661a5SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 13738de58113SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 13742d1661a5SPawel Jakub Dawidek wakeup(sc); 13752d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 13762d1661a5SPawel Jakub Dawidek } 13772d1661a5SPawel Jakub Dawidek 13782d1661a5SPawel Jakub Dawidek static void 137942461fbaSPawel Jakub Dawidek g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp) 138042461fbaSPawel Jakub Dawidek { 138142461fbaSPawel Jakub Dawidek struct bio_queue_head queue; 138242461fbaSPawel Jakub Dawidek struct g_raid3_disk *disk; 1383*2cc5a480SMateusz Guzik struct g_consumer *cp __diagused; 138442461fbaSPawel Jakub Dawidek struct bio *cbp; 138542461fbaSPawel Jakub Dawidek u_int i; 138642461fbaSPawel Jakub Dawidek 138742461fbaSPawel Jakub Dawidek bioq_init(&queue); 138842461fbaSPawel Jakub Dawidek for (i = 0; i < sc->sc_ndisks; i++) { 138942461fbaSPawel Jakub Dawidek disk = &sc->sc_disks[i]; 139042461fbaSPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) 139142461fbaSPawel Jakub Dawidek continue; 139242461fbaSPawel Jakub Dawidek cbp = g_clone_bio(bp); 139342461fbaSPawel Jakub Dawidek if (cbp == NULL) { 139442461fbaSPawel Jakub Dawidek for (cbp = bioq_first(&queue); cbp != NULL; 139542461fbaSPawel Jakub Dawidek cbp = bioq_first(&queue)) { 139642461fbaSPawel Jakub Dawidek bioq_remove(&queue, cbp); 139742461fbaSPawel Jakub Dawidek g_destroy_bio(cbp); 139842461fbaSPawel Jakub Dawidek } 139942461fbaSPawel Jakub Dawidek if (bp->bio_error == 0) 140042461fbaSPawel Jakub Dawidek bp->bio_error = ENOMEM; 140142461fbaSPawel Jakub Dawidek g_io_deliver(bp, bp->bio_error); 140242461fbaSPawel Jakub Dawidek return; 140342461fbaSPawel Jakub Dawidek } 140442461fbaSPawel Jakub Dawidek bioq_insert_tail(&queue, cbp); 140542461fbaSPawel Jakub Dawidek cbp->bio_done = g_std_done; 140642461fbaSPawel Jakub Dawidek cbp->bio_caller1 = disk; 140742461fbaSPawel Jakub Dawidek cbp->bio_to = disk->d_consumer->provider; 140842461fbaSPawel Jakub Dawidek } 140942461fbaSPawel Jakub Dawidek for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) { 141042461fbaSPawel Jakub Dawidek bioq_remove(&queue, cbp); 141142461fbaSPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Sending request."); 141242461fbaSPawel Jakub Dawidek disk = cbp->bio_caller1; 141342461fbaSPawel Jakub Dawidek cbp->bio_caller1 = NULL; 141442461fbaSPawel Jakub Dawidek cp = disk->d_consumer; 141542461fbaSPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 141642461fbaSPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 141742461fbaSPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 141842461fbaSPawel Jakub Dawidek g_io_request(cbp, disk->d_consumer); 141942461fbaSPawel Jakub Dawidek } 142042461fbaSPawel Jakub Dawidek } 142142461fbaSPawel Jakub Dawidek 142242461fbaSPawel Jakub Dawidek static void 14232d1661a5SPawel Jakub Dawidek g_raid3_start(struct bio *bp) 14242d1661a5SPawel Jakub Dawidek { 14252d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 14262d1661a5SPawel Jakub Dawidek 14272d1661a5SPawel Jakub Dawidek sc = bp->bio_to->geom->softc; 14282d1661a5SPawel Jakub Dawidek /* 14292d1661a5SPawel Jakub Dawidek * If sc == NULL or there are no valid disks, provider's error 14302d1661a5SPawel Jakub Dawidek * should be set and g_raid3_start() should not be called at all. 14312d1661a5SPawel Jakub Dawidek */ 14322d1661a5SPawel Jakub Dawidek KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 14332d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE), 14342d1661a5SPawel Jakub Dawidek ("Provider's error should be set (error=%d)(device=%s).", 14352d1661a5SPawel Jakub Dawidek bp->bio_to->error, bp->bio_to->name)); 14362d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Request received."); 14372d1661a5SPawel Jakub Dawidek 14382d1661a5SPawel Jakub Dawidek switch (bp->bio_cmd) { 14392d1661a5SPawel Jakub Dawidek case BIO_READ: 14402d1661a5SPawel Jakub Dawidek case BIO_WRITE: 14412d1661a5SPawel Jakub Dawidek case BIO_DELETE: 14422d1661a5SPawel Jakub Dawidek break; 14438b522bdaSWarner Losh case BIO_SPEEDUP: 144442461fbaSPawel Jakub Dawidek case BIO_FLUSH: 144542461fbaSPawel Jakub Dawidek g_raid3_flush(sc, bp); 144642461fbaSPawel Jakub Dawidek return; 14472d1661a5SPawel Jakub Dawidek case BIO_GETATTR: 14482d1661a5SPawel Jakub Dawidek default: 14492d1661a5SPawel Jakub Dawidek g_io_deliver(bp, EOPNOTSUPP); 14502d1661a5SPawel Jakub Dawidek return; 14512d1661a5SPawel Jakub Dawidek } 14522d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 14532d1661a5SPawel Jakub Dawidek bioq_insert_tail(&sc->sc_queue, bp); 14548de58113SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 14552d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 14562d1661a5SPawel Jakub Dawidek wakeup(sc); 14572d1661a5SPawel Jakub Dawidek } 14582d1661a5SPawel Jakub Dawidek 14592d1661a5SPawel Jakub Dawidek /* 14603650be51SPawel Jakub Dawidek * Return TRUE if the given request is colliding with a in-progress 14613650be51SPawel Jakub Dawidek * synchronization request. 14622d1661a5SPawel Jakub Dawidek */ 14633650be51SPawel Jakub Dawidek static int 14643650be51SPawel Jakub Dawidek g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp) 14652d1661a5SPawel Jakub Dawidek { 14662d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 14673650be51SPawel Jakub Dawidek struct bio *sbp; 14683650be51SPawel Jakub Dawidek off_t rstart, rend, sstart, send; 14693650be51SPawel Jakub Dawidek int i; 14703650be51SPawel Jakub Dawidek 14713650be51SPawel Jakub Dawidek disk = sc->sc_syncdisk; 14723650be51SPawel Jakub Dawidek if (disk == NULL) 14733650be51SPawel Jakub Dawidek return (0); 14743650be51SPawel Jakub Dawidek rstart = bp->bio_offset; 14753650be51SPawel Jakub Dawidek rend = bp->bio_offset + bp->bio_length; 14763650be51SPawel Jakub Dawidek for (i = 0; i < g_raid3_syncreqs; i++) { 14773650be51SPawel Jakub Dawidek sbp = disk->d_sync.ds_bios[i]; 14783650be51SPawel Jakub Dawidek if (sbp == NULL) 14793650be51SPawel Jakub Dawidek continue; 14803650be51SPawel Jakub Dawidek sstart = sbp->bio_offset; 14813650be51SPawel Jakub Dawidek send = sbp->bio_length; 14823650be51SPawel Jakub Dawidek if (sbp->bio_cmd == BIO_WRITE) { 14833650be51SPawel Jakub Dawidek sstart *= sc->sc_ndisks - 1; 14843650be51SPawel Jakub Dawidek send *= sc->sc_ndisks - 1; 14853650be51SPawel Jakub Dawidek } 14863650be51SPawel Jakub Dawidek send += sstart; 14873650be51SPawel Jakub Dawidek if (rend > sstart && rstart < send) 14883650be51SPawel Jakub Dawidek return (1); 14893650be51SPawel Jakub Dawidek } 14903650be51SPawel Jakub Dawidek return (0); 14913650be51SPawel Jakub Dawidek } 14923650be51SPawel Jakub Dawidek 14933650be51SPawel Jakub Dawidek /* 14943650be51SPawel Jakub Dawidek * Return TRUE if the given sync request is colliding with a in-progress regular 14953650be51SPawel Jakub Dawidek * request. 14963650be51SPawel Jakub Dawidek */ 14973650be51SPawel Jakub Dawidek static int 14983650be51SPawel Jakub Dawidek g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp) 14993650be51SPawel Jakub Dawidek { 15003650be51SPawel Jakub Dawidek off_t rstart, rend, sstart, send; 15012d1661a5SPawel Jakub Dawidek struct bio *bp; 15022d1661a5SPawel Jakub Dawidek 15033650be51SPawel Jakub Dawidek if (sc->sc_syncdisk == NULL) 15043650be51SPawel Jakub Dawidek return (0); 15053650be51SPawel Jakub Dawidek sstart = sbp->bio_offset; 15063650be51SPawel Jakub Dawidek send = sstart + sbp->bio_length; 15073650be51SPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) { 15083650be51SPawel Jakub Dawidek rstart = bp->bio_offset; 15093650be51SPawel Jakub Dawidek rend = bp->bio_offset + bp->bio_length; 15103650be51SPawel Jakub Dawidek if (rend > sstart && rstart < send) 15113650be51SPawel Jakub Dawidek return (1); 15122d1661a5SPawel Jakub Dawidek } 15133650be51SPawel Jakub Dawidek return (0); 15142d1661a5SPawel Jakub Dawidek } 15152d1661a5SPawel Jakub Dawidek 15163650be51SPawel Jakub Dawidek /* 15173650be51SPawel Jakub Dawidek * Puts request onto delayed queue. 15183650be51SPawel Jakub Dawidek */ 15193650be51SPawel Jakub Dawidek static void 15203650be51SPawel Jakub Dawidek g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp) 15213650be51SPawel Jakub Dawidek { 15223650be51SPawel Jakub Dawidek 15233650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(2, bp, "Delaying request."); 15243650be51SPawel Jakub Dawidek bioq_insert_head(&sc->sc_regular_delayed, bp); 15253650be51SPawel Jakub Dawidek } 15263650be51SPawel Jakub Dawidek 15273650be51SPawel Jakub Dawidek /* 15283650be51SPawel Jakub Dawidek * Puts synchronization request onto delayed queue. 15293650be51SPawel Jakub Dawidek */ 15303650be51SPawel Jakub Dawidek static void 15313650be51SPawel Jakub Dawidek g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp) 15323650be51SPawel Jakub Dawidek { 15333650be51SPawel Jakub Dawidek 15343650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(2, bp, "Delaying synchronization request."); 15353650be51SPawel Jakub Dawidek bioq_insert_tail(&sc->sc_sync_delayed, bp); 15363650be51SPawel Jakub Dawidek } 15373650be51SPawel Jakub Dawidek 15383650be51SPawel Jakub Dawidek /* 15393650be51SPawel Jakub Dawidek * Releases delayed regular requests which don't collide anymore with sync 15403650be51SPawel Jakub Dawidek * requests. 15413650be51SPawel Jakub Dawidek */ 15423650be51SPawel Jakub Dawidek static void 15433650be51SPawel Jakub Dawidek g_raid3_regular_release(struct g_raid3_softc *sc) 15443650be51SPawel Jakub Dawidek { 15453650be51SPawel Jakub Dawidek struct bio *bp, *bp2; 15463650be51SPawel Jakub Dawidek 15473650be51SPawel Jakub Dawidek TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) { 15483650be51SPawel Jakub Dawidek if (g_raid3_sync_collision(sc, bp)) 15493650be51SPawel Jakub Dawidek continue; 15503650be51SPawel Jakub Dawidek bioq_remove(&sc->sc_regular_delayed, bp); 15513650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp); 15523650be51SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 15533650be51SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 15543650be51SPawel Jakub Dawidek #if 0 15553650be51SPawel Jakub Dawidek /* 15563650be51SPawel Jakub Dawidek * wakeup() is not needed, because this function is called from 15573650be51SPawel Jakub Dawidek * the worker thread. 15583650be51SPawel Jakub Dawidek */ 15593650be51SPawel Jakub Dawidek wakeup(&sc->sc_queue); 15603650be51SPawel Jakub Dawidek #endif 15613650be51SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 15623650be51SPawel Jakub Dawidek } 15633650be51SPawel Jakub Dawidek } 15643650be51SPawel Jakub Dawidek 15653650be51SPawel Jakub Dawidek /* 15663650be51SPawel Jakub Dawidek * Releases delayed sync requests which don't collide anymore with regular 15673650be51SPawel Jakub Dawidek * requests. 15683650be51SPawel Jakub Dawidek */ 15693650be51SPawel Jakub Dawidek static void 15703650be51SPawel Jakub Dawidek g_raid3_sync_release(struct g_raid3_softc *sc) 15713650be51SPawel Jakub Dawidek { 15723650be51SPawel Jakub Dawidek struct bio *bp, *bp2; 15733650be51SPawel Jakub Dawidek 15743650be51SPawel Jakub Dawidek TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) { 15753650be51SPawel Jakub Dawidek if (g_raid3_regular_collision(sc, bp)) 15763650be51SPawel Jakub Dawidek continue; 15773650be51SPawel Jakub Dawidek bioq_remove(&sc->sc_sync_delayed, bp); 15783650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(2, bp, 15793650be51SPawel Jakub Dawidek "Releasing delayed synchronization request."); 15803650be51SPawel Jakub Dawidek g_io_request(bp, bp->bio_from); 15813650be51SPawel Jakub Dawidek } 15823650be51SPawel Jakub Dawidek } 15833650be51SPawel Jakub Dawidek 15843650be51SPawel Jakub Dawidek /* 15853650be51SPawel Jakub Dawidek * Handle synchronization requests. 15863650be51SPawel Jakub Dawidek * Every synchronization request is two-steps process: first, READ request is 15873650be51SPawel Jakub Dawidek * send to active provider and then WRITE request (with read data) to the provider 1588e8d57122SPedro F. Giffuni * being synchronized. When WRITE is finished, new synchronization request is 15893650be51SPawel Jakub Dawidek * send. 15903650be51SPawel Jakub Dawidek */ 15912d1661a5SPawel Jakub Dawidek static void 15922d1661a5SPawel Jakub Dawidek g_raid3_sync_request(struct bio *bp) 15932d1661a5SPawel Jakub Dawidek { 15942d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 15952d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 15962d1661a5SPawel Jakub Dawidek 159779e61493SPawel Jakub Dawidek bp->bio_from->index--; 15982d1661a5SPawel Jakub Dawidek sc = bp->bio_from->geom->softc; 15992d1661a5SPawel Jakub Dawidek disk = bp->bio_from->private; 16002d1661a5SPawel Jakub Dawidek if (disk == NULL) { 16013650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 16022d1661a5SPawel Jakub Dawidek g_topology_lock(); 16032d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(sc, bp->bio_from); 16042d1661a5SPawel Jakub Dawidek g_topology_unlock(); 16053650be51SPawel Jakub Dawidek free(bp->bio_data, M_RAID3); 16062d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 16073650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 16082d1661a5SPawel Jakub Dawidek return; 16092d1661a5SPawel Jakub Dawidek } 16102d1661a5SPawel Jakub Dawidek 16112d1661a5SPawel Jakub Dawidek /* 16122d1661a5SPawel Jakub Dawidek * Synchronization request. 16132d1661a5SPawel Jakub Dawidek */ 16142d1661a5SPawel Jakub Dawidek switch (bp->bio_cmd) { 16152d1661a5SPawel Jakub Dawidek case BIO_READ: 16162d1661a5SPawel Jakub Dawidek { 16172d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 16182d1661a5SPawel Jakub Dawidek u_char *dst, *src; 16192d1661a5SPawel Jakub Dawidek off_t left; 16202d1661a5SPawel Jakub Dawidek u_int atom; 16212d1661a5SPawel Jakub Dawidek 16222d1661a5SPawel Jakub Dawidek if (bp->bio_error != 0) { 16232d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, bp, 16242d1661a5SPawel Jakub Dawidek "Synchronization request failed (error=%d).", 16252d1661a5SPawel Jakub Dawidek bp->bio_error); 16262d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 16272d1661a5SPawel Jakub Dawidek return; 16282d1661a5SPawel Jakub Dawidek } 16292d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Synchronization request finished."); 16302d1661a5SPawel Jakub Dawidek atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 16312d1661a5SPawel Jakub Dawidek dst = src = bp->bio_data; 16322d1661a5SPawel Jakub Dawidek if (disk->d_no == sc->sc_ndisks - 1) { 16332d1661a5SPawel Jakub Dawidek u_int n; 16342d1661a5SPawel Jakub Dawidek 16352d1661a5SPawel Jakub Dawidek /* Parity component. */ 16362d1661a5SPawel Jakub Dawidek for (left = bp->bio_length; left > 0; 16372d1661a5SPawel Jakub Dawidek left -= sc->sc_sectorsize) { 16382d1661a5SPawel Jakub Dawidek bcopy(src, dst, atom); 16392d1661a5SPawel Jakub Dawidek src += atom; 16402d1661a5SPawel Jakub Dawidek for (n = 1; n < sc->sc_ndisks - 1; n++) { 164106b215fdSAlexander Motin g_raid3_xor(src, dst, atom); 16422d1661a5SPawel Jakub Dawidek src += atom; 16432d1661a5SPawel Jakub Dawidek } 16442d1661a5SPawel Jakub Dawidek dst += atom; 16452d1661a5SPawel Jakub Dawidek } 16462d1661a5SPawel Jakub Dawidek } else { 16472d1661a5SPawel Jakub Dawidek /* Regular component. */ 16482d1661a5SPawel Jakub Dawidek src += atom * disk->d_no; 16492d1661a5SPawel Jakub Dawidek for (left = bp->bio_length; left > 0; 16502d1661a5SPawel Jakub Dawidek left -= sc->sc_sectorsize) { 16512d1661a5SPawel Jakub Dawidek bcopy(src, dst, atom); 16522d1661a5SPawel Jakub Dawidek src += sc->sc_sectorsize; 16532d1661a5SPawel Jakub Dawidek dst += atom; 16542d1661a5SPawel Jakub Dawidek } 16552d1661a5SPawel Jakub Dawidek } 16563650be51SPawel Jakub Dawidek bp->bio_driver1 = bp->bio_driver2 = NULL; 16573650be51SPawel Jakub Dawidek bp->bio_pflags = 0; 16582d1661a5SPawel Jakub Dawidek bp->bio_offset /= sc->sc_ndisks - 1; 16592d1661a5SPawel Jakub Dawidek bp->bio_length /= sc->sc_ndisks - 1; 16602d1661a5SPawel Jakub Dawidek bp->bio_cmd = BIO_WRITE; 16612d1661a5SPawel Jakub Dawidek bp->bio_cflags = 0; 16622d1661a5SPawel Jakub Dawidek bp->bio_children = bp->bio_inbed = 0; 16632d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 16643650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 16652d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 16662d1661a5SPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 166779e61493SPawel Jakub Dawidek cp->index++; 16682d1661a5SPawel Jakub Dawidek g_io_request(bp, cp); 16692d1661a5SPawel Jakub Dawidek return; 16702d1661a5SPawel Jakub Dawidek } 16712d1661a5SPawel Jakub Dawidek case BIO_WRITE: 1672d2fb9c62SPawel Jakub Dawidek { 1673d2fb9c62SPawel Jakub Dawidek struct g_raid3_disk_sync *sync; 16743650be51SPawel Jakub Dawidek off_t boffset, moffset; 16753650be51SPawel Jakub Dawidek void *data; 16763650be51SPawel Jakub Dawidek int i; 1677d2fb9c62SPawel Jakub Dawidek 16782d1661a5SPawel Jakub Dawidek if (bp->bio_error != 0) { 16792d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, bp, 16802d1661a5SPawel Jakub Dawidek "Synchronization request failed (error=%d).", 16812d1661a5SPawel Jakub Dawidek bp->bio_error); 16822d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 1683ea973705SPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 16842d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, 16852d1661a5SPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 16862d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 16872d1661a5SPawel Jakub Dawidek return; 16882d1661a5SPawel Jakub Dawidek } 16892d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Synchronization request finished."); 1690d2fb9c62SPawel Jakub Dawidek sync = &disk->d_sync; 16913650be51SPawel Jakub Dawidek if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) || 16923650be51SPawel Jakub Dawidek sync->ds_consumer == NULL || 16933650be51SPawel Jakub Dawidek (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 16943650be51SPawel Jakub Dawidek /* Don't send more synchronization requests. */ 16953650be51SPawel Jakub Dawidek sync->ds_inflight--; 16963650be51SPawel Jakub Dawidek if (sync->ds_bios != NULL) { 1697ef25813dSRuslan Ermilov i = (int)(uintptr_t)bp->bio_caller1; 16983650be51SPawel Jakub Dawidek sync->ds_bios[i] = NULL; 16993650be51SPawel Jakub Dawidek } 17003650be51SPawel Jakub Dawidek free(bp->bio_data, M_RAID3); 17012d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 17023650be51SPawel Jakub Dawidek if (sync->ds_inflight > 0) 1703d2fb9c62SPawel Jakub Dawidek return; 17043650be51SPawel Jakub Dawidek if (sync->ds_consumer == NULL || 17053650be51SPawel Jakub Dawidek (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 17063650be51SPawel Jakub Dawidek return; 17073650be51SPawel Jakub Dawidek } 17082d1661a5SPawel Jakub Dawidek /* 17092d1661a5SPawel Jakub Dawidek * Disk up-to-date, activate it. 17102d1661a5SPawel Jakub Dawidek */ 17112d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE, 17122d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 17132d1661a5SPawel Jakub Dawidek return; 17143650be51SPawel Jakub Dawidek } 17153650be51SPawel Jakub Dawidek 17163650be51SPawel Jakub Dawidek /* Send next synchronization request. */ 17173650be51SPawel Jakub Dawidek data = bp->bio_data; 1718c55f5707SWarner Losh g_reset_bio(bp); 17193650be51SPawel Jakub Dawidek bp->bio_cmd = BIO_READ; 17203650be51SPawel Jakub Dawidek bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1); 1721cd853791SKonstantin Belousov bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset); 17223650be51SPawel Jakub Dawidek sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 17233650be51SPawel Jakub Dawidek bp->bio_done = g_raid3_sync_done; 17243650be51SPawel Jakub Dawidek bp->bio_data = data; 17253650be51SPawel Jakub Dawidek bp->bio_from = sync->ds_consumer; 17263650be51SPawel Jakub Dawidek bp->bio_to = sc->sc_provider; 17273650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Sending synchronization request."); 17283650be51SPawel Jakub Dawidek sync->ds_consumer->index++; 17292d1661a5SPawel Jakub Dawidek /* 17303650be51SPawel Jakub Dawidek * Delay the request if it is colliding with a regular request. 17312d1661a5SPawel Jakub Dawidek */ 17323650be51SPawel Jakub Dawidek if (g_raid3_regular_collision(sc, bp)) 17333650be51SPawel Jakub Dawidek g_raid3_sync_delay(sc, bp); 17343650be51SPawel Jakub Dawidek else 17353650be51SPawel Jakub Dawidek g_io_request(bp, sync->ds_consumer); 17363650be51SPawel Jakub Dawidek 17373650be51SPawel Jakub Dawidek /* Release delayed requests if possible. */ 17383650be51SPawel Jakub Dawidek g_raid3_regular_release(sc); 17393650be51SPawel Jakub Dawidek 17403650be51SPawel Jakub Dawidek /* Find the smallest offset. */ 17413650be51SPawel Jakub Dawidek moffset = sc->sc_mediasize; 17423650be51SPawel Jakub Dawidek for (i = 0; i < g_raid3_syncreqs; i++) { 17433650be51SPawel Jakub Dawidek bp = sync->ds_bios[i]; 17443650be51SPawel Jakub Dawidek boffset = bp->bio_offset; 17453650be51SPawel Jakub Dawidek if (bp->bio_cmd == BIO_WRITE) 17463650be51SPawel Jakub Dawidek boffset *= sc->sc_ndisks - 1; 17473650be51SPawel Jakub Dawidek if (boffset < moffset) 17483650be51SPawel Jakub Dawidek moffset = boffset; 17493650be51SPawel Jakub Dawidek } 1750cd853791SKonstantin Belousov if (sync->ds_offset_done + maxphys * 100 < moffset) { 17513650be51SPawel Jakub Dawidek /* Update offset_done on every 100 blocks. */ 17523650be51SPawel Jakub Dawidek sync->ds_offset_done = moffset; 17532d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 17542d1661a5SPawel Jakub Dawidek } 17552d1661a5SPawel Jakub Dawidek return; 1756d2fb9c62SPawel Jakub Dawidek } 17572d1661a5SPawel Jakub Dawidek default: 17582d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 17592d1661a5SPawel Jakub Dawidek bp->bio_cmd, sc->sc_name)); 17602d1661a5SPawel Jakub Dawidek break; 17612d1661a5SPawel Jakub Dawidek } 17622d1661a5SPawel Jakub Dawidek } 17632d1661a5SPawel Jakub Dawidek 17642d1661a5SPawel Jakub Dawidek static int 17652d1661a5SPawel Jakub Dawidek g_raid3_register_request(struct bio *pbp) 17662d1661a5SPawel Jakub Dawidek { 17672d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 17682d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 17692d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 1770ee40c7aaSPawel Jakub Dawidek struct bio *cbp, *tmpbp; 17712d1661a5SPawel Jakub Dawidek off_t offset, length; 1772fa6a7837SDavid E. O'Brien u_int n, ndisks; 1773dba915cfSPawel Jakub Dawidek int round_robin, verify; 17742d1661a5SPawel Jakub Dawidek 1775fa6a7837SDavid E. O'Brien ndisks = 0; 17762d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc; 17772d1661a5SPawel Jakub Dawidek if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 && 17782d1661a5SPawel Jakub Dawidek sc->sc_syncdisk == NULL) { 17792d1661a5SPawel Jakub Dawidek g_io_deliver(pbp, EIO); 17802d1661a5SPawel Jakub Dawidek return (0); 17812d1661a5SPawel Jakub Dawidek } 17822d1661a5SPawel Jakub Dawidek g_raid3_init_bio(pbp); 17832d1661a5SPawel Jakub Dawidek length = pbp->bio_length / (sc->sc_ndisks - 1); 17842d1661a5SPawel Jakub Dawidek offset = pbp->bio_offset / (sc->sc_ndisks - 1); 1785dba915cfSPawel Jakub Dawidek round_robin = verify = 0; 17862d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 17872d1661a5SPawel Jakub Dawidek case BIO_READ: 1788dba915cfSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 1789dba915cfSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1790dba915cfSPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY; 1791dba915cfSPawel Jakub Dawidek verify = 1; 1792dba915cfSPawel Jakub Dawidek ndisks = sc->sc_ndisks; 1793dba915cfSPawel Jakub Dawidek } else { 1794dba915cfSPawel Jakub Dawidek verify = 0; 17952d1661a5SPawel Jakub Dawidek ndisks = sc->sc_ndisks - 1; 1796dba915cfSPawel Jakub Dawidek } 1797dba915cfSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 && 1798dba915cfSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1799dba915cfSPawel Jakub Dawidek round_robin = 1; 1800dba915cfSPawel Jakub Dawidek } else { 1801dba915cfSPawel Jakub Dawidek round_robin = 0; 1802dba915cfSPawel Jakub Dawidek } 1803dba915cfSPawel Jakub Dawidek KASSERT(!round_robin || !verify, 1804dba915cfSPawel Jakub Dawidek ("ROUND-ROBIN and VERIFY are mutually exclusive.")); 1805f5a2f7feSPawel Jakub Dawidek pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1]; 18062d1661a5SPawel Jakub Dawidek break; 18072d1661a5SPawel Jakub Dawidek case BIO_WRITE: 18082d1661a5SPawel Jakub Dawidek case BIO_DELETE: 18093650be51SPawel Jakub Dawidek /* 18103650be51SPawel Jakub Dawidek * Delay the request if it is colliding with a synchronization 18113650be51SPawel Jakub Dawidek * request. 18123650be51SPawel Jakub Dawidek */ 18133650be51SPawel Jakub Dawidek if (g_raid3_sync_collision(sc, pbp)) { 18143650be51SPawel Jakub Dawidek g_raid3_regular_delay(sc, pbp); 18153650be51SPawel Jakub Dawidek return (0); 18163650be51SPawel Jakub Dawidek } 1817d2fb9c62SPawel Jakub Dawidek 18184d006a98SPawel Jakub Dawidek if (sc->sc_idle) 18194d006a98SPawel Jakub Dawidek g_raid3_unidle(sc); 18200962f942SPawel Jakub Dawidek else 182101f1f41cSPawel Jakub Dawidek sc->sc_last_write = time_uptime; 18224d006a98SPawel Jakub Dawidek 18232d1661a5SPawel Jakub Dawidek ndisks = sc->sc_ndisks; 18242d1661a5SPawel Jakub Dawidek break; 18252d1661a5SPawel Jakub Dawidek } 18262d1661a5SPawel Jakub Dawidek for (n = 0; n < ndisks; n++) { 18272d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 18282d1661a5SPawel Jakub Dawidek cbp = g_raid3_clone_bio(sc, pbp); 18292d1661a5SPawel Jakub Dawidek if (cbp == NULL) { 18302d1661a5SPawel Jakub Dawidek while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) 18312d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 1832a65a0da2SPawel Jakub Dawidek /* 1833a65a0da2SPawel Jakub Dawidek * To prevent deadlock, we must run back up 1834a65a0da2SPawel Jakub Dawidek * with the ENOMEM for failed requests of any 1835a65a0da2SPawel Jakub Dawidek * of our consumers. Our own sync requests 1836a65a0da2SPawel Jakub Dawidek * can stick around, as they are finite. 1837a65a0da2SPawel Jakub Dawidek */ 1838a65a0da2SPawel Jakub Dawidek if ((pbp->bio_cflags & 1839a65a0da2SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_REGULAR) != 0) { 1840a65a0da2SPawel Jakub Dawidek g_io_deliver(pbp, ENOMEM); 1841a65a0da2SPawel Jakub Dawidek return (0); 1842a65a0da2SPawel Jakub Dawidek } 18432d1661a5SPawel Jakub Dawidek return (ENOMEM); 18442d1661a5SPawel Jakub Dawidek } 18452d1661a5SPawel Jakub Dawidek cbp->bio_offset = offset; 18462d1661a5SPawel Jakub Dawidek cbp->bio_length = length; 18472d1661a5SPawel Jakub Dawidek cbp->bio_done = g_raid3_done; 18482d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 18492d1661a5SPawel Jakub Dawidek case BIO_READ: 18502d1661a5SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) { 18512d1661a5SPawel Jakub Dawidek /* 18522d1661a5SPawel Jakub Dawidek * Replace invalid component with the parity 18532d1661a5SPawel Jakub Dawidek * component. 18542d1661a5SPawel Jakub Dawidek */ 18552d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 18562d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 18572d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 1858f5a2f7feSPawel Jakub Dawidek } else if (round_robin && 1859f5a2f7feSPawel Jakub Dawidek disk->d_no == sc->sc_round_robin) { 1860f5a2f7feSPawel Jakub Dawidek /* 1861f5a2f7feSPawel Jakub Dawidek * In round-robin mode skip one data component 1862f5a2f7feSPawel Jakub Dawidek * and use parity component when reading. 1863f5a2f7feSPawel Jakub Dawidek */ 1864f5a2f7feSPawel Jakub Dawidek pbp->bio_driver2 = disk; 1865f5a2f7feSPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 1866f5a2f7feSPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1867f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin++; 1868f5a2f7feSPawel Jakub Dawidek round_robin = 0; 1869dba915cfSPawel Jakub Dawidek } else if (verify && disk->d_no == sc->sc_ndisks - 1) { 1870dba915cfSPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 18712d1661a5SPawel Jakub Dawidek } 18722d1661a5SPawel Jakub Dawidek break; 18732d1661a5SPawel Jakub Dawidek case BIO_WRITE: 18742d1661a5SPawel Jakub Dawidek case BIO_DELETE: 18752d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 18762d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 18772d1661a5SPawel Jakub Dawidek if (n == ndisks - 1) { 18782d1661a5SPawel Jakub Dawidek /* 18792d1661a5SPawel Jakub Dawidek * Active parity component, mark it as such. 18802d1661a5SPawel Jakub Dawidek */ 18812d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= 18822d1661a5SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_PARITY; 18832d1661a5SPawel Jakub Dawidek } 18842d1661a5SPawel Jakub Dawidek } else { 18852d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 18862d1661a5SPawel Jakub Dawidek if (n == ndisks - 1) { 18872d1661a5SPawel Jakub Dawidek /* 18882d1661a5SPawel Jakub Dawidek * Parity component is not connected, 18892d1661a5SPawel Jakub Dawidek * so destroy its request. 18902d1661a5SPawel Jakub Dawidek */ 18912d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= 18922d1661a5SPawel Jakub Dawidek G_RAID3_BIO_PFLAG_NOPARITY; 18932d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 18942d1661a5SPawel Jakub Dawidek cbp = NULL; 18952d1661a5SPawel Jakub Dawidek } else { 18962d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= 18972d1661a5SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_NODISK; 18982d1661a5SPawel Jakub Dawidek disk = NULL; 18992d1661a5SPawel Jakub Dawidek } 19002d1661a5SPawel Jakub Dawidek } 19012d1661a5SPawel Jakub Dawidek break; 19022d1661a5SPawel Jakub Dawidek } 19032d1661a5SPawel Jakub Dawidek if (cbp != NULL) 19042d1661a5SPawel Jakub Dawidek cbp->bio_caller2 = disk; 19052d1661a5SPawel Jakub Dawidek } 19062d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 19072d1661a5SPawel Jakub Dawidek case BIO_READ: 1908f5a2f7feSPawel Jakub Dawidek if (round_robin) { 1909f5a2f7feSPawel Jakub Dawidek /* 1910f5a2f7feSPawel Jakub Dawidek * If we are in round-robin mode and 'round_robin' is 1911f5a2f7feSPawel Jakub Dawidek * still 1, it means, that we skipped parity component 1912f5a2f7feSPawel Jakub Dawidek * for this read and must reset sc_round_robin field. 1913f5a2f7feSPawel Jakub Dawidek */ 1914f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin = 0; 1915f5a2f7feSPawel Jakub Dawidek } 1916ee40c7aaSPawel Jakub Dawidek G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 19172d1661a5SPawel Jakub Dawidek disk = cbp->bio_caller2; 19182d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 19192d1661a5SPawel Jakub Dawidek cbp->bio_to = cp->provider; 19202d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Sending request."); 19213650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 19222d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", 19232d1661a5SPawel Jakub Dawidek cp->provider->name, cp->acr, cp->acw, cp->ace)); 192479e61493SPawel Jakub Dawidek cp->index++; 19252d1661a5SPawel Jakub Dawidek g_io_request(cbp, cp); 19262d1661a5SPawel Jakub Dawidek } 19272d1661a5SPawel Jakub Dawidek break; 19282d1661a5SPawel Jakub Dawidek case BIO_WRITE: 19292d1661a5SPawel Jakub Dawidek case BIO_DELETE: 19302d1661a5SPawel Jakub Dawidek /* 19313650be51SPawel Jakub Dawidek * Put request onto inflight queue, so we can check if new 19323650be51SPawel Jakub Dawidek * synchronization requests don't collide with it. 19333650be51SPawel Jakub Dawidek */ 19343650be51SPawel Jakub Dawidek bioq_insert_tail(&sc->sc_inflight, pbp); 19353650be51SPawel Jakub Dawidek 19363650be51SPawel Jakub Dawidek /* 19372d1661a5SPawel Jakub Dawidek * Bump syncid on first write. 19382d1661a5SPawel Jakub Dawidek */ 1939ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) { 1940a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 1941d97d5ee9SPawel Jakub Dawidek g_raid3_bump_syncid(sc); 19422d1661a5SPawel Jakub Dawidek } 19432d1661a5SPawel Jakub Dawidek g_raid3_scatter(pbp); 19442d1661a5SPawel Jakub Dawidek break; 19452d1661a5SPawel Jakub Dawidek } 19462d1661a5SPawel Jakub Dawidek return (0); 19472d1661a5SPawel Jakub Dawidek } 19482d1661a5SPawel Jakub Dawidek 19492d1661a5SPawel Jakub Dawidek static int 19502d1661a5SPawel Jakub Dawidek g_raid3_can_destroy(struct g_raid3_softc *sc) 19512d1661a5SPawel Jakub Dawidek { 19522d1661a5SPawel Jakub Dawidek struct g_geom *gp; 19532d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 19542d1661a5SPawel Jakub Dawidek 19552d1661a5SPawel Jakub Dawidek g_topology_assert(); 19562d1661a5SPawel Jakub Dawidek gp = sc->sc_geom; 195718486a5eSPawel Jakub Dawidek if (gp->softc == NULL) 195818486a5eSPawel Jakub Dawidek return (1); 19592d1661a5SPawel Jakub Dawidek LIST_FOREACH(cp, &gp->consumer, consumer) { 19602d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 19612d1661a5SPawel Jakub Dawidek return (0); 19622d1661a5SPawel Jakub Dawidek } 19632d1661a5SPawel Jakub Dawidek gp = sc->sc_sync.ds_geom; 19642d1661a5SPawel Jakub Dawidek LIST_FOREACH(cp, &gp->consumer, consumer) { 19652d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 19662d1661a5SPawel Jakub Dawidek return (0); 19672d1661a5SPawel Jakub Dawidek } 19682d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 19692d1661a5SPawel Jakub Dawidek sc->sc_name); 19702d1661a5SPawel Jakub Dawidek return (1); 19712d1661a5SPawel Jakub Dawidek } 19722d1661a5SPawel Jakub Dawidek 19732d1661a5SPawel Jakub Dawidek static int 19742d1661a5SPawel Jakub Dawidek g_raid3_try_destroy(struct g_raid3_softc *sc) 19752d1661a5SPawel Jakub Dawidek { 19762d1661a5SPawel Jakub Dawidek 19773650be51SPawel Jakub Dawidek g_topology_assert_not(); 19783650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 19793650be51SPawel Jakub Dawidek 19804ed854e8SPawel Jakub Dawidek if (sc->sc_rootmount != NULL) { 19814ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 19824ed854e8SPawel Jakub Dawidek sc->sc_rootmount); 19834ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 19844ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 19854ed854e8SPawel Jakub Dawidek } 19864ed854e8SPawel Jakub Dawidek 19872d1661a5SPawel Jakub Dawidek g_topology_lock(); 19882d1661a5SPawel Jakub Dawidek if (!g_raid3_can_destroy(sc)) { 19892d1661a5SPawel Jakub Dawidek g_topology_unlock(); 19902d1661a5SPawel Jakub Dawidek return (0); 19912d1661a5SPawel Jakub Dawidek } 199218486a5eSPawel Jakub Dawidek sc->sc_geom->softc = NULL; 199318486a5eSPawel Jakub Dawidek sc->sc_sync.ds_geom->softc = NULL; 1994a245a548SPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) { 19952d1661a5SPawel Jakub Dawidek g_topology_unlock(); 19962d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 19972d1661a5SPawel Jakub Dawidek &sc->sc_worker); 19983650be51SPawel Jakub Dawidek /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 19993650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 20002d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_worker); 20012d1661a5SPawel Jakub Dawidek sc->sc_worker = NULL; 20022d1661a5SPawel Jakub Dawidek } else { 20032d1661a5SPawel Jakub Dawidek g_topology_unlock(); 20043650be51SPawel Jakub Dawidek g_raid3_destroy_device(sc); 20052d1661a5SPawel Jakub Dawidek free(sc->sc_disks, M_RAID3); 20062d1661a5SPawel Jakub Dawidek free(sc, M_RAID3); 20072d1661a5SPawel Jakub Dawidek } 20082d1661a5SPawel Jakub Dawidek return (1); 20092d1661a5SPawel Jakub Dawidek } 20102d1661a5SPawel Jakub Dawidek 20112d1661a5SPawel Jakub Dawidek /* 20122d1661a5SPawel Jakub Dawidek * Worker thread. 20132d1661a5SPawel Jakub Dawidek */ 20142d1661a5SPawel Jakub Dawidek static void 20152d1661a5SPawel Jakub Dawidek g_raid3_worker(void *arg) 20162d1661a5SPawel Jakub Dawidek { 20172d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 20182d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 20192d1661a5SPawel Jakub Dawidek struct bio *bp; 20200962f942SPawel Jakub Dawidek int timeout; 20212d1661a5SPawel Jakub Dawidek 20222d1661a5SPawel Jakub Dawidek sc = arg; 2023982d11f8SJeff Roberson thread_lock(curthread); 202463710c4dSJohn Baldwin sched_prio(curthread, PRIBIO); 2025982d11f8SJeff Roberson thread_unlock(curthread); 20262d1661a5SPawel Jakub Dawidek 20273650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 20282d1661a5SPawel Jakub Dawidek for (;;) { 20292d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: Let's see...", __func__); 20302d1661a5SPawel Jakub Dawidek /* 20312d1661a5SPawel Jakub Dawidek * First take a look at events. 20322d1661a5SPawel Jakub Dawidek * This is important to handle events before any I/O requests. 20332d1661a5SPawel Jakub Dawidek */ 20342d1661a5SPawel Jakub Dawidek ep = g_raid3_event_get(sc); 20353650be51SPawel Jakub Dawidek if (ep != NULL) { 2036d97d5ee9SPawel Jakub Dawidek g_raid3_event_remove(sc, ep); 20372d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) { 20382d1661a5SPawel Jakub Dawidek /* Update only device status. */ 20392d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, 20402d1661a5SPawel Jakub Dawidek "Running event for device %s.", 20412d1661a5SPawel Jakub Dawidek sc->sc_name); 20422d1661a5SPawel Jakub Dawidek ep->e_error = 0; 2043d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(sc, 1); 20442d1661a5SPawel Jakub Dawidek } else { 20452d1661a5SPawel Jakub Dawidek /* Update disk status. */ 20462d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "Running event for disk %s.", 20472d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(ep->e_disk)); 20482d1661a5SPawel Jakub Dawidek ep->e_error = g_raid3_update_disk(ep->e_disk, 2049d97d5ee9SPawel Jakub Dawidek ep->e_state); 20502d1661a5SPawel Jakub Dawidek if (ep->e_error == 0) 2051d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(sc, 0); 20522d1661a5SPawel Jakub Dawidek } 20532d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) { 20542d1661a5SPawel Jakub Dawidek KASSERT(ep->e_error == 0, 20552d1661a5SPawel Jakub Dawidek ("Error cannot be handled.")); 20562d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 20572d1661a5SPawel Jakub Dawidek } else { 20582d1661a5SPawel Jakub Dawidek ep->e_flags |= G_RAID3_EVENT_DONE; 20592d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 20602d1661a5SPawel Jakub Dawidek ep); 20612d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 20622d1661a5SPawel Jakub Dawidek wakeup(ep); 20632d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 20642d1661a5SPawel Jakub Dawidek } 20652d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & 20662d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 20673650be51SPawel Jakub Dawidek if (g_raid3_try_destroy(sc)) { 20683650be51SPawel Jakub Dawidek curthread->td_pflags &= ~TDP_GEOM; 20693650be51SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Thread exiting."); 20703745c395SJulian Elischer kproc_exit(0); 20712d1661a5SPawel Jakub Dawidek } 20723650be51SPawel Jakub Dawidek } 20732d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__); 20742d1661a5SPawel Jakub Dawidek continue; 20752d1661a5SPawel Jakub Dawidek } 20762d1661a5SPawel Jakub Dawidek /* 20770962f942SPawel Jakub Dawidek * Check if we can mark array as CLEAN and if we can't take 20780962f942SPawel Jakub Dawidek * how much seconds should we wait. 20790962f942SPawel Jakub Dawidek */ 20803650be51SPawel Jakub Dawidek timeout = g_raid3_idle(sc, -1); 20810962f942SPawel Jakub Dawidek /* 20822d1661a5SPawel Jakub Dawidek * Now I/O requests. 20832d1661a5SPawel Jakub Dawidek */ 20842d1661a5SPawel Jakub Dawidek /* Get first request from the queue. */ 20852d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 20862d1661a5SPawel Jakub Dawidek bp = bioq_first(&sc->sc_queue); 20872d1661a5SPawel Jakub Dawidek if (bp == NULL) { 20882d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & 20892d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 20902d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 20913650be51SPawel Jakub Dawidek if (g_raid3_try_destroy(sc)) { 20923650be51SPawel Jakub Dawidek curthread->td_pflags &= ~TDP_GEOM; 2093d7fad9f6SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Thread exiting."); 20943745c395SJulian Elischer kproc_exit(0); 20953650be51SPawel Jakub Dawidek } 20962d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 20972d1661a5SPawel Jakub Dawidek } 20983650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 2099a2fe5c66SPawel Jakub Dawidek /* 2100a2fe5c66SPawel Jakub Dawidek * XXX: We can miss an event here, because an event 2101a2fe5c66SPawel Jakub Dawidek * can be added without sx-device-lock and without 2102a2fe5c66SPawel Jakub Dawidek * mtx-queue-lock. Maybe I should just stop using 2103a2fe5c66SPawel Jakub Dawidek * dedicated mutex for events synchronization and 2104a2fe5c66SPawel Jakub Dawidek * stick with the queue lock? 2105a2fe5c66SPawel Jakub Dawidek * The event will hang here until next I/O request 2106a2fe5c66SPawel Jakub Dawidek * or next event is received. 2107a2fe5c66SPawel Jakub Dawidek */ 21080962f942SPawel Jakub Dawidek MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1", 21090962f942SPawel Jakub Dawidek timeout * hz); 21103650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 21119bb09163SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__); 21122d1661a5SPawel Jakub Dawidek continue; 21132d1661a5SPawel Jakub Dawidek } 211484edb86dSPawel Jakub Dawidek process: 21152d1661a5SPawel Jakub Dawidek bioq_remove(&sc->sc_queue, bp); 21162d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 21172d1661a5SPawel Jakub Dawidek 21188e007c52SPawel Jakub Dawidek if (bp->bio_from->geom == sc->sc_sync.ds_geom && 21198e007c52SPawel Jakub Dawidek (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) { 21208e007c52SPawel Jakub Dawidek g_raid3_sync_request(bp); /* READ */ 21218e007c52SPawel Jakub Dawidek } else if (bp->bio_to != sc->sc_provider) { 21223650be51SPawel Jakub Dawidek if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0) 21232d1661a5SPawel Jakub Dawidek g_raid3_regular_request(bp); 21243650be51SPawel Jakub Dawidek else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) 21258e007c52SPawel Jakub Dawidek g_raid3_sync_request(bp); /* WRITE */ 2126de6f1c7cSPawel Jakub Dawidek else { 2127de6f1c7cSPawel Jakub Dawidek KASSERT(0, 21289a8fa125SWarner Losh ("Invalid request cflags=0x%hx to=%s.", 2129de6f1c7cSPawel Jakub Dawidek bp->bio_cflags, bp->bio_to->name)); 2130de6f1c7cSPawel Jakub Dawidek } 2131de6f1c7cSPawel Jakub Dawidek } else if (g_raid3_register_request(bp) != 0) { 21322d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 21333650be51SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 213484edb86dSPawel Jakub Dawidek /* 213584edb86dSPawel Jakub Dawidek * We are short in memory, let see if there are finished 213684edb86dSPawel Jakub Dawidek * request we can free. 213784edb86dSPawel Jakub Dawidek */ 213884edb86dSPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 213984edb86dSPawel Jakub Dawidek if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) 214084edb86dSPawel Jakub Dawidek goto process; 21412d1661a5SPawel Jakub Dawidek } 214284edb86dSPawel Jakub Dawidek /* 214384edb86dSPawel Jakub Dawidek * No finished regular request, so at least keep 214484edb86dSPawel Jakub Dawidek * synchronization running. 214584edb86dSPawel Jakub Dawidek */ 214684edb86dSPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 214784edb86dSPawel Jakub Dawidek if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) 214884edb86dSPawel Jakub Dawidek goto process; 214984edb86dSPawel Jakub Dawidek } 215084edb86dSPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 215184edb86dSPawel Jakub Dawidek MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP, 215284edb86dSPawel Jakub Dawidek "r3:lowmem", hz / 10); 215384edb86dSPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 21542d1661a5SPawel Jakub Dawidek } 2155d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__); 21562d1661a5SPawel Jakub Dawidek } 21572d1661a5SPawel Jakub Dawidek } 21582d1661a5SPawel Jakub Dawidek 21592d1661a5SPawel Jakub Dawidek static void 21600962f942SPawel Jakub Dawidek g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk) 21612d1661a5SPawel Jakub Dawidek { 21622d1661a5SPawel Jakub Dawidek 21633650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 2164501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 2165501250baSPawel Jakub Dawidek return; 21660962f942SPawel Jakub Dawidek if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) { 21672d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.", 21683650be51SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 21692d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 21700962f942SPawel Jakub Dawidek } else if (sc->sc_idle && 21710962f942SPawel Jakub Dawidek (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) { 21722d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.", 21733650be51SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 21742d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 21752d1661a5SPawel Jakub Dawidek } 21762d1661a5SPawel Jakub Dawidek } 21772d1661a5SPawel Jakub Dawidek 21782d1661a5SPawel Jakub Dawidek static void 21792d1661a5SPawel Jakub Dawidek g_raid3_sync_start(struct g_raid3_softc *sc) 21802d1661a5SPawel Jakub Dawidek { 21812d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 21823650be51SPawel Jakub Dawidek struct g_consumer *cp; 21833650be51SPawel Jakub Dawidek struct bio *bp; 2184*2cc5a480SMateusz Guzik int error __diagused; 21852d1661a5SPawel Jakub Dawidek u_int n; 21862d1661a5SPawel Jakub Dawidek 21873650be51SPawel Jakub Dawidek g_topology_assert_not(); 21883650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 21892d1661a5SPawel Jakub Dawidek 21902d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 21912d1661a5SPawel Jakub Dawidek ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 21922d1661a5SPawel Jakub Dawidek sc->sc_state)); 21932d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).", 21942d1661a5SPawel Jakub Dawidek sc->sc_name, sc->sc_state)); 21952d1661a5SPawel Jakub Dawidek disk = NULL; 21962d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 21972d1661a5SPawel Jakub Dawidek if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING) 21982d1661a5SPawel Jakub Dawidek continue; 21992d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 22002d1661a5SPawel Jakub Dawidek break; 22012d1661a5SPawel Jakub Dawidek } 22022d1661a5SPawel Jakub Dawidek if (disk == NULL) 22032d1661a5SPawel Jakub Dawidek return; 22042d1661a5SPawel Jakub Dawidek 22053650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 22063650be51SPawel Jakub Dawidek g_topology_lock(); 22073650be51SPawel Jakub Dawidek cp = g_new_consumer(sc->sc_sync.ds_geom); 22083650be51SPawel Jakub Dawidek error = g_attach(cp, sc->sc_provider); 22093650be51SPawel Jakub Dawidek KASSERT(error == 0, 22103650be51SPawel Jakub Dawidek ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 22113650be51SPawel Jakub Dawidek error = g_access(cp, 1, 0, 0); 22123650be51SPawel Jakub Dawidek KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 22133650be51SPawel Jakub Dawidek g_topology_unlock(); 22143650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 22153650be51SPawel Jakub Dawidek 22162d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 22172d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 2218501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0) 22192d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 22202d1661a5SPawel Jakub Dawidek KASSERT(disk->d_sync.ds_consumer == NULL, 22212d1661a5SPawel Jakub Dawidek ("Sync consumer already exists (device=%s, disk=%s).", 22222d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk))); 22233650be51SPawel Jakub Dawidek 22243650be51SPawel Jakub Dawidek disk->d_sync.ds_consumer = cp; 22252d1661a5SPawel Jakub Dawidek disk->d_sync.ds_consumer->private = disk; 222679e61493SPawel Jakub Dawidek disk->d_sync.ds_consumer->index = 0; 22272d1661a5SPawel Jakub Dawidek sc->sc_syncdisk = disk; 22283650be51SPawel Jakub Dawidek 22293650be51SPawel Jakub Dawidek /* 22303650be51SPawel Jakub Dawidek * Allocate memory for synchronization bios and initialize them. 22313650be51SPawel Jakub Dawidek */ 22323650be51SPawel Jakub Dawidek disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs, 22333650be51SPawel Jakub Dawidek M_RAID3, M_WAITOK); 22343650be51SPawel Jakub Dawidek for (n = 0; n < g_raid3_syncreqs; n++) { 22353650be51SPawel Jakub Dawidek bp = g_alloc_bio(); 22363650be51SPawel Jakub Dawidek disk->d_sync.ds_bios[n] = bp; 22373650be51SPawel Jakub Dawidek bp->bio_parent = NULL; 22383650be51SPawel Jakub Dawidek bp->bio_cmd = BIO_READ; 2239cd853791SKonstantin Belousov bp->bio_data = malloc(maxphys, M_RAID3, M_WAITOK); 22403650be51SPawel Jakub Dawidek bp->bio_cflags = 0; 22413650be51SPawel Jakub Dawidek bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1); 2242cd853791SKonstantin Belousov bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset); 22433650be51SPawel Jakub Dawidek disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 22443650be51SPawel Jakub Dawidek bp->bio_done = g_raid3_sync_done; 22453650be51SPawel Jakub Dawidek bp->bio_from = disk->d_sync.ds_consumer; 22463650be51SPawel Jakub Dawidek bp->bio_to = sc->sc_provider; 2247ef25813dSRuslan Ermilov bp->bio_caller1 = (void *)(uintptr_t)n; 22483650be51SPawel Jakub Dawidek } 22493650be51SPawel Jakub Dawidek 22503650be51SPawel Jakub Dawidek /* Set the number of in-flight synchronization requests. */ 22513650be51SPawel Jakub Dawidek disk->d_sync.ds_inflight = g_raid3_syncreqs; 22523650be51SPawel Jakub Dawidek 22533650be51SPawel Jakub Dawidek /* 22543650be51SPawel Jakub Dawidek * Fire off first synchronization requests. 22553650be51SPawel Jakub Dawidek */ 22563650be51SPawel Jakub Dawidek for (n = 0; n < g_raid3_syncreqs; n++) { 22573650be51SPawel Jakub Dawidek bp = disk->d_sync.ds_bios[n]; 22583650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Sending synchronization request."); 22593650be51SPawel Jakub Dawidek disk->d_sync.ds_consumer->index++; 22603650be51SPawel Jakub Dawidek /* 22613650be51SPawel Jakub Dawidek * Delay the request if it is colliding with a regular request. 22623650be51SPawel Jakub Dawidek */ 22633650be51SPawel Jakub Dawidek if (g_raid3_regular_collision(sc, bp)) 22643650be51SPawel Jakub Dawidek g_raid3_sync_delay(sc, bp); 22653650be51SPawel Jakub Dawidek else 22663650be51SPawel Jakub Dawidek g_io_request(bp, disk->d_sync.ds_consumer); 22673650be51SPawel Jakub Dawidek } 22682d1661a5SPawel Jakub Dawidek } 22692d1661a5SPawel Jakub Dawidek 22702d1661a5SPawel Jakub Dawidek /* 22712d1661a5SPawel Jakub Dawidek * Stop synchronization process. 22722d1661a5SPawel Jakub Dawidek * type: 0 - synchronization finished 22732d1661a5SPawel Jakub Dawidek * 1 - synchronization stopped 22742d1661a5SPawel Jakub Dawidek */ 22752d1661a5SPawel Jakub Dawidek static void 22762d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(struct g_raid3_softc *sc, int type) 22772d1661a5SPawel Jakub Dawidek { 22782d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 22793650be51SPawel Jakub Dawidek struct g_consumer *cp; 22802d1661a5SPawel Jakub Dawidek 22813650be51SPawel Jakub Dawidek g_topology_assert_not(); 22823650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 22833650be51SPawel Jakub Dawidek 22842d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 22852d1661a5SPawel Jakub Dawidek ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 22862d1661a5SPawel Jakub Dawidek sc->sc_state)); 22872d1661a5SPawel Jakub Dawidek disk = sc->sc_syncdisk; 22882d1661a5SPawel Jakub Dawidek sc->sc_syncdisk = NULL; 22892d1661a5SPawel Jakub Dawidek KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name)); 22902d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 22912d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 22922d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 22932d1661a5SPawel Jakub Dawidek if (disk->d_sync.ds_consumer == NULL) 22942d1661a5SPawel Jakub Dawidek return; 22952d1661a5SPawel Jakub Dawidek 22962d1661a5SPawel Jakub Dawidek if (type == 0) { 22972d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.", 22983650be51SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 22992d1661a5SPawel Jakub Dawidek } else /* if (type == 1) */ { 23002d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 23013650be51SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 23022d1661a5SPawel Jakub Dawidek } 23033650be51SPawel Jakub Dawidek free(disk->d_sync.ds_bios, M_RAID3); 23043650be51SPawel Jakub Dawidek disk->d_sync.ds_bios = NULL; 23053650be51SPawel Jakub Dawidek cp = disk->d_sync.ds_consumer; 23062d1661a5SPawel Jakub Dawidek disk->d_sync.ds_consumer = NULL; 23072d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 23083650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 23093650be51SPawel Jakub Dawidek g_topology_lock(); 23103650be51SPawel Jakub Dawidek g_raid3_kill_consumer(sc, cp); 23113650be51SPawel Jakub Dawidek g_topology_unlock(); 23123650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 23132d1661a5SPawel Jakub Dawidek } 23142d1661a5SPawel Jakub Dawidek 23152d1661a5SPawel Jakub Dawidek static void 23162d1661a5SPawel Jakub Dawidek g_raid3_launch_provider(struct g_raid3_softc *sc) 23172d1661a5SPawel Jakub Dawidek { 23182d1661a5SPawel Jakub Dawidek struct g_provider *pp; 2319113d8e50SAlexander Motin struct g_raid3_disk *disk; 2320113d8e50SAlexander Motin int n; 23212d1661a5SPawel Jakub Dawidek 23223650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 23232d1661a5SPawel Jakub Dawidek 23243650be51SPawel Jakub Dawidek g_topology_lock(); 23252d1661a5SPawel Jakub Dawidek pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name); 23262d1661a5SPawel Jakub Dawidek pp->mediasize = sc->sc_mediasize; 23272d1661a5SPawel Jakub Dawidek pp->sectorsize = sc->sc_sectorsize; 2328113d8e50SAlexander Motin pp->stripesize = 0; 2329113d8e50SAlexander Motin pp->stripeoffset = 0; 2330113d8e50SAlexander Motin for (n = 0; n < sc->sc_ndisks; n++) { 2331113d8e50SAlexander Motin disk = &sc->sc_disks[n]; 2332113d8e50SAlexander Motin if (disk->d_consumer && disk->d_consumer->provider && 2333113d8e50SAlexander Motin disk->d_consumer->provider->stripesize > pp->stripesize) { 2334113d8e50SAlexander Motin pp->stripesize = disk->d_consumer->provider->stripesize; 2335113d8e50SAlexander Motin pp->stripeoffset = disk->d_consumer->provider->stripeoffset; 2336113d8e50SAlexander Motin } 2337113d8e50SAlexander Motin } 2338113d8e50SAlexander Motin pp->stripesize *= sc->sc_ndisks - 1; 2339113d8e50SAlexander Motin pp->stripeoffset *= sc->sc_ndisks - 1; 23402d1661a5SPawel Jakub Dawidek sc->sc_provider = pp; 23412d1661a5SPawel Jakub Dawidek g_error_provider(pp, 0); 23423650be51SPawel Jakub Dawidek g_topology_unlock(); 23430cca572eSJohn-Mark Gurney G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 23440cca572eSJohn-Mark Gurney g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks); 23450cca572eSJohn-Mark Gurney 23462d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED) 23472d1661a5SPawel Jakub Dawidek g_raid3_sync_start(sc); 23482d1661a5SPawel Jakub Dawidek } 23492d1661a5SPawel Jakub Dawidek 23502d1661a5SPawel Jakub Dawidek static void 23512d1661a5SPawel Jakub Dawidek g_raid3_destroy_provider(struct g_raid3_softc *sc) 23522d1661a5SPawel Jakub Dawidek { 23532d1661a5SPawel Jakub Dawidek struct bio *bp; 23542d1661a5SPawel Jakub Dawidek 23553650be51SPawel Jakub Dawidek g_topology_assert_not(); 23562d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 23572d1661a5SPawel Jakub Dawidek sc->sc_name)); 23582d1661a5SPawel Jakub Dawidek 23593650be51SPawel Jakub Dawidek g_topology_lock(); 23602d1661a5SPawel Jakub Dawidek g_error_provider(sc->sc_provider, ENXIO); 23612d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 23622d1661a5SPawel Jakub Dawidek while ((bp = bioq_first(&sc->sc_queue)) != NULL) { 23632d1661a5SPawel Jakub Dawidek bioq_remove(&sc->sc_queue, bp); 23642d1661a5SPawel Jakub Dawidek g_io_deliver(bp, ENXIO); 23652d1661a5SPawel Jakub Dawidek } 23662d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 23672d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 23682d1661a5SPawel Jakub Dawidek sc->sc_provider->name); 23698b64f3caSAlexander Motin g_wither_provider(sc->sc_provider, ENXIO); 23703650be51SPawel Jakub Dawidek g_topology_unlock(); 23712d1661a5SPawel Jakub Dawidek sc->sc_provider = NULL; 23722d1661a5SPawel Jakub Dawidek if (sc->sc_syncdisk != NULL) 23732d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(sc, 1); 23742d1661a5SPawel Jakub Dawidek } 23752d1661a5SPawel Jakub Dawidek 23762d1661a5SPawel Jakub Dawidek static void 23772d1661a5SPawel Jakub Dawidek g_raid3_go(void *arg) 23782d1661a5SPawel Jakub Dawidek { 23792d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 23802d1661a5SPawel Jakub Dawidek 23812d1661a5SPawel Jakub Dawidek sc = arg; 23822d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 23832d1661a5SPawel Jakub Dawidek g_raid3_event_send(sc, 0, 23842d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE); 23852d1661a5SPawel Jakub Dawidek } 23862d1661a5SPawel Jakub Dawidek 23872d1661a5SPawel Jakub Dawidek static u_int 23882d1661a5SPawel Jakub Dawidek g_raid3_determine_state(struct g_raid3_disk *disk) 23892d1661a5SPawel Jakub Dawidek { 23902d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 23912d1661a5SPawel Jakub Dawidek u_int state; 23922d1661a5SPawel Jakub Dawidek 23932d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 23942d1661a5SPawel Jakub Dawidek if (sc->sc_syncid == disk->d_sync.ds_syncid) { 23952d1661a5SPawel Jakub Dawidek if ((disk->d_flags & 23962d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) { 23972d1661a5SPawel Jakub Dawidek /* Disk does not need synchronization. */ 23982d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_ACTIVE; 23992d1661a5SPawel Jakub Dawidek } else { 24002d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & 24012d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 || 24022d1661a5SPawel Jakub Dawidek (disk->d_flags & 24032d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) { 24042d1661a5SPawel Jakub Dawidek /* 24052d1661a5SPawel Jakub Dawidek * We can start synchronization from 24062d1661a5SPawel Jakub Dawidek * the stored offset. 24072d1661a5SPawel Jakub Dawidek */ 24082d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_SYNCHRONIZING; 24092d1661a5SPawel Jakub Dawidek } else { 24102d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_STALE; 24112d1661a5SPawel Jakub Dawidek } 24122d1661a5SPawel Jakub Dawidek } 24132d1661a5SPawel Jakub Dawidek } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 24142d1661a5SPawel Jakub Dawidek /* 24152d1661a5SPawel Jakub Dawidek * Reset all synchronization data for this disk, 24162d1661a5SPawel Jakub Dawidek * because if it even was synchronized, it was 24172d1661a5SPawel Jakub Dawidek * synchronized to disks with different syncid. 24182d1661a5SPawel Jakub Dawidek */ 24192d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING; 24202d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset = 0; 24212d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset_done = 0; 24222d1661a5SPawel Jakub Dawidek disk->d_sync.ds_syncid = sc->sc_syncid; 24232d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 || 24242d1661a5SPawel Jakub Dawidek (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) { 24252d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_SYNCHRONIZING; 24262d1661a5SPawel Jakub Dawidek } else { 24272d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_STALE; 24282d1661a5SPawel Jakub Dawidek } 24292d1661a5SPawel Jakub Dawidek } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 24302d1661a5SPawel Jakub Dawidek /* 24312d1661a5SPawel Jakub Dawidek * Not good, NOT GOOD! 24322d1661a5SPawel Jakub Dawidek * It means that device was started on stale disks 24332d1661a5SPawel Jakub Dawidek * and more fresh disk just arrive. 24343c57a41dSPawel Jakub Dawidek * If there were writes, device is broken, sorry. 24352d1661a5SPawel Jakub Dawidek * I think the best choice here is don't touch 2436776fc0e9SYaroslav Tykhiy * this disk and inform the user loudly. 24372d1661a5SPawel Jakub Dawidek */ 24382d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s was started before the freshest " 24392d1661a5SPawel Jakub Dawidek "disk (%s) arrives!! It will not be connected to the " 24402d1661a5SPawel Jakub Dawidek "running device.", sc->sc_name, 24412d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 24422d1661a5SPawel Jakub Dawidek g_raid3_destroy_disk(disk); 24432d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_NONE; 24442d1661a5SPawel Jakub Dawidek /* Return immediately, because disk was destroyed. */ 24452d1661a5SPawel Jakub Dawidek return (state); 24462d1661a5SPawel Jakub Dawidek } 24472d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "State for %s disk: %s.", 24482d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), g_raid3_disk_state2str(state)); 24492d1661a5SPawel Jakub Dawidek return (state); 24502d1661a5SPawel Jakub Dawidek } 24512d1661a5SPawel Jakub Dawidek 24522d1661a5SPawel Jakub Dawidek /* 24532d1661a5SPawel Jakub Dawidek * Update device state. 24542d1661a5SPawel Jakub Dawidek */ 24552d1661a5SPawel Jakub Dawidek static void 2456d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force) 24572d1661a5SPawel Jakub Dawidek { 24582d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 24592d1661a5SPawel Jakub Dawidek u_int state; 24602d1661a5SPawel Jakub Dawidek 24613650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 24622d1661a5SPawel Jakub Dawidek 24632d1661a5SPawel Jakub Dawidek switch (sc->sc_state) { 24642d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_STARTING: 24652d1661a5SPawel Jakub Dawidek { 2466a245a548SPawel Jakub Dawidek u_int n, ndirty, ndisks, genid, syncid; 24672d1661a5SPawel Jakub Dawidek 24682d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_provider == NULL, 24692d1661a5SPawel Jakub Dawidek ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 24702d1661a5SPawel Jakub Dawidek /* 24712d1661a5SPawel Jakub Dawidek * Are we ready? We are, if all disks are connected or 24722d1661a5SPawel Jakub Dawidek * one disk is missing and 'force' is true. 24732d1661a5SPawel Jakub Dawidek */ 24742d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) { 24752d1661a5SPawel Jakub Dawidek if (!force) 24762d1661a5SPawel Jakub Dawidek callout_drain(&sc->sc_callout); 24772d1661a5SPawel Jakub Dawidek } else { 24782d1661a5SPawel Jakub Dawidek if (force) { 24792d1661a5SPawel Jakub Dawidek /* 24802d1661a5SPawel Jakub Dawidek * Timeout expired, so destroy device. 24812d1661a5SPawel Jakub Dawidek */ 24822d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 24834ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", 24844ed854e8SPawel Jakub Dawidek __LINE__, sc->sc_rootmount); 24854ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 24864ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 24872d1661a5SPawel Jakub Dawidek } 24882d1661a5SPawel Jakub Dawidek return; 24892d1661a5SPawel Jakub Dawidek } 24902d1661a5SPawel Jakub Dawidek 24912d1661a5SPawel Jakub Dawidek /* 2492a245a548SPawel Jakub Dawidek * Find the biggest genid. 2493a245a548SPawel Jakub Dawidek */ 2494a245a548SPawel Jakub Dawidek genid = 0; 2495a245a548SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 2496a245a548SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 2497a245a548SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2498a245a548SPawel Jakub Dawidek continue; 2499a245a548SPawel Jakub Dawidek if (disk->d_genid > genid) 2500a245a548SPawel Jakub Dawidek genid = disk->d_genid; 2501a245a548SPawel Jakub Dawidek } 2502a245a548SPawel Jakub Dawidek sc->sc_genid = genid; 2503a245a548SPawel Jakub Dawidek /* 2504a245a548SPawel Jakub Dawidek * Remove all disks without the biggest genid. 2505a245a548SPawel Jakub Dawidek */ 2506a245a548SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 2507a245a548SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 2508a245a548SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2509a245a548SPawel Jakub Dawidek continue; 2510a245a548SPawel Jakub Dawidek if (disk->d_genid < genid) { 2511a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, 2512a245a548SPawel Jakub Dawidek "Component %s (device %s) broken, skipping.", 2513a245a548SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 2514a245a548SPawel Jakub Dawidek g_raid3_destroy_disk(disk); 2515a245a548SPawel Jakub Dawidek } 2516a245a548SPawel Jakub Dawidek } 2517a245a548SPawel Jakub Dawidek 2518a245a548SPawel Jakub Dawidek /* 25192d1661a5SPawel Jakub Dawidek * There must be at least 'sc->sc_ndisks - 1' components 25202d1661a5SPawel Jakub Dawidek * with the same syncid and without SYNCHRONIZING flag. 25212d1661a5SPawel Jakub Dawidek */ 25222d1661a5SPawel Jakub Dawidek 25232d1661a5SPawel Jakub Dawidek /* 25242d1661a5SPawel Jakub Dawidek * Find the biggest syncid, number of valid components and 25252d1661a5SPawel Jakub Dawidek * number of dirty components. 25262d1661a5SPawel Jakub Dawidek */ 25272d1661a5SPawel Jakub Dawidek ndirty = ndisks = syncid = 0; 25282d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 25292d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 25302d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 25312d1661a5SPawel Jakub Dawidek continue; 25322d1661a5SPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) 25332d1661a5SPawel Jakub Dawidek ndirty++; 25342d1661a5SPawel Jakub Dawidek if (disk->d_sync.ds_syncid > syncid) { 25352d1661a5SPawel Jakub Dawidek syncid = disk->d_sync.ds_syncid; 25362d1661a5SPawel Jakub Dawidek ndisks = 0; 25372d1661a5SPawel Jakub Dawidek } else if (disk->d_sync.ds_syncid < syncid) { 25382d1661a5SPawel Jakub Dawidek continue; 25392d1661a5SPawel Jakub Dawidek } 25402d1661a5SPawel Jakub Dawidek if ((disk->d_flags & 25412d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) { 25422d1661a5SPawel Jakub Dawidek continue; 25432d1661a5SPawel Jakub Dawidek } 25442d1661a5SPawel Jakub Dawidek ndisks++; 25452d1661a5SPawel Jakub Dawidek } 25462d1661a5SPawel Jakub Dawidek /* 25472d1661a5SPawel Jakub Dawidek * Do we have enough valid components? 25482d1661a5SPawel Jakub Dawidek */ 25492d1661a5SPawel Jakub Dawidek if (ndisks + 1 < sc->sc_ndisks) { 25502d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, 25512d1661a5SPawel Jakub Dawidek "Device %s is broken, too few valid components.", 25522d1661a5SPawel Jakub Dawidek sc->sc_name); 25532d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 25542d1661a5SPawel Jakub Dawidek return; 25552d1661a5SPawel Jakub Dawidek } 25562d1661a5SPawel Jakub Dawidek /* 25572d1661a5SPawel Jakub Dawidek * If there is one DIRTY component and all disks are present, 25582d1661a5SPawel Jakub Dawidek * mark it for synchronization. If there is more than one DIRTY 25592d1661a5SPawel Jakub Dawidek * component, mark parity component for synchronization. 25602d1661a5SPawel Jakub Dawidek */ 25612d1661a5SPawel Jakub Dawidek if (ndisks == sc->sc_ndisks && ndirty == 1) { 25622d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 25632d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 25642d1661a5SPawel Jakub Dawidek if ((disk->d_flags & 25652d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_DIRTY) == 0) { 25662d1661a5SPawel Jakub Dawidek continue; 25672d1661a5SPawel Jakub Dawidek } 25682d1661a5SPawel Jakub Dawidek disk->d_flags |= 25692d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_SYNCHRONIZING; 25702d1661a5SPawel Jakub Dawidek } 25712d1661a5SPawel Jakub Dawidek } else if (ndisks == sc->sc_ndisks && ndirty > 1) { 25722d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 25732d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING; 25742d1661a5SPawel Jakub Dawidek } 25752d1661a5SPawel Jakub Dawidek 25762d1661a5SPawel Jakub Dawidek sc->sc_syncid = syncid; 25772d1661a5SPawel Jakub Dawidek if (force) { 25782d1661a5SPawel Jakub Dawidek /* Remember to bump syncid on first write. */ 2579ea973705SPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_SYNCID; 25802d1661a5SPawel Jakub Dawidek } 25812d1661a5SPawel Jakub Dawidek if (ndisks == sc->sc_ndisks) 25822d1661a5SPawel Jakub Dawidek state = G_RAID3_DEVICE_STATE_COMPLETE; 25832d1661a5SPawel Jakub Dawidek else /* if (ndisks == sc->sc_ndisks - 1) */ 25842d1661a5SPawel Jakub Dawidek state = G_RAID3_DEVICE_STATE_DEGRADED; 25852d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.", 25862d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_device_state2str(sc->sc_state), 25872d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(state)); 25882d1661a5SPawel Jakub Dawidek sc->sc_state = state; 25892d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 25902d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 25912d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 25922d1661a5SPawel Jakub Dawidek continue; 25932d1661a5SPawel Jakub Dawidek state = g_raid3_determine_state(disk); 25942d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT); 2595a245a548SPawel Jakub Dawidek if (state == G_RAID3_DISK_STATE_STALE) 2596ea973705SPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_SYNCID; 25972d1661a5SPawel Jakub Dawidek } 25982d1661a5SPawel Jakub Dawidek break; 25992d1661a5SPawel Jakub Dawidek } 26002d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_DEGRADED: 26012d1661a5SPawel Jakub Dawidek /* 2602ea973705SPawel Jakub Dawidek * Genid need to be bumped immediately, so do it here. 26032d1661a5SPawel Jakub Dawidek */ 2604ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) { 2605a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_GENID; 2606a245a548SPawel Jakub Dawidek g_raid3_bump_genid(sc); 2607a245a548SPawel Jakub Dawidek } 2608a245a548SPawel Jakub Dawidek 26092d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0) 26102d1661a5SPawel Jakub Dawidek return; 26112d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < 26122d1661a5SPawel Jakub Dawidek sc->sc_ndisks - 1) { 26132d1661a5SPawel Jakub Dawidek if (sc->sc_provider != NULL) 26142d1661a5SPawel Jakub Dawidek g_raid3_destroy_provider(sc); 26152d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 26162d1661a5SPawel Jakub Dawidek return; 26172d1661a5SPawel Jakub Dawidek } 26182d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) == 26192d1661a5SPawel Jakub Dawidek sc->sc_ndisks) { 26202d1661a5SPawel Jakub Dawidek state = G_RAID3_DEVICE_STATE_COMPLETE; 26212d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 26222d1661a5SPawel Jakub Dawidek "Device %s state changed from %s to %s.", 26232d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_device_state2str(sc->sc_state), 26242d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(state)); 26252d1661a5SPawel Jakub Dawidek sc->sc_state = state; 26262d1661a5SPawel Jakub Dawidek } 26272d1661a5SPawel Jakub Dawidek if (sc->sc_provider == NULL) 26282d1661a5SPawel Jakub Dawidek g_raid3_launch_provider(sc); 26294ed854e8SPawel Jakub Dawidek if (sc->sc_rootmount != NULL) { 26304ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 26314ed854e8SPawel Jakub Dawidek sc->sc_rootmount); 26324ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 26334ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 26344ed854e8SPawel Jakub Dawidek } 26352d1661a5SPawel Jakub Dawidek break; 26362d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_COMPLETE: 26372d1661a5SPawel Jakub Dawidek /* 2638ea973705SPawel Jakub Dawidek * Genid need to be bumped immediately, so do it here. 26392d1661a5SPawel Jakub Dawidek */ 2640ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) { 2641a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_GENID; 2642a245a548SPawel Jakub Dawidek g_raid3_bump_genid(sc); 2643a245a548SPawel Jakub Dawidek } 2644a245a548SPawel Jakub Dawidek 26452d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0) 26462d1661a5SPawel Jakub Dawidek return; 26472d1661a5SPawel Jakub Dawidek KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >= 26482d1661a5SPawel Jakub Dawidek sc->sc_ndisks - 1, 26492d1661a5SPawel Jakub Dawidek ("Too few ACTIVE components in COMPLETE state (device %s).", 26502d1661a5SPawel Jakub Dawidek sc->sc_name)); 26512d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) == 26522d1661a5SPawel Jakub Dawidek sc->sc_ndisks - 1) { 26532d1661a5SPawel Jakub Dawidek state = G_RAID3_DEVICE_STATE_DEGRADED; 26542d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 26552d1661a5SPawel Jakub Dawidek "Device %s state changed from %s to %s.", 26562d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_device_state2str(sc->sc_state), 26572d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(state)); 26582d1661a5SPawel Jakub Dawidek sc->sc_state = state; 26592d1661a5SPawel Jakub Dawidek } 26602d1661a5SPawel Jakub Dawidek if (sc->sc_provider == NULL) 26612d1661a5SPawel Jakub Dawidek g_raid3_launch_provider(sc); 26624ed854e8SPawel Jakub Dawidek if (sc->sc_rootmount != NULL) { 26634ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 26644ed854e8SPawel Jakub Dawidek sc->sc_rootmount); 26654ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 26664ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 26674ed854e8SPawel Jakub Dawidek } 26682d1661a5SPawel Jakub Dawidek break; 26692d1661a5SPawel Jakub Dawidek default: 26702d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name, 26712d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state))); 26722d1661a5SPawel Jakub Dawidek break; 26732d1661a5SPawel Jakub Dawidek } 26742d1661a5SPawel Jakub Dawidek } 26752d1661a5SPawel Jakub Dawidek 26762d1661a5SPawel Jakub Dawidek /* 26772d1661a5SPawel Jakub Dawidek * Update disk state and device state if needed. 26782d1661a5SPawel Jakub Dawidek */ 26792d1661a5SPawel Jakub Dawidek #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \ 26802d1661a5SPawel Jakub Dawidek "Disk %s state changed from %s to %s (device %s).", \ 26812d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), \ 26822d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state), \ 26832d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(state), sc->sc_name) 26842d1661a5SPawel Jakub Dawidek static int 2685d97d5ee9SPawel Jakub Dawidek g_raid3_update_disk(struct g_raid3_disk *disk, u_int state) 26862d1661a5SPawel Jakub Dawidek { 26872d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 26882d1661a5SPawel Jakub Dawidek 26892d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 26903650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 26913650be51SPawel Jakub Dawidek 26922d1661a5SPawel Jakub Dawidek again: 26932d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.", 26942d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state), 26952d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(state)); 26962d1661a5SPawel Jakub Dawidek switch (state) { 26972d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NEW: 26982d1661a5SPawel Jakub Dawidek /* 26992d1661a5SPawel Jakub Dawidek * Possible scenarios: 27002d1661a5SPawel Jakub Dawidek * 1. New disk arrive. 27012d1661a5SPawel Jakub Dawidek */ 27022d1661a5SPawel Jakub Dawidek /* Previous state should be NONE. */ 27032d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE, 27042d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27052d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27062d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27072d1661a5SPawel Jakub Dawidek 27082d1661a5SPawel Jakub Dawidek disk->d_state = state; 27090cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s: provider %s detected.", 27102d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27112d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) 27122d1661a5SPawel Jakub Dawidek break; 27132d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27142d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27152d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27162d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27172d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27182d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27192d1661a5SPawel Jakub Dawidek state = g_raid3_determine_state(disk); 27202d1661a5SPawel Jakub Dawidek if (state != G_RAID3_DISK_STATE_NONE) 27212d1661a5SPawel Jakub Dawidek goto again; 27222d1661a5SPawel Jakub Dawidek break; 27232d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_ACTIVE: 27242d1661a5SPawel Jakub Dawidek /* 27252d1661a5SPawel Jakub Dawidek * Possible scenarios: 27262d1661a5SPawel Jakub Dawidek * 1. New disk does not need synchronization. 27272d1661a5SPawel Jakub Dawidek * 2. Synchronization process finished successfully. 27282d1661a5SPawel Jakub Dawidek */ 27292d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27302d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27312d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27322d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27332d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27342d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27352d1661a5SPawel Jakub Dawidek /* Previous state should be NEW or SYNCHRONIZING. */ 27362d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW || 27372d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 27382d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27392d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27402d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27412d1661a5SPawel Jakub Dawidek 2742bf31327cSPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 27432d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING; 27442d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC; 27452d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(sc, 0); 27462d1661a5SPawel Jakub Dawidek } 27472d1661a5SPawel Jakub Dawidek disk->d_state = state; 27482d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset = 0; 27492d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset_done = 0; 27500962f942SPawel Jakub Dawidek g_raid3_update_idle(sc, disk); 2751bf31327cSPawel Jakub Dawidek g_raid3_update_metadata(disk); 27520cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s: provider %s activated.", 27532d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27542d1661a5SPawel Jakub Dawidek break; 27552d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_STALE: 27562d1661a5SPawel Jakub Dawidek /* 27572d1661a5SPawel Jakub Dawidek * Possible scenarios: 27582d1661a5SPawel Jakub Dawidek * 1. Stale disk was connected. 27592d1661a5SPawel Jakub Dawidek */ 27602d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 27612d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 27622d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27632d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27642d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27652d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27662d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27672d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27682d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27692d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27702d1661a5SPawel Jakub Dawidek /* 27712d1661a5SPawel Jakub Dawidek * STALE state is only possible if device is marked 27722d1661a5SPawel Jakub Dawidek * NOAUTOSYNC. 27732d1661a5SPawel Jakub Dawidek */ 27742d1661a5SPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0, 27752d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27762d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27772d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27782d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27792d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27802d1661a5SPawel Jakub Dawidek 27812d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 27822d1661a5SPawel Jakub Dawidek disk->d_state = state; 27832d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 27842d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s is stale.", 27852d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27862d1661a5SPawel Jakub Dawidek break; 27872d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_SYNCHRONIZING: 27882d1661a5SPawel Jakub Dawidek /* 27892d1661a5SPawel Jakub Dawidek * Possible scenarios: 27902d1661a5SPawel Jakub Dawidek * 1. Disk which needs synchronization was connected. 27912d1661a5SPawel Jakub Dawidek */ 27922d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 27932d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 27942d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27952d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27962d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27972d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27982d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27992d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28002d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28012d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28022d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 28032d1661a5SPawel Jakub Dawidek 28042d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NEW) 28052d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 28062d1661a5SPawel Jakub Dawidek disk->d_state = state; 28072d1661a5SPawel Jakub Dawidek if (sc->sc_provider != NULL) { 28082d1661a5SPawel Jakub Dawidek g_raid3_sync_start(sc); 28092d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 28102d1661a5SPawel Jakub Dawidek } 28112d1661a5SPawel Jakub Dawidek break; 28122d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_DISCONNECTED: 28132d1661a5SPawel Jakub Dawidek /* 28142d1661a5SPawel Jakub Dawidek * Possible scenarios: 28152d1661a5SPawel Jakub Dawidek * 1. Device wasn't running yet, but disk disappear. 28162d1661a5SPawel Jakub Dawidek * 2. Disk was active and disapppear. 28172d1661a5SPawel Jakub Dawidek * 3. Disk disappear during synchronization process. 28182d1661a5SPawel Jakub Dawidek */ 28192d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 28202d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 28212d1661a5SPawel Jakub Dawidek /* 28222d1661a5SPawel Jakub Dawidek * Previous state should be ACTIVE, STALE or 28232d1661a5SPawel Jakub Dawidek * SYNCHRONIZING. 28242d1661a5SPawel Jakub Dawidek */ 28252d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 28262d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_STALE || 28272d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 28282d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", 28292d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28302d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28312d1661a5SPawel Jakub Dawidek } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) { 28322d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 28332d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 28342d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", 28352d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28362d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28372d1661a5SPawel Jakub Dawidek /* 28382d1661a5SPawel Jakub Dawidek * Reset bumping syncid if disk disappeared in STARTING 28392d1661a5SPawel Jakub Dawidek * state. 28402d1661a5SPawel Jakub Dawidek */ 2841ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) 2842a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 28432d1661a5SPawel Jakub Dawidek #ifdef INVARIANTS 28442d1661a5SPawel Jakub Dawidek } else { 28452d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 28462d1661a5SPawel Jakub Dawidek sc->sc_name, 28472d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28482d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28492d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28502d1661a5SPawel Jakub Dawidek #endif 28512d1661a5SPawel Jakub Dawidek } 28522d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 28532d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.", 28542d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 28552d1661a5SPawel Jakub Dawidek 28562d1661a5SPawel Jakub Dawidek g_raid3_destroy_disk(disk); 28572d1661a5SPawel Jakub Dawidek break; 28582d1661a5SPawel Jakub Dawidek default: 28592d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Unknown state (%u).", state)); 28602d1661a5SPawel Jakub Dawidek break; 28612d1661a5SPawel Jakub Dawidek } 28622d1661a5SPawel Jakub Dawidek return (0); 28632d1661a5SPawel Jakub Dawidek } 28642d1661a5SPawel Jakub Dawidek #undef DISK_STATE_CHANGED 28652d1661a5SPawel Jakub Dawidek 2866ea973705SPawel Jakub Dawidek int 28672d1661a5SPawel Jakub Dawidek g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md) 28682d1661a5SPawel Jakub Dawidek { 28692d1661a5SPawel Jakub Dawidek struct g_provider *pp; 28702d1661a5SPawel Jakub Dawidek u_char *buf; 28712d1661a5SPawel Jakub Dawidek int error; 28722d1661a5SPawel Jakub Dawidek 28732d1661a5SPawel Jakub Dawidek g_topology_assert(); 28742d1661a5SPawel Jakub Dawidek 28752d1661a5SPawel Jakub Dawidek error = g_access(cp, 1, 0, 0); 28762d1661a5SPawel Jakub Dawidek if (error != 0) 28772d1661a5SPawel Jakub Dawidek return (error); 28782d1661a5SPawel Jakub Dawidek pp = cp->provider; 28792d1661a5SPawel Jakub Dawidek g_topology_unlock(); 28802d1661a5SPawel Jakub Dawidek /* Metadata are stored on last sector. */ 28812d1661a5SPawel Jakub Dawidek buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 28822d1661a5SPawel Jakub Dawidek &error); 28832d1661a5SPawel Jakub Dawidek g_topology_lock(); 28842d1661a5SPawel Jakub Dawidek g_access(cp, -1, 0, 0); 28858a4a44b5SMaxim Sobolev if (buf == NULL) { 2886a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2887a245a548SPawel Jakub Dawidek cp->provider->name, error); 28882d1661a5SPawel Jakub Dawidek return (error); 28892d1661a5SPawel Jakub Dawidek } 28902d1661a5SPawel Jakub Dawidek 28912d1661a5SPawel Jakub Dawidek /* Decode metadata. */ 28922d1661a5SPawel Jakub Dawidek error = raid3_metadata_decode(buf, md); 28932d1661a5SPawel Jakub Dawidek g_free(buf); 28942d1661a5SPawel Jakub Dawidek if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0) 28952d1661a5SPawel Jakub Dawidek return (EINVAL); 2896a245a548SPawel Jakub Dawidek if (md->md_version > G_RAID3_VERSION) { 2897a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, 2898a245a548SPawel Jakub Dawidek "Kernel module is too old to handle metadata from %s.", 2899a245a548SPawel Jakub Dawidek cp->provider->name); 2900a245a548SPawel Jakub Dawidek return (EINVAL); 2901a245a548SPawel Jakub Dawidek } 29022d1661a5SPawel Jakub Dawidek if (error != 0) { 29032d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 29042d1661a5SPawel Jakub Dawidek cp->provider->name); 29052d1661a5SPawel Jakub Dawidek return (error); 29062d1661a5SPawel Jakub Dawidek } 2907cd853791SKonstantin Belousov if (md->md_sectorsize > maxphys) { 290895959703SAndrey V. Elsukov G_RAID3_DEBUG(0, "The blocksize is too big."); 290995959703SAndrey V. Elsukov return (EINVAL); 291095959703SAndrey V. Elsukov } 29112d1661a5SPawel Jakub Dawidek 29122d1661a5SPawel Jakub Dawidek return (0); 29132d1661a5SPawel Jakub Dawidek } 29142d1661a5SPawel Jakub Dawidek 29152d1661a5SPawel Jakub Dawidek static int 29162d1661a5SPawel Jakub Dawidek g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp, 29172d1661a5SPawel Jakub Dawidek struct g_raid3_metadata *md) 29182d1661a5SPawel Jakub Dawidek { 29192d1661a5SPawel Jakub Dawidek 29202d1661a5SPawel Jakub Dawidek if (md->md_no >= sc->sc_ndisks) { 29212d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.", 29222d1661a5SPawel Jakub Dawidek pp->name, md->md_no); 29232d1661a5SPawel Jakub Dawidek return (EINVAL); 29242d1661a5SPawel Jakub Dawidek } 29252d1661a5SPawel Jakub Dawidek if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) { 29262d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.", 29272d1661a5SPawel Jakub Dawidek pp->name, md->md_no); 29282d1661a5SPawel Jakub Dawidek return (EEXIST); 29292d1661a5SPawel Jakub Dawidek } 29302d1661a5SPawel Jakub Dawidek if (md->md_all != sc->sc_ndisks) { 29312d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29322d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29332d1661a5SPawel Jakub Dawidek "md_all", pp->name, sc->sc_name); 29342d1661a5SPawel Jakub Dawidek return (EINVAL); 29352d1661a5SPawel Jakub Dawidek } 293611b2174fSPawel Jakub Dawidek if ((md->md_mediasize % md->md_sectorsize) != 0) { 293711b2174fSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != " 293811b2174fSPawel Jakub Dawidek "0) on disk %s (device %s), skipping.", pp->name, 293911b2174fSPawel Jakub Dawidek sc->sc_name); 294011b2174fSPawel Jakub Dawidek return (EINVAL); 294111b2174fSPawel Jakub Dawidek } 29422d1661a5SPawel Jakub Dawidek if (md->md_mediasize != sc->sc_mediasize) { 29432d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29442d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29452d1661a5SPawel Jakub Dawidek "md_mediasize", pp->name, sc->sc_name); 29462d1661a5SPawel Jakub Dawidek return (EINVAL); 29472d1661a5SPawel Jakub Dawidek } 29482d1661a5SPawel Jakub Dawidek if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) { 29492d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29502d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29512d1661a5SPawel Jakub Dawidek "md_mediasize", pp->name, sc->sc_name); 29522d1661a5SPawel Jakub Dawidek return (EINVAL); 29532d1661a5SPawel Jakub Dawidek } 29542d1661a5SPawel Jakub Dawidek if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) { 29552d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29562d1661a5SPawel Jakub Dawidek "Invalid size of disk %s (device %s), skipping.", pp->name, 29572d1661a5SPawel Jakub Dawidek sc->sc_name); 29582d1661a5SPawel Jakub Dawidek return (EINVAL); 29592d1661a5SPawel Jakub Dawidek } 29602d1661a5SPawel Jakub Dawidek if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) { 29612d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29622d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29632d1661a5SPawel Jakub Dawidek "md_sectorsize", pp->name, sc->sc_name); 29642d1661a5SPawel Jakub Dawidek return (EINVAL); 29652d1661a5SPawel Jakub Dawidek } 29662d1661a5SPawel Jakub Dawidek if (md->md_sectorsize != sc->sc_sectorsize) { 29672d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29682d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29692d1661a5SPawel Jakub Dawidek "md_sectorsize", pp->name, sc->sc_name); 29702d1661a5SPawel Jakub Dawidek return (EINVAL); 29712d1661a5SPawel Jakub Dawidek } 29722d1661a5SPawel Jakub Dawidek if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 29732d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29742d1661a5SPawel Jakub Dawidek "Invalid sector size of disk %s (device %s), skipping.", 29752d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 29762d1661a5SPawel Jakub Dawidek return (EINVAL); 29772d1661a5SPawel Jakub Dawidek } 29782d1661a5SPawel Jakub Dawidek if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) { 29792d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29802d1661a5SPawel Jakub Dawidek "Invalid device flags on disk %s (device %s), skipping.", 29812d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 29822d1661a5SPawel Jakub Dawidek return (EINVAL); 29832d1661a5SPawel Jakub Dawidek } 2984dba915cfSPawel Jakub Dawidek if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 2985dba915cfSPawel Jakub Dawidek (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) { 2986dba915cfSPawel Jakub Dawidek /* 2987dba915cfSPawel Jakub Dawidek * VERIFY and ROUND-ROBIN options are mutally exclusive. 2988dba915cfSPawel Jakub Dawidek */ 2989dba915cfSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on " 2990dba915cfSPawel Jakub Dawidek "disk %s (device %s), skipping.", pp->name, sc->sc_name); 2991dba915cfSPawel Jakub Dawidek return (EINVAL); 2992dba915cfSPawel Jakub Dawidek } 29932d1661a5SPawel Jakub Dawidek if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) { 29942d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29952d1661a5SPawel Jakub Dawidek "Invalid disk flags on disk %s (device %s), skipping.", 29962d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 29972d1661a5SPawel Jakub Dawidek return (EINVAL); 29982d1661a5SPawel Jakub Dawidek } 29992d1661a5SPawel Jakub Dawidek return (0); 30002d1661a5SPawel Jakub Dawidek } 30012d1661a5SPawel Jakub Dawidek 3002ea973705SPawel Jakub Dawidek int 30032d1661a5SPawel Jakub Dawidek g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp, 30042d1661a5SPawel Jakub Dawidek struct g_raid3_metadata *md) 30052d1661a5SPawel Jakub Dawidek { 30062d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 30072d1661a5SPawel Jakub Dawidek int error; 30082d1661a5SPawel Jakub Dawidek 30093650be51SPawel Jakub Dawidek g_topology_assert_not(); 30102d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Adding disk %s.", pp->name); 30112d1661a5SPawel Jakub Dawidek 30122d1661a5SPawel Jakub Dawidek error = g_raid3_check_metadata(sc, pp, md); 30132d1661a5SPawel Jakub Dawidek if (error != 0) 30142d1661a5SPawel Jakub Dawidek return (error); 3015a245a548SPawel Jakub Dawidek if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING && 3016a245a548SPawel Jakub Dawidek md->md_genid < sc->sc_genid) { 3017a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.", 3018a245a548SPawel Jakub Dawidek pp->name, sc->sc_name); 3019a245a548SPawel Jakub Dawidek return (EINVAL); 3020a245a548SPawel Jakub Dawidek } 30212d1661a5SPawel Jakub Dawidek disk = g_raid3_init_disk(sc, pp, md, &error); 30222d1661a5SPawel Jakub Dawidek if (disk == NULL) 30232d1661a5SPawel Jakub Dawidek return (error); 30242d1661a5SPawel Jakub Dawidek error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW, 30252d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_WAIT); 3026a245a548SPawel Jakub Dawidek if (error != 0) 30272d1661a5SPawel Jakub Dawidek return (error); 3028a245a548SPawel Jakub Dawidek if (md->md_version < G_RAID3_VERSION) { 3029a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 3030a245a548SPawel Jakub Dawidek pp->name, md->md_version, G_RAID3_VERSION); 3031a245a548SPawel Jakub Dawidek g_raid3_update_metadata(disk); 3032a245a548SPawel Jakub Dawidek } 3033a245a548SPawel Jakub Dawidek return (0); 30342d1661a5SPawel Jakub Dawidek } 30352d1661a5SPawel Jakub Dawidek 3036712fe9bdSPawel Jakub Dawidek static void 3037712fe9bdSPawel Jakub Dawidek g_raid3_destroy_delayed(void *arg, int flag) 3038712fe9bdSPawel Jakub Dawidek { 3039712fe9bdSPawel Jakub Dawidek struct g_raid3_softc *sc; 3040712fe9bdSPawel Jakub Dawidek int error; 3041712fe9bdSPawel Jakub Dawidek 3042712fe9bdSPawel Jakub Dawidek if (flag == EV_CANCEL) { 3043712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Destroying canceled."); 3044712fe9bdSPawel Jakub Dawidek return; 3045712fe9bdSPawel Jakub Dawidek } 3046712fe9bdSPawel Jakub Dawidek sc = arg; 3047712fe9bdSPawel Jakub Dawidek g_topology_unlock(); 3048712fe9bdSPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3049712fe9bdSPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0, 3050712fe9bdSPawel Jakub Dawidek ("DESTROY flag set on %s.", sc->sc_name)); 3051712fe9bdSPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0, 3052712fe9bdSPawel Jakub Dawidek ("DESTROYING flag not set on %s.", sc->sc_name)); 3053712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name); 3054712fe9bdSPawel Jakub Dawidek error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT); 3055712fe9bdSPawel Jakub Dawidek if (error != 0) { 3056712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 3057712fe9bdSPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 3058712fe9bdSPawel Jakub Dawidek } 3059712fe9bdSPawel Jakub Dawidek g_topology_lock(); 3060712fe9bdSPawel Jakub Dawidek } 3061712fe9bdSPawel Jakub Dawidek 30622d1661a5SPawel Jakub Dawidek static int 30632d1661a5SPawel Jakub Dawidek g_raid3_access(struct g_provider *pp, int acr, int acw, int ace) 30642d1661a5SPawel Jakub Dawidek { 30652d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 3066712fe9bdSPawel Jakub Dawidek int dcr, dcw, dce, error = 0; 30672d1661a5SPawel Jakub Dawidek 30682d1661a5SPawel Jakub Dawidek g_topology_assert(); 30692d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 30702d1661a5SPawel Jakub Dawidek acw, ace); 30712d1661a5SPawel Jakub Dawidek 30721f7fec3cSPawel Jakub Dawidek sc = pp->geom->softc; 30731f7fec3cSPawel Jakub Dawidek if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 30741f7fec3cSPawel Jakub Dawidek return (0); 30751f7fec3cSPawel Jakub Dawidek KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 30761f7fec3cSPawel Jakub Dawidek 30772d1661a5SPawel Jakub Dawidek dcr = pp->acr + acr; 30782d1661a5SPawel Jakub Dawidek dcw = pp->acw + acw; 30792d1661a5SPawel Jakub Dawidek dce = pp->ace + ace; 30802d1661a5SPawel Jakub Dawidek 30813650be51SPawel Jakub Dawidek g_topology_unlock(); 30823650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3083712fe9bdSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 || 30843650be51SPawel Jakub Dawidek g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) { 30853650be51SPawel Jakub Dawidek if (acr > 0 || acw > 0 || ace > 0) 30863650be51SPawel Jakub Dawidek error = ENXIO; 30873650be51SPawel Jakub Dawidek goto end; 30882d1661a5SPawel Jakub Dawidek } 3089f62c1a47SAlexander Motin if (dcw == 0) 30903650be51SPawel Jakub Dawidek g_raid3_idle(sc, dcw); 3091712fe9bdSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) { 3092712fe9bdSPawel Jakub Dawidek if (acr > 0 || acw > 0 || ace > 0) { 3093712fe9bdSPawel Jakub Dawidek error = ENXIO; 3094712fe9bdSPawel Jakub Dawidek goto end; 3095712fe9bdSPawel Jakub Dawidek } 3096712fe9bdSPawel Jakub Dawidek if (dcr == 0 && dcw == 0 && dce == 0) { 3097712fe9bdSPawel Jakub Dawidek g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK, 3098712fe9bdSPawel Jakub Dawidek sc, NULL); 3099712fe9bdSPawel Jakub Dawidek } 3100712fe9bdSPawel Jakub Dawidek } 31013650be51SPawel Jakub Dawidek end: 31023650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 31033650be51SPawel Jakub Dawidek g_topology_lock(); 31043650be51SPawel Jakub Dawidek return (error); 31052d1661a5SPawel Jakub Dawidek } 31062d1661a5SPawel Jakub Dawidek 31072d1661a5SPawel Jakub Dawidek static struct g_geom * 31082d1661a5SPawel Jakub Dawidek g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md) 31092d1661a5SPawel Jakub Dawidek { 31102d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 31112d1661a5SPawel Jakub Dawidek struct g_geom *gp; 31122d1661a5SPawel Jakub Dawidek int error, timeout; 31132d1661a5SPawel Jakub Dawidek u_int n; 31142d1661a5SPawel Jakub Dawidek 31152d1661a5SPawel Jakub Dawidek g_topology_assert(); 31162d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id); 31172d1661a5SPawel Jakub Dawidek 31182d1661a5SPawel Jakub Dawidek /* One disk is minimum. */ 31192d1661a5SPawel Jakub Dawidek if (md->md_all < 1) 31202d1661a5SPawel Jakub Dawidek return (NULL); 31212d1661a5SPawel Jakub Dawidek /* 31222d1661a5SPawel Jakub Dawidek * Action geom. 31232d1661a5SPawel Jakub Dawidek */ 31242d1661a5SPawel Jakub Dawidek gp = g_new_geomf(mp, "%s", md->md_name); 31252d1661a5SPawel Jakub Dawidek sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO); 31262d1661a5SPawel Jakub Dawidek sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3, 31272d1661a5SPawel Jakub Dawidek M_WAITOK | M_ZERO); 31282d1661a5SPawel Jakub Dawidek gp->start = g_raid3_start; 31292d1661a5SPawel Jakub Dawidek gp->orphan = g_raid3_orphan; 31302d1661a5SPawel Jakub Dawidek gp->access = g_raid3_access; 31312d1661a5SPawel Jakub Dawidek gp->dumpconf = g_raid3_dumpconf; 31322d1661a5SPawel Jakub Dawidek 31332d1661a5SPawel Jakub Dawidek sc->sc_id = md->md_id; 31342d1661a5SPawel Jakub Dawidek sc->sc_mediasize = md->md_mediasize; 31352d1661a5SPawel Jakub Dawidek sc->sc_sectorsize = md->md_sectorsize; 31362d1661a5SPawel Jakub Dawidek sc->sc_ndisks = md->md_all; 3137f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin = 0; 31382d1661a5SPawel Jakub Dawidek sc->sc_flags = md->md_mflags; 3139a245a548SPawel Jakub Dawidek sc->sc_bump_id = 0; 31400962f942SPawel Jakub Dawidek sc->sc_idle = 1; 314101f1f41cSPawel Jakub Dawidek sc->sc_last_write = time_uptime; 31420962f942SPawel Jakub Dawidek sc->sc_writes = 0; 3143afd05d74SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 3144afd05d74SPawel Jakub Dawidek sc->sc_disks[n].d_softc = sc; 3145afd05d74SPawel Jakub Dawidek sc->sc_disks[n].d_no = n; 31462d1661a5SPawel Jakub Dawidek sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK; 3147afd05d74SPawel Jakub Dawidek } 31483650be51SPawel Jakub Dawidek sx_init(&sc->sc_lock, "graid3:lock"); 31492d1661a5SPawel Jakub Dawidek bioq_init(&sc->sc_queue); 31502d1661a5SPawel Jakub Dawidek mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF); 31513650be51SPawel Jakub Dawidek bioq_init(&sc->sc_regular_delayed); 31523650be51SPawel Jakub Dawidek bioq_init(&sc->sc_inflight); 31533650be51SPawel Jakub Dawidek bioq_init(&sc->sc_sync_delayed); 31542d1661a5SPawel Jakub Dawidek TAILQ_INIT(&sc->sc_events); 31552d1661a5SPawel Jakub Dawidek mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF); 3156fd90e2edSJung-uk Kim callout_init(&sc->sc_callout, 1); 31572d1661a5SPawel Jakub Dawidek sc->sc_state = G_RAID3_DEVICE_STATE_STARTING; 31582d1661a5SPawel Jakub Dawidek gp->softc = sc; 31592d1661a5SPawel Jakub Dawidek sc->sc_geom = gp; 31602d1661a5SPawel Jakub Dawidek sc->sc_provider = NULL; 31612d1661a5SPawel Jakub Dawidek /* 31622d1661a5SPawel Jakub Dawidek * Synchronization geom. 31632d1661a5SPawel Jakub Dawidek */ 31642d1661a5SPawel Jakub Dawidek gp = g_new_geomf(mp, "%s.sync", md->md_name); 31652d1661a5SPawel Jakub Dawidek gp->softc = sc; 31662d1661a5SPawel Jakub Dawidek gp->orphan = g_raid3_orphan; 31672d1661a5SPawel Jakub Dawidek sc->sc_sync.ds_geom = gp; 31683650be51SPawel Jakub Dawidek 3169ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 3170ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k", 3171ed940a82SPawel Jakub Dawidek 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3172ed940a82SPawel Jakub Dawidek UMA_ALIGN_PTR, 0); 31733650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0; 31743650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k; 31753650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_requested = 31763650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0; 3177ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k", 3178ed940a82SPawel Jakub Dawidek 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3179ed940a82SPawel Jakub Dawidek UMA_ALIGN_PTR, 0); 31803650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0; 31813650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k; 31823650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_requested = 31833650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0; 3184ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k", 3185ed940a82SPawel Jakub Dawidek 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3186ed940a82SPawel Jakub Dawidek UMA_ALIGN_PTR, 0); 31873650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0; 31883650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k; 31893650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_requested = 31903650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0; 3191ed940a82SPawel Jakub Dawidek } 31923650be51SPawel Jakub Dawidek 31933745c395SJulian Elischer error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0, 31942d1661a5SPawel Jakub Dawidek "g_raid3 %s", md->md_name); 31952d1661a5SPawel Jakub Dawidek if (error != 0) { 31962d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.", 31972d1661a5SPawel Jakub Dawidek sc->sc_name); 3198ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 31993650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone); 32003650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone); 32013650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone); 3202ed940a82SPawel Jakub Dawidek } 32032d1661a5SPawel Jakub Dawidek g_destroy_geom(sc->sc_sync.ds_geom); 32042d1661a5SPawel Jakub Dawidek mtx_destroy(&sc->sc_events_mtx); 32052d1661a5SPawel Jakub Dawidek mtx_destroy(&sc->sc_queue_mtx); 32063650be51SPawel Jakub Dawidek sx_destroy(&sc->sc_lock); 32072d1661a5SPawel Jakub Dawidek g_destroy_geom(sc->sc_geom); 32082d1661a5SPawel Jakub Dawidek free(sc->sc_disks, M_RAID3); 32092d1661a5SPawel Jakub Dawidek free(sc, M_RAID3); 32102d1661a5SPawel Jakub Dawidek return (NULL); 32112d1661a5SPawel Jakub Dawidek } 32122d1661a5SPawel Jakub Dawidek 32130cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).", 32140cca572eSJohn-Mark Gurney sc->sc_name, sc->sc_ndisks, sc->sc_id); 32152d1661a5SPawel Jakub Dawidek 3216853a10a5SAndrew Thompson sc->sc_rootmount = root_mount_hold("GRAID3"); 32174ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 32184ed854e8SPawel Jakub Dawidek 32192d1661a5SPawel Jakub Dawidek /* 32202d1661a5SPawel Jakub Dawidek * Run timeout. 32212d1661a5SPawel Jakub Dawidek */ 32222d1661a5SPawel Jakub Dawidek timeout = atomic_load_acq_int(&g_raid3_timeout); 32232d1661a5SPawel Jakub Dawidek callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc); 32242d1661a5SPawel Jakub Dawidek return (sc->sc_geom); 32252d1661a5SPawel Jakub Dawidek } 32262d1661a5SPawel Jakub Dawidek 32272d1661a5SPawel Jakub Dawidek int 3228712fe9bdSPawel Jakub Dawidek g_raid3_destroy(struct g_raid3_softc *sc, int how) 32292d1661a5SPawel Jakub Dawidek { 32302d1661a5SPawel Jakub Dawidek struct g_provider *pp; 32312d1661a5SPawel Jakub Dawidek 32323650be51SPawel Jakub Dawidek g_topology_assert_not(); 32332d1661a5SPawel Jakub Dawidek if (sc == NULL) 32342d1661a5SPawel Jakub Dawidek return (ENXIO); 32353650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 32363650be51SPawel Jakub Dawidek 32372d1661a5SPawel Jakub Dawidek pp = sc->sc_provider; 32382d1661a5SPawel Jakub Dawidek if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 3239712fe9bdSPawel Jakub Dawidek switch (how) { 3240712fe9bdSPawel Jakub Dawidek case G_RAID3_DESTROY_SOFT: 32412d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 32422d1661a5SPawel Jakub Dawidek "Device %s is still open (r%dw%de%d).", pp->name, 32432d1661a5SPawel Jakub Dawidek pp->acr, pp->acw, pp->ace); 32442d1661a5SPawel Jakub Dawidek return (EBUSY); 3245712fe9bdSPawel Jakub Dawidek case G_RAID3_DESTROY_DELAYED: 3246712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, 3247712fe9bdSPawel Jakub Dawidek "Device %s will be destroyed on last close.", 3248712fe9bdSPawel Jakub Dawidek pp->name); 3249712fe9bdSPawel Jakub Dawidek if (sc->sc_syncdisk != NULL) 3250712fe9bdSPawel Jakub Dawidek g_raid3_sync_stop(sc, 1); 3251712fe9bdSPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING; 3252712fe9bdSPawel Jakub Dawidek return (EBUSY); 3253712fe9bdSPawel Jakub Dawidek case G_RAID3_DESTROY_HARD: 3254712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Device %s is still open, so it " 3255712fe9bdSPawel Jakub Dawidek "can't be definitely removed.", pp->name); 3256712fe9bdSPawel Jakub Dawidek break; 32572d1661a5SPawel Jakub Dawidek } 32582d1661a5SPawel Jakub Dawidek } 32592d1661a5SPawel Jakub Dawidek 326018486a5eSPawel Jakub Dawidek g_topology_lock(); 326118486a5eSPawel Jakub Dawidek if (sc->sc_geom->softc == NULL) { 326218486a5eSPawel Jakub Dawidek g_topology_unlock(); 326318486a5eSPawel Jakub Dawidek return (0); 326418486a5eSPawel Jakub Dawidek } 326518486a5eSPawel Jakub Dawidek sc->sc_geom->softc = NULL; 326618486a5eSPawel Jakub Dawidek sc->sc_sync.ds_geom->softc = NULL; 326718486a5eSPawel Jakub Dawidek g_topology_unlock(); 326818486a5eSPawel Jakub Dawidek 32692d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 32702d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT; 32712d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 32723650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 32732d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 32742d1661a5SPawel Jakub Dawidek wakeup(sc); 32752d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 32762d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 32772d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 32782d1661a5SPawel Jakub Dawidek while (sc->sc_worker != NULL) 32792d1661a5SPawel Jakub Dawidek tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5); 32802d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 32813650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 32822d1661a5SPawel Jakub Dawidek g_raid3_destroy_device(sc); 32832d1661a5SPawel Jakub Dawidek free(sc->sc_disks, M_RAID3); 32842d1661a5SPawel Jakub Dawidek free(sc, M_RAID3); 32852d1661a5SPawel Jakub Dawidek return (0); 32862d1661a5SPawel Jakub Dawidek } 32872d1661a5SPawel Jakub Dawidek 32882d1661a5SPawel Jakub Dawidek static void 32892d1661a5SPawel Jakub Dawidek g_raid3_taste_orphan(struct g_consumer *cp) 32902d1661a5SPawel Jakub Dawidek { 32912d1661a5SPawel Jakub Dawidek 32922d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 32932d1661a5SPawel Jakub Dawidek cp->provider->name)); 32942d1661a5SPawel Jakub Dawidek } 32952d1661a5SPawel Jakub Dawidek 32962d1661a5SPawel Jakub Dawidek static struct g_geom * 32972d1661a5SPawel Jakub Dawidek g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 32982d1661a5SPawel Jakub Dawidek { 32992d1661a5SPawel Jakub Dawidek struct g_raid3_metadata md; 33002d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 33012d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 33022d1661a5SPawel Jakub Dawidek struct g_geom *gp; 33032d1661a5SPawel Jakub Dawidek int error; 33042d1661a5SPawel Jakub Dawidek 33052d1661a5SPawel Jakub Dawidek g_topology_assert(); 33062d1661a5SPawel Jakub Dawidek g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 33072d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Tasting %s.", pp->name); 33082d1661a5SPawel Jakub Dawidek 33092d1661a5SPawel Jakub Dawidek gp = g_new_geomf(mp, "raid3:taste"); 33102d1661a5SPawel Jakub Dawidek /* This orphan function should be never called. */ 33112d1661a5SPawel Jakub Dawidek gp->orphan = g_raid3_taste_orphan; 33122d1661a5SPawel Jakub Dawidek cp = g_new_consumer(gp); 3313d22ff249SEdward Tomasz Napierala error = g_attach(cp, pp); 3314d22ff249SEdward Tomasz Napierala if (error == 0) { 33152d1661a5SPawel Jakub Dawidek error = g_raid3_read_metadata(cp, &md); 33162d1661a5SPawel Jakub Dawidek g_detach(cp); 3317d22ff249SEdward Tomasz Napierala } 33182d1661a5SPawel Jakub Dawidek g_destroy_consumer(cp); 33192d1661a5SPawel Jakub Dawidek g_destroy_geom(gp); 33202d1661a5SPawel Jakub Dawidek if (error != 0) 33212d1661a5SPawel Jakub Dawidek return (NULL); 33222d1661a5SPawel Jakub Dawidek gp = NULL; 33232d1661a5SPawel Jakub Dawidek 332490f2be24SAlexander Motin if (md.md_provider[0] != '\0' && 332590f2be24SAlexander Motin !g_compare_names(md.md_provider, pp->name)) 33262d1661a5SPawel Jakub Dawidek return (NULL); 3327e6890985SPawel Jakub Dawidek if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3328e6890985SPawel Jakub Dawidek return (NULL); 33292d1661a5SPawel Jakub Dawidek if (g_raid3_debug >= 2) 33302d1661a5SPawel Jakub Dawidek raid3_metadata_dump(&md); 33312d1661a5SPawel Jakub Dawidek 33322d1661a5SPawel Jakub Dawidek /* 33332d1661a5SPawel Jakub Dawidek * Let's check if device already exists. 33342d1661a5SPawel Jakub Dawidek */ 333545d5e85aSPawel Jakub Dawidek sc = NULL; 33362d1661a5SPawel Jakub Dawidek LIST_FOREACH(gp, &mp->geom, geom) { 33372d1661a5SPawel Jakub Dawidek sc = gp->softc; 33382d1661a5SPawel Jakub Dawidek if (sc == NULL) 33392d1661a5SPawel Jakub Dawidek continue; 33402d1661a5SPawel Jakub Dawidek if (sc->sc_sync.ds_geom == gp) 33412d1661a5SPawel Jakub Dawidek continue; 33422d1661a5SPawel Jakub Dawidek if (strcmp(md.md_name, sc->sc_name) != 0) 33432d1661a5SPawel Jakub Dawidek continue; 33442d1661a5SPawel Jakub Dawidek if (md.md_id != sc->sc_id) { 33452d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s already configured.", 33462d1661a5SPawel Jakub Dawidek sc->sc_name); 33472d1661a5SPawel Jakub Dawidek return (NULL); 33482d1661a5SPawel Jakub Dawidek } 33492d1661a5SPawel Jakub Dawidek break; 33502d1661a5SPawel Jakub Dawidek } 33512d1661a5SPawel Jakub Dawidek if (gp == NULL) { 33522d1661a5SPawel Jakub Dawidek gp = g_raid3_create(mp, &md); 33532d1661a5SPawel Jakub Dawidek if (gp == NULL) { 33542d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot create device %s.", 33552d1661a5SPawel Jakub Dawidek md.md_name); 33562d1661a5SPawel Jakub Dawidek return (NULL); 33572d1661a5SPawel Jakub Dawidek } 33582d1661a5SPawel Jakub Dawidek sc = gp->softc; 33592d1661a5SPawel Jakub Dawidek } 33602d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 33613650be51SPawel Jakub Dawidek g_topology_unlock(); 33623650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 33632d1661a5SPawel Jakub Dawidek error = g_raid3_add_disk(sc, pp, &md); 33642d1661a5SPawel Jakub Dawidek if (error != 0) { 33652d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 33662d1661a5SPawel Jakub Dawidek pp->name, gp->name, error); 33672d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) == 33682d1661a5SPawel Jakub Dawidek sc->sc_ndisks) { 3369712fe9bdSPawel Jakub Dawidek g_cancel_event(sc); 33703525bb6bSPawel Jakub Dawidek g_raid3_destroy(sc, G_RAID3_DESTROY_HARD); 33713650be51SPawel Jakub Dawidek g_topology_lock(); 33722d1661a5SPawel Jakub Dawidek return (NULL); 33732d1661a5SPawel Jakub Dawidek } 33743650be51SPawel Jakub Dawidek gp = NULL; 33753650be51SPawel Jakub Dawidek } 33763650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 33773650be51SPawel Jakub Dawidek g_topology_lock(); 33782d1661a5SPawel Jakub Dawidek return (gp); 33792d1661a5SPawel Jakub Dawidek } 33802d1661a5SPawel Jakub Dawidek 33812d1661a5SPawel Jakub Dawidek static int 33822d1661a5SPawel Jakub Dawidek g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, 33832d1661a5SPawel Jakub Dawidek struct g_geom *gp) 33842d1661a5SPawel Jakub Dawidek { 33853650be51SPawel Jakub Dawidek struct g_raid3_softc *sc; 33863650be51SPawel Jakub Dawidek int error; 33872d1661a5SPawel Jakub Dawidek 33883650be51SPawel Jakub Dawidek g_topology_unlock(); 33893650be51SPawel Jakub Dawidek sc = gp->softc; 33903650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3391712fe9bdSPawel Jakub Dawidek g_cancel_event(sc); 33923525bb6bSPawel Jakub Dawidek error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT); 33933650be51SPawel Jakub Dawidek if (error != 0) 33943650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 33953650be51SPawel Jakub Dawidek g_topology_lock(); 33963650be51SPawel Jakub Dawidek return (error); 33972d1661a5SPawel Jakub Dawidek } 33982d1661a5SPawel Jakub Dawidek 33992d1661a5SPawel Jakub Dawidek static void 34002d1661a5SPawel Jakub Dawidek g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 34012d1661a5SPawel Jakub Dawidek struct g_consumer *cp, struct g_provider *pp) 34022d1661a5SPawel Jakub Dawidek { 34032d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 34042d1661a5SPawel Jakub Dawidek 34052d1661a5SPawel Jakub Dawidek g_topology_assert(); 34062d1661a5SPawel Jakub Dawidek 34072d1661a5SPawel Jakub Dawidek sc = gp->softc; 34082d1661a5SPawel Jakub Dawidek if (sc == NULL) 34092d1661a5SPawel Jakub Dawidek return; 34102d1661a5SPawel Jakub Dawidek /* Skip synchronization geom. */ 34112d1661a5SPawel Jakub Dawidek if (gp == sc->sc_sync.ds_geom) 34122d1661a5SPawel Jakub Dawidek return; 34132d1661a5SPawel Jakub Dawidek if (pp != NULL) { 34142d1661a5SPawel Jakub Dawidek /* Nothing here. */ 34152d1661a5SPawel Jakub Dawidek } else if (cp != NULL) { 34162d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 34172d1661a5SPawel Jakub Dawidek 34182d1661a5SPawel Jakub Dawidek disk = cp->private; 34192d1661a5SPawel Jakub Dawidek if (disk == NULL) 34202d1661a5SPawel Jakub Dawidek return; 34213650be51SPawel Jakub Dawidek g_topology_unlock(); 34223650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 34232d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Type>", indent); 34242d1661a5SPawel Jakub Dawidek if (disk->d_no == sc->sc_ndisks - 1) 342549ee0fceSAlexander Motin sbuf_cat(sb, "PARITY"); 34262d1661a5SPawel Jakub Dawidek else 342749ee0fceSAlexander Motin sbuf_cat(sb, "DATA"); 342849ee0fceSAlexander Motin sbuf_cat(sb, "</Type>\n"); 34292d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Number>%u</Number>\n", indent, 34302d1661a5SPawel Jakub Dawidek (u_int)disk->d_no); 34312d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 34322d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Synchronized>", indent); 34333650be51SPawel Jakub Dawidek if (disk->d_sync.ds_offset == 0) 343449ee0fceSAlexander Motin sbuf_cat(sb, "0%"); 34352d1661a5SPawel Jakub Dawidek else { 34362d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%u%%", 34373650be51SPawel Jakub Dawidek (u_int)((disk->d_sync.ds_offset * 100) / 3438c0d68b6eSPawel Jakub Dawidek (sc->sc_mediasize / (sc->sc_ndisks - 1)))); 34392d1661a5SPawel Jakub Dawidek } 344049ee0fceSAlexander Motin sbuf_cat(sb, "</Synchronized>\n"); 34414a7f7b10SGleb Smirnoff if (disk->d_sync.ds_offset > 0) { 34424a7f7b10SGleb Smirnoff sbuf_printf(sb, "%s<BytesSynced>%jd" 34434a7f7b10SGleb Smirnoff "</BytesSynced>\n", indent, 34444a7f7b10SGleb Smirnoff (intmax_t)disk->d_sync.ds_offset); 34454a7f7b10SGleb Smirnoff } 34462d1661a5SPawel Jakub Dawidek } 34472d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 34482d1661a5SPawel Jakub Dawidek disk->d_sync.ds_syncid); 3449a245a548SPawel Jakub Dawidek sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid); 34502d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Flags>", indent); 34512d1661a5SPawel Jakub Dawidek if (disk->d_flags == 0) 345249ee0fceSAlexander Motin sbuf_cat(sb, "NONE"); 34532d1661a5SPawel Jakub Dawidek else { 34542d1661a5SPawel Jakub Dawidek int first = 1; 34552d1661a5SPawel Jakub Dawidek 34562d1661a5SPawel Jakub Dawidek #define ADD_FLAG(flag, name) do { \ 34572d1661a5SPawel Jakub Dawidek if ((disk->d_flags & (flag)) != 0) { \ 34582d1661a5SPawel Jakub Dawidek if (!first) \ 345949ee0fceSAlexander Motin sbuf_cat(sb, ", "); \ 34602d1661a5SPawel Jakub Dawidek else \ 34612d1661a5SPawel Jakub Dawidek first = 0; \ 346249ee0fceSAlexander Motin sbuf_cat(sb, name); \ 34632d1661a5SPawel Jakub Dawidek } \ 34642d1661a5SPawel Jakub Dawidek } while (0) 34652d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY"); 34662d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED"); 34672d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING, 34682d1661a5SPawel Jakub Dawidek "SYNCHRONIZING"); 34692d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 34703aae74ecSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN"); 34712d1661a5SPawel Jakub Dawidek #undef ADD_FLAG 34722d1661a5SPawel Jakub Dawidek } 347349ee0fceSAlexander Motin sbuf_cat(sb, "</Flags>\n"); 34742d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<State>%s</State>\n", indent, 34752d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state)); 34763650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 34773650be51SPawel Jakub Dawidek g_topology_lock(); 34782d1661a5SPawel Jakub Dawidek } else { 34793650be51SPawel Jakub Dawidek g_topology_unlock(); 34803650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3481ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 3482ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3483ed940a82SPawel Jakub Dawidek "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent, 3484ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_requested); 3485ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3486ed940a82SPawel Jakub Dawidek "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent, 3487ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_failed); 3488ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3489ed940a82SPawel Jakub Dawidek "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent, 3490ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_requested); 3491ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3492ed940a82SPawel Jakub Dawidek "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent, 3493ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_failed); 3494ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3495ed940a82SPawel Jakub Dawidek "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent, 3496ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_requested); 3497ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3498ed940a82SPawel Jakub Dawidek "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent, 3499ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_failed); 3500ed940a82SPawel Jakub Dawidek } 35012d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 35022d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3503a245a548SPawel Jakub Dawidek sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 35042d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Flags>", indent); 35052d1661a5SPawel Jakub Dawidek if (sc->sc_flags == 0) 350649ee0fceSAlexander Motin sbuf_cat(sb, "NONE"); 35072d1661a5SPawel Jakub Dawidek else { 35082d1661a5SPawel Jakub Dawidek int first = 1; 35092d1661a5SPawel Jakub Dawidek 35102d1661a5SPawel Jakub Dawidek #define ADD_FLAG(flag, name) do { \ 35112d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & (flag)) != 0) { \ 35122d1661a5SPawel Jakub Dawidek if (!first) \ 351349ee0fceSAlexander Motin sbuf_cat(sb, ", "); \ 35142d1661a5SPawel Jakub Dawidek else \ 35152d1661a5SPawel Jakub Dawidek first = 0; \ 351649ee0fceSAlexander Motin sbuf_cat(sb, name); \ 35172d1661a5SPawel Jakub Dawidek } \ 35182d1661a5SPawel Jakub Dawidek } while (0) 3519501250baSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 35202d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3521f5a2f7feSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN, 3522f5a2f7feSPawel Jakub Dawidek "ROUND-ROBIN"); 3523dba915cfSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY"); 35242d1661a5SPawel Jakub Dawidek #undef ADD_FLAG 35252d1661a5SPawel Jakub Dawidek } 352649ee0fceSAlexander Motin sbuf_cat(sb, "</Flags>\n"); 35272d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 35282d1661a5SPawel Jakub Dawidek sc->sc_ndisks); 352928b31df7SPawel Jakub Dawidek sbuf_printf(sb, "%s<State>%s</State>\n", indent, 353028b31df7SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state)); 35313650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 35323650be51SPawel Jakub Dawidek g_topology_lock(); 35332d1661a5SPawel Jakub Dawidek } 35342d1661a5SPawel Jakub Dawidek } 35352d1661a5SPawel Jakub Dawidek 35369da3072cSPawel Jakub Dawidek static void 3537f62c1a47SAlexander Motin g_raid3_shutdown_post_sync(void *arg, int howto) 35389da3072cSPawel Jakub Dawidek { 35399da3072cSPawel Jakub Dawidek struct g_class *mp; 35409da3072cSPawel Jakub Dawidek struct g_geom *gp, *gp2; 35413650be51SPawel Jakub Dawidek struct g_raid3_softc *sc; 3542712fe9bdSPawel Jakub Dawidek int error; 35439da3072cSPawel Jakub Dawidek 35449da3072cSPawel Jakub Dawidek mp = arg; 35459da3072cSPawel Jakub Dawidek g_topology_lock(); 3546f62c1a47SAlexander Motin g_raid3_shutdown = 1; 35479da3072cSPawel Jakub Dawidek LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 35483650be51SPawel Jakub Dawidek if ((sc = gp->softc) == NULL) 35499da3072cSPawel Jakub Dawidek continue; 3550712fe9bdSPawel Jakub Dawidek /* Skip synchronization geom. */ 3551712fe9bdSPawel Jakub Dawidek if (gp == sc->sc_sync.ds_geom) 3552712fe9bdSPawel Jakub Dawidek continue; 35533650be51SPawel Jakub Dawidek g_topology_unlock(); 35543650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3555f62c1a47SAlexander Motin g_raid3_idle(sc, -1); 3556712fe9bdSPawel Jakub Dawidek g_cancel_event(sc); 3557712fe9bdSPawel Jakub Dawidek error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED); 3558712fe9bdSPawel Jakub Dawidek if (error != 0) 35593650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 35603650be51SPawel Jakub Dawidek g_topology_lock(); 35613650be51SPawel Jakub Dawidek } 35623650be51SPawel Jakub Dawidek g_topology_unlock(); 35633650be51SPawel Jakub Dawidek } 35643650be51SPawel Jakub Dawidek 35653650be51SPawel Jakub Dawidek static void 35669da3072cSPawel Jakub Dawidek g_raid3_init(struct g_class *mp) 35679da3072cSPawel Jakub Dawidek { 35689da3072cSPawel Jakub Dawidek 3569f62c1a47SAlexander Motin g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3570f62c1a47SAlexander Motin g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3571f62c1a47SAlexander Motin if (g_raid3_post_sync == NULL) 35729da3072cSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event."); 35739da3072cSPawel Jakub Dawidek } 35749da3072cSPawel Jakub Dawidek 35759da3072cSPawel Jakub Dawidek static void 35769da3072cSPawel Jakub Dawidek g_raid3_fini(struct g_class *mp) 35779da3072cSPawel Jakub Dawidek { 35789da3072cSPawel Jakub Dawidek 3579f62c1a47SAlexander Motin if (g_raid3_post_sync != NULL) 3580f62c1a47SAlexander Motin EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync); 35819da3072cSPawel Jakub Dawidek } 35829da3072cSPawel Jakub Dawidek 35832d1661a5SPawel Jakub Dawidek DECLARE_GEOM_CLASS(g_raid3_class, g_raid3); 358474d6c131SKyle Evans MODULE_VERSION(geom_raid3, 0); 3585