/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/raid3/g_raid3.h>

FEATURE(geom_raid3, "GEOM RAID-3 functionality");

static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0,
    "GEOM_RAID3 stuff");
u_int g_raid3_debug = 0;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid3_debug, 0,
    "Debug level");
static u_int g_raid3_timeout = 4;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_raid3_timeout,
    0, "Time to wait on all raid3 components");
static u_int g_raid3_idletime = 5;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RWTUN,
    &g_raid3_idletime, 0, "Mark components as clean when idling");
static u_int g_raid3_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_raid3_syncreqs = 2;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests.");
static u_int g_raid3_use_malloc = 0;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN,
    &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9).");

static u_int g_raid3_n64k = 50;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RDTUN, &g_raid3_n64k, 0,
    "Maximum number of 64kB allocations");
static u_int g_raid3_n16k = 200;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RDTUN, &g_raid3_n16k, 0,
    "Maximum number of 16kB allocations");
static u_int g_raid3_n4k = 1200;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RDTUN, &g_raid3_n4k, 0,
    "Maximum number of 4kB allocations");
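
/*
 * The three limits above size the UMA zones that back the per-disk data
 * buffers (see g_raid3_alloc() and g_raid3_clone_bio() below): every child
 * bio gets a buffer of bio_length / (number of data disks) bytes.  As an
 * illustration only: on a 5-disk array (4 data + 1 parity) a 64kB parent
 * request is served from the 16kB zone.  Setting kern.geom.raid3.use_malloc
 * bypasses the zones and uses malloc(9) instead.
 */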

static SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0,
    "GEOM_RAID3 statistics");
static u_int g_raid3_parity_mismatch = 0;
SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
    &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode");

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_raid3_post_sync = NULL;
static int g_raid3_shutdown = 0;

static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_raid3_taste;
static void g_raid3_init(struct g_class *mp);
static void g_raid3_fini(struct g_class *mp);

struct g_class g_raid3_class = {
	.name = G_RAID3_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_raid3_config,
	.taste = g_raid3_taste,
	.destroy_geom = g_raid3_destroy_geom,
	.init = g_raid3_init,
	.fini = g_raid3_fini
};


static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state);
static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force);
static void g_raid3_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type);
static int g_raid3_register_request(struct bio *pbp);
static void g_raid3_sync_release(struct g_raid3_softc *sc);


static const char *
g_raid3_disk_state2str(int state)
{

	switch (state) {
	case G_RAID3_DISK_STATE_NODISK:
		return ("NODISK");
	case G_RAID3_DISK_STATE_NONE:
		return ("NONE");
	case G_RAID3_DISK_STATE_NEW:
		return ("NEW");
	case G_RAID3_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_RAID3_DISK_STATE_STALE:
		return ("STALE");
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_RAID3_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid3_device_state2str(int state)
{

	switch (state) {
	case G_RAID3_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_RAID3_DEVICE_STATE_DEGRADED:
		return ("DEGRADED");
	case G_RAID3_DEVICE_STATE_COMPLETE:
		return ("COMPLETE");
	default:
		return ("INVALID");
	}
}

const char *
g_raid3_get_diskname(struct g_raid3_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

static void *
g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags)
{
	void *ptr;
	enum g_raid3_zones zone;

	if (g_raid3_use_malloc ||
	    (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
		ptr = malloc(size, M_RAID3, flags);
	else {
		ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone,
		    &sc->sc_zones[zone], flags);
		sc->sc_zones[zone].sz_requested++;
		if (ptr == NULL)
			sc->sc_zones[zone].sz_failed++;
	}
	return (ptr);
}

static void
g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size)
{
	enum g_raid3_zones zone;

	if (g_raid3_use_malloc ||
	    (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
		free(ptr, M_RAID3);
	else {
		uma_zfree_arg(sc->sc_zones[zone].sz_zone,
		    ptr, &sc->sc_zones[zone]);
	}
}

static int
g_raid3_uma_ctor(void *mem, int size, void *arg, int flags)
{
	struct g_raid3_zone *sz = arg;

	if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max)
		return (ENOMEM);
	sz->sz_inuse++;
	return (0);
}

static void
g_raid3_uma_dtor(void *mem, int size, void *arg)
{
	struct g_raid3_zone *sz = arg;

	sz->sz_inuse--;
}

#define	g_raid3_xor(src, dst, size)					\
	_g_raid3_xor((uint64_t *)(src),					\
	    (uint64_t *)(dst), (size_t)size)
static void
_g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size)
{

	KASSERT((size % 128) == 0, ("Invalid size: %zu.", size));
	for (; size > 0; size -= 128) {
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
	}
}
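
/*
 * A note on the helper above: RAID-3 parity is a plain XOR across the data
 * chunks, i.e. parity[i] = data0[i] ^ data1[i] ^ ... (see g_raid3_scatter()).
 * The loop is unrolled to process 16 64-bit words (128 bytes) per iteration,
 * which is why the KASSERT() requires the length to be a multiple of 128.
 */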

static int
g_raid3_is_zero(struct bio *bp)
{
	static const uint64_t zeros[] = {
	    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	u_char *addr;
	ssize_t size;

	size = bp->bio_length;
	addr = (u_char *)bp->bio_data;
	for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) {
		if (bcmp(addr, zeros, sizeof(zeros)) != 0)
			return (0);
	}
	return (1);
}

/*
 * --- Events handling functions ---
 * Events in geom_raid3 are used to maintain disk and device status
 * from a single thread, which simplifies locking.
 */
static void
g_raid3_event_free(struct g_raid3_event *ep)
{

	free(ep, M_RAID3);
}

int
g_raid3_event_send(void *arg, int state, int flags)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct g_raid3_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK);
	G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_RAID3_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_RAID3_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_raid3_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}
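
/*
 * The hand-off above: the event is queued under sc_events_mtx and the worker
 * thread is woken up through the sc_queue wakeups.  Unless
 * G_RAID3_EVENT_DONTWAIT was passed, the caller drops sc_lock and sleeps
 * until the worker sets G_RAID3_EVENT_DONE and fills in e_error.
 */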

static struct g_raid3_event *
g_raid3_event_get(struct g_raid3_softc *sc)
{
	struct g_raid3_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_raid3_event_cancel(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	struct g_raid3_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
			g_raid3_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_raid3_ndisks(struct g_raid3_softc *sc, int state)
{
	struct g_raid3_disk *disk;
	u_int n, ndisks;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	for (n = ndisks = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
			continue;
		if (state == -1 || disk->d_state == state)
			ndisks++;
	}
	return (ndisks);
}

static u_int
g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_RAID3_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_raid3_nrequests(sc, cp) > 0) {
		G_RAID3_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_raid3_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_raid3_is_busy(sc, cp))
		return;
	G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event was sent (inside g_access()), we
		 * can send the event to detach and destroy the consumer.
		 * A class which already has a consumer attached to the given
		 * provider will not receive a retaste event for it.
		 * This is how retaste events are ignored when consumers
		 * opened for writing are closed: the consumer is detached
		 * and destroyed only after the retaste event is sent.
		 */
		g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	g_topology_unlock();
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;
	G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
	return (0);
}
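
/*
 * Note that the consumer is opened r1w1e1 and its index field, zeroed above,
 * is used as a counter of I/O requests issued to the disk (see
 * g_raid3_is_busy() and the cp->index++ in g_raid3_scatter()).
 */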

static void
g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_raid3_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize disk. This means allocate memory, create consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
static struct g_raid3_disk *
g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
    struct g_raid3_metadata *md, int *errorp)
{
	struct g_raid3_disk *disk;
	int error;

	disk = &sc->sc_disks[md->md_no];
	error = g_raid3_connect_disk(disk, pp);
	if (error != 0) {
		if (errorp != NULL)
			*errorp = error;
		return (NULL);
	}
	disk->d_state = G_RAID3_DISK_STATE_NONE;
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
}

static void
g_raid3_destroy_disk(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
		return;
	g_raid3_event_cancel(disk);
	switch (disk->d_state) {
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		if (sc->sc_syncdisk != NULL)
			g_raid3_sync_stop(sc, 1);
		/* FALLTHROUGH */
	case G_RAID3_DISK_STATE_NEW:
	case G_RAID3_DISK_STATE_STALE:
	case G_RAID3_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_raid3_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		disk->d_consumer = NULL;
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_raid3_get_diskname(disk),
		    g_raid3_disk_state2str(disk->d_state)));
	}
	disk->d_state = G_RAID3_DISK_STATE_NODISK;
}

static void
g_raid3_destroy_device(struct g_raid3_softc *sc)
{
	struct g_raid3_event *ep;
	struct g_raid3_disk *disk;
	struct g_geom *gp;
	struct g_consumer *cp;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_raid3_destroy_provider(sc);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
			disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
			g_raid3_update_metadata(disk);
			g_raid3_destroy_disk(disk);
		}
	}
	while ((ep = g_raid3_event_get(sc)) != NULL) {
		g_raid3_event_remove(sc, ep);
		if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
			g_raid3_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_RAID3_EVENT_DONE;
			G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);
	cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
	g_topology_lock();
	if (cp != NULL)
		g_raid3_disconnect_consumer(sc, cp);
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	if (!g_raid3_use_malloc) {
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

static void
g_raid3_orphan(struct g_consumer *cp)
{
	struct g_raid3_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
	g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
	    G_RAID3_EVENT_DONTWAIT);
}

static int
g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
	if (md != NULL)
		raid3_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_RAID3);
	if (error != 0) {
		if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
			G_RAID3_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
			disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
		} else {
			G_RAID3_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
		}
		if (g_raid3_disconnect_on_failure &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
		}
	}
	return (error);
}
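
/*
 * The metadata written above lives in the provider's last sector
 * (offset = mediasize - sectorsize).  A failed write marks the disk as
 * BROKEN and, if kern.geom.raid3.disconnect_on_failure is set while the
 * device is COMPLETE, also schedules the disk for disconnection.
 */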

int
g_raid3_clear_metadata(struct g_raid3_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_raid3_write_metadata(disk, NULL);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s cleared.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
	return (error);
}

void
g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_provider *pp;

	sc = disk->d_softc;
	strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
	md->md_version = G_RAID3_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_id = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
	md->md_no = disk->d_no;
	md->md_syncid = disk->d_sync.ds_syncid;
	md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
	if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
		md->md_sync_offset = 0;
	else {
		md->md_sync_offset =
		    disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
	}
	if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
		pp = disk->d_consumer->provider;
	else
		pp = NULL;
	if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
		strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
	else
		bzero(md->md_provider, sizeof(md->md_provider));
	if (pp != NULL)
		md->md_provsize = pp->mediasize;
	else
		md->md_provsize = 0;
}
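
/*
 * Note that md_sync_offset is stored as a per-disk offset: the
 * synchronization progress kept in ds_offset_done is divided by the number
 * of data disks (sc_ndisks - 1) before it is written out.
 */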

void
g_raid3_update_metadata(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	struct g_raid3_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_raid3_fill_metadata(disk, &md);
	error = g_raid3_write_metadata(disk, &md);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s updated.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
}

static void
g_raid3_bump_syncid(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_raid3_update_metadata(disk);
		}
	}
}

static void
g_raid3_bump_genid(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_raid3_update_metadata(disk);
		}
	}
}

static int
g_raid3_idle(struct g_raid3_softc *sc, int acw)
{
	struct g_raid3_disk *disk;
	u_int i;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
		if (!g_raid3_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
	return (0);
}
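
/*
 * Summary of the idle logic above: once the provider is no longer open for
 * writing, or no writes have been issued for kern.geom.raid3.idletime
 * seconds, all ACTIVE disks get their DIRTY flag cleared and their metadata
 * rewritten.  A positive return value is the number of seconds remaining
 * until that timeout expires.
 */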

static void
g_raid3_unidle(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int i;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
}

/*
 * Treat bio_driver1 field in parent bio as list head and field bio_caller1
 * in child bio as pointer to the next element on the list.
 */
#define	G_RAID3_HEAD_BIO(pbp)	(pbp)->bio_driver1

#define	G_RAID3_NEXT_BIO(cbp)	(cbp)->bio_caller1

#define	G_RAID3_FOREACH_BIO(pbp, bp)					\
	for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL;		\
	    (bp) = G_RAID3_NEXT_BIO(bp))

#define	G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp)			\
	for ((bp) = G_RAID3_HEAD_BIO(pbp);				\
	    (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1);	\
	    (bp) = (tmpbp))
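
/*
 * For illustration, a parent bio with three children is chained as:
 *
 *	pbp->bio_driver1  -> cbp0
 *	cbp0->bio_caller1 -> cbp1
 *	cbp1->bio_caller1 -> cbp2
 *	cbp2->bio_caller1 -> NULL
 *
 * G_RAID3_FOREACH_SAFE_BIO() saves the next pointer before the body runs,
 * so the current element may be unlinked or destroyed while iterating.
 */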

static void
g_raid3_init_bio(struct bio *pbp)
{

	G_RAID3_HEAD_BIO(pbp) = NULL;
}

static void
g_raid3_remove_bio(struct bio *cbp)
{
	struct bio *pbp, *bp;

	pbp = cbp->bio_parent;
	if (G_RAID3_HEAD_BIO(pbp) == cbp)
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp) {
				G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
}

static void
g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
{
	struct bio *pbp, *bp;

	g_raid3_remove_bio(sbp);
	pbp = dbp->bio_parent;
	G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
	if (G_RAID3_HEAD_BIO(pbp) == dbp)
		G_RAID3_HEAD_BIO(pbp) = sbp;
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == dbp) {
				G_RAID3_NEXT_BIO(bp) = sbp;
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(dbp) = NULL;
}

static void
g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
{
	struct bio *bp, *pbp;
	size_t size;

	pbp = cbp->bio_parent;
	pbp->bio_children--;
	KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	g_raid3_free(sc, cbp->bio_data, size);
	if (G_RAID3_HEAD_BIO(pbp) == cbp) {
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
		G_RAID3_NEXT_BIO(cbp) = NULL;
		g_destroy_bio(cbp);
	} else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp)
				break;
		}
		if (bp != NULL) {
			KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
			    ("NULL bp->bio_driver1"));
			G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
			G_RAID3_NEXT_BIO(cbp) = NULL;
		}
		g_destroy_bio(cbp);
	}
}

static struct bio *
g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
{
	struct bio *bp, *cbp;
	size_t size;
	int memflag;

	cbp = g_clone_bio(pbp);
	if (cbp == NULL)
		return (NULL);
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
		memflag = M_WAITOK;
	else
		memflag = M_NOWAIT;
	cbp->bio_data = g_raid3_alloc(sc, size, memflag);
	if (cbp->bio_data == NULL) {
		pbp->bio_children--;
		g_destroy_bio(cbp);
		return (NULL);
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
	if (G_RAID3_HEAD_BIO(pbp) == NULL)
		G_RAID3_HEAD_BIO(pbp) = cbp;
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == NULL) {
				G_RAID3_NEXT_BIO(bp) = cbp;
				break;
			}
		}
	}
	return (cbp);
}
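
/*
 * Each child created above carries its own data buffer of
 * bio_length / (sc_ndisks - 1) bytes.  Regular requests
 * (G_RAID3_BIO_CFLAG_REGULAR) may sleep for that memory (M_WAITOK); all
 * other requests use M_NOWAIT and the clone is torn down if the allocation
 * fails.
 */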
10502d1661a5SPawel Jakub Dawidek 10512d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc; 10522d1661a5SPawel Jakub Dawidek bp = NULL; 10532d1661a5SPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) { 10542d1661a5SPawel Jakub Dawidek /* 10552d1661a5SPawel Jakub Dawidek * Find bio for which we should calculate data. 10562d1661a5SPawel Jakub Dawidek */ 10572d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 10582d1661a5SPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) { 10592d1661a5SPawel Jakub Dawidek bp = cbp; 10602d1661a5SPawel Jakub Dawidek break; 10612d1661a5SPawel Jakub Dawidek } 10622d1661a5SPawel Jakub Dawidek } 10632d1661a5SPawel Jakub Dawidek KASSERT(bp != NULL, ("NULL parity bio.")); 10642d1661a5SPawel Jakub Dawidek } 10652d1661a5SPawel Jakub Dawidek atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 10662d1661a5SPawel Jakub Dawidek cadd = padd = 0; 10672d1661a5SPawel Jakub Dawidek for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) { 10682d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 10692d1661a5SPawel Jakub Dawidek if (cbp == bp) 10702d1661a5SPawel Jakub Dawidek continue; 10712d1661a5SPawel Jakub Dawidek bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom); 10722d1661a5SPawel Jakub Dawidek padd += atom; 10732d1661a5SPawel Jakub Dawidek } 10742d1661a5SPawel Jakub Dawidek cadd += atom; 10752d1661a5SPawel Jakub Dawidek } 10762d1661a5SPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) { 10772d1661a5SPawel Jakub Dawidek /* 10782d1661a5SPawel Jakub Dawidek * Calculate parity. 10792d1661a5SPawel Jakub Dawidek */ 108006b215fdSAlexander Motin first = 1; 10812d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 10822d1661a5SPawel Jakub Dawidek if (cbp == bp) 10832d1661a5SPawel Jakub Dawidek continue; 108406b215fdSAlexander Motin if (first) { 108506b215fdSAlexander Motin bcopy(cbp->bio_data, bp->bio_data, 10862d1661a5SPawel Jakub Dawidek bp->bio_length); 108706b215fdSAlexander Motin first = 0; 108806b215fdSAlexander Motin } else { 108906b215fdSAlexander Motin g_raid3_xor(cbp->bio_data, bp->bio_data, 109006b215fdSAlexander Motin bp->bio_length); 109106b215fdSAlexander Motin } 10922d1661a5SPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0) 10932d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 10942d1661a5SPawel Jakub Dawidek } 10952d1661a5SPawel Jakub Dawidek } 1096ee40c7aaSPawel Jakub Dawidek G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 10972d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 10982d1661a5SPawel Jakub Dawidek 10992d1661a5SPawel Jakub Dawidek disk = cbp->bio_caller2; 11002d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 11012d1661a5SPawel Jakub Dawidek cbp->bio_to = cp->provider; 11022d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Sending request."); 11033650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1104d97d5ee9SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1105d97d5ee9SPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 110679e61493SPawel Jakub Dawidek cp->index++; 11070962f942SPawel Jakub Dawidek sc->sc_writes++; 11082d1661a5SPawel Jakub Dawidek g_io_request(cbp, cp); 11092d1661a5SPawel Jakub Dawidek } 11102d1661a5SPawel Jakub Dawidek } 11112d1661a5SPawel Jakub Dawidek 11122d1661a5SPawel Jakub Dawidek static void 11132d1661a5SPawel Jakub Dawidek g_raid3_gather(struct bio *pbp) 11142d1661a5SPawel Jakub Dawidek { 11152d1661a5SPawel 
Jakub Dawidek struct g_raid3_softc *sc;
11162d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk;
1117f5a2f7feSPawel Jakub Dawidek struct bio *xbp, *fbp, *cbp;
11182d1661a5SPawel Jakub Dawidek off_t atom, cadd, padd, left;
11192d1661a5SPawel Jakub Dawidek
11202d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc;
11212d1661a5SPawel Jakub Dawidek /*
1122f5a2f7feSPawel Jakub Dawidek * Find the bio for which we have to calculate data.
11232d1661a5SPawel Jakub Dawidek * While going through this path, check if all requests
11242d1661a5SPawel Jakub Dawidek * succeeded; if not, deny the whole request.
1125f5a2f7feSPawel Jakub Dawidek * If we're in COMPLETE mode, we allow one request to fail,
1126f5a2f7feSPawel Jakub Dawidek * so if we find one, we send it to the parity consumer.
1127f5a2f7feSPawel Jakub Dawidek * If there are more failed requests, we deny the whole request.
11282d1661a5SPawel Jakub Dawidek */
1129f5a2f7feSPawel Jakub Dawidek xbp = fbp = NULL;
11302d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) {
11312d1661a5SPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1132f5a2f7feSPawel Jakub Dawidek KASSERT(xbp == NULL, ("More than one parity bio."));
1133f5a2f7feSPawel Jakub Dawidek xbp = cbp;
11342d1661a5SPawel Jakub Dawidek }
11352d1661a5SPawel Jakub Dawidek if (cbp->bio_error == 0)
11362d1661a5SPawel Jakub Dawidek continue;
11372d1661a5SPawel Jakub Dawidek /*
11382d1661a5SPawel Jakub Dawidek * Found a failed request.
11392d1661a5SPawel Jakub Dawidek */
1140f5a2f7feSPawel Jakub Dawidek if (fbp == NULL) {
1141f5a2f7feSPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
11422d1661a5SPawel Jakub Dawidek /*
1143f5a2f7feSPawel Jakub Dawidek * We are already in degraded mode, so we can't
1144f5a2f7feSPawel Jakub Dawidek * accept any failures.
11452d1661a5SPawel Jakub Dawidek */
1146f5a2f7feSPawel Jakub Dawidek if (pbp->bio_error == 0)
114717fec17eSPawel Jakub Dawidek pbp->bio_error = cbp->bio_error;
11482d1661a5SPawel Jakub Dawidek } else {
1149f5a2f7feSPawel Jakub Dawidek fbp = cbp;
11502d1661a5SPawel Jakub Dawidek }
1151f5a2f7feSPawel Jakub Dawidek } else {
11522d1661a5SPawel Jakub Dawidek /*
11532d1661a5SPawel Jakub Dawidek * Next failed request, that's too many.
11542d1661a5SPawel Jakub Dawidek */ 11552d1661a5SPawel Jakub Dawidek if (pbp->bio_error == 0) 1156f5a2f7feSPawel Jakub Dawidek pbp->bio_error = fbp->bio_error; 11572d1661a5SPawel Jakub Dawidek } 11583aae74ecSPawel Jakub Dawidek disk = cbp->bio_caller2; 11593aae74ecSPawel Jakub Dawidek if (disk == NULL) 11603aae74ecSPawel Jakub Dawidek continue; 11613aae74ecSPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 11623aae74ecSPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 11633aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).", 11643aae74ecSPawel Jakub Dawidek cbp->bio_error); 11653aae74ecSPawel Jakub Dawidek } else { 11663aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).", 11673aae74ecSPawel Jakub Dawidek cbp->bio_error); 11683aae74ecSPawel Jakub Dawidek } 11693aae74ecSPawel Jakub Dawidek if (g_raid3_disconnect_on_failure && 11703aae74ecSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 11713aae74ecSPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 11723aae74ecSPawel Jakub Dawidek g_raid3_event_send(disk, 11733aae74ecSPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 11743aae74ecSPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 11753aae74ecSPawel Jakub Dawidek } 11762d1661a5SPawel Jakub Dawidek } 11772d1661a5SPawel Jakub Dawidek if (pbp->bio_error != 0) 11782d1661a5SPawel Jakub Dawidek goto finish; 1179dba915cfSPawel Jakub Dawidek if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) { 1180dba915cfSPawel Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY; 1181dba915cfSPawel Jakub Dawidek if (xbp != fbp) 1182dba915cfSPawel Jakub Dawidek g_raid3_replace_bio(xbp, fbp); 1183dba915cfSPawel Jakub Dawidek g_raid3_destroy_bio(sc, fbp); 1184dba915cfSPawel Jakub Dawidek } else if (fbp != NULL) { 11852d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 11862d1661a5SPawel Jakub Dawidek 11872d1661a5SPawel Jakub Dawidek /* 11882d1661a5SPawel Jakub Dawidek * One request failed, so send the same request to 11892d1661a5SPawel Jakub Dawidek * the parity consumer. 
11902d1661a5SPawel Jakub Dawidek */ 1191f5a2f7feSPawel Jakub Dawidek disk = pbp->bio_driver2; 11922d1661a5SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) { 1193f5a2f7feSPawel Jakub Dawidek pbp->bio_error = fbp->bio_error; 11942d1661a5SPawel Jakub Dawidek goto finish; 11952d1661a5SPawel Jakub Dawidek } 11962d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 11972d1661a5SPawel Jakub Dawidek pbp->bio_inbed--; 1198f5a2f7feSPawel Jakub Dawidek fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR); 1199f5a2f7feSPawel Jakub Dawidek if (disk->d_no == sc->sc_ndisks - 1) 1200f5a2f7feSPawel Jakub Dawidek fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1201f5a2f7feSPawel Jakub Dawidek fbp->bio_error = 0; 1202f5a2f7feSPawel Jakub Dawidek fbp->bio_completed = 0; 1203f5a2f7feSPawel Jakub Dawidek fbp->bio_children = 0; 1204f5a2f7feSPawel Jakub Dawidek fbp->bio_inbed = 0; 12052d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 1206f5a2f7feSPawel Jakub Dawidek fbp->bio_caller2 = disk; 1207f5a2f7feSPawel Jakub Dawidek fbp->bio_to = cp->provider; 1208f5a2f7feSPawel Jakub Dawidek G_RAID3_LOGREQ(3, fbp, "Sending request (recover)."); 12093650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 12102d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 12112d1661a5SPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 121279e61493SPawel Jakub Dawidek cp->index++; 1213f5a2f7feSPawel Jakub Dawidek g_io_request(fbp, cp); 12142d1661a5SPawel Jakub Dawidek return; 12152d1661a5SPawel Jakub Dawidek } 1216f5a2f7feSPawel Jakub Dawidek if (xbp != NULL) { 1217f5a2f7feSPawel Jakub Dawidek /* 1218f5a2f7feSPawel Jakub Dawidek * Calculate parity. 1219f5a2f7feSPawel Jakub Dawidek */ 1220f5a2f7feSPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 1221f5a2f7feSPawel Jakub Dawidek if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) 1222f5a2f7feSPawel Jakub Dawidek continue; 122306b215fdSAlexander Motin g_raid3_xor(cbp->bio_data, xbp->bio_data, 1224f5a2f7feSPawel Jakub Dawidek xbp->bio_length); 1225f5a2f7feSPawel Jakub Dawidek } 1226f5a2f7feSPawel Jakub Dawidek xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY; 1227dba915cfSPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) { 1228dba915cfSPawel Jakub Dawidek if (!g_raid3_is_zero(xbp)) { 1229dba915cfSPawel Jakub Dawidek g_raid3_parity_mismatch++; 1230dba915cfSPawel Jakub Dawidek pbp->bio_error = EIO; 1231dba915cfSPawel Jakub Dawidek goto finish; 1232dba915cfSPawel Jakub Dawidek } 1233dba915cfSPawel Jakub Dawidek g_raid3_destroy_bio(sc, xbp); 1234dba915cfSPawel Jakub Dawidek } 12352d1661a5SPawel Jakub Dawidek } 12362d1661a5SPawel Jakub Dawidek atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 12372d1661a5SPawel Jakub Dawidek cadd = padd = 0; 12382d1661a5SPawel Jakub Dawidek for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) { 12392d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, cbp) { 12402d1661a5SPawel Jakub Dawidek bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom); 12412d1661a5SPawel Jakub Dawidek pbp->bio_completed += atom; 12422d1661a5SPawel Jakub Dawidek padd += atom; 12432d1661a5SPawel Jakub Dawidek } 12442d1661a5SPawel Jakub Dawidek cadd += atom; 12452d1661a5SPawel Jakub Dawidek } 12462d1661a5SPawel Jakub Dawidek finish: 12472d1661a5SPawel Jakub Dawidek if (pbp->bio_error == 0) 12482d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, pbp, "Request finished."); 12494cf67afeSPawel Jakub Dawidek else { 12504cf67afeSPawel Jakub Dawidek if ((pbp->bio_pflags & 
G_RAID3_BIO_PFLAG_VERIFY) != 0) 12514cf67afeSPawel Jakub Dawidek G_RAID3_LOGREQ(1, pbp, "Verification error."); 12522d1661a5SPawel Jakub Dawidek else 12532d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, pbp, "Request failed."); 12544cf67afeSPawel Jakub Dawidek } 1255dba915cfSPawel Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK; 12562d1661a5SPawel Jakub Dawidek while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) 12572d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 1258290c6161SPawel Jakub Dawidek g_io_deliver(pbp, pbp->bio_error); 12592d1661a5SPawel Jakub Dawidek } 12602d1661a5SPawel Jakub Dawidek 12612d1661a5SPawel Jakub Dawidek static void 12622d1661a5SPawel Jakub Dawidek g_raid3_done(struct bio *bp) 12632d1661a5SPawel Jakub Dawidek { 12642d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 12652d1661a5SPawel Jakub Dawidek 12662d1661a5SPawel Jakub Dawidek sc = bp->bio_from->geom->softc; 12672d1661a5SPawel Jakub Dawidek bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR; 12682d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error); 12692d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 12702d1661a5SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 12718de58113SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 12722d1661a5SPawel Jakub Dawidek wakeup(sc); 12732d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 12742d1661a5SPawel Jakub Dawidek } 12752d1661a5SPawel Jakub Dawidek 12762d1661a5SPawel Jakub Dawidek static void 12772d1661a5SPawel Jakub Dawidek g_raid3_regular_request(struct bio *cbp) 12782d1661a5SPawel Jakub Dawidek { 12792d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 12802d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 12812d1661a5SPawel Jakub Dawidek struct bio *pbp; 12822d1661a5SPawel Jakub Dawidek 12832d1661a5SPawel Jakub Dawidek g_topology_assert_not(); 12842d1661a5SPawel Jakub Dawidek 12852d1661a5SPawel Jakub Dawidek pbp = cbp->bio_parent; 12862d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc; 12870962f942SPawel Jakub Dawidek cbp->bio_from->index--; 12880962f942SPawel Jakub Dawidek if (cbp->bio_cmd == BIO_WRITE) 12890962f942SPawel Jakub Dawidek sc->sc_writes--; 12902d1661a5SPawel Jakub Dawidek disk = cbp->bio_from->private; 12912d1661a5SPawel Jakub Dawidek if (disk == NULL) { 12922d1661a5SPawel Jakub Dawidek g_topology_lock(); 12932d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(sc, cbp->bio_from); 12942d1661a5SPawel Jakub Dawidek g_topology_unlock(); 12952d1661a5SPawel Jakub Dawidek } 12962d1661a5SPawel Jakub Dawidek 12972d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Request finished."); 12982d1661a5SPawel Jakub Dawidek pbp->bio_inbed++; 12992d1661a5SPawel Jakub Dawidek KASSERT(pbp->bio_inbed <= pbp->bio_children, 13002d1661a5SPawel Jakub Dawidek ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed, 13012d1661a5SPawel Jakub Dawidek pbp->bio_children)); 13022d1661a5SPawel Jakub Dawidek if (pbp->bio_inbed != pbp->bio_children) 13032d1661a5SPawel Jakub Dawidek return; 13042d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 13052d1661a5SPawel Jakub Dawidek case BIO_READ: 13062d1661a5SPawel Jakub Dawidek g_raid3_gather(pbp); 13072d1661a5SPawel Jakub Dawidek break; 13082d1661a5SPawel Jakub Dawidek case BIO_WRITE: 13092d1661a5SPawel Jakub Dawidek case BIO_DELETE: 13102d1661a5SPawel Jakub Dawidek { 13112d1661a5SPawel Jakub Dawidek int error = 0; 13122d1661a5SPawel Jakub Dawidek 13132d1661a5SPawel Jakub Dawidek pbp->bio_completed = pbp->bio_length; 13142d1661a5SPawel Jakub 
Dawidek while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) { 13153aae74ecSPawel Jakub Dawidek if (cbp->bio_error == 0) { 13163aae74ecSPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 13173aae74ecSPawel Jakub Dawidek continue; 13182d1661a5SPawel Jakub Dawidek } 13193aae74ecSPawel Jakub Dawidek 13202d1661a5SPawel Jakub Dawidek if (error == 0) 13212d1661a5SPawel Jakub Dawidek error = cbp->bio_error; 13222d1661a5SPawel Jakub Dawidek else if (pbp->bio_error == 0) { 13232d1661a5SPawel Jakub Dawidek /* 13242d1661a5SPawel Jakub Dawidek * Next failed request, that's too many. 13252d1661a5SPawel Jakub Dawidek */ 13262d1661a5SPawel Jakub Dawidek pbp->bio_error = error; 13272d1661a5SPawel Jakub Dawidek } 13283aae74ecSPawel Jakub Dawidek 13293aae74ecSPawel Jakub Dawidek disk = cbp->bio_caller2; 13303aae74ecSPawel Jakub Dawidek if (disk == NULL) { 13313aae74ecSPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 13323aae74ecSPawel Jakub Dawidek continue; 13333aae74ecSPawel Jakub Dawidek } 13343aae74ecSPawel Jakub Dawidek 13353aae74ecSPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 13363aae74ecSPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 13373aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(0, cbp, 13383aae74ecSPawel Jakub Dawidek "Request failed (error=%d).", 13393aae74ecSPawel Jakub Dawidek cbp->bio_error); 13403aae74ecSPawel Jakub Dawidek } else { 13413aae74ecSPawel Jakub Dawidek G_RAID3_LOGREQ(1, cbp, 13423aae74ecSPawel Jakub Dawidek "Request failed (error=%d).", 13433aae74ecSPawel Jakub Dawidek cbp->bio_error); 13443aae74ecSPawel Jakub Dawidek } 13453aae74ecSPawel Jakub Dawidek if (g_raid3_disconnect_on_failure && 13463aae74ecSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 13473aae74ecSPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 13483aae74ecSPawel Jakub Dawidek g_raid3_event_send(disk, 13493aae74ecSPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 13503aae74ecSPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 13512d1661a5SPawel Jakub Dawidek } 13522d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 13532d1661a5SPawel Jakub Dawidek } 13542d1661a5SPawel Jakub Dawidek if (pbp->bio_error == 0) 13552d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, pbp, "Request finished."); 13562d1661a5SPawel Jakub Dawidek else 13572d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, pbp, "Request failed."); 13582d1661a5SPawel Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED; 13592d1661a5SPawel Jakub Dawidek pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY; 13603650be51SPawel Jakub Dawidek bioq_remove(&sc->sc_inflight, pbp); 13613650be51SPawel Jakub Dawidek /* Release delayed sync requests if possible. 
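		 * The finished write has just been removed from sc_inflight,
		 * so synchronization requests that were delayed because they
		 * collided with it may be able to proceed now.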
*/ 13623650be51SPawel Jakub Dawidek g_raid3_sync_release(sc); 13632d1661a5SPawel Jakub Dawidek g_io_deliver(pbp, pbp->bio_error); 13642d1661a5SPawel Jakub Dawidek break; 13652d1661a5SPawel Jakub Dawidek } 13662d1661a5SPawel Jakub Dawidek } 13672d1661a5SPawel Jakub Dawidek } 13682d1661a5SPawel Jakub Dawidek 13692d1661a5SPawel Jakub Dawidek static void 13702d1661a5SPawel Jakub Dawidek g_raid3_sync_done(struct bio *bp) 13712d1661a5SPawel Jakub Dawidek { 13722d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 13732d1661a5SPawel Jakub Dawidek 13742d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Synchronization request delivered."); 13752d1661a5SPawel Jakub Dawidek sc = bp->bio_from->geom->softc; 13762d1661a5SPawel Jakub Dawidek bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC; 13772d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 13782d1661a5SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 13798de58113SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 13802d1661a5SPawel Jakub Dawidek wakeup(sc); 13812d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 13822d1661a5SPawel Jakub Dawidek } 13832d1661a5SPawel Jakub Dawidek 13842d1661a5SPawel Jakub Dawidek static void 138542461fbaSPawel Jakub Dawidek g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp) 138642461fbaSPawel Jakub Dawidek { 138742461fbaSPawel Jakub Dawidek struct bio_queue_head queue; 138842461fbaSPawel Jakub Dawidek struct g_raid3_disk *disk; 138942461fbaSPawel Jakub Dawidek struct g_consumer *cp; 139042461fbaSPawel Jakub Dawidek struct bio *cbp; 139142461fbaSPawel Jakub Dawidek u_int i; 139242461fbaSPawel Jakub Dawidek 139342461fbaSPawel Jakub Dawidek bioq_init(&queue); 139442461fbaSPawel Jakub Dawidek for (i = 0; i < sc->sc_ndisks; i++) { 139542461fbaSPawel Jakub Dawidek disk = &sc->sc_disks[i]; 139642461fbaSPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) 139742461fbaSPawel Jakub Dawidek continue; 139842461fbaSPawel Jakub Dawidek cbp = g_clone_bio(bp); 139942461fbaSPawel Jakub Dawidek if (cbp == NULL) { 140042461fbaSPawel Jakub Dawidek for (cbp = bioq_first(&queue); cbp != NULL; 140142461fbaSPawel Jakub Dawidek cbp = bioq_first(&queue)) { 140242461fbaSPawel Jakub Dawidek bioq_remove(&queue, cbp); 140342461fbaSPawel Jakub Dawidek g_destroy_bio(cbp); 140442461fbaSPawel Jakub Dawidek } 140542461fbaSPawel Jakub Dawidek if (bp->bio_error == 0) 140642461fbaSPawel Jakub Dawidek bp->bio_error = ENOMEM; 140742461fbaSPawel Jakub Dawidek g_io_deliver(bp, bp->bio_error); 140842461fbaSPawel Jakub Dawidek return; 140942461fbaSPawel Jakub Dawidek } 141042461fbaSPawel Jakub Dawidek bioq_insert_tail(&queue, cbp); 141142461fbaSPawel Jakub Dawidek cbp->bio_done = g_std_done; 141242461fbaSPawel Jakub Dawidek cbp->bio_caller1 = disk; 141342461fbaSPawel Jakub Dawidek cbp->bio_to = disk->d_consumer->provider; 141442461fbaSPawel Jakub Dawidek } 141542461fbaSPawel Jakub Dawidek for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) { 141642461fbaSPawel Jakub Dawidek bioq_remove(&queue, cbp); 141742461fbaSPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Sending request."); 141842461fbaSPawel Jakub Dawidek disk = cbp->bio_caller1; 141942461fbaSPawel Jakub Dawidek cbp->bio_caller1 = NULL; 142042461fbaSPawel Jakub Dawidek cp = disk->d_consumer; 142142461fbaSPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 142242461fbaSPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 142342461fbaSPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 142442461fbaSPawel Jakub Dawidek 
g_io_request(cbp, disk->d_consumer); 142542461fbaSPawel Jakub Dawidek } 142642461fbaSPawel Jakub Dawidek } 142742461fbaSPawel Jakub Dawidek 142842461fbaSPawel Jakub Dawidek static void 14292d1661a5SPawel Jakub Dawidek g_raid3_start(struct bio *bp) 14302d1661a5SPawel Jakub Dawidek { 14312d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 14322d1661a5SPawel Jakub Dawidek 14332d1661a5SPawel Jakub Dawidek sc = bp->bio_to->geom->softc; 14342d1661a5SPawel Jakub Dawidek /* 14352d1661a5SPawel Jakub Dawidek * If sc == NULL or there are no valid disks, provider's error 14362d1661a5SPawel Jakub Dawidek * should be set and g_raid3_start() should not be called at all. 14372d1661a5SPawel Jakub Dawidek */ 14382d1661a5SPawel Jakub Dawidek KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 14392d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE), 14402d1661a5SPawel Jakub Dawidek ("Provider's error should be set (error=%d)(device=%s).", 14412d1661a5SPawel Jakub Dawidek bp->bio_to->error, bp->bio_to->name)); 14422d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Request received."); 14432d1661a5SPawel Jakub Dawidek 14442d1661a5SPawel Jakub Dawidek switch (bp->bio_cmd) { 14452d1661a5SPawel Jakub Dawidek case BIO_READ: 14462d1661a5SPawel Jakub Dawidek case BIO_WRITE: 14472d1661a5SPawel Jakub Dawidek case BIO_DELETE: 14482d1661a5SPawel Jakub Dawidek break; 1449*8b522bdaSWarner Losh case BIO_SPEEDUP: 145042461fbaSPawel Jakub Dawidek case BIO_FLUSH: 145142461fbaSPawel Jakub Dawidek g_raid3_flush(sc, bp); 145242461fbaSPawel Jakub Dawidek return; 14532d1661a5SPawel Jakub Dawidek case BIO_GETATTR: 14542d1661a5SPawel Jakub Dawidek default: 14552d1661a5SPawel Jakub Dawidek g_io_deliver(bp, EOPNOTSUPP); 14562d1661a5SPawel Jakub Dawidek return; 14572d1661a5SPawel Jakub Dawidek } 14582d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 14592d1661a5SPawel Jakub Dawidek bioq_insert_tail(&sc->sc_queue, bp); 14608de58113SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 14612d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 14622d1661a5SPawel Jakub Dawidek wakeup(sc); 14632d1661a5SPawel Jakub Dawidek } 14642d1661a5SPawel Jakub Dawidek 14652d1661a5SPawel Jakub Dawidek /* 14663650be51SPawel Jakub Dawidek * Return TRUE if the given request is colliding with a in-progress 14673650be51SPawel Jakub Dawidek * synchronization request. 
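 * Synchronization WRITEs carry component-relative offsets, so they are
 * scaled by (sc_ndisks - 1) below before being compared with regular
 * requests, whose offsets are in provider (striped) address space.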
14682d1661a5SPawel Jakub Dawidek */ 14693650be51SPawel Jakub Dawidek static int 14703650be51SPawel Jakub Dawidek g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp) 14712d1661a5SPawel Jakub Dawidek { 14722d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 14733650be51SPawel Jakub Dawidek struct bio *sbp; 14743650be51SPawel Jakub Dawidek off_t rstart, rend, sstart, send; 14753650be51SPawel Jakub Dawidek int i; 14763650be51SPawel Jakub Dawidek 14773650be51SPawel Jakub Dawidek disk = sc->sc_syncdisk; 14783650be51SPawel Jakub Dawidek if (disk == NULL) 14793650be51SPawel Jakub Dawidek return (0); 14803650be51SPawel Jakub Dawidek rstart = bp->bio_offset; 14813650be51SPawel Jakub Dawidek rend = bp->bio_offset + bp->bio_length; 14823650be51SPawel Jakub Dawidek for (i = 0; i < g_raid3_syncreqs; i++) { 14833650be51SPawel Jakub Dawidek sbp = disk->d_sync.ds_bios[i]; 14843650be51SPawel Jakub Dawidek if (sbp == NULL) 14853650be51SPawel Jakub Dawidek continue; 14863650be51SPawel Jakub Dawidek sstart = sbp->bio_offset; 14873650be51SPawel Jakub Dawidek send = sbp->bio_length; 14883650be51SPawel Jakub Dawidek if (sbp->bio_cmd == BIO_WRITE) { 14893650be51SPawel Jakub Dawidek sstart *= sc->sc_ndisks - 1; 14903650be51SPawel Jakub Dawidek send *= sc->sc_ndisks - 1; 14913650be51SPawel Jakub Dawidek } 14923650be51SPawel Jakub Dawidek send += sstart; 14933650be51SPawel Jakub Dawidek if (rend > sstart && rstart < send) 14943650be51SPawel Jakub Dawidek return (1); 14953650be51SPawel Jakub Dawidek } 14963650be51SPawel Jakub Dawidek return (0); 14973650be51SPawel Jakub Dawidek } 14983650be51SPawel Jakub Dawidek 14993650be51SPawel Jakub Dawidek /* 15003650be51SPawel Jakub Dawidek * Return TRUE if the given sync request is colliding with a in-progress regular 15013650be51SPawel Jakub Dawidek * request. 15023650be51SPawel Jakub Dawidek */ 15033650be51SPawel Jakub Dawidek static int 15043650be51SPawel Jakub Dawidek g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp) 15053650be51SPawel Jakub Dawidek { 15063650be51SPawel Jakub Dawidek off_t rstart, rend, sstart, send; 15072d1661a5SPawel Jakub Dawidek struct bio *bp; 15082d1661a5SPawel Jakub Dawidek 15093650be51SPawel Jakub Dawidek if (sc->sc_syncdisk == NULL) 15103650be51SPawel Jakub Dawidek return (0); 15113650be51SPawel Jakub Dawidek sstart = sbp->bio_offset; 15123650be51SPawel Jakub Dawidek send = sstart + sbp->bio_length; 15133650be51SPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) { 15143650be51SPawel Jakub Dawidek rstart = bp->bio_offset; 15153650be51SPawel Jakub Dawidek rend = bp->bio_offset + bp->bio_length; 15163650be51SPawel Jakub Dawidek if (rend > sstart && rstart < send) 15173650be51SPawel Jakub Dawidek return (1); 15182d1661a5SPawel Jakub Dawidek } 15193650be51SPawel Jakub Dawidek return (0); 15202d1661a5SPawel Jakub Dawidek } 15212d1661a5SPawel Jakub Dawidek 15223650be51SPawel Jakub Dawidek /* 15233650be51SPawel Jakub Dawidek * Puts request onto delayed queue. 
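 * The request is re-queued by g_raid3_regular_release() once it no
 * longer collides with any in-flight synchronization request.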
15243650be51SPawel Jakub Dawidek */ 15253650be51SPawel Jakub Dawidek static void 15263650be51SPawel Jakub Dawidek g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp) 15273650be51SPawel Jakub Dawidek { 15283650be51SPawel Jakub Dawidek 15293650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(2, bp, "Delaying request."); 15303650be51SPawel Jakub Dawidek bioq_insert_head(&sc->sc_regular_delayed, bp); 15313650be51SPawel Jakub Dawidek } 15323650be51SPawel Jakub Dawidek 15333650be51SPawel Jakub Dawidek /* 15343650be51SPawel Jakub Dawidek * Puts synchronization request onto delayed queue. 15353650be51SPawel Jakub Dawidek */ 15363650be51SPawel Jakub Dawidek static void 15373650be51SPawel Jakub Dawidek g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp) 15383650be51SPawel Jakub Dawidek { 15393650be51SPawel Jakub Dawidek 15403650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(2, bp, "Delaying synchronization request."); 15413650be51SPawel Jakub Dawidek bioq_insert_tail(&sc->sc_sync_delayed, bp); 15423650be51SPawel Jakub Dawidek } 15433650be51SPawel Jakub Dawidek 15443650be51SPawel Jakub Dawidek /* 15453650be51SPawel Jakub Dawidek * Releases delayed regular requests which don't collide anymore with sync 15463650be51SPawel Jakub Dawidek * requests. 15473650be51SPawel Jakub Dawidek */ 15483650be51SPawel Jakub Dawidek static void 15493650be51SPawel Jakub Dawidek g_raid3_regular_release(struct g_raid3_softc *sc) 15503650be51SPawel Jakub Dawidek { 15513650be51SPawel Jakub Dawidek struct bio *bp, *bp2; 15523650be51SPawel Jakub Dawidek 15533650be51SPawel Jakub Dawidek TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) { 15543650be51SPawel Jakub Dawidek if (g_raid3_sync_collision(sc, bp)) 15553650be51SPawel Jakub Dawidek continue; 15563650be51SPawel Jakub Dawidek bioq_remove(&sc->sc_regular_delayed, bp); 15573650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp); 15583650be51SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 15593650be51SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 15603650be51SPawel Jakub Dawidek #if 0 15613650be51SPawel Jakub Dawidek /* 15623650be51SPawel Jakub Dawidek * wakeup() is not needed, because this function is called from 15633650be51SPawel Jakub Dawidek * the worker thread. 15643650be51SPawel Jakub Dawidek */ 15653650be51SPawel Jakub Dawidek wakeup(&sc->sc_queue); 15663650be51SPawel Jakub Dawidek #endif 15673650be51SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 15683650be51SPawel Jakub Dawidek } 15693650be51SPawel Jakub Dawidek } 15703650be51SPawel Jakub Dawidek 15713650be51SPawel Jakub Dawidek /* 15723650be51SPawel Jakub Dawidek * Releases delayed sync requests which don't collide anymore with regular 15733650be51SPawel Jakub Dawidek * requests. 
15743650be51SPawel Jakub Dawidek */
15753650be51SPawel Jakub Dawidek static void
15763650be51SPawel Jakub Dawidek g_raid3_sync_release(struct g_raid3_softc *sc)
15773650be51SPawel Jakub Dawidek {
15783650be51SPawel Jakub Dawidek struct bio *bp, *bp2;
15793650be51SPawel Jakub Dawidek
15803650be51SPawel Jakub Dawidek TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
15813650be51SPawel Jakub Dawidek if (g_raid3_regular_collision(sc, bp))
15823650be51SPawel Jakub Dawidek continue;
15833650be51SPawel Jakub Dawidek bioq_remove(&sc->sc_sync_delayed, bp);
15843650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(2, bp,
15853650be51SPawel Jakub Dawidek "Releasing delayed synchronization request.");
15863650be51SPawel Jakub Dawidek g_io_request(bp, bp->bio_from);
15873650be51SPawel Jakub Dawidek }
15883650be51SPawel Jakub Dawidek }
15893650be51SPawel Jakub Dawidek
15903650be51SPawel Jakub Dawidek /*
15913650be51SPawel Jakub Dawidek * Handle synchronization requests.
15923650be51SPawel Jakub Dawidek * Every synchronization request is a two-step process: first, a READ request is
15933650be51SPawel Jakub Dawidek * sent to the active provider and then a WRITE request (with the read data) to the provider
1594e8d57122SPedro F. Giffuni * being synchronized. When the WRITE is finished, a new synchronization request is
15953650be51SPawel Jakub Dawidek * sent.
15963650be51SPawel Jakub Dawidek */
15972d1661a5SPawel Jakub Dawidek static void
15982d1661a5SPawel Jakub Dawidek g_raid3_sync_request(struct bio *bp)
15992d1661a5SPawel Jakub Dawidek {
16002d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc;
16012d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk;
16022d1661a5SPawel Jakub Dawidek
160379e61493SPawel Jakub Dawidek bp->bio_from->index--;
16042d1661a5SPawel Jakub Dawidek sc = bp->bio_from->geom->softc;
16052d1661a5SPawel Jakub Dawidek disk = bp->bio_from->private;
16062d1661a5SPawel Jakub Dawidek if (disk == NULL) {
16073650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
16082d1661a5SPawel Jakub Dawidek g_topology_lock();
16092d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(sc, bp->bio_from);
16102d1661a5SPawel Jakub Dawidek g_topology_unlock();
16113650be51SPawel Jakub Dawidek free(bp->bio_data, M_RAID3);
16122d1661a5SPawel Jakub Dawidek g_destroy_bio(bp);
16133650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock);
16142d1661a5SPawel Jakub Dawidek return;
16152d1661a5SPawel Jakub Dawidek }
16162d1661a5SPawel Jakub Dawidek
16172d1661a5SPawel Jakub Dawidek /*
16182d1661a5SPawel Jakub Dawidek * Synchronization request.
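	 * A finished READ below carries logical data from the RAID3
	 * provider; it is repacked into the layout of the component being
	 * synchronized (the parity disk gets the XOR of the data atoms, a
	 * data disk gets only its own atoms) and re-issued as a WRITE to
	 * that component with offset and length divided by (sc_ndisks - 1).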
16192d1661a5SPawel Jakub Dawidek */ 16202d1661a5SPawel Jakub Dawidek switch (bp->bio_cmd) { 16212d1661a5SPawel Jakub Dawidek case BIO_READ: 16222d1661a5SPawel Jakub Dawidek { 16232d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 16242d1661a5SPawel Jakub Dawidek u_char *dst, *src; 16252d1661a5SPawel Jakub Dawidek off_t left; 16262d1661a5SPawel Jakub Dawidek u_int atom; 16272d1661a5SPawel Jakub Dawidek 16282d1661a5SPawel Jakub Dawidek if (bp->bio_error != 0) { 16292d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, bp, 16302d1661a5SPawel Jakub Dawidek "Synchronization request failed (error=%d).", 16312d1661a5SPawel Jakub Dawidek bp->bio_error); 16322d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 16332d1661a5SPawel Jakub Dawidek return; 16342d1661a5SPawel Jakub Dawidek } 16352d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Synchronization request finished."); 16362d1661a5SPawel Jakub Dawidek atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 16372d1661a5SPawel Jakub Dawidek dst = src = bp->bio_data; 16382d1661a5SPawel Jakub Dawidek if (disk->d_no == sc->sc_ndisks - 1) { 16392d1661a5SPawel Jakub Dawidek u_int n; 16402d1661a5SPawel Jakub Dawidek 16412d1661a5SPawel Jakub Dawidek /* Parity component. */ 16422d1661a5SPawel Jakub Dawidek for (left = bp->bio_length; left > 0; 16432d1661a5SPawel Jakub Dawidek left -= sc->sc_sectorsize) { 16442d1661a5SPawel Jakub Dawidek bcopy(src, dst, atom); 16452d1661a5SPawel Jakub Dawidek src += atom; 16462d1661a5SPawel Jakub Dawidek for (n = 1; n < sc->sc_ndisks - 1; n++) { 164706b215fdSAlexander Motin g_raid3_xor(src, dst, atom); 16482d1661a5SPawel Jakub Dawidek src += atom; 16492d1661a5SPawel Jakub Dawidek } 16502d1661a5SPawel Jakub Dawidek dst += atom; 16512d1661a5SPawel Jakub Dawidek } 16522d1661a5SPawel Jakub Dawidek } else { 16532d1661a5SPawel Jakub Dawidek /* Regular component. 
*/ 16542d1661a5SPawel Jakub Dawidek src += atom * disk->d_no; 16552d1661a5SPawel Jakub Dawidek for (left = bp->bio_length; left > 0; 16562d1661a5SPawel Jakub Dawidek left -= sc->sc_sectorsize) { 16572d1661a5SPawel Jakub Dawidek bcopy(src, dst, atom); 16582d1661a5SPawel Jakub Dawidek src += sc->sc_sectorsize; 16592d1661a5SPawel Jakub Dawidek dst += atom; 16602d1661a5SPawel Jakub Dawidek } 16612d1661a5SPawel Jakub Dawidek } 16623650be51SPawel Jakub Dawidek bp->bio_driver1 = bp->bio_driver2 = NULL; 16633650be51SPawel Jakub Dawidek bp->bio_pflags = 0; 16642d1661a5SPawel Jakub Dawidek bp->bio_offset /= sc->sc_ndisks - 1; 16652d1661a5SPawel Jakub Dawidek bp->bio_length /= sc->sc_ndisks - 1; 16662d1661a5SPawel Jakub Dawidek bp->bio_cmd = BIO_WRITE; 16672d1661a5SPawel Jakub Dawidek bp->bio_cflags = 0; 16682d1661a5SPawel Jakub Dawidek bp->bio_children = bp->bio_inbed = 0; 16692d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 16703650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 16712d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 16722d1661a5SPawel Jakub Dawidek cp->acr, cp->acw, cp->ace)); 167379e61493SPawel Jakub Dawidek cp->index++; 16742d1661a5SPawel Jakub Dawidek g_io_request(bp, cp); 16752d1661a5SPawel Jakub Dawidek return; 16762d1661a5SPawel Jakub Dawidek } 16772d1661a5SPawel Jakub Dawidek case BIO_WRITE: 1678d2fb9c62SPawel Jakub Dawidek { 1679d2fb9c62SPawel Jakub Dawidek struct g_raid3_disk_sync *sync; 16803650be51SPawel Jakub Dawidek off_t boffset, moffset; 16813650be51SPawel Jakub Dawidek void *data; 16823650be51SPawel Jakub Dawidek int i; 1683d2fb9c62SPawel Jakub Dawidek 16842d1661a5SPawel Jakub Dawidek if (bp->bio_error != 0) { 16852d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(0, bp, 16862d1661a5SPawel Jakub Dawidek "Synchronization request failed (error=%d).", 16872d1661a5SPawel Jakub Dawidek bp->bio_error); 16882d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 1689ea973705SPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 16902d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, 16912d1661a5SPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 16922d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 16932d1661a5SPawel Jakub Dawidek return; 16942d1661a5SPawel Jakub Dawidek } 16952d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Synchronization request finished."); 1696d2fb9c62SPawel Jakub Dawidek sync = &disk->d_sync; 16973650be51SPawel Jakub Dawidek if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) || 16983650be51SPawel Jakub Dawidek sync->ds_consumer == NULL || 16993650be51SPawel Jakub Dawidek (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 17003650be51SPawel Jakub Dawidek /* Don't send more synchronization requests. 
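			 * Either the end of the component has been reached,
			 * the synchronization consumer is gone, or the device
			 * is being destroyed.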
*/ 17013650be51SPawel Jakub Dawidek sync->ds_inflight--; 17023650be51SPawel Jakub Dawidek if (sync->ds_bios != NULL) { 1703ef25813dSRuslan Ermilov i = (int)(uintptr_t)bp->bio_caller1; 17043650be51SPawel Jakub Dawidek sync->ds_bios[i] = NULL; 17053650be51SPawel Jakub Dawidek } 17063650be51SPawel Jakub Dawidek free(bp->bio_data, M_RAID3); 17072d1661a5SPawel Jakub Dawidek g_destroy_bio(bp); 17083650be51SPawel Jakub Dawidek if (sync->ds_inflight > 0) 1709d2fb9c62SPawel Jakub Dawidek return; 17103650be51SPawel Jakub Dawidek if (sync->ds_consumer == NULL || 17113650be51SPawel Jakub Dawidek (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 17123650be51SPawel Jakub Dawidek return; 17133650be51SPawel Jakub Dawidek } 17142d1661a5SPawel Jakub Dawidek /* 17152d1661a5SPawel Jakub Dawidek * Disk up-to-date, activate it. 17162d1661a5SPawel Jakub Dawidek */ 17172d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE, 17182d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 17192d1661a5SPawel Jakub Dawidek return; 17203650be51SPawel Jakub Dawidek } 17213650be51SPawel Jakub Dawidek 17223650be51SPawel Jakub Dawidek /* Send next synchronization request. */ 17233650be51SPawel Jakub Dawidek data = bp->bio_data; 1724c55f5707SWarner Losh g_reset_bio(bp); 17253650be51SPawel Jakub Dawidek bp->bio_cmd = BIO_READ; 17263650be51SPawel Jakub Dawidek bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1); 17273650be51SPawel Jakub Dawidek bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 17283650be51SPawel Jakub Dawidek sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 17293650be51SPawel Jakub Dawidek bp->bio_done = g_raid3_sync_done; 17303650be51SPawel Jakub Dawidek bp->bio_data = data; 17313650be51SPawel Jakub Dawidek bp->bio_from = sync->ds_consumer; 17323650be51SPawel Jakub Dawidek bp->bio_to = sc->sc_provider; 17333650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Sending synchronization request."); 17343650be51SPawel Jakub Dawidek sync->ds_consumer->index++; 17352d1661a5SPawel Jakub Dawidek /* 17363650be51SPawel Jakub Dawidek * Delay the request if it is colliding with a regular request. 17372d1661a5SPawel Jakub Dawidek */ 17383650be51SPawel Jakub Dawidek if (g_raid3_regular_collision(sc, bp)) 17393650be51SPawel Jakub Dawidek g_raid3_sync_delay(sc, bp); 17403650be51SPawel Jakub Dawidek else 17413650be51SPawel Jakub Dawidek g_io_request(bp, sync->ds_consumer); 17423650be51SPawel Jakub Dawidek 17433650be51SPawel Jakub Dawidek /* Release delayed requests if possible. */ 17443650be51SPawel Jakub Dawidek g_raid3_regular_release(sc); 17453650be51SPawel Jakub Dawidek 17463650be51SPawel Jakub Dawidek /* Find the smallest offset. */ 17473650be51SPawel Jakub Dawidek moffset = sc->sc_mediasize; 17483650be51SPawel Jakub Dawidek for (i = 0; i < g_raid3_syncreqs; i++) { 17493650be51SPawel Jakub Dawidek bp = sync->ds_bios[i]; 17503650be51SPawel Jakub Dawidek boffset = bp->bio_offset; 17513650be51SPawel Jakub Dawidek if (bp->bio_cmd == BIO_WRITE) 17523650be51SPawel Jakub Dawidek boffset *= sc->sc_ndisks - 1; 17533650be51SPawel Jakub Dawidek if (boffset < moffset) 17543650be51SPawel Jakub Dawidek moffset = boffset; 17553650be51SPawel Jakub Dawidek } 17563650be51SPawel Jakub Dawidek if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) { 17573650be51SPawel Jakub Dawidek /* Update offset_done on every 100 blocks. 
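		 * moffset is the lowest offset any in-flight synchronization
		 * request is still working on, so ds_offset_done never runs
		 * ahead of unfinished work; writing it out only every
		 * 100 * MAXPHYS bytes keeps metadata updates infrequent.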
*/ 17583650be51SPawel Jakub Dawidek sync->ds_offset_done = moffset; 17592d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 17602d1661a5SPawel Jakub Dawidek } 17612d1661a5SPawel Jakub Dawidek return; 1762d2fb9c62SPawel Jakub Dawidek } 17632d1661a5SPawel Jakub Dawidek default: 17642d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 17652d1661a5SPawel Jakub Dawidek bp->bio_cmd, sc->sc_name)); 17662d1661a5SPawel Jakub Dawidek break; 17672d1661a5SPawel Jakub Dawidek } 17682d1661a5SPawel Jakub Dawidek } 17692d1661a5SPawel Jakub Dawidek 17702d1661a5SPawel Jakub Dawidek static int 17712d1661a5SPawel Jakub Dawidek g_raid3_register_request(struct bio *pbp) 17722d1661a5SPawel Jakub Dawidek { 17732d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 17742d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 17752d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 1776ee40c7aaSPawel Jakub Dawidek struct bio *cbp, *tmpbp; 17772d1661a5SPawel Jakub Dawidek off_t offset, length; 1778fa6a7837SDavid E. O'Brien u_int n, ndisks; 1779dba915cfSPawel Jakub Dawidek int round_robin, verify; 17802d1661a5SPawel Jakub Dawidek 1781fa6a7837SDavid E. O'Brien ndisks = 0; 17822d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc; 17832d1661a5SPawel Jakub Dawidek if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 && 17842d1661a5SPawel Jakub Dawidek sc->sc_syncdisk == NULL) { 17852d1661a5SPawel Jakub Dawidek g_io_deliver(pbp, EIO); 17862d1661a5SPawel Jakub Dawidek return (0); 17872d1661a5SPawel Jakub Dawidek } 17882d1661a5SPawel Jakub Dawidek g_raid3_init_bio(pbp); 17892d1661a5SPawel Jakub Dawidek length = pbp->bio_length / (sc->sc_ndisks - 1); 17902d1661a5SPawel Jakub Dawidek offset = pbp->bio_offset / (sc->sc_ndisks - 1); 1791dba915cfSPawel Jakub Dawidek round_robin = verify = 0; 17922d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 17932d1661a5SPawel Jakub Dawidek case BIO_READ: 1794dba915cfSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 1795dba915cfSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1796dba915cfSPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY; 1797dba915cfSPawel Jakub Dawidek verify = 1; 1798dba915cfSPawel Jakub Dawidek ndisks = sc->sc_ndisks; 1799dba915cfSPawel Jakub Dawidek } else { 1800dba915cfSPawel Jakub Dawidek verify = 0; 18012d1661a5SPawel Jakub Dawidek ndisks = sc->sc_ndisks - 1; 1802dba915cfSPawel Jakub Dawidek } 1803dba915cfSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 && 1804dba915cfSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1805dba915cfSPawel Jakub Dawidek round_robin = 1; 1806dba915cfSPawel Jakub Dawidek } else { 1807dba915cfSPawel Jakub Dawidek round_robin = 0; 1808dba915cfSPawel Jakub Dawidek } 1809dba915cfSPawel Jakub Dawidek KASSERT(!round_robin || !verify, 1810dba915cfSPawel Jakub Dawidek ("ROUND-ROBIN and VERIFY are mutually exclusive.")); 1811f5a2f7feSPawel Jakub Dawidek pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1]; 18122d1661a5SPawel Jakub Dawidek break; 18132d1661a5SPawel Jakub Dawidek case BIO_WRITE: 18142d1661a5SPawel Jakub Dawidek case BIO_DELETE: 18153650be51SPawel Jakub Dawidek /* 18163650be51SPawel Jakub Dawidek * Delay the request if it is colliding with a synchronization 18173650be51SPawel Jakub Dawidek * request. 
18183650be51SPawel Jakub Dawidek */ 18193650be51SPawel Jakub Dawidek if (g_raid3_sync_collision(sc, pbp)) { 18203650be51SPawel Jakub Dawidek g_raid3_regular_delay(sc, pbp); 18213650be51SPawel Jakub Dawidek return (0); 18223650be51SPawel Jakub Dawidek } 1823d2fb9c62SPawel Jakub Dawidek 18244d006a98SPawel Jakub Dawidek if (sc->sc_idle) 18254d006a98SPawel Jakub Dawidek g_raid3_unidle(sc); 18260962f942SPawel Jakub Dawidek else 182701f1f41cSPawel Jakub Dawidek sc->sc_last_write = time_uptime; 18284d006a98SPawel Jakub Dawidek 18292d1661a5SPawel Jakub Dawidek ndisks = sc->sc_ndisks; 18302d1661a5SPawel Jakub Dawidek break; 18312d1661a5SPawel Jakub Dawidek } 18322d1661a5SPawel Jakub Dawidek for (n = 0; n < ndisks; n++) { 18332d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 18342d1661a5SPawel Jakub Dawidek cbp = g_raid3_clone_bio(sc, pbp); 18352d1661a5SPawel Jakub Dawidek if (cbp == NULL) { 18362d1661a5SPawel Jakub Dawidek while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) 18372d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 1838a65a0da2SPawel Jakub Dawidek /* 1839a65a0da2SPawel Jakub Dawidek * To prevent deadlock, we must run back up 1840a65a0da2SPawel Jakub Dawidek * with the ENOMEM for failed requests of any 1841a65a0da2SPawel Jakub Dawidek * of our consumers. Our own sync requests 1842a65a0da2SPawel Jakub Dawidek * can stick around, as they are finite. 1843a65a0da2SPawel Jakub Dawidek */ 1844a65a0da2SPawel Jakub Dawidek if ((pbp->bio_cflags & 1845a65a0da2SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_REGULAR) != 0) { 1846a65a0da2SPawel Jakub Dawidek g_io_deliver(pbp, ENOMEM); 1847a65a0da2SPawel Jakub Dawidek return (0); 1848a65a0da2SPawel Jakub Dawidek } 18492d1661a5SPawel Jakub Dawidek return (ENOMEM); 18502d1661a5SPawel Jakub Dawidek } 18512d1661a5SPawel Jakub Dawidek cbp->bio_offset = offset; 18522d1661a5SPawel Jakub Dawidek cbp->bio_length = length; 18532d1661a5SPawel Jakub Dawidek cbp->bio_done = g_raid3_done; 18542d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 18552d1661a5SPawel Jakub Dawidek case BIO_READ: 18562d1661a5SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) { 18572d1661a5SPawel Jakub Dawidek /* 18582d1661a5SPawel Jakub Dawidek * Replace invalid component with the parity 18592d1661a5SPawel Jakub Dawidek * component. 18602d1661a5SPawel Jakub Dawidek */ 18612d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 18622d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 18632d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 1864f5a2f7feSPawel Jakub Dawidek } else if (round_robin && 1865f5a2f7feSPawel Jakub Dawidek disk->d_no == sc->sc_round_robin) { 1866f5a2f7feSPawel Jakub Dawidek /* 1867f5a2f7feSPawel Jakub Dawidek * In round-robin mode skip one data component 1868f5a2f7feSPawel Jakub Dawidek * and use parity component when reading. 
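				 * The skipped component advances with
				 * sc_round_robin, so over time read load is
				 * spread across all components, including the
				 * parity disk.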
1869f5a2f7feSPawel Jakub Dawidek */ 1870f5a2f7feSPawel Jakub Dawidek pbp->bio_driver2 = disk; 1871f5a2f7feSPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 1872f5a2f7feSPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1873f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin++; 1874f5a2f7feSPawel Jakub Dawidek round_robin = 0; 1875dba915cfSPawel Jakub Dawidek } else if (verify && disk->d_no == sc->sc_ndisks - 1) { 1876dba915cfSPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 18772d1661a5SPawel Jakub Dawidek } 18782d1661a5SPawel Jakub Dawidek break; 18792d1661a5SPawel Jakub Dawidek case BIO_WRITE: 18802d1661a5SPawel Jakub Dawidek case BIO_DELETE: 18812d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 18822d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 18832d1661a5SPawel Jakub Dawidek if (n == ndisks - 1) { 18842d1661a5SPawel Jakub Dawidek /* 18852d1661a5SPawel Jakub Dawidek * Active parity component, mark it as such. 18862d1661a5SPawel Jakub Dawidek */ 18872d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= 18882d1661a5SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_PARITY; 18892d1661a5SPawel Jakub Dawidek } 18902d1661a5SPawel Jakub Dawidek } else { 18912d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 18922d1661a5SPawel Jakub Dawidek if (n == ndisks - 1) { 18932d1661a5SPawel Jakub Dawidek /* 18942d1661a5SPawel Jakub Dawidek * Parity component is not connected, 18952d1661a5SPawel Jakub Dawidek * so destroy its request. 18962d1661a5SPawel Jakub Dawidek */ 18972d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= 18982d1661a5SPawel Jakub Dawidek G_RAID3_BIO_PFLAG_NOPARITY; 18992d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 19002d1661a5SPawel Jakub Dawidek cbp = NULL; 19012d1661a5SPawel Jakub Dawidek } else { 19022d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= 19032d1661a5SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_NODISK; 19042d1661a5SPawel Jakub Dawidek disk = NULL; 19052d1661a5SPawel Jakub Dawidek } 19062d1661a5SPawel Jakub Dawidek } 19072d1661a5SPawel Jakub Dawidek break; 19082d1661a5SPawel Jakub Dawidek } 19092d1661a5SPawel Jakub Dawidek if (cbp != NULL) 19102d1661a5SPawel Jakub Dawidek cbp->bio_caller2 = disk; 19112d1661a5SPawel Jakub Dawidek } 19122d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 19132d1661a5SPawel Jakub Dawidek case BIO_READ: 1914f5a2f7feSPawel Jakub Dawidek if (round_robin) { 1915f5a2f7feSPawel Jakub Dawidek /* 1916f5a2f7feSPawel Jakub Dawidek * If we are in round-robin mode and 'round_robin' is 1917f5a2f7feSPawel Jakub Dawidek * still 1, it means, that we skipped parity component 1918f5a2f7feSPawel Jakub Dawidek * for this read and must reset sc_round_robin field. 
1919f5a2f7feSPawel Jakub Dawidek */ 1920f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin = 0; 1921f5a2f7feSPawel Jakub Dawidek } 1922ee40c7aaSPawel Jakub Dawidek G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 19232d1661a5SPawel Jakub Dawidek disk = cbp->bio_caller2; 19242d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 19252d1661a5SPawel Jakub Dawidek cbp->bio_to = cp->provider; 19262d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Sending request."); 19273650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 19282d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", 19292d1661a5SPawel Jakub Dawidek cp->provider->name, cp->acr, cp->acw, cp->ace)); 193079e61493SPawel Jakub Dawidek cp->index++; 19312d1661a5SPawel Jakub Dawidek g_io_request(cbp, cp); 19322d1661a5SPawel Jakub Dawidek } 19332d1661a5SPawel Jakub Dawidek break; 19342d1661a5SPawel Jakub Dawidek case BIO_WRITE: 19352d1661a5SPawel Jakub Dawidek case BIO_DELETE: 19362d1661a5SPawel Jakub Dawidek /* 19373650be51SPawel Jakub Dawidek * Put request onto inflight queue, so we can check if new 19383650be51SPawel Jakub Dawidek * synchronization requests don't collide with it. 19393650be51SPawel Jakub Dawidek */ 19403650be51SPawel Jakub Dawidek bioq_insert_tail(&sc->sc_inflight, pbp); 19413650be51SPawel Jakub Dawidek 19423650be51SPawel Jakub Dawidek /* 19432d1661a5SPawel Jakub Dawidek * Bump syncid on first write. 19442d1661a5SPawel Jakub Dawidek */ 1945ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) { 1946a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 1947d97d5ee9SPawel Jakub Dawidek g_raid3_bump_syncid(sc); 19482d1661a5SPawel Jakub Dawidek } 19492d1661a5SPawel Jakub Dawidek g_raid3_scatter(pbp); 19502d1661a5SPawel Jakub Dawidek break; 19512d1661a5SPawel Jakub Dawidek } 19522d1661a5SPawel Jakub Dawidek return (0); 19532d1661a5SPawel Jakub Dawidek } 19542d1661a5SPawel Jakub Dawidek 19552d1661a5SPawel Jakub Dawidek static int 19562d1661a5SPawel Jakub Dawidek g_raid3_can_destroy(struct g_raid3_softc *sc) 19572d1661a5SPawel Jakub Dawidek { 19582d1661a5SPawel Jakub Dawidek struct g_geom *gp; 19592d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 19602d1661a5SPawel Jakub Dawidek 19612d1661a5SPawel Jakub Dawidek g_topology_assert(); 19622d1661a5SPawel Jakub Dawidek gp = sc->sc_geom; 196318486a5eSPawel Jakub Dawidek if (gp->softc == NULL) 196418486a5eSPawel Jakub Dawidek return (1); 19652d1661a5SPawel Jakub Dawidek LIST_FOREACH(cp, &gp->consumer, consumer) { 19662d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 19672d1661a5SPawel Jakub Dawidek return (0); 19682d1661a5SPawel Jakub Dawidek } 19692d1661a5SPawel Jakub Dawidek gp = sc->sc_sync.ds_geom; 19702d1661a5SPawel Jakub Dawidek LIST_FOREACH(cp, &gp->consumer, consumer) { 19712d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 19722d1661a5SPawel Jakub Dawidek return (0); 19732d1661a5SPawel Jakub Dawidek } 19742d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 19752d1661a5SPawel Jakub Dawidek sc->sc_name); 19762d1661a5SPawel Jakub Dawidek return (1); 19772d1661a5SPawel Jakub Dawidek } 19782d1661a5SPawel Jakub Dawidek 19792d1661a5SPawel Jakub Dawidek static int 19802d1661a5SPawel Jakub Dawidek g_raid3_try_destroy(struct g_raid3_softc *sc) 19812d1661a5SPawel Jakub Dawidek { 19822d1661a5SPawel Jakub Dawidek 19833650be51SPawel Jakub Dawidek g_topology_assert_not(); 19843650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 19853650be51SPawel 
Jakub Dawidek 19864ed854e8SPawel Jakub Dawidek if (sc->sc_rootmount != NULL) { 19874ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 19884ed854e8SPawel Jakub Dawidek sc->sc_rootmount); 19894ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 19904ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 19914ed854e8SPawel Jakub Dawidek } 19924ed854e8SPawel Jakub Dawidek 19932d1661a5SPawel Jakub Dawidek g_topology_lock(); 19942d1661a5SPawel Jakub Dawidek if (!g_raid3_can_destroy(sc)) { 19952d1661a5SPawel Jakub Dawidek g_topology_unlock(); 19962d1661a5SPawel Jakub Dawidek return (0); 19972d1661a5SPawel Jakub Dawidek } 199818486a5eSPawel Jakub Dawidek sc->sc_geom->softc = NULL; 199918486a5eSPawel Jakub Dawidek sc->sc_sync.ds_geom->softc = NULL; 2000a245a548SPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) { 20012d1661a5SPawel Jakub Dawidek g_topology_unlock(); 20022d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 20032d1661a5SPawel Jakub Dawidek &sc->sc_worker); 20043650be51SPawel Jakub Dawidek /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 20053650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 20062d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_worker); 20072d1661a5SPawel Jakub Dawidek sc->sc_worker = NULL; 20082d1661a5SPawel Jakub Dawidek } else { 20092d1661a5SPawel Jakub Dawidek g_topology_unlock(); 20103650be51SPawel Jakub Dawidek g_raid3_destroy_device(sc); 20112d1661a5SPawel Jakub Dawidek free(sc->sc_disks, M_RAID3); 20122d1661a5SPawel Jakub Dawidek free(sc, M_RAID3); 20132d1661a5SPawel Jakub Dawidek } 20142d1661a5SPawel Jakub Dawidek return (1); 20152d1661a5SPawel Jakub Dawidek } 20162d1661a5SPawel Jakub Dawidek 20172d1661a5SPawel Jakub Dawidek /* 20182d1661a5SPawel Jakub Dawidek * Worker thread. 20192d1661a5SPawel Jakub Dawidek */ 20202d1661a5SPawel Jakub Dawidek static void 20212d1661a5SPawel Jakub Dawidek g_raid3_worker(void *arg) 20222d1661a5SPawel Jakub Dawidek { 20232d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 20242d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 20252d1661a5SPawel Jakub Dawidek struct bio *bp; 20260962f942SPawel Jakub Dawidek int timeout; 20272d1661a5SPawel Jakub Dawidek 20282d1661a5SPawel Jakub Dawidek sc = arg; 2029982d11f8SJeff Roberson thread_lock(curthread); 203063710c4dSJohn Baldwin sched_prio(curthread, PRIBIO); 2031982d11f8SJeff Roberson thread_unlock(curthread); 20322d1661a5SPawel Jakub Dawidek 20333650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 20342d1661a5SPawel Jakub Dawidek for (;;) { 20352d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: Let's see...", __func__); 20362d1661a5SPawel Jakub Dawidek /* 20372d1661a5SPawel Jakub Dawidek * First take a look at events. 20382d1661a5SPawel Jakub Dawidek * This is important to handle events before any I/O requests. 20392d1661a5SPawel Jakub Dawidek */ 20402d1661a5SPawel Jakub Dawidek ep = g_raid3_event_get(sc); 20413650be51SPawel Jakub Dawidek if (ep != NULL) { 2042d97d5ee9SPawel Jakub Dawidek g_raid3_event_remove(sc, ep); 20432d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) { 20442d1661a5SPawel Jakub Dawidek /* Update only device status. 
*/
20452d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3,
20462d1661a5SPawel Jakub Dawidek "Running event for device %s.",
20472d1661a5SPawel Jakub Dawidek sc->sc_name);
20482d1661a5SPawel Jakub Dawidek ep->e_error = 0;
2049d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(sc, 1);
20502d1661a5SPawel Jakub Dawidek } else {
20512d1661a5SPawel Jakub Dawidek /* Update disk status. */
20522d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "Running event for disk %s.",
20532d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(ep->e_disk));
20542d1661a5SPawel Jakub Dawidek ep->e_error = g_raid3_update_disk(ep->e_disk,
2055d97d5ee9SPawel Jakub Dawidek ep->e_state);
20562d1661a5SPawel Jakub Dawidek if (ep->e_error == 0)
2057d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(sc, 0);
20582d1661a5SPawel Jakub Dawidek }
20592d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) {
20602d1661a5SPawel Jakub Dawidek KASSERT(ep->e_error == 0,
20612d1661a5SPawel Jakub Dawidek ("Error cannot be handled."));
20622d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep);
20632d1661a5SPawel Jakub Dawidek } else {
20642d1661a5SPawel Jakub Dawidek ep->e_flags |= G_RAID3_EVENT_DONE;
20652d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
20662d1661a5SPawel Jakub Dawidek ep);
20672d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx);
20682d1661a5SPawel Jakub Dawidek wakeup(ep);
20692d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx);
20702d1661a5SPawel Jakub Dawidek }
20712d1661a5SPawel Jakub Dawidek if ((sc->sc_flags &
20722d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
20732d1661a5SPawel Jakub Dawidek if (g_raid3_try_destroy(sc)) {
20743650be51SPawel Jakub Dawidek curthread->td_pflags &= ~TDP_GEOM;
20753650be51SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Thread exiting.");
20763745c395SJulian Elischer kproc_exit(0);
20772d1661a5SPawel Jakub Dawidek }
20783650be51SPawel Jakub Dawidek }
20792d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__);
20802d1661a5SPawel Jakub Dawidek continue;
20812d1661a5SPawel Jakub Dawidek }
20822d1661a5SPawel Jakub Dawidek /*
20830962f942SPawel Jakub Dawidek * Check if we can mark the array as CLEAN and, if we can't,
20840962f942SPawel Jakub Dawidek * determine how many seconds we should wait.
20850962f942SPawel Jakub Dawidek */
20863650be51SPawel Jakub Dawidek timeout = g_raid3_idle(sc, -1);
20870962f942SPawel Jakub Dawidek /*
20882d1661a5SPawel Jakub Dawidek * Now I/O requests.
20892d1661a5SPawel Jakub Dawidek */
20902d1661a5SPawel Jakub Dawidek /* Get first request from the queue.
*/ 20912d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 20922d1661a5SPawel Jakub Dawidek bp = bioq_first(&sc->sc_queue); 20932d1661a5SPawel Jakub Dawidek if (bp == NULL) { 20942d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & 20952d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 20962d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 20973650be51SPawel Jakub Dawidek if (g_raid3_try_destroy(sc)) { 20983650be51SPawel Jakub Dawidek curthread->td_pflags &= ~TDP_GEOM; 2099d7fad9f6SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Thread exiting."); 21003745c395SJulian Elischer kproc_exit(0); 21013650be51SPawel Jakub Dawidek } 21022d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 21032d1661a5SPawel Jakub Dawidek } 21043650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 2105a2fe5c66SPawel Jakub Dawidek /* 2106a2fe5c66SPawel Jakub Dawidek * XXX: We can miss an event here, because an event 2107a2fe5c66SPawel Jakub Dawidek * can be added without sx-device-lock and without 2108a2fe5c66SPawel Jakub Dawidek * mtx-queue-lock. Maybe I should just stop using 2109a2fe5c66SPawel Jakub Dawidek * dedicated mutex for events synchronization and 2110a2fe5c66SPawel Jakub Dawidek * stick with the queue lock? 2111a2fe5c66SPawel Jakub Dawidek * The event will hang here until next I/O request 2112a2fe5c66SPawel Jakub Dawidek * or next event is received. 2113a2fe5c66SPawel Jakub Dawidek */ 21140962f942SPawel Jakub Dawidek MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1", 21150962f942SPawel Jakub Dawidek timeout * hz); 21163650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 21179bb09163SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__); 21182d1661a5SPawel Jakub Dawidek continue; 21192d1661a5SPawel Jakub Dawidek } 212084edb86dSPawel Jakub Dawidek process: 21212d1661a5SPawel Jakub Dawidek bioq_remove(&sc->sc_queue, bp); 21222d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 21232d1661a5SPawel Jakub Dawidek 21248e007c52SPawel Jakub Dawidek if (bp->bio_from->geom == sc->sc_sync.ds_geom && 21258e007c52SPawel Jakub Dawidek (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) { 21268e007c52SPawel Jakub Dawidek g_raid3_sync_request(bp); /* READ */ 21278e007c52SPawel Jakub Dawidek } else if (bp->bio_to != sc->sc_provider) { 21283650be51SPawel Jakub Dawidek if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0) 21292d1661a5SPawel Jakub Dawidek g_raid3_regular_request(bp); 21303650be51SPawel Jakub Dawidek else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) 21318e007c52SPawel Jakub Dawidek g_raid3_sync_request(bp); /* WRITE */ 2132de6f1c7cSPawel Jakub Dawidek else { 2133de6f1c7cSPawel Jakub Dawidek KASSERT(0, 21349a8fa125SWarner Losh ("Invalid request cflags=0x%hx to=%s.", 2135de6f1c7cSPawel Jakub Dawidek bp->bio_cflags, bp->bio_to->name)); 2136de6f1c7cSPawel Jakub Dawidek } 2137de6f1c7cSPawel Jakub Dawidek } else if (g_raid3_register_request(bp) != 0) { 21382d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 21393650be51SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 214084edb86dSPawel Jakub Dawidek /* 214184edb86dSPawel Jakub Dawidek * We are short on memory, so let's see if there are finished 214284edb86dSPawel Jakub Dawidek * requests we can free. 
214384edb86dSPawel Jakub Dawidek */ 214484edb86dSPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 214584edb86dSPawel Jakub Dawidek if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) 214684edb86dSPawel Jakub Dawidek goto process; 21472d1661a5SPawel Jakub Dawidek } 214884edb86dSPawel Jakub Dawidek /* 214984edb86dSPawel Jakub Dawidek * No finished regular request, so at least keep 215084edb86dSPawel Jakub Dawidek * synchronization running. 215184edb86dSPawel Jakub Dawidek */ 215284edb86dSPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 215384edb86dSPawel Jakub Dawidek if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) 215484edb86dSPawel Jakub Dawidek goto process; 215584edb86dSPawel Jakub Dawidek } 215684edb86dSPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 215784edb86dSPawel Jakub Dawidek MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP, 215884edb86dSPawel Jakub Dawidek "r3:lowmem", hz / 10); 215984edb86dSPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 21602d1661a5SPawel Jakub Dawidek } 2161d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__); 21622d1661a5SPawel Jakub Dawidek } 21632d1661a5SPawel Jakub Dawidek } 21642d1661a5SPawel Jakub Dawidek 21652d1661a5SPawel Jakub Dawidek static void 21660962f942SPawel Jakub Dawidek g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk) 21672d1661a5SPawel Jakub Dawidek { 21682d1661a5SPawel Jakub Dawidek 21693650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 2170501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 2171501250baSPawel Jakub Dawidek return; 21720962f942SPawel Jakub Dawidek if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) { 21732d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.", 21743650be51SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 21752d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 21760962f942SPawel Jakub Dawidek } else if (sc->sc_idle && 21770962f942SPawel Jakub Dawidek (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) { 21782d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.", 21793650be51SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 21802d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 21812d1661a5SPawel Jakub Dawidek } 21822d1661a5SPawel Jakub Dawidek } 21832d1661a5SPawel Jakub Dawidek 21842d1661a5SPawel Jakub Dawidek static void 21852d1661a5SPawel Jakub Dawidek g_raid3_sync_start(struct g_raid3_softc *sc) 21862d1661a5SPawel Jakub Dawidek { 21872d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 21883650be51SPawel Jakub Dawidek struct g_consumer *cp; 21893650be51SPawel Jakub Dawidek struct bio *bp; 21902d1661a5SPawel Jakub Dawidek int error; 21912d1661a5SPawel Jakub Dawidek u_int n; 21922d1661a5SPawel Jakub Dawidek 21933650be51SPawel Jakub Dawidek g_topology_assert_not(); 21943650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 21952d1661a5SPawel Jakub Dawidek 21962d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 21972d1661a5SPawel Jakub Dawidek ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 21982d1661a5SPawel Jakub Dawidek sc->sc_state)); 21992d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).", 22002d1661a5SPawel Jakub Dawidek sc->sc_name, sc->sc_state)); 22012d1661a5SPawel Jakub Dawidek disk = NULL; 22022d1661a5SPawel Jakub Dawidek for (n = 0; 
n < sc->sc_ndisks; n++) { 22032d1661a5SPawel Jakub Dawidek if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING) 22042d1661a5SPawel Jakub Dawidek continue; 22052d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 22062d1661a5SPawel Jakub Dawidek break; 22072d1661a5SPawel Jakub Dawidek } 22082d1661a5SPawel Jakub Dawidek if (disk == NULL) 22092d1661a5SPawel Jakub Dawidek return; 22102d1661a5SPawel Jakub Dawidek 22113650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 22123650be51SPawel Jakub Dawidek g_topology_lock(); 22133650be51SPawel Jakub Dawidek cp = g_new_consumer(sc->sc_sync.ds_geom); 22143650be51SPawel Jakub Dawidek error = g_attach(cp, sc->sc_provider); 22153650be51SPawel Jakub Dawidek KASSERT(error == 0, 22163650be51SPawel Jakub Dawidek ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 22173650be51SPawel Jakub Dawidek error = g_access(cp, 1, 0, 0); 22183650be51SPawel Jakub Dawidek KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 22193650be51SPawel Jakub Dawidek g_topology_unlock(); 22203650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 22213650be51SPawel Jakub Dawidek 22222d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 22232d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 2224501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0) 22252d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 22262d1661a5SPawel Jakub Dawidek KASSERT(disk->d_sync.ds_consumer == NULL, 22272d1661a5SPawel Jakub Dawidek ("Sync consumer already exists (device=%s, disk=%s).", 22282d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk))); 22293650be51SPawel Jakub Dawidek 22303650be51SPawel Jakub Dawidek disk->d_sync.ds_consumer = cp; 22312d1661a5SPawel Jakub Dawidek disk->d_sync.ds_consumer->private = disk; 223279e61493SPawel Jakub Dawidek disk->d_sync.ds_consumer->index = 0; 22332d1661a5SPawel Jakub Dawidek sc->sc_syncdisk = disk; 22343650be51SPawel Jakub Dawidek 22353650be51SPawel Jakub Dawidek /* 22363650be51SPawel Jakub Dawidek * Allocate memory for synchronization bios and initialize them. 22373650be51SPawel Jakub Dawidek */ 22383650be51SPawel Jakub Dawidek disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs, 22393650be51SPawel Jakub Dawidek M_RAID3, M_WAITOK); 22403650be51SPawel Jakub Dawidek for (n = 0; n < g_raid3_syncreqs; n++) { 22413650be51SPawel Jakub Dawidek bp = g_alloc_bio(); 22423650be51SPawel Jakub Dawidek disk->d_sync.ds_bios[n] = bp; 22433650be51SPawel Jakub Dawidek bp->bio_parent = NULL; 22443650be51SPawel Jakub Dawidek bp->bio_cmd = BIO_READ; 22453650be51SPawel Jakub Dawidek bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK); 22463650be51SPawel Jakub Dawidek bp->bio_cflags = 0; 22473650be51SPawel Jakub Dawidek bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1); 22483650be51SPawel Jakub Dawidek bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 22493650be51SPawel Jakub Dawidek disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 22503650be51SPawel Jakub Dawidek bp->bio_done = g_raid3_sync_done; 22513650be51SPawel Jakub Dawidek bp->bio_from = disk->d_sync.ds_consumer; 22523650be51SPawel Jakub Dawidek bp->bio_to = sc->sc_provider; 2253ef25813dSRuslan Ermilov bp->bio_caller1 = (void *)(uintptr_t)n; 22543650be51SPawel Jakub Dawidek } 22553650be51SPawel Jakub Dawidek 22563650be51SPawel Jakub Dawidek /* Set the number of in-flight synchronization requests. 
*/ 22573650be51SPawel Jakub Dawidek disk->d_sync.ds_inflight = g_raid3_syncreqs; 22583650be51SPawel Jakub Dawidek 22593650be51SPawel Jakub Dawidek /* 22603650be51SPawel Jakub Dawidek * Fire off first synchronization requests. 22613650be51SPawel Jakub Dawidek */ 22623650be51SPawel Jakub Dawidek for (n = 0; n < g_raid3_syncreqs; n++) { 22633650be51SPawel Jakub Dawidek bp = disk->d_sync.ds_bios[n]; 22643650be51SPawel Jakub Dawidek G_RAID3_LOGREQ(3, bp, "Sending synchronization request."); 22653650be51SPawel Jakub Dawidek disk->d_sync.ds_consumer->index++; 22663650be51SPawel Jakub Dawidek /* 22673650be51SPawel Jakub Dawidek * Delay the request if it is colliding with a regular request. 22683650be51SPawel Jakub Dawidek */ 22693650be51SPawel Jakub Dawidek if (g_raid3_regular_collision(sc, bp)) 22703650be51SPawel Jakub Dawidek g_raid3_sync_delay(sc, bp); 22713650be51SPawel Jakub Dawidek else 22723650be51SPawel Jakub Dawidek g_io_request(bp, disk->d_sync.ds_consumer); 22733650be51SPawel Jakub Dawidek } 22742d1661a5SPawel Jakub Dawidek } 22752d1661a5SPawel Jakub Dawidek 22762d1661a5SPawel Jakub Dawidek /* 22772d1661a5SPawel Jakub Dawidek * Stop synchronization process. 22782d1661a5SPawel Jakub Dawidek * type: 0 - synchronization finished 22792d1661a5SPawel Jakub Dawidek * 1 - synchronization stopped 22802d1661a5SPawel Jakub Dawidek */ 22812d1661a5SPawel Jakub Dawidek static void 22822d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(struct g_raid3_softc *sc, int type) 22832d1661a5SPawel Jakub Dawidek { 22842d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 22853650be51SPawel Jakub Dawidek struct g_consumer *cp; 22862d1661a5SPawel Jakub Dawidek 22873650be51SPawel Jakub Dawidek g_topology_assert_not(); 22883650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 22893650be51SPawel Jakub Dawidek 22902d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 22912d1661a5SPawel Jakub Dawidek ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 22922d1661a5SPawel Jakub Dawidek sc->sc_state)); 22932d1661a5SPawel Jakub Dawidek disk = sc->sc_syncdisk; 22942d1661a5SPawel Jakub Dawidek sc->sc_syncdisk = NULL; 22952d1661a5SPawel Jakub Dawidek KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name)); 22962d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 22972d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 22982d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 22992d1661a5SPawel Jakub Dawidek if (disk->d_sync.ds_consumer == NULL) 23002d1661a5SPawel Jakub Dawidek return; 23012d1661a5SPawel Jakub Dawidek 23022d1661a5SPawel Jakub Dawidek if (type == 0) { 23032d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.", 23043650be51SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 23052d1661a5SPawel Jakub Dawidek } else /* if (type == 1) */ { 23062d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 23073650be51SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 23082d1661a5SPawel Jakub Dawidek } 23093650be51SPawel Jakub Dawidek free(disk->d_sync.ds_bios, M_RAID3); 23103650be51SPawel Jakub Dawidek disk->d_sync.ds_bios = NULL; 23113650be51SPawel Jakub Dawidek cp = disk->d_sync.ds_consumer; 23122d1661a5SPawel Jakub Dawidek disk->d_sync.ds_consumer = NULL; 23132d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 23143650be51SPawel Jakub Dawidek 
sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 23153650be51SPawel Jakub Dawidek g_topology_lock(); 23163650be51SPawel Jakub Dawidek g_raid3_kill_consumer(sc, cp); 23173650be51SPawel Jakub Dawidek g_topology_unlock(); 23183650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 23192d1661a5SPawel Jakub Dawidek } 23202d1661a5SPawel Jakub Dawidek 23212d1661a5SPawel Jakub Dawidek static void 23222d1661a5SPawel Jakub Dawidek g_raid3_launch_provider(struct g_raid3_softc *sc) 23232d1661a5SPawel Jakub Dawidek { 23242d1661a5SPawel Jakub Dawidek struct g_provider *pp; 2325113d8e50SAlexander Motin struct g_raid3_disk *disk; 2326113d8e50SAlexander Motin int n; 23272d1661a5SPawel Jakub Dawidek 23283650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 23292d1661a5SPawel Jakub Dawidek 23303650be51SPawel Jakub Dawidek g_topology_lock(); 23312d1661a5SPawel Jakub Dawidek pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name); 23322d1661a5SPawel Jakub Dawidek pp->mediasize = sc->sc_mediasize; 23332d1661a5SPawel Jakub Dawidek pp->sectorsize = sc->sc_sectorsize; 2334113d8e50SAlexander Motin pp->stripesize = 0; 2335113d8e50SAlexander Motin pp->stripeoffset = 0; 2336113d8e50SAlexander Motin for (n = 0; n < sc->sc_ndisks; n++) { 2337113d8e50SAlexander Motin disk = &sc->sc_disks[n]; 2338113d8e50SAlexander Motin if (disk->d_consumer && disk->d_consumer->provider && 2339113d8e50SAlexander Motin disk->d_consumer->provider->stripesize > pp->stripesize) { 2340113d8e50SAlexander Motin pp->stripesize = disk->d_consumer->provider->stripesize; 2341113d8e50SAlexander Motin pp->stripeoffset = disk->d_consumer->provider->stripeoffset; 2342113d8e50SAlexander Motin } 2343113d8e50SAlexander Motin } 2344113d8e50SAlexander Motin pp->stripesize *= sc->sc_ndisks - 1; 2345113d8e50SAlexander Motin pp->stripeoffset *= sc->sc_ndisks - 1; 23462d1661a5SPawel Jakub Dawidek sc->sc_provider = pp; 23472d1661a5SPawel Jakub Dawidek g_error_provider(pp, 0); 23483650be51SPawel Jakub Dawidek g_topology_unlock(); 23490cca572eSJohn-Mark Gurney G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 23500cca572eSJohn-Mark Gurney g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks); 23510cca572eSJohn-Mark Gurney 23522d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED) 23532d1661a5SPawel Jakub Dawidek g_raid3_sync_start(sc); 23542d1661a5SPawel Jakub Dawidek } 23552d1661a5SPawel Jakub Dawidek 23562d1661a5SPawel Jakub Dawidek static void 23572d1661a5SPawel Jakub Dawidek g_raid3_destroy_provider(struct g_raid3_softc *sc) 23582d1661a5SPawel Jakub Dawidek { 23592d1661a5SPawel Jakub Dawidek struct bio *bp; 23602d1661a5SPawel Jakub Dawidek 23613650be51SPawel Jakub Dawidek g_topology_assert_not(); 23622d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 23632d1661a5SPawel Jakub Dawidek sc->sc_name)); 23642d1661a5SPawel Jakub Dawidek 23653650be51SPawel Jakub Dawidek g_topology_lock(); 23662d1661a5SPawel Jakub Dawidek g_error_provider(sc->sc_provider, ENXIO); 23672d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 23682d1661a5SPawel Jakub Dawidek while ((bp = bioq_first(&sc->sc_queue)) != NULL) { 23692d1661a5SPawel Jakub Dawidek bioq_remove(&sc->sc_queue, bp); 23702d1661a5SPawel Jakub Dawidek g_io_deliver(bp, ENXIO); 23712d1661a5SPawel Jakub Dawidek } 23722d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 23732d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 23742d1661a5SPawel Jakub Dawidek 
sc->sc_provider->name); 23758b64f3caSAlexander Motin g_wither_provider(sc->sc_provider, ENXIO); 23763650be51SPawel Jakub Dawidek g_topology_unlock(); 23772d1661a5SPawel Jakub Dawidek sc->sc_provider = NULL; 23782d1661a5SPawel Jakub Dawidek if (sc->sc_syncdisk != NULL) 23792d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(sc, 1); 23802d1661a5SPawel Jakub Dawidek } 23812d1661a5SPawel Jakub Dawidek 23822d1661a5SPawel Jakub Dawidek static void 23832d1661a5SPawel Jakub Dawidek g_raid3_go(void *arg) 23842d1661a5SPawel Jakub Dawidek { 23852d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 23862d1661a5SPawel Jakub Dawidek 23872d1661a5SPawel Jakub Dawidek sc = arg; 23882d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 23892d1661a5SPawel Jakub Dawidek g_raid3_event_send(sc, 0, 23902d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE); 23912d1661a5SPawel Jakub Dawidek } 23922d1661a5SPawel Jakub Dawidek 23932d1661a5SPawel Jakub Dawidek static u_int 23942d1661a5SPawel Jakub Dawidek g_raid3_determine_state(struct g_raid3_disk *disk) 23952d1661a5SPawel Jakub Dawidek { 23962d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 23972d1661a5SPawel Jakub Dawidek u_int state; 23982d1661a5SPawel Jakub Dawidek 23992d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 24002d1661a5SPawel Jakub Dawidek if (sc->sc_syncid == disk->d_sync.ds_syncid) { 24012d1661a5SPawel Jakub Dawidek if ((disk->d_flags & 24022d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) { 24032d1661a5SPawel Jakub Dawidek /* Disk does not need synchronization. */ 24042d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_ACTIVE; 24052d1661a5SPawel Jakub Dawidek } else { 24062d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & 24072d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 || 24082d1661a5SPawel Jakub Dawidek (disk->d_flags & 24092d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) { 24102d1661a5SPawel Jakub Dawidek /* 24112d1661a5SPawel Jakub Dawidek * We can start synchronization from 24122d1661a5SPawel Jakub Dawidek * the stored offset. 24132d1661a5SPawel Jakub Dawidek */ 24142d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_SYNCHRONIZING; 24152d1661a5SPawel Jakub Dawidek } else { 24162d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_STALE; 24172d1661a5SPawel Jakub Dawidek } 24182d1661a5SPawel Jakub Dawidek } 24192d1661a5SPawel Jakub Dawidek } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 24202d1661a5SPawel Jakub Dawidek /* 24212d1661a5SPawel Jakub Dawidek * Reset all synchronization data for this disk, 24222d1661a5SPawel Jakub Dawidek * because if it even was synchronized, it was 24232d1661a5SPawel Jakub Dawidek * synchronized to disks with different syncid. 
24242d1661a5SPawel Jakub Dawidek */ 24252d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING; 24262d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset = 0; 24272d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset_done = 0; 24282d1661a5SPawel Jakub Dawidek disk->d_sync.ds_syncid = sc->sc_syncid; 24292d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 || 24302d1661a5SPawel Jakub Dawidek (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) { 24312d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_SYNCHRONIZING; 24322d1661a5SPawel Jakub Dawidek } else { 24332d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_STALE; 24342d1661a5SPawel Jakub Dawidek } 24352d1661a5SPawel Jakub Dawidek } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 24362d1661a5SPawel Jakub Dawidek /* 24372d1661a5SPawel Jakub Dawidek * Not good, NOT GOOD! 24382d1661a5SPawel Jakub Dawidek * It means that the device was started on stale disks 24392d1661a5SPawel Jakub Dawidek * and a fresher disk has just arrived. 24403c57a41dSPawel Jakub Dawidek * If there were writes, the device is broken, sorry. 24412d1661a5SPawel Jakub Dawidek * I think the best choice here is not to touch 2442776fc0e9SYaroslav Tykhiy * this disk and to inform the user loudly. 24432d1661a5SPawel Jakub Dawidek */ 24442d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s was started before the freshest " 24452d1661a5SPawel Jakub Dawidek "disk (%s) arrives!! It will not be connected to the " 24462d1661a5SPawel Jakub Dawidek "running device.", sc->sc_name, 24472d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 24482d1661a5SPawel Jakub Dawidek g_raid3_destroy_disk(disk); 24492d1661a5SPawel Jakub Dawidek state = G_RAID3_DISK_STATE_NONE; 24502d1661a5SPawel Jakub Dawidek /* Return immediately, because disk was destroyed. */ 24512d1661a5SPawel Jakub Dawidek return (state); 24522d1661a5SPawel Jakub Dawidek } 24532d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "State for %s disk: %s.", 24542d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), g_raid3_disk_state2str(state)); 24552d1661a5SPawel Jakub Dawidek return (state); 24562d1661a5SPawel Jakub Dawidek } 24572d1661a5SPawel Jakub Dawidek 24582d1661a5SPawel Jakub Dawidek /* 24592d1661a5SPawel Jakub Dawidek * Update device state. 24602d1661a5SPawel Jakub Dawidek */ 24612d1661a5SPawel Jakub Dawidek static void 2462d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force) 24632d1661a5SPawel Jakub Dawidek { 24642d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 24652d1661a5SPawel Jakub Dawidek u_int state; 24662d1661a5SPawel Jakub Dawidek 24673650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 24682d1661a5SPawel Jakub Dawidek 24692d1661a5SPawel Jakub Dawidek switch (sc->sc_state) { 24702d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_STARTING: 24712d1661a5SPawel Jakub Dawidek { 2472a245a548SPawel Jakub Dawidek u_int n, ndirty, ndisks, genid, syncid; 24732d1661a5SPawel Jakub Dawidek 24742d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_provider == NULL, 24752d1661a5SPawel Jakub Dawidek ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 24762d1661a5SPawel Jakub Dawidek /* 24772d1661a5SPawel Jakub Dawidek * Are we ready? We are, if all disks are connected or 24782d1661a5SPawel Jakub Dawidek * one disk is missing and 'force' is true. 
24792d1661a5SPawel Jakub Dawidek */ 24802d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) { 24812d1661a5SPawel Jakub Dawidek if (!force) 24822d1661a5SPawel Jakub Dawidek callout_drain(&sc->sc_callout); 24832d1661a5SPawel Jakub Dawidek } else { 24842d1661a5SPawel Jakub Dawidek if (force) { 24852d1661a5SPawel Jakub Dawidek /* 24862d1661a5SPawel Jakub Dawidek * Timeout expired, so destroy device. 24872d1661a5SPawel Jakub Dawidek */ 24882d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 24894ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", 24904ed854e8SPawel Jakub Dawidek __LINE__, sc->sc_rootmount); 24914ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 24924ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 24932d1661a5SPawel Jakub Dawidek } 24942d1661a5SPawel Jakub Dawidek return; 24952d1661a5SPawel Jakub Dawidek } 24962d1661a5SPawel Jakub Dawidek 24972d1661a5SPawel Jakub Dawidek /* 2498a245a548SPawel Jakub Dawidek * Find the biggest genid. 2499a245a548SPawel Jakub Dawidek */ 2500a245a548SPawel Jakub Dawidek genid = 0; 2501a245a548SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 2502a245a548SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 2503a245a548SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2504a245a548SPawel Jakub Dawidek continue; 2505a245a548SPawel Jakub Dawidek if (disk->d_genid > genid) 2506a245a548SPawel Jakub Dawidek genid = disk->d_genid; 2507a245a548SPawel Jakub Dawidek } 2508a245a548SPawel Jakub Dawidek sc->sc_genid = genid; 2509a245a548SPawel Jakub Dawidek /* 2510a245a548SPawel Jakub Dawidek * Remove all disks without the biggest genid. 2511a245a548SPawel Jakub Dawidek */ 2512a245a548SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 2513a245a548SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 2514a245a548SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2515a245a548SPawel Jakub Dawidek continue; 2516a245a548SPawel Jakub Dawidek if (disk->d_genid < genid) { 2517a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, 2518a245a548SPawel Jakub Dawidek "Component %s (device %s) broken, skipping.", 2519a245a548SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 2520a245a548SPawel Jakub Dawidek g_raid3_destroy_disk(disk); 2521a245a548SPawel Jakub Dawidek } 2522a245a548SPawel Jakub Dawidek } 2523a245a548SPawel Jakub Dawidek 2524a245a548SPawel Jakub Dawidek /* 25252d1661a5SPawel Jakub Dawidek * There must be at least 'sc->sc_ndisks - 1' components 25262d1661a5SPawel Jakub Dawidek * with the same syncid and without SYNCHRONIZING flag. 25272d1661a5SPawel Jakub Dawidek */ 25282d1661a5SPawel Jakub Dawidek 25292d1661a5SPawel Jakub Dawidek /* 25302d1661a5SPawel Jakub Dawidek * Find the biggest syncid, number of valid components and 25312d1661a5SPawel Jakub Dawidek * number of dirty components. 
25322d1661a5SPawel Jakub Dawidek */ 25332d1661a5SPawel Jakub Dawidek ndirty = ndisks = syncid = 0; 25342d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 25352d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 25362d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 25372d1661a5SPawel Jakub Dawidek continue; 25382d1661a5SPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) 25392d1661a5SPawel Jakub Dawidek ndirty++; 25402d1661a5SPawel Jakub Dawidek if (disk->d_sync.ds_syncid > syncid) { 25412d1661a5SPawel Jakub Dawidek syncid = disk->d_sync.ds_syncid; 25422d1661a5SPawel Jakub Dawidek ndisks = 0; 25432d1661a5SPawel Jakub Dawidek } else if (disk->d_sync.ds_syncid < syncid) { 25442d1661a5SPawel Jakub Dawidek continue; 25452d1661a5SPawel Jakub Dawidek } 25462d1661a5SPawel Jakub Dawidek if ((disk->d_flags & 25472d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) { 25482d1661a5SPawel Jakub Dawidek continue; 25492d1661a5SPawel Jakub Dawidek } 25502d1661a5SPawel Jakub Dawidek ndisks++; 25512d1661a5SPawel Jakub Dawidek } 25522d1661a5SPawel Jakub Dawidek /* 25532d1661a5SPawel Jakub Dawidek * Do we have enough valid components? 25542d1661a5SPawel Jakub Dawidek */ 25552d1661a5SPawel Jakub Dawidek if (ndisks + 1 < sc->sc_ndisks) { 25562d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, 25572d1661a5SPawel Jakub Dawidek "Device %s is broken, too few valid components.", 25582d1661a5SPawel Jakub Dawidek sc->sc_name); 25592d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 25602d1661a5SPawel Jakub Dawidek return; 25612d1661a5SPawel Jakub Dawidek } 25622d1661a5SPawel Jakub Dawidek /* 25632d1661a5SPawel Jakub Dawidek * If there is one DIRTY component and all disks are present, 25642d1661a5SPawel Jakub Dawidek * mark it for synchronization. If there is more than one DIRTY 25652d1661a5SPawel Jakub Dawidek * component, mark parity component for synchronization. 25662d1661a5SPawel Jakub Dawidek */ 25672d1661a5SPawel Jakub Dawidek if (ndisks == sc->sc_ndisks && ndirty == 1) { 25682d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 25692d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 25702d1661a5SPawel Jakub Dawidek if ((disk->d_flags & 25712d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_DIRTY) == 0) { 25722d1661a5SPawel Jakub Dawidek continue; 25732d1661a5SPawel Jakub Dawidek } 25742d1661a5SPawel Jakub Dawidek disk->d_flags |= 25752d1661a5SPawel Jakub Dawidek G_RAID3_DISK_FLAG_SYNCHRONIZING; 25762d1661a5SPawel Jakub Dawidek } 25772d1661a5SPawel Jakub Dawidek } else if (ndisks == sc->sc_ndisks && ndirty > 1) { 25782d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 25792d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING; 25802d1661a5SPawel Jakub Dawidek } 25812d1661a5SPawel Jakub Dawidek 25822d1661a5SPawel Jakub Dawidek sc->sc_syncid = syncid; 25832d1661a5SPawel Jakub Dawidek if (force) { 25842d1661a5SPawel Jakub Dawidek /* Remember to bump syncid on first write. 
*/ 2585ea973705SPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_SYNCID; 25862d1661a5SPawel Jakub Dawidek } 25872d1661a5SPawel Jakub Dawidek if (ndisks == sc->sc_ndisks) 25882d1661a5SPawel Jakub Dawidek state = G_RAID3_DEVICE_STATE_COMPLETE; 25892d1661a5SPawel Jakub Dawidek else /* if (ndisks == sc->sc_ndisks - 1) */ 25902d1661a5SPawel Jakub Dawidek state = G_RAID3_DEVICE_STATE_DEGRADED; 25912d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.", 25922d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_device_state2str(sc->sc_state), 25932d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(state)); 25942d1661a5SPawel Jakub Dawidek sc->sc_state = state; 25952d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 25962d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 25972d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 25982d1661a5SPawel Jakub Dawidek continue; 25992d1661a5SPawel Jakub Dawidek state = g_raid3_determine_state(disk); 26002d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT); 2601a245a548SPawel Jakub Dawidek if (state == G_RAID3_DISK_STATE_STALE) 2602ea973705SPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_SYNCID; 26032d1661a5SPawel Jakub Dawidek } 26042d1661a5SPawel Jakub Dawidek break; 26052d1661a5SPawel Jakub Dawidek } 26062d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_DEGRADED: 26072d1661a5SPawel Jakub Dawidek /* 2608ea973705SPawel Jakub Dawidek * Genid need to be bumped immediately, so do it here. 26092d1661a5SPawel Jakub Dawidek */ 2610ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) { 2611a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_GENID; 2612a245a548SPawel Jakub Dawidek g_raid3_bump_genid(sc); 2613a245a548SPawel Jakub Dawidek } 2614a245a548SPawel Jakub Dawidek 26152d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0) 26162d1661a5SPawel Jakub Dawidek return; 26172d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < 26182d1661a5SPawel Jakub Dawidek sc->sc_ndisks - 1) { 26192d1661a5SPawel Jakub Dawidek if (sc->sc_provider != NULL) 26202d1661a5SPawel Jakub Dawidek g_raid3_destroy_provider(sc); 26212d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 26222d1661a5SPawel Jakub Dawidek return; 26232d1661a5SPawel Jakub Dawidek } 26242d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) == 26252d1661a5SPawel Jakub Dawidek sc->sc_ndisks) { 26262d1661a5SPawel Jakub Dawidek state = G_RAID3_DEVICE_STATE_COMPLETE; 26272d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 26282d1661a5SPawel Jakub Dawidek "Device %s state changed from %s to %s.", 26292d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_device_state2str(sc->sc_state), 26302d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(state)); 26312d1661a5SPawel Jakub Dawidek sc->sc_state = state; 26322d1661a5SPawel Jakub Dawidek } 26332d1661a5SPawel Jakub Dawidek if (sc->sc_provider == NULL) 26342d1661a5SPawel Jakub Dawidek g_raid3_launch_provider(sc); 26354ed854e8SPawel Jakub Dawidek if (sc->sc_rootmount != NULL) { 26364ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 26374ed854e8SPawel Jakub Dawidek sc->sc_rootmount); 26384ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 26394ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 26404ed854e8SPawel Jakub Dawidek } 26412d1661a5SPawel Jakub Dawidek break; 26422d1661a5SPawel Jakub Dawidek case 
G_RAID3_DEVICE_STATE_COMPLETE: 26432d1661a5SPawel Jakub Dawidek /* 2644ea973705SPawel Jakub Dawidek * Genid need to be bumped immediately, so do it here. 26452d1661a5SPawel Jakub Dawidek */ 2646ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) { 2647a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_GENID; 2648a245a548SPawel Jakub Dawidek g_raid3_bump_genid(sc); 2649a245a548SPawel Jakub Dawidek } 2650a245a548SPawel Jakub Dawidek 26512d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0) 26522d1661a5SPawel Jakub Dawidek return; 26532d1661a5SPawel Jakub Dawidek KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >= 26542d1661a5SPawel Jakub Dawidek sc->sc_ndisks - 1, 26552d1661a5SPawel Jakub Dawidek ("Too few ACTIVE components in COMPLETE state (device %s).", 26562d1661a5SPawel Jakub Dawidek sc->sc_name)); 26572d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) == 26582d1661a5SPawel Jakub Dawidek sc->sc_ndisks - 1) { 26592d1661a5SPawel Jakub Dawidek state = G_RAID3_DEVICE_STATE_DEGRADED; 26602d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 26612d1661a5SPawel Jakub Dawidek "Device %s state changed from %s to %s.", 26622d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_device_state2str(sc->sc_state), 26632d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(state)); 26642d1661a5SPawel Jakub Dawidek sc->sc_state = state; 26652d1661a5SPawel Jakub Dawidek } 26662d1661a5SPawel Jakub Dawidek if (sc->sc_provider == NULL) 26672d1661a5SPawel Jakub Dawidek g_raid3_launch_provider(sc); 26684ed854e8SPawel Jakub Dawidek if (sc->sc_rootmount != NULL) { 26694ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 26704ed854e8SPawel Jakub Dawidek sc->sc_rootmount); 26714ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 26724ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 26734ed854e8SPawel Jakub Dawidek } 26742d1661a5SPawel Jakub Dawidek break; 26752d1661a5SPawel Jakub Dawidek default: 26762d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name, 26772d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state))); 26782d1661a5SPawel Jakub Dawidek break; 26792d1661a5SPawel Jakub Dawidek } 26802d1661a5SPawel Jakub Dawidek } 26812d1661a5SPawel Jakub Dawidek 26822d1661a5SPawel Jakub Dawidek /* 26832d1661a5SPawel Jakub Dawidek * Update disk state and device state if needed. 
26842d1661a5SPawel Jakub Dawidek */ 26852d1661a5SPawel Jakub Dawidek #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \ 26862d1661a5SPawel Jakub Dawidek "Disk %s state changed from %s to %s (device %s).", \ 26872d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), \ 26882d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state), \ 26892d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(state), sc->sc_name) 26902d1661a5SPawel Jakub Dawidek static int 2691d97d5ee9SPawel Jakub Dawidek g_raid3_update_disk(struct g_raid3_disk *disk, u_int state) 26922d1661a5SPawel Jakub Dawidek { 26932d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 26942d1661a5SPawel Jakub Dawidek 26952d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 26963650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 26973650be51SPawel Jakub Dawidek 26982d1661a5SPawel Jakub Dawidek again: 26992d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.", 27002d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state), 27012d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(state)); 27022d1661a5SPawel Jakub Dawidek switch (state) { 27032d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NEW: 27042d1661a5SPawel Jakub Dawidek /* 27052d1661a5SPawel Jakub Dawidek * Possible scenarios: 27062d1661a5SPawel Jakub Dawidek * 1. New disk arrive. 27072d1661a5SPawel Jakub Dawidek */ 27082d1661a5SPawel Jakub Dawidek /* Previous state should be NONE. */ 27092d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE, 27102d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27112d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27122d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27132d1661a5SPawel Jakub Dawidek 27142d1661a5SPawel Jakub Dawidek disk->d_state = state; 27150cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s: provider %s detected.", 27162d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27172d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) 27182d1661a5SPawel Jakub Dawidek break; 27192d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27202d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27212d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27222d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27232d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27242d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27252d1661a5SPawel Jakub Dawidek state = g_raid3_determine_state(disk); 27262d1661a5SPawel Jakub Dawidek if (state != G_RAID3_DISK_STATE_NONE) 27272d1661a5SPawel Jakub Dawidek goto again; 27282d1661a5SPawel Jakub Dawidek break; 27292d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_ACTIVE: 27302d1661a5SPawel Jakub Dawidek /* 27312d1661a5SPawel Jakub Dawidek * Possible scenarios: 27322d1661a5SPawel Jakub Dawidek * 1. New disk does not need synchronization. 27332d1661a5SPawel Jakub Dawidek * 2. Synchronization process finished successfully. 
27342d1661a5SPawel Jakub Dawidek */ 27352d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27362d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27372d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27382d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27392d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27402d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27412d1661a5SPawel Jakub Dawidek /* Previous state should be NEW or SYNCHRONIZING. */ 27422d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW || 27432d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 27442d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27452d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27462d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27472d1661a5SPawel Jakub Dawidek 2748bf31327cSPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 27492d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING; 27502d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC; 27512d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(sc, 0); 27522d1661a5SPawel Jakub Dawidek } 27532d1661a5SPawel Jakub Dawidek disk->d_state = state; 27542d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset = 0; 27552d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset_done = 0; 27560962f942SPawel Jakub Dawidek g_raid3_update_idle(sc, disk); 2757bf31327cSPawel Jakub Dawidek g_raid3_update_metadata(disk); 27580cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s: provider %s activated.", 27592d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27602d1661a5SPawel Jakub Dawidek break; 27612d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_STALE: 27622d1661a5SPawel Jakub Dawidek /* 27632d1661a5SPawel Jakub Dawidek * Possible scenarios: 27642d1661a5SPawel Jakub Dawidek * 1. Stale disk was connected. 27652d1661a5SPawel Jakub Dawidek */ 27662d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 27672d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 27682d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27692d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27702d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27712d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27722d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27732d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27742d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27752d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27762d1661a5SPawel Jakub Dawidek /* 27772d1661a5SPawel Jakub Dawidek * STALE state is only possible if device is marked 27782d1661a5SPawel Jakub Dawidek * NOAUTOSYNC. 
*/ 27802d1661a5SPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0, 27812d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27822d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27832d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27842d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27852d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27862d1661a5SPawel Jakub Dawidek 27872d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 27882d1661a5SPawel Jakub Dawidek disk->d_state = state; 27892d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 27902d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s is stale.", 27912d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27922d1661a5SPawel Jakub Dawidek break; 27932d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_SYNCHRONIZING: 27942d1661a5SPawel Jakub Dawidek /* 27952d1661a5SPawel Jakub Dawidek * Possible scenarios: 27962d1661a5SPawel Jakub Dawidek * 1. Disk which needs synchronization was connected. 27972d1661a5SPawel Jakub Dawidek */ 27982d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 27992d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 28002d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 28012d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28022d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 28032d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 28042d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 28052d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28062d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28072d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28082d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 28092d1661a5SPawel Jakub Dawidek 28102d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NEW) 28112d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 28122d1661a5SPawel Jakub Dawidek disk->d_state = state; 28132d1661a5SPawel Jakub Dawidek if (sc->sc_provider != NULL) { 28142d1661a5SPawel Jakub Dawidek g_raid3_sync_start(sc); 28152d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 28162d1661a5SPawel Jakub Dawidek } 28172d1661a5SPawel Jakub Dawidek break; 28182d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_DISCONNECTED: 28192d1661a5SPawel Jakub Dawidek /* 28202d1661a5SPawel Jakub Dawidek * Possible scenarios: 28212d1661a5SPawel Jakub Dawidek * 1. Device wasn't running yet, but a disk disappeared. 28222d1661a5SPawel Jakub Dawidek * 2. Disk was active and disappeared. 28232d1661a5SPawel Jakub Dawidek * 3. Disk disappeared during the synchronization process. 28242d1661a5SPawel Jakub Dawidek */ 28252d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 28262d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 28272d1661a5SPawel Jakub Dawidek /* 28282d1661a5SPawel Jakub Dawidek * Previous state should be ACTIVE, STALE or 28292d1661a5SPawel Jakub Dawidek * SYNCHRONIZING. 
28302d1661a5SPawel Jakub Dawidek */ 28312d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 28322d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_STALE || 28332d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 28342d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", 28352d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28362d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28372d1661a5SPawel Jakub Dawidek } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) { 28382d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 28392d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 28402d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", 28412d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28422d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28432d1661a5SPawel Jakub Dawidek /* 28442d1661a5SPawel Jakub Dawidek * Reset bumping syncid if disk disappeared in STARTING 28452d1661a5SPawel Jakub Dawidek * state. 28462d1661a5SPawel Jakub Dawidek */ 2847ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) 2848a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 28492d1661a5SPawel Jakub Dawidek #ifdef INVARIANTS 28502d1661a5SPawel Jakub Dawidek } else { 28512d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 28522d1661a5SPawel Jakub Dawidek sc->sc_name, 28532d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28542d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28552d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28562d1661a5SPawel Jakub Dawidek #endif 28572d1661a5SPawel Jakub Dawidek } 28582d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 28592d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.", 28602d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 28612d1661a5SPawel Jakub Dawidek 28622d1661a5SPawel Jakub Dawidek g_raid3_destroy_disk(disk); 28632d1661a5SPawel Jakub Dawidek break; 28642d1661a5SPawel Jakub Dawidek default: 28652d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Unknown state (%u).", state)); 28662d1661a5SPawel Jakub Dawidek break; 28672d1661a5SPawel Jakub Dawidek } 28682d1661a5SPawel Jakub Dawidek return (0); 28692d1661a5SPawel Jakub Dawidek } 28702d1661a5SPawel Jakub Dawidek #undef DISK_STATE_CHANGED 28712d1661a5SPawel Jakub Dawidek 2872ea973705SPawel Jakub Dawidek int 28732d1661a5SPawel Jakub Dawidek g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md) 28742d1661a5SPawel Jakub Dawidek { 28752d1661a5SPawel Jakub Dawidek struct g_provider *pp; 28762d1661a5SPawel Jakub Dawidek u_char *buf; 28772d1661a5SPawel Jakub Dawidek int error; 28782d1661a5SPawel Jakub Dawidek 28792d1661a5SPawel Jakub Dawidek g_topology_assert(); 28802d1661a5SPawel Jakub Dawidek 28812d1661a5SPawel Jakub Dawidek error = g_access(cp, 1, 0, 0); 28822d1661a5SPawel Jakub Dawidek if (error != 0) 28832d1661a5SPawel Jakub Dawidek return (error); 28842d1661a5SPawel Jakub Dawidek pp = cp->provider; 28852d1661a5SPawel Jakub Dawidek g_topology_unlock(); 28862d1661a5SPawel Jakub Dawidek /* Metadata are stored on last sector. 
*/ 28872d1661a5SPawel Jakub Dawidek buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 28882d1661a5SPawel Jakub Dawidek &error); 28892d1661a5SPawel Jakub Dawidek g_topology_lock(); 28902d1661a5SPawel Jakub Dawidek g_access(cp, -1, 0, 0); 28918a4a44b5SMaxim Sobolev if (buf == NULL) { 2892a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2893a245a548SPawel Jakub Dawidek cp->provider->name, error); 28942d1661a5SPawel Jakub Dawidek return (error); 28952d1661a5SPawel Jakub Dawidek } 28962d1661a5SPawel Jakub Dawidek 28972d1661a5SPawel Jakub Dawidek /* Decode metadata. */ 28982d1661a5SPawel Jakub Dawidek error = raid3_metadata_decode(buf, md); 28992d1661a5SPawel Jakub Dawidek g_free(buf); 29002d1661a5SPawel Jakub Dawidek if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0) 29012d1661a5SPawel Jakub Dawidek return (EINVAL); 2902a245a548SPawel Jakub Dawidek if (md->md_version > G_RAID3_VERSION) { 2903a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, 2904a245a548SPawel Jakub Dawidek "Kernel module is too old to handle metadata from %s.", 2905a245a548SPawel Jakub Dawidek cp->provider->name); 2906a245a548SPawel Jakub Dawidek return (EINVAL); 2907a245a548SPawel Jakub Dawidek } 29082d1661a5SPawel Jakub Dawidek if (error != 0) { 29092d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 29102d1661a5SPawel Jakub Dawidek cp->provider->name); 29112d1661a5SPawel Jakub Dawidek return (error); 29122d1661a5SPawel Jakub Dawidek } 291395959703SAndrey V. Elsukov if (md->md_sectorsize > MAXPHYS) { 291495959703SAndrey V. Elsukov G_RAID3_DEBUG(0, "The blocksize is too big."); 291595959703SAndrey V. Elsukov return (EINVAL); 291695959703SAndrey V. Elsukov } 29172d1661a5SPawel Jakub Dawidek 29182d1661a5SPawel Jakub Dawidek return (0); 29192d1661a5SPawel Jakub Dawidek } 29202d1661a5SPawel Jakub Dawidek 29212d1661a5SPawel Jakub Dawidek static int 29222d1661a5SPawel Jakub Dawidek g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp, 29232d1661a5SPawel Jakub Dawidek struct g_raid3_metadata *md) 29242d1661a5SPawel Jakub Dawidek { 29252d1661a5SPawel Jakub Dawidek 29262d1661a5SPawel Jakub Dawidek if (md->md_no >= sc->sc_ndisks) { 29272d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.", 29282d1661a5SPawel Jakub Dawidek pp->name, md->md_no); 29292d1661a5SPawel Jakub Dawidek return (EINVAL); 29302d1661a5SPawel Jakub Dawidek } 29312d1661a5SPawel Jakub Dawidek if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) { 29322d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.", 29332d1661a5SPawel Jakub Dawidek pp->name, md->md_no); 29342d1661a5SPawel Jakub Dawidek return (EEXIST); 29352d1661a5SPawel Jakub Dawidek } 29362d1661a5SPawel Jakub Dawidek if (md->md_all != sc->sc_ndisks) { 29372d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29382d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29392d1661a5SPawel Jakub Dawidek "md_all", pp->name, sc->sc_name); 29402d1661a5SPawel Jakub Dawidek return (EINVAL); 29412d1661a5SPawel Jakub Dawidek } 294211b2174fSPawel Jakub Dawidek if ((md->md_mediasize % md->md_sectorsize) != 0) { 294311b2174fSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != " 294411b2174fSPawel Jakub Dawidek "0) on disk %s (device %s), skipping.", pp->name, 294511b2174fSPawel Jakub Dawidek sc->sc_name); 294611b2174fSPawel Jakub Dawidek return (EINVAL); 294711b2174fSPawel Jakub 
Dawidek } 29482d1661a5SPawel Jakub Dawidek if (md->md_mediasize != sc->sc_mediasize) { 29492d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29502d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29512d1661a5SPawel Jakub Dawidek "md_mediasize", pp->name, sc->sc_name); 29522d1661a5SPawel Jakub Dawidek return (EINVAL); 29532d1661a5SPawel Jakub Dawidek } 29542d1661a5SPawel Jakub Dawidek if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) { 29552d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29562d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29572d1661a5SPawel Jakub Dawidek "md_mediasize", pp->name, sc->sc_name); 29582d1661a5SPawel Jakub Dawidek return (EINVAL); 29592d1661a5SPawel Jakub Dawidek } 29602d1661a5SPawel Jakub Dawidek if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) { 29612d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29622d1661a5SPawel Jakub Dawidek "Invalid size of disk %s (device %s), skipping.", pp->name, 29632d1661a5SPawel Jakub Dawidek sc->sc_name); 29642d1661a5SPawel Jakub Dawidek return (EINVAL); 29652d1661a5SPawel Jakub Dawidek } 29662d1661a5SPawel Jakub Dawidek if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) { 29672d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29682d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29692d1661a5SPawel Jakub Dawidek "md_sectorsize", pp->name, sc->sc_name); 29702d1661a5SPawel Jakub Dawidek return (EINVAL); 29712d1661a5SPawel Jakub Dawidek } 29722d1661a5SPawel Jakub Dawidek if (md->md_sectorsize != sc->sc_sectorsize) { 29732d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29742d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29752d1661a5SPawel Jakub Dawidek "md_sectorsize", pp->name, sc->sc_name); 29762d1661a5SPawel Jakub Dawidek return (EINVAL); 29772d1661a5SPawel Jakub Dawidek } 29782d1661a5SPawel Jakub Dawidek if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 29792d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29802d1661a5SPawel Jakub Dawidek "Invalid sector size of disk %s (device %s), skipping.", 29812d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 29822d1661a5SPawel Jakub Dawidek return (EINVAL); 29832d1661a5SPawel Jakub Dawidek } 29842d1661a5SPawel Jakub Dawidek if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) { 29852d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29862d1661a5SPawel Jakub Dawidek "Invalid device flags on disk %s (device %s), skipping.", 29872d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 29882d1661a5SPawel Jakub Dawidek return (EINVAL); 29892d1661a5SPawel Jakub Dawidek } 2990dba915cfSPawel Jakub Dawidek if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 2991dba915cfSPawel Jakub Dawidek (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) { 2992dba915cfSPawel Jakub Dawidek /* 2993dba915cfSPawel Jakub Dawidek * VERIFY and ROUND-ROBIN options are mutually exclusive. 
2994dba915cfSPawel Jakub Dawidek */ 2995dba915cfSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on " 2996dba915cfSPawel Jakub Dawidek "disk %s (device %s), skipping.", pp->name, sc->sc_name); 2997dba915cfSPawel Jakub Dawidek return (EINVAL); 2998dba915cfSPawel Jakub Dawidek } 29992d1661a5SPawel Jakub Dawidek if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) { 30002d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 30012d1661a5SPawel Jakub Dawidek "Invalid disk flags on disk %s (device %s), skipping.", 30022d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 30032d1661a5SPawel Jakub Dawidek return (EINVAL); 30042d1661a5SPawel Jakub Dawidek } 30052d1661a5SPawel Jakub Dawidek return (0); 30062d1661a5SPawel Jakub Dawidek } 30072d1661a5SPawel Jakub Dawidek 3008ea973705SPawel Jakub Dawidek int 30092d1661a5SPawel Jakub Dawidek g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp, 30102d1661a5SPawel Jakub Dawidek struct g_raid3_metadata *md) 30112d1661a5SPawel Jakub Dawidek { 30122d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 30132d1661a5SPawel Jakub Dawidek int error; 30142d1661a5SPawel Jakub Dawidek 30153650be51SPawel Jakub Dawidek g_topology_assert_not(); 30162d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Adding disk %s.", pp->name); 30172d1661a5SPawel Jakub Dawidek 30182d1661a5SPawel Jakub Dawidek error = g_raid3_check_metadata(sc, pp, md); 30192d1661a5SPawel Jakub Dawidek if (error != 0) 30202d1661a5SPawel Jakub Dawidek return (error); 3021a245a548SPawel Jakub Dawidek if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING && 3022a245a548SPawel Jakub Dawidek md->md_genid < sc->sc_genid) { 3023a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.", 3024a245a548SPawel Jakub Dawidek pp->name, sc->sc_name); 3025a245a548SPawel Jakub Dawidek return (EINVAL); 3026a245a548SPawel Jakub Dawidek } 30272d1661a5SPawel Jakub Dawidek disk = g_raid3_init_disk(sc, pp, md, &error); 30282d1661a5SPawel Jakub Dawidek if (disk == NULL) 30292d1661a5SPawel Jakub Dawidek return (error); 30302d1661a5SPawel Jakub Dawidek error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW, 30312d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_WAIT); 3032a245a548SPawel Jakub Dawidek if (error != 0) 30332d1661a5SPawel Jakub Dawidek return (error); 3034a245a548SPawel Jakub Dawidek if (md->md_version < G_RAID3_VERSION) { 3035a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 3036a245a548SPawel Jakub Dawidek pp->name, md->md_version, G_RAID3_VERSION); 3037a245a548SPawel Jakub Dawidek g_raid3_update_metadata(disk); 3038a245a548SPawel Jakub Dawidek } 3039a245a548SPawel Jakub Dawidek return (0); 30402d1661a5SPawel Jakub Dawidek } 30412d1661a5SPawel Jakub Dawidek 3042712fe9bdSPawel Jakub Dawidek static void 3043712fe9bdSPawel Jakub Dawidek g_raid3_destroy_delayed(void *arg, int flag) 3044712fe9bdSPawel Jakub Dawidek { 3045712fe9bdSPawel Jakub Dawidek struct g_raid3_softc *sc; 3046712fe9bdSPawel Jakub Dawidek int error; 3047712fe9bdSPawel Jakub Dawidek 3048712fe9bdSPawel Jakub Dawidek if (flag == EV_CANCEL) { 3049712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Destroying canceled."); 3050712fe9bdSPawel Jakub Dawidek return; 3051712fe9bdSPawel Jakub Dawidek } 3052712fe9bdSPawel Jakub Dawidek sc = arg; 3053712fe9bdSPawel Jakub Dawidek g_topology_unlock(); 3054712fe9bdSPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3055712fe9bdSPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0, 3056712fe9bdSPawel Jakub Dawidek 
("DESTROY flag set on %s.", sc->sc_name)); 3057712fe9bdSPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0, 3058712fe9bdSPawel Jakub Dawidek ("DESTROYING flag not set on %s.", sc->sc_name)); 3059712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name); 3060712fe9bdSPawel Jakub Dawidek error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT); 3061712fe9bdSPawel Jakub Dawidek if (error != 0) { 3062712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 3063712fe9bdSPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 3064712fe9bdSPawel Jakub Dawidek } 3065712fe9bdSPawel Jakub Dawidek g_topology_lock(); 3066712fe9bdSPawel Jakub Dawidek } 3067712fe9bdSPawel Jakub Dawidek 30682d1661a5SPawel Jakub Dawidek static int 30692d1661a5SPawel Jakub Dawidek g_raid3_access(struct g_provider *pp, int acr, int acw, int ace) 30702d1661a5SPawel Jakub Dawidek { 30712d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 3072712fe9bdSPawel Jakub Dawidek int dcr, dcw, dce, error = 0; 30732d1661a5SPawel Jakub Dawidek 30742d1661a5SPawel Jakub Dawidek g_topology_assert(); 30752d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 30762d1661a5SPawel Jakub Dawidek acw, ace); 30772d1661a5SPawel Jakub Dawidek 30781f7fec3cSPawel Jakub Dawidek sc = pp->geom->softc; 30791f7fec3cSPawel Jakub Dawidek if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 30801f7fec3cSPawel Jakub Dawidek return (0); 30811f7fec3cSPawel Jakub Dawidek KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 30821f7fec3cSPawel Jakub Dawidek 30832d1661a5SPawel Jakub Dawidek dcr = pp->acr + acr; 30842d1661a5SPawel Jakub Dawidek dcw = pp->acw + acw; 30852d1661a5SPawel Jakub Dawidek dce = pp->ace + ace; 30862d1661a5SPawel Jakub Dawidek 30873650be51SPawel Jakub Dawidek g_topology_unlock(); 30883650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3089712fe9bdSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 || 30903650be51SPawel Jakub Dawidek g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) { 30913650be51SPawel Jakub Dawidek if (acr > 0 || acw > 0 || ace > 0) 30923650be51SPawel Jakub Dawidek error = ENXIO; 30933650be51SPawel Jakub Dawidek goto end; 30942d1661a5SPawel Jakub Dawidek } 3095f62c1a47SAlexander Motin if (dcw == 0) 30963650be51SPawel Jakub Dawidek g_raid3_idle(sc, dcw); 3097712fe9bdSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) { 3098712fe9bdSPawel Jakub Dawidek if (acr > 0 || acw > 0 || ace > 0) { 3099712fe9bdSPawel Jakub Dawidek error = ENXIO; 3100712fe9bdSPawel Jakub Dawidek goto end; 3101712fe9bdSPawel Jakub Dawidek } 3102712fe9bdSPawel Jakub Dawidek if (dcr == 0 && dcw == 0 && dce == 0) { 3103712fe9bdSPawel Jakub Dawidek g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK, 3104712fe9bdSPawel Jakub Dawidek sc, NULL); 3105712fe9bdSPawel Jakub Dawidek } 3106712fe9bdSPawel Jakub Dawidek } 31073650be51SPawel Jakub Dawidek end: 31083650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 31093650be51SPawel Jakub Dawidek g_topology_lock(); 31103650be51SPawel Jakub Dawidek return (error); 31112d1661a5SPawel Jakub Dawidek } 31122d1661a5SPawel Jakub Dawidek 31132d1661a5SPawel Jakub Dawidek static struct g_geom * 31142d1661a5SPawel Jakub Dawidek g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md) 31152d1661a5SPawel Jakub Dawidek { 31162d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 31172d1661a5SPawel Jakub Dawidek struct g_geom *gp; 
31182d1661a5SPawel Jakub Dawidek int error, timeout; 31192d1661a5SPawel Jakub Dawidek u_int n; 31202d1661a5SPawel Jakub Dawidek 31212d1661a5SPawel Jakub Dawidek g_topology_assert(); 31222d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id); 31232d1661a5SPawel Jakub Dawidek 31242d1661a5SPawel Jakub Dawidek /* One disk is minimum. */ 31252d1661a5SPawel Jakub Dawidek if (md->md_all < 1) 31262d1661a5SPawel Jakub Dawidek return (NULL); 31272d1661a5SPawel Jakub Dawidek /* 31282d1661a5SPawel Jakub Dawidek * Action geom. 31292d1661a5SPawel Jakub Dawidek */ 31302d1661a5SPawel Jakub Dawidek gp = g_new_geomf(mp, "%s", md->md_name); 31312d1661a5SPawel Jakub Dawidek sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO); 31322d1661a5SPawel Jakub Dawidek sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3, 31332d1661a5SPawel Jakub Dawidek M_WAITOK | M_ZERO); 31342d1661a5SPawel Jakub Dawidek gp->start = g_raid3_start; 31352d1661a5SPawel Jakub Dawidek gp->orphan = g_raid3_orphan; 31362d1661a5SPawel Jakub Dawidek gp->access = g_raid3_access; 31372d1661a5SPawel Jakub Dawidek gp->dumpconf = g_raid3_dumpconf; 31382d1661a5SPawel Jakub Dawidek 31392d1661a5SPawel Jakub Dawidek sc->sc_id = md->md_id; 31402d1661a5SPawel Jakub Dawidek sc->sc_mediasize = md->md_mediasize; 31412d1661a5SPawel Jakub Dawidek sc->sc_sectorsize = md->md_sectorsize; 31422d1661a5SPawel Jakub Dawidek sc->sc_ndisks = md->md_all; 3143f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin = 0; 31442d1661a5SPawel Jakub Dawidek sc->sc_flags = md->md_mflags; 3145a245a548SPawel Jakub Dawidek sc->sc_bump_id = 0; 31460962f942SPawel Jakub Dawidek sc->sc_idle = 1; 314701f1f41cSPawel Jakub Dawidek sc->sc_last_write = time_uptime; 31480962f942SPawel Jakub Dawidek sc->sc_writes = 0; 3149afd05d74SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 3150afd05d74SPawel Jakub Dawidek sc->sc_disks[n].d_softc = sc; 3151afd05d74SPawel Jakub Dawidek sc->sc_disks[n].d_no = n; 31522d1661a5SPawel Jakub Dawidek sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK; 3153afd05d74SPawel Jakub Dawidek } 31543650be51SPawel Jakub Dawidek sx_init(&sc->sc_lock, "graid3:lock"); 31552d1661a5SPawel Jakub Dawidek bioq_init(&sc->sc_queue); 31562d1661a5SPawel Jakub Dawidek mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF); 31573650be51SPawel Jakub Dawidek bioq_init(&sc->sc_regular_delayed); 31583650be51SPawel Jakub Dawidek bioq_init(&sc->sc_inflight); 31593650be51SPawel Jakub Dawidek bioq_init(&sc->sc_sync_delayed); 31602d1661a5SPawel Jakub Dawidek TAILQ_INIT(&sc->sc_events); 31612d1661a5SPawel Jakub Dawidek mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF); 3162fd90e2edSJung-uk Kim callout_init(&sc->sc_callout, 1); 31632d1661a5SPawel Jakub Dawidek sc->sc_state = G_RAID3_DEVICE_STATE_STARTING; 31642d1661a5SPawel Jakub Dawidek gp->softc = sc; 31652d1661a5SPawel Jakub Dawidek sc->sc_geom = gp; 31662d1661a5SPawel Jakub Dawidek sc->sc_provider = NULL; 31672d1661a5SPawel Jakub Dawidek /* 31682d1661a5SPawel Jakub Dawidek * Synchronization geom. 
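 * It exports no provider of its own; its consumers are attached to the
 * array's provider and are used read-only while a stale component is
 * being rebuilt.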
31692d1661a5SPawel Jakub Dawidek */ 31702d1661a5SPawel Jakub Dawidek gp = g_new_geomf(mp, "%s.sync", md->md_name); 31712d1661a5SPawel Jakub Dawidek gp->softc = sc; 31722d1661a5SPawel Jakub Dawidek gp->orphan = g_raid3_orphan; 31732d1661a5SPawel Jakub Dawidek sc->sc_sync.ds_geom = gp; 31743650be51SPawel Jakub Dawidek 3175ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 3176ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k", 3177ed940a82SPawel Jakub Dawidek 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3178ed940a82SPawel Jakub Dawidek UMA_ALIGN_PTR, 0); 31793650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0; 31803650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k; 31813650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_requested = 31823650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0; 3183ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k", 3184ed940a82SPawel Jakub Dawidek 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3185ed940a82SPawel Jakub Dawidek UMA_ALIGN_PTR, 0); 31863650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0; 31873650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k; 31883650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_requested = 31893650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0; 3190ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k", 3191ed940a82SPawel Jakub Dawidek 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3192ed940a82SPawel Jakub Dawidek UMA_ALIGN_PTR, 0); 31933650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0; 31943650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k; 31953650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_requested = 31963650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0; 3197ed940a82SPawel Jakub Dawidek } 31983650be51SPawel Jakub Dawidek 31993745c395SJulian Elischer error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0, 32002d1661a5SPawel Jakub Dawidek "g_raid3 %s", md->md_name); 32012d1661a5SPawel Jakub Dawidek if (error != 0) { 32022d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.", 32032d1661a5SPawel Jakub Dawidek sc->sc_name); 3204ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 32053650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone); 32063650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone); 32073650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone); 3208ed940a82SPawel Jakub Dawidek } 32092d1661a5SPawel Jakub Dawidek g_destroy_geom(sc->sc_sync.ds_geom); 32102d1661a5SPawel Jakub Dawidek mtx_destroy(&sc->sc_events_mtx); 32112d1661a5SPawel Jakub Dawidek mtx_destroy(&sc->sc_queue_mtx); 32123650be51SPawel Jakub Dawidek sx_destroy(&sc->sc_lock); 32132d1661a5SPawel Jakub Dawidek g_destroy_geom(sc->sc_geom); 32142d1661a5SPawel Jakub Dawidek free(sc->sc_disks, M_RAID3); 32152d1661a5SPawel Jakub Dawidek free(sc, M_RAID3); 32162d1661a5SPawel Jakub Dawidek return (NULL); 32172d1661a5SPawel Jakub Dawidek } 32182d1661a5SPawel Jakub Dawidek 32190cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).", 32200cca572eSJohn-Mark Gurney sc->sc_name, sc->sc_ndisks, sc->sc_id); 32212d1661a5SPawel Jakub 
Dawidek 3222853a10a5SAndrew Thompson sc->sc_rootmount = root_mount_hold("GRAID3"); 32234ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 32244ed854e8SPawel Jakub Dawidek 32252d1661a5SPawel Jakub Dawidek /* 32262d1661a5SPawel Jakub Dawidek * Run timeout. 32272d1661a5SPawel Jakub Dawidek */ 32282d1661a5SPawel Jakub Dawidek timeout = atomic_load_acq_int(&g_raid3_timeout); 32292d1661a5SPawel Jakub Dawidek callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc); 32302d1661a5SPawel Jakub Dawidek return (sc->sc_geom); 32312d1661a5SPawel Jakub Dawidek } 32322d1661a5SPawel Jakub Dawidek 32332d1661a5SPawel Jakub Dawidek int 3234712fe9bdSPawel Jakub Dawidek g_raid3_destroy(struct g_raid3_softc *sc, int how) 32352d1661a5SPawel Jakub Dawidek { 32362d1661a5SPawel Jakub Dawidek struct g_provider *pp; 32372d1661a5SPawel Jakub Dawidek 32383650be51SPawel Jakub Dawidek g_topology_assert_not(); 32392d1661a5SPawel Jakub Dawidek if (sc == NULL) 32402d1661a5SPawel Jakub Dawidek return (ENXIO); 32413650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 32423650be51SPawel Jakub Dawidek 32432d1661a5SPawel Jakub Dawidek pp = sc->sc_provider; 32442d1661a5SPawel Jakub Dawidek if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 3245712fe9bdSPawel Jakub Dawidek switch (how) { 3246712fe9bdSPawel Jakub Dawidek case G_RAID3_DESTROY_SOFT: 32472d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 32482d1661a5SPawel Jakub Dawidek "Device %s is still open (r%dw%de%d).", pp->name, 32492d1661a5SPawel Jakub Dawidek pp->acr, pp->acw, pp->ace); 32502d1661a5SPawel Jakub Dawidek return (EBUSY); 3251712fe9bdSPawel Jakub Dawidek case G_RAID3_DESTROY_DELAYED: 3252712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, 3253712fe9bdSPawel Jakub Dawidek "Device %s will be destroyed on last close.", 3254712fe9bdSPawel Jakub Dawidek pp->name); 3255712fe9bdSPawel Jakub Dawidek if (sc->sc_syncdisk != NULL) 3256712fe9bdSPawel Jakub Dawidek g_raid3_sync_stop(sc, 1); 3257712fe9bdSPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING; 3258712fe9bdSPawel Jakub Dawidek return (EBUSY); 3259712fe9bdSPawel Jakub Dawidek case G_RAID3_DESTROY_HARD: 3260712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Device %s is still open, so it " 3261712fe9bdSPawel Jakub Dawidek "can't be definitely removed.", pp->name); 3262712fe9bdSPawel Jakub Dawidek break; 32632d1661a5SPawel Jakub Dawidek } 32642d1661a5SPawel Jakub Dawidek } 32652d1661a5SPawel Jakub Dawidek 326618486a5eSPawel Jakub Dawidek g_topology_lock(); 326718486a5eSPawel Jakub Dawidek if (sc->sc_geom->softc == NULL) { 326818486a5eSPawel Jakub Dawidek g_topology_unlock(); 326918486a5eSPawel Jakub Dawidek return (0); 327018486a5eSPawel Jakub Dawidek } 327118486a5eSPawel Jakub Dawidek sc->sc_geom->softc = NULL; 327218486a5eSPawel Jakub Dawidek sc->sc_sync.ds_geom->softc = NULL; 327318486a5eSPawel Jakub Dawidek g_topology_unlock(); 327418486a5eSPawel Jakub Dawidek 32752d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 32762d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT; 32772d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 32783650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 32792d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 32802d1661a5SPawel Jakub Dawidek wakeup(sc); 32812d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 32822d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 32832d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sleeping %p.", 
__func__, &sc->sc_worker);
32842d1661a5SPawel Jakub Dawidek while (sc->sc_worker != NULL)
32852d1661a5SPawel Jakub Dawidek tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
32862d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
32873650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock);
32882d1661a5SPawel Jakub Dawidek g_raid3_destroy_device(sc);
32892d1661a5SPawel Jakub Dawidek free(sc->sc_disks, M_RAID3);
32902d1661a5SPawel Jakub Dawidek free(sc, M_RAID3);
32912d1661a5SPawel Jakub Dawidek return (0);
32922d1661a5SPawel Jakub Dawidek }
32932d1661a5SPawel Jakub Dawidek
32942d1661a5SPawel Jakub Dawidek static void
32952d1661a5SPawel Jakub Dawidek g_raid3_taste_orphan(struct g_consumer *cp)
32962d1661a5SPawel Jakub Dawidek {
32972d1661a5SPawel Jakub Dawidek
32982d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
32992d1661a5SPawel Jakub Dawidek cp->provider->name));
33002d1661a5SPawel Jakub Dawidek }
33012d1661a5SPawel Jakub Dawidek
33022d1661a5SPawel Jakub Dawidek static struct g_geom *
33032d1661a5SPawel Jakub Dawidek g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
33042d1661a5SPawel Jakub Dawidek {
33052d1661a5SPawel Jakub Dawidek struct g_raid3_metadata md;
33062d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc;
33072d1661a5SPawel Jakub Dawidek struct g_consumer *cp;
33082d1661a5SPawel Jakub Dawidek struct g_geom *gp;
33092d1661a5SPawel Jakub Dawidek int error;
33102d1661a5SPawel Jakub Dawidek
33112d1661a5SPawel Jakub Dawidek g_topology_assert();
33122d1661a5SPawel Jakub Dawidek g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
33132d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Tasting %s.", pp->name);
33142d1661a5SPawel Jakub Dawidek
33152d1661a5SPawel Jakub Dawidek gp = g_new_geomf(mp, "raid3:taste");
33162d1661a5SPawel Jakub Dawidek /* This orphan function should never be called. */
33172d1661a5SPawel Jakub Dawidek gp->orphan = g_raid3_taste_orphan;
33182d1661a5SPawel Jakub Dawidek cp = g_new_consumer(gp);
33192d1661a5SPawel Jakub Dawidek g_attach(cp, pp);
33202d1661a5SPawel Jakub Dawidek error = g_raid3_read_metadata(cp, &md);
33212d1661a5SPawel Jakub Dawidek g_detach(cp);
33222d1661a5SPawel Jakub Dawidek g_destroy_consumer(cp);
33232d1661a5SPawel Jakub Dawidek g_destroy_geom(gp);
33242d1661a5SPawel Jakub Dawidek if (error != 0)
33252d1661a5SPawel Jakub Dawidek return (NULL);
33262d1661a5SPawel Jakub Dawidek gp = NULL;
33272d1661a5SPawel Jakub Dawidek
332890f2be24SAlexander Motin if (md.md_provider[0] != '\0' &&
332990f2be24SAlexander Motin !g_compare_names(md.md_provider, pp->name))
33302d1661a5SPawel Jakub Dawidek return (NULL);
3331e6890985SPawel Jakub Dawidek if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3332e6890985SPawel Jakub Dawidek return (NULL);
33332d1661a5SPawel Jakub Dawidek if (g_raid3_debug >= 2)
33342d1661a5SPawel Jakub Dawidek raid3_metadata_dump(&md);
33352d1661a5SPawel Jakub Dawidek
33362d1661a5SPawel Jakub Dawidek /*
33372d1661a5SPawel Jakub Dawidek * Let's check if the device already exists.
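 * Geoms are matched by name; a matching name with a different device ID
 * means the component belongs to another array that uses the same name,
 * so it is rejected below.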
33382d1661a5SPawel Jakub Dawidek */ 333945d5e85aSPawel Jakub Dawidek sc = NULL; 33402d1661a5SPawel Jakub Dawidek LIST_FOREACH(gp, &mp->geom, geom) { 33412d1661a5SPawel Jakub Dawidek sc = gp->softc; 33422d1661a5SPawel Jakub Dawidek if (sc == NULL) 33432d1661a5SPawel Jakub Dawidek continue; 33442d1661a5SPawel Jakub Dawidek if (sc->sc_sync.ds_geom == gp) 33452d1661a5SPawel Jakub Dawidek continue; 33462d1661a5SPawel Jakub Dawidek if (strcmp(md.md_name, sc->sc_name) != 0) 33472d1661a5SPawel Jakub Dawidek continue; 33482d1661a5SPawel Jakub Dawidek if (md.md_id != sc->sc_id) { 33492d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s already configured.", 33502d1661a5SPawel Jakub Dawidek sc->sc_name); 33512d1661a5SPawel Jakub Dawidek return (NULL); 33522d1661a5SPawel Jakub Dawidek } 33532d1661a5SPawel Jakub Dawidek break; 33542d1661a5SPawel Jakub Dawidek } 33552d1661a5SPawel Jakub Dawidek if (gp == NULL) { 33562d1661a5SPawel Jakub Dawidek gp = g_raid3_create(mp, &md); 33572d1661a5SPawel Jakub Dawidek if (gp == NULL) { 33582d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot create device %s.", 33592d1661a5SPawel Jakub Dawidek md.md_name); 33602d1661a5SPawel Jakub Dawidek return (NULL); 33612d1661a5SPawel Jakub Dawidek } 33622d1661a5SPawel Jakub Dawidek sc = gp->softc; 33632d1661a5SPawel Jakub Dawidek } 33642d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 33653650be51SPawel Jakub Dawidek g_topology_unlock(); 33663650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 33672d1661a5SPawel Jakub Dawidek error = g_raid3_add_disk(sc, pp, &md); 33682d1661a5SPawel Jakub Dawidek if (error != 0) { 33692d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 33702d1661a5SPawel Jakub Dawidek pp->name, gp->name, error); 33712d1661a5SPawel Jakub Dawidek if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) == 33722d1661a5SPawel Jakub Dawidek sc->sc_ndisks) { 3373712fe9bdSPawel Jakub Dawidek g_cancel_event(sc); 33743525bb6bSPawel Jakub Dawidek g_raid3_destroy(sc, G_RAID3_DESTROY_HARD); 33753650be51SPawel Jakub Dawidek g_topology_lock(); 33762d1661a5SPawel Jakub Dawidek return (NULL); 33772d1661a5SPawel Jakub Dawidek } 33783650be51SPawel Jakub Dawidek gp = NULL; 33793650be51SPawel Jakub Dawidek } 33803650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 33813650be51SPawel Jakub Dawidek g_topology_lock(); 33822d1661a5SPawel Jakub Dawidek return (gp); 33832d1661a5SPawel Jakub Dawidek } 33842d1661a5SPawel Jakub Dawidek 33852d1661a5SPawel Jakub Dawidek static int 33862d1661a5SPawel Jakub Dawidek g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, 33872d1661a5SPawel Jakub Dawidek struct g_geom *gp) 33882d1661a5SPawel Jakub Dawidek { 33893650be51SPawel Jakub Dawidek struct g_raid3_softc *sc; 33903650be51SPawel Jakub Dawidek int error; 33912d1661a5SPawel Jakub Dawidek 33923650be51SPawel Jakub Dawidek g_topology_unlock(); 33933650be51SPawel Jakub Dawidek sc = gp->softc; 33943650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3395712fe9bdSPawel Jakub Dawidek g_cancel_event(sc); 33963525bb6bSPawel Jakub Dawidek error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT); 33973650be51SPawel Jakub Dawidek if (error != 0) 33983650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 33993650be51SPawel Jakub Dawidek g_topology_lock(); 34003650be51SPawel Jakub Dawidek return (error); 34012d1661a5SPawel Jakub Dawidek } 34022d1661a5SPawel Jakub Dawidek 34032d1661a5SPawel Jakub Dawidek static void 34042d1661a5SPawel Jakub Dawidek 
g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 34052d1661a5SPawel Jakub Dawidek struct g_consumer *cp, struct g_provider *pp) 34062d1661a5SPawel Jakub Dawidek { 34072d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 34082d1661a5SPawel Jakub Dawidek 34092d1661a5SPawel Jakub Dawidek g_topology_assert(); 34102d1661a5SPawel Jakub Dawidek 34112d1661a5SPawel Jakub Dawidek sc = gp->softc; 34122d1661a5SPawel Jakub Dawidek if (sc == NULL) 34132d1661a5SPawel Jakub Dawidek return; 34142d1661a5SPawel Jakub Dawidek /* Skip synchronization geom. */ 34152d1661a5SPawel Jakub Dawidek if (gp == sc->sc_sync.ds_geom) 34162d1661a5SPawel Jakub Dawidek return; 34172d1661a5SPawel Jakub Dawidek if (pp != NULL) { 34182d1661a5SPawel Jakub Dawidek /* Nothing here. */ 34192d1661a5SPawel Jakub Dawidek } else if (cp != NULL) { 34202d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 34212d1661a5SPawel Jakub Dawidek 34222d1661a5SPawel Jakub Dawidek disk = cp->private; 34232d1661a5SPawel Jakub Dawidek if (disk == NULL) 34242d1661a5SPawel Jakub Dawidek return; 34253650be51SPawel Jakub Dawidek g_topology_unlock(); 34263650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 34272d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Type>", indent); 34282d1661a5SPawel Jakub Dawidek if (disk->d_no == sc->sc_ndisks - 1) 342949ee0fceSAlexander Motin sbuf_cat(sb, "PARITY"); 34302d1661a5SPawel Jakub Dawidek else 343149ee0fceSAlexander Motin sbuf_cat(sb, "DATA"); 343249ee0fceSAlexander Motin sbuf_cat(sb, "</Type>\n"); 34332d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Number>%u</Number>\n", indent, 34342d1661a5SPawel Jakub Dawidek (u_int)disk->d_no); 34352d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 34362d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Synchronized>", indent); 34373650be51SPawel Jakub Dawidek if (disk->d_sync.ds_offset == 0) 343849ee0fceSAlexander Motin sbuf_cat(sb, "0%"); 34392d1661a5SPawel Jakub Dawidek else { 34402d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%u%%", 34413650be51SPawel Jakub Dawidek (u_int)((disk->d_sync.ds_offset * 100) / 3442c0d68b6eSPawel Jakub Dawidek (sc->sc_mediasize / (sc->sc_ndisks - 1)))); 34432d1661a5SPawel Jakub Dawidek } 344449ee0fceSAlexander Motin sbuf_cat(sb, "</Synchronized>\n"); 34454a7f7b10SGleb Smirnoff if (disk->d_sync.ds_offset > 0) { 34464a7f7b10SGleb Smirnoff sbuf_printf(sb, "%s<BytesSynced>%jd" 34474a7f7b10SGleb Smirnoff "</BytesSynced>\n", indent, 34484a7f7b10SGleb Smirnoff (intmax_t)disk->d_sync.ds_offset); 34494a7f7b10SGleb Smirnoff } 34502d1661a5SPawel Jakub Dawidek } 34512d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 34522d1661a5SPawel Jakub Dawidek disk->d_sync.ds_syncid); 3453a245a548SPawel Jakub Dawidek sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid); 34542d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Flags>", indent); 34552d1661a5SPawel Jakub Dawidek if (disk->d_flags == 0) 345649ee0fceSAlexander Motin sbuf_cat(sb, "NONE"); 34572d1661a5SPawel Jakub Dawidek else { 34582d1661a5SPawel Jakub Dawidek int first = 1; 34592d1661a5SPawel Jakub Dawidek 34602d1661a5SPawel Jakub Dawidek #define ADD_FLAG(flag, name) do { \ 34612d1661a5SPawel Jakub Dawidek if ((disk->d_flags & (flag)) != 0) { \ 34622d1661a5SPawel Jakub Dawidek if (!first) \ 346349ee0fceSAlexander Motin sbuf_cat(sb, ", "); \ 34642d1661a5SPawel Jakub Dawidek else \ 34652d1661a5SPawel Jakub Dawidek first = 0; \ 346649ee0fceSAlexander Motin sbuf_cat(sb, name); \ 34672d1661a5SPawel Jakub Dawidek } \ 
34682d1661a5SPawel Jakub Dawidek } while (0) 34692d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY"); 34702d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED"); 34712d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING, 34722d1661a5SPawel Jakub Dawidek "SYNCHRONIZING"); 34732d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 34743aae74ecSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN"); 34752d1661a5SPawel Jakub Dawidek #undef ADD_FLAG 34762d1661a5SPawel Jakub Dawidek } 347749ee0fceSAlexander Motin sbuf_cat(sb, "</Flags>\n"); 34782d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<State>%s</State>\n", indent, 34792d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state)); 34803650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 34813650be51SPawel Jakub Dawidek g_topology_lock(); 34822d1661a5SPawel Jakub Dawidek } else { 34833650be51SPawel Jakub Dawidek g_topology_unlock(); 34843650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3485ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 3486ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3487ed940a82SPawel Jakub Dawidek "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent, 3488ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_requested); 3489ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3490ed940a82SPawel Jakub Dawidek "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent, 3491ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_failed); 3492ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3493ed940a82SPawel Jakub Dawidek "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent, 3494ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_requested); 3495ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3496ed940a82SPawel Jakub Dawidek "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent, 3497ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_failed); 3498ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3499ed940a82SPawel Jakub Dawidek "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent, 3500ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_requested); 3501ed940a82SPawel Jakub Dawidek sbuf_printf(sb, 3502ed940a82SPawel Jakub Dawidek "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent, 3503ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_failed); 3504ed940a82SPawel Jakub Dawidek } 35052d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 35062d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3507a245a548SPawel Jakub Dawidek sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 35082d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Flags>", indent); 35092d1661a5SPawel Jakub Dawidek if (sc->sc_flags == 0) 351049ee0fceSAlexander Motin sbuf_cat(sb, "NONE"); 35112d1661a5SPawel Jakub Dawidek else { 35122d1661a5SPawel Jakub Dawidek int first = 1; 35132d1661a5SPawel Jakub Dawidek 35142d1661a5SPawel Jakub Dawidek #define ADD_FLAG(flag, name) do { \ 35152d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & (flag)) != 0) { \ 35162d1661a5SPawel Jakub Dawidek if (!first) \ 351749ee0fceSAlexander Motin sbuf_cat(sb, ", "); \ 35182d1661a5SPawel Jakub Dawidek else \ 35192d1661a5SPawel Jakub Dawidek first = 0; \ 352049ee0fceSAlexander Motin sbuf_cat(sb, name); \ 35212d1661a5SPawel Jakub Dawidek } \ 35222d1661a5SPawel Jakub Dawidek } while (0) 3523501250baSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 
35242d1661a5SPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3525f5a2f7feSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN, 3526f5a2f7feSPawel Jakub Dawidek "ROUND-ROBIN"); 3527dba915cfSPawel Jakub Dawidek ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY"); 35282d1661a5SPawel Jakub Dawidek #undef ADD_FLAG 35292d1661a5SPawel Jakub Dawidek } 353049ee0fceSAlexander Motin sbuf_cat(sb, "</Flags>\n"); 35312d1661a5SPawel Jakub Dawidek sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 35322d1661a5SPawel Jakub Dawidek sc->sc_ndisks); 353328b31df7SPawel Jakub Dawidek sbuf_printf(sb, "%s<State>%s</State>\n", indent, 353428b31df7SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state)); 35353650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 35363650be51SPawel Jakub Dawidek g_topology_lock(); 35372d1661a5SPawel Jakub Dawidek } 35382d1661a5SPawel Jakub Dawidek } 35392d1661a5SPawel Jakub Dawidek 35409da3072cSPawel Jakub Dawidek static void 3541f62c1a47SAlexander Motin g_raid3_shutdown_post_sync(void *arg, int howto) 35429da3072cSPawel Jakub Dawidek { 35439da3072cSPawel Jakub Dawidek struct g_class *mp; 35449da3072cSPawel Jakub Dawidek struct g_geom *gp, *gp2; 35453650be51SPawel Jakub Dawidek struct g_raid3_softc *sc; 3546712fe9bdSPawel Jakub Dawidek int error; 35479da3072cSPawel Jakub Dawidek 35489da3072cSPawel Jakub Dawidek mp = arg; 35499da3072cSPawel Jakub Dawidek g_topology_lock(); 3550f62c1a47SAlexander Motin g_raid3_shutdown = 1; 35519da3072cSPawel Jakub Dawidek LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 35523650be51SPawel Jakub Dawidek if ((sc = gp->softc) == NULL) 35539da3072cSPawel Jakub Dawidek continue; 3554712fe9bdSPawel Jakub Dawidek /* Skip synchronization geom. */ 3555712fe9bdSPawel Jakub Dawidek if (gp == sc->sc_sync.ds_geom) 3556712fe9bdSPawel Jakub Dawidek continue; 35573650be51SPawel Jakub Dawidek g_topology_unlock(); 35583650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3559f62c1a47SAlexander Motin g_raid3_idle(sc, -1); 3560712fe9bdSPawel Jakub Dawidek g_cancel_event(sc); 3561712fe9bdSPawel Jakub Dawidek error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED); 3562712fe9bdSPawel Jakub Dawidek if (error != 0) 35633650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 35643650be51SPawel Jakub Dawidek g_topology_lock(); 35653650be51SPawel Jakub Dawidek } 35663650be51SPawel Jakub Dawidek g_topology_unlock(); 35673650be51SPawel Jakub Dawidek } 35683650be51SPawel Jakub Dawidek 35693650be51SPawel Jakub Dawidek static void 35709da3072cSPawel Jakub Dawidek g_raid3_init(struct g_class *mp) 35719da3072cSPawel Jakub Dawidek { 35729da3072cSPawel Jakub Dawidek 3573f62c1a47SAlexander Motin g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3574f62c1a47SAlexander Motin g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3575f62c1a47SAlexander Motin if (g_raid3_post_sync == NULL) 35769da3072cSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Warning! 
Cannot register shutdown event."); 35779da3072cSPawel Jakub Dawidek } 35789da3072cSPawel Jakub Dawidek 35799da3072cSPawel Jakub Dawidek static void 35809da3072cSPawel Jakub Dawidek g_raid3_fini(struct g_class *mp) 35819da3072cSPawel Jakub Dawidek { 35829da3072cSPawel Jakub Dawidek 3583f62c1a47SAlexander Motin if (g_raid3_post_sync != NULL) 3584f62c1a47SAlexander Motin EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync); 35859da3072cSPawel Jakub Dawidek } 35869da3072cSPawel Jakub Dawidek 35872d1661a5SPawel Jakub Dawidek DECLARE_GEOM_CLASS(g_raid3_class, g_raid3); 358874d6c131SKyle Evans MODULE_VERSION(geom_raid3, 0); 3589
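
/*
 * Example (following graid3(8)): a device handled by this class is labeled
 * from userland, which writes the on-disk metadata that g_raid3_taste()
 * later recognizes and assembles into /dev/raid3/<name>:
 *
 *	graid3 label -v data da0 da1 da2
 *	newfs /dev/raid3/data
 */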