/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

static MALLOC_DEFINE(M_TR_RAID0, "tr_raid0_data", "GEOM_RAID RAID0 data");

struct g_raid_tr_raid0_object {
        struct g_raid_tr_object  trso_base;
        int                      trso_starting;
        int                      trso_stopped;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid0;
static g_raid_tr_event_t g_raid_tr_event_raid0;
static g_raid_tr_start_t g_raid_tr_start_raid0;
static g_raid_tr_stop_t g_raid_tr_stop_raid0;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid0;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid0;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid0;
static g_raid_tr_free_t g_raid_tr_free_raid0;

static kobj_method_t g_raid_tr_raid0_methods[] = {
        KOBJMETHOD(g_raid_tr_taste, g_raid_tr_taste_raid0),
        KOBJMETHOD(g_raid_tr_event, g_raid_tr_event_raid0),
        KOBJMETHOD(g_raid_tr_start, g_raid_tr_start_raid0),
        KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_raid0),
        KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_raid0),
        KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_raid0),
        KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid0),
        KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_raid0),
        { 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_raid0_class = {
        "RAID0",
        g_raid_tr_raid0_methods,
        sizeof(struct g_raid_tr_raid0_object),
        .trc_enable = 1,
        .trc_priority = 100,
        .trc_accept_unmapped = 1
};

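/*
 * Taste: claim only plain RAID0 volumes (level RAID0 with no level
 * qualifier); anything else is rejected so that another transformation
 * module can pick the volume up.
 */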
static int
g_raid_tr_taste_raid0(struct g_raid_tr_object *tr, struct g_raid_volume *volume)
{
        struct g_raid_tr_raid0_object *trs;

        trs = (struct g_raid_tr_raid0_object *)tr;
        if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID0 ||
            tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_NONE)
                return (G_RAID_TR_TASTE_FAIL);
        trs->trso_starting = 1;
        return (G_RAID_TR_TASTE_SUCCEED);
}

static int
g_raid_tr_update_state_raid0(struct g_raid_volume *vol)
{
        struct g_raid_tr_raid0_object *trs;
        struct g_raid_softc *sc;
        u_int s;
        int n, f;

        sc = vol->v_softc;
        trs = (struct g_raid_tr_raid0_object *)vol->v_tr;
        if (trs->trso_stopped)
                s = G_RAID_VOLUME_S_STOPPED;
        else if (trs->trso_starting)
                s = G_RAID_VOLUME_S_STARTING;
        else {
                n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
                f = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_FAILED);
                if (n + f == vol->v_disks_count) {
                        if (f == 0)
                                s = G_RAID_VOLUME_S_OPTIMAL;
                        else
                                s = G_RAID_VOLUME_S_SUBOPTIMAL;
                } else
                        s = G_RAID_VOLUME_S_BROKEN;
        }
        if (s != vol->v_state) {
                g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
                    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
                    G_RAID_EVENT_VOLUME);
                g_raid_change_volume_state(vol, s);
                if (!trs->trso_starting && !trs->trso_stopped)
                        g_raid_write_metadata(sc, vol, NULL, NULL);
        }
        return (0);
}

static int
g_raid_tr_event_raid0(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{
        struct g_raid_tr_raid0_object *trs;
        struct g_raid_softc *sc;
        struct g_raid_volume *vol;
        int state;

        trs = (struct g_raid_tr_raid0_object *)tr;
        vol = tr->tro_volume;
        sc = vol->v_softc;

        state = sd->sd_state;
        if (state != G_RAID_SUBDISK_S_NONE &&
            state != G_RAID_SUBDISK_S_FAILED &&
            state != G_RAID_SUBDISK_S_ACTIVE) {
                G_RAID_DEBUG1(1, sc,
                    "Promote subdisk %s:%d from %s to ACTIVE.",
                    vol->v_name, sd->sd_pos,
                    g_raid_subdisk_state2str(sd->sd_state));
                g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
        }
        if (state != sd->sd_state &&
            !trs->trso_starting && !trs->trso_stopped)
                g_raid_write_metadata(sc, vol, sd, NULL);
        g_raid_tr_update_state_raid0(vol);
        return (0);
}

static int
g_raid_tr_start_raid0(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid0_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid0_object *)tr;
        vol = tr->tro_volume;
        trs->trso_starting = 0;
        g_raid_tr_update_state_raid0(vol);
        return (0);
}

static int
g_raid_tr_stop_raid0(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid0_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid0_object *)tr;
        vol = tr->tro_volume;
        trs->trso_starting = 0;
        trs->trso_stopped = 1;
        g_raid_tr_update_state_raid0(vol);
        return (0);
}

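/*
 * Offset mapping used by the I/O and kerneldump paths below: a volume
 * offset is split into a stripe number (offset / strip_size) and a
 * start position within that stripe; the stripe number modulo
 * v_disks_count selects the subdisk, and dividing it by v_disks_count
 * gives the stripe's offset on that subdisk.  Requests are carved into
 * per-strip chunks and issued to the subdisks in round-robin order.
 */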
static void
g_raid_tr_iostart_raid0(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;
        struct bio_queue_head queue;
        struct bio *cbp;
        char *addr;
        off_t offset, start, length, nstripe, remain;
        u_int no, strip_size;

        vol = tr->tro_volume;
        if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
            vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL) {
                g_raid_iodone(bp, EIO);
                return;
        }
        if (bp->bio_cmd == BIO_FLUSH || bp->bio_cmd == BIO_SPEEDUP) {
                g_raid_tr_flush_common(tr, bp);
                return;
        }
        if ((bp->bio_flags & BIO_UNMAPPED) != 0)
                addr = NULL;
        else
                addr = bp->bio_data;
        strip_size = vol->v_strip_size;

        /* Stripe number. */
        nstripe = bp->bio_offset / strip_size;
        /* Start position in stripe. */
        start = bp->bio_offset % strip_size;
        /* Disk number. */
        no = nstripe % vol->v_disks_count;
        /* Stripe start position in disk. */
        offset = (nstripe / vol->v_disks_count) * strip_size;
        /* Length of data to operate. */
        remain = bp->bio_length;

        bioq_init(&queue);
        do {
                length = MIN(strip_size - start, remain);
                cbp = g_clone_bio(bp);
                if (cbp == NULL)
                        goto failure;
                cbp->bio_offset = offset + start;
                cbp->bio_length = length;
                if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
                    bp->bio_cmd != BIO_DELETE) {
                        cbp->bio_ma_offset += (uintptr_t)addr;
                        cbp->bio_ma += cbp->bio_ma_offset / PAGE_SIZE;
                        cbp->bio_ma_offset %= PAGE_SIZE;
                        cbp->bio_ma_n = round_page(cbp->bio_ma_offset +
                            cbp->bio_length) / PAGE_SIZE;
                } else
                        cbp->bio_data = addr;
                cbp->bio_caller1 = &vol->v_subdisks[no];
                bioq_insert_tail(&queue, cbp);
                if (++no >= vol->v_disks_count) {
                        no = 0;
                        offset += strip_size;
                }
                remain -= length;
                if (bp->bio_cmd != BIO_DELETE)
                        addr += length;
                start = 0;
        } while (remain > 0);
        while ((cbp = bioq_takefirst(&queue)) != NULL) {
                sd = cbp->bio_caller1;
                cbp->bio_caller1 = NULL;
                g_raid_subdisk_iostart(sd, cbp);
        }
        return;
failure:
        while ((cbp = bioq_takefirst(&queue)) != NULL)
                g_destroy_bio(cbp);
        if (bp->bio_error == 0)
                bp->bio_error = ENOMEM;
        g_raid_iodone(bp, bp->bio_error);
}

static int
g_raid_tr_kerneldump_raid0(struct g_raid_tr_object *tr,
    void *virtual, off_t boffset, size_t blength)
{
        struct g_raid_volume *vol;
        char *addr;
        off_t offset, start, length, nstripe, remain;
        u_int no, strip_size;
        int error;

        vol = tr->tro_volume;
        if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL)
                return (ENXIO);
        addr = virtual;
        strip_size = vol->v_strip_size;

        /* Stripe number. */
        nstripe = boffset / strip_size;
        /* Start position in stripe. */
        start = boffset % strip_size;
        /* Disk number. */
        no = nstripe % vol->v_disks_count;
        /* Stripe start position in disk. */
        offset = (nstripe / vol->v_disks_count) * strip_size;
        /* Length of data to operate. */
        remain = blength;

        do {
                length = MIN(strip_size - start, remain);
                error = g_raid_subdisk_kerneldump(&vol->v_subdisks[no], addr,
                    offset + start, length);
                if (error != 0)
                        return (error);
                if (++no >= vol->v_disks_count) {
                        no = 0;
                        offset += strip_size;
                }
                remain -= length;
                addr += length;
                start = 0;
        } while (remain > 0);
        return (0);
}

static void
g_raid_tr_iodone_raid0(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
        struct bio *pbp;

        pbp = bp->bio_parent;
        if (pbp->bio_error == 0)
                pbp->bio_error = bp->bio_error;
        g_destroy_bio(bp);
        pbp->bio_inbed++;
        if (pbp->bio_children == pbp->bio_inbed) {
                pbp->bio_completed = pbp->bio_length;
                g_raid_iodone(pbp, pbp->bio_error);
        }
}

static int
g_raid_tr_free_raid0(struct g_raid_tr_object *tr)
{

        return (0);
}

G_RAID_TR_DECLARE(raid0, "RAID0");
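
/*
 * G_RAID_TR_DECLARE() above registers this transformation class with the
 * GEOM RAID core.  As an illustrative (hypothetical) example, a volume
 * handled by this module could be created from userland with graid(8),
 * e.g. "graid label Intel gr0 RAID0 ada1 ada2", where the metadata
 * format, volume name and device names are placeholders.
 */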