/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

static MALLOC_DEFINE(M_TR_CONCAT, "tr_concat_data", "GEOM_RAID CONCAT data");

struct g_raid_tr_concat_object {
	struct g_raid_tr_object	trso_base;
	int			trso_starting;
	int			trso_stopped;
};

static g_raid_tr_taste_t g_raid_tr_taste_concat;
static g_raid_tr_event_t g_raid_tr_event_concat;
static g_raid_tr_start_t g_raid_tr_start_concat;
static g_raid_tr_stop_t g_raid_tr_stop_concat;
static g_raid_tr_iostart_t g_raid_tr_iostart_concat;
static g_raid_tr_iodone_t g_raid_tr_iodone_concat;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_concat;
static g_raid_tr_free_t g_raid_tr_free_concat;

static kobj_method_t g_raid_tr_concat_methods[] = {
	KOBJMETHOD(g_raid_tr_taste, g_raid_tr_taste_concat),
	KOBJMETHOD(g_raid_tr_event, g_raid_tr_event_concat),
	KOBJMETHOD(g_raid_tr_start, g_raid_tr_start_concat),
	KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_concat),
	KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_concat),
	KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_concat),
	KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_concat),
	KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_concat),
	{ 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_concat_class = {
	"CONCAT",
	g_raid_tr_concat_methods,
	sizeof(struct g_raid_tr_concat_object),
	.trc_enable = 1,
	.trc_priority = 50,
	.trc_accept_unmapped = 1
};

static int
g_raid_tr_taste_concat(struct g_raid_tr_object *tr, struct g_raid_volume *volume)
{
	struct g_raid_tr_concat_object *trs;

	trs = (struct g_raid_tr_concat_object *)tr;
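	/*
	 * Accept SINGLE and CONCAT volumes, as well as any single-disk
	 * volume with a known RAID level.
	 */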
	if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_SINGLE &&
	    tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_CONCAT &&
	    !(tr->tro_volume->v_disks_count == 1 &&
	      tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_UNKNOWN))
		return (G_RAID_TR_TASTE_FAIL);
	trs->trso_starting = 1;
	return (G_RAID_TR_TASTE_SUCCEED);
}

static int
g_raid_tr_update_state_concat(struct g_raid_volume *vol)
{
	struct g_raid_tr_concat_object *trs;
	struct g_raid_softc *sc;
	off_t size;
	u_int s;
	int i, n, f;

	sc = vol->v_softc;
	trs = (struct g_raid_tr_concat_object *)vol->v_tr;
	if (trs->trso_stopped)
		s = G_RAID_VOLUME_S_STOPPED;
	else if (trs->trso_starting)
		s = G_RAID_VOLUME_S_STARTING;
	else {
		n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		f = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_FAILED);
		if (n + f == vol->v_disks_count) {
			if (f == 0)
				s = G_RAID_VOLUME_S_OPTIMAL;
			else
				s = G_RAID_VOLUME_S_SUBOPTIMAL;
		} else
			s = G_RAID_VOLUME_S_BROKEN;
	}
	if (s != vol->v_state) {
		/*
		 * Some metadata modules may not know CONCAT volume
		 * mediasize until all disks connected. Recalculate.
		 */
		if (vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT &&
		    G_RAID_VOLUME_S_ALIVE(s) &&
		    !G_RAID_VOLUME_S_ALIVE(vol->v_state)) {
			size = 0;
			for (i = 0; i < vol->v_disks_count; i++) {
				if (vol->v_subdisks[i].sd_state !=
				    G_RAID_SUBDISK_S_NONE)
					size += vol->v_subdisks[i].sd_size;
			}
			vol->v_mediasize = size;
		}

		g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
		    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		g_raid_change_volume_state(vol, s);
		if (!trs->trso_starting && !trs->trso_stopped)
			g_raid_write_metadata(sc, vol, NULL, NULL);
	}
	return (0);
}

static int
g_raid_tr_event_concat(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{
	struct g_raid_tr_concat_object *trs;
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	int state;

	trs = (struct g_raid_tr_concat_object *)tr;
	vol = tr->tro_volume;
	sc = vol->v_softc;

	state = sd->sd_state;
	if (state != G_RAID_SUBDISK_S_NONE &&
	    state != G_RAID_SUBDISK_S_FAILED &&
	    state != G_RAID_SUBDISK_S_ACTIVE) {
		G_RAID_DEBUG1(1, sc,
		    "Promote subdisk %s:%d from %s to ACTIVE.",
		    vol->v_name, sd->sd_pos,
		    g_raid_subdisk_state2str(sd->sd_state));
		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
	}
	if (state != sd->sd_state &&
	    !trs->trso_starting && !trs->trso_stopped)
		g_raid_write_metadata(sc, vol, sd, NULL);
	g_raid_tr_update_state_concat(vol);
	return (0);
}

static int
g_raid_tr_start_concat(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_concat_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_concat_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	g_raid_tr_update_state_concat(vol);
	return (0);
}

static int
g_raid_tr_stop_concat(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_concat_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_concat_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	trs->trso_stopped = 1;
	g_raid_tr_update_state_concat(vol);
	return (0);
}

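/*
 * Split an I/O request at subdisk boundaries: skip whole subdisks until
 * the one containing bio_offset is found, then clone the bio once per
 * subdisk touched and dispatch the pieces to the subdisks.
 */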
static void
g_raid_tr_iostart_concat(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	char *addr;
	off_t offset, length, remain;
	u_int no;

	vol = tr->tro_volume;
	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL) {
		g_raid_iodone(bp, EIO);
		return;
	}
	if (bp->bio_cmd == BIO_FLUSH || bp->bio_cmd == BIO_SPEEDUP) {
		g_raid_tr_flush_common(tr, bp);
		return;
	}

	offset = bp->bio_offset;
	remain = bp->bio_length;
	if ((bp->bio_flags & BIO_UNMAPPED) != 0)
		addr = NULL;
	else
		addr = bp->bio_data;
	no = 0;
	while (no < vol->v_disks_count &&
	    offset >= vol->v_subdisks[no].sd_size) {
		offset -= vol->v_subdisks[no].sd_size;
		no++;
	}
	if (no >= vol->v_disks_count) {
		g_raid_iodone(bp, EIO);
		return;
	}
	bioq_init(&queue);
	do {
		sd = &vol->v_subdisks[no];
		length = MIN(sd->sd_size - offset, remain);
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_offset = offset;
		cbp->bio_length = length;
		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    bp->bio_cmd != BIO_DELETE) {
			cbp->bio_ma_offset += (uintptr_t)addr;
			cbp->bio_ma += cbp->bio_ma_offset / PAGE_SIZE;
			cbp->bio_ma_offset %= PAGE_SIZE;
			cbp->bio_ma_n = round_page(cbp->bio_ma_offset +
			    cbp->bio_length) / PAGE_SIZE;
		} else
			cbp->bio_data = addr;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
		remain -= length;
		if (bp->bio_cmd != BIO_DELETE)
			addr += length;
		offset = 0;
		no++;
	} while (remain > 0 && no < vol->v_disks_count);
	bp->bio_completed = bp->bio_length - remain;
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	while ((cbp = bioq_takefirst(&queue)) != NULL)
		g_destroy_bio(cbp);
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

static int
g_raid_tr_kerneldump_concat(struct g_raid_tr_object *tr, void *virtual,
    off_t boffset, size_t blength)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	char *addr;
	off_t offset, length, remain;
	int error, no;

	vol = tr->tro_volume;
	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL)
		return (ENXIO);

	offset = boffset;
	remain = blength;
	addr = virtual;
	no = 0;
	while (no < vol->v_disks_count &&
	    offset >= vol->v_subdisks[no].sd_size) {
		offset -= vol->v_subdisks[no].sd_size;
		no++;
	}
	if (no >= vol->v_disks_count)
		return (EIO);
	do {
		sd = &vol->v_subdisks[no];
		length = MIN(sd->sd_size - offset, remain);
		error = g_raid_subdisk_kerneldump(&vol->v_subdisks[no],
		    addr, offset, length);
		if (error != 0)
			return (error);
		remain -= length;
		addr += length;
		offset = 0;
		no++;
	} while (remain > 0 && no < vol->v_disks_count);
	if (remain > 0)
		return (EIO);
	return (0);
}

static void
g_raid_tr_iodone_concat(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
	struct bio *pbp;

	pbp = bp->bio_parent;
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	g_destroy_bio(bp);
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		g_raid_iodone(pbp, pbp->bio_error);
	}
}

static int
g_raid_tr_free_concat(struct g_raid_tr_object *tr)
{

	return (0);
}

G_RAID_TR_DECLARE(concat, "CONCAT");