gvinum.8 (552311f4bb98c81b1b9e0e81d74e0262fc12110b) | gvinum.8 (05d98029e9476e39b23f895225d36bf60f22ad91) |
---|---|
1.\" Copyright (c) 2005 Chris Jones 2.\" All rights reserved. 3.\" 4.\" This software was developed for the FreeBSD Project by Chris Jones 5.\" thanks to the support of Google's Summer of Code program and 6.\" mentoring by Lukas Ertl. 7.\" 8.\" Redistribution and use in source and binary forms, with or without --- 14 unchanged lines hidden (view full) --- 23.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27.\" SUCH DAMAGE. 28.\" 29.\" $FreeBSD$ 30.\" | 1.\" Copyright (c) 2005 Chris Jones 2.\" All rights reserved. 3.\" 4.\" This software was developed for the FreeBSD Project by Chris Jones 5.\" thanks to the support of Google's Summer of Code program and 6.\" mentoring by Lukas Ertl. 7.\" 8.\" Redistribution and use in source and binary forms, with or without --- 14 unchanged lines hidden (view full) --- 23.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27.\" SUCH DAMAGE. 28.\" 29.\" $FreeBSD$ 30.\" |
31.Dd April 10, 2009 | 31.Dd October 1, 2013 |
32.Dt GVINUM 8 33.Os 34.Sh NAME 35.Nm gvinum 36.Nd Logical Volume Manager control program 37.Sh SYNOPSIS 38.Nm 39.Op Ar command --- 234 unchanged lines hidden (view full) --- 274.Sh FILES 275.Bl -tag -width ".Pa /dev/gvinum/plex" 276.It Pa /dev/gvinum 277directory with device nodes for 278.Nm 279objects 280.El 281.Sh EXAMPLES | 32.Dt GVINUM 8 33.Os 34.Sh NAME 35.Nm gvinum 36.Nd Logical Volume Manager control program 37.Sh SYNOPSIS 38.Nm 39.Op Ar command --- 234 unchanged lines hidden (view full) --- 274.Sh FILES 275.Bl -tag -width ".Pa /dev/gvinum/plex" 276.It Pa /dev/gvinum 277directory with device nodes for 278.Nm 279objects 280.El 281.Sh EXAMPLES |
282To create a mirror on disks /dev/ad1 and /dev/ad2, create a filesystem, mount, 283unmount and then stop | 282To create a mirror on disks /dev/ada1 and /dev/ada2, create a filesystem, 283mount, unmount and then stop |
284.Ic gvinum : 285.Pp | 284.Ic gvinum : 285.Pp |
286.Dl "gvinum mirror /dev/ad1 /dev/ad2" | 286.Dl "gvinum mirror /dev/ada1 /dev/ada2" |
287.Dl "newfs /dev/gvinum/gvinumvolume0" 288.Dl "mount /dev/gvinum/gvinumvolume0 /mnt" 289.Dl "..." 290.Dl "umount /mnt" 291.Dl "gvinum stop" 292.Pp | 287.Dl "newfs /dev/gvinum/gvinumvolume0" 288.Dl "mount /dev/gvinum/gvinumvolume0 /mnt" 289.Dl "..." 290.Dl "umount /mnt" 291.Dl "gvinum stop" 292.Pp |
293To create a striped mirror on disks /dev/ad1 /dev/ad2 /dev/ad3 and /dev/ad4 294named "data" and create a filesystem: | 293To create a striped mirror on disks /dev/ada1 /dev/ada2 /dev/ada3 and 294/dev/ada4 named "data" and create a filesystem: |
295.Pp | 295.Pp |
296.Dl "gvinum mirror -s -n data /dev/ad1 /dev/ad2 /dev/ad3 /dev/ad4" | 296.Dl "gvinum mirror -s -n data /dev/ada1 /dev/ada2 /dev/ada3 /dev/ada4" |
297.Dl "newfs /dev/gvinum/data" 298.Pp | 297.Dl "newfs /dev/gvinum/data" 298.Pp |
299To create a raid5 array on disks /dev/ad1 /dev/ad2 and /dev/ad3, with stripesize 300493k you can use the raid5 command: | 299To create a raid5 array on disks /dev/ada1 /dev/ada2 and /dev/ada3, 300with stripesize 493k you can use the raid5 command: |
301.Pp | 301.Pp |
302.Dl "gvinum raid5 -s 493k /dev/ad1 /dev/ad2 /dev/ad3" | 302.Dl "gvinum raid5 -s 493k /dev/ada1 /dev/ada2 /dev/ada3" |
303.Pp 304Then the volume will be created automatically. 305Afterwards, you have to initialize the volume: 306.Pp 307.Dl "gvinum start myraid5vol" 308.Pp 309The initialization will start, and the states will be updated when it's 310finished. 311The list command will give you information about its progress. 312.Pp 313Imagine that one of the drives fails, and the output of 'printconfig' looks 314something like this: 315.Pp | 303.Pp 304Then the volume will be created automatically. 305Afterwards, you have to initialize the volume: 306.Pp 307.Dl "gvinum start myraid5vol" 308.Pp 309The initialization will start, and the states will be updated when it's 310finished. 311The list command will give you information about its progress. 312.Pp 313Imagine that one of the drives fails, and the output of 'printconfig' looks 314something like this: 315.Pp |
316.Dl "drive gvinumdrive1 device /dev/ad2" | 316.Dl "drive gvinumdrive1 device /dev/ada2" |
317.Dl "drive gvinumdrive2 device /dev/???" | 317.Dl "drive gvinumdrive2 device /dev/???" |
318.Dl "drive gvinumdrive0 device /dev/ad1" | 318.Dl "drive gvinumdrive0 device /dev/ada1" |
319.Dl "volume myraid5vol" 320.Dl "plex name myraid5vol.p0 org raid5 986s vol myraid5vol" 321.Dl "sd name myraid5vol.p0.s2 drive gvinumdrive2 len 32538s driveoffset 265s" 322.Dl "plex myraid5vol.p0 plexoffset 1972s" 323.Dl "sd name myraid5vol.p0.s1 drive gvinumdrive1 len 32538s driveoffset 265s" 324.Dl "plex myraid5vol.p0 plexoffset 986s" 325.Dl "sd name myraid5vol.p0.s0 drive gvinumdrive0 len 32538s driveoffset 265s" 326.Dl "plex myraid5vol.p0 plexoffset 0s" 327.Pp 328Create a new drive with this configuration: 329.Pp | 319.Dl "volume myraid5vol" 320.Dl "plex name myraid5vol.p0 org raid5 986s vol myraid5vol" 321.Dl "sd name myraid5vol.p0.s2 drive gvinumdrive2 len 32538s driveoffset 265s" 322.Dl "plex myraid5vol.p0 plexoffset 1972s" 323.Dl "sd name myraid5vol.p0.s1 drive gvinumdrive1 len 32538s driveoffset 265s" 324.Dl "plex myraid5vol.p0 plexoffset 986s" 325.Dl "sd name myraid5vol.p0.s0 drive gvinumdrive0 len 32538s driveoffset 265s" 326.Dl "plex myraid5vol.p0 plexoffset 0s" 327.Pp 328Create a new drive with this configuration: 329.Pp |
330.Dl "drive gdrive4 device /dev/ad4" | 330.Dl "drive gdrive4 device /dev/ada4" |
331.Pp 332Then move the stale subdisk to the new drive: 333.Pp 334.Dl "gvinum move gdrive4 myraid5vol.p0.s2" 335.Pp 336Then, initiate the rebuild: 337.Pp 338.Dl "gvinum start myraid5vol.p0" 339.Pp 340The plex will go up from degraded mode after the rebuild is finished. 341The plex can still be used while the rebuild is in progress, although requests 342might be delayed. 343.Pp 344Given the configuration as in the previous example, growing a RAID-5 or STRIPED 345array is accomplished by using the grow command: 346.Pp | 331.Pp 332Then move the stale subdisk to the new drive: 333.Pp 334.Dl "gvinum move gdrive4 myraid5vol.p0.s2" 335.Pp 336Then, initiate the rebuild: 337.Pp 338.Dl "gvinum start myraid5vol.p0" 339.Pp 340The plex will go up from degraded mode after the rebuild is finished. 341The plex can still be used while the rebuild is in progress, although requests 342might be delayed. 343.Pp 344Given the configuration as in the previous example, growing a RAID-5 or STRIPED 345array is accomplished by using the grow command: 346.Pp |
347.Dl "gvinum grow myraid5vol.p0 /dev/ad4" | 347.Dl "gvinum grow myraid5vol.p0 /dev/ada4" |
348.Pp 349If everything went OK, the plex state should now be set to growable. 350You can then start the growing with the 351.Ic start 352command: 353.Pp 354.Dl "gvinum start myraid5vol.p0" 355.Pp --- 91 unchanged lines hidden --- | 348.Pp 349If everything went OK, the plex state should now be set to growable. 350You can then start the growing with the 351.Ic start 352command: 353.Pp 354.Dl "gvinum start myraid5vol.p0" 355.Pp --- 91 unchanged lines hidden --- |