/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>

/*
 * Virtual device vector for disks.
 */

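/*
 * LDI identity used for all vdev device opens below; it is defined
 * elsewhere in the zfs module.
 */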
extern ldi_ident_t zfs_li;

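/*
 * A buf_t with its owning zio attached.  The buf_t must be the first
 * member so that vdev_disk_io_intr() can recover the wrapper (and thus
 * the zio) from the buf pointer passed to the b_iodone callback.
 */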
typedef struct vdev_disk_buf {
	buf_t	vdb_buf;
	zio_t	*vdb_io;
} vdev_disk_buf_t;

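/*
 * Open the device backing this vdev and report its size and minimum
 * allocatable block shift (ashift) back to the caller.
 */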
static int
vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
{
	vdev_disk_t *dvd;
	int error;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	dvd = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

	/*
	 * When opening a disk device, we want to preserve the user's original
	 * intent.  We always want to open the device by the path the user gave
	 * us, even if it is one of multiple paths to the same device.  But we
	 * also want to be able to survive disks being removed/recabled.
	 * Therefore the sequence of opening devices is:
	 *
	 * 1. Try opening the device by path.  For legacy pools without the
	 *    'whole_disk' property, attempt to fix the path by appending 's0'.
	 *
	 * 2. If the devid of the device matches the stored value, return
	 *    success.
	 *
	 * 3. Otherwise, the device may have moved.  Try opening the device
	 *    by the devid instead.
	 */
	if (vd->vdev_devid != NULL) {
		if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
		    &dvd->vd_minor) != 0) {
			vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
			return (EINVAL);
		}
	}

	error = EINVAL;		/* presume failure */

	if (vd->vdev_path != NULL) {
		ddi_devid_t devid;

		if (vd->vdev_wholedisk == -1ULL) {
			size_t len = strlen(vd->vdev_path) + 3;
			char *buf = kmem_alloc(len, KM_SLEEP);
			ldi_handle_t lh;

			(void) snprintf(buf, len, "%ss0", vd->vdev_path);

			if (ldi_open_by_name(buf, spa_mode, kcred,
			    &lh, zfs_li) == 0) {
				spa_strfree(vd->vdev_path);
				vd->vdev_path = buf;
				vd->vdev_wholedisk = 1ULL;
				(void) ldi_close(lh, spa_mode, kcred);
			} else {
				kmem_free(buf, len);
			}
		}

		error = ldi_open_by_name(vd->vdev_path, spa_mode, kcred,
		    &dvd->vd_lh, zfs_li);

		/*
		 * Compare the devid to the stored value.
		 */
		if (error == 0 && vd->vdev_devid != NULL &&
		    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
			if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
				error = EINVAL;
				(void) ldi_close(dvd->vd_lh, spa_mode, kcred);
				dvd->vd_lh = NULL;
			}
			ddi_devid_free(devid);
		}

		/*
		 * If we succeeded in opening the device, but 'vdev_wholedisk'
		 * is not yet set, then this must be a slice.
		 */
		if (error == 0 && vd->vdev_wholedisk == -1ULL)
			vd->vdev_wholedisk = 0;
	}

	/*
	 * If we were unable to open by path, or the devid check failed,
	 * open by devid instead.
	 */
	if (error != 0 && vd->vdev_devid != NULL)
		error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor,
		    spa_mode, kcred, &dvd->vd_lh, zfs_li);

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	/*
	 * Determine the actual size of the device.
	 */
	if (ldi_get_size(dvd->vd_lh, psize) != 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (EINVAL);
	}

	*ashift = SPA_MINBLOCKSHIFT;

	if (vd->vdev_wholedisk == 1) {
		int wce, rc;

		/*
		 * Enable disk write caching if we own the whole disk.
		 * Ignore errors; this is only a performance optimization,
		 * and we work fine without it.
		 */
		error = 0;
		wce = 1;
		rc = ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
		    FKIOCTL, kcred, &error);

		if (rc || error)
			dprintf("%s: DKIOCSETWCE failed %d,%d",
			    vdev_description(vd), rc, error);
	}

	return (0);
}

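/*
 * Undo vdev_disk_open(): release the devid and minor name, close the
 * LDI handle, and free the per-vdev state.
 */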
static void
vdev_disk_close(vdev_t *vd)
{
	vdev_disk_t *dvd = vd->vdev_tsd;

	if (dvd == NULL)
		return;

	dprintf("removing disk %s, devid %s\n",
	    vd->vdev_path ? vd->vdev_path : "<none>",
	    vd->vdev_devid ? vd->vdev_devid : "<none>");

	if (dvd->vd_minor != NULL)
		ddi_devid_str_free(dvd->vd_minor);

	if (dvd->vd_devid != NULL)
		ddi_devid_free(dvd->vd_devid);

	if (dvd->vd_lh != NULL)
		(void) ldi_close(dvd->vd_lh, spa_mode, kcred);

	kmem_free(dvd, sizeof (vdev_disk_t));
	vd->vdev_tsd = NULL;
}

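/*
 * b_iodone handler for bufs issued through ldi_strategy().  Any error
 * (or short transfer) is propagated into the zio, the wrapper is freed,
 * and the zio pipeline is advanced asynchronously.
 */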
static void
vdev_disk_io_intr(buf_t *bp)
{
	vdev_disk_buf_t *vdb = (vdev_disk_buf_t *)bp;
	zio_t *zio = vdb->vdb_io;

	if ((zio->io_error = geterror(bp)) == 0 && bp->b_resid != 0)
		zio->io_error = EIO;

	kmem_free(vdb, sizeof (vdev_disk_buf_t));

	zio_next_stage_async(zio);
}

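/*
 * Completion callback for ioctls (currently only DKIOCFLUSHWRITECACHE)
 * that the driver completes asynchronously: record the result in the
 * zio and advance the pipeline.
 */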
static void
vdev_disk_ioctl_done(void *zio_arg, int error)
{
	zio_t *zio = zio_arg;

	zio->io_error = error;

	zio_next_stage_async(zio);
}

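/*
 * Issue an I/O to the device.  Ioctls (write cache flushes) go through
 * ldi_ioctl(); reads may be satisfied from the vdev cache, and the rest
 * are queued, mapped onto a buf, and handed to ldi_strategy().
 */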
static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_disk_t *dvd = vd->vdev_tsd;
	vdev_disk_buf_t *vdb;
	buf_t *bp;
	int flags, error;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		zio_vdev_io_bypass(zio);

		/* XXPOLICY */
		if (vdev_is_dead(vd)) {
			zio->io_error = ENXIO;
			zio_next_stage_async(zio);
			return;
		}

		switch (zio->io_cmd) {

		case DKIOCFLUSHWRITECACHE:

			zio->io_dk_callback.dkc_callback = vdev_disk_ioctl_done;
			zio->io_dk_callback.dkc_cookie = zio;

			error = ldi_ioctl(dvd->vd_lh, zio->io_cmd,
			    (uintptr_t)&zio->io_dk_callback,
			    FKIOCTL, kcred, NULL);

			if (error == 0) {
				/*
				 * The ioctl will be done asynchronously,
				 * and will call vdev_disk_ioctl_done()
				 * upon completion.
				 */
				return;
			}
			zio->io_error = error;
			break;

		default:
			zio->io_error = ENOTSUP;
		}

		zio_next_stage_async(zio);
		return;
	}

	if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
		return;

	if ((zio = vdev_queue_io(zio)) == NULL)
		return;

	flags = (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE);
	flags |= B_BUSY | B_NOCACHE;
	if (zio->io_flags & ZIO_FLAG_FAILFAST)
		flags |= B_FAILFAST;

	vdb = kmem_alloc(sizeof (vdev_disk_buf_t), KM_SLEEP);

	vdb->vdb_io = zio;
	bp = &vdb->vdb_buf;

	bioinit(bp);
	bp->b_flags = flags;
	bp->b_bcount = zio->io_size;
	bp->b_un.b_addr = zio->io_data;
	bp->b_lblkno = lbtodb(zio->io_offset);
	bp->b_bufsize = zio->io_size;
	bp->b_iodone = (int (*)())vdev_disk_io_intr;

	/* XXPOLICY */
	error = vdev_is_dead(vd) ? ENXIO : vdev_error_inject(vd, zio);
	if (error) {
		zio->io_error = error;
		bioerror(bp, error);
		bp->b_resid = bp->b_bcount;
		bp->b_iodone(bp);
		return;
	}

	error = ldi_strategy(dvd->vd_lh, bp);
	/* ldi_strategy() will return non-zero only on programming errors */
	ASSERT(error == 0);
}

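/*
 * Completion side of the pipeline: update the I/O queue, feed completed
 * writes to the vdev cache, and apply any injected errors before moving
 * to the next stage.
 */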
static void
vdev_disk_io_done(zio_t *zio)
{
	vdev_queue_io_done(zio);

	if (zio->io_type == ZIO_TYPE_WRITE)
		vdev_cache_write(zio);

	if (zio_injection_enabled && zio->io_error == 0)
		zio->io_error = zio_handle_device_injection(zio->io_vd, EIO);

	zio_next_stage(zio);
}

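/*
 * The disk vdev operation vector.  The NULL slot is the optional
 * state-change callback, which disk vdevs do not use.
 */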
vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};