1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 #include <sys/scsi/scsi.h>
27 #include <sys/dktp/cm.h>
28 #include <sys/dktp/quetypes.h>
29 #include <sys/dktp/queue.h>
30 #include <sys/dktp/fctypes.h>
31 #include <sys/dktp/flowctrl.h>
32 #include <sys/dktp/cmdev.h>
33 #include <sys/dkio.h>
34 #include <sys/dktp/tgdk.h>
35 #include <sys/dktp/dadk.h>
36 #include <sys/dktp/bbh.h>
37 #include <sys/dktp/altsctr.h>
38 #include <sys/dktp/cmdk.h>
39
40 #include <sys/stat.h>
41 #include <sys/vtoc.h>
42 #include <sys/file.h>
43 #include <sys/dktp/dadkio.h>
44 #include <sys/aio_req.h>
45
46 #include <sys/cmlb.h>
47
48 /*
49 * Local Static Data
50 */
51 #ifdef CMDK_DEBUG
52 #define DENT 0x0001
53 #define DIO 0x0002
54
55 static int cmdk_debug = DIO;
56 #endif
57
58 #ifndef TRUE
59 #define TRUE 1
60 #endif
61
62 #ifndef FALSE
63 #define FALSE 0
64 #endif
65
66 /*
67 * NDKMAP is the base number for accessing the fdisk partitions.
68 * c?d?p0 --> cmdk@?,?:q
69 */
70 #define PARTITION0_INDEX (NDKMAP + 0)
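/*
 * Illustrative sketch only (the real macros live in cmdk.h): the minor
 * number packs the slice/fdisk partition index in its low CMDK_UNITSHF
 * bits and the instance number above them (see the makedevice call in
 * cmdkprobe). Assuming CMDK_UNITSHF were 6, CMDKUNIT(dev) would reduce
 * to (getminor(dev) >> 6) and CMDKPART(dev) to (getminor(dev) & 0x3f),
 * making p0 partition index NDKMAP + 0 == 16.
 */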
71
72 #define DKTP_DATA (dkp->dk_tgobjp)->tg_data
73 #define DKTP_EXT (dkp->dk_tgobjp)->tg_ext
74
75 void *cmdk_state;
76
77 /*
78 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
79 * attach situations
80 */
81 static kmutex_t cmdk_attach_mutex;
82 static int cmdk_max_instance = 0;
83
84 /*
85 * Panic dumpsys state
86  * There is only a single flag, and it is not mutex protected, since
87  * the system prevents thread switching during a panic dump and
88  * cmdk_dump is only called single-threaded.
89 */
90 static int cmdk_indump;
91
92 /*
93 * Local Function Prototypes
94 */
95 static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
96 static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
97 static void cmdkmin(struct buf *bp);
98 static int cmdkrw(dev_t dev, struct uio *uio, int flag);
99 static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);
100
101 /*
102 * Bad Block Handling Functions Prototypes
103 */
104 static void cmdk_bbh_reopen(struct cmdk *dkp);
105 static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
106 static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
107 static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
108 static void cmdk_bbh_close(struct cmdk *dkp);
109 static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
110 static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);
111
112 static struct bbh_objops cmdk_bbh_ops = {
113 nulldev,
114 nulldev,
115 cmdk_bbh_gethandle,
116 cmdk_bbh_htoc,
117 cmdk_bbh_freehandle,
118 0, 0
119 };
120
121 static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
122 static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
123 static int cmdkstrategy(struct buf *bp);
124 static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
125 static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
126 static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
127 static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
128 static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
129 int mod_flags, char *name, caddr_t valuep, int *lengthp);
130 static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
131 static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);
132
133 /*
134 * Device driver ops vector
135 */
136
137 static struct cb_ops cmdk_cb_ops = {
138 cmdkopen, /* open */
139 cmdkclose, /* close */
140 cmdkstrategy, /* strategy */
141 nodev, /* print */
142 cmdkdump, /* dump */
143 cmdkread, /* read */
144 cmdkwrite, /* write */
145 cmdkioctl, /* ioctl */
146 nodev, /* devmap */
147 nodev, /* mmap */
148 nodev, /* segmap */
149 nochpoll, /* poll */
150 cmdk_prop_op, /* cb_prop_op */
151 0, /* streamtab */
152 	D_64BIT | D_MP | D_NEW,		/* Driver compatibility flag */
153 CB_REV, /* cb_rev */
154 cmdkaread, /* async read */
155 cmdkawrite /* async write */
156 };
157
158 static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
159 void **result);
160 static int cmdkprobe(dev_info_t *dip);
161 static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
162 static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);
163
164 static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
165 static int cmdkresume(dev_info_t *dip);
166 static int cmdksuspend(dev_info_t *dip);
167 static int cmdkpower(dev_info_t *dip, int component, int level);
168
169 struct dev_ops cmdk_ops = {
170 DEVO_REV, /* devo_rev, */
171 0, /* refcnt */
172 cmdkinfo, /* info */
173 nulldev, /* identify */
174 cmdkprobe, /* probe */
175 cmdkattach, /* attach */
176 cmdkdetach, /* detach */
177 nodev, /* reset */
178 &cmdk_cb_ops, /* driver operations */
179 (struct bus_ops *)0, /* bus operations */
180 cmdkpower, /* power */
181 ddi_quiesce_not_needed, /* quiesce */
182 };
183
184 /*
185 * This is the loadable module wrapper.
186 */
187 #include <sys/modctl.h>
188
189 #ifndef XPV_HVM_DRIVER
190 static struct modldrv modldrv = {
191 &mod_driverops, /* Type of module. This one is a driver */
192 "Common Direct Access Disk",
193 &cmdk_ops, /* driver ops */
194 };
195
196 static struct modlinkage modlinkage = {
197 MODREV_1, (void *)&modldrv, NULL
198 };
199
200
201 #else /* XPV_HVM_DRIVER */
202 static struct modlmisc modlmisc = {
203 &mod_miscops, /* Type of module. This one is a misc */
204 "HVM Common Direct Access Disk",
205 };
206
207 static struct modlinkage modlinkage = {
208 MODREV_1, (void *)&modlmisc, NULL
209 };
210
211 #endif /* XPV_HVM_DRIVER */
212
213 /* Function prototypes for cmlb callbacks */
214
215 static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
216 diskaddr_t start, size_t length, void *tg_cookie);
217
218 static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
219 void *tg_cookie);
220
221 static void cmdk_devid_setup(struct cmdk *dkp);
222 static int cmdk_devid_modser(struct cmdk *dkp);
223 static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
224 static int cmdk_devid_fabricate(struct cmdk *dkp);
225 static int cmdk_devid_read(struct cmdk *dkp);
226
227 static cmlb_tg_ops_t cmdk_lb_ops = {
228 TG_DK_OPS_VERSION_1,
229 cmdk_lb_rdwr,
230 cmdk_lb_getinfo
231 };
232
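/*
 * Return B_TRUE if the partition selected by 'dev' has any outstanding
 * layered or regular open; the caller must hold dk_mutex.
 */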
233 static boolean_t
234 cmdk_isopen(struct cmdk *dkp, dev_t dev)
235 {
236 int part, otyp;
237 ulong_t partbit;
238
239 ASSERT(MUTEX_HELD((&dkp->dk_mutex)));
240
241 part = CMDKPART(dev);
242 partbit = 1 << part;
243
244 	/* check for any layered or regular open on this partition */
245 if (dkp->dk_open_lyr[part] != 0)
246 return (B_TRUE);
247 for (otyp = 0; otyp < OTYPCNT; otyp++)
248 if (dkp->dk_open_reg[otyp] & partbit)
249 return (B_TRUE);
250 return (B_FALSE);
251 }
252
253 int
254 _init(void)
255 {
256 int rval;
257
258 #ifndef XPV_HVM_DRIVER
259 if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
260 return (rval);
261 #endif /* !XPV_HVM_DRIVER */
262
263 mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
264 if ((rval = mod_install(&modlinkage)) != 0) {
265 mutex_destroy(&cmdk_attach_mutex);
266 #ifndef XPV_HVM_DRIVER
267 ddi_soft_state_fini(&cmdk_state);
268 #endif /* !XPV_HVM_DRIVER */
269 }
270 return (rval);
271 }
272
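/*
 * cmdk deliberately refuses to unload: a nonzero return from _fini(9E)
 * makes every module unload attempt fail.
 */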
273 int
274 _fini(void)
275 {
276 return (EBUSY);
277 }
278
279 int
280 _info(struct modinfo *modinfop)
281 {
282 return (mod_info(&modlinkage, modinfop));
283 }
284
285 /*
286 * Autoconfiguration Routines
287 */
288 static int
289 cmdkprobe(dev_info_t *dip)
290 {
291 int instance;
292 int status;
293 struct cmdk *dkp;
294
295 instance = ddi_get_instance(dip);
296
297 #ifndef XPV_HVM_DRIVER
298 if (ddi_get_soft_state(cmdk_state, instance))
299 return (DDI_PROBE_PARTIAL);
300
301 if (ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS)
302 return (DDI_PROBE_PARTIAL);
303 #endif /* !XPV_HVM_DRIVER */
304
305 if ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL)
306 return (DDI_PROBE_PARTIAL);
307
308 mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
309 rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
310 dkp->dk_dip = dip;
311 mutex_enter(&dkp->dk_mutex);
312
313 dkp->dk_dev = makedevice(ddi_driver_major(dip),
314 ddi_get_instance(dip) << CMDK_UNITSHF);
315
316 /* linkage to dadk and strategy */
317 if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
318 mutex_exit(&dkp->dk_mutex);
319 mutex_destroy(&dkp->dk_mutex);
320 rw_destroy(&dkp->dk_bbh_mutex);
321 #ifndef XPV_HVM_DRIVER
322 ddi_soft_state_free(cmdk_state, instance);
323 #endif /* !XPV_HVM_DRIVER */
324 return (DDI_PROBE_PARTIAL);
325 }
326
327 status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
328 if (status != DDI_PROBE_SUCCESS) {
329 cmdk_destroy_obj(dip, dkp); /* dadk/strategy linkage */
330 mutex_exit(&dkp->dk_mutex);
331 mutex_destroy(&dkp->dk_mutex);
332 rw_destroy(&dkp->dk_bbh_mutex);
333 #ifndef XPV_HVM_DRIVER
334 ddi_soft_state_free(cmdk_state, instance);
335 #endif /* !XPV_HVM_DRIVER */
336 return (status);
337 }
338
339 mutex_exit(&dkp->dk_mutex);
340 #ifdef CMDK_DEBUG
341 if (cmdk_debug & DENT)
342 PRF("cmdkprobe: instance= %d name= `%s`\n",
343 instance, ddi_get_name_addr(dip));
344 #endif
345 return (status);
346 }
347
348 static int
349 cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
350 {
351 int instance;
352 struct cmdk *dkp;
353 char *node_type;
354
355 switch (cmd) {
356 case DDI_ATTACH:
357 break;
358 case DDI_RESUME:
359 return (cmdkresume(dip));
360 default:
361 return (DDI_FAILURE);
362 }
363
364 instance = ddi_get_instance(dip);
365 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
366 return (DDI_FAILURE);
367
368 dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
369 mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
370
371 mutex_enter(&dkp->dk_mutex);
372
373 /* dadk_attach is an empty function that only returns SUCCESS */
374 (void) dadk_attach(DKTP_DATA);
375
376 node_type = (DKTP_EXT->tg_nodetype);
377
378 /*
379 * this open allows cmlb to read the device
380 * and determine the label types
381  * so that cmlb can create minor nodes for the device
382 */
383
384 /* open the target disk */
385 if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
386 goto fail2;
387
388 #ifdef _ILP32
389 {
390 struct tgdk_geom phyg;
391 (void) dadk_getphygeom(DKTP_DATA, &phyg);
392 if ((phyg.g_cap - 1) > DK_MAX_BLOCKS) {
393 (void) dadk_close(DKTP_DATA);
394 goto fail2;
395 }
396 }
397 #endif
398
399
400 /* mark as having opened target */
401 dkp->dk_flag |= CMDK_TGDK_OPEN;
402
403 cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);
404
405 if (cmlb_attach(dip,
406 &cmdk_lb_ops,
407 DTYPE_DIRECT, /* device_type */
408 B_FALSE, /* removable */
409 B_FALSE, /* hot pluggable XXX */
410 node_type,
411 CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT, /* alter_behaviour */
412 dkp->dk_cmlbhandle,
413 0) != 0)
414 goto fail1;
415
416 /* Calling validate will create minor nodes according to disk label */
417 (void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);
418
419 /* set bbh (Bad Block Handling) */
420 cmdk_bbh_reopen(dkp);
421
422 /* setup devid string */
423 cmdk_devid_setup(dkp);
424
425 mutex_enter(&cmdk_attach_mutex);
426 if (instance > cmdk_max_instance)
427 cmdk_max_instance = instance;
428 mutex_exit(&cmdk_attach_mutex);
429
430 mutex_exit(&dkp->dk_mutex);
431
432 /*
433 * Add a zero-length attribute to tell the world we support
434 * kernel ioctls (for layered drivers)
435 */
436 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
437 DDI_KERNEL_IOCTL, NULL, 0);
438 ddi_report_dev(dip);
439
440 /*
441 * Initialize power management
442 */
443 mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
444 cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
445 cmdk_setup_pm(dip, dkp);
446
447 return (DDI_SUCCESS);
448
449 fail1:
450 cmlb_free_handle(&dkp->dk_cmlbhandle);
451 (void) dadk_close(DKTP_DATA);
452 fail2:
453 cmdk_destroy_obj(dip, dkp);
454 rw_destroy(&dkp->dk_bbh_mutex);
455 mutex_exit(&dkp->dk_mutex);
456 mutex_destroy(&dkp->dk_mutex);
457 #ifndef XPV_HVM_DRIVER
458 ddi_soft_state_free(cmdk_state, instance);
459 #endif /* !XPV_HVM_DRIVER */
460 return (DDI_FAILURE);
461 }
462
463
464 static int
465 cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
466 {
467 struct cmdk *dkp;
468 int instance;
469 int max_instance;
470
471 switch (cmd) {
472 case DDI_DETACH:
473 /* return (DDI_FAILURE); */
474 break;
475 case DDI_SUSPEND:
476 return (cmdksuspend(dip));
477 default:
478 #ifdef CMDK_DEBUG
479 if (cmdk_debug & DIO) {
480 PRF("cmdkdetach: cmd = %d unknown\n", cmd);
481 }
482 #endif
483 return (DDI_FAILURE);
484 }
485
486 mutex_enter(&cmdk_attach_mutex);
487 max_instance = cmdk_max_instance;
488 mutex_exit(&cmdk_attach_mutex);
489
490 /* check if any instance of driver is open */
491 for (instance = 0; instance < max_instance; instance++) {
492 dkp = ddi_get_soft_state(cmdk_state, instance);
493 if (!dkp)
494 continue;
495 if (dkp->dk_flag & CMDK_OPEN)
496 return (DDI_FAILURE);
497 }
498
499 instance = ddi_get_instance(dip);
500 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
501 return (DDI_SUCCESS);
502
503 mutex_enter(&dkp->dk_mutex);
504
505 /*
506 	 * The cmdk_part_info call at the end of cmdkattach may have
507 	 * caused cmdk_reopen to do a TGDK_OPEN; make sure we close it on
508 	 * detach for the case where cmdkopen/cmdkclose never occurs.
509 */
510 if (dkp->dk_flag & CMDK_TGDK_OPEN) {
511 dkp->dk_flag &= ~CMDK_TGDK_OPEN;
512 (void) dadk_close(DKTP_DATA);
513 }
514
515 cmlb_detach(dkp->dk_cmlbhandle, 0);
516 cmlb_free_handle(&dkp->dk_cmlbhandle);
517 ddi_prop_remove_all(dip);
518
519 cmdk_destroy_obj(dip, dkp); /* dadk/strategy linkage */
520
521 /*
522 * free the devid structure if allocated before
523 */
524 if (dkp->dk_devid) {
525 ddi_devid_free(dkp->dk_devid);
526 dkp->dk_devid = NULL;
527 }
528
529 mutex_exit(&dkp->dk_mutex);
530 mutex_destroy(&dkp->dk_mutex);
531 rw_destroy(&dkp->dk_bbh_mutex);
532 mutex_destroy(&dkp->dk_pm_mutex);
533 cv_destroy(&dkp->dk_suspend_cv);
534 #ifndef XPV_HVM_DRIVER
535 ddi_soft_state_free(cmdk_state, instance);
536 #endif /* !XPV_HVM_DRIVER */
537
538 return (DDI_SUCCESS);
539 }
540
541 static int
542 cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
543 {
544 dev_t dev = (dev_t)arg;
545 int instance;
546 struct cmdk *dkp;
547
548 #ifdef lint
549 dip = dip; /* no one ever uses this */
550 #endif
551 #ifdef CMDK_DEBUG
552 if (cmdk_debug & DENT)
553 PRF("cmdkinfo: call\n");
554 #endif
555 instance = CMDKUNIT(dev);
556
557 switch (infocmd) {
558 case DDI_INFO_DEVT2DEVINFO:
559 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
560 return (DDI_FAILURE);
561 *result = (void *) dkp->dk_dip;
562 break;
563 case DDI_INFO_DEVT2INSTANCE:
564 *result = (void *)(intptr_t)instance;
565 break;
566 default:
567 return (DDI_FAILURE);
568 }
569 return (DDI_SUCCESS);
570 }
571
572 /*
573 * Initialize the power management components
574 */
575 static void
576 cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
577 {
578 char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };
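	/*
	 * The trailing NULL is a local sentinel; only the first three
	 * strings are published as the "pm-components" property below.
	 */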
579
580 /*
581 	 * Since the cmdk device does not have the 'reg' property,
582 	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
583 	 * The following code tells cpr that this device
584 * DOES need to be suspended and resumed.
585 */
586 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
587 "pm-hardware-state", "needs-suspend-resume");
588
589 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
590 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
591 if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
592 mutex_enter(&dkp->dk_pm_mutex);
593 dkp->dk_pm_level = CMDK_SPINDLE_ON;
594 dkp->dk_pm_is_enabled = 1;
595 mutex_exit(&dkp->dk_pm_mutex);
596 } else {
597 mutex_enter(&dkp->dk_pm_mutex);
598 dkp->dk_pm_level = CMDK_SPINDLE_OFF;
599 dkp->dk_pm_is_enabled = 0;
600 mutex_exit(&dkp->dk_pm_mutex);
601 }
602 } else {
603 mutex_enter(&dkp->dk_pm_mutex);
604 dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
605 dkp->dk_pm_is_enabled = 0;
606 mutex_exit(&dkp->dk_pm_mutex);
607 }
608 }
609
610 /*
611  * suspend routine; it runs when the system power management
612  * framework issues DDI_SUSPEND through detach(9E)
613 */
614 static int
615 cmdksuspend(dev_info_t *dip)
616 {
617 struct cmdk *dkp;
618 int instance;
619 clock_t count = 0;
620
621 instance = ddi_get_instance(dip);
622 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
623 return (DDI_FAILURE);
624 mutex_enter(&dkp->dk_mutex);
625 if (dkp->dk_flag & CMDK_SUSPEND) {
626 mutex_exit(&dkp->dk_mutex);
627 return (DDI_SUCCESS);
628 }
629 dkp->dk_flag |= CMDK_SUSPEND;
630
631 /* need to wait a while */
632 while (dadk_getcmds(DKTP_DATA) != 0) {
633 delay(drv_usectohz(1000000));
634 if (count > 60) {
635 dkp->dk_flag &= ~CMDK_SUSPEND;
636 cv_broadcast(&dkp->dk_suspend_cv);
637 mutex_exit(&dkp->dk_mutex);
638 return (DDI_FAILURE);
639 }
640 count++;
641 }
642 mutex_exit(&dkp->dk_mutex);
643 return (DDI_SUCCESS);
644 }
645
646 /*
647  * resume routine; it runs when the system power management
648  * framework issues DDI_RESUME through attach(9E)
649 */
650 static int
651 cmdkresume(dev_info_t *dip)
652 {
653 struct cmdk *dkp;
654 int instance;
655
656 instance = ddi_get_instance(dip);
657 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
658 return (DDI_FAILURE);
659 mutex_enter(&dkp->dk_mutex);
660 if (!(dkp->dk_flag & CMDK_SUSPEND)) {
661 mutex_exit(&dkp->dk_mutex);
662 return (DDI_FAILURE);
663 }
664 dkp->dk_pm_level = CMDK_SPINDLE_ON;
665 dkp->dk_flag &= ~CMDK_SUSPEND;
666 cv_broadcast(&dkp->dk_suspend_cv);
667 mutex_exit(&dkp->dk_mutex);
668 return (DDI_SUCCESS);
669
670 }
671
672 /*
673  * power management entry point; it is used to change
674  * the power level of the power management component.
675  * Actually, the real hard drive suspend/resume
676  * is handled in the ata driver, so this function does
677  * no real work other than verifying that
678  * the disk is idle.
679 */
680 static int
681 cmdkpower(dev_info_t *dip, int component, int level)
682 {
683 struct cmdk *dkp;
684 int instance;
685
686 instance = ddi_get_instance(dip);
687 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
688 component != 0 || level > CMDK_SPINDLE_ON ||
689 level < CMDK_SPINDLE_OFF) {
690 return (DDI_FAILURE);
691 }
692
693 mutex_enter(&dkp->dk_pm_mutex);
694 if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
695 mutex_exit(&dkp->dk_pm_mutex);
696 return (DDI_SUCCESS);
697 }
698 mutex_exit(&dkp->dk_pm_mutex);
699
700 if ((level == CMDK_SPINDLE_OFF) &&
701 (dadk_getcmds(DKTP_DATA) != 0)) {
702 return (DDI_FAILURE);
703 }
704
705 mutex_enter(&dkp->dk_pm_mutex);
706 dkp->dk_pm_level = level;
707 mutex_exit(&dkp->dk_pm_mutex);
708 return (DDI_SUCCESS);
709 }
710
711 static int
712 cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
713 char *name, caddr_t valuep, int *lengthp)
714 {
715 struct cmdk *dkp;
716
717 #ifdef CMDK_DEBUG
718 if (cmdk_debug & DENT)
719 PRF("cmdk_prop_op: call\n");
720 #endif
721
722 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
723 if (dkp == NULL)
724 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
725 name, valuep, lengthp));
726
727 return (cmlb_prop_op(dkp->dk_cmlbhandle,
728 dev, dip, prop_op, mod_flags, name, valuep, lengthp,
729 CMDKPART(dev), NULL));
730 }
731
732 /*
733 * dump routine
734 */
735 static int
736 cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
737 {
738 int instance;
739 struct cmdk *dkp;
740 diskaddr_t p_lblksrt;
741 diskaddr_t p_lblkcnt;
742 struct buf local;
743 struct buf *bp;
744
745 #ifdef CMDK_DEBUG
746 if (cmdk_debug & DENT)
747 PRF("cmdkdump: call\n");
748 #endif
749 instance = CMDKUNIT(dev);
750 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
751 return (ENXIO);
752
753 if (cmlb_partinfo(
754 dkp->dk_cmlbhandle,
755 CMDKPART(dev),
756 &p_lblkcnt,
757 &p_lblksrt,
758 NULL,
759 NULL,
760 0)) {
761 return (ENXIO);
762 }
763
764 if ((blkno+nblk) > p_lblkcnt)
765 return (EINVAL);
766
767 	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */
768
769 bp = &local;
770 bzero(bp, sizeof (*bp));
771 bp->b_flags = B_BUSY;
772 bp->b_un.b_addr = addr;
773 bp->b_bcount = nblk << SCTRSHFT;
774 SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));
775
776 (void) dadk_dump(DKTP_DATA, bp);
777 return (bp->b_error);
778 }
779
780 /*
781 * Copy in the dadkio_rwcmd according to the user's data model. If needed,
782 * convert it for our internal use.
783 */
784 static int
785 rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
786 {
787 switch (ddi_model_convert_from(flag)) {
788 case DDI_MODEL_ILP32: {
789 struct dadkio_rwcmd32 cmd32;
790
791 if (ddi_copyin(inaddr, &cmd32,
792 sizeof (struct dadkio_rwcmd32), flag)) {
793 return (EFAULT);
794 }
795
796 rwcmdp->cmd = cmd32.cmd;
797 rwcmdp->flags = cmd32.flags;
798 rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr;
799 rwcmdp->buflen = cmd32.buflen;
800 rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
801 /*
802 * Note: we do not convert the 'status' field,
803 * as it should not contain valid data at this
804 * point.
805 */
806 bzero(&rwcmdp->status, sizeof (rwcmdp->status));
807 break;
808 }
809 case DDI_MODEL_NONE: {
810 if (ddi_copyin(inaddr, rwcmdp,
811 sizeof (struct dadkio_rwcmd), flag)) {
812 return (EFAULT);
813 }
814 }
815 }
816 return (0);
817 }
818
819 /*
820 * If necessary, convert the internal rwcmdp and status to the appropriate
821 * data model and copy it out to the user.
822 */
823 static int
824 rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
825 {
826 switch (ddi_model_convert_from(flag)) {
827 case DDI_MODEL_ILP32: {
828 struct dadkio_rwcmd32 cmd32;
829
830 cmd32.cmd = rwcmdp->cmd;
831 cmd32.flags = rwcmdp->flags;
832 cmd32.blkaddr = rwcmdp->blkaddr;
833 cmd32.buflen = rwcmdp->buflen;
834 ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
835 cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;
836
837 cmd32.status.status = rwcmdp->status.status;
838 cmd32.status.resid = rwcmdp->status.resid;
839 cmd32.status.failed_blk_is_valid =
840 rwcmdp->status.failed_blk_is_valid;
841 cmd32.status.failed_blk = rwcmdp->status.failed_blk;
842 cmd32.status.fru_code_is_valid =
843 rwcmdp->status.fru_code_is_valid;
844 cmd32.status.fru_code = rwcmdp->status.fru_code;
845
846 bcopy(rwcmdp->status.add_error_info,
847 cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);
848
849 if (ddi_copyout(&cmd32, outaddr,
850 sizeof (struct dadkio_rwcmd32), flag))
851 return (EFAULT);
852 break;
853 }
854 case DDI_MODEL_NONE: {
855 if (ddi_copyout(rwcmdp, outaddr,
856 sizeof (struct dadkio_rwcmd), flag))
857 return (EFAULT);
858 }
859 }
860 return (0);
861 }
862
863 /*
864 * ioctl routine
865 */
866 static int
867 cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
868 {
869 int instance;
870 struct scsi_device *devp;
871 struct cmdk *dkp;
872 char data[NBPSCTR];
873
874 instance = CMDKUNIT(dev);
875 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
876 return (ENXIO);
877
878 mutex_enter(&dkp->dk_mutex);
879 while (dkp->dk_flag & CMDK_SUSPEND) {
880 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
881 }
882 mutex_exit(&dkp->dk_mutex);
883
884 bzero(data, sizeof (data));
885
886 switch (cmd) {
887
888 case DKIOCGMEDIAINFO: {
889 struct dk_minfo media_info;
890 struct tgdk_geom phyg;
891
892 /* dadk_getphygeom always returns success */
893 (void) dadk_getphygeom(DKTP_DATA, &phyg);
894
895 media_info.dki_lbsize = phyg.g_secsiz;
896 media_info.dki_capacity = phyg.g_cap;
897 media_info.dki_media_type = DK_FIXED_DISK;
898
899 if (ddi_copyout(&media_info, (void *)arg,
900 sizeof (struct dk_minfo), flag)) {
901 return (EFAULT);
902 } else {
903 return (0);
904 }
905 }
906
907 case DKIOCINFO: {
908 struct dk_cinfo *info = (struct dk_cinfo *)data;
909
910 /* controller information */
911 info->dki_ctype = (DKTP_EXT->tg_ctype);
912 info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
913 (void) strcpy(info->dki_cname,
914 ddi_get_name(ddi_get_parent(dkp->dk_dip)));
915
916 /* Unit Information */
917 info->dki_unit = ddi_get_instance(dkp->dk_dip);
918 devp = ddi_get_driver_private(dkp->dk_dip);
919 info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
920 (void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
921 info->dki_flags = DKI_FMTVOL;
922 info->dki_partition = CMDKPART(dev);
923
924 info->dki_maxtransfer = maxphys / DEV_BSIZE;
925 info->dki_addr = 1;
926 info->dki_space = 0;
927 info->dki_prio = 0;
928 info->dki_vec = 0;
929
930 if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
931 return (EFAULT);
932 else
933 return (0);
934 }
935
936 case DKIOCSTATE: {
937 int state;
938 int rval;
939 diskaddr_t p_lblksrt;
940 diskaddr_t p_lblkcnt;
941
942 if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
943 return (EFAULT);
944
945 /* dadk_check_media blocks until state changes */
946 if (rval = dadk_check_media(DKTP_DATA, &state))
947 return (rval);
948
949 if (state == DKIO_INSERTED) {
950
951 if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
952 return (ENXIO);
953
954 if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
955 &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
956 return (ENXIO);
957
958 if (p_lblkcnt <= 0)
959 return (ENXIO);
960 }
961
962 if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
963 return (EFAULT);
964
965 return (0);
966 }
967
968 /*
969 * is media removable?
970 */
971 case DKIOCREMOVABLE: {
972 int i;
973
974 i = (DKTP_EXT->tg_rmb) ? 1 : 0;
975
976 if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
977 return (EFAULT);
978
979 return (0);
980 }
981
982 case DKIOCADDBAD:
983 /*
984 * This is not an update mechanism to add bad blocks
985 * to the bad block structures stored on disk.
986 *
987 * addbadsec(1M) will update the bad block data on disk
988 * and use this ioctl to force the driver to re-initialize
989 * the list of bad blocks in the driver.
990 */
991
992 /* start BBH */
993 cmdk_bbh_reopen(dkp);
994 return (0);
995
996 case DKIOCG_PHYGEOM:
997 case DKIOCG_VIRTGEOM:
998 case DKIOCGGEOM:
999 case DKIOCSGEOM:
1000 case DKIOCGAPART:
1001 case DKIOCSAPART:
1002 case DKIOCGVTOC:
1003 case DKIOCSVTOC:
1004 case DKIOCPARTINFO:
1005 case DKIOCGEXTVTOC:
1006 case DKIOCSEXTVTOC:
1007 case DKIOCEXTPARTINFO:
1008 case DKIOCGMBOOT:
1009 case DKIOCSMBOOT:
1010 case DKIOCGETEFI:
1011 case DKIOCSETEFI:
1012 case DKIOCPARTITION:
1013 case DKIOCSETEXTPART:
1014 {
1015 int rc;
1016
1017 rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
1018 credp, rvalp, 0);
1019 if (cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC)
1020 cmdk_devid_setup(dkp);
1021 return (rc);
1022 }
1023
1024 case DIOCTL_RWCMD: {
1025 struct dadkio_rwcmd *rwcmdp;
1026 int status;
1027
1028 rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);
1029
1030 status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);
1031
1032 if (status == 0) {
1033 bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
1034 status = dadk_ioctl(DKTP_DATA,
1035 dev,
1036 cmd,
1037 (uintptr_t)rwcmdp,
1038 flag,
1039 credp,
1040 rvalp);
1041 }
1042 if (status == 0)
1043 status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);
1044
1045 kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
1046 return (status);
1047 }
1048
1049 default:
1050 return (dadk_ioctl(DKTP_DATA,
1051 dev,
1052 cmd,
1053 arg,
1054 flag,
1055 credp,
1056 rvalp));
1057 }
1058 }
1059
1060 /*ARGSUSED1*/
1061 static int
1062 cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
1063 {
1064 int part;
1065 ulong_t partbit;
1066 int instance;
1067 struct cmdk *dkp;
1068 int lastclose = 1;
1069 int i;
1070
1071 instance = CMDKUNIT(dev);
1072 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
1073 (otyp >= OTYPCNT))
1074 return (ENXIO);
1075
1076 mutex_enter(&dkp->dk_mutex);
1077
1078 /* check if device has been opened */
1079 ASSERT(cmdk_isopen(dkp, dev));
1080 if (!(dkp->dk_flag & CMDK_OPEN)) {
1081 mutex_exit(&dkp->dk_mutex);
1082 return (ENXIO);
1083 }
1084
1085 while (dkp->dk_flag & CMDK_SUSPEND) {
1086 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1087 }
1088
1089 part = CMDKPART(dev);
1090 partbit = 1 << part;
1091
1092 /* account for close */
1093 if (otyp == OTYP_LYR) {
1094 ASSERT(dkp->dk_open_lyr[part] > 0);
1095 if (dkp->dk_open_lyr[part])
1096 dkp->dk_open_lyr[part]--;
1097 } else {
1098 ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
1099 dkp->dk_open_reg[otyp] &= ~partbit;
1100 }
1101 dkp->dk_open_exl &= ~partbit;
1102
1103 for (i = 0; i < CMDK_MAXPART; i++)
1104 if (dkp->dk_open_lyr[i] != 0) {
1105 lastclose = 0;
1106 break;
1107 }
1108
1109 if (lastclose)
1110 for (i = 0; i < OTYPCNT; i++)
1111 if (dkp->dk_open_reg[i] != 0) {
1112 lastclose = 0;
1113 break;
1114 }
1115
1116 mutex_exit(&dkp->dk_mutex);
1117
1118 if (lastclose)
1119 cmlb_invalidate(dkp->dk_cmlbhandle, 0);
1120
1121 return (DDI_SUCCESS);
1122 }
1123
1124 /*ARGSUSED3*/
1125 static int
1126 cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
1127 {
1128 dev_t dev = *dev_p;
1129 int part;
1130 ulong_t partbit;
1131 int instance;
1132 struct cmdk *dkp;
1133 diskaddr_t p_lblksrt;
1134 diskaddr_t p_lblkcnt;
1135 int i;
1136 int nodelay;
1137
1138 instance = CMDKUNIT(dev);
1139 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1140 return (ENXIO);
1141
1142 if (otyp >= OTYPCNT)
1143 return (EINVAL);
1144
1145 mutex_enter(&dkp->dk_mutex);
1146 while (dkp->dk_flag & CMDK_SUSPEND) {
1147 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1148 }
1149 mutex_exit(&dkp->dk_mutex);
1150
1151 part = CMDKPART(dev);
1152 partbit = 1 << part;
1153 nodelay = (flag & (FNDELAY | FNONBLOCK));
1154
1155 mutex_enter(&dkp->dk_mutex);
1156
1157 if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {
1158
1159 		/* fail unless this is a non-blocking (nodelay) open */
1160 if (!nodelay) {
1161 mutex_exit(&dkp->dk_mutex);
1162 return (ENXIO);
1163 }
1164 } else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
1165 &p_lblksrt, NULL, NULL, 0) == 0) {
1166
1167 if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
1168 mutex_exit(&dkp->dk_mutex);
1169 return (ENXIO);
1170 }
1171 } else {
1172 		/* fail unless this is a non-blocking (nodelay) open */
1173 if (!nodelay) {
1174 mutex_exit(&dkp->dk_mutex);
1175 return (ENXIO);
1176 }
1177 }
1178
1179 if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
1180 mutex_exit(&dkp->dk_mutex);
1181 return (EROFS);
1182 }
1183
1184 	/* check for part already opened exclusively */
1185 if (dkp->dk_open_exl & partbit)
1186 goto excl_open_fail;
1187
1188 /* check if we can establish exclusive open */
1189 if (flag & FEXCL) {
1190 if (dkp->dk_open_lyr[part])
1191 goto excl_open_fail;
1192 for (i = 0; i < OTYPCNT; i++) {
1193 if (dkp->dk_open_reg[i] & partbit)
1194 goto excl_open_fail;
1195 }
1196 }
1197
1198 /* open will succeed, account for open */
1199 dkp->dk_flag |= CMDK_OPEN;
1200 if (otyp == OTYP_LYR)
1201 dkp->dk_open_lyr[part]++;
1202 else
1203 dkp->dk_open_reg[otyp] |= partbit;
1204 if (flag & FEXCL)
1205 dkp->dk_open_exl |= partbit;
1206
1207 mutex_exit(&dkp->dk_mutex);
1208 return (DDI_SUCCESS);
1209
1210 excl_open_fail:
1211 mutex_exit(&dkp->dk_mutex);
1212 return (EBUSY);
1213 }
1214
1215 /*
1216 * read routine
1217 */
1218 /*ARGSUSED2*/
1219 static int
1220 cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
1221 {
1222 return (cmdkrw(dev, uio, B_READ));
1223 }
1224
1225 /*
1226 * async read routine
1227 */
1228 /*ARGSUSED2*/
1229 static int
1230 cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
1231 {
1232 return (cmdkarw(dev, aio, B_READ));
1233 }
1234
1235 /*
1236 * write routine
1237 */
1238 /*ARGSUSED2*/
1239 static int
1240 cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
1241 {
1242 return (cmdkrw(dev, uio, B_WRITE));
1243 }
1244
1245 /*
1246 * async write routine
1247 */
1248 /*ARGSUSED2*/
1249 static int
1250 cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
1251 {
1252 return (cmdkarw(dev, aio, B_WRITE));
1253 }
1254
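/*
 * minphys(9F)-style bounds routine handed to physio/aphysio below;
 * clamps each transfer to DK_MAXRECSIZE bytes.
 */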
1255 static void
1256 cmdkmin(struct buf *bp)
1257 {
1258 if (bp->b_bcount > DK_MAXRECSIZE)
1259 bp->b_bcount = DK_MAXRECSIZE;
1260 }
1261
1262 static int
1263 cmdkrw(dev_t dev, struct uio *uio, int flag)
1264 {
1265 int instance;
1266 struct cmdk *dkp;
1267
1268 instance = CMDKUNIT(dev);
1269 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1270 return (ENXIO);
1271
1272 mutex_enter(&dkp->dk_mutex);
1273 while (dkp->dk_flag & CMDK_SUSPEND) {
1274 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1275 }
1276 mutex_exit(&dkp->dk_mutex);
1277
1278 return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
1279 }
1280
1281 static int
1282 cmdkarw(dev_t dev, struct aio_req *aio, int flag)
1283 {
1284 int instance;
1285 struct cmdk *dkp;
1286
1287 instance = CMDKUNIT(dev);
1288 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1289 return (ENXIO);
1290
1291 mutex_enter(&dkp->dk_mutex);
1292 while (dkp->dk_flag & CMDK_SUSPEND) {
1293 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1294 }
1295 mutex_exit(&dkp->dk_mutex);
1296
1297 return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
1298 }
1299
1300 /*
1301 * strategy routine
1302 */
1303 static int
1304 cmdkstrategy(struct buf *bp)
1305 {
1306 int instance;
1307 struct cmdk *dkp;
1308 long d_cnt;
1309 diskaddr_t p_lblksrt;
1310 diskaddr_t p_lblkcnt;
1311
1312 instance = CMDKUNIT(bp->b_edev);
1313 if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
1314 (dkblock(bp) < 0)) {
1315 bp->b_resid = bp->b_bcount;
1316 SETBPERR(bp, ENXIO);
1317 biodone(bp);
1318 return (0);
1319 }
1320
1321 mutex_enter(&dkp->dk_mutex);
1322 ASSERT(cmdk_isopen(dkp, bp->b_edev));
1323 while (dkp->dk_flag & CMDK_SUSPEND) {
1324 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1325 }
1326 mutex_exit(&dkp->dk_mutex);
1327
1328 bp->b_flags &= ~(B_DONE|B_ERROR);
1329 bp->b_resid = 0;
1330 bp->av_back = NULL;
1331
1332 /*
1333 * only re-read the vtoc if necessary (force == FALSE)
1334 */
1335 if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
1336 &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
1337 SETBPERR(bp, ENXIO);
1338 }
1339
1340 if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
1341 SETBPERR(bp, ENXIO);
1342
1343 if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
1344 bp->b_resid = bp->b_bcount;
1345 biodone(bp);
1346 return (0);
1347 }
1348
1349 d_cnt = bp->b_bcount >> SCTRSHFT;
1350 if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
1351 bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
1352 bp->b_bcount -= bp->b_resid;
1353 }
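	/*
	 * E.g. a 16-sector request starting 8 sectors before the end of
	 * the partition is trimmed to 8 sectors here; the untransferred
	 * 8 sectors are reported back through b_resid.
	 */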
1354
1355 SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
1356 if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
1357 bp->b_resid += bp->b_bcount;
1358 biodone(bp);
1359 }
1360 return (0);
1361 }
1362
1363 static int
1364 cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
1365 {
1366 struct scsi_device *devp;
1367 opaque_t queobjp = NULL;
1368 opaque_t flcobjp = NULL;
1369 char que_keyvalp[64];
1370 int que_keylen;
1371 char flc_keyvalp[64];
1372 int flc_keylen;
1373
1374 ASSERT(mutex_owned(&dkp->dk_mutex));
1375
1376 /* Create linkage to queueing routines based on property */
1377 que_keylen = sizeof (que_keyvalp);
1378 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1379 DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
1380 DDI_PROP_SUCCESS) {
1381 cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
1382 return (DDI_FAILURE);
1383 }
1384 que_keyvalp[que_keylen] = (char)0;
1385
1386 if (strcmp(que_keyvalp, "qfifo") == 0) {
1387 queobjp = (opaque_t)qfifo_create();
1388 } else if (strcmp(que_keyvalp, "qsort") == 0) {
1389 queobjp = (opaque_t)qsort_create();
1390 } else {
1391 return (DDI_FAILURE);
1392 }
1393
1394 /* Create linkage to dequeueing routines based on property */
1395 flc_keylen = sizeof (flc_keyvalp);
1396 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1397 DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
1398 DDI_PROP_SUCCESS) {
1399 cmn_err(CE_WARN,
1400 "cmdk_create_obj: flow-control property undefined");
1401 return (DDI_FAILURE);
1402 }
1403
1404 flc_keyvalp[flc_keylen] = (char)0;
1405
1406 if (strcmp(flc_keyvalp, "dsngl") == 0) {
1407 flcobjp = (opaque_t)dsngl_create();
1408 } else if (strcmp(flc_keyvalp, "dmult") == 0) {
1409 flcobjp = (opaque_t)dmult_create();
1410 } else {
1411 return (DDI_FAILURE);
1412 }
1413
1414 /* populate bbh_obj object stored in dkp */
1415 dkp->dk_bbh_obj.bbh_data = dkp;
1416 dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;
1417
1418 /* create linkage to dadk */
1419 dkp->dk_tgobjp = (opaque_t)dadk_create();
1420
1421 devp = ddi_get_driver_private(dip);
1422 (void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
1423 NULL);
1424
1425 return (DDI_SUCCESS);
1426 }
1427
1428 static void
1429 cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
1430 {
1431 char que_keyvalp[64];
1432 int que_keylen;
1433 char flc_keyvalp[64];
1434 int flc_keylen;
1435
1436 ASSERT(mutex_owned(&dkp->dk_mutex));
1437
1438 (void) dadk_free((dkp->dk_tgobjp));
1439 dkp->dk_tgobjp = NULL;
1440
1441 que_keylen = sizeof (que_keyvalp);
1442 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1443 DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
1444 DDI_PROP_SUCCESS) {
1445 cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
1446 return;
1447 }
1448 que_keyvalp[que_keylen] = (char)0;
1449
1450 flc_keylen = sizeof (flc_keyvalp);
1451 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1452 DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
1453 DDI_PROP_SUCCESS) {
1454 cmn_err(CE_WARN,
1455 "cmdk_destroy_obj: flow-control property undefined");
1456 return;
1457 }
1458 flc_keyvalp[flc_keylen] = (char)0;
1459 }
1460 /*ARGSUSED5*/
1461 static int
1462 cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
1463 diskaddr_t start, size_t count, void *tg_cookie)
1464 {
1465 struct cmdk *dkp;
1466 opaque_t handle;
1467 int rc = 0;
1468 char *bufa;
1469 size_t buflen;
1470
1471 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
1472 if (dkp == NULL)
1473 return (ENXIO);
1474
1475 if (cmd != TG_READ && cmd != TG_WRITE)
1476 return (EINVAL);
1477
1478 /* buflen must be multiple of 512 */
1479 buflen = (count + NBPSCTR - 1) & -NBPSCTR;
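	/* e.g. count = 700 rounds up to buflen = 1024 (two 512-byte sectors) */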
1480 handle = dadk_iob_alloc(DKTP_DATA, start, buflen, KM_SLEEP);
1481 if (!handle)
1482 return (ENOMEM);
1483
1484 if (cmd == TG_READ) {
1485 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1486 if (!bufa)
1487 rc = EIO;
1488 else
1489 bcopy(bufa, bufaddr, count);
1490 } else {
1491 bufa = dadk_iob_htoc(DKTP_DATA, handle);
1492 bcopy(bufaddr, bufa, count);
1493 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
1494 if (!bufa)
1495 rc = EIO;
1496 }
1497 (void) dadk_iob_free(DKTP_DATA, handle);
1498
1499 return (rc);
1500 }
1501
1502 /*ARGSUSED3*/
1503 static int
1504 cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
1505 {
1506
1507 struct cmdk *dkp;
1508 struct tgdk_geom phyg;
1509
1510
1511 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
1512 if (dkp == NULL)
1513 return (ENXIO);
1514
1515 switch (cmd) {
1516 case TG_GETPHYGEOM: {
1517 cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;
1518
1519 /* dadk_getphygeom always returns success */
1520 (void) dadk_getphygeom(DKTP_DATA, &phyg);
1521
1522 phygeomp->g_capacity = phyg.g_cap;
1523 phygeomp->g_nsect = phyg.g_sec;
1524 phygeomp->g_nhead = phyg.g_head;
1525 phygeomp->g_acyl = phyg.g_acyl;
1526 phygeomp->g_ncyl = phyg.g_cyl;
1527 phygeomp->g_secsize = phyg.g_secsiz;
1528 phygeomp->g_intrlv = 1;
1529 phygeomp->g_rpm = 3600;
1530
1531 return (0);
1532 }
1533
1534 case TG_GETVIRTGEOM: {
1535 cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
1536 diskaddr_t capacity;
1537
1538 (void) dadk_getgeom(DKTP_DATA, &phyg);
1539 capacity = phyg.g_cap;
1540
1541 /*
1542 * If the controller returned us something that doesn't
1543 * really fit into an Int 13/function 8 geometry
1544 * result, just fail the ioctl. See PSARC 1998/313.
1545 */
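		/* 63 * 254 * 1024 == 16,386,048 sectors, about 7.8 GiB of 512-byte sectors */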
1546 if (capacity < 0 || capacity >= 63 * 254 * 1024)
1547 return (EINVAL);
1548
1549 virtgeomp->g_capacity = capacity;
1550 virtgeomp->g_nsect = 63;
1551 virtgeomp->g_nhead = 254;
1552 virtgeomp->g_ncyl = capacity / (63 * 254);
1553 virtgeomp->g_acyl = 0;
1554 virtgeomp->g_secsize = 512;
1555 virtgeomp->g_intrlv = 1;
1556 virtgeomp->g_rpm = 3600;
1557
1558 return (0);
1559 }
1560
1561 case TG_GETCAPACITY:
1562 case TG_GETBLOCKSIZE:
1563 {
1564
1565 /* dadk_getphygeom always returns success */
1566 (void) dadk_getphygeom(DKTP_DATA, &phyg);
1567 if (cmd == TG_GETCAPACITY)
1568 *(diskaddr_t *)arg = phyg.g_cap;
1569 else
1570 *(uint32_t *)arg = (uint32_t)phyg.g_secsiz;
1571
1572 return (0);
1573 }
1574
1575 case TG_GETATTR: {
1576 tg_attribute_t *tgattribute = (tg_attribute_t *)arg;
1577 if ((DKTP_EXT->tg_rdonly))
1578 tgattribute->media_is_writable = FALSE;
1579 else
1580 tgattribute->media_is_writable = TRUE;
1581 tgattribute->media_is_rotational = TRUE;
1582
1583 return (0);
1584 }
1585
1586 default:
1587 return (ENOTTY);
1588 }
1589 }
1590
1591
1592
1593
1594
1595 /*
1596 * Create and register the devid.
1597 * There are 4 different ways we can get a device id:
1598 * 1. Already have one - nothing to do
1599 * 2. Build one from the drive's model and serial numbers
1600 * 3. Read one from the disk (first sector of last track)
1601 * 4. Fabricate one and write it on the disk.
1602 * If any of these succeeds, register the deviceid
1603 */
1604 static void
1605 cmdk_devid_setup(struct cmdk *dkp)
1606 {
1607 int rc;
1608
1609 /* Try options until one succeeds, or all have failed */
1610
1611 /* 1. All done if already registered */
1612 if (dkp->dk_devid != NULL)
1613 return;
1614
1615 /* 2. Build a devid from the model and serial number */
1616 rc = cmdk_devid_modser(dkp);
1617 if (rc != DDI_SUCCESS) {
1618 /* 3. Read devid from the disk, if present */
1619 rc = cmdk_devid_read(dkp);
1620
1621 /* 4. otherwise make one up and write it on the disk */
1622 if (rc != DDI_SUCCESS)
1623 rc = cmdk_devid_fabricate(dkp);
1624 }
1625
1626 /* If we managed to get a devid any of the above ways, register it */
1627 if (rc == DDI_SUCCESS)
1628 (void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
1629
1630 }
1631
1632 /*
1633 * Build a devid from the model and serial number
1634 * Return DDI_SUCCESS or DDI_FAILURE.
1635 */
1636 static int
1637 cmdk_devid_modser(struct cmdk *dkp)
1638 {
1639 int rc = DDI_FAILURE;
1640 char *hwid;
1641 int modlen;
1642 int serlen;
1643
1644 /*
1645 * device ID is a concatenation of model number, '=', serial number.
1646 */
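	/*
	 * E.g. a hypothetical drive reporting model "ST31000528AS" and
	 * serial "9VP4ABCD" yields the devid payload
	 * "ST31000528AS=9VP4ABCD".
	 */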
1647 hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
1648 modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
1649 if (modlen == 0) {
1650 rc = DDI_FAILURE;
1651 goto err;
1652 }
1653 hwid[modlen++] = '=';
1654 serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
1655 hwid + modlen, CMDK_HWIDLEN - modlen);
1656 if (serlen == 0) {
1657 rc = DDI_FAILURE;
1658 goto err;
1659 }
1660 hwid[modlen + serlen] = 0;
1661
1662 /* Initialize the device ID, trailing NULL not included */
1663 rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
1664 hwid, &dkp->dk_devid);
1665 if (rc != DDI_SUCCESS) {
1666 rc = DDI_FAILURE;
1667 goto err;
1668 }
1669
1670 rc = DDI_SUCCESS;
1671
1672 err:
1673 kmem_free(hwid, CMDK_HWIDLEN);
1674 return (rc);
1675 }
1676
1677 static int
1678 cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
1679 {
1680 dadk_ioc_string_t strarg;
1681 int rval;
1682 char *s;
1683 char ch;
1684 boolean_t ret;
1685 int i;
1686 int tb;
1687
1688 strarg.is_buf = buf;
1689 strarg.is_size = len;
1690 if (dadk_ioctl(DKTP_DATA,
1691 dkp->dk_dev,
1692 ioccmd,
1693 (uintptr_t)&strarg,
1694 FNATIVE | FKIOCTL,
1695 NULL,
1696 &rval) != 0)
1697 return (0);
1698
1699 /*
1700 	 * a valid model/serial string must contain at least one character
1701 	 * that is not a space, NUL, or '0'; trim trailing spaces/NULs
1702 */
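	/*
	 * E.g. a hypothetical 12-byte buffer "WDC WD50  \0\0" trims to
	 * tb = 8, the length of "WDC WD50".
	 */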
1703 ret = B_FALSE;
1704 s = buf;
1705 for (i = 0; i < strarg.is_size; i++) {
1706 ch = *s++;
1707 if (ch != ' ' && ch != '\0')
1708 tb = i + 1;
1709 if (ch != ' ' && ch != '\0' && ch != '0')
1710 ret = B_TRUE;
1711 }
1712
1713 if (ret == B_FALSE)
1714 return (0);
1715
1716 return (tb);
1717 }
1718
1719 /*
1720  * Read a devid from the first block of the last track of
1721 * the last cylinder. Make sure what we read is a valid devid.
1722 * Return DDI_SUCCESS or DDI_FAILURE.
1723 */
1724 static int
1725 cmdk_devid_read(struct cmdk *dkp)
1726 {
1727 diskaddr_t blk;
1728 struct dk_devid *dkdevidp;
1729 uint_t *ip;
1730 int chksum;
1731 int i, sz;
1732 tgdk_iob_handle handle = NULL;
1733 int rc = DDI_FAILURE;
1734
1735 if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
1736 goto err;
1737
1738 /* read the devid */
1739 handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
1740 if (handle == NULL)
1741 goto err;
1742
1743 dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1744 if (dkdevidp == NULL)
1745 goto err;
1746
1747 /* Validate the revision */
1748 if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
1749 (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
1750 goto err;
1751
1752 /* Calculate the checksum */
1753 chksum = 0;
1754 ip = (uint_t *)dkdevidp;
1755 for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
1756 chksum ^= ip[i];
1757 if (DKD_GETCHKSUM(dkdevidp) != chksum)
1758 goto err;
1759
1760 /* Validate the device id */
1761 if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
1762 goto err;
1763
1764 /* keep a copy of the device id */
1765 sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
1766 dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
1767 bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);
1768
1769 rc = DDI_SUCCESS;
1770
1771 err:
1772 if (handle != NULL)
1773 (void) dadk_iob_free(DKTP_DATA, handle);
1774 return (rc);
1775 }
1776
1777 /*
1778 * Create a devid and write it on the first block of the last track of
1779 * the last cylinder.
1780 * Return DDI_SUCCESS or DDI_FAILURE.
1781 */
1782 static int
1783 cmdk_devid_fabricate(struct cmdk *dkp)
1784 {
1785 ddi_devid_t devid = NULL; /* devid made by ddi_devid_init */
1786 struct dk_devid *dkdevidp; /* devid struct stored on disk */
1787 diskaddr_t blk;
1788 tgdk_iob_handle handle = NULL;
1789 uint_t *ip, chksum;
1790 int i;
1791 int rc = DDI_FAILURE;
1792
1793 if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
1794 DDI_SUCCESS)
1795 goto err;
1796
1797 if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
1798 /* no device id block address */
1799 goto err;
1800 }
1801
1802 handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
1803 if (!handle)
1804 goto err;
1805
1806 /* Locate the buffer */
1807 dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);
1808
1809 /* Fill in the revision */
1810 bzero(dkdevidp, NBPSCTR);
1811 dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
1812 dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;
1813
1814 /* Copy in the device id */
1815 i = ddi_devid_sizeof(devid);
1816 if (i > DK_DEVID_SIZE)
1817 goto err;
1818 bcopy(devid, dkdevidp->dkd_devid, i);
1819
1820 /* Calculate the chksum */
1821 chksum = 0;
1822 ip = (uint_t *)dkdevidp;
1823 for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
1824 chksum ^= ip[i];
1825
1826 /* Fill in the checksum */
1827 DKD_FORMCHKSUM(chksum, dkdevidp);
1828
1829 /* write the devid */
1830 (void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
1831
1832 dkp->dk_devid = devid;
1833
1834 rc = DDI_SUCCESS;
1835
1836 err:
1837 if (handle != NULL)
1838 (void) dadk_iob_free(DKTP_DATA, handle);
1839
1840 if (rc != DDI_SUCCESS && devid != NULL)
1841 ddi_devid_free(devid);
1842
1843 return (rc);
1844 }
1845
1846 static void
1847 cmdk_bbh_free_alts(struct cmdk *dkp)
1848 {
1849 if (dkp->dk_alts_hdl) {
1850 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1851 kmem_free(dkp->dk_slc_cnt,
1852 NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
1853 dkp->dk_alts_hdl = NULL;
1854 }
1855 }
1856
1857 static void
1858 cmdk_bbh_reopen(struct cmdk *dkp)
1859 {
1860 tgdk_iob_handle handle = NULL;
1861 diskaddr_t slcb, slcn, slce;
1862 struct alts_parttbl *ap;
1863 struct alts_ent *enttblp;
1864 uint32_t altused;
1865 uint32_t altbase;
1866 uint32_t altlast;
1867 int alts;
1868 uint16_t vtoctag;
1869 int i, j;
1870
1871 /* find slice with V_ALTSCTR tag */
1872 for (alts = 0; alts < NDKMAP; alts++) {
1873 if (cmlb_partinfo(
1874 dkp->dk_cmlbhandle,
1875 alts,
1876 &slcn,
1877 &slcb,
1878 NULL,
1879 &vtoctag,
1880 0)) {
1881 goto empty; /* no partition table exists */
1882 }
1883
1884 if (vtoctag == V_ALTSCTR && slcn > 1)
1885 break;
1886 }
1887 if (alts >= NDKMAP) {
1888 goto empty; /* no V_ALTSCTR slice defined */
1889 }
1890
1891 /* read in ALTS label block */
1892 handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
1893 if (!handle) {
1894 goto empty;
1895 }
1896
1897 ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1898 if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
1899 goto empty;
1900 }
1901
1902 altused = ap->alts_ent_used; /* number of BB entries */
1903 altbase = ap->alts_ent_base; /* blk offset from begin slice */
1904 altlast = ap->alts_ent_end; /* blk offset to last block */
1905 /* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */
1906
1907 if (altused == 0 ||
1908 altbase < 1 ||
1909 altbase > altlast ||
1910 altlast >= slcn) {
1911 goto empty;
1912 }
1913 (void) dadk_iob_free(DKTP_DATA, handle);
1914
1915 /* read in ALTS remapping table */
1916 handle = dadk_iob_alloc(DKTP_DATA,
1917 slcb + altbase,
1918 (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
1919 if (!handle) {
1920 goto empty;
1921 }
1922
1923 enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1924 if (!enttblp) {
1925 goto empty;
1926 }
1927
1928 rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
1929
1930 /* allocate space for dk_slc_cnt and dk_slc_ent tables */
1931 if (dkp->dk_slc_cnt == NULL) {
1932 		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
1933 		    (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
1934 }
1935 dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);
1936
1937 /* free previous BB table (if any) */
1938 if (dkp->dk_alts_hdl) {
1939 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1940 dkp->dk_alts_hdl = NULL;
1941 dkp->dk_altused = 0;
1942 }
1943
1944 /* save linkage to new BB table */
1945 dkp->dk_alts_hdl = handle;
1946 dkp->dk_altused = altused;
1947
1948 /*
1949 * build indexes to BB table by slice
1950 * effectively we have
1951 * struct alts_ent *enttblp[altused];
1952 *
1953 * uint32_t dk_slc_cnt[NDKMAP];
1954 * struct alts_ent *dk_slc_ent[NDKMAP];
1955 */
1956 for (i = 0; i < NDKMAP; i++) {
1957 if (cmlb_partinfo(
1958 dkp->dk_cmlbhandle,
1959 i,
1960 &slcn,
1961 &slcb,
1962 NULL,
1963 NULL,
1964 0)) {
1965 goto empty1;
1966 }
1967
1968 dkp->dk_slc_cnt[i] = 0;
1969 if (slcn == 0)
1970 continue; /* slice is not allocated */
1971
1972 /* last block in slice */
1973 slce = slcb + slcn - 1;
1974
1975 		/* find the first remap entry after the beginning of the slice */
1976 for (j = 0; j < altused; j++) {
1977 if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
1978 break;
1979 }
1980 dkp->dk_slc_ent[i] = enttblp + j;
1981
1982 		/* count remap entries up to the end of the slice */
1983 for (; j < altused && enttblp[j].bad_start <= slce; j++) {
1984 dkp->dk_slc_cnt[i] += 1;
1985 }
1986 }
1987
1988 rw_exit(&dkp->dk_bbh_mutex);
1989 return;
1990
1991 empty:
1992 rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
1993 empty1:
1994 if (handle && handle != dkp->dk_alts_hdl)
1995 (void) dadk_iob_free(DKTP_DATA, handle);
1996
1997 if (dkp->dk_alts_hdl) {
1998 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1999 dkp->dk_alts_hdl = NULL;
2000 }
2001
2002 rw_exit(&dkp->dk_bbh_mutex);
2003 }
2004
2005 /*ARGSUSED*/
2006 static bbh_cookie_t
2007 cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
2008 {
2009 struct bbh_handle *hp;
2010 bbh_cookie_t ckp;
2011
2012 hp = (struct bbh_handle *)handle;
2013 ckp = hp->h_cktab + hp->h_idx;
2014 hp->h_idx++;
2015 return (ckp);
2016 }
2017
2018 /*ARGSUSED*/
2019 static void
2020 cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
2021 {
2022 struct bbh_handle *hp;
2023
2024 hp = (struct bbh_handle *)handle;
2025 kmem_free(handle, (sizeof (struct bbh_handle) +
2026 (hp->h_totck * (sizeof (struct bbh_cookie)))));
2027 }
2028
2029
2030 /*
2031 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
2032 * There are 7 different cases when the comparison is made
2033 * between the bad sector cluster and the disk section.
2034 *
2035 * bad sector cluster gggggggggggbbbbbbbggggggggggg
2036 * case 1: ddddd
2037 * case 2: -d-----
2038 * case 3: ddddd
2039 * case 4: dddddddddddd
2040 * case 5: ddddddd-----
2041 * case 6: ---ddddddd
2042 * case 7: ddddddd
2043 *
2044 * where: g = good sector, b = bad sector
2045 * d = sector in disk section
2046  *	- = the disk section may be extended to cover that disk area
2047 */
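/*
 * Worked example (hypothetical numbers): an 8-sector request for
 * sectors 100..107 against a bad cluster 103..104 remapped to
 * good_start 9000 falls under case 4 and is split into three cookies:
 *
 *	{ ck_sector = 100,  ck_seclen = 3 }	covers 100..102
 *	{ ck_sector = 9000, ck_seclen = 2 }	the remapped 103..104
 *	{ ck_sector = 105,  ck_seclen = 3 }	covers 105..107
 */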
2048
2049 static opaque_t
2050 cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
2051 {
2052 struct cmdk *dkp = (struct cmdk *)bbh_data;
2053 struct bbh_handle *hp;
2054 struct bbh_cookie *ckp;
2055 struct alts_ent *altp;
2056 uint32_t alts_used;
2057 uint32_t part = CMDKPART(bp->b_edev);
2058 daddr32_t lastsec;
2059 long d_count;
2060 int i;
2061 int idx;
2062 int cnt;
2063
2064 if (part >= V_NUMPAR)
2065 return (NULL);
2066
2067 /*
2068 	 * This check is a single atomic read, and it succeeds
2069 	 * if there are no bad blocks (almost always),
2070 	 *
2071 	 * so it is performed outside of the rw_enter for speed
2072 	 * and then repeated inside the rw_enter for safety
2073 */
2074 if (!dkp->dk_alts_hdl) {
2075 return (NULL);
2076 }
2077
2078 rw_enter(&dkp->dk_bbh_mutex, RW_READER);
2079
2080 if (dkp->dk_alts_hdl == NULL) {
2081 rw_exit(&dkp->dk_bbh_mutex);
2082 return (NULL);
2083 }
2084
2085 alts_used = dkp->dk_slc_cnt[part];
2086 if (alts_used == 0) {
2087 rw_exit(&dkp->dk_bbh_mutex);
2088 return (NULL);
2089 }
2090 altp = dkp->dk_slc_ent[part];
2091
2092 /*
2093 	 * binary search for the alternate entry whose bad sector range
2094 	 * contains, or is the first to lie beyond, the starting sector
2095 */
2096 i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
2097 /* if starting sector is > the largest bad sector, return */
2098 if (i == -1) {
2099 rw_exit(&dkp->dk_bbh_mutex);
2100 return (NULL);
2101 }
2102 /* i is the starting index. Set altp to the starting entry addr */
2103 altp += i;
2104
2105 d_count = bp->b_bcount >> SCTRSHFT;
2106 lastsec = GET_BP_SEC(bp) + d_count - 1;
2107
2108 /* calculate the number of bad sectors */
2109 for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
2110 if (lastsec < altp->bad_start)
2111 break;
2112 }
2113
2114 if (!cnt) {
2115 rw_exit(&dkp->dk_bbh_mutex);
2116 return (NULL);
2117 }
2118
2119 /* calculate the maximum number of reserved cookies */
2120 cnt <<= 1;
2121 cnt++;
2122
2123 /* allocate the handle */
2124 hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
2125 (cnt * sizeof (*ckp))), KM_SLEEP);
2126
2127 hp->h_idx = 0;
2128 hp->h_totck = cnt;
2129 ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
2130 ckp[0].ck_sector = GET_BP_SEC(bp);
2131 ckp[0].ck_seclen = d_count;
2132
2133 altp = dkp->dk_slc_ent[part];
2134 altp += i;
2135 for (idx = 0; i < alts_used; i++, altp++) {
2136 /* CASE 1: */
2137 if (lastsec < altp->bad_start)
2138 break;
2139
2140 /* CASE 3: */
2141 if (ckp[idx].ck_sector > altp->bad_end)
2142 continue;
2143
2144 /* CASE 2 and 7: */
2145 if ((ckp[idx].ck_sector >= altp->bad_start) &&
2146 (lastsec <= altp->bad_end)) {
2147 ckp[idx].ck_sector = altp->good_start +
2148 ckp[idx].ck_sector - altp->bad_start;
2149 break;
2150 }
2151
2152 		/* at least one bad sector in our section: split it */
2153 /* CASE 5: */
2154 if ((lastsec >= altp->bad_start) &&
2155 (lastsec <= altp->bad_end)) {
2156 ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
2157 ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
2158 ckp[idx+1].ck_sector = altp->good_start;
2159 break;
2160 }
2161 /* CASE 6: */
2162 if ((ckp[idx].ck_sector <= altp->bad_end) &&
2163 (ckp[idx].ck_sector >= altp->bad_start)) {
2164 ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
2165 ckp[idx].ck_seclen = altp->bad_end -
2166 ckp[idx].ck_sector + 1;
2167 ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
2168 ckp[idx].ck_sector = altp->good_start +
2169 ckp[idx].ck_sector - altp->bad_start;
2170 idx++;
2171 ckp[idx].ck_sector = altp->bad_end + 1;
2172 continue; /* check rest of section */
2173 }
2174
2175 /* CASE 4: */
2176 ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
2177 ckp[idx+1].ck_sector = altp->good_start;
2178 ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
2179 idx += 2;
2180 ckp[idx].ck_sector = altp->bad_end + 1;
2181 ckp[idx].ck_seclen = lastsec - altp->bad_end;
2182 }
2183
2184 rw_exit(&dkp->dk_bbh_mutex);
2185 return ((opaque_t)hp);
2186 }
2187
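/*
 * Binary search over alts_ent entries sorted by ascending bad_start:
 * returns the index of the entry whose [bad_start, bad_end] range
 * contains key, or of the first entry starting beyond key, or -1 when
 * key lies past every entry. E.g., for hypothetical ranges
 * {10..12, 50..53, 90..95}: key 51 -> 1, key 60 -> 2, key 100 -> -1.
 */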
2188 static int
2189 cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
2190 {
2191 int i;
2192 int ind;
2193 int interval;
2194 int mystatus = -1;
2195
2196 if (!cnt)
2197 return (mystatus);
2198
2199 ind = 1; /* compiler complains about possible uninitialized var */
2200 for (i = 1; i <= cnt; i <<= 1)
2201 ind = i;
2202
2203 for (interval = ind; interval; ) {
2204 if ((key >= buf[ind-1].bad_start) &&
2205 (key <= buf[ind-1].bad_end)) {
2206 return (ind-1);
2207 } else {
2208 interval >>= 1;
2209 if (key < buf[ind-1].bad_start) {
2210 /* record the largest bad sector index */
2211 mystatus = ind-1;
2212 if (!interval)
2213 break;
2214 ind = ind - interval;
2215 } else {
2216 /*
2217 * if key is larger than the last element
2218 * then break
2219 */
2220 if ((ind == cnt) || !interval)
2221 break;
2222 if ((ind+interval) <= cnt)
2223 ind += interval;
2224 }
2225 }
2226 }
2227 return (mystatus);
2228 }
2229