/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 */

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define	DENT	0x0001
#define	DIO	0x0002

static	int	cmdk_debug = DIO;
#endif

#ifndef	TRUE
#define	TRUE	1
#endif

#ifndef	FALSE
#define	FALSE	0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext

void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen,		/* open */
	cmdkclose,		/* close */
	cmdkstrategy,		/* strategy */
	nodev,			/* print */
	cmdkdump,		/* dump */
	cmdkread,		/* read */
	cmdkwrite,		/* write */
	cmdkioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	cmdk_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	cmdkinfo,		/* info */
	nulldev,		/* identify */
	cmdkprobe,		/* probe */
	cmdkattach,		/* attach */
	cmdkdetach,		/* detach */
	nodev,			/* reset */
	&cmdk_cb_ops,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	cmdkpower,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"Common Direct Access Disk",
	&cmdk_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};

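/*
 * Check whether any open (layered or regular) is outstanding on the
 * partition addressed by dev. The caller must hold dk_mutex.
 */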
static boolean_t
cmdk_isopen(struct cmdk *dkp, dev_t dev)
{
	int		part, otyp;
	ulong_t		partbit;

	ASSERT(MUTEX_HELD((&dkp->dk_mutex)));

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (dkp->dk_open_lyr[part] != 0)
		return (B_TRUE);
	for (otyp = 0; otyp < OTYPCNT; otyp++)
		if (dkp->dk_open_reg[otyp] & partbit)
			return (B_TRUE);
	return (B_FALSE);
}

int
_init(void)
{
	int	rval;

	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}

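/*
 * This driver is not unloadable; _fini(9E) always reports busy.
 */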
int
_fini(void)
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int	instance;
	int	status;
	struct cmdk	*dkp;

	instance = ddi_get_instance(dip);

	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if (ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS)
		return (DDI_PROBE_PARTIAL);

	if ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL)
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}

static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int	instance;
	struct	cmdk *dkp;
	char	*node_type;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (cmdkresume(dip));
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * This open allows cmlb to read the device and determine the label
	 * type, so that cmlb can create minor nodes for the device.
	 */

	/* open the target disk */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    B_FALSE,			/* removable */
	    B_FALSE,			/* hot pluggable XXX */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle,
	    0) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	/*
	 * Initialize power management
	 */
	mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
	cmdk_setup_pm(dip, dkp);

	return (DDI_SUCCESS);

fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	ddi_soft_state_free(cmdk_state, instance);
	return (DDI_FAILURE);
}


static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int		instance;
	int		max_instance;

	switch (cmd) {
	case DDI_DETACH:
		/* return (DDI_FAILURE); */
		break;
	case DDI_SUSPEND:
		return (cmdksuspend(dip));
	default:
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/* check if any instance of driver is open */
	for (instance = 0; instance < max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN; make sure we close it on
	 * detach for the case where cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle, 0);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */

	/*
	 * free the devid structure if allocated before
	 */
	if (dkp->dk_devid) {
		ddi_devid_free(dkp->dk_devid);
		dkp->dk_devid = NULL;
	}

	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_destroy(&dkp->dk_pm_mutex);
	cv_destroy(&dkp->dk_suspend_cv);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}

static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int		instance;
	struct	cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
			return (DDI_FAILURE);
		*result = (void *) dkp->dk_dip;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Initialize the power management components
 */
static void
cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
{
	char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };
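	/*
	 * pm_comp describes one power-manageable component with two power
	 * levels (0=off, 1=on); only the three strings are passed to the
	 * "pm-components" property below, the terminating NULL is not.
	 */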

	/*
	 * Since the cmdk device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "pm-hardware-state", "needs-suspend-resume");

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_ON;
			dkp->dk_pm_is_enabled = 1;
			mutex_exit(&dkp->dk_pm_mutex);
		} else {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_OFF;
			dkp->dk_pm_is_enabled = 0;
			mutex_exit(&dkp->dk_pm_mutex);
		}
	} else {
		mutex_enter(&dkp->dk_pm_mutex);
		dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
		dkp->dk_pm_is_enabled = 0;
		mutex_exit(&dkp->dk_pm_mutex);
	}
}

/*
 * Suspend routine; it is run when the DDI_SUSPEND command is
 * received at detach(9E) from system power management.
 */
static int
cmdksuspend(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;
	clock_t		count = 0;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (dkp->dk_flag & CMDK_SUSPEND) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_SUCCESS);
	}
	dkp->dk_flag |= CMDK_SUSPEND;

	/* need to wait a while */
	while (dadk_getcmds(DKTP_DATA) != 0) {
		delay(drv_usectohz(1000000));
		if (count > 60) {
			dkp->dk_flag &= ~CMDK_SUSPEND;
			cv_broadcast(&dkp->dk_suspend_cv);
			mutex_exit(&dkp->dk_mutex);
			return (DDI_FAILURE);
		}
		count++;
	}
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * Resume routine; it is run when the DDI_RESUME command is
 * received at attach(9E) from system power management.
 */
static int
cmdkresume(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (!(dkp->dk_flag & CMDK_SUSPEND)) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_FAILURE);
	}
	dkp->dk_pm_level = CMDK_SPINDLE_ON;
	dkp->dk_flag &= ~CMDK_SUSPEND;
	cv_broadcast(&dkp->dk_suspend_cv);
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * Power management entry point; it is used to change the power level of
 * a power-manageable component.
 * The actual hard drive suspend/resume is handled in the ata driver, so
 * this function does no real work other than verifying that the disk
 * is idle.
 */
static int
cmdkpower(dev_info_t *dip, int component, int level)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    component != 0 || level > CMDK_SPINDLE_ON ||
	    level < CMDK_SPINDLE_OFF) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
		mutex_exit(&dkp->dk_pm_mutex);
		return (DDI_SUCCESS);
	}
	mutex_exit(&dkp->dk_pm_mutex);

	if ((level == CMDK_SPINDLE_OFF) &&
	    (dadk_getcmds(DKTP_DATA) != 0)) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	dkp->dk_pm_level = level;
	mutex_exit(&dkp->dk_pm_mutex);
	return (DDI_SUCCESS);
}

static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct	cmdk	*dkp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	return (cmlb_prop_op(dkp->dk_cmlbhandle,
	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
	    CMDKPART(dev), NULL));
}

/*
 * dump routine
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct	buf	local;
	struct	buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL,
	    0)) {
		return (ENXIO);
	}

	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

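	/*
	 * Build a throwaway buf on the stack; in panic/dump context we
	 * cannot depend on being able to allocate memory.
	 */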
	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}

/*
 * Copy in the dadkio_rwcmd according to the user's data model. If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		if (ddi_copyin(inaddr, &cmd32,
		    sizeof (struct dadkio_rwcmd32), flag)) {
			return (EFAULT);
		}

		rwcmdp->cmd = cmd32.cmd;
		rwcmdp->flags = cmd32.flags;
		rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr;
		rwcmdp->buflen = cmd32.buflen;
		rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
		/*
		 * Note: we do not convert the 'status' field,
		 * as it should not contain valid data at this
		 * point.
		 */
		bzero(&rwcmdp->status, sizeof (rwcmdp->status));
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyin(inaddr, rwcmdp,
		    sizeof (struct dadkio_rwcmd), flag)) {
			return (EFAULT);
		}
	}
	}
	return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		cmd32.cmd = rwcmdp->cmd;
		cmd32.flags = rwcmdp->flags;
		cmd32.blkaddr = rwcmdp->blkaddr;
		cmd32.buflen = rwcmdp->buflen;
		ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
		cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

		cmd32.status.status = rwcmdp->status.status;
		cmd32.status.resid = rwcmdp->status.resid;
		cmd32.status.failed_blk_is_valid =
		    rwcmdp->status.failed_blk_is_valid;
		cmd32.status.failed_blk = rwcmdp->status.failed_blk;
		cmd32.status.fru_code_is_valid =
		    rwcmdp->status.fru_code_is_valid;
		cmd32.status.fru_code = rwcmdp->status.fru_code;

		bcopy(rwcmdp->status.add_error_info,
		    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

		if (ddi_copyout(&cmd32, outaddr,
		    sizeof (struct dadkio_rwcmd32), flag))
			return (EFAULT);
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyout(rwcmdp, outaddr,
		    sizeof (struct dadkio_rwcmd), flag))
			return (EFAULT);
	}
	}
	return (0);
}

/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	int		instance;
	struct scsi_device *devp;
	struct cmdk	*dkp;
	char		data[NBPSCTR];

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bzero(data, sizeof (data));

	switch (cmd) {

	case DKIOCGMEDIAINFO: {
		struct dk_minfo	media_info;
		struct	tgdk_geom phyg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		media_info.dki_lbsize = phyg.g_secsiz;
		media_info.dki_capacity = phyg.g_cap;
		media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), flag)) {
			return (EFAULT);
		} else {
			return (0);
		}
	}

	case DKIOCINFO: {
		struct dk_cinfo *info = (struct dk_cinfo *)data;

		/* controller information */
		info->dki_ctype = (DKTP_EXT->tg_ctype);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

		/* Unit Information */
		info->dki_unit = ddi_get_instance(dkp->dk_dip);
		devp = ddi_get_driver_private(dkp->dk_dip);
		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = CMDKPART(dev);

		info->dki_maxtransfer = maxphys / DEV_BSIZE;
		info->dki_addr = 1;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
			return (EFAULT);
		else
			return (0);
	}

	case DKIOCSTATE: {
		int	state;
		int	rval;
		diskaddr_t	p_lblksrt;
		diskaddr_t	p_lblkcnt;

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
			return (EFAULT);

		/* dadk_check_media blocks until state changes */
		if (rval = dadk_check_media(DKTP_DATA, &state))
			return (rval);

		if (state == DKIO_INSERTED) {

			if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
				return (ENXIO);

			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
			    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
				return (ENXIO);

			if (p_lblkcnt <= 0)
				return (ENXIO);
		}

		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	/*
	 * is media removable?
	 */
	case DKIOCREMOVABLE: {
		int i;

		i = (DKTP_EXT->tg_rmb) ? 1 : 0;

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	case DKIOCADDBAD:
		/*
		 * This is not an update mechanism to add bad blocks
		 * to the bad block structures stored on disk.
		 *
		 * addbadsec(8) will update the bad block data on disk
		 * and use this ioctl to force the driver to re-initialize
		 * the list of bad blocks in the driver.
		 */

		/* start BBH */
		cmdk_bbh_reopen(dkp);
		return (0);

	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGEXTVTOC:
	case DKIOCSEXTVTOC:
	case DKIOCEXTPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	case DKIOCSETEXTPART:
	{
		int rc;

		rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
		    credp, rvalp, 0);
		if (cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC)
			cmdk_devid_setup(dkp);
		return (rc);
	}

	case DIOCTL_RWCMD: {
		struct	dadkio_rwcmd *rwcmdp;
		int	status;

		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

		if (status == 0) {
			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
			status = dadk_ioctl(DKTP_DATA,
			    dev,
			    cmd,
			    (uintptr_t)rwcmdp,
			    flag,
			    credp,
			    rvalp);
		}
		if (status == 0)
			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}

/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	ASSERT(cmdk_isopen(dkp, dev));
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		ASSERT(dkp->dk_open_lyr[part] > 0);
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else {
		ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
		dkp->dk_open_reg[otyp] &= ~partbit;
	}
	dkp->dk_open_exl &= ~partbit;

	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle, 0);

	return (DDI_SUCCESS);
}

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int		part;
	ulong_t		partbit;
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));

	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

		/* fail unless this is a nonblocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL, 0) == 0) {

		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail unless this is a nonblocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check for part already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

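/*
 * minphys-style routine handed to physio/aphysio below; it clamps each
 * transfer to at most DK_MAXRECSIZE bytes.
 */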
static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}

static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	int	instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
	int	instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
	int		instance;
	struct	cmdk	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	mutex_enter(&dkp->dk_mutex);
	ASSERT(cmdk_isopen(dkp, bp->b_edev));
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 */
	if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
	    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
		SETBPERR(bp, ENXIO);
	}

	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

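	/*
	 * Trim the transfer so it does not run past the end of the
	 * partition; the untransferred tail is reported via b_resid.
	 */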
	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}

static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
	struct scsi_device *devp;
	opaque_t	queobjp = NULL;
	opaque_t	flcobjp = NULL;
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* Create linkage to queueing routines based on property */
	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
		return (DDI_FAILURE);
	}
	que_keyvalp[que_keylen] = (char)0;

	if (strcmp(que_keyvalp, "qfifo") == 0) {
		queobjp = (opaque_t)qfifo_create();
	} else if (strcmp(que_keyvalp, "qsort") == 0) {
		queobjp = (opaque_t)qsort_create();
	} else {
		return (DDI_FAILURE);
	}

	/* Create linkage to dequeueing routines based on property */
	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_create_obj: flow-control property undefined");
		return (DDI_FAILURE);
	}

	flc_keyvalp[flc_keylen] = (char)0;

	if (strcmp(flc_keyvalp, "dsngl") == 0) {
		flcobjp = (opaque_t)dsngl_create();
	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
		flcobjp = (opaque_t)dmult_create();
	} else {
		return (DDI_FAILURE);
	}

	/* populate bbh_obj object stored in dkp */
	dkp->dk_bbh_obj.bbh_data = dkp;
	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

	/* create linkage to dadk */
	dkp->dk_tgobjp = (opaque_t)dadk_create();

	devp = ddi_get_driver_private(dip);
	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
	    NULL);

	return (DDI_SUCCESS);
}

static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = (char)0;

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = (char)0;
}

/*ARGSUSED5*/
static int
cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t count, void *tg_cookie)
{
	struct	cmdk	*dkp;
	opaque_t	handle;
	int		rc = 0;
	char		*bufa;
	size_t		buflen;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	/* round buflen up to a multiple of the sector size (512 bytes) */
	buflen = (count + NBPSCTR - 1) & -NBPSCTR;
	handle = dadk_iob_alloc(DKTP_DATA, start, buflen, KM_SLEEP);
	if (!handle)
		return (ENOMEM);

	if (cmd == TG_READ) {
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
		if (!bufa)
			rc = EIO;
		else
			bcopy(bufa, bufaddr, count);
	} else {
		bufa = dadk_iob_htoc(DKTP_DATA, handle);
		bcopy(bufaddr, bufa, count);
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
		if (!bufa)
			rc = EIO;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	return (rc);
}

/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	struct	cmdk	*dkp;
	struct	tgdk_geom phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		phygeomp->g_capacity = phyg.g_cap;
		phygeomp->g_nsect = phyg.g_sec;
		phygeomp->g_nhead = phyg.g_head;
		phygeomp->g_acyl = phyg.g_acyl;
		phygeomp->g_ncyl = phyg.g_cyl;
		phygeomp->g_secsize = phyg.g_secsiz;
		phygeomp->g_intrlv = 1;
		phygeomp->g_rpm = 3600;

		return (0);
	}

	case TG_GETVIRTGEOM: {
		cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
		diskaddr_t	capacity;

		(void) dadk_getgeom(DKTP_DATA, &phyg);
		capacity = phyg.g_cap;

		/*
		 * If the controller returned us something that doesn't
		 * really fit into an Int 13/function 8 geometry
		 * result, just fail the ioctl. See PSARC 1998/313.
		 */
		if (capacity < 0 || capacity >= 63 * 254 * 1024)
			return (EINVAL);

		virtgeomp->g_capacity = capacity;
		virtgeomp->g_nsect = 63;
		virtgeomp->g_nhead = 254;
		virtgeomp->g_ncyl = capacity / (63 * 254);
		virtgeomp->g_acyl = 0;
		virtgeomp->g_secsize = 512;
		virtgeomp->g_intrlv = 1;
		virtgeomp->g_rpm = 3600;

		return (0);
	}

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
	{
		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = phyg.g_cap;
		else
			*(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

		return (0);
	}

	case TG_GETATTR: {
		tg_attribute_t *tgattribute = (tg_attribute_t *)arg;
		if ((DKTP_EXT->tg_rdonly))
			tgattribute->media_is_writable = FALSE;
		else
			tgattribute->media_is_writable = TRUE;
		tgattribute->media_is_rotational = TRUE;

		return (0);
	}

	default:
		return (ENOTTY);
	}
}

/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the deviceid
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
	int	rc;

	/* Try options until one succeeds, or all have failed */

	/* 1. All done if already registered */
	if (dkp->dk_devid != NULL)
		return;

	/* 2. Build a devid from the model and serial number */
	rc = cmdk_devid_modser(dkp);
	if (rc != DDI_SUCCESS) {
		/* 3. Read devid from the disk, if present */
		rc = cmdk_devid_read(dkp);

		/* 4. otherwise make one up and write it on the disk */
		if (rc != DDI_SUCCESS)
			rc = cmdk_devid_fabricate(dkp);
	}

	/* If we managed to get a devid any of the above ways, register it */
	if (rc == DDI_SUCCESS)
		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
}

/*
 * Build a devid from the model and serial number
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
	int	rc = DDI_FAILURE;
	char	*hwid;
	int	modlen;
	int	serlen;

	/*
	 * device ID is a concatenation of model number, '=', serial number.
	 */
	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
	if (modlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen++] = '=';
	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
	    hwid + modlen, CMDK_HWIDLEN - modlen);
	if (serlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen + serlen] = 0;

	/* Initialize the device ID, trailing NULL not included */
	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
	    hwid, &dkp->dk_devid);
	if (rc != DDI_SUCCESS) {
		rc = DDI_FAILURE;
		goto err;
	}

	rc = DDI_SUCCESS;

err:
	kmem_free(hwid, CMDK_HWIDLEN);
	return (rc);
}

static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * A valid model/serial string must contain at least one character
	 * that is not a space, NUL, or '0'; trim trailing spaces/NULs.
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}

/*
 * Read a devid from the first block of the last track of
 * the last cylinder. Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;
	int		rc = DDI_FAILURE;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}

/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init */
	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
	diskaddr_t	blk;
	tgdk_iob_handle	handle = NULL;
	uint_t		*ip, chksum;
	int		i;
	int		rc = DDI_FAILURE;

	if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
	    DDI_SUCCESS)
		goto err;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
		/* no device id block address */
		goto err;
	}

	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (!handle)
		goto err;

	/* Locate the buffer */
	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

	/* Fill in the revision */
	bzero(dkdevidp, NBPSCTR);
	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	i = ddi_devid_sizeof(devid);
	if (i > DK_DEVID_SIZE)
		goto err;
	bcopy(devid, dkdevidp->dkd_devid, i);

	/* Calculate the chksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevidp);

	/* write the devid */
	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

	dkp->dk_devid = devid;

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (rc != DDI_SUCCESS && devid != NULL)
		ddi_devid_free(devid);

	return (rc);
}

static void
cmdk_bbh_free_alts(struct cmdk *dkp)
{
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		kmem_free(dkp->dk_slc_cnt,
		    NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
		dkp->dk_alts_hdl = NULL;
	}
}

static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
	tgdk_iob_handle	handle = NULL;
	diskaddr_t	slcb, slcn, slce;
	struct	alts_parttbl	*ap;
	struct	alts_ent	*enttblp;
	uint32_t	altused;
	uint32_t	altbase;
	uint32_t	altlast;
	int		alts;
	uint16_t	vtoctag;
	int		i, j;

	/* find slice with V_ALTSCTR tag */
	for (alts = 0; alts < NDKMAP; alts++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    alts,
		    &slcn,
		    &slcb,
		    NULL,
		    &vtoctag,
		    0)) {
			goto empty;	/* no partition table exists */
		}

		if (vtoctag == V_ALTSCTR && slcn > 1)
			break;
	}
	if (alts >= NDKMAP) {
		goto empty;	/* no V_ALTSCTR slice defined */
	}

	/* read in ALTS label block */
	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
		goto empty;
	}

	altused = ap->alts_ent_used;	/* number of BB entries */
	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
	altlast = ap->alts_ent_end;	/* blk offset to last block */
	/* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

	if (altused == 0 ||
	    altbase < 1 ||
	    altbase > altlast ||
	    altlast >= slcn) {
		goto empty;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	/* read in ALTS remapping table */
	handle = dadk_iob_alloc(DKTP_DATA,
	    slcb + altbase,
	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!enttblp) {
		goto empty;
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

	/* allocate space for dk_slc_cnt and dk_slc_ent tables */
	if (dkp->dk_slc_cnt == NULL) {
		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
		    (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
	}
	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

	/* free previous BB table (if any) */
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
		dkp->dk_altused = 0;
	}

	/* save linkage to new BB table */
	dkp->dk_alts_hdl = handle;
	dkp->dk_altused = altused;

	/*
	 * build indexes to BB table by slice
	 * effectively we have
	 *	struct alts_ent *enttblp[altused];
	 *
	 *	uint32_t	dk_slc_cnt[NDKMAP];
	 *	struct alts_ent	*dk_slc_ent[NDKMAP];
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    i,
		    &slcn,
		    &slcb,
		    NULL,
		    NULL,
		    0)) {
			goto empty1;
		}

		dkp->dk_slc_cnt[i] = 0;
		if (slcn == 0)
			continue;	/* slice is not allocated */

		/* last block in slice */
		slce = slcb + slcn - 1;

		/* find first remap entry at/after the start of the slice */
		for (j = 0; j < altused; j++) {
			if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
				break;
		}
		dkp->dk_slc_ent[i] = enttblp + j;

		/* count remap entries until the end of the slice */
		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
			dkp->dk_slc_cnt[i] += 1;
		}
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return;

empty:
	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
	if (handle && handle != dkp->dk_alts_hdl)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
	}

	rw_exit(&dkp->dk_bbh_mutex);
}

/*ARGSUSED*/
static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
	struct	bbh_handle *hp;
	bbh_cookie_t ckp;

	hp = (struct bbh_handle *)handle;
	ckp = hp->h_cktab + hp->h_idx;
	hp->h_idx++;
	return (ckp);
}

/*ARGSUSED*/
static void
cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
{
	struct	bbh_handle *hp;

	hp = (struct bbh_handle *)handle;
	kmem_free(handle, (sizeof (struct bbh_handle) +
	    (hp->h_totck * (sizeof (struct bbh_cookie)))));
}

/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
 *	case 1:			   ddddd
 *	case 2:				  -d-----
 *	case 3:					       ddddd
 *	case 4:				dddddddddddd
 *	case 5:			  ddddddd-----
 *	case 6:				 ---ddddddd
 *	case 7:				    ddddddd
 *
 *	where:	g = good sector,	b = bad sector
 *		d = sector in disk section
 *		- = disk section may be extended to cover those disk area
 */

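/*
 * On a hit, cmdk_bbh_gethandle returns a handle holding an ordered list of
 * (sector, length) cookies that splits the request around the bad runs,
 * with each bad run redirected to its alternate area at good_start.
 */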
static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This check succeeds when there are no bad blocks (almost always);
	 * it is performed outside of the rw_enter for speed and then
	 * repeated inside the rw_enter for safety.
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search the alternate entry table for the first entry
	 * that overlaps or lies beyond the starting sector
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if the starting sector is beyond the largest bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index. Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* calculate the number of bad sectors */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* calculate the maximum number of reserved cookies */
	cnt <<= 1;
	cnt++;

	/* allocate the handle */
	hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
	    (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section. break it. */
		/* CASE 5: */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}

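/*
 * Binary search of the sorted alternate entry table: returns the index of
 * the entry whose [bad_start, bad_end] range contains key, otherwise the
 * index of the first entry lying beyond key, or -1 when key is past every
 * entry.
 */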
static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;
	int	ind;
	int	interval;
	int	mystatus = -1;

	if (!cnt)
		return (mystatus);

	ind = 1;	/* compiler complains about possible uninitialized var */
	for (i = 1; i <= cnt; i <<= 1)
		ind = i;

	for (interval = ind; interval; ) {
		if ((key >= buf[ind-1].bad_start) &&
		    (key <= buf[ind-1].bad_end)) {
			return (ind-1);
		} else {
			interval >>= 1;
			if (key < buf[ind-1].bad_start) {
				/* record the largest bad sector index */
				mystatus = ind-1;
				if (!interval)
					break;
				ind = ind - interval;
			} else {
				/*
				 * if key is larger than the last element
				 * then break
				 */
				if ((ind == cnt) || !interval)
					break;
				if ((ind+interval) <= cnt)
					ind += interval;
			}
		}
	}
	return (mystatus);
}