Lines Matching defs:cl
289 static dev_t cmlb_make_device(struct cmlb_lun *cl);
290 static int cmlb_validate_geometry(struct cmlb_lun *cl, boolean_t forcerevalid,
292 static void cmlb_resync_geom_caches(struct cmlb_lun *cl, diskaddr_t capacity,
294 static int cmlb_read_fdisk(struct cmlb_lun *cl, diskaddr_t capacity,
299 static int cmlb_use_efi(struct cmlb_lun *cl, diskaddr_t capacity, int flags,
301 static void cmlb_build_default_label(struct cmlb_lun *cl, void *tg_cookie);
302 static int cmlb_uselabel(struct cmlb_lun *cl, struct dk_label *l, int flags);
304 static void cmlb_build_user_vtoc(struct cmlb_lun *cl, struct vtoc *user_vtoc);
306 static int cmlb_build_label_vtoc(struct cmlb_lun *cl, struct vtoc *user_vtoc);
307 static int cmlb_write_label(struct cmlb_lun *cl, void *tg_cookie);
308 static int cmlb_set_vtoc(struct cmlb_lun *cl, struct dk_label *dkl,
310 static void cmlb_clear_efi(struct cmlb_lun *cl, void *tg_cookie);
311 static void cmlb_clear_vtoc(struct cmlb_lun *cl, void *tg_cookie);
312 static void cmlb_setup_default_geometry(struct cmlb_lun *cl, void *tg_cookie);
313 static int cmlb_create_minor_nodes(struct cmlb_lun *cl);
314 static int cmlb_check_update_blockcount(struct cmlb_lun *cl, void *tg_cookie);
318 static int cmlb_update_fdisk_and_vtoc(struct cmlb_lun *cl, void *tg_cookie);
326 static void cmlb_convert_geometry(struct cmlb_lun *cl, diskaddr_t capacity,
330 static int cmlb_dkio_get_geometry(struct cmlb_lun *cl, caddr_t arg, int flag,
332 static int cmlb_dkio_set_geometry(struct cmlb_lun *cl, caddr_t arg, int flag);
333 static int cmlb_dkio_get_partition(struct cmlb_lun *cl, caddr_t arg, int flag,
335 static int cmlb_dkio_set_partition(struct cmlb_lun *cl, caddr_t arg, int flag);
336 static int cmlb_dkio_get_efi(struct cmlb_lun *cl, caddr_t arg, int flag,
338 static int cmlb_dkio_set_efi(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
340 static int cmlb_dkio_get_vtoc(struct cmlb_lun *cl, caddr_t arg, int flag,
342 static int cmlb_dkio_get_extvtoc(struct cmlb_lun *cl, caddr_t arg, int flag,
344 static int cmlb_dkio_set_vtoc(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
346 static int cmlb_dkio_set_extvtoc(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
348 static int cmlb_dkio_get_mboot(struct cmlb_lun *cl, caddr_t arg, int flag,
350 static int cmlb_dkio_set_mboot(struct cmlb_lun *cl, caddr_t arg, int flag,
352 static int cmlb_dkio_partition(struct cmlb_lun *cl, caddr_t arg, int flag,
356 static int cmlb_dkio_set_ext_part(struct cmlb_lun *cl, caddr_t arg, int flag,
358 static int cmlb_validate_ext_part(struct cmlb_lun *cl, int part, int epart,
360 static int cmlb_is_linux_swap(struct cmlb_lun *cl, uint32_t part_start,
362 static int cmlb_dkio_get_virtgeom(struct cmlb_lun *cl, caddr_t arg, int flag);
363 static int cmlb_dkio_get_phygeom(struct cmlb_lun *cl, caddr_t arg, int flag,
365 static int cmlb_dkio_partinfo(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
367 static int cmlb_dkio_extpartinfo(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
371 static void cmlb_dbg(uint_t comp, struct cmlb_lun *cl, const char *fmt, ...);
408 cmlb_dbg(uint_t comp, struct cmlb_lun *cl, const char *fmt, ...)
414 ASSERT(cl != NULL);
415 dev = CMLB_DEVINFO(cl);
419 * also print if cl matches the value of cmlb_debug_cl, or if
432 ((cmlb_debug_cl == NULL) || (cmlb_debug_cl == cl))) {
434 cmlb_v_log(dev, CMLB_LABEL(cl), CE_CONT, fmt, ap);
536 struct cmlb_lun *cl;
538 cl = kmem_zalloc(sizeof (struct cmlb_lun), KM_SLEEP);
541 cl->cl_state = CMLB_INITED;
542 cl->cl_def_labeltype = CMLB_LABEL_UNDEF;
543 mutex_init(CMLB_MUTEX(cl), NULL, MUTEX_DRIVER, NULL);
545 *cmlbhandlep = (cmlb_handle_t)(cl);
559 struct cmlb_lun *cl;
561 cl = (struct cmlb_lun *)*cmlbhandlep;
562 if (cl != NULL) {
563 mutex_destroy(CMLB_MUTEX(cl));
564 kmem_free(cl, sizeof (struct cmlb_lun));
689 struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
699 mutex_enter(CMLB_MUTEX(cl));
701 CMLB_DEVINFO(cl) = devi;
702 cl->cmlb_tg_ops = tgopsp;
703 cl->cl_device_type = device_type;
704 cl->cl_is_removable = is_removable;
705 cl->cl_is_hotpluggable = is_hotpluggable;
706 cl->cl_node_type = node_type;
707 cl->cl_sys_blocksize = DEV_BSIZE;
708 cl->cl_f_geometry_is_valid = B_FALSE;
709 cl->cl_def_labeltype = CMLB_LABEL_VTOC;
710 cl->cl_alter_behavior = alter_behavior;
711 cl->cl_reserved = -1;
712 cl->cl_msglog_flag |= CMLB_ALLOW_2TB_WARN;
714 cl->cl_logical_drive_count = 0;
718 mutex_exit(CMLB_MUTEX(cl));
719 status = DK_TG_GETCAP(cl, &cap, tg_cookie);
720 mutex_enter(CMLB_MUTEX(cl));
723 cl->cl_def_labeltype = CMLB_LABEL_EFI;
728 cl->cl_last_labeltype = CMLB_LABEL_UNDEF;
729 cl->cl_cur_labeltype = CMLB_LABEL_UNDEF;
731 if (cmlb_create_minor_nodes(cl) != 0) {
732 mutex_exit(CMLB_MUTEX(cl));
737 i_ddi_prop_dyn_driver_set(CMLB_DEVINFO(cl), cmlb_prop_dyn);
739 cl->cl_state = CMLB_ATTACHED;
741 mutex_exit(CMLB_MUTEX(cl));
762 struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
764 mutex_enter(CMLB_MUTEX(cl));
765 cl->cl_def_labeltype = CMLB_LABEL_UNDEF;
766 cl->cl_f_geometry_is_valid = B_FALSE;
767 ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
768 i_ddi_prop_dyn_driver_set(CMLB_DEVINFO(cl), NULL);
769 cl->cl_state = CMLB_INITED;
770 mutex_exit(CMLB_MUTEX(cl));
805 struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
810 * Temp work-around checking cl for NULL since there is a bug
814 if (cl == NULL)
817 mutex_enter(CMLB_MUTEX(cl));
818 if (cl->cl_state < CMLB_ATTACHED) {
819 mutex_exit(CMLB_MUTEX(cl));
827 if (cl->cl_f_geometry_is_valid) {
828 cl->cl_cur_labeltype = CMLB_LABEL_EFI;
836 cl->cl_cur_labeltype = CMLB_LABEL_VTOC;
840 (void) cmlb_create_minor_nodes(cl);
842 mutex_exit(CMLB_MUTEX(cl));
859 struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
861 if (cl == NULL)
864 mutex_enter(CMLB_MUTEX(cl));
865 cl->cl_f_geometry_is_valid = B_FALSE;
866 mutex_exit(CMLB_MUTEX(cl));
886 struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
891 return (cl->cl_f_geometry_is_valid);
916 struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
918 mutex_enter(CMLB_MUTEX(cl));
919 cl->cl_f_geometry_is_valid = B_FALSE;
922 if (ISREMOVABLE(cl)) {
923 cl->cl_cur_labeltype = CMLB_LABEL_UNDEF;
924 (void) cmlb_create_minor_nodes(cl);
927 mutex_exit(CMLB_MUTEX(cl));
956 struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
958 mutex_enter(CMLB_MUTEX(cl));
959 if (cl->cl_state < CMLB_ATTACHED) {
960 mutex_exit(CMLB_MUTEX(cl));
964 if ((!cl->cl_f_geometry_is_valid) ||
965 (cl->cl_solaris_size < DK_LABEL_LOC)) {
966 mutex_exit(CMLB_MUTEX(cl));
970 if (cl->cl_cur_labeltype == CMLB_LABEL_EFI) {
971 if (cl->cl_reserved != -1) {
972 blk = cl->cl_map[cl->cl_reserved].dkl_cylno;
974 mutex_exit(CMLB_MUTEX(cl));
979 if (cl->cl_label_from_media != CMLB_LABEL_VTOC) {
980 mutex_exit(CMLB_MUTEX(cl));
985 if (cl->cl_g.dkg_acyl < 2) {
986 mutex_exit(CMLB_MUTEX(cl));
994 cyl = cl->cl_g.dkg_ncyl + cl->cl_g.dkg_acyl - 2;
995 spc = cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect;
996 head = cl->cl_g.dkg_nhead - 1;
997 blk = cl->cl_solaris_offset +
998 (cyl * (spc - cl->cl_g.dkg_apc)) +
999 (head * cl->cl_g.dkg_nsect) + 1;
1003 mutex_exit(CMLB_MUTEX(cl));
1045 struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
1051 ASSERT(cl != NULL);
1052 mutex_enter(CMLB_MUTEX(cl));
1053 if (cl->cl_state < CMLB_ATTACHED) {
1054 mutex_exit(CMLB_MUTEX(cl));
1061 if (!cl->cl_f_geometry_is_valid)
1062 (void) cmlb_validate_geometry((struct cmlb_lun *)cl,
1065 if (((!cl->cl_f_geometry_is_valid) ||
1066 (part < NDKMAP && cl->cl_solaris_size == 0)) &&
1071 *startblockp = (diskaddr_t)cl->cl_offset[part];
1075 cl->cl_map[part].dkl_nblk;
1079 ((cl->cl_cur_labeltype == CMLB_LABEL_EFI) ||
1081 cl->cl_vtoc.v_part[part].p_tag;
1100 mutex_exit(CMLB_MUTEX(cl));
1130 struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
1133 ASSERT(cl != NULL);
1134 mutex_enter(CMLB_MUTEX(cl));
1135 if (cl->cl_state < CMLB_ATTACHED) {
1136 mutex_exit(CMLB_MUTEX(cl));
1140 if (!cl->cl_f_geometry_is_valid)
1141 (void) cmlb_validate_geometry((struct cmlb_lun *)cl, B_FALSE,
1144 if ((!cl->cl_f_geometry_is_valid) || (capacity == NULL) ||
1145 (cl->cl_cur_labeltype != CMLB_LABEL_EFI)) {
1148 *capacity = (diskaddr_t)cl->cl_map[WD_NODE].dkl_nblk;
1152 mutex_exit(CMLB_MUTEX(cl));
1164 struct cmlb_lun *cl;
1166 cl = (struct cmlb_lun *)cmlbhandle;
1168 ASSERT(cl != NULL);
1170 mutex_enter(CMLB_MUTEX(cl));
1171 if (cl->cl_state < CMLB_ATTACHED) {
1172 mutex_exit(CMLB_MUTEX(cl));
1189 if (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
1190 mutex_exit(CMLB_MUTEX(cl));
1195 (void) cmlb_validate_geometry(cl, 1, CMLB_SILENT,
1203 if (cl->cl_label_from_media == CMLB_LABEL_EFI) {
1205 mutex_exit(CMLB_MUTEX(cl));
1208 (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
1209 mutex_exit(CMLB_MUTEX(cl));
1215 if (cl->cl_label_from_media == CMLB_LABEL_EFI) {
1217 mutex_exit(CMLB_MUTEX(cl));
1226 mutex_exit(CMLB_MUTEX(cl));
1230 cmlb_dbg(CMLB_TRACE, cl, "DKIOCGGEOM\n");
1231 err = cmlb_dkio_get_geometry(cl, (caddr_t)arg, flag, tg_cookie);
1235 cmlb_dbg(CMLB_TRACE, cl, "DKIOCSGEOM\n");
1236 err = cmlb_dkio_set_geometry(cl, (caddr_t)arg, flag);
1240 cmlb_dbg(CMLB_TRACE, cl, "DKIOCGAPART\n");
1241 err = cmlb_dkio_get_partition(cl, (caddr_t)arg,
1246 cmlb_dbg(CMLB_TRACE, cl, "DKIOCSAPART\n");
1247 err = cmlb_dkio_set_partition(cl, (caddr_t)arg, flag);
1251 cmlb_dbg(CMLB_TRACE, cl, "DKIOCGVTOC\n");
1252 err = cmlb_dkio_get_vtoc(cl, (caddr_t)arg, flag, tg_cookie);
1256 cmlb_dbg(CMLB_TRACE, cl, "DKIOCGVTOC\n");
1257 err = cmlb_dkio_get_extvtoc(cl, (caddr_t)arg, flag, tg_cookie);
1261 cmlb_dbg(CMLB_TRACE, cl, "DKIOCGETEFI\n");
1262 err = cmlb_dkio_get_efi(cl, (caddr_t)arg, flag, tg_cookie);
1266 cmlb_dbg(CMLB_TRACE, cl, "DKIOCPARTITION\n");
1267 err = cmlb_dkio_partition(cl, (caddr_t)arg, flag, tg_cookie);
1271 cmlb_dbg(CMLB_TRACE, cl, "DKIOCSVTOC\n");
1272 err = cmlb_dkio_set_vtoc(cl, dev, (caddr_t)arg, flag,
1277 cmlb_dbg(CMLB_TRACE, cl, "DKIOCSVTOC\n");
1278 err = cmlb_dkio_set_extvtoc(cl, dev, (caddr_t)arg, flag,
1283 cmlb_dbg(CMLB_TRACE, cl, "DKIOCSETEFI\n");
1284 err = cmlb_dkio_set_efi(cl, dev, (caddr_t)arg, flag, tg_cookie);
1288 cmlb_dbg(CMLB_TRACE, cl, "DKIOCGMBOOT\n");
1289 err = cmlb_dkio_get_mboot(cl, (caddr_t)arg, flag, tg_cookie);
1293 cmlb_dbg(CMLB_TRACE, cl, "DKIOCSMBOOT\n");
1294 err = cmlb_dkio_set_mboot(cl, (caddr_t)arg, flag, tg_cookie);
1297 cmlb_dbg(CMLB_TRACE, cl, "DKIOCG_PHYGEOM\n");
1299 err = cmlb_dkio_get_phygeom(cl, (caddr_t)arg, flag, tg_cookie);
1305 cmlb_dbg(CMLB_TRACE, cl, "DKIOCG_VIRTGEOM\n");
1307 err = cmlb_dkio_get_virtgeom(cl, (caddr_t)arg, flag);
1313 cmlb_dbg(CMLB_TRACE, cl, "DKIOCPARTINFO");
1315 err = cmlb_dkio_partinfo(cl, dev, (caddr_t)arg, flag);
1321 cmlb_dbg(CMLB_TRACE, cl, "DKIOCPARTINFO");
1323 err = cmlb_dkio_extpartinfo(cl, dev, (caddr_t)arg, flag);
1330 cmlb_dbg(CMLB_TRACE, cl, "DKIOCSETEXTPART");
1331 err = cmlb_dkio_set_ext_part(cl, (caddr_t)arg, flag, tg_cookie);
1354 i_ddi_prop_dyn_cache_invalidate(CMLB_DEVINFO(cl),
1355 i_ddi_prop_dyn_driver_get(CMLB_DEVINFO(cl)));
1362 cmlb_make_device(struct cmlb_lun *cl)
1364 if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE) {
1365 return (makedevice(ddi_driver_major(CMLB_DEVINFO(cl)),
1367 CMLB_DEVINFO(cl)) << CMLBUNIT_FORCE_P0_SHIFT));
1369 return (makedevice(ddi_driver_major(CMLB_DEVINFO(cl)),
1370 ddi_get_instance(CMLB_DEVINFO(cl)) << CMLBUNIT_SHIFT));
1384 cmlb_check_update_blockcount(struct cmlb_lun *cl, void *tg_cookie)
1390 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
1392 if (cl->cl_f_geometry_is_valid)
1395 mutex_exit(CMLB_MUTEX(cl));
1396 status = DK_TG_GETCAP(cl, &capacity, tg_cookie);
1398 mutex_enter(CMLB_MUTEX(cl));
1402 status = DK_TG_GETBLOCKSIZE(cl, &lbasize, tg_cookie);
1403 mutex_enter(CMLB_MUTEX(cl));
1408 cl->cl_blockcount = capacity;
1409 cl->cl_tgt_blocksize = lbasize;
1410 if (!cl->cl_is_removable) {
1411 cl->cl_sys_blocksize = lbasize;
1442 * Arguments: cl - driver soft state (unit) structure
1450 cmlb_create_minor_nodes(struct cmlb_lun *cl)
1458 ASSERT(cl != NULL);
1459 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
1462 (cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
1464 if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
1470 if (cl->cl_cur_labeltype != CMLB_LABEL_UNDEF &&
1471 cl->cl_last_labeltype == cl->cl_cur_labeltype) {
1476 if (cl->cl_def_labeltype == CMLB_LABEL_UNDEF) {
1481 if (cl->cl_last_labeltype == CMLB_LABEL_UNDEF) {
1483 newlabeltype = cl->cl_def_labeltype;
1485 instance = ddi_get_instance(CMLB_DEVINFO(cl));
1494 if (cmlb_create_minor(CMLB_DEVINFO(cl), name,
1497 cl->cl_node_type, NULL, internal) == DDI_FAILURE) {
1503 ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
1508 cl->cl_last_labeltype = newlabeltype;
1513 if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE) {
1514 if (cmlb_create_minor(CMLB_DEVINFO(cl), "q", S_IFBLK,
1516 cl->cl_node_type, NULL, internal) == DDI_FAILURE) {
1517 ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
1521 if (cmlb_create_minor(CMLB_DEVINFO(cl), "q,raw",
1524 cl->cl_node_type, NULL, internal) == DDI_FAILURE) {
1525 ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
1534 if (cl->cl_cur_labeltype == CMLB_LABEL_UNDEF) {
1535 if (cl->cl_last_labeltype != cl->cl_def_labeltype) {
1537 newlabeltype = cl->cl_def_labeltype;
1546 if (cl->cl_cur_labeltype != cl->cl_last_labeltype) {
1548 newlabeltype = cl->cl_cur_labeltype;
1558 instance = ddi_get_instance(CMLB_DEVINFO(cl));
1566 cl->cl_last_labeltype != CMLB_LABEL_EFI) {
1568 ddi_remove_minor_node(CMLB_DEVINFO(cl), "h");
1569 ddi_remove_minor_node(CMLB_DEVINFO(cl), "h,raw");
1570 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "wd",
1572 cl->cl_node_type, NULL, internal);
1573 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "wd,raw",
1575 cl->cl_node_type, NULL, internal);
1578 ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd");
1579 ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd,raw");
1580 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "h",
1582 cl->cl_node_type, NULL, internal);
1583 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "h,raw",
1585 cl->cl_node_type, NULL, internal);
1588 cl->cl_last_labeltype = newlabeltype;
1600 * cl driver soft state (unit) structure
1609 * EINVAL - Invalid value in cl->cl_tgt_blocksize or
1610 * cl->cl_blockcount; or label on disk is corrupted
1619 cmlb_validate_geometry(struct cmlb_lun *cl, boolean_t forcerevalid, int flags,
1626 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
1629 if ((cl->cl_f_geometry_is_valid) && (!forcerevalid)) {
1630 if (cl->cl_cur_labeltype == CMLB_LABEL_EFI)
1635 if (cmlb_check_update_blockcount(cl, tg_cookie) != 0)
1638 capacity = cl->cl_blockcount;
1645 cl->cl_map[P0_RAW_DISK].dkl_cylno = 0;
1646 cl->cl_offset[P0_RAW_DISK] = 0;
1651 cl->cl_map[P0_RAW_DISK].dkl_nblk = capacity;
1658 cmlb_resync_geom_caches(cl, capacity, tg_cookie);
1660 cl->cl_label_from_media = CMLB_LABEL_UNDEF;
1661 label_error = cmlb_use_efi(cl, capacity, flags, tg_cookie);
1665 cmlb_dbg(CMLB_TRACE, cl,
1686 (cl->cl_msglog_flag & CMLB_ALLOW_2TB_WARN)) {
1688 cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl),
1693 CMLB_LABEL(cl),
1694 ddi_get_instance(CMLB_DEVINFO(cl)));
1696 cl->cl_msglog_flag &= ~CMLB_ALLOW_2TB_WARN;
1714 if (cl->cl_device_type == DTYPE_DIRECT || ISREMOVABLE(cl)) {
1721 * Note: This will set up cl->cl_solaris_size and
1722 * cl->cl_solaris_offset.
1724 rval = cmlb_read_fdisk(cl, capacity, tg_cookie);
1725 if ((rval != 0) && !ISCD(cl)) {
1726 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
1730 if (cl->cl_solaris_size <= DK_LABEL_LOC) {
1737 cl->cl_f_geometry_is_valid = B_TRUE;
1741 label_addr = (daddr_t)(cl->cl_solaris_offset + DK_LABEL_LOC);
1743 buffer_size = cl->cl_sys_blocksize;
1745 cmlb_dbg(CMLB_TRACE, cl, "cmlb_validate_geometry: "
1752 mutex_exit(CMLB_MUTEX(cl));
1753 rval = DK_TG_READ(cl, dkl, label_addr, buffer_size, tg_cookie);
1754 mutex_enter(CMLB_MUTEX(cl));
1762 if (cmlb_uselabel(cl,
1767 cl->cl_label_from_media = CMLB_LABEL_VTOC;
1790 if ((ISREMOVABLE(cl) || ISHOTPLUGGABLE(cl)) &&
1795 if (!cl->cl_f_geometry_is_valid) {
1796 cmlb_build_default_label(cl, tg_cookie);
1810 cl->cl_map[FDISK_P1 + count].dkl_cylno = UINT16_MAX;
1811 cl->cl_map[FDISK_P1 + count].dkl_nblk =
1812 cl->cl_fmap[count].fmap_nblk;
1814 cl->cl_offset[FDISK_P1 + count] =
1815 cl->cl_fmap[count].fmap_start;
1821 struct dk_map *lp = &cl->cl_map[count];
1822 cl->cl_offset[count] =
1823 cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect * lp->dkl_cylno;
1825 struct dkl_partition *vp = &cl->cl_vtoc.v_part[count];
1827 cl->cl_offset[count] = vp->p_start + cl->cl_solaris_offset;
1847 cmlb_convert_geometry(struct cmlb_lun *cl, diskaddr_t capacity,
1851 ASSERT(cl != NULL);
1852 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
1918 mutex_exit(CMLB_MUTEX(cl));
1920 (DK_TG_GETATTRIBUTE(cl, &tgattribute, tg_cookie) == 0) ?
1922 mutex_enter(CMLB_MUTEX(cl));
1955 * cl driver soft state (unit) structure
1963 cmlb_resync_geom_caches(struct cmlb_lun *cl, diskaddr_t capacity,
1974 ASSERT(cl != NULL);
1975 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
1982 mutex_exit(CMLB_MUTEX(cl));
1984 ret = DK_TG_GETVIRTGEOM(cl, &lgeom, tg_cookie);
1985 mutex_enter(CMLB_MUTEX(cl));
1987 bcopy(&lgeom, &cl->cl_lgeom, sizeof (cl->cl_lgeom));
1993 if (ret != 0 || cl->cl_lgeom.g_nsect == 0 ||
1994 cl->cl_lgeom.g_nhead == 0) {
2004 nhead = cl->cl_lgeom.g_nhead;
2005 nsect = cl->cl_lgeom.g_nsect;
2008 if (ISCD(cl)) {
2034 mutex_exit(CMLB_MUTEX(cl));
2035 (void) DK_TG_GETPHYGEOM(cl, pgeomp, tg_cookie);
2036 mutex_enter(CMLB_MUTEX(cl));
2042 bcopy(pgeomp, &cl->cl_pgeom, sizeof (cl->cl_pgeom));
2044 cmlb_dbg(CMLB_INFO, cl, "cmlb_resync_geom_caches: "
2046 cmlb_dbg(CMLB_INFO, cl,
2048 cl->cl_pgeom.g_ncyl, cl->cl_pgeom.g_acyl,
2049 cl->cl_pgeom.g_nhead, cl->cl_pgeom.g_nsect);
2050 cmlb_dbg(CMLB_INFO, cl, " lbasize: %d; capacity: %ld; "
2051 "intrlv: %d; rpm: %d\n", cl->cl_pgeom.g_secsize,
2052 cl->cl_pgeom.g_capacity, cl->cl_pgeom.g_intrlv,
2053 cl->cl_pgeom.g_rpm);
2064 * cl driver soft state (unit) structure
2075 cmlb_update_ext_minor_nodes(struct cmlb_lun *cl, int num_parts)
2085 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
2086 ASSERT(cl->cl_update_ext_minor_nodes == 1);
2089 (cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
2090 instance = ddi_get_instance(CMLB_DEVINFO(cl));
2094 if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
2099 if (cl->cl_logical_drive_count) {
2100 for (i = 0; i < cl->cl_logical_drive_count; i++) {
2102 ddi_remove_minor_node(CMLB_DEVINFO(cl), name);
2104 ddi_remove_minor_node(CMLB_DEVINFO(cl), name);
2110 (void) ddi_deviname(cl->cl_devi, devnm);
2111 pdip = ddi_get_parent(cl->cl_devi);
2121 if (cmlb_create_minor(CMLB_DEVINFO(cl), name,
2124 cl->cl_node_type, NULL, internal) == DDI_FAILURE) {
2130 ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
2131 cl->cl_logical_drive_count = 0;
2135 if (ddi_create_minor_node(CMLB_DEVINFO(cl), name,
2138 cl->cl_node_type, NULL) == DDI_FAILURE) {
2144 ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
2145 cl->cl_logical_drive_count = 0;
2154 cl->cl_map[FDISK_P4 + 1 + count].dkl_cylno = UINT32_MAX;
2155 cl->cl_map[FDISK_P4 + 1 + count].dkl_nblk =
2156 cl->cl_fmap[FD_NUMPART + count].fmap_nblk;
2157 cl->cl_offset[FDISK_P4 + 1 + count] =
2158 cl->cl_fmap[FD_NUMPART + count].fmap_start;
2161 cl->cl_logical_drive_count = i;
2162 cl->cl_update_ext_minor_nodes = 0;
2172 * cl driver soft state (unit) structure
2203 cmlb_validate_ext_part(struct cmlb_lun *cl, int part, int epart, uint32_t start,
2208 uint32_t ext_start = cl->cl_fmap[part].fmap_start;
2209 uint32_t ext_end = ext_start + cl->cl_fmap[part].fmap_nblk - 1;
2237 ts = cl->cl_fmap[FD_NUMPART].fmap_start;
2238 te = ts + cl->cl_fmap[FD_NUMPART].fmap_nblk - 1;
2250 ts = cl->cl_fmap[i].fmap_start;
2251 te = ts + cl->cl_fmap[i].fmap_nblk - 1;
2269 * cl driver soft state (unit) structure
2285 cmlb_is_linux_swap(struct cmlb_lun *cl, uint32_t part_start, void *tg_cookie)
2292 int sec_sz = cl->cl_sys_blocksize;
2296 ASSERT(cl != NULL);
2297 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
2308 mutex_exit(CMLB_MUTEX(cl));
2309 rval = DK_TG_READ(cl, buf, part_start + DK_LABEL_LOC,
2311 mutex_enter(CMLB_MUTEX(cl));
2313 cmlb_dbg(CMLB_ERROR, cl,
2334 mutex_exit(CMLB_MUTEX(cl));
2335 rval = DK_TG_READ(cl, buf, seek_offset, sec_sz, tg_cookie);
2336 mutex_enter(CMLB_MUTEX(cl));
2339 cmlb_dbg(CMLB_ERROR, cl,
2367 * cl driver soft state (unit) structure
2379 cmlb_read_fdisk(struct cmlb_lun *cl, diskaddr_t capacity, void *tg_cookie)
2383 cl->cl_solaris_offset = 0;
2384 cl->cl_solaris_size = capacity;
2385 bzero(cl->cl_fmap, sizeof (struct fmap) * FD_NUMPART);
2410 ASSERT(cl != NULL);
2411 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
2419 blocksize = cl->cl_tgt_blocksize;
2423 mutex_exit(CMLB_MUTEX(cl));
2424 rval = DK_TG_READ(cl, bufp, 0, blocksize, tg_cookie);
2425 mutex_enter(CMLB_MUTEX(cl));
2428 cmlb_dbg(CMLB_ERROR, cl,
2430 bzero(cl->cl_fmap, sizeof (struct fmap) * FD_NUMPART);
2469 dev_t dev = cmlb_make_device(cl);
2471 if (ddi_getprop(dev, CMLB_DEVINFO(cl), DDI_PROP_DONTPASS,
2474 if (ddi_prop_create(dev, CMLB_DEVINFO(cl), 0,
2477 cmlb_dbg(CMLB_ERROR, cl,
2480 ddi_get_instance(CMLB_DEVINFO(cl)));
2492 cmlb_dbg(CMLB_ERROR, cl,
2494 bzero(cl->cl_fmap, sizeof (struct fmap) * FD_NUMPART);
2501 cmlb_dbg(CMLB_INFO, cl, "cmlb_read_fdisk:\n");
2502 cmlb_dbg(CMLB_INFO, cl, " relsect "
2505 cmlb_dbg(CMLB_INFO, cl,
2533 cl->cl_fmap[i].fmap_start = 0;
2534 cl->cl_fmap[i].fmap_nblk = 0;
2544 cl->cl_fmap[i].fmap_start = relsect;
2545 cl->cl_fmap[i].fmap_nblk = numsect;
2546 cl->cl_fmap[i].fmap_systid = LE_8(fdp->systid);
2560 mutex_exit(CMLB_MUTEX(cl));
2561 rval = DK_TG_READ(cl, bufp,
2564 mutex_enter(CMLB_MUTEX(cl));
2567 cmlb_dbg(CMLB_ERROR, cl,
2592 if (cmlb_validate_ext_part(cl, i, j, abs_secnum,
2597 if ((cl->cl_fmap[j].fmap_start != abs_secnum) ||
2598 (cl->cl_fmap[j].fmap_nblk != ext_numsect) ||
2599 (cl->cl_fmap[j].fmap_systid != systid)) {
2605 cl->cl_update_ext_minor_nodes = 1;
2607 cl->cl_fmap[j].fmap_start = abs_secnum;
2608 cl->cl_fmap[j].fmap_nblk = ext_numsect;
2609 cl->cl_fmap[j].fmap_systid = systid;
2613 (cmlb_is_linux_swap(cl, abs_secnum,
2647 (cmlb_is_linux_swap(cl, relsect,
2659 if (ld_count < cl->cl_logical_drive_count) {
2665 k < cl->cl_logical_drive_count + FD_NUMPART; k++) {
2666 cl->cl_fmap[k].fmap_start = 0;
2667 cl->cl_fmap[k].fmap_nblk = 0;
2668 cl->cl_fmap[k].fmap_systid = 0;
2670 cl->cl_update_ext_minor_nodes = 1;
2672 if (cl->cl_update_ext_minor_nodes) {
2673 rval = cmlb_update_ext_minor_nodes(cl, ld_count);
2679 cmlb_dbg(CMLB_INFO, cl, "fdisk 0x%x 0x%lx",
2680 cl->cl_solaris_offset, cl->cl_solaris_size);
2688 if ((cl->cl_solaris_offset != solaris_offset) ||
2689 (cl->cl_solaris_size != solaris_size) ||
2691 cmlb_dbg(CMLB_INFO, cl, "fdisk moved 0x%x 0x%lx",
2693 bzero(&cl->cl_g, sizeof (struct dk_geom));
2694 bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));
2695 bzero(&cl->cl_map, NDKMAP * (sizeof (struct dk_map)));
2696 cl->cl_f_geometry_is_valid = B_FALSE;
2698 cl->cl_solaris_offset = solaris_offset;
2699 cl->cl_solaris_size = solaris_size;
2801 cmlb_use_efi(struct cmlb_lun *cl, diskaddr_t capacity, int flags,
2819 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
2821 lbasize = cl->cl_sys_blocksize;
2823 cl->cl_reserved = -1;
2824 mutex_exit(CMLB_MUTEX(cl));
2828 rval = DK_TG_READ(cl, buf, 0, lbasize, tg_cookie);
2855 rval = DK_TG_READ(cl, buf, 1, lbasize, tg_cookie);
2868 rval = DK_TG_GETCAP(cl, &cap, tg_cookie);
2879 if ((rval = DK_TG_READ(cl, buf,
2880 cap - ((cl->cl_alter_behavior & CMLB_OFF_BY_ONE) ? 2 : 1),
2890 if (!(cl->cl_alter_behavior & CMLB_OFF_BY_ONE))
2892 if ((rval = DK_TG_READ(cl, buf, cap - 1, lbasize,
2900 cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_WARN,
2908 rval = DK_TG_READ(cl, buf, gpe_lba, EFI_MIN_ARRAY_SIZE, tg_cookie);
2920 mutex_enter(CMLB_MUTEX(cl));
2926 cl->cl_map[i].dkl_cylno =
2928 cl->cl_map[i].dkl_nblk =
2931 cl->cl_offset[i] =
2935 if (cl->cl_reserved == -1) {
2938 cl->cl_reserved = i;
2948 cl->cl_map[i].dkl_cylno = 0;
2955 cl->cl_map[i].dkl_nblk = capacity;
2957 cl->cl_map[i].dkl_nblk = alternate_lba + 1;
2959 cl->cl_offset[i] = 0;
2963 cl->cl_solaris_offset = 0;
2964 cl->cl_solaris_size = capacity;
2965 cl->cl_label_from_media = CMLB_LABEL_EFI;
2966 cl->cl_f_geometry_is_valid = B_TRUE;
2969 bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));
2976 mutex_enter(CMLB_MUTEX(cl));
2987 cl->cl_f_geometry_is_valid = B_FALSE;
3000 * Arguments: cl: unit struct.
3013 cmlb_uselabel(struct cmlb_lun *cl, struct dk_label *labp, int flags)
3026 ASSERT(cl != NULL);
3027 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
3032 if (!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) {
3034 cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl),
3052 if (!ISCD(cl)) {
3054 if (!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) {
3057 cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl),
3068 bzero(&cl->cl_g, sizeof (struct dk_geom));
3069 cl->cl_g.dkg_ncyl = labp->dkl_ncyl;
3070 cl->cl_g.dkg_acyl = labp->dkl_acyl;
3071 cl->cl_g.dkg_bcyl = 0;
3072 cl->cl_g.dkg_nhead = labp->dkl_nhead;
3073 cl->cl_g.dkg_nsect = labp->dkl_nsect;
3074 cl->cl_g.dkg_intrlv = labp->dkl_intrlv;
3077 cl->cl_g.dkg_gap1 = labp->dkl_gap1;
3078 cl->cl_g.dkg_gap2 = labp->dkl_gap2;
3079 cl->cl_g.dkg_bhead = labp->dkl_bhead;
3082 cl->cl_dkg_skew = labp->dkl_skew;
3086 cl->cl_g.dkg_apc = labp->dkl_apc;
3096 cl->cl_g.dkg_rpm = (labp->dkl_rpm != 0) ? labp->dkl_rpm : 3600;
3097 cl->cl_g.dkg_pcyl = (labp->dkl_pcyl != 0) ? labp->dkl_pcyl :
3098 (cl->cl_g.dkg_ncyl + cl->cl_g.dkg_acyl);
3104 cl->cl_g.dkg_read_reinstruct = labp->dkl_read_reinstruct;
3105 cl->cl_g.dkg_write_reinstruct = labp->dkl_write_reinstruct;
3110 cl->cl_map[i].dkl_cylno = labp->dkl_map[i].dkl_cylno;
3111 cl->cl_map[i].dkl_nblk = labp->dkl_map[i].dkl_nblk;
3121 cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_WARN,
3128 cl->cl_map[i].dkl_cylno = vpartp->p_start / track_capacity;
3129 cl->cl_map[i].dkl_nblk = vpartp->p_size;
3134 bcopy(&labp->dkl_vtoc, &cl->cl_vtoc, sizeof (struct dk_vtoc));
3140 bcopy(labp->dkl_asciilabel, cl->cl_asciilabel, LEN_DKL_ASCII);
3144 track_capacity = (cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect);
3145 label_capacity = (cl->cl_g.dkg_ncyl * track_capacity);
3147 if (cl->cl_g.dkg_acyl) {
3150 label_capacity += (track_capacity * cl->cl_g.dkg_acyl);
3163 cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_WARN,
3171 cl->cl_f_geometry_is_valid = B_TRUE;
3181 if (label_capacity <= cl->cl_blockcount) {
3190 cmlb_dbg(CMLB_ERROR, cl,
3192 label_capacity, cl->cl_blockcount);
3193 cl->cl_solaris_size = label_capacity;
3199 if (ISCD(cl)) {
3208 (part_end > cl->cl_blockcount)) {
3209 cl->cl_f_geometry_is_valid = B_FALSE;
3219 (part_end > cl->cl_blockcount)) {
3220 cl->cl_f_geometry_is_valid = B_FALSE;
3226 /* label_capacity > cl->cl_blockcount */
3228 cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_WARN,
3230 cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_CONT,
3232 label_capacity, cl->cl_blockcount);
3234 cl->cl_f_geometry_is_valid = B_FALSE;
3240 cmlb_dbg(CMLB_INFO, cl, "cmlb_uselabel: (label geometry)\n");
3241 cmlb_dbg(CMLB_INFO, cl,
3243 cl->cl_g.dkg_ncyl, cl->cl_g.dkg_acyl,
3244 cl->cl_g.dkg_nhead, cl->cl_g.dkg_nsect);
3246 cmlb_dbg(CMLB_INFO, cl,
3248 cl->cl_blockcount, cl->cl_g.dkg_intrlv, cl->cl_g.dkg_rpm);
3249 cmlb_dbg(CMLB_INFO, cl, " wrt_reinstr: %d; rd_reinstr: %d\n",
3250 cl->cl_g.dkg_write_reinstruct, cl->cl_g.dkg_read_reinstruct);
3252 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
3268 cmlb_build_default_label(struct cmlb_lun *cl, void *tg_cookie)
3277 ASSERT(cl != NULL);
3278 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
3287 if (!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) {
3292 bzero(&cl->cl_g, sizeof (struct dk_geom));
3293 bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));
3294 bzero(&cl->cl_map, NDKMAP * (sizeof (struct dk_map)));
3313 cl->cl_solaris_size = cl->cl_blockcount;
3314 if (ISCD(cl)) {
3334 mutex_exit(CMLB_MUTEX(cl));
3336 (DK_TG_GETATTRIBUTE(cl, &tgattribute, tg_cookie) == 0) ?
3338 mutex_enter(CMLB_MUTEX(cl));
3341 cl->cl_g.dkg_nhead = 64;
3342 cl->cl_g.dkg_nsect = 32;
3343 cl->cl_g.dkg_ncyl = cl->cl_blockcount / (64 * 32);
3344 cl->cl_solaris_size = (diskaddr_t)cl->cl_g.dkg_ncyl *
3345 cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect;
3347 cl->cl_g.dkg_ncyl = 1;
3348 cl->cl_g.dkg_nhead = 1;
3349 cl->cl_g.dkg_nsect = cl->cl_blockcount;
3352 if (cl->cl_blockcount < 160) {
3354 cl->cl_g.dkg_nhead = 1;
3355 cl->cl_g.dkg_ncyl = cl->cl_blockcount;
3356 cl->cl_g.dkg_nsect = 1;
3357 } else if (cl->cl_blockcount <= 0x1000) {
3359 cl->cl_g.dkg_nhead = 2;
3360 cl->cl_g.dkg_ncyl = 80;
3361 cl->cl_g.dkg_nsect = cl->cl_blockcount / (2 * 80);
3362 } else if (cl->cl_blockcount <= 0x200000) {
3363 cl->cl_g.dkg_nhead = 64;
3364 cl->cl_g.dkg_nsect = 32;
3365 cl->cl_g.dkg_ncyl = cl->cl_blockcount / (64 * 32);
3367 cl->cl_g.dkg_nhead = 255;
3369 cl->cl_g.dkg_nsect = ((cl->cl_blockcount +
3373 if (cl->cl_g.dkg_nsect == 0)
3374 cl->cl_g.dkg_nsect = (UINT16_MAX / 63) * 63;
3376 cl->cl_g.dkg_ncyl = cl->cl_blockcount /
3377 (255 * cl->cl_g.dkg_nsect);
3380 cl->cl_solaris_size =
3381 (diskaddr_t)cl->cl_g.dkg_ncyl * cl->cl_g.dkg_nhead *
3382 cl->cl_g.dkg_nsect;
3386 cl->cl_g.dkg_acyl = 0;
3387 cl->cl_g.dkg_bcyl = 0;
3388 cl->cl_g.dkg_rpm = 200;
3389 cl->cl_asciilabel[0] = '\0';
3390 cl->cl_g.dkg_pcyl = cl->cl_g.dkg_ncyl;
3392 cl->cl_map[0].dkl_cylno = 0;
3393 cl->cl_map[0].dkl_nblk = cl->cl_solaris_size;
3395 cl->cl_map[2].dkl_cylno = 0;
3396 cl->cl_map[2].dkl_nblk = cl->cl_solaris_size;
3400 if (cl->cl_solaris_size == 0) {
3405 cl->cl_f_geometry_is_valid = B_TRUE;
3415 if (ISCD(cl)) {
3416 phys_spc = cl->cl_pgeom.g_nhead * cl->cl_pgeom.g_nsect;
3428 if (cl->cl_alter_behavior & CMLB_OFF_BY_ONE)
3429 capacity = cl->cl_blockcount - 1;
3431 capacity = cl->cl_blockcount;
3434 cmlb_convert_geometry(cl, capacity, &cl_g, tg_cookie);
3435 bcopy(&cl_g, &cl->cl_g, sizeof (cl->cl_g));
3436 phys_spc = cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect;
3441 cl->cl_g.dkg_pcyl = cl->cl_solaris_size / phys_spc;
3442 if (cl->cl_alter_behavior & CMLB_FAKE_LABEL_ONE_PARTITION) {
3444 cl->cl_g.dkg_ncyl = cl->cl_g.dkg_pcyl;
3445 disksize = cl->cl_solaris_size;
3447 cl->cl_g.dkg_acyl = DK_ACYL;
3448 cl->cl_g.dkg_ncyl = cl->cl_g.dkg_pcyl - DK_ACYL;
3449 disksize = cl->cl_g.dkg_ncyl * phys_spc;
3452 if (ISCD(cl)) {
3457 disksize = cl->cl_solaris_size;
3458 cl->cl_g.dkg_nhead = 1;
3459 cl->cl_g.dkg_nsect = 1;
3460 cl->cl_g.dkg_rpm =
3461 (cl->cl_pgeom.g_rpm == 0) ? 200 : cl->cl_pgeom.g_rpm;
3463 cl->cl_vtoc.v_part[0].p_start = 0;
3464 cl->cl_vtoc.v_part[0].p_size = disksize;
3465 cl->cl_vtoc.v_part[0].p_tag = V_BACKUP;
3466 cl->cl_vtoc.v_part[0].p_flag = V_UNMNT;
3468 cl->cl_map[0].dkl_cylno = 0;
3469 cl->cl_map[0].dkl_nblk = disksize;
3470 cl->cl_offset[0] = 0;
3476 cl->cl_g.dkg_rpm =
3477 (cl->cl_pgeom.g_rpm == 0) ? 3600: cl->cl_pgeom.g_rpm;
3478 cl->cl_vtoc.v_sectorsz = cl->cl_sys_blocksize;
3481 cl->cl_vtoc.v_part[8].p_start = 0;
3482 cl->cl_vtoc.v_part[8].p_size = phys_spc;
3483 cl->cl_vtoc.v_part[8].p_tag = V_BOOT;
3484 cl->cl_vtoc.v_part[8].p_flag = V_UNMNT;
3486 cl->cl_map[8].dkl_cylno = 0;
3487 cl->cl_map[8].dkl_nblk = phys_spc;
3488 cl->cl_offset[8] = 0;
3490 if ((cl->cl_alter_behavior &
3492 cl->cl_device_type == DTYPE_DIRECT) {
3493 cl->cl_vtoc.v_part[9].p_start = phys_spc;
3494 cl->cl_vtoc.v_part[9].p_size = 2 * phys_spc;
3495 cl->cl_vtoc.v_part[9].p_tag = V_ALTSCTR;
3496 cl->cl_vtoc.v_part[9].p_flag = 0;
3498 cl->cl_map[9].dkl_cylno = 1;
3499 cl->cl_map[9].dkl_nblk = 2 * phys_spc;
3500 cl->cl_offset[9] = phys_spc;
3504 cl->cl_g.dkg_apc = 0;
3507 cl->cl_vtoc.v_part[2].p_start = 0;
3508 cl->cl_vtoc.v_part[2].p_size = disksize;
3509 cl->cl_vtoc.v_part[2].p_tag = V_BACKUP;
3510 cl->cl_vtoc.v_part[2].p_flag = V_UNMNT;
3512 cl->cl_map[2].dkl_cylno = 0;
3513 cl->cl_map[2].dkl_nblk = disksize;
3514 cl->cl_offset[2] = 0;
3519 if (cl->cl_alter_behavior & CMLB_FAKE_LABEL_ONE_PARTITION) {
3520 cl->cl_vtoc.v_part[0].p_start = 0;
3521 cl->cl_vtoc.v_part[0].p_tag = V_UNASSIGNED;
3522 cl->cl_vtoc.v_part[0].p_flag = 0;
3523 cl->cl_vtoc.v_part[0].p_size = disksize;
3524 cl->cl_map[0].dkl_cylno = 0;
3525 cl->cl_map[0].dkl_nblk = disksize;
3526 cl->cl_offset[0] = 0;
3529 (void) sprintf(cl->cl_vtoc.v_asciilabel, "DEFAULT cyl %d alt %d"
3530 " hd %d sec %d", cl->cl_g.dkg_ncyl, cl->cl_g.dkg_acyl,
3531 cl->cl_g.dkg_nhead, cl->cl_g.dkg_nsect);
3537 cl->cl_g.dkg_read_reinstruct = 0;
3538 cl->cl_g.dkg_write_reinstruct = 0;
3540 cl->cl_g.dkg_intrlv = 1;
3542 cl->cl_vtoc.v_sanity = VTOC_SANE;
3543 cl->cl_vtoc.v_nparts = V_NUMPAR;
3544 cl->cl_vtoc.v_version = V_VERSION;
3546 cl->cl_f_geometry_is_valid = B_TRUE;
3547 cl->cl_label_from_media = CMLB_LABEL_UNDEF;
3549 cmlb_dbg(CMLB_INFO, cl,
3552 cl->cl_g.dkg_ncyl, cl->cl_g.dkg_acyl, cl->cl_g.dkg_nhead,
3553 cl->cl_g.dkg_nsect, cl->cl_blockcount);
3611 cmlb_dkio_get_geometry(struct cmlb_lun *cl, caddr_t arg, int flag,
3622 mutex_enter(CMLB_MUTEX(cl));
3623 rval = cmlb_validate_geometry(cl, B_TRUE, 0, tg_cookie);
3626 cl->cl_alter_behavior & CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8) {
3632 if (cl->cl_blockcount <= CMLB_OLDVTOC_LIMIT) {
3633 cmlb_setup_default_geometry(cl, tg_cookie);
3639 mutex_exit(CMLB_MUTEX(cl));
3644 if (cl->cl_solaris_size == 0) {
3645 mutex_exit(CMLB_MUTEX(cl));
3656 bcopy(&cl->cl_g, tmp_geom, sizeof (struct dk_geom));
3663 mutex_exit(CMLB_MUTEX(cl));
3700 cmlb_dkio_set_geometry(struct cmlb_lun *cl, caddr_t arg, int flag)
3709 if (cl->cl_solaris_size == 0) {
3726 mutex_enter(CMLB_MUTEX(cl));
3727 bcopy(tmp_geom, &cl->cl_g, sizeof (struct dk_geom));
3729 lp = &cl->cl_map[i];
3730 cl->cl_offset[i] =
3731 cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect * lp->dkl_cylno;
3733 cl->cl_offset[i] += cl->cl_solaris_offset;
3736 cl->cl_f_geometry_is_valid = B_FALSE;
3737 mutex_exit(CMLB_MUTEX(cl));
3765 cmlb_dkio_get_partition(struct cmlb_lun *cl, caddr_t arg, int flag,
3775 mutex_enter(CMLB_MUTEX(cl));
3776 if ((rval = cmlb_validate_geometry(cl, B_TRUE, 0, tg_cookie)) != 0) {
3777 mutex_exit(CMLB_MUTEX(cl));
3780 mutex_exit(CMLB_MUTEX(cl));
3783 if (cl->cl_solaris_size == 0) {
3795 dk_map32[i].dkl_cylno = cl->cl_map[i].dkl_cylno;
3796 dk_map32[i].dkl_nblk = cl->cl_map[i].dkl_nblk;
3807 rval = ddi_copyout(cl->cl_map, (void *)arg, size, flag);
3815 rval = ddi_copyout(cl->cl_map, (void *)arg, size, flag);
3843 cmlb_dkio_set_partition(struct cmlb_lun *cl, caddr_t arg, int flag)
3860 mutex_enter(CMLB_MUTEX(cl));
3862 if (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
3863 mutex_exit(CMLB_MUTEX(cl));
3866 mutex_exit(CMLB_MUTEX(cl));
3867 if (cl->cl_solaris_size == 0) {
3903 mutex_enter(CMLB_MUTEX(cl));
3905 bcopy(dk_map, cl->cl_map, size);
3907 vp = (struct dkl_partition *)&(cl->cl_vtoc);
3910 lp = &cl->cl_map[i];
3911 cl->cl_offset[i] =
3912 cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect * lp->dkl_cylno;
3914 vp->p_start = cl->cl_offset[i];
3919 cl->cl_offset[i] += cl->cl_solaris_offset;
3922 mutex_exit(CMLB_MUTEX(cl));
3950 cmlb_dkio_get_vtoc(struct cmlb_lun *cl, caddr_t arg, int flag, void *tg_cookie)
3957 mutex_enter(CMLB_MUTEX(cl));
3958 if (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
3959 mutex_exit(CMLB_MUTEX(cl));
3963 rval = cmlb_validate_geometry(cl, B_TRUE, 0, tg_cookie);
3967 (cl->cl_alter_behavior & CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8)) {
3973 if (cl->cl_blockcount <= CMLB_OLDVTOC_LIMIT) {
3974 cmlb_setup_default_geometry(cl, tg_cookie);
3980 mutex_exit(CMLB_MUTEX(cl));
3985 cmlb_build_user_vtoc(cl, &user_vtoc);
3986 mutex_exit(CMLB_MUTEX(cl));
4015 mutex_exit(CMLB_MUTEX(cl));
4027 ASSERT(sizeof (cl->cl_vtoc) == sizeof (struct vtoc32));
4030 if (ddi_copyout(&(cl->cl_vtoc), (void *)arg,
4031 sizeof (cl->cl_vtoc), flag)) {
4039 vtoc32tovtoc(cl->cl_vtoc, user_vtoc);
4048 if (ddi_copyout(&(cl->cl_vtoc), (void *)arg, sizeof (cl->cl_vtoc),
4065 cmlb_dkio_get_extvtoc(struct cmlb_lun *cl, caddr_t arg, int flag,
4075 mutex_enter(CMLB_MUTEX(cl));
4076 rval = cmlb_validate_geometry(cl, B_TRUE, 0, tg_cookie);
4080 (cl->cl_alter_behavior & CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8)) {
4086 if (cl->cl_blockcount <= CMLB_OLDVTOC_LIMIT) {
4087 cmlb_setup_default_geometry(cl, tg_cookie);
4093 mutex_exit(CMLB_MUTEX(cl));
4098 cmlb_build_user_vtoc(cl, &user_vtoc);
4099 mutex_exit(CMLB_MUTEX(cl));
4127 vtoc32tovtoc(cl->cl_vtoc, ext_vtoc);
4128 mutex_exit(CMLB_MUTEX(cl));
4145 cmlb_dkio_get_efi(struct cmlb_lun *cl, caddr_t arg, int flag, void *tg_cookie)
4163 mutex_enter(CMLB_MUTEX(cl));
4164 if ((cmlb_check_update_blockcount(cl, tg_cookie) != 0) ||
4165 (cl->cl_tgt_blocksize == 0) ||
4166 (user_efi.dki_length % cl->cl_sys_blocksize)) {
4167 mutex_exit(CMLB_MUTEX(cl));
4170 if (cl->cl_tgt_blocksize != cl->cl_sys_blocksize)
4171 tgt_lba = tgt_lba * cl->cl_tgt_blocksize /
4172 cl->cl_sys_blocksize;
4173 mutex_exit(CMLB_MUTEX(cl));
4176 rval = DK_TG_READ(cl, buffer, tgt_lba, user_efi.dki_length, tg_cookie);
4192 * Arguments: cl - driver soft state (unit) structure
4196 cmlb_build_user_vtoc(struct cmlb_lun *cl, struct vtoc *user_vtoc)
4204 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
4211 user_vtoc->v_bootinfo[0] = cl->cl_vtoc.v_bootinfo[0];
4212 user_vtoc->v_bootinfo[1] = cl->cl_vtoc.v_bootinfo[1];
4213 user_vtoc->v_bootinfo[2] = cl->cl_vtoc.v_bootinfo[2];
4215 user_vtoc->v_version = cl->cl_vtoc.v_version;
4216 bcopy(cl->cl_vtoc.v_volume, user_vtoc->v_volume, LEN_DKL_VVOL);
4217 user_vtoc->v_sectorsz = cl->cl_sys_blocksize;
4218 user_vtoc->v_nparts = cl->cl_vtoc.v_nparts;
4221 user_vtoc->v_reserved[i] = cl->cl_vtoc.v_reserved[i];
4229 lmap = cl->cl_map;
4230 lpart = (struct dk_map2 *)cl->cl_vtoc.v_part;
4233 nblks = cl->cl_g.dkg_nsect * cl->cl_g.dkg_nhead;
4245 user_vtoc->timestamp[i] = (time_t)cl->cl_vtoc.v_timestamp[i];
4248 bcopy(cl->cl_asciilabel, user_vtoc->v_asciilabel, LEN_DKL_ASCII);
4253 cmlb_dkio_partition(struct cmlb_lun *cl, caddr_t arg, int flag,
4269 buffer = kmem_alloc(cl->cl_sys_blocksize, KM_SLEEP);
4270 rval = DK_TG_READ(cl, buffer, 1, cl->cl_sys_blocksize, tg_cookie);
4289 n_gpe_per_blk = cl->cl_sys_blocksize / sizeof (efi_gpe_t);
4291 rval = DK_TG_READ(cl, buffer, gpe_lba, cl->cl_sys_blocksize, tg_cookie);
4312 kmem_free(buffer, cl->cl_sys_blocksize);
4342 cmlb_dkio_set_vtoc(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag,
4350 (cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
4352 if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
4384 mutex_enter(CMLB_MUTEX(cl));
4386 if (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
4387 mutex_exit(CMLB_MUTEX(cl));
4392 if (cl->cl_tgt_blocksize != cl->cl_sys_blocksize) {
4393 mutex_exit(CMLB_MUTEX(cl));
4398 if (cl->cl_g.dkg_ncyl == 0) {
4399 mutex_exit(CMLB_MUTEX(cl));
4403 mutex_exit(CMLB_MUTEX(cl));
4404 cmlb_clear_efi(cl, tg_cookie);
4405 ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd");
4406 ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd,raw");
4414 ddi_remove_minor_node(CMLB_DEVINFO(cl), "h");
4415 ddi_remove_minor_node(CMLB_DEVINFO(cl), "h,raw");
4417 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "h",
4419 cl->cl_node_type, NULL, internal);
4420 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "h,raw",
4422 cl->cl_node_type, NULL, internal);
4423 mutex_enter(CMLB_MUTEX(cl));
4425 if ((rval = cmlb_build_label_vtoc(cl, &user_vtoc)) == 0) {
4426 if ((rval = cmlb_write_label(cl, tg_cookie)) == 0) {
4427 if (cmlb_validate_geometry(cl,
4429 cmlb_dbg(CMLB_ERROR, cl,
4433 cl->cl_msglog_flag |= CMLB_ALLOW_2TB_WARN;
4436 mutex_exit(CMLB_MUTEX(cl));
4444 cmlb_dkio_set_extvtoc(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag,
4451 if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
4478 (cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
4479 mutex_enter(CMLB_MUTEX(cl));
4481 if (cl->cl_tgt_blocksize != cl->cl_sys_blocksize) {
4482 mutex_exit(CMLB_MUTEX(cl));
4487 if (cl->cl_g.dkg_ncyl == 0) {
4488 mutex_exit(CMLB_MUTEX(cl));
4492 mutex_exit(CMLB_MUTEX(cl));
4493 cmlb_clear_efi(cl, tg_cookie);
4494 ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd");
4495 ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd,raw");
4502 ddi_remove_minor_node(CMLB_DEVINFO(cl), "h");
4503 ddi_remove_minor_node(CMLB_DEVINFO(cl), "h,raw");
4505 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "h",
4507 cl->cl_node_type, NULL, internal);
4508 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "h,raw",
4510 cl->cl_node_type, NULL, internal);
4512 mutex_enter(CMLB_MUTEX(cl));
4514 if ((rval = cmlb_build_label_vtoc(cl, &user_vtoc)) == 0) {
4515 if ((rval = cmlb_write_label(cl, tg_cookie)) == 0) {
4516 if (cmlb_validate_geometry(cl,
4518 cmlb_dbg(CMLB_ERROR, cl,
4524 mutex_exit(CMLB_MUTEX(cl));
4534 * Arguments: cl - driver soft state (unit) structure
4542 cmlb_build_label_vtoc(struct cmlb_lun *cl, struct vtoc *user_vtoc)
4553 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
4557 user_vtoc->v_sectorsz != cl->cl_sys_blocksize ||
4559 cmlb_dbg(CMLB_INFO, cl,
4564 nblks = cl->cl_g.dkg_nsect * cl->cl_g.dkg_nhead;
4566 cmlb_dbg(CMLB_INFO, cl,
4575 cmlb_dbg(CMLB_INFO, cl,
4586 if (ncyl > (int)cl->cl_g.dkg_ncyl) {
4587 cmlb_dbg(CMLB_INFO, cl,
4591 ncyl, cl->cl_g.dkg_ncyl, vpart->p_size,
4607 vtoctovtoc32((*user_vtoc), (*((struct vtoc32 *)&(cl->cl_vtoc))));
4613 lmap = cl->cl_map;
4623 cl->cl_vtoc.v_bootinfo[0] = (uint32_t)user_vtoc->v_bootinfo[0];
4624 cl->cl_vtoc.v_bootinfo[1] = (uint32_t)user_vtoc->v_bootinfo[1];
4625 cl->cl_vtoc.v_bootinfo[2] = (uint32_t)user_vtoc->v_bootinfo[2];
4627 cl->cl_vtoc.v_sanity = (uint32_t)user_vtoc->v_sanity;
4628 cl->cl_vtoc.v_version = (uint32_t)user_vtoc->v_version;
4630 bcopy(user_vtoc->v_volume, cl->cl_vtoc.v_volume, LEN_DKL_VVOL);
4632 cl->cl_vtoc.v_nparts = user_vtoc->v_nparts;
4635 cl->cl_vtoc.v_reserved[i] = user_vtoc->v_reserved[i];
4642 lmap = cl->cl_map;
4643 lpart = cl->cl_vtoc.v_part;
4659 cl->cl_vtoc.v_timestamp[i] = TIME32_MAX;
4661 cl->cl_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
4664 cl->cl_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
4668 bcopy(user_vtoc->v_asciilabel, cl->cl_asciilabel, LEN_DKL_ASCII);
4681 * cl driver soft state (unit) structure
4688 cmlb_clear_efi(struct cmlb_lun *cl, void *tg_cookie)
4694 ASSERT(!mutex_owned(CMLB_MUTEX(cl)));
4696 mutex_enter(CMLB_MUTEX(cl));
4697 cl->cl_reserved = -1;
4698 mutex_exit(CMLB_MUTEX(cl));
4700 gpt = kmem_alloc(cl->cl_sys_blocksize, KM_SLEEP);
4702 if (DK_TG_READ(cl, gpt, 1, cl->cl_sys_blocksize, tg_cookie) != 0) {
4711 if (rval = DK_TG_WRITE(cl, gpt, 1, cl->cl_sys_blocksize,
4713 cmlb_dbg(CMLB_INFO, cl,
4718 rval = DK_TG_GETCAP(cl, &cap, tg_cookie);
4723 if ((rval = DK_TG_READ(cl, gpt, cap - 1, cl->cl_sys_blocksize,
4731 cmlb_dbg(CMLB_TRACE, cl,
4734 if ((rval = DK_TG_WRITE(cl, gpt, cap - 1, cl->cl_sys_blocksize,
4736 cmlb_dbg(CMLB_INFO, cl,
4744 if ((rval = DK_TG_READ(cl, gpt, cap - 2,
4745 cl->cl_sys_blocksize, tg_cookie)) != 0) {
4752 cmlb_dbg(CMLB_TRACE, cl,
4756 if ((rval = DK_TG_WRITE(cl, gpt, cap - 2,
4757 cl->cl_sys_blocksize, tg_cookie))) {
4758 cmlb_dbg(CMLB_INFO, cl,
4766 kmem_free(gpt, cl->cl_sys_blocksize);
4775 * cl driver soft state (unit) structure
4785 cmlb_set_vtoc(struct cmlb_lun *cl, struct dk_label *dkl, void *tg_cookie)
4795 label_addr = cl->cl_solaris_offset + DK_LABEL_LOC;
4801 rval = DK_TG_WRITE(cl, dkl, label_addr, cl->cl_sys_blocksize,
4830 blk += cl->cl_solaris_offset;
4832 rval = DK_TG_WRITE(cl, dkl, blk, cl->cl_sys_blocksize,
4834 cmlb_dbg(CMLB_INFO, cl,
4850 * cl driver soft state (unit) structure
4858 cmlb_clear_vtoc(struct cmlb_lun *cl, void *tg_cookie)
4862 mutex_exit(CMLB_MUTEX(cl));
4863 dkl = kmem_zalloc(cl->cl_sys_blocksize, KM_SLEEP);
4864 mutex_enter(CMLB_MUTEX(cl));
4869 dkl->dkl_apc = cl->cl_g.dkg_apc;
4870 dkl->dkl_ncyl = cl->cl_g.dkg_ncyl;
4871 dkl->dkl_acyl = cl->cl_g.dkg_acyl;
4872 dkl->dkl_nhead = cl->cl_g.dkg_nhead;
4873 dkl->dkl_nsect = cl->cl_g.dkg_nsect;
4874 mutex_exit(CMLB_MUTEX(cl));
4875 (void) cmlb_set_vtoc(cl, dkl, tg_cookie);
4876 kmem_free(dkl, cl->cl_sys_blocksize);
4878 mutex_enter(CMLB_MUTEX(cl));
4888 * cl cmlb handle
4901 cmlb_write_label(struct cmlb_lun *cl, void *tg_cookie)
4909 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
4910 mutex_exit(CMLB_MUTEX(cl));
4911 dkl = kmem_zalloc(cl->cl_sys_blocksize, KM_SLEEP);
4912 mutex_enter(CMLB_MUTEX(cl));
4914 bcopy(&cl->cl_vtoc, &dkl->dkl_vtoc, sizeof (struct dk_vtoc));
4915 dkl->dkl_rpm = cl->cl_g.dkg_rpm;
4916 dkl->dkl_pcyl = cl->cl_g.dkg_pcyl;
4917 dkl->dkl_apc = cl->cl_g.dkg_apc;
4918 dkl->dkl_intrlv = cl->cl_g.dkg_intrlv;
4919 dkl->dkl_ncyl = cl->cl_g.dkg_ncyl;
4920 dkl->dkl_acyl = cl->cl_g.dkg_acyl;
4921 dkl->dkl_nhead = cl->cl_g.dkg_nhead;
4922 dkl->dkl_nsect = cl->cl_g.dkg_nsect;
4925 dkl->dkl_obs1 = cl->cl_g.dkg_obs1;
4926 dkl->dkl_obs2 = cl->cl_g.dkg_obs2;
4927 dkl->dkl_obs3 = cl->cl_g.dkg_obs3;
4929 dkl->dkl_map[i].dkl_cylno = cl->cl_map[i].dkl_cylno;
4930 dkl->dkl_map[i].dkl_nblk = cl->cl_map[i].dkl_nblk;
4932 bcopy(cl->cl_asciilabel, dkl->dkl_asciilabel, LEN_DKL_ASCII);
4934 dkl->dkl_skew = cl->cl_dkg_skew;
4940 dkl->dkl_write_reinstruct = cl->cl_g.dkg_write_reinstruct;
4941 dkl->dkl_read_reinstruct = cl->cl_g.dkg_read_reinstruct;
4952 mutex_exit(CMLB_MUTEX(cl));
4954 rval = cmlb_set_vtoc(cl, dkl, tg_cookie);
4956 kmem_free(dkl, cl->cl_sys_blocksize);
4957 mutex_enter(CMLB_MUTEX(cl));
4968 cmlb_dkio_set_efi(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag,
4981 (cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
4983 if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
4996 mutex_enter(CMLB_MUTEX(cl));
4997 if ((cmlb_check_update_blockcount(cl, tg_cookie) != 0) ||
4998 (cl->cl_tgt_blocksize == 0) ||
4999 (user_efi.dki_length % cl->cl_sys_blocksize)) {
5000 mutex_exit(CMLB_MUTEX(cl));
5003 if (cl->cl_tgt_blocksize != cl->cl_sys_blocksize)
5005 cl->cl_tgt_blocksize / cl->cl_sys_blocksize;
5006 mutex_exit(CMLB_MUTEX(cl));
5016 mutex_enter(CMLB_MUTEX(cl));
5017 if (cl->cl_vtoc.v_sanity == VTOC_SANE) {
5018 cmlb_dbg(CMLB_TRACE, cl,
5020 if (cl->cl_label_from_media == CMLB_LABEL_VTOC)
5021 cmlb_clear_vtoc(cl, tg_cookie);
5022 bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));
5023 mutex_exit(CMLB_MUTEX(cl));
5024 ddi_remove_minor_node(CMLB_DEVINFO(cl), "h");
5025 ddi_remove_minor_node(CMLB_DEVINFO(cl), "h,raw");
5026 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "wd",
5029 cl->cl_node_type, NULL, internal);
5030 (void) cmlb_create_minor(CMLB_DEVINFO(cl), "wd,raw",
5033 cl->cl_node_type, NULL, internal);
5035 mutex_exit(CMLB_MUTEX(cl));
5037 rval = DK_TG_WRITE(cl, buffer, tgt_lba, user_efi.dki_length,
5041 mutex_enter(CMLB_MUTEX(cl));
5042 cl->cl_f_geometry_is_valid = B_FALSE;
5043 mutex_exit(CMLB_MUTEX(cl));
5072 cmlb_dkio_get_mboot(struct cmlb_lun *cl, caddr_t arg, int flag, void *tg_cookie)
5080 if ((!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) || (arg == NULL)) {
5090 buffer_size = cl->cl_sys_blocksize;
5092 cmlb_dbg(CMLB_TRACE, cl,
5096 if ((rval = DK_TG_READ(cl, mboot, 0, buffer_size, tg_cookie)) == 0) {
5130 cmlb_dkio_set_mboot(struct cmlb_lun *cl, caddr_t arg, int flag, void *tg_cookie)
5137 ASSERT(!mutex_owned(CMLB_MUTEX(cl)));
5140 if (!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) {
5149 mboot = kmem_zalloc(cl->cl_sys_blocksize, KM_SLEEP);
5152 cl->cl_sys_blocksize, flag) != 0) {
5153 kmem_free(mboot, cl->cl_sys_blocksize);
5160 kmem_free(mboot, cl->cl_sys_blocksize);
5164 rval = DK_TG_WRITE(cl, mboot, 0, cl->cl_sys_blocksize, tg_cookie);
5166 mutex_enter(CMLB_MUTEX(cl));
5173 rval = cmlb_update_fdisk_and_vtoc(cl, tg_cookie);
5174 if ((!cl->cl_f_geometry_is_valid) || (rval != 0)) {
5175 mutex_exit(CMLB_MUTEX(cl));
5176 kmem_free(mboot, cl->cl_sys_blocksize);
5182 cmlb_setup_default_geometry(cl, tg_cookie);
5191 if (cl->cl_blockcount <= CMLB_EXTVTOC_LIMIT)
5192 cmlb_setup_default_geometry(cl, tg_cookie);
5195 cl->cl_msglog_flag |= CMLB_ALLOW_2TB_WARN;
5196 mutex_exit(CMLB_MUTEX(cl));
5197 kmem_free(mboot, cl->cl_sys_blocksize);
5205 cmlb_dkio_set_ext_part(struct cmlb_lun *cl, caddr_t arg, int flag,
5211 ASSERT(!mutex_owned(CMLB_MUTEX(cl)));
5213 mutex_enter(CMLB_MUTEX(cl));
5214 capacity = cl->cl_blockcount;
5215 fdisk_rval = cmlb_read_fdisk(cl, capacity, tg_cookie);
5217 mutex_exit(CMLB_MUTEX(cl));
5221 mutex_exit(CMLB_MUTEX(cl));
5233 * cl driver soft state (unit) structure
5242 cmlb_setup_default_geometry(struct cmlb_lun *cl, void *tg_cookie)
5250 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
5253 bzero(&cl->cl_g, sizeof (struct dk_geom));
5254 bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));
5255 bzero(cl->cl_map, NDKMAP * (sizeof (struct dk_map)));
5269 if (cl->cl_alter_behavior & CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8) {
5275 mutex_exit(CMLB_MUTEX(cl));
5276 ret = DK_TG_GETPHYGEOM(cl, pgeomp, tg_cookie);
5277 mutex_enter(CMLB_MUTEX(cl));
5282 cmlb_dbg(CMLB_ERROR, cl,
5291 if (ISCD(cl)) {
5292 cl->cl_g.dkg_ncyl = 1;
5293 cl->cl_g.dkg_nhead = 1;
5294 cl->cl_g.dkg_nsect = cl->cl_blockcount;
5295 } else if (cl->cl_blockcount < 160) {
5297 cl->cl_g.dkg_nhead = 1;
5298 cl->cl_g.dkg_ncyl = cl->cl_blockcount;
5299 cl->cl_g.dkg_nsect = 1;
5300 } else if (cl->cl_blockcount <= 0x1000) {
5302 cl->cl_g.dkg_nhead = 2;
5303 cl->cl_g.dkg_ncyl = 80;
5304 cl->cl_g.dkg_pcyl = 80;
5305 cl->cl_g.dkg_nsect = cl->cl_blockcount / (2 * 80);
5306 } else if (cl->cl_blockcount <= 0x200000) {
5307 cl->cl_g.dkg_nhead = 64;
5308 cl->cl_g.dkg_nsect = 32;
5309 cl->cl_g.dkg_ncyl = cl->cl_blockcount / (64 * 32);
5311 cl->cl_g.dkg_nhead = 255;
5313 cl->cl_g.dkg_nsect = ((cl->cl_blockcount +
5317 if (cl->cl_g.dkg_nsect == 0)
5318 cl->cl_g.dkg_nsect = (UINT16_MAX / 63) * 63;
5320 cl->cl_g.dkg_ncyl = cl->cl_blockcount /
5321 (255 * cl->cl_g.dkg_nsect);
5324 cl->cl_g.dkg_acyl = 0;
5325 cl->cl_g.dkg_bcyl = 0;
5326 cl->cl_g.dkg_intrlv = 1;
5327 cl->cl_g.dkg_rpm = 200;
5328 if (cl->cl_g.dkg_pcyl == 0)
5329 cl->cl_g.dkg_pcyl = cl->cl_g.dkg_ncyl +
5330 cl->cl_g.dkg_acyl;
5332 cl->cl_g.dkg_ncyl = (short)pgeomp->g_ncyl;
5333 cl->cl_g.dkg_acyl = pgeomp->g_acyl;
5334 cl->cl_g.dkg_nhead = pgeomp->g_nhead;
5335 cl->cl_g.dkg_nsect = pgeomp->g_nsect;
5336 cl->cl_g.dkg_intrlv = pgeomp->g_intrlv;
5337 cl->cl_g.dkg_rpm = pgeomp->g_rpm;
5338 cl->cl_g.dkg_pcyl = cl->cl_g.dkg_ncyl + cl->cl_g.dkg_acyl;
5341 cl->cl_g.dkg_read_reinstruct = 0;
5342 cl->cl_g.dkg_write_reinstruct = 0;
5343 cl->cl_solaris_size = cl->cl_g.dkg_ncyl *
5344 cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect;
5346 cl->cl_map['a'-'a'].dkl_cylno = 0;
5347 cl->cl_map['a'-'a'].dkl_nblk = cl->cl_solaris_size;
5349 cl->cl_map['c'-'a'].dkl_cylno = 0;
5350 cl->cl_map['c'-'a'].dkl_nblk = cl->cl_solaris_size;
5352 cl->cl_vtoc.v_part[2].p_tag = V_BACKUP;
5353 cl->cl_vtoc.v_part[2].p_flag = V_UNMNT;
5354 cl->cl_vtoc.v_nparts = V_NUMPAR;
5355 cl->cl_vtoc.v_version = V_VERSION;
5356 (void) sprintf((char *)cl->cl_asciilabel, "DEFAULT cyl %d alt %d"
5357 " hd %d sec %d", cl->cl_g.dkg_ncyl, cl->cl_g.dkg_acyl,
5358 cl->cl_g.dkg_nhead, cl->cl_g.dkg_nsect);
5360 cl->cl_f_geometry_is_valid = B_FALSE;
5372 * cl driver soft state (unit) structure
5384 cmlb_update_fdisk_and_vtoc(struct cmlb_lun *cl, void *tg_cookie)
5391 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
5393 if (cmlb_check_update_blockcount(cl, tg_cookie) != 0)
5401 cl->cl_map[P0_RAW_DISK].dkl_cylno = 0;
5402 cl->cl_map[P0_RAW_DISK].dkl_nblk = cl->cl_blockcount;
5406 * reset while we're not holding the CMLB_MUTEX(cl), we will
5407 * continue to use valid values after the CMLB_MUTEX(cl) is
5410 capacity = cl->cl_blockcount;
5417 cmlb_resync_geom_caches(cl, capacity, tg_cookie);
5423 if (cl->cl_device_type == DTYPE_DIRECT || ISREMOVABLE(cl)) {
5424 fdisk_rval = cmlb_read_fdisk(cl, capacity, tg_cookie);
5426 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
5430 if (cl->cl_solaris_size <= DK_LABEL_LOC) {
5437 cl->cl_f_geometry_is_valid = B_TRUE;
5441 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
5452 if (!cl->cl_f_geometry_is_valid) {
5455 cmlb_build_default_label(cl, tg_cookie);
5468 cl->cl_map[FDISK_P1 + count].dkl_cylno = UINT32_MAX;
5469 cl->cl_map[FDISK_P1 + count].dkl_nblk =
5470 cl->cl_fmap[count].fmap_nblk;
5471 cl->cl_offset[FDISK_P1 + count] =
5472 cl->cl_fmap[count].fmap_start;
5478 struct dk_map *lp = &cl->cl_map[count];
5479 cl->cl_offset[count] =
5480 cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect * lp->dkl_cylno;
5482 struct dkl_partition *vp = &cl->cl_vtoc.v_part[count];
5483 cl->cl_offset[count] = vp->p_start + cl->cl_solaris_offset;
5489 ASSERT(mutex_owned(CMLB_MUTEX(cl)));
5496 cmlb_dkio_get_virtgeom(struct cmlb_lun *cl, caddr_t arg, int flag)
5504 mutex_enter(CMLB_MUTEX(cl));
5511 if (cl->cl_lgeom.g_nhead == 0 ||
5512 cl->cl_lgeom.g_nsect == 0 ||
5513 cl->cl_lgeom.g_ncyl > 1024) {
5514 mutex_exit(CMLB_MUTEX(cl));
5517 dkgp->dkg_ncyl = cl->cl_lgeom.g_ncyl;
5518 dkgp->dkg_acyl = cl->cl_lgeom.g_acyl;
5520 dkgp->dkg_nhead = cl->cl_lgeom.g_nhead;
5521 dkgp->dkg_nsect = cl->cl_lgeom.g_nsect;
5523 mutex_exit(CMLB_MUTEX(cl));
5537 cmlb_dkio_get_phygeom(struct cmlb_lun *cl, caddr_t arg, int flag,
5548 mutex_enter(CMLB_MUTEX(cl));
5550 if (cl->cl_g.dkg_nhead != 0 &&
5551 cl->cl_g.dkg_nsect != 0) {
5559 bcopy(&cl->cl_g, dkgp, sizeof (*dkgp));
5561 dkgp->dkg_ncyl = cl->cl_blockcount /
5574 if (ISCD(cl)) {
5575 dkgp->dkg_nhead = cl->cl_pgeom.g_nhead;
5576 dkgp->dkg_nsect = cl->cl_pgeom.g_nsect;
5577 dkgp->dkg_ncyl = cl->cl_pgeom.g_ncyl;
5578 dkgp->dkg_acyl = cl->cl_pgeom.g_acyl;
5586 if (cl->cl_blockcount == 0) {
5587 mutex_exit(CMLB_MUTEX(cl));
5595 if (cl->cl_alter_behavior & CMLB_OFF_BY_ONE)
5596 capacity = cl->cl_blockcount - 1;
5598 capacity = cl->cl_blockcount;
5600 cmlb_convert_geometry(cl, capacity, dkgp, tg_cookie);
5608 mutex_exit(CMLB_MUTEX(cl));
5618 cmlb_dkio_partinfo(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag)
5628 if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
5633 mutex_enter(CMLB_MUTEX(cl));
5635 if (part < P0_RAW_DISK && cl->cl_solaris_size == 0) {
5637 mutex_exit(CMLB_MUTEX(cl));
5641 p.p_start = (daddr_t)cl->cl_offset[part];
5642 p.p_length = (int)cl->cl_map[part].dkl_nblk;
5643 mutex_exit(CMLB_MUTEX(cl));
5674 cmlb_dkio_extpartinfo(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag)
5684 if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
5689 mutex_enter(CMLB_MUTEX(cl));
5691 if (part < P0_RAW_DISK && cl->cl_solaris_size == 0) {
5693 mutex_exit(CMLB_MUTEX(cl));
5697 p.p_start = (diskaddr_t)cl->cl_offset[part];
5698 p.p_length = (diskaddr_t)cl->cl_map[part].dkl_nblk;
5699 mutex_exit(CMLB_MUTEX(cl));
5712 struct cmlb_lun *cl;
5723 cl = (struct cmlb_lun *)cmlbhandle;
5724 if (cl == NULL) {
5730 capacity = cl->cl_blockcount;
5733 lbasize = cl->cl_tgt_blocksize;
5791 if (DK_TG_GETATTRIBUTE(cl, &tgattr, tg_cookie) != 0)
5797 if (DK_TG_GETATTRIBUTE(cl, &tgattr, tg_cookie) != 0)
5823 dblk = lbasize / cl->cl_sys_blocksize;