1 // SPDX-License-Identifier: GPL-2.0-or-later
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
33 #include <linux/crc-t10dif.h>
39 #include <linux/t10-pi.h>
155 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
175 #define JDELAY_OVERRIDDEN -9999
244 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
267 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
275 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
276 #define F_D_IN 1 /* Data-in command (e.g. READ) */
277 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
280 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
288 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
434 rwlock_t macc_sector_lck; /* per-sector media data access on this store */
444 dev_to_sdebug_host(shost->dma_dev)
475 u32 flags; /* OR-ed set of SDEB_F_* */
478 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
515 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
527 /* 0x0; 0x0->0x1f: 6 byte cdbs */
535 /* 0x20; 0x20->0x3f: 10 byte cdbs */
540 /* 0x40; 0x40->0x5f: 10 byte cdbs */
546 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
550 /* 0x80; 0x80->0x9f: 16 byte cdbs */
558 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
565 /* 0xc0; 0xc0->0xff: vendor specific */
573 * The following "response" functions return the SCSI mid-level's 4 byte
574 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
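
The "tuple-in-an-int" referred to here is the mid-level result word: the host byte sits in bits 16..23 and the SCSI status byte in bits 0..7, which is why errors are returned as values like (DID_ERROR << 16). A minimal stand-alone sketch of that packing, using local copies of a few well-known constants rather than the kernel headers:

#include <stdio.h>

/* Local copies of well-known values, for illustration only. */
#define DID_OK      0x00    /* host byte: no host-side problem */
#define DID_ERROR   0x07    /* host byte: as in (DID_ERROR << 16) elsewhere in this file */
#define SAM_STAT_CHECK_CONDITION 0x02   /* SCSI status byte */

static int make_result(int host_byte, int status_byte)
{
        return (host_byte << 16) | status_byte; /* the 4 byte "tuple" in an int */
}

int main(void)
{
        int res = make_result(DID_OK, SAM_STAT_CHECK_CONDITION);

        printf("host byte 0x%02x, status byte 0x%02x\n",
               (res >> 16) & 0xff, res & 0xff);
        return 0;
}
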
628 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
722 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
842 0, 0, 0, 0} }, /* PRE-FETCH (10) */
968 static int sdeb_first_idx = -1; /* invalid index ==> none created */
969 static int sdeb_most_recent_idx = -1;
983 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
989 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
1028 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata; in sdebug_err_add()
1031 spin_lock(&devip->list_lock); in sdebug_err_add()
1032 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_err_add()
1033 if (err->type == new->type && err->cmd == new->cmd) { in sdebug_err_add()
1034 list_del_rcu(&err->list); in sdebug_err_add()
1035 call_rcu(&err->rcu, sdebug_err_free); in sdebug_err_add()
1039 list_add_tail_rcu(&new->list, &devip->inject_err_list); in sdebug_err_add()
1040 spin_unlock(&devip->list_lock); in sdebug_err_add()
1045 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata; in sdebug_err_remove()
1050 if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) { in sdebug_err_remove()
1052 return -EINVAL; in sdebug_err_remove()
1055 spin_lock(&devip->list_lock); in sdebug_err_remove()
1056 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_err_remove()
1057 if (err->type == type && err->cmd == cmd) { in sdebug_err_remove()
1058 list_del_rcu(&err->list); in sdebug_err_remove()
1059 call_rcu(&err->rcu, sdebug_err_free); in sdebug_err_remove()
1060 spin_unlock(&devip->list_lock); in sdebug_err_remove()
1065 spin_unlock(&devip->list_lock); in sdebug_err_remove()
1068 return -EINVAL; in sdebug_err_remove()
1073 struct scsi_device *sdev = (struct scsi_device *)m->private; in sdebug_error_show()
1074 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata; in sdebug_error_show()
1080 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_error_show()
1081 switch (err->type) { in sdebug_error_show()
1085 seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt, in sdebug_error_show()
1086 err->cmd); in sdebug_error_show()
1090 seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type, in sdebug_error_show()
1091 err->cnt, err->cmd, err->queuecmd_ret); in sdebug_error_show()
1096 err->type, err->cnt, err->cmd, in sdebug_error_show()
1097 err->host_byte, err->driver_byte, in sdebug_error_show()
1098 err->status_byte, err->sense_key, in sdebug_error_show()
1099 err->asc, err->asq); in sdebug_error_show()
1110 return single_open(file, sdebug_error_show, inode->i_private); in sdebug_error_open()
1119 struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private; in sdebug_error_write()
1123 return -ENOMEM; in sdebug_error_write()
1127 return -EFAULT; in sdebug_error_write()
1130 if (buf[0] == '-') in sdebug_error_write()
1135 return -EINVAL; in sdebug_error_write()
1141 return -ENOMEM; in sdebug_error_write()
1148 if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt, in sdebug_error_write()
1149 &inject->cmd) != 3) in sdebug_error_write()
1154 if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt, in sdebug_error_write()
1155 &inject->cmd, &inject->queuecmd_ret) != 4) in sdebug_error_write()
1161 &inject->type, &inject->cnt, &inject->cmd, in sdebug_error_write()
1162 &inject->host_byte, &inject->driver_byte, in sdebug_error_write()
1163 &inject->status_byte, &inject->sense_key, in sdebug_error_write()
1164 &inject->asc, &inject->asq) != 9) in sdebug_error_write()
1181 return -EINVAL; in sdebug_error_write()
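
The sscanf() formats above define the user-visible syntax of the per-device debugfs "error" file. The sketch below is a user-space illustration of driving it; the path and the numeric error type are assumptions (the real type values come from the driver's sdebug_err_type enum, which is not reproduced in this excerpt, and the directory name follows from the debugfs_create_dir()/debugfs_create_file() calls later in the file):

#include <stdio.h>

/* Append an injection rule: matches the "%d %d %hhx" parse in
 * sdebug_error_write().  A cmd of 0xff matches any opcode, and negative
 * cnt values count up toward zero in the fail helpers further down. */
static int sdebug_inject(const char *errfile, int type, int cnt, unsigned int cmd)
{
        FILE *f = fopen(errfile, "w");

        if (!f)
                return -1;
        fprintf(f, "%d %d %x\n", type, cnt, cmd);
        return fclose(f);
}

/* Remove a rule: matches the "- %d %hhx" parse in sdebug_err_remove(). */
static int sdebug_clear(const char *errfile, int type, unsigned int cmd)
{
        FILE *f = fopen(errfile, "w");

        if (!f)
                return -1;
        fprintf(f, "- %d %x\n", type, cmd);
        return fclose(f);
}

int main(void)
{
        /* Assumed path; the directory name comes from dev_name(&sdp->sdev_dev). */
        const char *path = "/sys/kernel/debug/scsi_debug/0:0:0:0/error";

        sdebug_inject(path, 0, -1, 0x12);   /* type 0 assumed; opcode 0x12 = INQUIRY */
        sdebug_clear(path, 0, 0x12);
        return 0;
}
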
1193 struct scsi_target *starget = (struct scsi_target *)m->private; in sdebug_target_reset_fail_show()
1195 (struct sdebug_target_info *)starget->hostdata; in sdebug_target_reset_fail_show()
1198 seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N'); in sdebug_target_reset_fail_show()
1205 return single_open(file, sdebug_target_reset_fail_show, inode->i_private); in sdebug_target_reset_fail_open()
1213 (struct scsi_target *)file->f_inode->i_private; in sdebug_target_reset_fail_write()
1215 (struct sdebug_target_info *)starget->hostdata; in sdebug_target_reset_fail_write()
1218 ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail); in sdebug_target_reset_fail_write()
1221 return -ENODEV; in sdebug_target_reset_fail_write()
1237 return -ENOMEM; in sdebug_target_alloc()
1241 targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev), in sdebug_target_alloc()
1244 debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget, in sdebug_target_alloc()
1247 starget->hostdata = targetip; in sdebug_target_alloc()
1256 debugfs_remove(targetip->debugfs_entry); in sdebug_tartget_cleanup_async()
1264 targetip = (struct sdebug_target_info *)starget->hostdata; in sdebug_target_destroy()
1266 starget->hostdata = NULL; in sdebug_target_destroy()
1293 if (!sip || !sip->storep) { in lba2fake_store()
1297 return lsip->storep + lba * sdebug_sector_size; in lba2fake_store()
1305 return sip->dif_storep + sector; in dif_store()
1315 hpnt = sdbg_host->shost; in sdebug_max_tgts_luns()
1316 if ((hpnt->this_id >= 0) && in sdebug_max_tgts_luns()
1317 (sdebug_num_tgts > hpnt->this_id)) in sdebug_max_tgts_luns()
1318 hpnt->max_id = sdebug_num_tgts + 1; in sdebug_max_tgts_luns()
1320 hpnt->max_id = sdebug_num_tgts; in sdebug_max_tgts_luns()
1322 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; in sdebug_max_tgts_luns()
1329 /* Set in_bit to -1 to indicate no bit position of invalid field */
1338 sbuff = scp->sense_buffer; in mk_sense_invalid_fld()
1340 sdev_printk(KERN_ERR, scp->device, in mk_sense_invalid_fld()
1365 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq" in mk_sense_invalid_fld()
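
mk_sense_invalid_fld() reports ILLEGAL REQUEST / INVALID FIELD IN CDB and, when possible, points the sense-key-specific bytes at the offending CDB byte and bit. A stand-alone sketch of the fixed-format sense layout involved (simplified; the driver's helper also supports descriptor-format sense and fields in the data-out buffer):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ILLEGAL_REQUEST 0x05

/* Build 18 bytes of fixed-format sense: INVALID FIELD IN CDB (ASC 0x24)
 * with the sense-key-specific bytes pointing at CDB byte 'in_byte' and,
 * when in_bit >= 0, at that bit.  Illustrative sketch only. */
static void build_invalid_fld_sense(uint8_t *sb, int in_byte, int in_bit)
{
        memset(sb, 0, 18);
        sb[0] = 0x70;                   /* current error, fixed format */
        sb[2] = ILLEGAL_REQUEST;
        sb[7] = 0xa;                    /* additional sense length */
        sb[12] = 0x24;                  /* ASC: INVALID FIELD IN CDB */
        sb[15] = 0x80 | 0x40;           /* SKSV + C/D (error is in the CDB) */
        if (in_bit >= 0)
                sb[15] |= 0x8 | (in_bit & 0x7); /* BPV + bit pointer */
        sb[16] = (in_byte >> 8) & 0xff; /* field pointer, big endian */
        sb[17] = in_byte & 0xff;
}

int main(void)
{
        uint8_t sense[18];
        int i;

        build_invalid_fld_sense(sense, 4, -1);  /* -1: no bit position */
        for (i = 0; i < 18; i++)
                printf("%02x ", sense[i]);
        printf("\n");
        return 0;
}
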
1372 if (!scp->sense_buffer) { in mk_sense_buffer()
1373 sdev_printk(KERN_ERR, scp->device, in mk_sense_buffer()
1377 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in mk_sense_buffer()
1382 sdev_printk(KERN_INFO, scp->device, in mk_sense_buffer()
1391 if (!scp->sense_buffer) { in mk_sense_info_tape()
1392 sdev_printk(KERN_ERR, scp->device, in mk_sense_info_tape()
1396 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in mk_sense_info_tape()
1401 scp->sense_buffer[0] |= 0x80; /* valid */ in mk_sense_info_tape()
1402 scp->sense_buffer[2] |= tape_flags; in mk_sense_info_tape()
1403 put_unaligned_be32(information, &scp->sense_buffer[3]); in mk_sense_info_tape()
1406 sdev_printk(KERN_INFO, scp->device, in mk_sense_info_tape()
1431 return -EINVAL; in scsi_debug_ioctl()
1432 /* return -ENOTTY; // correct return but upsets fdisk */ in scsi_debug_ioctl()
1439 sdev->use_10_for_rw = false; in config_cdb_len()
1440 sdev->use_16_for_rw = false; in config_cdb_len()
1441 sdev->use_10_for_ms = false; in config_cdb_len()
1444 sdev->use_10_for_rw = true; in config_cdb_len()
1445 sdev->use_16_for_rw = false; in config_cdb_len()
1446 sdev->use_10_for_ms = false; in config_cdb_len()
1449 sdev->use_10_for_rw = true; in config_cdb_len()
1450 sdev->use_16_for_rw = false; in config_cdb_len()
1451 sdev->use_10_for_ms = true; in config_cdb_len()
1454 sdev->use_10_for_rw = false; in config_cdb_len()
1455 sdev->use_16_for_rw = true; in config_cdb_len()
1456 sdev->use_10_for_ms = true; in config_cdb_len()
1459 sdev->use_10_for_rw = false; in config_cdb_len()
1460 sdev->use_16_for_rw = true; in config_cdb_len()
1461 sdev->use_10_for_ms = true; in config_cdb_len()
1466 sdev->use_10_for_rw = true; in config_cdb_len()
1467 sdev->use_16_for_rw = false; in config_cdb_len()
1468 sdev->use_10_for_ms = false; in config_cdb_len()
1482 shost = sdbg_host->shost; in all_config_cdb_len()
1492 struct sdebug_host_info *sdhp = devip->sdbg_host; in clear_luns_changed_on_target()
1495 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { in clear_luns_changed_on_target()
1496 if ((devip->sdbg_host == dp->sdbg_host) && in clear_luns_changed_on_target()
1497 (devip->target == dp->target)) { in clear_luns_changed_on_target()
1498 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); in clear_luns_changed_on_target()
1507 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); in make_ua()
1558 * SPC-3 behavior is to report a UNIT ATTENTION with in make_ua()
1561 * received. SPC-4 behavior is to report it only once. in make_ua()
1563 * values as struct scsi_device->scsi_level. in make_ua()
1565 if (sdebug_scsi_level >= 6) /* SPC-4 and above */ in make_ua()
1585 clear_bit(k, devip->uas_bm); in make_ua()
1587 sdev_printk(KERN_INFO, scp->device, in make_ua()
1595 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1600 struct scsi_data_buffer *sdb = &scp->sdb; in fill_from_dev_buffer()
1602 if (!sdb->length) in fill_from_dev_buffer()
1604 if (scp->sc_data_direction != DMA_FROM_DEVICE) in fill_from_dev_buffer()
1607 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, in fill_from_dev_buffer()
1609 scsi_set_resid(scp, scsi_bufflen(scp) - act_len); in fill_from_dev_buffer()
1614 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1615 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1623 struct scsi_data_buffer *sdb = &scp->sdb; in p_fill_from_dev_buffer()
1626 if (sdb->length <= off_dst) in p_fill_from_dev_buffer()
1628 if (scp->sc_data_direction != DMA_FROM_DEVICE) in p_fill_from_dev_buffer()
1631 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents, in p_fill_from_dev_buffer()
1636 n = scsi_bufflen(scp) - (off_dst + act_len); in p_fill_from_dev_buffer()
1641 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1642 * 'arr' or -1 if error.
1649 if (scp->sc_data_direction != DMA_TO_DEVICE) in fetch_to_dev_buffer()
1650 return -1; in fetch_to_dev_buffer()
1696 /* NAA-3, Logical unit identifier (binary) */ in inquiry_vpd_83()
1714 /* NAA-3, Target port identifier */ in inquiry_vpd_83()
1721 /* NAA-3, Target port group identifier */ in inquiry_vpd_83()
1730 /* NAA-3, Target device identifier */ in inquiry_vpd_83()
1738 arr[num++] = 0x63; /* proto=sas, UTF-8 */ in inquiry_vpd_83()
1782 memset(arr + num + olen, 0, plen - olen); in inquiry_vpd_85()
1794 memset(arr + num + olen, 0, plen - olen); in inquiry_vpd_85()
1816 /* naa-5 target port identifier (A) */ in inquiry_vpd_88()
1831 /* naa-5 target port identifier (B) */ in inquiry_vpd_88()
1902 /* Block limits VPD page (SBC-3) */
1955 /* Block device characteristics VPD page (SBC-3) */
1967 /* Logical block provisioning VPD page (SBC-4) */
1993 * Optimal number of non-sequentially written sequential write in inquiry_vpd_b6()
1999 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open) in inquiry_vpd_b6()
2000 put_unaligned_be32(devip->max_open, &arr[12]); in inquiry_vpd_b6()
2003 if (devip->zcap < devip->zsize) { in inquiry_vpd_b6()
2005 put_unaligned_be64(devip->zsize, &arr[20]); in inquiry_vpd_b6()
2016 /* Block limits extension VPD page (SBC-4) */
2032 unsigned char *cmd = scp->cmnd; in resp_inquiry()
2042 is_zbc = devip->zoned; in resp_inquiry()
2044 have_wlun = scsi_is_wlun(scp->device->lun); in resp_inquiry()
2047 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) in resp_inquiry()
2060 int host_no = devip->sdbg_host->shost->host_no; in resp_inquiry()
2064 (devip->channel & 0x7f); in resp_inquiry()
2067 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) + in resp_inquiry()
2068 (devip->target * 1000) + devip->lun); in resp_inquiry()
2070 (devip->target * 1000) - 3; in resp_inquiry()
2092 arr[3] = n - 4; /* number of supported VPD pages */ in resp_inquiry()
2100 &devip->lu_name); in resp_inquiry()
2120 arr[4] = 0x2; /* disconnect-reconnect mp */ in resp_inquiry()
2140 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); in resp_inquiry()
2154 arr[4] = SDEBUG_LONG_INQ_SZ - 5; in resp_inquiry()
2167 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */ in resp_inquiry()
2168 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */ in resp_inquiry()
2170 if (is_disk) { /* SBC-4 no version claimed */ in resp_inquiry()
2173 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ in resp_inquiry()
2180 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ in resp_inquiry()
2194 unsigned char *cmd = scp->cmnd; in resp_requests()
2199 int stopped_state = atomic_read(&devip->stopped); in resp_requests()
2247 unsigned char *cmd = scp->cmnd; in resp_start_stop()
2257 stopped_state = atomic_read(&devip->stopped); in resp_start_stop()
2261 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) { in resp_start_stop()
2262 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts)); in resp_start_stop()
2266 atomic_set(&devip->stopped, 0); in resp_start_stop()
2281 atomic_xchg(&devip->stopped, want_stop); in resp_start_stop()
2285 set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */ in resp_start_stop()
2287 devip->tape_location[i] = 0; in resp_start_stop()
2288 devip->tape_partition = 0; in resp_start_stop()
2318 capac = (unsigned int)sdebug_capacity - 1; in resp_readcap()
2330 unsigned char *cmd = scp->cmnd; in resp_readcap16()
2338 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0); in resp_readcap16()
2355 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices. in resp_readcap16()
2357 if (devip->zoned) in resp_readcap16()
2363 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */ in resp_readcap16()
2376 unsigned char *cmd = scp->cmnd; in resp_report_tgtpgs()
2378 int host_no = devip->sdbg_host->shost->host_no; in resp_report_tgtpgs()
2396 (devip->channel & 0x7f); in resp_report_tgtpgs()
2398 (devip->channel & 0x7f) + 0x80; in resp_report_tgtpgs()
2434 rlen = n - 4; in resp_report_tgtpgs()
2439 * - The allocated length in resp_report_tgtpgs()
2440 * - The constructed command length in resp_report_tgtpgs()
2441 * - The maximum array size in resp_report_tgtpgs()
2461 u8 *cmd = scp->cmnd; in resp_rsup_opcodes()
2469 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); in resp_rsup_opcodes()
2486 oip->num_attached != 0xff; ++oip) { in resp_rsup_opcodes()
2487 if (F_INV_OP & oip->flags) in resp_rsup_opcodes()
2489 count += (oip->num_attached + 1); in resp_rsup_opcodes()
2494 oip->num_attached != 0xff && offset < a_len; ++oip) { in resp_rsup_opcodes()
2495 if (F_INV_OP & oip->flags) in resp_rsup_opcodes()
2497 na = oip->num_attached; in resp_rsup_opcodes()
2498 arr[offset] = oip->opcode; in resp_rsup_opcodes()
2499 put_unaligned_be16(oip->sa, arr + offset + 2); in resp_rsup_opcodes()
2502 if (FF_SA & oip->flags) in resp_rsup_opcodes()
2504 put_unaligned_be16(oip->len_mask[0], arr + offset + 6); in resp_rsup_opcodes()
2508 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { in resp_rsup_opcodes()
2509 if (F_INV_OP & oip->flags) in resp_rsup_opcodes()
2512 arr[offset] = oip->opcode; in resp_rsup_opcodes()
2513 put_unaligned_be16(oip->sa, arr + offset + 2); in resp_rsup_opcodes()
2516 if (FF_SA & oip->flags) in resp_rsup_opcodes()
2518 put_unaligned_be16(oip->len_mask[0], in resp_rsup_opcodes()
2533 if (F_INV_OP & oip->flags) { in resp_rsup_opcodes()
2538 if (FF_SA & oip->flags) { in resp_rsup_opcodes()
2546 0 == (FF_SA & oip->flags)) { in resp_rsup_opcodes()
2547 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); in resp_rsup_opcodes()
2551 if (0 == (FF_SA & oip->flags) && in resp_rsup_opcodes()
2552 req_opcode == oip->opcode) in resp_rsup_opcodes()
2554 else if (0 == (FF_SA & oip->flags)) { in resp_rsup_opcodes()
2555 na = oip->num_attached; in resp_rsup_opcodes()
2556 for (k = 0, oip = oip->arrp; k < na; in resp_rsup_opcodes()
2558 if (req_opcode == oip->opcode) in resp_rsup_opcodes()
2562 } else if (req_sa != oip->sa) { in resp_rsup_opcodes()
2563 na = oip->num_attached; in resp_rsup_opcodes()
2564 for (k = 0, oip = oip->arrp; k < na; in resp_rsup_opcodes()
2566 if (req_sa == oip->sa) in resp_rsup_opcodes()
2573 u = oip->len_mask[0]; in resp_rsup_opcodes()
2575 arr[4] = oip->opcode; in resp_rsup_opcodes()
2578 oip->len_mask[k] : 0xff; in resp_rsup_opcodes()
2607 u8 *cmd = scp->cmnd; in resp_rsup_tmfs()
2613 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); in resp_rsup_tmfs()
2631 { /* Read-Write Error Recovery page for mode_sense */ in resp_err_recov_pg()
2637 memset(p + 2, 0, sizeof(err_recov_pg) - 2); in resp_err_recov_pg()
2642 { /* Disconnect-Reconnect page for mode_sense */ in resp_disconnect_pg()
2648 memset(p + 2, 0, sizeof(disconnect_pg) - 2); in resp_disconnect_pg()
2664 memset(p + 2, 0, sizeof(format_pg) - 2); in resp_format_pg()
2729 .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4), in resp_grouping_m_pg()
2745 memset(p + 4, 0, sizeof(gr_m_pg) - 4); in resp_grouping_m_pg()
2766 { /* SAS SSP mode page - short format for mode_sense */ in resp_sas_sf_m_pg()
2772 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2); in resp_sas_sf_m_pg()
2806 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4); in resp_sas_pcd_m_spg()
2818 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4); in resp_sas_sha_m_spg()
2829 memset(p + 2, 0, sizeof(partition_pg) - 2); in resp_partition_m_pg()
2840 devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS; in process_medium_part_m_pg()
2841 devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS; in process_medium_part_m_pg()
2842 devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS; in process_medium_part_m_pg()
2849 p0_size = TAPE_UNITS - p1_size; in process_medium_part_m_pg()
2857 p1_size = TAPE_UNITS - p0_size; in process_medium_part_m_pg()
2859 p0_size = TAPE_UNITS - p1_size; in process_medium_part_m_pg()
2868 devip->tape_pending_nbr_partitions = new_nbr; in process_medium_part_m_pg()
2869 devip->tape_pending_part_0_size = p0_size; in process_medium_part_m_pg()
2870 devip->tape_pending_part_1_size = p1_size; in process_medium_part_m_pg()
2872 devip->tape_pending_nbr_partitions = new_nbr; in process_medium_part_m_pg()
2888 memset(p + 2, 0, sizeof(compression_pg) - 2); in resp_compression_m_pg()
2902 int target = scp->device->id; in resp_mode_sense()
2905 unsigned char *cmd = scp->cmnd; in resp_mode_sense()
2910 return -ENOMEM; in resp_mode_sense()
2918 is_zbc = devip->zoned; in resp_mode_sense()
2929 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + in resp_mode_sense()
2930 (devip->target * 1000) - 3; in resp_mode_sense()
2933 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */ in resp_mode_sense()
2959 ap[0] = devip->tape_density; in resp_mode_sense()
2960 put_unaligned_be16(devip->tape_blksize, ap + 6); in resp_mode_sense()
2983 case 0x1: /* Read-Write error recovery page, direct access */ in resp_mode_sense()
2989 case 0x2: /* Disconnect-Reconnect page, all devices */ in resp_mode_sense()
3035 len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce); in resp_mode_sense()
3091 arr[0] = offset - 1; in resp_mode_sense()
3093 put_unaligned_be16((offset - 2), arr + 0); in resp_mode_sense()
3101 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); in resp_mode_sense()
3113 unsigned char *cmd = scp->cmnd; in resp_mode_select()
3121 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); in resp_mode_select()
3125 if (-1 == res) in resp_mode_select()
3128 sdev_printk(KERN_INFO, scp->device, in resp_mode_select()
3139 mselect6 ? 3 : 6, -1); in resp_mode_select()
3143 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); in resp_mode_select()
3151 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1); in resp_mode_select()
3154 devip->tape_density = arr[off]; in resp_mode_select()
3155 devip->tape_blksize = blksize; in resp_mode_select()
3161 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); in resp_mode_select()
3182 sizeof(caching_pg) - 2); in resp_mode_select()
3189 sizeof(ctrl_m_pg) - 2); in resp_mode_select()
3202 devip->tape_dce = (arr[off + 2] & 0x80) != 0; in resp_mode_select()
3213 mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1); in resp_mode_select()
3220 sizeof(iec_m_pg) - 2); in resp_mode_select()
3230 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); in resp_mode_select()
3281 unsigned char *cmd = scp->cmnd; in resp_log_sense()
3301 arr[3] = n - 4; in resp_log_sense()
3333 arr[3] = n - 4; in resp_log_sense()
3343 arr[3] = n - 4; in resp_log_sense()
3351 arr[3] = n - 4; in resp_log_sense()
3367 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); in resp_log_sense()
3390 unsigned char *cmd = scp->cmnd; in resp_locate()
3396 if (cmd[8] >= devip->tape_nbr_partitions) { in resp_locate()
3397 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1); in resp_locate()
3400 devip->tape_partition = cmd[8]; in resp_locate()
3403 partition = devip->tape_partition; in resp_locate()
3405 for (i = 0, blp = devip->tape_blocks[partition]; in resp_locate()
3406 i < pos && i < devip->tape_eop[partition]; i++, blp++) in resp_locate()
3407 if (IS_TAPE_BLOCK_EOD(blp->fl_size)) in resp_locate()
3410 devip->tape_location[partition] = i; in resp_locate()
3414 devip->tape_location[partition] = pos; in resp_locate()
3422 unsigned char *cmd = scp->cmnd; in resp_write_filemarks()
3425 int partition = devip->tape_partition; in resp_write_filemarks()
3427 if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */ in resp_write_filemarks()
3433 for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) { in resp_write_filemarks()
3434 if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */ in resp_write_filemarks()
3435 devip->tape_location[partition] = devip->tape_eop[partition] - 1; in resp_write_filemarks()
3440 (devip->tape_blocks[partition] + pos)->fl_size = data; in resp_write_filemarks()
3442 (devip->tape_blocks[partition] + pos)->fl_size = in resp_write_filemarks()
3444 devip->tape_location[partition] = pos; in resp_write_filemarks()
3452 unsigned char *cmd = scp->cmnd, code; in resp_space()
3455 int partition = devip->tape_partition; in resp_space()
3458 if ((count & 0x800000) != 0) /* extend negative to 32-bit count */ in resp_space()
3462 pos = devip->tape_location[partition]; in resp_space()
3465 count = (-count); in resp_space()
3466 pos -= 1; in resp_space()
3467 for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count; in resp_space()
3471 else if (IS_TAPE_BLOCK_FM(blp->fl_size)) in resp_space()
3474 pos--; in resp_space()
3475 blp--; in resp_space()
3479 for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count; in resp_space()
3481 if (IS_TAPE_BLOCK_EOD(blp->fl_size)) in resp_space()
3483 if (IS_TAPE_BLOCK_FM(blp->fl_size)) { in resp_space()
3487 if (pos >= devip->tape_eop[partition]) in resp_space()
3493 count = (-count); in resp_space()
3497 for (i = 0, blp = devip->tape_blocks[partition] + pos; in resp_space()
3498 i < count && pos >= 0; i++, pos--, blp--) { in resp_space()
3499 for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) && in resp_space()
3500 pos >= 0; pos--, blp--) in resp_space()
3508 for (i = 0, blp = devip->tape_blocks[partition] + pos; in resp_space()
3510 for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) && in resp_space()
3511 !IS_TAPE_BLOCK_EOD(blp->fl_size) && in resp_space()
3512 pos < devip->tape_eop[partition]; in resp_space()
3515 if (IS_TAPE_BLOCK_EOD(blp->fl_size)) in resp_space()
3517 if (pos >= devip->tape_eop[partition]) in resp_space()
3522 for (blp = devip->tape_blocks[partition] + pos; in resp_space()
3523 !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition]; in resp_space()
3526 if (pos >= devip->tape_eop[partition]) in resp_space()
3530 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1); in resp_space()
3533 devip->tape_location[partition] = pos; in resp_space()
3537 devip->tape_location[partition] = pos; in resp_space()
3539 FILEMARK_DETECTED_ASCQ, count - i, in resp_space()
3544 devip->tape_location[partition] = pos; in resp_space()
3546 EOD_DETECTED_ASCQ, count - i, in resp_space()
3551 devip->tape_location[partition] = 0; in resp_space()
3553 BEGINNING_OF_P_M_DETECTED_ASCQ, count - i, in resp_space()
3555 devip->tape_location[partition] = 0; in resp_space()
3559 devip->tape_location[partition] = devip->tape_eop[partition] - 1; in resp_space()
3569 devip->tape_location[devip->tape_partition] = 0; in resp_rewind()
3580 return -1; in partition_tape()
3581 devip->tape_eop[0] = part_0_size; in partition_tape()
3582 devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG; in partition_tape()
3583 devip->tape_eop[1] = part_1_size; in partition_tape()
3584 devip->tape_blocks[1] = devip->tape_blocks[0] + in partition_tape()
3585 devip->tape_eop[0]; in partition_tape()
3586 devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG; in partition_tape()
3589 devip->tape_location[i] = 0; in partition_tape()
3591 devip->tape_nbr_partitions = nbr_partitions; in partition_tape()
3592 devip->tape_partition = 0; in partition_tape()
3594 partition_pg[3] = nbr_partitions - 1; in partition_tape()
3595 put_unaligned_be16(devip->tape_eop[0], partition_pg + 8); in partition_tape()
3596 put_unaligned_be16(devip->tape_eop[1], partition_pg + 10); in partition_tape()
3605 unsigned char *cmd = scp->cmnd; in resp_format_medium()
3608 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1); in resp_format_medium()
3612 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1); in resp_format_medium()
3616 if (devip->tape_pending_nbr_partitions > 0) { in resp_format_medium()
3618 devip->tape_pending_nbr_partitions, in resp_format_medium()
3619 devip->tape_pending_part_0_size, in resp_format_medium()
3620 devip->tape_pending_part_1_size); in resp_format_medium()
3622 res = partition_tape(devip, devip->tape_nbr_partitions, in resp_format_medium()
3623 devip->tape_eop[0], devip->tape_eop[1]); in resp_format_medium()
3627 return -EINVAL; in resp_format_medium()
3629 devip->tape_pending_nbr_partitions = -1; in resp_format_medium()
3636 return devip->nr_zones != 0; in sdebug_dev_is_zoned()
3642 u32 zno = lba >> devip->zsize_shift; in zbc_zone()
3645 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones) in zbc_zone()
3646 return &devip->zstate[zno]; in zbc_zone()
3652 zno = 2 * zno - devip->nr_conv_zones; in zbc_zone()
3653 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones); in zbc_zone()
3654 zsp = &devip->zstate[zno]; in zbc_zone()
3655 if (lba >= zsp->z_start + zsp->z_size) in zbc_zone()
3657 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size); in zbc_zone()
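
When the zone capacity is smaller than the zone size, each sequential zone in zstate[] is followed by a gap zone, which is where the 2 * zno - nr_conv_zones index above comes from. A small stand-alone sketch of that index arithmetic, assuming a power-of-two zone size as implied by zsize_shift:

#include <stdio.h>

/* Index into a zone-state array laid out as nr_conv conventional zones,
 * then (sequential, gap) pairs when zcap < zsize.  Mirrors the arithmetic
 * in zbc_zone() above; zsize must be a power of two. */
static unsigned int zone_index(unsigned long long lba, unsigned int zsize_shift,
                               unsigned long long zsize, unsigned long long zcap,
                               unsigned int nr_conv)
{
        unsigned int zno = lba >> zsize_shift;

        if (zcap == zsize || zno < nr_conv)
                return zno;                     /* one state entry per zone */
        zno = 2 * zno - nr_conv;                /* sequential half of the pair */
        if ((lba & (zsize - 1)) >= zcap)
                zno++;                          /* LBA falls in the gap zone */
        return zno;
}

int main(void)
{
        /* 1 conventional zone, 128-block zones with a 96-block capacity. */
        printf("%u %u %u\n",
               zone_index(64, 7, 128, 96, 1),   /* conventional zone 0 */
               zone_index(200, 7, 128, 96, 1),  /* sequential part of zone 1 */
               zone_index(250, 7, 128, 96, 1)); /* gap part of zone 1 */
        return 0;
}
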
3663 return zsp->z_type == ZBC_ZTYPE_CNV; in zbc_zone_is_conv()
3668 return zsp->z_type == ZBC_ZTYPE_GAP; in zbc_zone_is_gap()
3684 zc = zsp->z_cond; in zbc_close_zone()
3689 devip->nr_imp_open--; in zbc_close_zone()
3691 devip->nr_exp_open--; in zbc_close_zone()
3693 if (zsp->z_wp == zsp->z_start) { in zbc_close_zone()
3694 zsp->z_cond = ZC1_EMPTY; in zbc_close_zone()
3696 zsp->z_cond = ZC4_CLOSED; in zbc_close_zone()
3697 devip->nr_closed++; in zbc_close_zone()
3703 struct sdeb_zone_state *zsp = &devip->zstate[0]; in zbc_close_imp_open_zone()
3706 for (i = 0; i < devip->nr_zones; i++, zsp++) { in zbc_close_imp_open_zone()
3707 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) { in zbc_close_imp_open_zone()
3722 zc = zsp->z_cond; in zbc_open_zone()
3728 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN) in zbc_open_zone()
3730 else if (devip->max_open && in zbc_open_zone()
3731 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open) in zbc_open_zone()
3734 if (zsp->z_cond == ZC4_CLOSED) in zbc_open_zone()
3735 devip->nr_closed--; in zbc_open_zone()
3737 zsp->z_cond = ZC3_EXPLICIT_OPEN; in zbc_open_zone()
3738 devip->nr_exp_open++; in zbc_open_zone()
3740 zsp->z_cond = ZC2_IMPLICIT_OPEN; in zbc_open_zone()
3741 devip->nr_imp_open++; in zbc_open_zone()
3748 switch (zsp->z_cond) { in zbc_set_zone_full()
3750 devip->nr_imp_open--; in zbc_set_zone_full()
3753 devip->nr_exp_open--; in zbc_set_zone_full()
3757 zsp->z_start, zsp->z_cond); in zbc_set_zone_full()
3760 zsp->z_cond = ZC5_FULL; in zbc_set_zone_full()
3767 unsigned long long n, end, zend = zsp->z_start + zsp->z_size; in zbc_inc_wp()
3772 if (zsp->z_type == ZBC_ZTYPE_SWR) { in zbc_inc_wp()
3773 zsp->z_wp += num; in zbc_inc_wp()
3774 if (zsp->z_wp >= zend) in zbc_inc_wp()
3780 if (lba != zsp->z_wp) in zbc_inc_wp()
3781 zsp->z_non_seq_resource = true; in zbc_inc_wp()
3785 n = zend - lba; in zbc_inc_wp()
3786 zsp->z_wp = zend; in zbc_inc_wp()
3787 } else if (end > zsp->z_wp) { in zbc_inc_wp()
3789 zsp->z_wp = end; in zbc_inc_wp()
3793 if (zsp->z_wp >= zend) in zbc_inc_wp()
3796 num -= n; in zbc_inc_wp()
3800 zend = zsp->z_start + zsp->z_size; in zbc_inc_wp()
3808 struct scsi_device *sdp = scp->device; in check_zbc_access_params()
3809 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in check_zbc_access_params()
3811 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1); in check_zbc_access_params()
3814 /* For host-managed, reads cannot cross zone types boundaries */ in check_zbc_access_params()
3815 if (zsp->z_type != zsp_end->z_type) { in check_zbc_access_params()
3842 if (zsp->z_type == ZBC_ZTYPE_SWR) { in check_zbc_access_params()
3851 if (zsp->z_cond == ZC5_FULL) { in check_zbc_access_params()
3857 if (lba != zsp->z_wp) { in check_zbc_access_params()
3866 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) { in check_zbc_access_params()
3867 if (devip->max_open && in check_zbc_access_params()
3868 devip->nr_exp_open >= devip->max_open) { in check_zbc_access_params()
3884 struct scsi_device *sdp = scp->device; in check_device_access_params()
3885 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in check_device_access_params()
3920 return xa_load(per_store_ap, devip->sdbg_host->si_idx); in devip2sip()
3964 sdeb_read_lock(&sip->macc_data_lck); in sdeb_data_read_lock()
3972 sdeb_read_unlock(&sip->macc_data_lck); in sdeb_data_read_unlock()
3980 sdeb_write_lock(&sip->macc_data_lck); in sdeb_data_write_lock()
3988 sdeb_write_unlock(&sip->macc_data_lck); in sdeb_data_write_unlock()
3996 sdeb_read_lock(&sip->macc_sector_lck); in sdeb_data_sector_read_lock()
4004 sdeb_read_unlock(&sip->macc_sector_lck); in sdeb_data_sector_read_unlock()
4012 sdeb_write_lock(&sip->macc_sector_lck); in sdeb_data_sector_write_lock()
4020 sdeb_write_unlock(&sip->macc_sector_lck); in sdeb_data_sector_write_unlock()
4025 * We simplify the atomic model to allow only 1x atomic write and many non-
4031 * So use a RW lock for per-device read and write locking:
4032 * An atomic access grabs the lock as a writer and non-atomic grabs the lock
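
The comment above describes the locking model: an atomic write takes the per-store lock as a writer, every other data access takes it as a reader. A user-space pthread analogue of that model (illustration only; the driver itself uses the kernel's rwlock_t):

#include <pthread.h>
#include <stdio.h>

/* One lock per backing store: atomic writes take it as a writer, all other
 * data accesses as readers, so a single atomic write excludes concurrent
 * non-atomic I/O while non-atomic commands still run in parallel. */
static pthread_rwlock_t macc_lck = PTHREAD_RWLOCK_INITIALIZER;

static void nonatomic_data_access(void)
{
        pthread_rwlock_rdlock(&macc_lck);       /* many readers at once */
        /* ... copy blocks to/from the fake store ... */
        pthread_rwlock_unlock(&macc_lck);
}

static void atomic_write(void)
{
        pthread_rwlock_wrlock(&macc_lck);       /* exclusive: 1x atomic write */
        /* ... write every block of the atomic range ... */
        pthread_rwlock_unlock(&macc_lck);
}

int main(void)
{
        nonatomic_data_access();
        atomic_write();
        printf("done\n");
        return 0;
}
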
4078 __acquire(&sip->macc_meta_lck); in sdeb_meta_read_lock()
4083 read_lock(&sip->macc_meta_lck); in sdeb_meta_read_lock()
4094 __release(&sip->macc_meta_lck); in sdeb_meta_read_unlock()
4099 read_unlock(&sip->macc_meta_lck); in sdeb_meta_read_unlock()
4110 __acquire(&sip->macc_meta_lck); in sdeb_meta_write_lock()
4115 write_lock(&sip->macc_meta_lck); in sdeb_meta_write_lock()
4126 __release(&sip->macc_meta_lck); in sdeb_meta_write_unlock()
4131 write_unlock(&sip->macc_meta_lck); in sdeb_meta_write_unlock()
4137 /* Returns number of bytes copied or -1 if error. */
4145 struct scsi_data_buffer *sdb = &scp->sdb; in do_device_access()
4154 return -1; in do_device_access()
4163 if (!sdb->length || !sip) in do_device_access()
4165 if (scp->sc_data_direction != dir) in do_device_access()
4166 return -1; in do_device_access()
4171 fsp = sip->storep; in do_device_access()
4175 /* Only allow 1x atomic write or multiple non-atomic writes at any given time */ in do_device_access()
4180 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, in do_device_access()
4196 /* Returns number of bytes copied or -1 if error. */
4199 struct scsi_data_buffer *sdb = &scp->sdb; in do_dout_fetch()
4201 if (!sdb->length) in do_dout_fetch()
4203 if (scp->sc_data_direction != DMA_TO_DEVICE) in do_dout_fetch()
4204 return -1; in do_dout_fetch()
4205 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp, in do_dout_fetch()
4209 /* If sip->storep+lba compares equal to arr(num), then copy top half of
4210 * arr into sip->storep+lba and return true. If comparison fails then
4219 u8 *fsp = sip->storep; in comp_write_worker()
4223 rest = block + num - store_blks; in comp_write_worker()
4225 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size); in comp_write_worker()
4229 res = memcmp(fsp, arr + ((num - rest) * lb_size), in comp_write_worker()
4236 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size); in comp_write_worker()
4238 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size); in comp_write_worker()
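
comp_write_worker() implements the data phase of COMPARE AND WRITE: arr carries 2 * num blocks, the first num blocks are compared against the store and, only if they match, the second num blocks are written over the same LBAs. A simplified stand-alone sketch without the wrap-around handling of the fake store:

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

/* 'arr' holds 2*num blocks of lb_size bytes; compare the first half against
 * store+lba and, only on a match, copy the second half over it. */
static bool comp_write(unsigned char *store, unsigned long long lba,
                       unsigned int num, const unsigned char *arr,
                       unsigned int lb_size)
{
        unsigned char *dst = store + lba * lb_size;

        if (memcmp(dst, arr, (size_t)num * lb_size) != 0)
                return false;                   /* miscompare */
        memcpy(dst, arr + (size_t)num * lb_size, (size_t)num * lb_size);
        return true;
}

int main(void)
{
        unsigned char store[2 * 512] = { 0 }, arr[2 * 512] = { 0 };

        arr[512] = 0xaa;                        /* new data in the top half */
        printf("%s\n", comp_write(store, 0, 1, arr, 512) ? "written" : "miscompare");
        return 0;
}
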
4259 if (sdt->guard_tag != csum) { in dif_verify()
4262 be16_to_cpu(sdt->guard_tag), in dif_verify()
4267 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { in dif_verify()
4273 be32_to_cpu(sdt->ref_tag) != ei_lba) { in dif_verify()
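
dif_verify() checks the 8-byte T10 protection-information tuple kept per sector: a CRC16 guard over the data, an application tag, and a reference tag that for type 1 protection must equal the low 32 bits of the LBA. A stand-alone sketch of a type-1 style check, keeping the tuple in host byte order for simplicity (the driver stores it big endian) and using a bitwise CRC16 with the T10-DIF polynomial 0x8BB7:

#include <stdint.h>
#include <stdio.h>

/* Host-order layout mirroring struct t10_pi_tuple. */
struct pi_tuple {
        uint16_t guard_tag;     /* CRC16-T10DIF of the sector data */
        uint16_t app_tag;       /* 0xffff means "don't check" (see prot_verify_read() below) */
        uint32_t ref_tag;       /* low 32 bits of the LBA for type 1 */
};

/* Bitwise CRC16 with the T10-DIF polynomial 0x8BB7, initial value 0. */
static uint16_t crc_t10dif(const uint8_t *data, size_t len)
{
        uint16_t crc = 0;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= (uint16_t)data[i] << 8;
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
                                             : (uint16_t)(crc << 1);
        }
        return crc;
}

/* Returns 0 on success, else an ASCQ-style hint as in dif_verify(). */
static int pi_verify(const struct pi_tuple *pi, const uint8_t *data,
                     size_t len, uint64_t sector)
{
        if (pi->guard_tag != crc_t10dif(data, len))
                return 0x01;    /* LOGICAL BLOCK GUARD CHECK FAILED */
        if (pi->ref_tag != (uint32_t)sector)
                return 0x03;    /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
        return 0;
}

int main(void)
{
        uint8_t sector[512] = { 0 };
        struct pi_tuple pi = {
                .guard_tag = crc_t10dif(sector, sizeof(sector)),
                .app_tag = 0,
                .ref_tag = 42,
        };

        printf("verify: %d\n", pi_verify(&pi, sector, sizeof(sector), 42));
        return 0;
}
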
4287 scp->device->hostdata, true); in dif_copy_prot()
4288 struct t10_pi_tuple *dif_storep = sip->dif_storep; in dif_copy_prot()
4305 rest = start + len - dif_store_end; in dif_copy_prot()
4310 memcpy(paddr, start, len - rest); in dif_copy_prot()
4312 memcpy(start, paddr, len - rest); in dif_copy_prot()
4316 memcpy(paddr + len - rest, dif_storep, rest); in dif_copy_prot()
4318 memcpy(dif_storep, paddr + len - rest, rest); in dif_copy_prot()
4322 resid -= len; in dif_copy_prot()
4334 scp->device->hostdata, true); in prot_verify_read()
4341 if (sdt->app_tag == cpu_to_be16(0xffff)) in prot_verify_read()
4351 if (scp->cmnd[1] >> 5) { /* RDPROTECT */ in prot_verify_read()
4370 u8 *cmd = scp->cmnd; in resp_read_tape()
4371 struct scsi_data_buffer *sdb = &scp->sdb; in resp_read_tape()
4372 int partition = devip->tape_partition; in resp_read_tape()
4373 u32 pos = devip->tape_location[partition]; in resp_read_tape()
4391 size = devip->tape_blksize; in resp_read_tape()
4395 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); in resp_read_tape()
4402 for (i = 0, blp = devip->tape_blocks[partition] + pos; in resp_read_tape()
4403 i < num && pos < devip->tape_eop[partition]; in resp_read_tape()
4405 devip->tape_location[partition] = pos + 1; in resp_read_tape()
4406 if (IS_TAPE_BLOCK_FM(blp->fl_size)) { in resp_read_tape()
4408 FILEMARK_DETECTED_ASCQ, fixed ? num - i : size, in resp_read_tape()
4410 scsi_set_resid(scp, (num - i) * size); in resp_read_tape()
4414 if (IS_TAPE_BLOCK_EOD(blp->fl_size)) { in resp_read_tape()
4416 EOD_DETECTED_ASCQ, fixed ? num - i : size, in resp_read_tape()
4418 devip->tape_location[partition] = pos; in resp_read_tape()
4419 scsi_set_resid(scp, (num - i) * size); in resp_read_tape()
4422 sg_zero_buffer(sdb->table.sgl, sdb->table.nents, in resp_read_tape()
4424 sg_copy_buffer(sdb->table.sgl, sdb->table.nents, in resp_read_tape()
4425 &(blp->data), 4, i * size, false); in resp_read_tape()
4427 if (blp->fl_size != devip->tape_blksize) { in resp_read_tape()
4428 scsi_set_resid(scp, (num - i) * size); in resp_read_tape()
4430 0, num - i, in resp_read_tape()
4435 if (blp->fl_size != size) { in resp_read_tape()
4436 if (blp->fl_size < size) in resp_read_tape()
4437 scsi_set_resid(scp, size - blp->fl_size); in resp_read_tape()
4440 0, size - blp->fl_size, in resp_read_tape()
4447 if (pos >= devip->tape_eop[partition]) { in resp_read_tape()
4449 EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size, in resp_read_tape()
4451 devip->tape_location[partition] = pos - 1; in resp_read_tape()
4454 devip->tape_location[partition] = pos; in resp_read_tape()
4467 u8 *cmd = scp->cmnd; in resp_read_dt0()
4521 sdev_printk(KERN_ERR, scp->device, "Unprotected RD " in resp_read_dt0()
4538 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) && in resp_read_dt0()
4543 if (0x70 == (scp->sense_buffer[0] & 0x7f)) { in resp_read_dt0()
4544 scp->sense_buffer[0] |= 0x80; /* Valid bit */ in resp_read_dt0()
4547 put_unaligned_be32(ret, scp->sense_buffer + 3); in resp_read_dt0()
4567 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { in resp_read_dt0()
4578 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) { in resp_read_dt0()
4590 if (unlikely(ret == -1)) in resp_read_dt0()
4593 scsi_set_resid(scp, scsi_bufflen(scp) - ret); in resp_read_dt0()
4660 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */ in prot_verify_write()
4690 lba += sdebug_unmap_granularity - sdebug_unmap_alignment; in lba_to_map_index()
4700 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment; in map_index_to_lba()
4713 mapped = test_bit(index, sip->map_storep); in map_state()
4716 next = find_next_zero_bit(sip->map_storep, map_size, index); in map_state()
4718 next = find_next_bit(sip->map_storep, map_size, index); in map_state()
4721 *num = end - lba; in map_state()
4734 set_bit(index, sip->map_storep); in map_region()
4744 u8 *fsp = sip->storep; in unmap_region()
4752 clear_bit(index, sip->map_storep); in unmap_region()
4759 if (sip->dif_storep) { in unmap_region()
4760 memset(sip->dif_storep + lba, 0xff, in unmap_region()
4761 sizeof(*sip->dif_storep) * in unmap_region()
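
map_state(), map_region() and unmap_region() track provisioning state in a bitmap with one bit per unmap granularity, offset by the unmap alignment. The sketch below reconstructs the LBA/index conversion consistently with the single lines of lba_to_map_index() and map_index_to_lba() shown above; it is an illustration, not the driver's exact code:

#include <stdio.h>

/* One bitmap bit per 'granularity' blocks; when 'alignment' is non-zero the
 * first region is shortened so that later regions start at the alignment
 * boundary and repeat every 'granularity' blocks. */
static unsigned long lba_to_map_index(unsigned long long lba,
                                      unsigned int granularity,
                                      unsigned int alignment)
{
        if (alignment)
                lba += granularity - alignment;
        return lba / granularity;
}

static unsigned long long map_index_to_lba(unsigned long index,
                                           unsigned int granularity,
                                           unsigned int alignment)
{
        unsigned long long lba = (unsigned long long)index * granularity;

        if (alignment)
                lba -= granularity - alignment;
        return lba;
}

int main(void)
{
        unsigned int gran = 8, align = 4;
        unsigned long idx = lba_to_map_index(10, gran, align);

        /* LBA 10 falls in the second mapped region, which starts at LBA 4. */
        printf("index %lu starts at lba %llu\n", idx,
               map_index_to_lba(idx, gran, align));
        return 0;
}
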
4772 u8 *cmd = scp->cmnd; in resp_write_tape()
4773 struct scsi_data_buffer *sdb = &scp->sdb; in resp_write_tape()
4774 int partition = devip->tape_partition; in resp_write_tape()
4775 int pos = devip->tape_location[partition]; in resp_write_tape()
4788 size = devip->tape_blksize; in resp_write_tape()
4792 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); in resp_write_tape()
4800 for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false; in resp_write_tape()
4801 i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) { in resp_write_tape()
4802 blp->fl_size = size; in resp_write_tape()
4803 sg_copy_buffer(sdb->table.sgl, sdb->table.nents, in resp_write_tape()
4804 &(blp->data), 4, i * size, true); in resp_write_tape()
4806 scsi_set_resid(scp, num * transfer - written); in resp_write_tape()
4807 ew |= (pos == devip->tape_eop[partition] - TAPE_EW); in resp_write_tape()
4810 devip->tape_location[partition] = pos; in resp_write_tape()
4811 blp->fl_size = TAPE_BLOCK_EOD_FLAG; in resp_write_tape()
4812 if (pos >= devip->tape_eop[partition] - 1) { in resp_write_tape()
4815 fixed ? num - i : transfer, in resp_write_tape()
4822 fixed ? num - i : transfer, in resp_write_tape()
4839 u8 *cmd = scp->cmnd; in resp_write_dt0()
4898 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " in resp_write_dt0()
4920 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { in resp_write_dt0()
4924 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ in resp_write_dt0()
4931 if (scp->prot_flags & SCSI_PROT_REF_CHECK) { in resp_write_dt0()
4935 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ in resp_write_dt0()
4954 if (unlikely(-1 == ret)) in resp_write_dt0()
4958 sdev_printk(KERN_INFO, scp->device, in resp_write_dt0()
4989 u8 *cmd = scp->cmnd; in resp_write_scat()
5027 sdev_printk(KERN_ERR, scp->device, in resp_write_scat()
5032 return 0; /* T10 says these do-nothings are not errors */ in resp_write_scat()
5035 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
5044 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
5054 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
5058 if (res == -1) { in resp_write_scat()
5072 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
5085 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
5108 * Write ranges atomically to keep as close to pre-atomic in resp_write_scat()
5117 if (unlikely(-1 == ret)) { in resp_write_scat()
5121 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
5159 struct scsi_device *sdp = scp->device; in resp_write_same()
5160 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in resp_write_same()
5166 scp->device->hostdata, true); in resp_write_same()
5187 fsp = sip->storep; in resp_write_same()
5196 if (-1 == ret) { in resp_write_same()
5200 sdev_printk(KERN_INFO, scp->device, in resp_write_same()
5226 u8 *cmd = scp->cmnd; in resp_write_same_10()
5242 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); in resp_write_same_10()
5251 u8 *cmd = scp->cmnd; in resp_write_same_16()
5265 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */ in resp_write_same_16()
5270 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); in resp_write_same_16()
5277 * field. For the Report supported operation codes command, SPC-4 suggests
5282 u8 *cmd = scp->cmnd; in resp_write_buffer()
5283 struct scsi_device *sdp = scp->device; in resp_write_buffer()
5291 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in resp_write_buffer()
5292 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm); in resp_write_buffer()
5295 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm); in resp_write_buffer()
5300 &devip->sdbg_host->dev_info_list, in resp_write_buffer()
5302 if (dp->target == sdp->id) { in resp_write_buffer()
5303 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm); in resp_write_buffer()
5306 dp->uas_bm); in resp_write_buffer()
5312 &devip->sdbg_host->dev_info_list, in resp_write_buffer()
5314 if (dp->target == sdp->id) in resp_write_buffer()
5316 dp->uas_bm); in resp_write_buffer()
5328 u8 *cmd = scp->cmnd; in resp_comp_write()
5350 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " in resp_comp_write()
5364 if (ret == -1) { in resp_comp_write()
5368 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " in resp_comp_write()
5380 /* Cover sip->map_storep (which map_region() sets) with data lock */ in resp_comp_write()
5407 payload_len = get_unaligned_be16(scp->cmnd + 7); in resp_unmap()
5410 descriptors = (payload_len - 8) / 16; in resp_unmap()
5412 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); in resp_unmap()
5425 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); in resp_unmap()
5457 u8 *cmd = scp->cmnd; in resp_get_lba_status()
5481 if (sdebug_capacity - lba <= 0xffffffff) in resp_get_lba_status()
5482 num = sdebug_capacity - lba; in resp_get_lba_status()
5500 const u8 *cmd = scp->cmnd; in resp_get_stream_status()
5509 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); in resp_get_stream_status()
5514 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); in resp_get_stream_status()
5520 * about open streams. Treat the non-permanent stream as open. in resp_get_stream_status()
5523 &h->number_of_open_streams); in resp_get_stream_status()
5531 stream_status->perm = stream_id < PERMANENT_STREAM_COUNT; in resp_get_stream_status()
5533 &stream_status->stream_identifier); in resp_get_stream_status()
5534 stream_status->rel_lifetime = stream_id + 1; in resp_get_stream_status()
5536 put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */ in resp_get_stream_status()
5547 u8 *cmd = scp->cmnd; in resp_sync_cache()
5568 * Assuming the LBA+num_blocks is not out-of-range, this function will return
5584 u8 *cmd = scp->cmnd; in resp_pre_fetch()
5586 u8 *fsp = sip->storep; in resp_pre_fetch()
5602 arr[1] = devip->tape_partition; in resp_pre_fetch()
5603 pos = devip->tape_location[devip->tape_partition]; in resp_pre_fetch()
5616 } else { /* PRE-FETCH(16) */ in resp_pre_fetch()
5626 /* PRE-FETCH spec says nothing about LBP or PI so skip them */ in resp_pre_fetch()
5629 rest = block + nblks - sdebug_store_sectors; in resp_pre_fetch()
5631 /* Try to bring the PRE-FETCH range into CPU's cache */ in resp_pre_fetch()
5634 (nblks - rest) * sdebug_sector_size); in resp_pre_fetch()
5648 * (W-LUN), the normal Linux scanning logic does not associate it with a
5650 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
5652 * the above will associate a W-LUN to each target. To only get a W-LUN
5653 * for target 2, then use "echo '- 2 49409' > scan" .
5658 unsigned char *cmd = scp->cmnd; in resp_report_luns()
5665 unsigned int wlun_cnt; /* report luns W-LUN count */ in resp_report_luns()
5679 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); in resp_report_luns()
5684 case 0: /* all LUNs apart from W-LUNs */ in resp_report_luns()
5688 case 1: /* only W-LUNs */ in resp_report_luns()
5697 case 0x11: /* see SPC-5 */ in resp_report_luns()
5701 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); in resp_report_luns()
5706 --lun_cnt; in resp_report_luns()
5729 lun_p->scsi_lun[0] |= 0x40; in resp_report_luns()
5757 u8 *cmd = scp->cmnd; in resp_verify()
5800 if (ret == -1) { in resp_verify()
5804 sdev_printk(KERN_INFO, scp->device, in resp_verify()
5836 u8 *cmd = scp->cmnd; in resp_report_zones()
5856 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD); in resp_report_zones()
5869 lba = zsp->z_start + zsp->z_size) { in resp_report_zones()
5879 if (zsp->z_cond != ZC1_EMPTY) in resp_report_zones()
5884 if (zsp->z_cond != ZC2_IMPLICIT_OPEN) in resp_report_zones()
5889 if (zsp->z_cond != ZC3_EXPLICIT_OPEN) in resp_report_zones()
5894 if (zsp->z_cond != ZC4_CLOSED) in resp_report_zones()
5899 if (zsp->z_cond != ZC5_FULL) in resp_report_zones()
5906 * Read-only, offline, reset WP recommended are in resp_report_zones()
5911 /* non-seq-resource set */ in resp_report_zones()
5912 if (!zsp->z_non_seq_resource) in resp_report_zones()
5934 desc[0] = zsp->z_type; in resp_report_zones()
5935 desc[1] = zsp->z_cond << 4; in resp_report_zones()
5936 if (zsp->z_non_seq_resource) in resp_report_zones()
5938 put_unaligned_be64((u64)zsp->z_size, desc + 8); in resp_report_zones()
5939 put_unaligned_be64((u64)zsp->z_start, desc + 16); in resp_report_zones()
5940 put_unaligned_be64((u64)zsp->z_wp, desc + 24); in resp_report_zones()
5954 put_unaligned_be64(sdebug_capacity - 1, arr + 8); in resp_report_zones()
5956 if (devip->zcap < devip->zsize) in resp_report_zones()
5957 put_unaligned_be64(devip->zsize, arr + 16); in resp_report_zones()
5959 rep_len = (unsigned long)desc - (unsigned long)arr; in resp_report_zones()
5972 u8 *cmd = scp->cmnd; in resp_atomic_write()
6004 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1); in resp_atomic_write()
6009 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1); in resp_atomic_write()
6014 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1); in resp_atomic_write()
6020 if (unlikely(ret == -1)) in resp_atomic_write()
6027 /* Logic transplanted from tcmu-runner, file_zbc.c */
6030 struct sdeb_zone_state *zsp = &devip->zstate[0]; in zbc_open_all()
6033 for (i = 0; i < devip->nr_zones; i++, zsp++) { in zbc_open_all()
6034 if (zsp->z_cond == ZC4_CLOSED) in zbc_open_all()
6035 zbc_open_zone(devip, &devip->zstate[i], true); in zbc_open_all()
6044 u8 *cmd = scp->cmnd; in resp_open_zone()
6057 if (devip->max_open && in resp_open_zone()
6058 devip->nr_exp_open + devip->nr_closed > devip->max_open) { in resp_open_zone()
6078 if (z_id != zsp->z_start) { in resp_open_zone()
6089 zc = zsp->z_cond; in resp_open_zone()
6093 if (devip->max_open && devip->nr_exp_open >= devip->max_open) { in resp_open_zone()
6110 for (i = 0; i < devip->nr_zones; i++) in zbc_close_all()
6111 zbc_close_zone(devip, &devip->zstate[i]); in zbc_close_all()
6119 u8 *cmd = scp->cmnd; in resp_close_zone()
6145 if (z_id != zsp->z_start) { in resp_close_zone()
6165 enum sdebug_z_cond zc = zsp->z_cond; in zbc_finish_zone()
6171 if (zsp->z_cond == ZC4_CLOSED) in zbc_finish_zone()
6172 devip->nr_closed--; in zbc_finish_zone()
6173 zsp->z_wp = zsp->z_start + zsp->z_size; in zbc_finish_zone()
6174 zsp->z_cond = ZC5_FULL; in zbc_finish_zone()
6182 for (i = 0; i < devip->nr_zones; i++) in zbc_finish_all()
6183 zbc_finish_zone(devip, &devip->zstate[i], false); in zbc_finish_all()
6192 u8 *cmd = scp->cmnd; in resp_finish_zone()
6217 if (z_id != zsp->z_start) { in resp_finish_zone()
6243 zc = zsp->z_cond; in zbc_rwp_zone()
6247 if (zsp->z_cond == ZC4_CLOSED) in zbc_rwp_zone()
6248 devip->nr_closed--; in zbc_rwp_zone()
6250 if (zsp->z_wp > zsp->z_start) in zbc_rwp_zone()
6251 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0, in zbc_rwp_zone()
6252 (zsp->z_wp - zsp->z_start) * sdebug_sector_size); in zbc_rwp_zone()
6254 zsp->z_non_seq_resource = false; in zbc_rwp_zone()
6255 zsp->z_wp = zsp->z_start; in zbc_rwp_zone()
6256 zsp->z_cond = ZC1_EMPTY; in zbc_rwp_zone()
6263 for (i = 0; i < devip->nr_zones; i++) in zbc_rwp_all()
6264 zbc_rwp_zone(devip, &devip->zstate[i]); in zbc_rwp_all()
6272 u8 *cmd = scp->cmnd; in resp_rwp_zone()
6296 if (z_id != zsp->z_start) { in resp_rwp_zone()
6323 struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1; in sdebug_q_cmd_complete()
6329 if (raw_smp_processor_id() != sd_dp->issuing_cpu) in sdebug_q_cmd_complete()
6338 spin_lock_irqsave(&sdsc->lock, flags); in sdebug_q_cmd_complete()
6339 aborted = sd_dp->aborted; in sdebug_q_cmd_complete()
6341 sd_dp->aborted = false; in sdebug_q_cmd_complete()
6343 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_q_cmd_complete()
6346 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n"); in sdebug_q_cmd_complete()
6389 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M) in sdebug_device_create_zones()
6391 while (capacity < devip->zsize << 2 && devip->zsize >= 2) in sdebug_device_create_zones()
6392 devip->zsize >>= 1; in sdebug_device_create_zones()
6393 if (devip->zsize < 2) { in sdebug_device_create_zones()
6395 return -EINVAL; in sdebug_device_create_zones()
6400 return -EINVAL; in sdebug_device_create_zones()
6402 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M) in sdebug_device_create_zones()
6404 if (devip->zsize >= capacity) { in sdebug_device_create_zones()
6406 return -EINVAL; in sdebug_device_create_zones()
6410 devip->zsize_shift = ilog2(devip->zsize); in sdebug_device_create_zones()
6411 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift; in sdebug_device_create_zones()
6414 devip->zcap = devip->zsize; in sdebug_device_create_zones()
6416 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >> in sdebug_device_create_zones()
6418 if (devip->zcap > devip->zsize) { in sdebug_device_create_zones()
6420 return -EINVAL; in sdebug_device_create_zones()
6424 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift; in sdebug_device_create_zones()
6427 return -EINVAL; in sdebug_device_create_zones()
6429 devip->nr_conv_zones = sdeb_zbc_nr_conv; in sdebug_device_create_zones()
6430 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >> in sdebug_device_create_zones()
6431 devip->zsize_shift; in sdebug_device_create_zones()
6432 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones; in sdebug_device_create_zones()
6435 if (devip->zcap < devip->zsize) in sdebug_device_create_zones()
6436 devip->nr_zones += devip->nr_seq_zones; in sdebug_device_create_zones()
6438 if (devip->zoned) { in sdebug_device_create_zones()
6440 if (sdeb_zbc_max_open >= devip->nr_zones - 1) in sdebug_device_create_zones()
6441 devip->max_open = (devip->nr_zones - 1) / 2; in sdebug_device_create_zones()
6443 devip->max_open = sdeb_zbc_max_open; in sdebug_device_create_zones()
6446 devip->zstate = kcalloc(devip->nr_zones, in sdebug_device_create_zones()
6448 if (!devip->zstate) in sdebug_device_create_zones()
6449 return -ENOMEM; in sdebug_device_create_zones()
6451 for (i = 0; i < devip->nr_zones; i++) { in sdebug_device_create_zones()
6452 zsp = &devip->zstate[i]; in sdebug_device_create_zones()
6454 zsp->z_start = zstart; in sdebug_device_create_zones()
6456 if (i < devip->nr_conv_zones) { in sdebug_device_create_zones()
6457 zsp->z_type = ZBC_ZTYPE_CNV; in sdebug_device_create_zones()
6458 zsp->z_cond = ZBC_NOT_WRITE_POINTER; in sdebug_device_create_zones()
6459 zsp->z_wp = (sector_t)-1; in sdebug_device_create_zones()
6460 zsp->z_size = in sdebug_device_create_zones()
6461 min_t(u64, devip->zsize, capacity - zstart); in sdebug_device_create_zones()
6462 } else if ((zstart & (devip->zsize - 1)) == 0) { in sdebug_device_create_zones()
6463 if (devip->zoned) in sdebug_device_create_zones()
6464 zsp->z_type = ZBC_ZTYPE_SWR; in sdebug_device_create_zones()
6466 zsp->z_type = ZBC_ZTYPE_SWP; in sdebug_device_create_zones()
6467 zsp->z_cond = ZC1_EMPTY; in sdebug_device_create_zones()
6468 zsp->z_wp = zsp->z_start; in sdebug_device_create_zones()
6469 zsp->z_size = in sdebug_device_create_zones()
6470 min_t(u64, devip->zcap, capacity - zstart); in sdebug_device_create_zones()
6472 zsp->z_type = ZBC_ZTYPE_GAP; in sdebug_device_create_zones()
6473 zsp->z_cond = ZBC_NOT_WRITE_POINTER; in sdebug_device_create_zones()
6474 zsp->z_wp = (sector_t)-1; in sdebug_device_create_zones()
6475 zsp->z_size = min_t(u64, devip->zsize - devip->zcap, in sdebug_device_create_zones()
6476 capacity - zstart); in sdebug_device_create_zones()
6479 WARN_ON_ONCE((int)zsp->z_size <= 0); in sdebug_device_create_zones()
6480 zstart += zsp->z_size; in sdebug_device_create_zones()
6494 uuid_gen(&devip->lu_name); in sdebug_device_create()
6497 devip->lu_name = shared_uuid; in sdebug_device_create()
6501 devip->lu_name = shared_uuid; in sdebug_device_create()
6504 devip->sdbg_host = sdbg_host; in sdebug_device_create()
6506 devip->zoned = sdeb_zbc_model == BLK_ZONED_HM; in sdebug_device_create()
6512 devip->zoned = false; in sdebug_device_create()
6515 devip->tape_density = TAPE_DEF_DENSITY; in sdebug_device_create()
6516 devip->tape_blksize = TAPE_DEF_BLKSIZE; in sdebug_device_create()
6518 devip->create_ts = ktime_get_boottime(); in sdebug_device_create()
6519 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0)); in sdebug_device_create()
6520 spin_lock_init(&devip->list_lock); in sdebug_device_create()
6521 INIT_LIST_HEAD(&devip->inject_err_list); in sdebug_device_create()
6522 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); in sdebug_device_create()
6533 sdbg_host = shost_to_sdebug_host(sdev->host); in find_build_dev_info()
6535 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { in find_build_dev_info()
6536 if ((devip->used) && (devip->channel == sdev->channel) && in find_build_dev_info()
6537 (devip->target == sdev->id) && in find_build_dev_info()
6538 (devip->lun == sdev->lun)) in find_build_dev_info()
6541 if ((!devip->used) && (!open_devip)) in find_build_dev_info()
6553 open_devip->channel = sdev->channel; in find_build_dev_info()
6554 open_devip->target = sdev->id; in find_build_dev_info()
6555 open_devip->lun = sdev->lun; in find_build_dev_info()
6556 open_devip->sdbg_host = sdbg_host; in find_build_dev_info()
6557 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm); in find_build_dev_info()
6558 open_devip->used = true; in find_build_dev_info()
6566 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); in scsi_debug_sdev_init()
6575 (struct sdebug_dev_info *)sdp->hostdata; in scsi_debug_sdev_configure()
6580 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); in scsi_debug_sdev_configure()
6581 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN) in scsi_debug_sdev_configure()
6582 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN; in scsi_debug_sdev_configure()
6589 if (!devip->tape_blocks[0]) { in scsi_debug_sdev_configure()
6590 devip->tape_blocks[0] = in scsi_debug_sdev_configure()
6593 if (!devip->tape_blocks[0]) in scsi_debug_sdev_configure()
6596 devip->tape_pending_nbr_partitions = -1; in scsi_debug_sdev_configure()
6598 kfree(devip->tape_blocks[0]); in scsi_debug_sdev_configure()
6599 devip->tape_blocks[0] = NULL; in scsi_debug_sdev_configure()
6603 sdp->hostdata = devip; in scsi_debug_sdev_configure()
6605 sdp->no_uld_attach = 1; in scsi_debug_sdev_configure()
6609 sdp->allow_restart = 1; in scsi_debug_sdev_configure()
6611 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev), in scsi_debug_sdev_configure()
6613 if (IS_ERR_OR_NULL(devip->debugfs_entry)) in scsi_debug_sdev_configure()
6615 __func__, dev_name(&sdp->sdev_gendev)); in scsi_debug_sdev_configure()
6617 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp, in scsi_debug_sdev_configure()
6621 __func__, dev_name(&sdp->sdev_gendev)); in scsi_debug_sdev_configure()
6629 (struct sdebug_dev_info *)sdp->hostdata; in scsi_debug_sdev_destroy()
6634 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); in scsi_debug_sdev_destroy()
6639 spin_lock(&devip->list_lock); in scsi_debug_sdev_destroy()
6640 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in scsi_debug_sdev_destroy()
6641 list_del_rcu(&err->list); in scsi_debug_sdev_destroy()
6642 call_rcu(&err->rcu, sdebug_err_free); in scsi_debug_sdev_destroy()
6644 spin_unlock(&devip->list_lock); in scsi_debug_sdev_destroy()
6646 debugfs_remove(devip->debugfs_entry); in scsi_debug_sdev_destroy()
6649 kfree(devip->tape_blocks[0]); in scsi_debug_sdev_destroy()
6650 devip->tape_blocks[0] = NULL; in scsi_debug_sdev_destroy()
6653 /* make this slot available for re-use */ in scsi_debug_sdev_destroy()
6654 devip->used = false; in scsi_debug_sdev_destroy()
6655 sdp->hostdata = NULL; in scsi_debug_sdev_destroy()
6662 struct sdebug_defer *sd_dp = &sdsc->sd_dp; in scsi_debug_stop_cmnd()
6663 enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t); in scsi_debug_stop_cmnd()
6665 lockdep_assert_held(&sdsc->lock); in scsi_debug_stop_cmnd()
6668 int res = hrtimer_try_to_cancel(&sd_dp->hrt); in scsi_debug_stop_cmnd()
6671 case -1: /* -1: the timer callback is currently executing */ in scsi_debug_stop_cmnd()
6680 if (cancel_work(&sd_dp->ew.work)) in scsi_debug_stop_cmnd()
6692 * Called from scsi_debug_abort() only, which is for timed-out cmd.
6700 spin_lock_irqsave(&sdsc->lock, flags); in scsi_debug_abort_cmnd()
6702 spin_unlock_irqrestore(&sdsc->lock, flags); in scsi_debug_abort_cmnd()
6725 struct Scsi_Host *shost = sdhp->shost; in stop_all_queued()
6727 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL); in stop_all_queued()
6734 struct scsi_device *sdp = cmnd->device; in sdebug_fail_abort()
6735 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_abort()
6737 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_abort()
6744 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_abort()
6745 if (err->type == ERR_ABORT_CMD_FAILED && in sdebug_fail_abort()
6746 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_abort()
6747 ret = !!err->cnt; in sdebug_fail_abort()
6748 if (err->cnt < 0) in sdebug_fail_abort()
6749 err->cnt++; in sdebug_fail_abort()
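/*
 * [Editor's sketch] the inject_err_list handling above is the classic
 * RCU-protected list: writers take a spinlock and use the _rcu list helpers,
 * lookups traverse under rcu_read_lock(), and freeing is deferred with
 * call_rcu() so a concurrent reader never touches freed memory.  Names
 * (my_err, my_err_*) are hypothetical.
 */
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_err {
	struct list_head list;
	struct rcu_head rcu;
	unsigned char cmd;		/* opcode to match, 0xff means "any" */
};

static LIST_HEAD(my_err_list);
static DEFINE_SPINLOCK(my_err_lock);

static void my_err_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_err, rcu));
}

static void my_err_add(struct my_err *err)
{
	spin_lock(&my_err_lock);
	list_add_tail_rcu(&err->list, &my_err_list);
	spin_unlock(&my_err_lock);
}

static void my_err_del(struct my_err *err)
{
	spin_lock(&my_err_lock);
	list_del_rcu(&err->list);
	spin_unlock(&my_err_lock);
	call_rcu(&err->rcu, my_err_free);	/* freed after a grace period */
}

static bool my_err_match(unsigned char opcode)
{
	struct my_err *err;
	bool hit = false;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &my_err_list, list) {
		if (err->cmd == opcode || err->cmd == 0xff) {
			hit = true;
			break;
		}
	}
	rcu_read_unlock();
	return hit;
}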
6763 u8 *cmd = SCpnt->cmnd; in scsi_debug_abort()
6769 sdev_printk(KERN_INFO, SCpnt->device, in scsi_debug_abort()
6791 if (scmd->device == sdp) in scsi_debug_stop_all_queued_iter()
6800 struct Scsi_Host *shost = sdp->host; in scsi_debug_stop_all_queued()
6802 blk_mq_tagset_busy_iter(&shost->tag_set, in scsi_debug_stop_all_queued()
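/*
 * [Editor's sketch] stop_all_queued() and scsi_debug_stop_all_queued() above
 * rely on blk_mq_tagset_busy_iter(), which invokes the supplied function for
 * every in-flight request on a tag set.  The sketch assumes the two-argument
 * callback form used by recent kernels; names (my_stop_iter,
 * my_stop_all_on_sdev) are hypothetical.
 */
#include <linux/blk-mq.h>
#include <linux/printk.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static bool my_stop_iter(struct request *rq, void *priv)
{
	struct scsi_device *sdp = priv;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);	/* cmd lives in the pdu */

	if (scmd->device == sdp)
		pr_info("would cancel deferred completion of tag %d\n", rq->tag);
	return true;		/* true: keep iterating over the tag set */
}

static void my_stop_all_on_sdev(struct scsi_device *sdp)
{
	blk_mq_tagset_busy_iter(&sdp->host->tag_set, my_stop_iter, sdp);
}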
6808 struct scsi_device *sdp = cmnd->device; in sdebug_fail_lun_reset()
6809 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_lun_reset()
6811 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_lun_reset()
6818 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_lun_reset()
6819 if (err->type == ERR_LUN_RESET_FAILED && in sdebug_fail_lun_reset()
6820 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_lun_reset()
6821 ret = !!err->cnt; in sdebug_fail_lun_reset()
6822 if (err->cnt < 0) in sdebug_fail_lun_reset()
6823 err->cnt++; in sdebug_fail_lun_reset()
6839 devip->tape_blksize = TAPE_DEF_BLKSIZE; in scsi_tape_reset_clear()
6840 devip->tape_density = TAPE_DEF_DENSITY; in scsi_tape_reset_clear()
6841 devip->tape_partition = 0; in scsi_tape_reset_clear()
6842 devip->tape_dce = 0; in scsi_tape_reset_clear()
6844 devip->tape_location[i] = 0; in scsi_tape_reset_clear()
6845 devip->tape_pending_nbr_partitions = -1; in scsi_tape_reset_clear()
6852 struct scsi_device *sdp = SCpnt->device; in scsi_debug_device_reset()
6853 struct sdebug_dev_info *devip = sdp->hostdata; in scsi_debug_device_reset()
6854 u8 *cmd = SCpnt->cmnd; in scsi_debug_device_reset()
6864 set_bit(SDEBUG_UA_POR, devip->uas_bm); in scsi_debug_device_reset()
6878 struct scsi_target *starget = scsi_target(cmnd->device); in sdebug_fail_target_reset()
6880 (struct sdebug_target_info *)starget->hostdata; in sdebug_fail_target_reset()
6883 return targetip->reset_fail; in sdebug_fail_target_reset()
6890 struct scsi_device *sdp = SCpnt->device; in scsi_debug_target_reset()
6891 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); in scsi_debug_target_reset()
6893 u8 *cmd = SCpnt->cmnd; in scsi_debug_target_reset()
6901 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { in scsi_debug_target_reset()
6902 if (devip->target == sdp->id) { in scsi_debug_target_reset()
6903 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in scsi_debug_target_reset()
6924 struct scsi_device *sdp = SCpnt->device; in scsi_debug_bus_reset()
6925 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); in scsi_debug_bus_reset()
6934 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { in scsi_debug_bus_reset()
6935 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in scsi_debug_bus_reset()
6954 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); in scsi_debug_host_reset()
6957 list_for_each_entry(devip, &sdbg_host->dev_info_list, in scsi_debug_host_reset()
6959 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in scsi_debug_host_reset()
6967 sdev_printk(KERN_INFO, SCpnt->device, in scsi_debug_host_reset()
6987 sectors_per_part = (num_sectors - sdebug_sectors_per) in sdebug_build_parts()
6995 if (starts[k] - starts[k - 1] < max_part_secs) in sdebug_build_parts()
6996 max_part_secs = starts[k] - starts[k - 1]; in sdebug_build_parts()
7006 end_sec = starts[k] + max_part_secs - 1; in sdebug_build_parts()
7007 pp->boot_ind = 0; in sdebug_build_parts()
7009 pp->cyl = start_sec / heads_by_sects; in sdebug_build_parts()
7010 pp->head = (start_sec - (pp->cyl * heads_by_sects)) in sdebug_build_parts()
7012 pp->sector = (start_sec % sdebug_sectors_per) + 1; in sdebug_build_parts()
7014 pp->end_cyl = end_sec / heads_by_sects; in sdebug_build_parts()
7015 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects)) in sdebug_build_parts()
7017 pp->end_sector = (end_sec % sdebug_sectors_per) + 1; in sdebug_build_parts()
7019 pp->start_sect = cpu_to_le32(start_sec); in sdebug_build_parts()
7020 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1); in sdebug_build_parts()
7021 pp->sys_ind = 0x83; /* plain Linux partition */ in sdebug_build_parts()
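/*
 * [Editor's sketch] sdebug_build_parts() above converts partition start/end
 * LBAs into cylinder/head/sector (CHS) form for the fake DOS partition table.
 * A stand-alone, user-space illustration of the same arithmetic (the geometry
 * and LBA values are arbitrary examples):
 */
#include <stdio.h>

int main(void)
{
	unsigned int heads = 8, sectors_per = 32;	/* example geometry */
	unsigned int heads_by_sects = heads * sectors_per;
	unsigned int lba = 5000;			/* example start sector */

	unsigned int cyl = lba / heads_by_sects;
	unsigned int head = (lba - cyl * heads_by_sects) / sectors_per;
	unsigned int sector = (lba % sectors_per) + 1;	/* CHS sectors are 1-based */

	printf("LBA %u -> C/H/S %u/%u/%u\n", lba, cyl, head, sector);
	/* prints: LBA 5000 -> C/H/S 19/4/9 */
	return 0;
}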
7032 struct Scsi_Host *shost = sdhp->shost; in block_unblock_all_queues()
7041 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
7089 bool polled = rq->cmd_flags & REQ_POLLED; in schedule_resp()
7101 sdp = cmnd->device; in schedule_resp()
7110 int qdepth = cmnd->device->queue_depth; in schedule_resp()
7124 sd_dp = &sdsc->sd_dp; in schedule_resp()
7130 cmnd->result = pfp ? pfp(cmnd, devip) : 0; in schedule_resp()
7131 if (cmnd->result & SDEG_RES_IMMED_MASK) { in schedule_resp()
7132 cmnd->result &= ~SDEG_RES_IMMED_MASK; in schedule_resp()
7135 if (cmnd->result == 0 && scsi_result != 0) in schedule_resp()
7136 cmnd->result = scsi_result; in schedule_resp()
7137 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) { in schedule_resp()
7141 cmnd->result = check_condition_result; in schedule_resp()
7145 if (unlikely(sdebug_verbose && cmnd->result)) in schedule_resp()
7146 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", in schedule_resp()
7147 __func__, cmnd->result); in schedule_resp()
7168 u64 d = ktime_get_boottime_ns() - ns_from_boot; in schedule_resp()
7176 kt -= d; in schedule_resp()
7180 sd_dp->issuing_cpu = raw_smp_processor_id(); in schedule_resp()
7182 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
7183 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt); in schedule_resp()
7184 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); in schedule_resp()
7185 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
7188 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
7189 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT); in schedule_resp()
7190 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); in schedule_resp()
7192 * The completion handler will try to grab sqcp->lock, in schedule_resp()
7197 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
7202 sd_dp->aborted = true; in schedule_resp()
7209 sd_dp->issuing_cpu = raw_smp_processor_id(); in schedule_resp()
7211 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
7212 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); in schedule_resp()
7213 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); in schedule_resp()
7214 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
7216 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
7217 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ); in schedule_resp()
7218 schedule_work(&sd_dp->ew.work); in schedule_resp()
7219 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
7225 respond_in_thread: /* call back to mid-layer using invocation thread */ in schedule_resp()
7226 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0; in schedule_resp()
7227 cmnd->result &= ~SDEG_RES_IMMED_MASK; in schedule_resp()
7228 if (cmnd->result == 0 && scsi_result != 0) in schedule_resp()
7229 cmnd->result = scsi_result; in schedule_resp()
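/*
 * [Editor's sketch] schedule_resp() above defers the command's completion to
 * either an hrtimer or a workqueue and records the deferral type under the
 * per-command lock.  The bare hrtimer half of that pattern, mirroring the
 * hrtimer_setup()/hrtimer_start() calls the driver makes, looks roughly like
 * this (my_defer and my_defer_* are hypothetical):
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct my_defer {
	struct hrtimer hrt;
};

static enum hrtimer_restart my_defer_done(struct hrtimer *timer)
{
	struct my_defer *dp = container_of(timer, struct my_defer, hrt);

	/* complete the deferred command associated with dp here */
	(void)dp;
	return HRTIMER_NORESTART;	/* one-shot timer */
}

static void my_defer_init(struct my_defer *dp)
{
	hrtimer_setup(&dp->hrt, my_defer_done, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}

static void my_defer_arm(struct my_defer *dp, u64 delay_ns)
{
	hrtimer_start(&dp->hrt, ns_to_ktime(delay_ns), HRTIMER_MODE_REL_PINNED);
}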
7327 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
7329 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
7331 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
7349 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
7354 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
7355 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
7362 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
7365 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
7369 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7 [SPC-5])");
7373 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
7385 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
7386 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
7387 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
7390 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
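/*
 * [Editor's sketch] every MODULE_PARM_DESC() above pairs with a
 * module_param_named() declaration earlier in the file.  The general shape is
 * (variable name and permission bits below are illustrative only):
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_delay = 1;	/* default: 1 jiffy */
module_param_named(delay, my_delay, int, 0644);
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");

/*
 * Usage: set at load time ("modprobe <module> delay=0") or, because the
 * permission bits allow it, later via /sys/module/<module>/parameters/delay.
 */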
7406 if (k >= (SDEBUG_INFO_LEN - 1)) in scsi_debug_info()
7408 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k, in scsi_debug_info()
7424 return -EACCES; in scsi_debug_write_info()
7428 return -EINVAL; in scsi_debug_write_info()
7449 int queue_num = data->queue_num; in sdebug_submit_queue_iter()
7455 if (*data->first == -1) in sdebug_submit_queue_iter()
7456 *data->first = *data->last = tag; in sdebug_submit_queue_iter()
7458 *data->last = tag; in sdebug_submit_queue_iter()
7498 int f = -1, l = -1; in scsi_debug_show_info()
7505 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter, in scsi_debug_show_info()
7513 seq_printf(m, "this host_no=%d\n", host->host_no); in scsi_debug_show_info()
7523 idx = sdhp->si_idx; in scsi_debug_show_info()
7525 sdhp->shost->host_no, idx); in scsi_debug_show_info()
7547 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit of delay is jiffies. */
7564 struct Scsi_Host *shost = sdhp->shost; in delay_store()
7567 res = -EBUSY; /* queued commands */ in delay_store()
7580 return -EINVAL; in delay_store()
7588 /* Returns -EBUSY if ndelay is being changed and commands are queued */
7605 struct Scsi_Host *shost = sdhp->shost; in ndelay_store()
7608 res = -EBUSY; /* queued commands */ in ndelay_store()
7623 return -EINVAL; in ndelay_store()
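/*
 * [Editor's sketch] delay_store()/ndelay_store() above are driver-level sysfs
 * attributes (DRIVER_ATTR_RW and friends).  A minimal attribute of the same
 * shape, without the driver's -EBUSY check for queued commands, might look
 * like this (my_jdelay is hypothetical):
 */
#include <linux/device/driver.h>
#include <linux/errno.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

static int my_jdelay = 1;

static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return sysfs_emit(buf, "%d\n", my_jdelay);
}

static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay;

	if (kstrtoint(buf, 10, &jdelay) || jdelay < -2)
		return -EINVAL;
	my_jdelay = jdelay;
	return count;
}
static DRIVER_ATTR_RW(delay);	/* exposed once added to the driver's attribute group */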
7647 return -EINVAL; in opts_store()
7668 return -EINVAL; in ptype_store()
7672 return -EINVAL; in ptype_store()
7676 return -EINVAL; in ptype_store()
7693 return -EINVAL; in dsense_store()
7715 if (want_store) { /* 1 --> 0 transition, set up store */ in fake_rw_store()
7728 if (sdhp->si_idx != idx) { in fake_rw_store()
7729 xa_set_mark(per_store_ap, sdhp->si_idx, in fake_rw_store()
7731 sdhp->si_idx = idx; in fake_rw_store()
7735 } else { /* 0 --> 1 transition is trigger for shrink */ in fake_rw_store()
7741 return -EINVAL; in fake_rw_store()
7758 return -EINVAL; in no_lun_0_store()
7776 return -EINVAL; in num_tgts_store()
7797 return -EINVAL; in per_host_store_store()
7829 return -EINVAL; in every_nth_store()
7853 return -EINVAL; in lun_format_store()
7857 return -EINVAL; in lun_format_store()
7861 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */ in lun_format_store()
7867 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { in lun_format_store()
7868 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); in lun_format_store()
7875 return -EINVAL; in lun_format_store()
7892 return -EINVAL; in max_luns_store()
7897 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */ in max_luns_store()
7904 list_for_each_entry(dp, &sdhp->dev_info_list, in max_luns_store()
7907 dp->uas_bm); in max_luns_store()
7914 return -EINVAL; in max_luns_store()
7938 count = -EBUSY; in max_queue_store()
7942 return -EINVAL; in max_queue_store()
7961 return -EINVAL; in no_rwlock_store()
7998 return -ENOTSUPP; in virtual_gb_store()
8011 list_for_each_entry(dp, &sdhp->dev_info_list, in virtual_gb_store()
8014 dp->uas_bm); in virtual_gb_store()
8021 return -EINVAL; in virtual_gb_store()
8041 return -EINVAL; in add_host_store()
8052 if (found) /* re-use case */ in add_host_store()
8059 } while (--delta_hosts); in add_host_store()
8082 return -EINVAL; in vpd_use_hostno_store()
8104 return -EINVAL; in statistics_store()
8149 return scnprintf(buf, PAGE_SIZE, "0-%u\n", in map_show()
8156 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", in map_show()
8157 (int)map_size, sip->map_storep); in map_show()
8177 return -EINVAL; in random_store()
8197 return -EINVAL; in removable_store()
8215 return -EINVAL; in host_lock_store()
8232 return -EINVAL; in strict_store()
8262 [BLK_ZONED_HA] = "host-aware",
8263 [BLK_ZONED_HM] = "host-managed",
8287 return -EINVAL; in sdeb_zbc_model_str()
8312 p += scnprintf(p, end - p, "%d %ld\n", i, in group_number_stats_show()
8315 return p - buf; in group_number_stats_show()
8388 int idx = -1; in scsi_debug_init()
8404 return -EINVAL; in scsi_debug_init()
8418 return -EINVAL; in scsi_debug_init()
8423 return -EINVAL; in scsi_debug_init()
8428 return -EINVAL; in scsi_debug_init()
8433 return -EINVAL; in scsi_debug_init()
8438 return -EINVAL; in scsi_debug_init()
8457 return -EINVAL; in scsi_debug_init()
8462 return -EINVAL; in scsi_debug_init()
8467 return -EINVAL; in scsi_debug_init()
8474 return -EINVAL; in scsi_debug_init()
8505 return -EINVAL; in scsi_debug_init()
8552 return -EINVAL; in scsi_debug_init()
8592 k, -ret); in scsi_debug_init()
8599 pr_err("add_host k=%d error=%d\n", k, -ret); in scsi_debug_init()
8622 for (; k; k--) in scsi_debug_exit()
8656 vfree(sip->map_storep); in sdebug_erase_store()
8657 vfree(sip->dif_storep); in sdebug_erase_store()
8658 vfree(sip->storep); in sdebug_erase_store()
8694 return -ENOMEM; in sdebug_add_store()
8701 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res); in sdebug_add_store()
8709 res = -ENOMEM; in sdebug_add_store()
8710 sip->storep = vzalloc(sz); in sdebug_add_store()
8711 if (!sip->storep) { in sdebug_add_store()
8716 sdebug_build_parts(sip->storep, sz); in sdebug_add_store()
8723 sip->dif_storep = vmalloc(dif_size); in sdebug_add_store()
8726 sip->dif_storep); in sdebug_add_store()
8728 if (!sip->dif_storep) { in sdebug_add_store()
8732 memset(sip->dif_storep, 0xff, dif_size); in sdebug_add_store()
8736 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; in sdebug_add_store()
8737 sip->map_storep = vmalloc(array_size(sizeof(long), in sdebug_add_store()
8742 if (!sip->map_storep) { in sdebug_add_store()
8747 bitmap_zero(sip->map_storep, map_size); in sdebug_add_store()
8754 rwlock_init(&sip->macc_data_lck); in sdebug_add_store()
8755 rwlock_init(&sip->macc_meta_lck); in sdebug_add_store()
8756 rwlock_init(&sip->macc_sector_lck); in sdebug_add_store()
8760 pr_warn("%s: failed, errno=%d\n", __func__, -res); in sdebug_add_store()
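/*
 * [Editor's sketch] sdebug_add_store() above backs each store with vzalloc()ed
 * RAM plus, for thin provisioning, a bitmap sized from the sector count.  The
 * allocation skeleton (sizes and names are illustrative):
 */
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/vmalloc.h>

static unsigned char *my_storep;	/* backing "medium" */
static unsigned long *my_map;		/* one bit per provisioning granule */

static int my_store_alloc(unsigned long bytes, unsigned long map_bits)
{
	my_storep = vzalloc(bytes);
	if (!my_storep)
		return -ENOMEM;
	my_map = vmalloc(array_size(sizeof(long), BITS_TO_LONGS(map_bits)));
	if (!my_map) {
		vfree(my_storep);
		return -ENOMEM;
	}
	bitmap_zero(my_map, map_bits);
	return 0;
}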
8767 int error = -ENOMEM; in sdebug_add_host_helper()
8773 return -ENOMEM; in sdebug_add_host_helper()
8777 sdbg_host->si_idx = idx; in sdebug_add_host_helper()
8779 INIT_LIST_HEAD(&sdbg_host->dev_info_list); in sdebug_add_host_helper()
8789 list_add_tail(&sdbg_host->host_list, &sdebug_host_list); in sdebug_add_host_helper()
8792 sdbg_host->dev.bus = &pseudo_lld_bus; in sdebug_add_host_helper()
8793 sdbg_host->dev.parent = pseudo_primary; in sdebug_add_host_helper()
8794 sdbg_host->dev.release = &sdebug_release_adapter; in sdebug_add_host_helper()
8795 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts); in sdebug_add_host_helper()
8797 error = device_register(&sdbg_host->dev); in sdebug_add_host_helper()
8800 list_del(&sdbg_host->host_list); in sdebug_add_host_helper()
8809 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, in sdebug_add_host_helper()
8811 list_del(&sdbg_devinfo->dev_list); in sdebug_add_host_helper()
8812 kfree(sdbg_devinfo->zstate); in sdebug_add_host_helper()
8815 if (sdbg_host->dev.release) in sdebug_add_host_helper()
8816 put_device(&sdbg_host->dev); in sdebug_add_host_helper()
8819 pr_warn("%s: failed, errno=%d\n", __func__, -error); in sdebug_add_host_helper()
8837 int idx = -1; in sdebug_do_remove_host()
8845 idx = sdbg_host->si_idx; in sdebug_do_remove_host()
8853 if (idx == sdbg_host2->si_idx) { in sdebug_do_remove_host()
8861 --sdeb_most_recent_idx; in sdebug_do_remove_host()
8865 list_del(&sdbg_host->host_list); in sdebug_do_remove_host()
8871 device_unregister(&sdbg_host->dev); in sdebug_do_remove_host()
8872 --sdebug_num_hosts; in sdebug_do_remove_host()
8877 struct sdebug_dev_info *devip = sdev->hostdata; in sdebug_change_qdepth()
8880 return -ENODEV; in sdebug_change_qdepth()
8892 if (qdepth != sdev->queue_depth) in sdebug_change_qdepth()
8901 return sdev->queue_depth; in sdebug_change_qdepth()
8907 if (sdebug_every_nth < -1) in fake_timeout()
8908 sdebug_every_nth = -1; in fake_timeout()
8924 struct scsi_device *sdp = scp->device; in resp_not_ready()
8926 stopped_state = atomic_read(&devip->stopped); in resp_not_ready()
8928 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) { in resp_not_ready()
8929 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts)); in resp_not_ready()
8932 atomic_set(&devip->stopped, 0); in resp_not_ready()
8940 if (scp->cmnd[0] == TEST_UNIT_READY) { in resp_not_ready()
8944 diff_ns = tur_nanosecs_to_ready - diff_ns; in resp_not_ready()
8947 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */ in resp_not_ready()
8949 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE, in resp_not_ready()
8965 if (shost->nr_hw_queues == 1) in sdebug_map_queues()
8969 struct blk_mq_queue_map *map = &shost->tag_set.map[i]; in sdebug_map_queues()
8971 map->nr_queues = 0; in sdebug_map_queues()
8974 map->nr_queues = submit_queues - poll_queues; in sdebug_map_queues()
8976 map->nr_queues = poll_queues; in sdebug_map_queues()
8978 if (!map->nr_queues) { in sdebug_map_queues()
8983 map->queue_offset = qoff; in sdebug_map_queues()
8986 qoff += map->nr_queues; in sdebug_map_queues()
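/*
 * [Editor's sketch] sdebug_map_queues() above splits the hardware queues
 * between the default and poll maps by filling in nr_queues/queue_offset and
 * letting blk_mq_map_queues() spread the CPUs.  A minimal split, assuming
 * 0 < poll_queues < submit_queues (my_setup_maps is hypothetical):
 */
#include <linux/blk-mq.h>
#include <scsi/scsi_host.h>

static void my_setup_maps(struct Scsi_Host *shost, unsigned int submit_queues,
			  unsigned int poll_queues)
{
	struct blk_mq_queue_map *def = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
	struct blk_mq_queue_map *poll = &shost->tag_set.map[HCTX_TYPE_POLL];

	def->nr_queues = submit_queues - poll_queues;
	def->queue_offset = 0;
	blk_mq_map_queues(def);

	poll->nr_queues = poll_queues;
	poll->queue_offset = def->nr_queues;	/* poll hctxs follow the default ones */
	blk_mq_map_queues(poll);
}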
9008 int queue_num = data->queue_num; in sdebug_blk_mq_poll_iter()
9016 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state)) in sdebug_blk_mq_poll_iter()
9021 spin_lock_irqsave(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
9022 sd_dp = &sdsc->sd_dp; in sdebug_blk_mq_poll_iter()
9023 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) { in sdebug_blk_mq_poll_iter()
9024 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
9028 if (time < sd_dp->cmpl_ts) { in sdebug_blk_mq_poll_iter()
9029 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
9032 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
9036 if (raw_smp_processor_id() != sd_dp->issuing_cpu) in sdebug_blk_mq_poll_iter()
9041 (*data->num_entries)++; in sdebug_blk_mq_poll_iter()
9053 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter, in sdebug_blk_mq_poll()
9063 struct scsi_device *sdp = cmnd->device; in sdebug_timeout_cmd()
9064 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_timeout_cmd()
9066 unsigned char *cmd = cmnd->cmnd; in sdebug_timeout_cmd()
9073 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_timeout_cmd()
9074 if (err->type == ERR_TMOUT_CMD && in sdebug_timeout_cmd()
9075 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_timeout_cmd()
9076 ret = !!err->cnt; in sdebug_timeout_cmd()
9077 if (err->cnt < 0) in sdebug_timeout_cmd()
9078 err->cnt++; in sdebug_timeout_cmd()
9091 struct scsi_device *sdp = cmnd->device; in sdebug_fail_queue_cmd()
9092 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_queue_cmd()
9094 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_queue_cmd()
9101 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_queue_cmd()
9102 if (err->type == ERR_FAIL_QUEUE_CMD && in sdebug_fail_queue_cmd()
9103 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_queue_cmd()
9104 ret = err->cnt ? err->queuecmd_ret : 0; in sdebug_fail_queue_cmd()
9105 if (err->cnt < 0) in sdebug_fail_queue_cmd()
9106 err->cnt++; in sdebug_fail_queue_cmd()
9120 struct scsi_device *sdp = cmnd->device; in sdebug_fail_cmd()
9121 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_cmd()
9123 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_cmd()
9131 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_cmd()
9132 if (err->type == ERR_FAIL_CMD && in sdebug_fail_cmd()
9133 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_cmd()
9134 if (!err->cnt) { in sdebug_fail_cmd()
9139 ret = !!err->cnt; in sdebug_fail_cmd()
9149 if (err->cnt < 0) in sdebug_fail_cmd()
9150 err->cnt++; in sdebug_fail_cmd()
9151 mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq); in sdebug_fail_cmd()
9152 result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24; in sdebug_fail_cmd()
9163 struct scsi_device *sdp = scp->device; in scsi_debug_queuecommand()
9167 u8 *cmd = scp->cmnd; in scsi_debug_queuecommand()
9172 u64 lun_index = sdp->lun & 0x3FFF; in scsi_debug_queuecommand()
9193 len = scp->cmd_len; in scsi_debug_queuecommand()
9199 n += scnprintf(b + n, sb - n, "%02x ", in scsi_debug_queuecommand()
9207 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); in scsi_debug_queuecommand()
9213 devip = (struct sdebug_dev_info *)sdp->hostdata; in scsi_debug_queuecommand()
9245 na = oip->num_attached; in scsi_debug_queuecommand()
9246 r_pfp = oip->pfp; in scsi_debug_queuecommand()
9249 if (FF_SA & r_oip->flags) { in scsi_debug_queuecommand()
9250 if (F_SA_LOW & oip->flags) in scsi_debug_queuecommand()
9254 for (k = 0; k <= na; oip = r_oip->arrp + k++) { in scsi_debug_queuecommand()
9255 if (opcode == oip->opcode && sa == oip->sa) in scsi_debug_queuecommand()
9259 for (k = 0; k <= na; oip = r_oip->arrp + k++) { in scsi_debug_queuecommand()
9260 if (opcode == oip->opcode) in scsi_debug_queuecommand()
9265 if (F_SA_LOW & r_oip->flags) in scsi_debug_queuecommand()
9267 else if (F_SA_HIGH & r_oip->flags) in scsi_debug_queuecommand()
9274 flags = oip->flags; in scsi_debug_queuecommand()
9290 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) { in scsi_debug_queuecommand()
9291 rem = ~oip->len_mask[k] & cmd[k]; in scsi_debug_queuecommand()
9293 for (j = 7; j >= 0; --j, rem <<= 1) { in scsi_debug_queuecommand()
9303 find_first_bit(devip->uas_bm, in scsi_debug_queuecommand()
9309 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) && in scsi_debug_queuecommand()
9310 atomic_read(&devip->stopped))) { in scsi_debug_queuecommand()
9321 if (likely(oip->pfp)) in scsi_debug_queuecommand()
9322 pfp = oip->pfp; /* calls a resp_* function */ in scsi_debug_queuecommand()
9354 struct sdebug_defer *sd_dp = &sdsc->sd_dp; in sdebug_init_cmd_priv()
9356 spin_lock_init(&sdsc->lock); in sdebug_init_cmd_priv()
9357 hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC, in sdebug_init_cmd_priv()
9359 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); in sdebug_init_cmd_priv()
9387 .max_sectors = -1U,
9388 .max_segment_size = -1U,
9410 error = -ENODEV; in sdebug_driver_probe()
9413 hpnt->can_queue = sdebug_max_queue; in sdebug_driver_probe()
9414 hpnt->cmd_per_lun = sdebug_max_queue; in sdebug_driver_probe()
9416 hpnt->dma_boundary = PAGE_SIZE - 1; in sdebug_driver_probe()
9427 hpnt->nr_hw_queues = submit_queues; in sdebug_driver_probe()
9429 hpnt->host_tagset = 1; in sdebug_driver_probe()
9432 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) { in sdebug_driver_probe()
9434 my_name, poll_queues, hpnt->nr_hw_queues); in sdebug_driver_probe()
9440 * left over for non-polled I/O. in sdebug_driver_probe()
9448 my_name, submit_queues - 1); in sdebug_driver_probe()
9452 hpnt->nr_maps = 3; in sdebug_driver_probe()
9454 sdbg_host->shost = hpnt; in sdebug_driver_probe()
9455 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id)) in sdebug_driver_probe()
9456 hpnt->max_id = sdebug_num_tgts + 1; in sdebug_driver_probe()
9458 hpnt->max_id = sdebug_num_tgts; in sdebug_driver_probe()
9460 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; in sdebug_driver_probe()
9511 error = scsi_add_host(hpnt, &sdbg_host->dev); in sdebug_driver_probe()
9514 error = -ENODEV; in sdebug_driver_probe()
9530 scsi_remove_host(sdbg_host->shost); in sdebug_driver_remove()
9532 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, in sdebug_driver_remove()
9534 list_del(&sdbg_devinfo->dev_list); in sdebug_driver_remove()
9535 kfree(sdbg_devinfo->zstate); in sdebug_driver_remove()
9539 scsi_host_put(sdbg_host->shost); in sdebug_driver_remove()
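/*
 * [Editor's sketch] sdebug_driver_probe()/sdebug_driver_remove() above follow
 * the standard SCSI host lifecycle: scsi_host_alloc() -> scsi_add_host() ->
 * scsi_scan_host() on the way up, scsi_remove_host() -> scsi_host_put() on the
 * way down.  Skeleton (my_probe/my_remove are hypothetical; recent kernels
 * take a const host template):
 */
#include <linux/errno.h>
#include <scsi/scsi_host.h>

static int my_probe(struct device *parent_dev,
		    const struct scsi_host_template *my_template)
{
	struct Scsi_Host *shost = scsi_host_alloc(my_template, 0);
	int error;

	if (!shost)
		return -ENODEV;
	error = scsi_add_host(shost, parent_dev);
	if (error) {
		scsi_host_put(shost);
		return error;
	}
	scsi_scan_host(shost);
	return 0;
}

static void my_remove(struct Scsi_Host *shost)
{
	scsi_remove_host(shost);
	scsi_host_put(shost);
}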