Lines Matching +full:ssc +full:- +full:block +full:- +full:bus
1 // SPDX-License-Identifier: GPL-2.0-or-later
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
33 #include <linux/crc-t10dif.h>
39 #include <linux/t10-pi.h>
91 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
149 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
169 #define JDELAY_OVERRIDDEN -9999
207 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
211 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
229 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
237 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
238 #define F_D_IN 1 /* Data-in command (e.g. READ) */
239 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
242 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
250 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
267 #define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
268 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
388 rwlock_t macc_sector_lck; /* per-sector media data access on this store */
398 dev_to_sdebug_host(shost->dma_dev)
444 u32 flags; /* OR-ed set of SDEB_F_* */
447 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
474 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
484 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
491 /* 0x0; 0x0->0x1f: 6 byte cdbs */
499 /* 0x20; 0x20->0x3f: 10 byte cdbs */
504 /* 0x40; 0x40->0x5f: 10 byte cdbs */
510 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
514 /* 0x80; 0x80->0x9f: 16 byte cdbs */
522 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
529 /* 0xc0; 0xc0->0xff: vendor specific */
537 * The following "response" functions return the SCSI mid-level's 4 byte
538 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
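For orientation while reading the response handlers below: that 4-byte result packs the SAM status in its low byte and the host byte in bits 16..23, which is why routines such as fill_from_dev_buffer() hand back (DID_ERROR << 16) on failure. A minimal sketch with illustrative values (not code from this file):

	u32 transport_failure = DID_ERROR << 16;           /* host byte only; no SCSI status */
	u32 check_condition   = SAM_STAT_CHECK_CONDITION;  /* status byte; sense data supplied separately */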
682 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
802 0, 0, 0, 0} }, /* PRE-FETCH (10) */
915 static int sdeb_first_idx = -1; /* invalid index ==> none created */
916 static int sdeb_most_recent_idx = -1;
930 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
936 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
948 .bus = &pseudo_lld_bus,
975 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata; in sdebug_err_add()
978 spin_lock(&devip->list_lock); in sdebug_err_add()
979 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_err_add()
980 if (err->type == new->type && err->cmd == new->cmd) { in sdebug_err_add()
981 list_del_rcu(&err->list); in sdebug_err_add()
982 call_rcu(&err->rcu, sdebug_err_free); in sdebug_err_add()
986 list_add_tail_rcu(&new->list, &devip->inject_err_list); in sdebug_err_add()
987 spin_unlock(&devip->list_lock); in sdebug_err_add()
992 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata; in sdebug_err_remove()
997 if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) { in sdebug_err_remove()
999 return -EINVAL; in sdebug_err_remove()
1002 spin_lock(&devip->list_lock); in sdebug_err_remove()
1003 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_err_remove()
1004 if (err->type == type && err->cmd == cmd) { in sdebug_err_remove()
1005 list_del_rcu(&err->list); in sdebug_err_remove()
1006 call_rcu(&err->rcu, sdebug_err_free); in sdebug_err_remove()
1007 spin_unlock(&devip->list_lock); in sdebug_err_remove()
1012 spin_unlock(&devip->list_lock); in sdebug_err_remove()
1015 return -EINVAL; in sdebug_err_remove()
1020 struct scsi_device *sdev = (struct scsi_device *)m->private; in sdebug_error_show()
1021 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata; in sdebug_error_show()
1027 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_error_show()
1028 switch (err->type) { in sdebug_error_show()
1032 seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt, in sdebug_error_show()
1033 err->cmd); in sdebug_error_show()
1037 seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type, in sdebug_error_show()
1038 err->cnt, err->cmd, err->queuecmd_ret); in sdebug_error_show()
1043 err->type, err->cnt, err->cmd, in sdebug_error_show()
1044 err->host_byte, err->driver_byte, in sdebug_error_show()
1045 err->status_byte, err->sense_key, in sdebug_error_show()
1046 err->asc, err->asq); in sdebug_error_show()
1057 return single_open(file, sdebug_error_show, inode->i_private); in sdebug_error_open()
1066 struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private; in sdebug_error_write()
1070 return -ENOMEM; in sdebug_error_write()
1074 return -EFAULT; in sdebug_error_write()
1077 if (buf[0] == '-') in sdebug_error_write()
1082 return -EINVAL; in sdebug_error_write()
1088 return -ENOMEM; in sdebug_error_write()
1095 if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt, in sdebug_error_write()
1096 &inject->cmd) != 3) in sdebug_error_write()
1101 if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt, in sdebug_error_write()
1102 &inject->cmd, &inject->queuecmd_ret) != 4) in sdebug_error_write()
1108 &inject->type, &inject->cnt, &inject->cmd, in sdebug_error_write()
1109 &inject->host_byte, &inject->driver_byte, in sdebug_error_write()
1110 &inject->status_byte, &inject->sense_key, in sdebug_error_write()
1111 &inject->asc, &inject->asq) != 9) in sdebug_error_write()
1128 return -EINVAL; in sdebug_error_write()
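Reading back from the sscanf() calls above, the per-device debugfs "error" file (created as "error" in scsi_debug_slave_configure() further down in this listing) appears to take one whitespace-separated record per write, in one of these shapes. This is a sketch of plausible usage: the opcode is hex per the %hhx conversions, while the radix of the later fields follows format strings that are only partially visible here.

	<type> <cnt> <opcode>
	<type> <cnt> <opcode> <queuecmd_ret>
	<type> <cnt> <opcode> <host_byte> <driver_byte> <status_byte> <sense_key> <asc> <asq>
	- <type> <opcode>        (a leading '-' removes a previously injected entry, see sdebug_err_remove())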
1140 struct scsi_target *starget = (struct scsi_target *)m->private; in sdebug_target_reset_fail_show()
1142 (struct sdebug_target_info *)starget->hostdata; in sdebug_target_reset_fail_show()
1145 seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N'); in sdebug_target_reset_fail_show()
1152 return single_open(file, sdebug_target_reset_fail_show, inode->i_private); in sdebug_target_reset_fail_open()
1160 (struct scsi_target *)file->f_inode->i_private; in sdebug_target_reset_fail_write()
1162 (struct sdebug_target_info *)starget->hostdata; in sdebug_target_reset_fail_write()
1165 ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail); in sdebug_target_reset_fail_write()
1168 return -ENODEV; in sdebug_target_reset_fail_write()
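Because the write path runs through kstrtobool_from_user(), the target-level fail_reset attribute should accept the usual boolean spellings (0/1, y/n, on/off); writing a true value makes the emulated target reset report failure via sdebug_fail_target_reset() later in this listing. The attribute lives in the per-target directory that sdebug_target_alloc() creates under the scsi_debug debugfs root; the exact path naming is only inferred from dev_name(&starget->dev) and may differ.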
1184 return -ENOMEM; in sdebug_target_alloc()
1188 targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev), in sdebug_target_alloc()
1191 debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget, in sdebug_target_alloc()
1194 starget->hostdata = targetip; in sdebug_target_alloc()
1203 debugfs_remove(targetip->debugfs_entry); in sdebug_tartget_cleanup_async()
1211 targetip = (struct sdebug_target_info *)starget->hostdata; in sdebug_target_destroy()
1213 starget->hostdata = NULL; in sdebug_target_destroy()
1219 /* Only do the extra work involved in logical block provisioning if one or
1240 if (!sip || !sip->storep) { in lba2fake_store()
1244 return lsip->storep + lba * sdebug_sector_size; in lba2fake_store()
1252 return sip->dif_storep + sector; in dif_store()
1262 hpnt = sdbg_host->shost; in sdebug_max_tgts_luns()
1263 if ((hpnt->this_id >= 0) && in sdebug_max_tgts_luns()
1264 (sdebug_num_tgts > hpnt->this_id)) in sdebug_max_tgts_luns()
1265 hpnt->max_id = sdebug_num_tgts + 1; in sdebug_max_tgts_luns()
1267 hpnt->max_id = sdebug_num_tgts; in sdebug_max_tgts_luns()
1269 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; in sdebug_max_tgts_luns()
1276 /* Set in_bit to -1 to indicate no bit position of invalid field */
1285 sbuff = scp->sense_buffer; in mk_sense_invalid_fld()
1287 sdev_printk(KERN_ERR, scp->device, in mk_sense_invalid_fld()
1312 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq" in mk_sense_invalid_fld()
1319 if (!scp->sense_buffer) { in mk_sense_buffer()
1320 sdev_printk(KERN_ERR, scp->device, in mk_sense_buffer()
1324 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in mk_sense_buffer()
1329 sdev_printk(KERN_INFO, scp->device, in mk_sense_buffer()
1354 return -EINVAL; in scsi_debug_ioctl()
1355 /* return -ENOTTY; // correct return but upsets fdisk */ in scsi_debug_ioctl()
1362 sdev->use_10_for_rw = false; in config_cdb_len()
1363 sdev->use_16_for_rw = false; in config_cdb_len()
1364 sdev->use_10_for_ms = false; in config_cdb_len()
1367 sdev->use_10_for_rw = true; in config_cdb_len()
1368 sdev->use_16_for_rw = false; in config_cdb_len()
1369 sdev->use_10_for_ms = false; in config_cdb_len()
1372 sdev->use_10_for_rw = true; in config_cdb_len()
1373 sdev->use_16_for_rw = false; in config_cdb_len()
1374 sdev->use_10_for_ms = true; in config_cdb_len()
1377 sdev->use_10_for_rw = false; in config_cdb_len()
1378 sdev->use_16_for_rw = true; in config_cdb_len()
1379 sdev->use_10_for_ms = true; in config_cdb_len()
1382 sdev->use_10_for_rw = false; in config_cdb_len()
1383 sdev->use_16_for_rw = true; in config_cdb_len()
1384 sdev->use_10_for_ms = true; in config_cdb_len()
1389 sdev->use_10_for_rw = true; in config_cdb_len()
1390 sdev->use_16_for_rw = false; in config_cdb_len()
1391 sdev->use_10_for_ms = false; in config_cdb_len()
1405 shost = sdbg_host->shost; in all_config_cdb_len()
1415 struct sdebug_host_info *sdhp = devip->sdbg_host; in clear_luns_changed_on_target()
1418 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { in clear_luns_changed_on_target()
1419 if ((devip->sdbg_host == dp->sdbg_host) && in clear_luns_changed_on_target()
1420 (devip->target == dp->target)) { in clear_luns_changed_on_target()
1421 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); in clear_luns_changed_on_target()
1430 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); in make_ua()
1451 cp = "bus reset"; in make_ua()
1481 * SPC-3 behavior is to report a UNIT ATTENTION with in make_ua()
1484 * received. SPC-4 behavior is to report it only once. in make_ua()
1486 * values as struct scsi_device->scsi_level. in make_ua()
1488 if (sdebug_scsi_level >= 6) /* SPC-4 and above */ in make_ua()
1502 clear_bit(k, devip->uas_bm); in make_ua()
1504 sdev_printk(KERN_INFO, scp->device, in make_ua()
1512 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1517 struct scsi_data_buffer *sdb = &scp->sdb; in fill_from_dev_buffer()
1519 if (!sdb->length) in fill_from_dev_buffer()
1521 if (scp->sc_data_direction != DMA_FROM_DEVICE) in fill_from_dev_buffer()
1524 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, in fill_from_dev_buffer()
1526 scsi_set_resid(scp, scsi_bufflen(scp) - act_len); in fill_from_dev_buffer()
1531 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1532 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1540 struct scsi_data_buffer *sdb = &scp->sdb; in p_fill_from_dev_buffer()
1543 if (sdb->length <= off_dst) in p_fill_from_dev_buffer()
1545 if (scp->sc_data_direction != DMA_FROM_DEVICE) in p_fill_from_dev_buffer()
1548 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents, in p_fill_from_dev_buffer()
1553 n = scsi_bufflen(scp) - (off_dst + act_len); in p_fill_from_dev_buffer()
1558 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1559 * 'arr' or -1 if error.
1566 if (scp->sc_data_direction != DMA_TO_DEVICE) in fetch_to_dev_buffer()
1567 return -1; in fetch_to_dev_buffer()
1613 /* NAA-3, Logical unit identifier (binary) */ in inquiry_vpd_83()
1631 /* NAA-3, Target port identifier */ in inquiry_vpd_83()
1638 /* NAA-3, Target port group identifier */ in inquiry_vpd_83()
1647 /* NAA-3, Target device identifier */ in inquiry_vpd_83()
1655 arr[num++] = 0x63; /* proto=sas, UTF-8 */ in inquiry_vpd_83()
1699 memset(arr + num + olen, 0, plen - olen); in inquiry_vpd_85()
1711 memset(arr + num + olen, 0, plen - olen); in inquiry_vpd_85()
1733 /* naa-5 target port identifier (A) */ in inquiry_vpd_88()
1748 /* naa-5 target port identifier (B) */ in inquiry_vpd_88()
1819 /* Block limits VPD page (SBC-3) */
1845 /* Maximum Unmap Block Descriptor Count */ in inquiry_vpd_b0()
1869 return 0x3c; /* Mandatory page length for Logical Block Provisioning */ in inquiry_vpd_b0()
1872 /* Block device characteristics VPD page (SBC-3) */
1884 /* Logical block provisioning VPD page (SBC-4) */
1903 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1910 * Optimal number of non-sequentially written sequential write in inquiry_vpd_b6()
1916 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open) in inquiry_vpd_b6()
1917 put_unaligned_be32(devip->max_open, &arr[12]); in inquiry_vpd_b6()
1920 if (devip->zcap < devip->zsize) { in inquiry_vpd_b6()
1922 put_unaligned_be64(devip->zsize, &arr[20]); in inquiry_vpd_b6()
1933 /* Block limits extension VPD page (SBC-4) */
1949 unsigned char *cmd = scp->cmnd; in resp_inquiry()
1959 is_zbc = devip->zoned; in resp_inquiry()
1961 have_wlun = scsi_is_wlun(scp->device->lun); in resp_inquiry()
1964 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) in resp_inquiry()
1977 int host_no = devip->sdbg_host->shost->host_no; in resp_inquiry()
1981 (devip->channel & 0x7f); in resp_inquiry()
1984 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) + in resp_inquiry()
1985 (devip->target * 1000) + devip->lun); in resp_inquiry()
1987 (devip->target * 1000) - 3; in resp_inquiry()
2001 arr[n++] = 0xb0; /* Block limits */ in resp_inquiry()
2002 arr[n++] = 0xb1; /* Block characteristics */ in resp_inquiry()
2007 arr[n++] = 0xb7; /* Block limits extension */ in resp_inquiry()
2009 arr[3] = n - 4; /* number of supported VPD pages */ in resp_inquiry()
2017 &devip->lu_name); in resp_inquiry()
2037 arr[4] = 0x2; /* disconnect-reconnect mp */ in resp_inquiry()
2046 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */ in resp_inquiry()
2048 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */ in resp_inquiry()
2054 } else if (cmd[2] == 0xb7) { /* block limits extension page */ in resp_inquiry()
2057 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); in resp_inquiry()
2071 arr[4] = SDEBUG_LONG_INQ_SZ - 5; in resp_inquiry()
2084 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */ in resp_inquiry()
2085 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */ in resp_inquiry()
2087 if (is_disk) { /* SBC-4 no version claimed */ in resp_inquiry()
2090 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ in resp_inquiry()
2097 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ in resp_inquiry()
2111 unsigned char *cmd = scp->cmnd; in resp_requests()
2116 int stopped_state = atomic_read(&devip->stopped); in resp_requests()
2164 unsigned char *cmd = scp->cmnd; in resp_start_stop()
2174 stopped_state = atomic_read(&devip->stopped); in resp_start_stop()
2178 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) { in resp_start_stop()
2179 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts)); in resp_start_stop()
2183 atomic_set(&devip->stopped, 0); in resp_start_stop()
2198 atomic_xchg(&devip->stopped, want_stop); in resp_start_stop()
2227 capac = (unsigned int)sdebug_capacity - 1; in resp_readcap()
2239 unsigned char *cmd = scp->cmnd; in resp_readcap16()
2247 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0); in resp_readcap16()
2264 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices. in resp_readcap16()
2266 if (devip->zoned) in resp_readcap16()
2272 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */ in resp_readcap16()
2285 unsigned char *cmd = scp->cmnd; in resp_report_tgtpgs()
2287 int host_no = devip->sdbg_host->shost->host_no; in resp_report_tgtpgs()
2305 (devip->channel & 0x7f); in resp_report_tgtpgs()
2307 (devip->channel & 0x7f) + 0x80; in resp_report_tgtpgs()
2343 rlen = n - 4; in resp_report_tgtpgs()
2348 * - The allocated length in resp_report_tgtpgs()
2349 * - The constructed command length in resp_report_tgtpgs()
2350 * - The maximum array size in resp_report_tgtpgs()
2370 u8 *cmd = scp->cmnd; in resp_rsup_opcodes()
2378 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); in resp_rsup_opcodes()
2395 oip->num_attached != 0xff; ++oip) { in resp_rsup_opcodes()
2396 if (F_INV_OP & oip->flags) in resp_rsup_opcodes()
2398 count += (oip->num_attached + 1); in resp_rsup_opcodes()
2403 oip->num_attached != 0xff && offset < a_len; ++oip) { in resp_rsup_opcodes()
2404 if (F_INV_OP & oip->flags) in resp_rsup_opcodes()
2406 na = oip->num_attached; in resp_rsup_opcodes()
2407 arr[offset] = oip->opcode; in resp_rsup_opcodes()
2408 put_unaligned_be16(oip->sa, arr + offset + 2); in resp_rsup_opcodes()
2411 if (FF_SA & oip->flags) in resp_rsup_opcodes()
2413 put_unaligned_be16(oip->len_mask[0], arr + offset + 6); in resp_rsup_opcodes()
2417 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { in resp_rsup_opcodes()
2418 if (F_INV_OP & oip->flags) in resp_rsup_opcodes()
2421 arr[offset] = oip->opcode; in resp_rsup_opcodes()
2422 put_unaligned_be16(oip->sa, arr + offset + 2); in resp_rsup_opcodes()
2425 if (FF_SA & oip->flags) in resp_rsup_opcodes()
2427 put_unaligned_be16(oip->len_mask[0], in resp_rsup_opcodes()
2442 if (F_INV_OP & oip->flags) { in resp_rsup_opcodes()
2447 if (FF_SA & oip->flags) { in resp_rsup_opcodes()
2455 0 == (FF_SA & oip->flags)) { in resp_rsup_opcodes()
2456 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); in resp_rsup_opcodes()
2460 if (0 == (FF_SA & oip->flags) && in resp_rsup_opcodes()
2461 req_opcode == oip->opcode) in resp_rsup_opcodes()
2463 else if (0 == (FF_SA & oip->flags)) { in resp_rsup_opcodes()
2464 na = oip->num_attached; in resp_rsup_opcodes()
2465 for (k = 0, oip = oip->arrp; k < na; in resp_rsup_opcodes()
2467 if (req_opcode == oip->opcode) in resp_rsup_opcodes()
2471 } else if (req_sa != oip->sa) { in resp_rsup_opcodes()
2472 na = oip->num_attached; in resp_rsup_opcodes()
2473 for (k = 0, oip = oip->arrp; k < na; in resp_rsup_opcodes()
2475 if (req_sa == oip->sa) in resp_rsup_opcodes()
2482 u = oip->len_mask[0]; in resp_rsup_opcodes()
2484 arr[4] = oip->opcode; in resp_rsup_opcodes()
2487 oip->len_mask[k] : 0xff; in resp_rsup_opcodes()
2516 u8 *cmd = scp->cmnd; in resp_rsup_tmfs()
2522 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); in resp_rsup_tmfs()
2540 { /* Read-Write Error Recovery page for mode_sense */ in resp_err_recov_pg()
2546 memset(p + 2, 0, sizeof(err_recov_pg) - 2); in resp_err_recov_pg()
2551 { /* Disconnect-Reconnect page for mode_sense */ in resp_disconnect_pg()
2557 memset(p + 2, 0, sizeof(disconnect_pg) - 2); in resp_disconnect_pg()
2573 memset(p + 2, 0, sizeof(format_pg) - 2); in resp_format_pg()
2638 .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4), in resp_grouping_m_pg()
2654 memset(p + 4, 0, sizeof(gr_m_pg) - 4); in resp_grouping_m_pg()
2675 { /* SAS SSP mode page - short format for mode_sense */ in resp_sas_sf_m_pg()
2681 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2); in resp_sas_sf_m_pg()
2715 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4); in resp_sas_pcd_m_spg()
2727 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4); in resp_sas_sha_m_spg()
2741 int target = scp->device->id; in resp_mode_sense()
2744 unsigned char *cmd = scp->cmnd; in resp_mode_sense()
2749 return -ENOMEM; in resp_mode_sense()
2750 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */ in resp_mode_sense()
2757 is_zbc = devip->zoned; in resp_mode_sense()
2767 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + in resp_mode_sense()
2768 (devip->target * 1000) - 3; in resp_mode_sense()
2771 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */ in resp_mode_sense()
2811 case 0x1: /* Read-Write error recovery page, direct access */ in resp_mode_sense()
2817 case 0x2: /* Disconnect-Reconnect page, all devices */ in resp_mode_sense()
2906 arr[0] = offset - 1; in resp_mode_sense()
2908 put_unaligned_be16((offset - 2), arr + 0); in resp_mode_sense()
2916 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); in resp_mode_sense()
2928 unsigned char *cmd = scp->cmnd; in resp_mode_select()
2936 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); in resp_mode_select()
2940 if (-1 == res) in resp_mode_select()
2943 sdev_printk(KERN_INFO, scp->device, in resp_mode_select()
2950 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); in resp_mode_select()
2971 sizeof(caching_pg) - 2); in resp_mode_select()
2978 sizeof(ctrl_m_pg) - 2); in resp_mode_select()
2990 sizeof(iec_m_pg) - 2); in resp_mode_select()
3000 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); in resp_mode_select()
3047 unsigned char *cmd = scp->cmnd; in resp_log_sense()
3067 arr[3] = n - 4; in resp_log_sense()
3099 arr[3] = n - 4; in resp_log_sense()
3109 arr[3] = n - 4; in resp_log_sense()
3117 arr[3] = n - 4; in resp_log_sense()
3133 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); in resp_log_sense()
3143 return devip->nr_zones != 0; in sdebug_dev_is_zoned()
3149 u32 zno = lba >> devip->zsize_shift; in zbc_zone()
3152 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones) in zbc_zone()
3153 return &devip->zstate[zno]; in zbc_zone()
3159 zno = 2 * zno - devip->nr_conv_zones; in zbc_zone()
3160 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones); in zbc_zone()
3161 zsp = &devip->zstate[zno]; in zbc_zone()
3162 if (lba >= zsp->z_start + zsp->z_size) in zbc_zone()
3164 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size); in zbc_zone()
3170 return zsp->z_type == ZBC_ZTYPE_CNV; in zbc_zone_is_conv()
3175 return zsp->z_type == ZBC_ZTYPE_GAP; in zbc_zone_is_gap()
3191 zc = zsp->z_cond; in zbc_close_zone()
3196 devip->nr_imp_open--; in zbc_close_zone()
3198 devip->nr_exp_open--; in zbc_close_zone()
3200 if (zsp->z_wp == zsp->z_start) { in zbc_close_zone()
3201 zsp->z_cond = ZC1_EMPTY; in zbc_close_zone()
3203 zsp->z_cond = ZC4_CLOSED; in zbc_close_zone()
3204 devip->nr_closed++; in zbc_close_zone()
3210 struct sdeb_zone_state *zsp = &devip->zstate[0]; in zbc_close_imp_open_zone()
3213 for (i = 0; i < devip->nr_zones; i++, zsp++) { in zbc_close_imp_open_zone()
3214 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) { in zbc_close_imp_open_zone()
3229 zc = zsp->z_cond; in zbc_open_zone()
3235 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN) in zbc_open_zone()
3237 else if (devip->max_open && in zbc_open_zone()
3238 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open) in zbc_open_zone()
3241 if (zsp->z_cond == ZC4_CLOSED) in zbc_open_zone()
3242 devip->nr_closed--; in zbc_open_zone()
3244 zsp->z_cond = ZC3_EXPLICIT_OPEN; in zbc_open_zone()
3245 devip->nr_exp_open++; in zbc_open_zone()
3247 zsp->z_cond = ZC2_IMPLICIT_OPEN; in zbc_open_zone()
3248 devip->nr_imp_open++; in zbc_open_zone()
3255 switch (zsp->z_cond) { in zbc_set_zone_full()
3257 devip->nr_imp_open--; in zbc_set_zone_full()
3260 devip->nr_exp_open--; in zbc_set_zone_full()
3264 zsp->z_start, zsp->z_cond); in zbc_set_zone_full()
3267 zsp->z_cond = ZC5_FULL; in zbc_set_zone_full()
3274 unsigned long long n, end, zend = zsp->z_start + zsp->z_size; in zbc_inc_wp()
3279 if (zsp->z_type == ZBC_ZTYPE_SWR) { in zbc_inc_wp()
3280 zsp->z_wp += num; in zbc_inc_wp()
3281 if (zsp->z_wp >= zend) in zbc_inc_wp()
3287 if (lba != zsp->z_wp) in zbc_inc_wp()
3288 zsp->z_non_seq_resource = true; in zbc_inc_wp()
3292 n = zend - lba; in zbc_inc_wp()
3293 zsp->z_wp = zend; in zbc_inc_wp()
3294 } else if (end > zsp->z_wp) { in zbc_inc_wp()
3296 zsp->z_wp = end; in zbc_inc_wp()
3300 if (zsp->z_wp >= zend) in zbc_inc_wp()
3303 num -= n; in zbc_inc_wp()
3307 zend = zsp->z_start + zsp->z_size; in zbc_inc_wp()
3315 struct scsi_device *sdp = scp->device; in check_zbc_access_params()
3316 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in check_zbc_access_params()
3318 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1); in check_zbc_access_params()
3321 /* For host-managed, reads cannot cross zone types boundaries */ in check_zbc_access_params()
3322 if (zsp->z_type != zsp_end->z_type) { in check_zbc_access_params()
3349 if (zsp->z_type == ZBC_ZTYPE_SWR) { in check_zbc_access_params()
3358 if (zsp->z_cond == ZC5_FULL) { in check_zbc_access_params()
3364 if (lba != zsp->z_wp) { in check_zbc_access_params()
3373 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) { in check_zbc_access_params()
3374 if (devip->max_open && in check_zbc_access_params()
3375 devip->nr_exp_open >= devip->max_open) { in check_zbc_access_params()
3391 struct scsi_device *sdp = scp->device; in check_device_access_params()
3392 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in check_device_access_params()
3398 /* transfer length excessive (tie in to block limits VPD page) */ in check_device_access_params()
3427 return xa_load(per_store_ap, devip->sdbg_host->si_idx); in devip2sip()
3471 sdeb_read_lock(&sip->macc_data_lck); in sdeb_data_read_lock()
3479 sdeb_read_unlock(&sip->macc_data_lck); in sdeb_data_read_unlock()
3487 sdeb_write_lock(&sip->macc_data_lck); in sdeb_data_write_lock()
3495 sdeb_write_unlock(&sip->macc_data_lck); in sdeb_data_write_unlock()
3503 sdeb_read_lock(&sip->macc_sector_lck); in sdeb_data_sector_read_lock()
3511 sdeb_read_unlock(&sip->macc_sector_lck); in sdeb_data_sector_read_unlock()
3519 sdeb_write_lock(&sip->macc_sector_lck); in sdeb_data_sector_write_lock()
3527 sdeb_write_unlock(&sip->macc_sector_lck); in sdeb_data_sector_write_unlock()
3532 * We simplify the atomic model to allow only 1x atomic write and many non-
3538 * So use a RW lock for per-device read and write locking:
3539 * An atomic access grabs the lock as a writer and non-atomic grabs the lock
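A minimal sketch of the locking scheme that comment describes, using illustrative names rather than the driver's own wrappers (which, as the sdeb_meta_*_lock() helpers below suggest, also appear to handle a no-rwlock option):

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(example_macc_lck);

	/* Many non-atomic accesses may proceed concurrently: take the lock shared. */
	static void example_non_atomic_access(void)
	{
		read_lock(&example_macc_lck);
		/* ... copy one or more blocks to/from the fake store ... */
		read_unlock(&example_macc_lck);
	}

	/* A single atomic write excludes all other accesses: take the lock exclusive. */
	static void example_atomic_write(void)
	{
		write_lock(&example_macc_lck);
		/* ... copy every block of the atomic write before anything else runs ... */
		write_unlock(&example_macc_lck);
	}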
3585 __acquire(&sip->macc_meta_lck); in sdeb_meta_read_lock()
3590 read_lock(&sip->macc_meta_lck); in sdeb_meta_read_lock()
3601 __release(&sip->macc_meta_lck); in sdeb_meta_read_unlock()
3606 read_unlock(&sip->macc_meta_lck); in sdeb_meta_read_unlock()
3617 __acquire(&sip->macc_meta_lck); in sdeb_meta_write_lock()
3622 write_lock(&sip->macc_meta_lck); in sdeb_meta_write_lock()
3633 __release(&sip->macc_meta_lck); in sdeb_meta_write_unlock()
3638 write_unlock(&sip->macc_meta_lck); in sdeb_meta_write_unlock()
3644 /* Returns number of bytes copied or -1 if error. */
3650 u64 block; in do_device_access() local
3652 struct scsi_data_buffer *sdb = &scp->sdb; in do_device_access()
3661 return -1; in do_device_access()
3670 if (!sdb->length || !sip) in do_device_access()
3672 if (scp->sc_data_direction != dir) in do_device_access()
3673 return -1; in do_device_access()
3678 fsp = sip->storep; in do_device_access()
3680 block = do_div(lba, sdebug_store_sectors); in do_device_access()
3682 /* Only allow 1x atomic write or multiple non-atomic writes at any given time */ in do_device_access()
3687 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, in do_device_access()
3688 fsp + (block * sdebug_sector_size), in do_device_access()
3695 if (++block >= sdebug_store_sectors) in do_device_access()
3696 block = 0; in do_device_access()
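The do_div() plus wrap-to-zero logic above is what lets the advertised capacity exceed the backing ramdisk: the starting LBA is reduced modulo sdebug_store_sectors, and a copy that runs past the end of the store simply continues at block 0. A worked example with assumed numbers: for a store of 8192 sectors, LBA 10000 maps to fake-store block 10000 mod 8192 = 1808, and a 16-block transfer starting at store block 8190 touches blocks 8190, 8191 and then 0 through 13.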
3703 /* Returns number of bytes copied or -1 if error. */
3706 struct scsi_data_buffer *sdb = &scp->sdb; in do_dout_fetch()
3708 if (!sdb->length) in do_dout_fetch()
3710 if (scp->sc_data_direction != DMA_TO_DEVICE) in do_dout_fetch()
3711 return -1; in do_dout_fetch()
3712 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp, in do_dout_fetch()
3716 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3717 * arr into sip->storep+lba and return true. If comparison fails then
3723 u64 block, rest = 0; in comp_write_worker() local
3726 u8 *fsp = sip->storep; in comp_write_worker()
3728 block = do_div(lba, store_blks); in comp_write_worker()
3729 if (block + num > store_blks) in comp_write_worker()
3730 rest = block + num - store_blks; in comp_write_worker()
3732 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size); in comp_write_worker()
3736 res = memcmp(fsp, arr + ((num - rest) * lb_size), in comp_write_worker()
3743 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size); in comp_write_worker()
3745 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size); in comp_write_worker()
3766 if (sdt->guard_tag != csum) { in dif_verify()
3769 be16_to_cpu(sdt->guard_tag), in dif_verify()
3774 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { in dif_verify()
3780 be32_to_cpu(sdt->ref_tag) != ei_lba) { in dif_verify()
3794 scp->device->hostdata, true); in dif_copy_prot()
3795 struct t10_pi_tuple *dif_storep = sip->dif_storep; in dif_copy_prot()
3812 rest = start + len - dif_store_end; in dif_copy_prot()
3817 memcpy(paddr, start, len - rest); in dif_copy_prot()
3819 memcpy(start, paddr, len - rest); in dif_copy_prot()
3823 memcpy(paddr + len - rest, dif_storep, rest); in dif_copy_prot()
3825 memcpy(dif_storep, paddr + len - rest, rest); in dif_copy_prot()
3829 resid -= len; in dif_copy_prot()
3841 scp->device->hostdata, true); in prot_verify_read()
3848 if (sdt->app_tag == cpu_to_be16(0xffff)) in prot_verify_read()
3858 if (scp->cmnd[1] >> 5) { /* RDPROTECT */ in prot_verify_read()
3882 u8 *cmd = scp->cmnd; in resp_read_dt0()
3933 sdev_printk(KERN_ERR, scp->device, "Unprotected RD " in resp_read_dt0()
3950 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) && in resp_read_dt0()
3955 if (0x70 == (scp->sense_buffer[0] & 0x7f)) { in resp_read_dt0()
3956 scp->sense_buffer[0] |= 0x80; /* Valid bit */ in resp_read_dt0()
3959 put_unaligned_be32(ret, scp->sense_buffer + 3); in resp_read_dt0()
3979 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { in resp_read_dt0()
3990 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) { in resp_read_dt0()
4002 if (unlikely(ret == -1)) in resp_read_dt0()
4005 scsi_set_resid(scp, scsi_bufflen(scp) - ret); in resp_read_dt0()
4014 /* Logical block guard check failed */ in resp_read_dt0()
4072 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */ in prot_verify_write()
4102 lba += sdebug_unmap_granularity - sdebug_unmap_alignment; in lba_to_map_index()
4112 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment; in map_index_to_lba()
4125 mapped = test_bit(index, sip->map_storep); in map_state()
4128 next = find_next_zero_bit(sip->map_storep, map_size, index); in map_state()
4130 next = find_next_bit(sip->map_storep, map_size, index); in map_state()
4133 *num = end - lba; in map_state()
4146 set_bit(index, sip->map_storep); in map_region()
4156 u8 *fsp = sip->storep; in unmap_region()
4164 clear_bit(index, sip->map_storep); in unmap_region()
4171 if (sip->dif_storep) { in unmap_region()
4172 memset(sip->dif_storep + lba, 0xff, in unmap_region()
4173 sizeof(*sip->dif_storep) * in unmap_region()
4190 u8 *cmd = scp->cmnd; in resp_write_dt0()
4246 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " in resp_write_dt0()
4268 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { in resp_write_dt0()
4272 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ in resp_write_dt0()
4279 if (scp->prot_flags & SCSI_PROT_REF_CHECK) { in resp_write_dt0()
4283 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ in resp_write_dt0()
4302 if (unlikely(-1 == ret)) in resp_write_dt0()
4306 sdev_printk(KERN_INFO, scp->device, in resp_write_dt0()
4317 /* Logical block guard check failed */ in resp_write_dt0()
4337 u8 *cmd = scp->cmnd; in resp_write_scat()
4375 sdev_printk(KERN_ERR, scp->device, in resp_write_scat()
4380 return 0; /* T10 says these do-nothings are not errors */ in resp_write_scat()
4383 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4392 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4402 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4406 if (res == -1) { in resp_write_scat()
4420 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4433 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4456 * Write ranges atomically to keep as close to pre-atomic in resp_write_scat()
4465 if (unlikely(-1 == ret)) { in resp_write_scat()
4469 sdev_printk(KERN_INFO, scp->device, in resp_write_scat()
4481 /* Logical block guard check failed */ in resp_write_scat()
4507 struct scsi_device *sdp = scp->device; in resp_write_same()
4508 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in resp_write_same()
4510 u64 block, lbaa; in resp_write_same() local
4514 scp->device->hostdata, true); in resp_write_same()
4533 block = do_div(lbaa, sdebug_store_sectors); in resp_write_same()
4534 /* if ndob then zero 1 logical block, else fetch 1 logical block */ in resp_write_same()
4535 fsp = sip->storep; in resp_write_same()
4536 fs1p = fsp + (block * lb_size); in resp_write_same()
4544 if (-1 == ret) { in resp_write_same()
4548 sdev_printk(KERN_INFO, scp->device, in resp_write_same()
4555 block = do_div(lbaa, sdebug_store_sectors); in resp_write_same()
4556 memmove(fsp + (block * lb_size), fs1p, lb_size); in resp_write_same()
4574 u8 *cmd = scp->cmnd; in resp_write_same_10()
4590 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); in resp_write_same_10()
4599 u8 *cmd = scp->cmnd; in resp_write_same_16()
4613 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */ in resp_write_same_16()
4618 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); in resp_write_same_16()
4625 * field. For the Report supported operation codes command, SPC-4 suggests
4630 u8 *cmd = scp->cmnd; in resp_write_buffer()
4631 struct scsi_device *sdp = scp->device; in resp_write_buffer()
4639 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in resp_write_buffer()
4640 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm); in resp_write_buffer()
4643 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm); in resp_write_buffer()
4648 &devip->sdbg_host->dev_info_list, in resp_write_buffer()
4650 if (dp->target == sdp->id) { in resp_write_buffer()
4651 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm); in resp_write_buffer()
4654 dp->uas_bm); in resp_write_buffer()
4660 &devip->sdbg_host->dev_info_list, in resp_write_buffer()
4662 if (dp->target == sdp->id) in resp_write_buffer()
4664 dp->uas_bm); in resp_write_buffer()
4676 u8 *cmd = scp->cmnd; in resp_comp_write()
4698 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " in resp_comp_write()
4712 if (ret == -1) { in resp_comp_write()
4716 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " in resp_comp_write()
4728 /* Cover sip->map_storep (which map_region() sets) with data lock */ in resp_comp_write()
4755 payload_len = get_unaligned_be16(scp->cmnd + 7); in resp_unmap()
4758 descriptors = (payload_len - 8) / 16; in resp_unmap()
4760 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); in resp_unmap()
4773 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); in resp_unmap()
4805 u8 *cmd = scp->cmnd; in resp_get_lba_status()
4829 if (sdebug_capacity - lba <= 0xffffffff) in resp_get_lba_status()
4830 num = sdebug_capacity - lba; in resp_get_lba_status()
4848 const u8 *cmd = scp->cmnd; in resp_get_stream_status()
4857 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); in resp_get_stream_status()
4862 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); in resp_get_stream_status()
4868 * about open streams. Treat the non-permanent stream as open. in resp_get_stream_status()
4871 &h->number_of_open_streams); in resp_get_stream_status()
4879 stream_status->perm = stream_id < PERMANENT_STREAM_COUNT; in resp_get_stream_status()
4881 &stream_status->stream_identifier); in resp_get_stream_status()
4882 stream_status->rel_lifetime = stream_id + 1; in resp_get_stream_status()
4884 put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */ in resp_get_stream_status()
4895 u8 *cmd = scp->cmnd; in resp_sync_cache()
4916 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4927 u64 block, rest = 0; in resp_pre_fetch() local
4929 u8 *cmd = scp->cmnd; in resp_pre_fetch()
4931 u8 *fsp = sip->storep; in resp_pre_fetch()
4936 } else { /* PRE-FETCH(16) */ in resp_pre_fetch()
4946 /* PRE-FETCH spec says nothing about LBP or PI so skip them */ in resp_pre_fetch()
4947 block = do_div(lba, sdebug_store_sectors); in resp_pre_fetch()
4948 if (block + nblks > sdebug_store_sectors) in resp_pre_fetch()
4949 rest = block + nblks - sdebug_store_sectors; in resp_pre_fetch()
4951 /* Try to bring the PRE-FETCH range into CPU's cache */ in resp_pre_fetch()
4953 prefetch_range(fsp + (sdebug_sector_size * block), in resp_pre_fetch()
4954 (nblks - rest) * sdebug_sector_size); in resp_pre_fetch()
4968 * (W-LUN), the normal Linux scanning logic does not associate it with a
4970 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4972 * the above will associate a W-LUN to each target. To only get a W-LUN
4973 * for target 2, then use "echo '- 2 49409' > scan" .
4978 unsigned char *cmd = scp->cmnd; in resp_report_luns()
4985 unsigned int wlun_cnt; /* report luns W-LUN count */ in resp_report_luns()
4999 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); in resp_report_luns()
5004 case 0: /* all LUNs apart from W-LUNs */ in resp_report_luns()
5008 case 1: /* only W-LUNs */ in resp_report_luns()
5017 case 0x11: /* see SPC-5 */ in resp_report_luns()
5021 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); in resp_report_luns()
5026 --lun_cnt; in resp_report_luns()
5049 lun_p->scsi_lun[0] |= 0x40; in resp_report_luns()
5077 u8 *cmd = scp->cmnd; in resp_verify()
5087 is_bytchk3 = true; /* 1 block sent, compared repeatedly */ in resp_verify()
5120 if (ret == -1) { in resp_verify()
5124 sdev_printk(KERN_INFO, scp->device, in resp_verify()
5156 u8 *cmd = scp->cmnd; in resp_report_zones()
5176 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD); in resp_report_zones()
5189 lba = zsp->z_start + zsp->z_size) { in resp_report_zones()
5199 if (zsp->z_cond != ZC1_EMPTY) in resp_report_zones()
5204 if (zsp->z_cond != ZC2_IMPLICIT_OPEN) in resp_report_zones()
5209 if (zsp->z_cond != ZC3_EXPLICIT_OPEN) in resp_report_zones()
5214 if (zsp->z_cond != ZC4_CLOSED) in resp_report_zones()
5219 if (zsp->z_cond != ZC5_FULL) in resp_report_zones()
5226 * Read-only, offline, reset WP recommended are in resp_report_zones()
5231 /* non-seq-resource set */ in resp_report_zones()
5232 if (!zsp->z_non_seq_resource) in resp_report_zones()
5254 desc[0] = zsp->z_type; in resp_report_zones()
5255 desc[1] = zsp->z_cond << 4; in resp_report_zones()
5256 if (zsp->z_non_seq_resource) in resp_report_zones()
5258 put_unaligned_be64((u64)zsp->z_size, desc + 8); in resp_report_zones()
5259 put_unaligned_be64((u64)zsp->z_start, desc + 16); in resp_report_zones()
5260 put_unaligned_be64((u64)zsp->z_wp, desc + 24); in resp_report_zones()
5274 put_unaligned_be64(sdebug_capacity - 1, arr + 8); in resp_report_zones()
5276 if (devip->zcap < devip->zsize) in resp_report_zones()
5277 put_unaligned_be64(devip->zsize, arr + 16); in resp_report_zones()
5279 rep_len = (unsigned long)desc - (unsigned long)arr; in resp_report_zones()
5292 u8 *cmd = scp->cmnd; in resp_atomic_write()
5324 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1); in resp_atomic_write()
5329 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1); in resp_atomic_write()
5334 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1); in resp_atomic_write()
5340 if (unlikely(ret == -1)) in resp_atomic_write()
5347 /* Logic transplanted from tcmu-runner, file_zbc.c */
5350 struct sdeb_zone_state *zsp = &devip->zstate[0]; in zbc_open_all()
5353 for (i = 0; i < devip->nr_zones; i++, zsp++) { in zbc_open_all()
5354 if (zsp->z_cond == ZC4_CLOSED) in zbc_open_all()
5355 zbc_open_zone(devip, &devip->zstate[i], true); in zbc_open_all()
5364 u8 *cmd = scp->cmnd; in resp_open_zone()
5377 if (devip->max_open && in resp_open_zone()
5378 devip->nr_exp_open + devip->nr_closed > devip->max_open) { in resp_open_zone()
5398 if (z_id != zsp->z_start) { in resp_open_zone()
5409 zc = zsp->z_cond; in resp_open_zone()
5413 if (devip->max_open && devip->nr_exp_open >= devip->max_open) { in resp_open_zone()
5430 for (i = 0; i < devip->nr_zones; i++) in zbc_close_all()
5431 zbc_close_zone(devip, &devip->zstate[i]); in zbc_close_all()
5439 u8 *cmd = scp->cmnd; in resp_close_zone()
5465 if (z_id != zsp->z_start) { in resp_close_zone()
5485 enum sdebug_z_cond zc = zsp->z_cond; in zbc_finish_zone()
5491 if (zsp->z_cond == ZC4_CLOSED) in zbc_finish_zone()
5492 devip->nr_closed--; in zbc_finish_zone()
5493 zsp->z_wp = zsp->z_start + zsp->z_size; in zbc_finish_zone()
5494 zsp->z_cond = ZC5_FULL; in zbc_finish_zone()
5502 for (i = 0; i < devip->nr_zones; i++) in zbc_finish_all()
5503 zbc_finish_zone(devip, &devip->zstate[i], false); in zbc_finish_all()
5512 u8 *cmd = scp->cmnd; in resp_finish_zone()
5537 if (z_id != zsp->z_start) { in resp_finish_zone()
5563 zc = zsp->z_cond; in zbc_rwp_zone()
5567 if (zsp->z_cond == ZC4_CLOSED) in zbc_rwp_zone()
5568 devip->nr_closed--; in zbc_rwp_zone()
5570 if (zsp->z_wp > zsp->z_start) in zbc_rwp_zone()
5571 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0, in zbc_rwp_zone()
5572 (zsp->z_wp - zsp->z_start) * sdebug_sector_size); in zbc_rwp_zone()
5574 zsp->z_non_seq_resource = false; in zbc_rwp_zone()
5575 zsp->z_wp = zsp->z_start; in zbc_rwp_zone()
5576 zsp->z_cond = ZC1_EMPTY; in zbc_rwp_zone()
5583 for (i = 0; i < devip->nr_zones; i++) in zbc_rwp_all()
5584 zbc_rwp_zone(devip, &devip->zstate[i]); in zbc_rwp_all()
5592 u8 *cmd = scp->cmnd; in resp_rwp_zone()
5616 if (z_id != zsp->z_start) { in resp_rwp_zone()
5643 struct scsi_cmnd *scp = sqcp->scmd; in sdebug_q_cmd_complete()
5649 if (raw_smp_processor_id() != sd_dp->issuing_cpu) in sdebug_q_cmd_complete()
5659 spin_lock_irqsave(&sdsc->lock, flags); in sdebug_q_cmd_complete()
5660 aborted = sd_dp->aborted; in sdebug_q_cmd_complete()
5662 sd_dp->aborted = false; in sdebug_q_cmd_complete()
5665 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_q_cmd_complete()
5668 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n"); in sdebug_q_cmd_complete()
5713 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M) in sdebug_device_create_zones()
5715 while (capacity < devip->zsize << 2 && devip->zsize >= 2) in sdebug_device_create_zones()
5716 devip->zsize >>= 1; in sdebug_device_create_zones()
5717 if (devip->zsize < 2) { in sdebug_device_create_zones()
5719 return -EINVAL; in sdebug_device_create_zones()
5724 return -EINVAL; in sdebug_device_create_zones()
5726 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M) in sdebug_device_create_zones()
5728 if (devip->zsize >= capacity) { in sdebug_device_create_zones()
5730 return -EINVAL; in sdebug_device_create_zones()
5734 devip->zsize_shift = ilog2(devip->zsize); in sdebug_device_create_zones()
5735 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift; in sdebug_device_create_zones()
5738 devip->zcap = devip->zsize; in sdebug_device_create_zones()
5740 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >> in sdebug_device_create_zones()
5742 if (devip->zcap > devip->zsize) { in sdebug_device_create_zones()
5744 return -EINVAL; in sdebug_device_create_zones()
5748 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift; in sdebug_device_create_zones()
5751 return -EINVAL; in sdebug_device_create_zones()
5753 devip->nr_conv_zones = sdeb_zbc_nr_conv; in sdebug_device_create_zones()
5754 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >> in sdebug_device_create_zones()
5755 devip->zsize_shift; in sdebug_device_create_zones()
5756 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones; in sdebug_device_create_zones()
5759 if (devip->zcap < devip->zsize) in sdebug_device_create_zones()
5760 devip->nr_zones += devip->nr_seq_zones; in sdebug_device_create_zones()
5762 if (devip->zoned) { in sdebug_device_create_zones()
5764 if (sdeb_zbc_max_open >= devip->nr_zones - 1) in sdebug_device_create_zones()
5765 devip->max_open = (devip->nr_zones - 1) / 2; in sdebug_device_create_zones()
5767 devip->max_open = sdeb_zbc_max_open; in sdebug_device_create_zones()
5770 devip->zstate = kcalloc(devip->nr_zones, in sdebug_device_create_zones()
5772 if (!devip->zstate) in sdebug_device_create_zones()
5773 return -ENOMEM; in sdebug_device_create_zones()
5775 for (i = 0; i < devip->nr_zones; i++) { in sdebug_device_create_zones()
5776 zsp = &devip->zstate[i]; in sdebug_device_create_zones()
5778 zsp->z_start = zstart; in sdebug_device_create_zones()
5780 if (i < devip->nr_conv_zones) { in sdebug_device_create_zones()
5781 zsp->z_type = ZBC_ZTYPE_CNV; in sdebug_device_create_zones()
5782 zsp->z_cond = ZBC_NOT_WRITE_POINTER; in sdebug_device_create_zones()
5783 zsp->z_wp = (sector_t)-1; in sdebug_device_create_zones()
5784 zsp->z_size = in sdebug_device_create_zones()
5785 min_t(u64, devip->zsize, capacity - zstart); in sdebug_device_create_zones()
5786 } else if ((zstart & (devip->zsize - 1)) == 0) { in sdebug_device_create_zones()
5787 if (devip->zoned) in sdebug_device_create_zones()
5788 zsp->z_type = ZBC_ZTYPE_SWR; in sdebug_device_create_zones()
5790 zsp->z_type = ZBC_ZTYPE_SWP; in sdebug_device_create_zones()
5791 zsp->z_cond = ZC1_EMPTY; in sdebug_device_create_zones()
5792 zsp->z_wp = zsp->z_start; in sdebug_device_create_zones()
5793 zsp->z_size = in sdebug_device_create_zones()
5794 min_t(u64, devip->zcap, capacity - zstart); in sdebug_device_create_zones()
5796 zsp->z_type = ZBC_ZTYPE_GAP; in sdebug_device_create_zones()
5797 zsp->z_cond = ZBC_NOT_WRITE_POINTER; in sdebug_device_create_zones()
5798 zsp->z_wp = (sector_t)-1; in sdebug_device_create_zones()
5799 zsp->z_size = min_t(u64, devip->zsize - devip->zcap, in sdebug_device_create_zones()
5800 capacity - zstart); in sdebug_device_create_zones()
5803 WARN_ON_ONCE((int)zsp->z_size <= 0); in sdebug_device_create_zones()
5804 zstart += zsp->z_size; in sdebug_device_create_zones()
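To make the sizing arithmetic above concrete, a worked example with illustrative (non-default) parameters: a 512 MiB capacity, 128 MiB zone size, 96 MiB zone capacity and one conventional zone give conv_capacity = 128 MiB, nr_seq_zones = ALIGN(384 MiB, 128 MiB) / 128 MiB = 3, and because zcap < zsize the gap zones double that count, so nr_zones = 1 + 3 + 3 = 7. The init loop then lays out one 128 MiB conventional zone followed by three pairs of a 96 MiB sequential write zone and a 32 MiB gap zone, which together cover the full 512 MiB.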
5818 uuid_gen(&devip->lu_name); in sdebug_device_create()
5821 devip->lu_name = shared_uuid; in sdebug_device_create()
5825 devip->lu_name = shared_uuid; in sdebug_device_create()
5828 devip->sdbg_host = sdbg_host; in sdebug_device_create()
5830 devip->zoned = sdeb_zbc_model == BLK_ZONED_HM; in sdebug_device_create()
5836 devip->zoned = false; in sdebug_device_create()
5838 devip->create_ts = ktime_get_boottime(); in sdebug_device_create()
5839 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0)); in sdebug_device_create()
5840 spin_lock_init(&devip->list_lock); in sdebug_device_create()
5841 INIT_LIST_HEAD(&devip->inject_err_list); in sdebug_device_create()
5842 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); in sdebug_device_create()
5853 sdbg_host = shost_to_sdebug_host(sdev->host); in find_build_dev_info()
5855 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { in find_build_dev_info()
5856 if ((devip->used) && (devip->channel == sdev->channel) && in find_build_dev_info()
5857 (devip->target == sdev->id) && in find_build_dev_info()
5858 (devip->lun == sdev->lun)) in find_build_dev_info()
5861 if ((!devip->used) && (!open_devip)) in find_build_dev_info()
5873 open_devip->channel = sdev->channel; in find_build_dev_info()
5874 open_devip->target = sdev->id; in find_build_dev_info()
5875 open_devip->lun = sdev->lun; in find_build_dev_info()
5876 open_devip->sdbg_host = sdbg_host; in find_build_dev_info()
5877 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm); in find_build_dev_info()
5878 open_devip->used = true; in find_build_dev_info()
5886 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); in scsi_debug_slave_alloc()
5894 (struct sdebug_dev_info *)sdp->hostdata; in scsi_debug_slave_configure()
5899 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); in scsi_debug_slave_configure()
5900 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN) in scsi_debug_slave_configure()
5901 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN; in scsi_debug_slave_configure()
5907 sdp->hostdata = devip; in scsi_debug_slave_configure()
5909 sdp->no_uld_attach = 1; in scsi_debug_slave_configure()
5913 sdp->allow_restart = 1; in scsi_debug_slave_configure()
5915 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev), in scsi_debug_slave_configure()
5917 if (IS_ERR_OR_NULL(devip->debugfs_entry)) in scsi_debug_slave_configure()
5919 __func__, dev_name(&sdp->sdev_gendev)); in scsi_debug_slave_configure()
5921 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp, in scsi_debug_slave_configure()
5925 __func__, dev_name(&sdp->sdev_gendev)); in scsi_debug_slave_configure()
5933 (struct sdebug_dev_info *)sdp->hostdata; in scsi_debug_slave_destroy()
5938 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); in scsi_debug_slave_destroy()
5943 spin_lock(&devip->list_lock); in scsi_debug_slave_destroy()
5944 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in scsi_debug_slave_destroy()
5945 list_del_rcu(&err->list); in scsi_debug_slave_destroy()
5946 call_rcu(&err->rcu, sdebug_err_free); in scsi_debug_slave_destroy()
5948 spin_unlock(&devip->list_lock); in scsi_debug_slave_destroy()
5950 debugfs_remove(devip->debugfs_entry); in scsi_debug_slave_destroy()
5952 /* make this slot available for re-use */ in scsi_debug_slave_destroy()
5953 devip->used = false; in scsi_debug_slave_destroy()
5954 sdp->hostdata = NULL; in scsi_debug_slave_destroy()
5962 int res = hrtimer_try_to_cancel(&sd_dp->hrt); in stop_qc_helper()
5966 case -1: /* -1 It's executing the CB */ in stop_qc_helper()
5974 if (cancel_work_sync(&sd_dp->ew.work)) in stop_qc_helper()
5993 lockdep_assert_held(&sdsc->lock); in scsi_debug_stop_cmnd()
5997 sd_dp = &sqcp->sd_dp; in scsi_debug_stop_cmnd()
5998 l_defer_t = READ_ONCE(sd_dp->defer_t); in scsi_debug_stop_cmnd()
6008 * Called from scsi_debug_abort() only, which is for timed-out cmd.
6016 spin_lock_irqsave(&sdsc->lock, flags); in scsi_debug_abort_cmnd()
6018 spin_unlock_irqrestore(&sdsc->lock, flags); in scsi_debug_abort_cmnd()
6041 struct Scsi_Host *shost = sdhp->shost; in stop_all_queued()
6043 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL); in stop_all_queued()
6050 struct scsi_device *sdp = cmnd->device; in sdebug_fail_abort()
6051 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_abort()
6053 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_abort()
6060 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_abort()
6061 if (err->type == ERR_ABORT_CMD_FAILED && in sdebug_fail_abort()
6062 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_abort()
6063 ret = !!err->cnt; in sdebug_fail_abort()
6064 if (err->cnt < 0) in sdebug_fail_abort()
6065 err->cnt++; in sdebug_fail_abort()
6079 u8 *cmd = SCpnt->cmnd; in scsi_debug_abort()
6085 sdev_printk(KERN_INFO, SCpnt->device, in scsi_debug_abort()
6103 if (scmd->device == sdp) in scsi_debug_stop_all_queued_iter()
6112 struct Scsi_Host *shost = sdp->host; in scsi_debug_stop_all_queued()
6114 blk_mq_tagset_busy_iter(&shost->tag_set, in scsi_debug_stop_all_queued()
6120 struct scsi_device *sdp = cmnd->device; in sdebug_fail_lun_reset()
6121 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_lun_reset()
6123 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_lun_reset()
6130 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_lun_reset()
6131 if (err->type == ERR_LUN_RESET_FAILED && in sdebug_fail_lun_reset()
6132 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_lun_reset()
6133 ret = !!err->cnt; in sdebug_fail_lun_reset()
6134 if (err->cnt < 0) in sdebug_fail_lun_reset()
6135 err->cnt++; in sdebug_fail_lun_reset()
6148 struct scsi_device *sdp = SCpnt->device; in scsi_debug_device_reset()
6149 struct sdebug_dev_info *devip = sdp->hostdata; in scsi_debug_device_reset()
6150 u8 *cmd = SCpnt->cmnd; in scsi_debug_device_reset()
6160 set_bit(SDEBUG_UA_POR, devip->uas_bm); in scsi_debug_device_reset()
6172 struct scsi_target *starget = scsi_target(cmnd->device); in sdebug_fail_target_reset()
6174 (struct sdebug_target_info *)starget->hostdata; in sdebug_fail_target_reset()
6177 return targetip->reset_fail; in sdebug_fail_target_reset()
6184 struct scsi_device *sdp = SCpnt->device; in scsi_debug_target_reset()
6185 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); in scsi_debug_target_reset()
6187 u8 *cmd = SCpnt->cmnd; in scsi_debug_target_reset()
6195 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { in scsi_debug_target_reset()
6196 if (devip->target == sdp->id) { in scsi_debug_target_reset()
6197 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in scsi_debug_target_reset()
6217 struct scsi_device *sdp = SCpnt->device; in scsi_debug_bus_reset()
6218 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host); in scsi_debug_bus_reset()
6227 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { in scsi_debug_bus_reset()
6228 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in scsi_debug_bus_reset()
6246 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); in scsi_debug_host_reset()
6249 list_for_each_entry(devip, &sdbg_host->dev_info_list, in scsi_debug_host_reset()
6251 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); in scsi_debug_host_reset()
6258 sdev_printk(KERN_INFO, SCpnt->device, in scsi_debug_host_reset()
6278 sectors_per_part = (num_sectors - sdebug_sectors_per) in sdebug_build_parts()
6286 if (starts[k] - starts[k - 1] < max_part_secs) in sdebug_build_parts()
6287 max_part_secs = starts[k] - starts[k - 1]; in sdebug_build_parts()
6297 end_sec = starts[k] + max_part_secs - 1; in sdebug_build_parts()
6298 pp->boot_ind = 0; in sdebug_build_parts()
6300 pp->cyl = start_sec / heads_by_sects; in sdebug_build_parts()
6301 pp->head = (start_sec - (pp->cyl * heads_by_sects)) in sdebug_build_parts()
6303 pp->sector = (start_sec % sdebug_sectors_per) + 1; in sdebug_build_parts()
6305 pp->end_cyl = end_sec / heads_by_sects; in sdebug_build_parts()
6306 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects)) in sdebug_build_parts()
6308 pp->end_sector = (end_sec % sdebug_sectors_per) + 1; in sdebug_build_parts()
6310 pp->start_sect = cpu_to_le32(start_sec); in sdebug_build_parts()
6311 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1); in sdebug_build_parts()
6312 pp->sys_ind = 0x83; /* plain Linux partition */ in sdebug_build_parts()
6316 static void block_unblock_all_queues(bool block) in block_unblock_all_queues() argument
6323 struct Scsi_Host *shost = sdhp->shost; in block_unblock_all_queues()
6325 if (block) in block_unblock_all_queues()
6332 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6384 sd_dp = &sqcp->sd_dp; in sdebug_alloc_queued_cmd()
6386 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); in sdebug_alloc_queued_cmd()
6387 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete; in sdebug_alloc_queued_cmd()
6388 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); in sdebug_alloc_queued_cmd()
6390 sqcp->scmd = scmd; in sdebug_alloc_queued_cmd()
6407 bool polled = rq->cmd_flags & REQ_POLLED; in schedule_resp()
6420 sdp = cmnd->device; in schedule_resp()
6429 int qdepth = cmnd->device->queue_depth; in schedule_resp()
6448 sd_dp = &sqcp->sd_dp; in schedule_resp()
6454 cmnd->result = pfp ? pfp(cmnd, devip) : 0; in schedule_resp()
6455 if (cmnd->result & SDEG_RES_IMMED_MASK) { in schedule_resp()
6456 cmnd->result &= ~SDEG_RES_IMMED_MASK; in schedule_resp()
6459 if (cmnd->result == 0 && scsi_result != 0) in schedule_resp()
6460 cmnd->result = scsi_result; in schedule_resp()
6461 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) { in schedule_resp()
6465 cmnd->result = check_condition_result; in schedule_resp()
6469 if (unlikely(sdebug_verbose && cmnd->result)) in schedule_resp()
6470 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", in schedule_resp()
6471 __func__, cmnd->result); in schedule_resp()
6492 u64 d = ktime_get_boottime_ns() - ns_from_boot; in schedule_resp()
6501 kt -= d; in schedule_resp()
6505 sd_dp->issuing_cpu = raw_smp_processor_id(); in schedule_resp()
6507 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
6508 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt); in schedule_resp()
6510 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); in schedule_resp()
6511 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
6514 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
6516 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT); in schedule_resp()
6517 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); in schedule_resp()
6519 * The completion handler will try to grab sqcp->lock, in schedule_resp()
6524 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
6529 sd_dp->aborted = true; in schedule_resp()
6536 sd_dp->issuing_cpu = raw_smp_processor_id(); in schedule_resp()
6538 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
6540 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); in schedule_resp()
6541 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); in schedule_resp()
6542 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
6544 spin_lock_irqsave(&sdsc->lock, flags); in schedule_resp()
6546 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ); in schedule_resp()
6547 schedule_work(&sd_dp->ew.work); in schedule_resp()
6548 spin_unlock_irqrestore(&sdsc->lock, flags); in schedule_resp()
6554 respond_in_thread: /* call back to mid-layer using invocation thread */ in schedule_resp()
6555 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0; in schedule_resp()
6556 cmnd->result &= ~SDEG_RES_IMMED_MASK; in schedule_resp()
6557 if (cmnd->result == 0 && scsi_result != 0) in schedule_resp()
6558 cmnd->result = scsi_result; in schedule_resp()
6567 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6656 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0: immediate, -1 or -2: tiny delay");
6658 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6660 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
6678 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
6683 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6684 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6691 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6693 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6694 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
6698 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7 [SPC-5])");
6699 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6702 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6714 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6715 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6716 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6719 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
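/*
 * A minimal usage sketch for the parameters described above; the values are
 * arbitrary and chosen only for illustration, not defaults:
 *
 *   modprobe scsi_debug sector_size=4096 virtual_gb=8 delay=0 \
 *            submit_queues=4 poll_queues=2 zbc=managed
 *
 * Writable parameters also appear under /sys/module/scsi_debug/parameters/
 * once the module is loaded, and many have counterparts in the
 * /sys/bus/pseudo/drivers/scsi_debug directory mentioned above, where
 * changing them can trigger additional driver actions.
 */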
6735 if (k >= (SDEBUG_INFO_LEN - 1)) in scsi_debug_info()
6737 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k, in scsi_debug_info()
6753 return -EACCES; in scsi_debug_write_info()
6757 return -EINVAL; in scsi_debug_write_info()
6778 int queue_num = data->queue_num; in sdebug_submit_queue_iter()
6784 if (*data->first == -1) in sdebug_submit_queue_iter()
6785 *data->first = *data->last = tag; in sdebug_submit_queue_iter()
6787 *data->last = tag; in sdebug_submit_queue_iter()
6811 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n", in scsi_debug_show_info()
6827 int f = -1, l = -1; in scsi_debug_show_info()
6834 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter, in scsi_debug_show_info()
6842 seq_printf(m, "this host_no=%d\n", host->host_no); in scsi_debug_show_info()
6852 idx = sdhp->si_idx; in scsi_debug_show_info()
6854 sdhp->shost->host_no, idx); in scsi_debug_show_info()
6876 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6893 struct Scsi_Host *shost = sdhp->shost; in delay_store()
6896 res = -EBUSY; /* queued commands */ in delay_store()
6909 return -EINVAL; in delay_store()
6917 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6934 struct Scsi_Host *shost = sdhp->shost; in ndelay_store()
6937 res = -EBUSY; /* queued commands */ in ndelay_store()
6952 return -EINVAL; in ndelay_store()
6976 return -EINVAL; in opts_store()
6997 return -EINVAL; in ptype_store()
7001 return -EINVAL; in ptype_store()
7005 return -EINVAL; in ptype_store()
7022 return -EINVAL; in dsense_store()
7044 if (want_store) { /* 1 --> 0 transition, set up store */ in fake_rw_store()
7057 if (sdhp->si_idx != idx) { in fake_rw_store()
7058 xa_set_mark(per_store_ap, sdhp->si_idx, in fake_rw_store()
7060 sdhp->si_idx = idx; in fake_rw_store()
7064 } else { /* 0 --> 1 transition is trigger for shrink */ in fake_rw_store()
7070 return -EINVAL; in fake_rw_store()
7087 return -EINVAL; in no_lun_0_store()
7105 return -EINVAL; in num_tgts_store()
7126 return -EINVAL; in per_host_store_store()
7158 return -EINVAL; in every_nth_store()
7182 return -EINVAL; in lun_format_store()
7186 return -EINVAL; in lun_format_store()
7190 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */ in lun_format_store()
7196 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { in lun_format_store()
7197 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); in lun_format_store()
7204 return -EINVAL; in lun_format_store()
7221 return -EINVAL; in max_luns_store()
7226 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */ in max_luns_store()
7233 list_for_each_entry(dp, &sdhp->dev_info_list, in max_luns_store()
7236 dp->uas_bm); in max_luns_store()
7243 return -EINVAL; in max_luns_store()
7267 count = -EBUSY; in max_queue_store()
7271 return -EINVAL; in max_queue_store()
7290 return -EINVAL; in no_rwlock_store()
7327 return -ENOTSUPP; in virtual_gb_store()
7340 list_for_each_entry(dp, &sdhp->dev_info_list, in virtual_gb_store()
7343 dp->uas_bm); in virtual_gb_store()
7350 return -EINVAL; in virtual_gb_store()
7370 return -EINVAL; in add_host_store()
7381 if (found) /* re-use case */ in add_host_store()
7388 } while (--delta_hosts); in add_host_store()
7411 return -EINVAL; in vpd_use_hostno_store()
7433 return -EINVAL; in statistics_store()
7478 return scnprintf(buf, PAGE_SIZE, "0-%u\n", in map_show()
7485 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", in map_show()
7486 (int)map_size, sip->map_storep); in map_show()
7506 return -EINVAL; in random_store()
7526 return -EINVAL; in removable_store()
7544 return -EINVAL; in host_lock_store()
7561 return -EINVAL; in strict_store()
7591 [BLK_ZONED_HA] = "host-aware",
7592 [BLK_ZONED_HM] = "host-managed",
7616 return -EINVAL; in sdeb_zbc_model_str()
7641 p += scnprintf(p, end - p, "%d %ld\n", i, in group_number_stats_show()
7644 return p - buf; in group_number_stats_show()
7660 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7717 int idx = -1; in scsi_debug_init()
7733 return -EINVAL; in scsi_debug_init()
7747 return -EINVAL; in scsi_debug_init()
7752 return -EINVAL; in scsi_debug_init()
7757 return -EINVAL; in scsi_debug_init()
7762 return -EINVAL; in scsi_debug_init()
7767 return -EINVAL; in scsi_debug_init()
7786 return -EINVAL; in scsi_debug_init()
7791 return -EINVAL; in scsi_debug_init()
7796 return -EINVAL; in scsi_debug_init()
7803 return -EINVAL; in scsi_debug_init()
7814 * check for host managed zoned block device specified with in scsi_debug_init()
7834 return -EINVAL; in scsi_debug_init()
7881 return -EINVAL; in scsi_debug_init()
7914 ret = -ENOMEM; in scsi_debug_init()
7927 k, -ret); in scsi_debug_init()
7934 pr_err("add_host k=%d error=%d\n", k, -ret); in scsi_debug_init()
7959 for (; k; k--) in scsi_debug_exit()
7994 vfree(sip->map_storep); in sdebug_erase_store()
7995 vfree(sip->dif_storep); in sdebug_erase_store()
7996 vfree(sip->storep); in sdebug_erase_store()
8032 return -ENOMEM; in sdebug_add_store()
8039 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res); in sdebug_add_store()
8047 res = -ENOMEM; in sdebug_add_store()
8048 sip->storep = vzalloc(sz); in sdebug_add_store()
8049 if (!sip->storep) { in sdebug_add_store()
8054 sdebug_build_parts(sip->storep, sz); in sdebug_add_store()
8061 sip->dif_storep = vmalloc(dif_size); in sdebug_add_store()
8064 sip->dif_storep); in sdebug_add_store()
8066 if (!sip->dif_storep) { in sdebug_add_store()
8070 memset(sip->dif_storep, 0xff, dif_size); in sdebug_add_store()
8072 /* Logical Block Provisioning */ in sdebug_add_store()
8074 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; in sdebug_add_store()
8075 sip->map_storep = vmalloc(array_size(sizeof(long), in sdebug_add_store()
8080 if (!sip->map_storep) { in sdebug_add_store()
8085 bitmap_zero(sip->map_storep, map_size); in sdebug_add_store()
8092 rwlock_init(&sip->macc_data_lck); in sdebug_add_store()
8093 rwlock_init(&sip->macc_meta_lck); in sdebug_add_store()
8094 rwlock_init(&sip->macc_sector_lck); in sdebug_add_store()
8098 pr_warn("%s: failed, errno=%d\n", __func__, -res); in sdebug_add_store()
8105 int error = -ENOMEM; in sdebug_add_host_helper()
8111 return -ENOMEM; in sdebug_add_host_helper()
8115 sdbg_host->si_idx = idx; in sdebug_add_host_helper()
8117 INIT_LIST_HEAD(&sdbg_host->dev_info_list); in sdebug_add_host_helper()
8127 list_add_tail(&sdbg_host->host_list, &sdebug_host_list); in sdebug_add_host_helper()
8130 sdbg_host->dev.bus = &pseudo_lld_bus; in sdebug_add_host_helper()
8131 sdbg_host->dev.parent = pseudo_primary; in sdebug_add_host_helper()
8132 sdbg_host->dev.release = &sdebug_release_adapter; in sdebug_add_host_helper()
8133 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts); in sdebug_add_host_helper()
8135 error = device_register(&sdbg_host->dev); in sdebug_add_host_helper()
8138 list_del(&sdbg_host->host_list); in sdebug_add_host_helper()
8147 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, in sdebug_add_host_helper()
8149 list_del(&sdbg_devinfo->dev_list); in sdebug_add_host_helper()
8150 kfree(sdbg_devinfo->zstate); in sdebug_add_host_helper()
8153 if (sdbg_host->dev.release) in sdebug_add_host_helper()
8154 put_device(&sdbg_host->dev); in sdebug_add_host_helper()
8157 pr_warn("%s: failed, errno=%d\n", __func__, -error); in sdebug_add_host_helper()
8175 int idx = -1; in sdebug_do_remove_host()
8183 idx = sdbg_host->si_idx; in sdebug_do_remove_host()
8191 if (idx == sdbg_host2->si_idx) { in sdebug_do_remove_host()
8199 --sdeb_most_recent_idx; in sdebug_do_remove_host()
8203 list_del(&sdbg_host->host_list); in sdebug_do_remove_host()
8209 device_unregister(&sdbg_host->dev); in sdebug_do_remove_host()
8210 --sdebug_num_hosts; in sdebug_do_remove_host()
8215 struct sdebug_dev_info *devip = sdev->hostdata; in sdebug_change_qdepth()
8218 return -ENODEV; in sdebug_change_qdepth()
8230 if (qdepth != sdev->queue_depth) in sdebug_change_qdepth()
8239 return sdev->queue_depth; in sdebug_change_qdepth()
8245 if (sdebug_every_nth < -1) in fake_timeout()
8246 sdebug_every_nth = -1; in fake_timeout()
8262 struct scsi_device *sdp = scp->device; in resp_not_ready()
8264 stopped_state = atomic_read(&devip->stopped); in resp_not_ready()
8266 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) { in resp_not_ready()
8267 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts)); in resp_not_ready()
8270 atomic_set(&devip->stopped, 0); in resp_not_ready()
8278 if (scp->cmnd[0] == TEST_UNIT_READY) { in resp_not_ready()
8282 diff_ns = tur_nanosecs_to_ready - diff_ns; in resp_not_ready()
8285 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */ in resp_not_ready()
8287 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE, in resp_not_ready()
8303 if (shost->nr_hw_queues == 1) in sdebug_map_queues()
8307 struct blk_mq_queue_map *map = &shost->tag_set.map[i]; in sdebug_map_queues()
8309 map->nr_queues = 0; in sdebug_map_queues()
8312 map->nr_queues = submit_queues - poll_queues; in sdebug_map_queues()
8314 map->nr_queues = poll_queues; in sdebug_map_queues()
8316 if (!map->nr_queues) { in sdebug_map_queues()
8321 map->queue_offset = qoff; in sdebug_map_queues()
8324 qoff += map->nr_queues; in sdebug_map_queues()
8347 int queue_num = data->queue_num; in sdebug_blk_mq_poll_iter()
8355 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state)) in sdebug_blk_mq_poll_iter()
8360 spin_lock_irqsave(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
8363 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
8367 sd_dp = &sqcp->sd_dp; in sdebug_blk_mq_poll_iter()
8368 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) { in sdebug_blk_mq_poll_iter()
8369 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
8373 if (time < sd_dp->cmpl_ts) { in sdebug_blk_mq_poll_iter()
8374 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
8379 spin_unlock_irqrestore(&sdsc->lock, flags); in sdebug_blk_mq_poll_iter()
8383 if (raw_smp_processor_id() != sd_dp->issuing_cpu) in sdebug_blk_mq_poll_iter()
8390 (*data->num_entries)++; in sdebug_blk_mq_poll_iter()
8402 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter, in sdebug_blk_mq_poll()
8412 struct scsi_device *sdp = cmnd->device; in sdebug_timeout_cmd()
8413 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_timeout_cmd()
8415 unsigned char *cmd = cmnd->cmnd; in sdebug_timeout_cmd()
8422 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_timeout_cmd()
8423 if (err->type == ERR_TMOUT_CMD && in sdebug_timeout_cmd()
8424 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_timeout_cmd()
8425 ret = !!err->cnt; in sdebug_timeout_cmd()
8426 if (err->cnt < 0) in sdebug_timeout_cmd()
8427 err->cnt++; in sdebug_timeout_cmd()
8440 struct scsi_device *sdp = cmnd->device; in sdebug_fail_queue_cmd()
8441 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_queue_cmd()
8443 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_queue_cmd()
8450 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_queue_cmd()
8451 if (err->type == ERR_FAIL_QUEUE_CMD && in sdebug_fail_queue_cmd()
8452 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_queue_cmd()
8453 ret = err->cnt ? err->queuecmd_ret : 0; in sdebug_fail_queue_cmd()
8454 if (err->cnt < 0) in sdebug_fail_queue_cmd()
8455 err->cnt++; in sdebug_fail_queue_cmd()
8469 struct scsi_device *sdp = cmnd->device; in sdebug_fail_cmd()
8470 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; in sdebug_fail_cmd()
8472 unsigned char *cmd = cmnd->cmnd; in sdebug_fail_cmd()
8480 list_for_each_entry_rcu(err, &devip->inject_err_list, list) { in sdebug_fail_cmd()
8481 if (err->type == ERR_FAIL_CMD && in sdebug_fail_cmd()
8482 (err->cmd == cmd[0] || err->cmd == 0xff)) { in sdebug_fail_cmd()
8483 if (!err->cnt) { in sdebug_fail_cmd()
8488 ret = !!err->cnt; in sdebug_fail_cmd()
8498 if (err->cnt < 0) in sdebug_fail_cmd()
8499 err->cnt++; in sdebug_fail_cmd()
8500 mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq); in sdebug_fail_cmd()
8501 result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24; in sdebug_fail_cmd()
8512 struct scsi_device *sdp = scp->device; in scsi_debug_queuecommand()
8516 u8 *cmd = scp->cmnd; in scsi_debug_queuecommand()
8521 u64 lun_index = sdp->lun & 0x3FFF; in scsi_debug_queuecommand()
8542 len = scp->cmd_len; in scsi_debug_queuecommand()
8548 n += scnprintf(b + n, sb - n, "%02x ", in scsi_debug_queuecommand()
8556 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); in scsi_debug_queuecommand()
8562 devip = (struct sdebug_dev_info *)sdp->hostdata; in scsi_debug_queuecommand()
8594 na = oip->num_attached; in scsi_debug_queuecommand()
8595 r_pfp = oip->pfp; in scsi_debug_queuecommand()
8598 if (FF_SA & r_oip->flags) { in scsi_debug_queuecommand()
8599 if (F_SA_LOW & oip->flags) in scsi_debug_queuecommand()
8603 for (k = 0; k <= na; oip = r_oip->arrp + k++) { in scsi_debug_queuecommand()
8604 if (opcode == oip->opcode && sa == oip->sa) in scsi_debug_queuecommand()
8608 for (k = 0; k <= na; oip = r_oip->arrp + k++) { in scsi_debug_queuecommand()
8609 if (opcode == oip->opcode) in scsi_debug_queuecommand()
8614 if (F_SA_LOW & r_oip->flags) in scsi_debug_queuecommand()
8616 else if (F_SA_HIGH & r_oip->flags) in scsi_debug_queuecommand()
8623 flags = oip->flags; in scsi_debug_queuecommand()
8639 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) { in scsi_debug_queuecommand()
8640 rem = ~oip->len_mask[k] & cmd[k]; in scsi_debug_queuecommand()
8642 for (j = 7; j >= 0; --j, rem <<= 1) { in scsi_debug_queuecommand()
8652 find_first_bit(devip->uas_bm, in scsi_debug_queuecommand()
8658 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) && in scsi_debug_queuecommand()
8659 atomic_read(&devip->stopped))) { in scsi_debug_queuecommand()
8670 if (likely(oip->pfp)) in scsi_debug_queuecommand()
8671 pfp = oip->pfp; /* calls a resp_* function */ in scsi_debug_queuecommand()
8704 spin_lock_init(&sdsc->lock); in sdebug_init_cmd_priv()
8732 .max_sectors = -1U,
8733 .max_segment_size = -1U,
8754 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1; in sdebug_driver_probe()
8759 error = -ENODEV; in sdebug_driver_probe()
8771 hpnt->nr_hw_queues = submit_queues; in sdebug_driver_probe()
8773 hpnt->host_tagset = 1; in sdebug_driver_probe()
8776 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) { in sdebug_driver_probe()
8778 my_name, poll_queues, hpnt->nr_hw_queues); in sdebug_driver_probe()
8784 * left over for non-polled I/O. in sdebug_driver_probe()
8792 my_name, submit_queues - 1); in sdebug_driver_probe()
8796 hpnt->nr_maps = 3; in sdebug_driver_probe()
8798 sdbg_host->shost = hpnt; in sdebug_driver_probe()
8799 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id)) in sdebug_driver_probe()
8800 hpnt->max_id = sdebug_num_tgts + 1; in sdebug_driver_probe()
8802 hpnt->max_id = sdebug_num_tgts; in sdebug_driver_probe()
8804 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; in sdebug_driver_probe()
8855 error = scsi_add_host(hpnt, &sdbg_host->dev); in sdebug_driver_probe()
8858 error = -ENODEV; in sdebug_driver_probe()
8874 scsi_remove_host(sdbg_host->shost); in sdebug_driver_remove()
8876 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, in sdebug_driver_remove()
8878 list_del(&sdbg_devinfo->dev_list); in sdebug_driver_remove()
8879 kfree(sdbg_devinfo->zstate); in sdebug_driver_remove()
8883 scsi_host_put(sdbg_host->shost); in sdebug_driver_remove()