Merge: SCSI updates for 9.5
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4106

SCSI core updates for 9.5 to upstream 6.9-rc4

JIRA: https://issues.redhat.com/browse/RHEL-33543
Upstream Status: From upstream linux mainline

Signed-off-by: Ewan D. Milne <emilne@redhat.com>
Approved-by: Jeff Moyer <jmoyer@redhat.com>
Approved-by: Ming Lei <ming.lei@redhat.com>
Approved-by: Eric Chanudet <echanude@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Lucas Zampieri <lzampier@redhat.com>

commit 0ad8c7f7fd
@@ -58,3 +58,25 @@ Description:
 		(RW) Write to the file to turn on or off the SATA ncq (native
 		command queueing) support. By default this feature is turned
 		off.
+
+
+What:		/sys/block/*/device/cdl_supported
+Date:		May, 2023
+KernelVersion:	v6.5
+Contact:	linux-scsi@vger.kernel.org
+Description:
+		(RO) Indicates if the device supports the command duration
+		limits feature found in some ATA and SCSI devices.
+
+
+What:		/sys/block/*/device/cdl_enable
+Date:		May, 2023
+KernelVersion:	v6.5
+Contact:	linux-scsi@vger.kernel.org
+Description:
+		(RW) For a device supporting the command duration limits
+		feature, write to the file to turn on or off the feature.
+		By default this feature is turned off.
+		Writing "1" to this file enables the use of command duration
+		limits for read and write commands in the kernel and turns on
+		the feature on the device. Writing "0" disables the feature.
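As a side note (not part of the patch), the two attributes documented above are plain sysfs booleans. A minimal user-space sketch of turning the feature on; the disk name is a placeholder and error handling is abbreviated:

#include <stdio.h>

/* Hypothetical helper: write "0" or "1" to a cdl_enable attribute. */
static int set_cdl_enable(const char *attr_path, int enable)
{
	FILE *f = fopen(attr_path, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", enable ? 1 : 0);	/* the attribute accepts "0" or "1" */
	return fclose(f);
}

int main(void)
{
	/* Placeholder device; substitute the real disk. */
	return set_cdl_enable("/sys/block/sda/device/cdl_enable", 1);
}
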
@@ -5528,16 +5528,16 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
 		bfqq->new_ioprio_class = task_nice_ioclass(tsk);
 		break;
 	case IOPRIO_CLASS_RT:
-		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
+		bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
 		bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
 		break;
 	case IOPRIO_CLASS_BE:
-		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
+		bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
 		bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
 		break;
 	case IOPRIO_CLASS_IDLE:
 		bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
-		bfqq->new_ioprio = 7;
+		bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
 		break;
 	}

@@ -5834,7 +5834,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
 				       struct bfq_io_cq *bic,
 				       bool respawn)
 {
-	const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
+	const int ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
 	const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
 	struct bfq_queue **async_bfqq = NULL;
 	struct bfq_queue *bfqq;

@@ -171,6 +171,9 @@ static const struct {
 	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
 	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

+	/* Command duration limit device-side timeout */
+	[BLK_STS_DURATION_LIMIT]	= { -ETIME, "duration limit exceeded" },
+
 	/* everything else not covered above: */
 	[BLK_STS_IOERR]			= { -EIO, "I/O" },
 };

@@ -33,7 +33,7 @@
 int ioprio_check_cap(int ioprio)
 {
 	int class = IOPRIO_PRIO_CLASS(ioprio);
-	int data = IOPRIO_PRIO_DATA(ioprio);
+	int level = IOPRIO_PRIO_LEVEL(ioprio);

 	switch (class) {
 	case IOPRIO_CLASS_RT:
@@ -49,15 +49,16 @@ int ioprio_check_cap(int ioprio)
 		fallthrough;
 		/* rt has prio field too */
 	case IOPRIO_CLASS_BE:
-		if (data >= IOPRIO_NR_LEVELS || data < 0)
+		if (level >= IOPRIO_NR_LEVELS)
 			return -EINVAL;
 		break;
 	case IOPRIO_CLASS_IDLE:
 		break;
 	case IOPRIO_CLASS_NONE:
-		if (data)
+		if (level)
 			return -EINVAL;
 		break;
+	case IOPRIO_CLASS_INVALID:
 	default:
 		return -EINVAL;
 	}

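For context on the IOPRIO_PRIO_DATA -> IOPRIO_PRIO_LEVEL conversions above, a small stand-alone sketch of composing and decomposing a priority value; it assumes a uapi <linux/ioprio.h> that already exports IOPRIO_PRIO_LEVEL (introduced by this series):

#include <linux/ioprio.h>
#include <stdio.h>

int main(void)
{
	/* Build a best-effort class, level 4 value, then take it apart again. */
	int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	printf("class=%d level=%d\n",
	       IOPRIO_PRIO_CLASS(ioprio), IOPRIO_PRIO_LEVEL(ioprio));
	return 0;
}
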
@@ -102,7 +102,9 @@ do { \

 #define MAX_RETRIES 1

-static struct class * ch_sysfs_class;
+static const struct class ch_sysfs_class = {
+	.name = "scsi_changer",
+};

 typedef struct {
 	struct kref         ref;
@@ -113,7 +115,6 @@ typedef struct {
 	struct scsi_device  **dt;            /* ptrs to data transfer elements */
 	u_int               firsts[CH_TYPES];
 	u_int               counts[CH_TYPES];
-	u_int               unit_attention;
 	u_int               voltags;
 	struct mutex	    lock;
 } scsi_changer;
@@ -186,17 +187,29 @@ static int
 ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
 	   void *buffer, unsigned int buflength, enum req_op op)
 {
-	int errno, retries = 0, timeout, result;
+	int errno = 0, timeout, result;
 	struct scsi_sense_hdr sshdr;
+	struct scsi_failure failure_defs[] = {
+		{
+			.sense = UNIT_ATTENTION,
+			.asc = SCMD_FAILURE_ASC_ANY,
+			.ascq = SCMD_FAILURE_ASCQ_ANY,
+			.allowed = 3,
+			.result = SAM_STAT_CHECK_CONDITION,
+		},
+		{}
+	};
+	struct scsi_failures failures = {
+		.failure_definitions = failure_defs,
+	};
 	const struct scsi_exec_args exec_args = {
 		.sshdr = &sshdr,
+		.failures = &failures,
 	};

 	timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
 		? timeout_init : timeout_move;

- retry:
-	errno = 0;
 	result = scsi_execute_cmd(ch->device, cmd, op, buffer, buflength,
 				  timeout * HZ, MAX_RETRIES, &exec_args);
 	if (result < 0)
@@ -205,14 +218,6 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
 		if (debug)
 			scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
 		errno = ch_find_errno(&sshdr);
-
-		switch(sshdr.sense_key) {
-		case UNIT_ATTENTION:
-			ch->unit_attention = 1;
-			if (retries++ < 3)
-				goto retry;
-			break;
-		}
 	}
 	return errno;
 }
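The two ch_do_scsi() hunks above replace the driver's open-coded UNIT ATTENTION retry loop with a declarative scsi_failures table that scsi_execute_cmd() consults. As a rough, non-authoritative sketch of the same pattern, here is a hypothetical caller that instead retries a BUSY status a few times (the function name and limits are illustrative; the structures are the ones used above):

/* Sketch only: not part of this series. */
static int example_tur_with_busy_retries(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_failure failure_defs[] = {
		{
			/* Retry a BUSY status up to five times. */
			.allowed = 5,
			.result = SAM_STAT_BUSY,
		},
		{}
	};
	struct scsi_failures failures = {
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.failures = &failures,
	};

	/* The midlayer performs the retries; no retry loop in the caller. */
	return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
				10 * HZ, 3, &exec_args);
}
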
@@ -659,19 +664,23 @@ static long ch_ioctl(struct file *file,
 		memset(&vparams,0,sizeof(vparams));
 		if (ch->counts[CHET_V1]) {
 			vparams.cvp_n1  = ch->counts[CHET_V1];
-			strncpy(vparams.cvp_label1,vendor_labels[0],16);
+			strscpy(vparams.cvp_label1, vendor_labels[0],
+				sizeof(vparams.cvp_label1));
 		}
 		if (ch->counts[CHET_V2]) {
 			vparams.cvp_n2  = ch->counts[CHET_V2];
-			strncpy(vparams.cvp_label2,vendor_labels[1],16);
+			strscpy(vparams.cvp_label2, vendor_labels[1],
+				sizeof(vparams.cvp_label2));
 		}
 		if (ch->counts[CHET_V3]) {
 			vparams.cvp_n3  = ch->counts[CHET_V3];
-			strncpy(vparams.cvp_label3,vendor_labels[2],16);
+			strscpy(vparams.cvp_label3, vendor_labels[2],
+				sizeof(vparams.cvp_label3));
 		}
 		if (ch->counts[CHET_V4]) {
 			vparams.cvp_n4  = ch->counts[CHET_V4];
-			strncpy(vparams.cvp_label4,vendor_labels[3],16);
+			strscpy(vparams.cvp_label4, vendor_labels[3],
+				sizeof(vparams.cvp_label4));
 		}
 		if (copy_to_user(argp, &vparams, sizeof(vparams)))
 			return -EFAULT;
@@ -923,7 +932,7 @@ static int ch_probe(struct device *dev)
 	mutex_init(&ch->lock);
 	kref_init(&ch->ref);
 	ch->device = sd;
-	class_dev = device_create(ch_sysfs_class, dev,
+	class_dev = device_create(&ch_sysfs_class, dev,
 				  MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
 				  "s%s", ch->name);
 	if (IS_ERR(class_dev)) {
@@ -948,7 +957,7 @@ static int ch_probe(struct device *dev)

 	return 0;
 destroy_dev:
-	device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
+	device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
 put_device:
 	scsi_device_put(sd);
 remove_idr:
@@ -967,7 +976,7 @@ static int ch_remove(struct device *dev)
 	dev_set_drvdata(dev, NULL);
 	spin_unlock(&ch_index_lock);

-	device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
+	device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
 	scsi_device_put(ch->device);
 	kref_put(&ch->ref, ch_destroy);
 	return 0;
@@ -996,11 +1005,9 @@ static int __init init_ch_module(void)
 	int rc;

 	printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
-	ch_sysfs_class = class_create("scsi_changer");
-	if (IS_ERR(ch_sysfs_class)) {
-		rc = PTR_ERR(ch_sysfs_class);
+	rc = class_register(&ch_sysfs_class);
+	if (rc)
 		return rc;
-	}
 	rc = register_chrdev(SCSI_CHANGER_MAJOR,"ch",&changer_fops);
 	if (rc < 0) {
 		printk("Unable to get major %d for SCSI-Changer\n",
@@ -1015,7 +1022,7 @@ static int __init init_ch_module(void)
 fail2:
 	unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
 fail1:
-	class_destroy(ch_sysfs_class);
+	class_unregister(&ch_sysfs_class);
 	return rc;
 }

@@ -1023,7 +1030,7 @@ static void __exit exit_ch_module(void)
 {
 	scsi_unregister_driver(&ch_template.gendrv);
 	unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
-	class_destroy(ch_sysfs_class);
+	class_unregister(&ch_sysfs_class);
 	idr_destroy(&ch_index_idr);
 }

@ -46,9 +46,6 @@ static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h,
|
|||
int ret = SCSI_DH_IO;
|
||||
|
||||
switch (sshdr->sense_key) {
|
||||
case UNIT_ATTENTION:
|
||||
ret = SCSI_DH_IMM_RETRY;
|
||||
break;
|
||||
case NOT_READY:
|
||||
if (sshdr->asc == 0x04 && sshdr->ascq == 2) {
|
||||
/*
|
||||
|
@ -82,31 +79,40 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
|
|||
{
|
||||
unsigned char cmd[6] = { TEST_UNIT_READY };
|
||||
struct scsi_sense_hdr sshdr;
|
||||
int ret = SCSI_DH_OK, res;
|
||||
int ret, res;
|
||||
blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
|
||||
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = SCMD_FAILURE_ASC_ANY,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.allowed = SCMD_FAILURE_NO_LIMIT,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.sshdr = &sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
retry:
|
||||
res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
|
||||
HP_SW_RETRIES, &exec_args);
|
||||
if (res) {
|
||||
if (scsi_sense_valid(&sshdr))
|
||||
ret = tur_done(sdev, h, &sshdr);
|
||||
else {
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
"%s: sending tur failed with %x\n",
|
||||
HP_SW_NAME, res);
|
||||
ret = SCSI_DH_IO;
|
||||
}
|
||||
} else {
|
||||
if (res > 0 && scsi_sense_valid(&sshdr)) {
|
||||
ret = tur_done(sdev, h, &sshdr);
|
||||
} else if (res == 0) {
|
||||
h->path_state = HP_SW_PATH_ACTIVE;
|
||||
ret = SCSI_DH_OK;
|
||||
} else {
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
"%s: sending tur failed with %x\n",
|
||||
HP_SW_NAME, res);
|
||||
ret = SCSI_DH_IO;
|
||||
}
|
||||
if (ret == SCSI_DH_IMM_RETRY)
|
||||
goto retry;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -122,46 +128,58 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
|
|||
unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };
|
||||
struct scsi_sense_hdr sshdr;
|
||||
struct scsi_device *sdev = h->sdev;
|
||||
int res, rc = SCSI_DH_OK;
|
||||
int retry_cnt = HP_SW_RETRIES;
|
||||
int res, rc;
|
||||
blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
|
||||
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
/*
|
||||
* LUN not ready - manual intervention required
|
||||
*
|
||||
* Switch-over in progress, retry.
|
||||
*/
|
||||
.sense = NOT_READY,
|
||||
.asc = 0x04,
|
||||
.ascq = 0x03,
|
||||
.allowed = HP_SW_RETRIES,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.sshdr = &sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
retry:
|
||||
res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
|
||||
HP_SW_RETRIES, &exec_args);
|
||||
if (res) {
|
||||
if (!scsi_sense_valid(&sshdr)) {
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
"%s: sending start_stop_unit failed, "
|
||||
"no sense available\n", HP_SW_NAME);
|
||||
return SCSI_DH_IO;
|
||||
}
|
||||
switch (sshdr.sense_key) {
|
||||
case NOT_READY:
|
||||
if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
|
||||
/*
|
||||
* LUN not ready - manual intervention required
|
||||
*
|
||||
* Switch-over in progress, retry.
|
||||
*/
|
||||
if (--retry_cnt)
|
||||
goto retry;
|
||||
rc = SCSI_DH_RETRY;
|
||||
break;
|
||||
}
|
||||
fallthrough;
|
||||
default:
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
"%s: sending start_stop_unit failed, "
|
||||
"sense %x/%x/%x\n", HP_SW_NAME,
|
||||
sshdr.sense_key, sshdr.asc, sshdr.ascq);
|
||||
rc = SCSI_DH_IO;
|
||||
}
|
||||
if (!res) {
|
||||
return SCSI_DH_OK;
|
||||
} else if (res < 0 || !scsi_sense_valid(&sshdr)) {
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
"%s: sending start_stop_unit failed, "
|
||||
"no sense available\n", HP_SW_NAME);
|
||||
return SCSI_DH_IO;
|
||||
}
|
||||
|
||||
switch (sshdr.sense_key) {
|
||||
case NOT_READY:
|
||||
if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
|
||||
rc = SCSI_DH_RETRY;
|
||||
break;
|
||||
}
|
||||
fallthrough;
|
||||
default:
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
"%s: sending start_stop_unit failed, "
|
||||
"sense %x/%x/%x\n", HP_SW_NAME,
|
||||
sshdr.sense_key, sshdr.asc, sshdr.ascq);
|
||||
rc = SCSI_DH_IO;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
|
|
@ -485,43 +485,17 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
|
|||
static int mode_select_handle_sense(struct scsi_device *sdev,
|
||||
struct scsi_sense_hdr *sense_hdr)
|
||||
{
|
||||
int err = SCSI_DH_IO;
|
||||
struct rdac_dh_data *h = sdev->handler_data;
|
||||
|
||||
if (!scsi_sense_valid(sense_hdr))
|
||||
goto done;
|
||||
|
||||
switch (sense_hdr->sense_key) {
|
||||
case NO_SENSE:
|
||||
case ABORTED_COMMAND:
|
||||
case UNIT_ATTENTION:
|
||||
err = SCSI_DH_RETRY;
|
||||
break;
|
||||
case NOT_READY:
|
||||
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
|
||||
/* LUN Not Ready and is in the Process of Becoming
|
||||
* Ready
|
||||
*/
|
||||
err = SCSI_DH_RETRY;
|
||||
break;
|
||||
case ILLEGAL_REQUEST:
|
||||
if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36)
|
||||
/*
|
||||
* Command Lock contention
|
||||
*/
|
||||
err = SCSI_DH_IMM_RETRY;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return SCSI_DH_IO;
|
||||
|
||||
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
|
||||
"MODE_SELECT returned with sense %02x/%02x/%02x",
|
||||
(char *) h->ctlr->array_name, h->ctlr->index,
|
||||
sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
|
||||
|
||||
done:
|
||||
return err;
|
||||
return SCSI_DH_IO;
|
||||
}
|
||||
|
||||
static void send_mode_select(struct work_struct *work)
|
||||
|
@ -530,7 +504,7 @@ static void send_mode_select(struct work_struct *work)
|
|||
container_of(work, struct rdac_controller, ms_work);
|
||||
struct scsi_device *sdev = ctlr->ms_sdev;
|
||||
struct rdac_dh_data *h = sdev->handler_data;
|
||||
int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT;
|
||||
int rc, err;
|
||||
struct rdac_queue_data *tmp, *qdata;
|
||||
LIST_HEAD(list);
|
||||
unsigned char cdb[MAX_COMMAND_SIZE];
|
||||
|
@ -538,8 +512,49 @@ static void send_mode_select(struct work_struct *work)
|
|||
unsigned int data_size;
|
||||
blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
|
||||
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
.sense = NO_SENSE,
|
||||
.asc = SCMD_FAILURE_ASC_ANY,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{
|
||||
.sense = ABORTED_COMMAND,
|
||||
.asc = SCMD_FAILURE_ASC_ANY,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = SCMD_FAILURE_ASC_ANY,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
/* LUN Not Ready and is in the Process of Becoming Ready */
|
||||
{
|
||||
.sense = NOT_READY,
|
||||
.asc = 0x04,
|
||||
.ascq = 0x01,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
/* Command Lock contention */
|
||||
{
|
||||
.sense = ILLEGAL_REQUEST,
|
||||
.asc = 0x91,
|
||||
.ascq = 0x36,
|
||||
.allowed = SCMD_FAILURE_NO_LIMIT,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.total_allowed = RDAC_RETRY_COUNT,
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.sshdr = &sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
spin_lock(&ctlr->ms_lock);
|
||||
|
@ -548,29 +563,25 @@ static void send_mode_select(struct work_struct *work)
|
|||
ctlr->ms_sdev = NULL;
|
||||
spin_unlock(&ctlr->ms_lock);
|
||||
|
||||
retry:
|
||||
memset(cdb, 0, sizeof(cdb));
|
||||
|
||||
data_size = rdac_failover_get(ctlr, &list, cdb);
|
||||
|
||||
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
|
||||
"%s MODE_SELECT command",
|
||||
(char *) h->ctlr->array_name, h->ctlr->index,
|
||||
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
|
||||
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, queueing MODE_SELECT command",
|
||||
(char *)h->ctlr->array_name, h->ctlr->index);
|
||||
|
||||
if (scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
|
||||
RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args)) {
|
||||
err = mode_select_handle_sense(sdev, &sshdr);
|
||||
if (err == SCSI_DH_RETRY && retry_cnt--)
|
||||
goto retry;
|
||||
if (err == SCSI_DH_IMM_RETRY)
|
||||
goto retry;
|
||||
}
|
||||
if (err == SCSI_DH_OK) {
|
||||
rc = scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
|
||||
RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args);
|
||||
if (!rc) {
|
||||
h->state = RDAC_STATE_ACTIVE;
|
||||
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
|
||||
"MODE_SELECT completed",
|
||||
(char *) h->ctlr->array_name, h->ctlr->index);
|
||||
err = SCSI_DH_OK;
|
||||
} else if (rc < 0) {
|
||||
err = SCSI_DH_IO;
|
||||
} else {
|
||||
err = mode_select_handle_sense(sdev, &sshdr);
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(qdata, tmp, &list, entry) {
|
||||
|
|
|
@@ -352,12 +352,13 @@ static void scsi_host_dev_release(struct device *dev)

 	if (shost->shost_state == SHOST_CREATED) {
 		/*
-		 * Free the shost_dev device name here if scsi_host_alloc()
-		 * and scsi_host_put() have been called but neither
+		 * Free the shost_dev device name and remove the proc host dir
+		 * here if scsi_host_{alloc,put}() have been called but neither
 		 * scsi_host_add() nor scsi_remove_host() has been called.
 		 * This avoids that the memory allocated for the shost_dev
-		 * name is leaked.
+		 * name as well as the proc dir structure are leaked.
 		 */
+		scsi_proc_hostdir_rm(shost->hostt);
 		kfree(dev_name(&shost->shost_dev));
 	}

@@ -371,7 +372,7 @@ static void scsi_host_dev_release(struct device *dev)
 	kfree(shost);
 }

-static struct device_type scsi_host_type = {
+static const struct device_type scsi_host_type = {
 	.name =		"scsi_host",
 	.release =	scsi_host_dev_release,
 };

@ -328,21 +328,46 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
|
|||
return result + 4;
|
||||
}
|
||||
|
||||
enum scsi_vpd_parameters {
|
||||
SCSI_VPD_HEADER_SIZE = 4,
|
||||
SCSI_VPD_LIST_SIZE = 36,
|
||||
};
|
||||
|
||||
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
|
||||
{
|
||||
unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
|
||||
unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
|
||||
int result;
|
||||
|
||||
if (sdev->no_vpd_size)
|
||||
return SCSI_DEFAULT_VPD_LEN;
|
||||
|
||||
/*
|
||||
* Fetch the supported pages VPD and validate that the requested page
|
||||
* number is present.
|
||||
*/
|
||||
if (page != 0) {
|
||||
result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
|
||||
if (result < SCSI_VPD_HEADER_SIZE)
|
||||
return 0;
|
||||
|
||||
if (result > sizeof(vpd)) {
|
||||
dev_warn_once(&sdev->sdev_gendev,
|
||||
"%s: long VPD page 0 length: %d bytes\n",
|
||||
__func__, result);
|
||||
result = sizeof(vpd);
|
||||
}
|
||||
|
||||
result -= SCSI_VPD_HEADER_SIZE;
|
||||
if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
* Fetch the VPD page header to find out how big the page
|
||||
* is. This is done to prevent problems on legacy devices
|
||||
* which can not handle allocation lengths as large as
|
||||
* potentially requested by the caller.
|
||||
*/
|
||||
result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
|
||||
result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
|
||||
if (result < 0)
|
||||
return 0;
|
||||
|
||||
|
@ -504,18 +529,22 @@ void scsi_attach_vpd(struct scsi_device *sdev)
|
|||
}
|
||||
|
||||
/**
|
||||
* scsi_report_opcode - Find out if a given command opcode is supported
|
||||
* scsi_report_opcode - Find out if a given command is supported
|
||||
* @sdev: scsi device to query
|
||||
* @buffer: scratch buffer (must be at least 20 bytes long)
|
||||
* @len: length of buffer
|
||||
* @opcode: opcode for command to look up
|
||||
* @opcode: opcode for the command to look up
|
||||
* @sa: service action for the command to look up
|
||||
*
|
||||
* Uses the REPORT SUPPORTED OPERATION CODES to look up the given
|
||||
* opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
|
||||
* unsupported and 1 if the device claims to support the command.
|
||||
* Uses the REPORT SUPPORTED OPERATION CODES to check support for the
|
||||
* command identified with @opcode and @sa. If the command does not
|
||||
* have a service action, @sa must be 0. Returns -EINVAL if RSOC fails,
|
||||
* 0 if the command is not supported and 1 if the device claims to
|
||||
* support the command.
|
||||
*/
|
||||
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
|
||||
unsigned int len, unsigned char opcode)
|
||||
unsigned int len, unsigned char opcode,
|
||||
unsigned short sa)
|
||||
{
|
||||
unsigned char cmd[16];
|
||||
struct scsi_sense_hdr sshdr;
|
||||
|
@ -539,8 +568,14 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
|
|||
memset(cmd, 0, 16);
|
||||
cmd[0] = MAINTENANCE_IN;
|
||||
cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
|
||||
cmd[2] = 1; /* One command format */
|
||||
cmd[3] = opcode;
|
||||
if (!sa) {
|
||||
cmd[2] = 1; /* One command format */
|
||||
cmd[3] = opcode;
|
||||
} else {
|
||||
cmd[2] = 3; /* One command format with service action */
|
||||
cmd[3] = opcode;
|
||||
put_unaligned_be16(sa, &cmd[4]);
|
||||
}
|
||||
put_unaligned_be32(request_len, &cmd[6]);
|
||||
memset(buffer, 0, len);
|
||||
|
||||
|
@ -560,6 +595,151 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
|
|||
}
|
||||
EXPORT_SYMBOL(scsi_report_opcode);
|
||||
|
||||
#define SCSI_CDL_CHECK_BUF_LEN 64
|
||||
|
||||
static bool scsi_cdl_check_cmd(struct scsi_device *sdev, u8 opcode, u16 sa,
|
||||
unsigned char *buf)
|
||||
{
|
||||
int ret;
|
||||
u8 cdlp;
|
||||
|
||||
/* Check operation code */
|
||||
ret = scsi_report_opcode(sdev, buf, SCSI_CDL_CHECK_BUF_LEN, opcode, sa);
|
||||
if (ret <= 0)
|
||||
return false;
|
||||
|
||||
if ((buf[1] & 0x03) != 0x03)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* See SPC-6, One_command parameter data format for
|
||||
* REPORT SUPPORTED OPERATION CODES. We have the following cases
|
||||
* depending on rwcdlp (buf[0] & 0x01) value:
|
||||
* - rwcdlp == 0: then cdlp indicates support for the A mode page when
|
||||
* it is equal to 1 and for the B mode page when it is
|
||||
* equal to 2.
|
||||
* - rwcdlp == 1: then cdlp indicates support for the T2A mode page
|
||||
* when it is equal to 1 and for the T2B mode page when
|
||||
* it is equal to 2.
|
||||
* Overall, to detect support for command duration limits, we only need
|
||||
* to check that cdlp is 1 or 2.
|
||||
*/
|
||||
cdlp = (buf[1] & 0x18) >> 3;
|
||||
|
||||
return cdlp == 0x01 || cdlp == 0x02;
|
||||
}
|
||||
|
||||
/**
|
||||
* scsi_cdl_check - Check if a SCSI device supports Command Duration Limits
|
||||
* @sdev: The device to check
|
||||
*/
|
||||
void scsi_cdl_check(struct scsi_device *sdev)
|
||||
{
|
||||
bool cdl_supported;
|
||||
unsigned char *buf;
|
||||
|
||||
/*
|
||||
* Support for CDL was defined in SPC-5. Ignore devices reporting an
|
||||
* lower SPC version. This also avoids problems with old drives choking
|
||||
* on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a
|
||||
* service action specified, as done in scsi_cdl_check_cmd().
|
||||
*/
|
||||
if (sdev->scsi_level < SCSI_SPC_5) {
|
||||
sdev->cdl_supported = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL);
|
||||
if (!buf) {
|
||||
sdev->cdl_supported = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check support for READ_16, WRITE_16, READ_32 and WRITE_32 commands */
|
||||
cdl_supported =
|
||||
scsi_cdl_check_cmd(sdev, READ_16, 0, buf) ||
|
||||
scsi_cdl_check_cmd(sdev, WRITE_16, 0, buf) ||
|
||||
scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, READ_32, buf) ||
|
||||
scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, WRITE_32, buf);
|
||||
if (cdl_supported) {
|
||||
/*
|
||||
* We have CDL support: force the use of READ16/WRITE16.
|
||||
* READ32 and WRITE32 will be used for devices that support
|
||||
* the T10_PI_TYPE2_PROTECTION protection type.
|
||||
*/
|
||||
sdev->use_16_for_rw = 1;
|
||||
sdev->use_10_for_rw = 0;
|
||||
|
||||
sdev->cdl_supported = 1;
|
||||
} else {
|
||||
sdev->cdl_supported = 0;
|
||||
}
|
||||
|
||||
kfree(buf);
|
||||
}
|
||||
|
||||
/**
|
||||
* scsi_cdl_enable - Enable or disable a SCSI device supports for Command
|
||||
* Duration Limits
|
||||
* @sdev: The target device
|
||||
* @enable: the target state
|
||||
*/
|
||||
int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
|
||||
{
|
||||
struct scsi_mode_data data;
|
||||
struct scsi_sense_hdr sshdr;
|
||||
struct scsi_vpd *vpd;
|
||||
bool is_ata = false;
|
||||
char buf[64];
|
||||
int ret;
|
||||
|
||||
if (!sdev->cdl_supported)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
rcu_read_lock();
|
||||
vpd = rcu_dereference(sdev->vpd_pg89);
|
||||
if (vpd)
|
||||
is_ata = true;
|
||||
rcu_read_unlock();
|
||||
|
||||
/*
|
||||
* For ATA devices, CDL needs to be enabled with a SET FEATURES command.
|
||||
*/
|
||||
if (is_ata) {
|
||||
char *buf_data;
|
||||
int len;
|
||||
|
||||
ret = scsi_mode_sense(sdev, 0x08, 0x0a, 0xf2, buf, sizeof(buf),
|
||||
5 * HZ, 3, &data, NULL);
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
||||
/* Enable CDL using the ATA feature page */
|
||||
len = min_t(size_t, sizeof(buf),
|
||||
data.length - data.header_length -
|
||||
data.block_descriptor_length);
|
||||
buf_data = buf + data.header_length +
|
||||
data.block_descriptor_length;
|
||||
if (enable)
|
||||
buf_data[4] = 0x02;
|
||||
else
|
||||
buf_data[4] = 0;
|
||||
|
||||
ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3,
|
||||
&data, &sshdr);
|
||||
if (ret) {
|
||||
if (scsi_sense_valid(&sshdr))
|
||||
scsi_print_sense_hdr(sdev,
|
||||
dev_name(&sdev->sdev_gendev), &sshdr);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
sdev->cdl_enable = enable;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* scsi_device_get - get an additional reference to a scsi_device
|
||||
* @sdev: device to get a reference to
|
||||
|
|
|
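The RWCDLP/CDLP decode performed by scsi_cdl_check_cmd() in the hunk above can be illustrated with a tiny stand-alone sketch; the sample descriptor bytes are made up:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the bit layout described in the SPC-6 comment above:
 * CDL is usable when the CDLP field decodes to 1 or 2. */
static bool cdl_supported_from_rsoc(unsigned char byte0, unsigned char byte1)
{
	unsigned char rwcdlp = byte0 & 0x01;
	unsigned char cdlp = (byte1 & 0x18) >> 3;

	printf("rwcdlp=%u cdlp=%u\n", rwcdlp, cdlp);
	return cdlp == 0x01 || cdlp == 0x02;
}

int main(void)
{
	/* Made-up bytes: command supported, rwcdlp=1, cdlp=1 (T2A page). */
	return cdl_supported_from_rsoc(0x01, 0x0b) ? 0 : 1;
}
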
@ -41,6 +41,8 @@
|
|||
#include <linux/random.h>
|
||||
#include <linux/xarray.h>
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/async.h>
|
||||
|
||||
#include <net/checksum.h>
|
||||
|
||||
|
@ -285,6 +287,46 @@ struct sdeb_zone_state { /* ZBC: per zone state */
|
|||
sector_t z_wp;
|
||||
};
|
||||
|
||||
enum sdebug_err_type {
|
||||
ERR_TMOUT_CMD = 0, /* make specific scsi command timeout */
|
||||
ERR_FAIL_QUEUE_CMD = 1, /* make specific scsi command's */
|
||||
/* queuecmd return failed */
|
||||
ERR_FAIL_CMD = 2, /* make specific scsi command's */
|
||||
/* queuecmd return succeed but */
|
||||
/* with errors set in scsi_cmnd */
|
||||
ERR_ABORT_CMD_FAILED = 3, /* control return FAILED from */
|
||||
/* scsi_debug_abort() */
|
||||
ERR_LUN_RESET_FAILED = 4, /* control return FAILED from */
|
||||
/* scsi_debug_device_reseLUN_RESET_FAILEDt() */
|
||||
};
|
||||
|
||||
struct sdebug_err_inject {
|
||||
int type;
|
||||
struct list_head list;
|
||||
int cnt;
|
||||
unsigned char cmd;
|
||||
struct rcu_head rcu;
|
||||
|
||||
union {
|
||||
/*
|
||||
* For ERR_FAIL_QUEUE_CMD
|
||||
*/
|
||||
int queuecmd_ret;
|
||||
|
||||
/*
|
||||
* For ERR_FAIL_CMD
|
||||
*/
|
||||
struct {
|
||||
unsigned char host_byte;
|
||||
unsigned char driver_byte;
|
||||
unsigned char status_byte;
|
||||
unsigned char sense_key;
|
||||
unsigned char asc;
|
||||
unsigned char asq;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
struct sdebug_dev_info {
|
||||
struct list_head dev_list;
|
||||
unsigned int channel;
|
||||
|
@ -310,6 +352,15 @@ struct sdebug_dev_info {
|
|||
unsigned int max_open;
|
||||
ktime_t create_ts; /* time since bootup that this device was created */
|
||||
struct sdeb_zone_state *zstate;
|
||||
|
||||
struct dentry *debugfs_entry;
|
||||
struct spinlock list_lock;
|
||||
struct list_head inject_err_list;
|
||||
};
|
||||
|
||||
struct sdebug_target_info {
|
||||
bool reset_fail;
|
||||
struct dentry *debugfs_entry;
|
||||
};
|
||||
|
||||
struct sdebug_host_info {
|
||||
|
@ -792,6 +843,7 @@ static bool have_dif_prot;
|
|||
static bool write_since_sync;
|
||||
static bool sdebug_statistics = DEF_STATISTICS;
|
||||
static bool sdebug_wp;
|
||||
static bool sdebug_allow_restart;
|
||||
static enum {
|
||||
BLK_ZONED_NONE = 0,
|
||||
BLK_ZONED_HA = 1,
|
||||
|
@ -847,7 +899,7 @@ static int poll_queues; /* iouring iopoll interface.*/
|
|||
static char sdebug_proc_name[] = MY_NAME;
|
||||
static const char *my_name = MY_NAME;
|
||||
|
||||
static struct bus_type pseudo_lld_bus;
|
||||
static const struct bus_type pseudo_lld_bus;
|
||||
|
||||
static struct device_driver sdebug_driverfs_driver = {
|
||||
.name = sdebug_proc_name,
|
||||
|
@ -865,6 +917,258 @@ static const int device_qfull_result =
|
|||
|
||||
static const int condition_met_result = SAM_STAT_CONDITION_MET;
|
||||
|
||||
static struct dentry *sdebug_debugfs_root;
|
||||
|
||||
static void sdebug_err_free(struct rcu_head *head)
|
||||
{
|
||||
struct sdebug_err_inject *inject =
|
||||
container_of(head, typeof(*inject), rcu);
|
||||
|
||||
kfree(inject);
|
||||
}
|
||||
|
||||
static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
|
||||
{
|
||||
struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
|
||||
struct sdebug_err_inject *err;
|
||||
|
||||
spin_lock(&devip->list_lock);
|
||||
list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
|
||||
if (err->type == new->type && err->cmd == new->cmd) {
|
||||
list_del_rcu(&err->list);
|
||||
call_rcu(&err->rcu, sdebug_err_free);
|
||||
}
|
||||
}
|
||||
|
||||
list_add_tail_rcu(&new->list, &devip->inject_err_list);
|
||||
spin_unlock(&devip->list_lock);
|
||||
}
|
||||
|
||||
static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
|
||||
{
|
||||
struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
|
||||
struct sdebug_err_inject *err;
|
||||
int type;
|
||||
unsigned char cmd;
|
||||
|
||||
if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
|
||||
kfree(buf);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock(&devip->list_lock);
|
||||
list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
|
||||
if (err->type == type && err->cmd == cmd) {
|
||||
list_del_rcu(&err->list);
|
||||
call_rcu(&err->rcu, sdebug_err_free);
|
||||
spin_unlock(&devip->list_lock);
|
||||
kfree(buf);
|
||||
return count;
|
||||
}
|
||||
}
|
||||
spin_unlock(&devip->list_lock);
|
||||
|
||||
kfree(buf);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int sdebug_error_show(struct seq_file *m, void *p)
|
||||
{
|
||||
struct scsi_device *sdev = (struct scsi_device *)m->private;
|
||||
struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
|
||||
struct sdebug_err_inject *err;
|
||||
|
||||
seq_puts(m, "Type\tCount\tCommand\n");
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
|
||||
switch (err->type) {
|
||||
case ERR_TMOUT_CMD:
|
||||
case ERR_ABORT_CMD_FAILED:
|
||||
case ERR_LUN_RESET_FAILED:
|
||||
seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
|
||||
err->cmd);
|
||||
break;
|
||||
|
||||
case ERR_FAIL_QUEUE_CMD:
|
||||
seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
|
||||
err->cnt, err->cmd, err->queuecmd_ret);
|
||||
break;
|
||||
|
||||
case ERR_FAIL_CMD:
|
||||
seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
|
||||
err->type, err->cnt, err->cmd,
|
||||
err->host_byte, err->driver_byte,
|
||||
err->status_byte, err->sense_key,
|
||||
err->asc, err->asq);
|
||||
break;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdebug_error_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, sdebug_error_show, inode->i_private);
|
||||
}
|
||||
|
||||
static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
char *buf;
|
||||
unsigned int inject_type;
|
||||
struct sdebug_err_inject *inject;
|
||||
struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
|
||||
|
||||
buf = kzalloc(count + 1, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
if (copy_from_user(buf, ubuf, count)) {
|
||||
kfree(buf);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (buf[0] == '-')
|
||||
return sdebug_err_remove(sdev, buf, count);
|
||||
|
||||
if (sscanf(buf, "%d", &inject_type) != 1) {
|
||||
kfree(buf);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
|
||||
if (!inject) {
|
||||
kfree(buf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
switch (inject_type) {
|
||||
case ERR_TMOUT_CMD:
|
||||
case ERR_ABORT_CMD_FAILED:
|
||||
case ERR_LUN_RESET_FAILED:
|
||||
if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
|
||||
&inject->cmd) != 3)
|
||||
goto out_error;
|
||||
break;
|
||||
|
||||
case ERR_FAIL_QUEUE_CMD:
|
||||
if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
|
||||
&inject->cmd, &inject->queuecmd_ret) != 4)
|
||||
goto out_error;
|
||||
break;
|
||||
|
||||
case ERR_FAIL_CMD:
|
||||
if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
|
||||
&inject->type, &inject->cnt, &inject->cmd,
|
||||
&inject->host_byte, &inject->driver_byte,
|
||||
&inject->status_byte, &inject->sense_key,
|
||||
&inject->asc, &inject->asq) != 9)
|
||||
goto out_error;
|
||||
break;
|
||||
|
||||
default:
|
||||
goto out_error;
|
||||
break;
|
||||
}
|
||||
|
||||
kfree(buf);
|
||||
sdebug_err_add(sdev, inject);
|
||||
|
||||
return count;
|
||||
|
||||
out_error:
|
||||
kfree(buf);
|
||||
kfree(inject);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static const struct file_operations sdebug_error_fops = {
|
||||
.open = sdebug_error_open,
|
||||
.read = seq_read,
|
||||
.write = sdebug_error_write,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
|
||||
{
|
||||
struct scsi_target *starget = (struct scsi_target *)m->private;
|
||||
struct sdebug_target_info *targetip =
|
||||
(struct sdebug_target_info *)starget->hostdata;
|
||||
|
||||
if (targetip)
|
||||
seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
|
||||
}
|
||||
|
||||
static ssize_t sdebug_target_reset_fail_write(struct file *file,
|
||||
const char __user *ubuf, size_t count, loff_t *ppos)
|
||||
{
|
||||
int ret;
|
||||
struct scsi_target *starget =
|
||||
(struct scsi_target *)file->f_inode->i_private;
|
||||
struct sdebug_target_info *targetip =
|
||||
(struct sdebug_target_info *)starget->hostdata;
|
||||
|
||||
if (targetip) {
|
||||
ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
|
||||
return ret < 0 ? ret : count;
|
||||
}
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static const struct file_operations sdebug_target_reset_fail_fops = {
|
||||
.open = sdebug_target_reset_fail_open,
|
||||
.read = seq_read,
|
||||
.write = sdebug_target_reset_fail_write,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int sdebug_target_alloc(struct scsi_target *starget)
|
||||
{
|
||||
struct sdebug_target_info *targetip;
|
||||
|
||||
targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
|
||||
if (!targetip)
|
||||
return -ENOMEM;
|
||||
|
||||
targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
|
||||
sdebug_debugfs_root);
|
||||
|
||||
debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
|
||||
&sdebug_target_reset_fail_fops);
|
||||
|
||||
starget->hostdata = targetip;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
|
||||
{
|
||||
struct sdebug_target_info *targetip = data;
|
||||
|
||||
debugfs_remove(targetip->debugfs_entry);
|
||||
kfree(targetip);
|
||||
}
|
||||
|
||||
static void sdebug_target_destroy(struct scsi_target *starget)
|
||||
{
|
||||
struct sdebug_target_info *targetip;
|
||||
|
||||
targetip = (struct sdebug_target_info *)starget->hostdata;
|
||||
if (targetip) {
|
||||
starget->hostdata = NULL;
|
||||
async_schedule(sdebug_tartget_cleanup_async, targetip);
|
||||
}
|
||||
}
|
||||
|
||||
/* Only do the extra work involved in logical block provisioning if one or
|
||||
* more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
|
||||
|
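The write format for the per-device "error" debugfs file added above can be read off the sscanf() calls: "<type> <count> <opcode>" plus type-specific fields, and a leading "-" removes an entry. A short user-space sketch; the debugfs path is an assumption based on the dev_name()-derived directory created in scsi_debug_slave_configure() below:

#include <stdio.h>

int main(void)
{
	/* Assumed layout: /sys/kernel/debug/scsi_debug/<h:c:t:l>/error */
	FILE *f = fopen("/sys/kernel/debug/scsi_debug/0:0:0:0/error", "w");

	if (!f)
		return 1;
	/* type 0 = ERR_TMOUT_CMD, count -1 = inject once (negative counts are
	 * consumed; a positive count injects every time), opcode 0x28 = READ(10). */
	fprintf(f, "0 -1 0x28\n");
	return fclose(f);
}
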
@ -5095,6 +5399,8 @@ static struct sdebug_dev_info *sdebug_device_create(
|
|||
}
|
||||
devip->create_ts = ktime_get_boottime();
|
||||
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
|
||||
spin_lock_init(&devip->list_lock);
|
||||
INIT_LIST_HEAD(&devip->inject_err_list);
|
||||
list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
|
||||
}
|
||||
return devip;
|
||||
|
@ -5140,6 +5446,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
|
|||
if (sdebug_verbose)
|
||||
pr_info("slave_alloc <%u %u %u %llu>\n",
|
||||
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -5147,6 +5454,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
|
|||
{
|
||||
struct sdebug_dev_info *devip =
|
||||
(struct sdebug_dev_info *)sdp->hostdata;
|
||||
struct dentry *dentry;
|
||||
|
||||
if (sdebug_verbose)
|
||||
pr_info("slave_configure <%u %u %u %llu>\n",
|
||||
|
@ -5162,6 +5470,22 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
|
|||
if (sdebug_no_uld)
|
||||
sdp->no_uld_attach = 1;
|
||||
config_cdb_len(sdp);
|
||||
|
||||
if (sdebug_allow_restart)
|
||||
sdp->allow_restart = 1;
|
||||
|
||||
devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
|
||||
sdebug_debugfs_root);
|
||||
if (IS_ERR_OR_NULL(devip->debugfs_entry))
|
||||
pr_info("%s: failed to create debugfs directory for device %s\n",
|
||||
__func__, dev_name(&sdp->sdev_gendev));
|
||||
|
||||
dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
|
||||
&sdebug_error_fops);
|
||||
if (IS_ERR_OR_NULL(dentry))
|
||||
pr_info("%s: failed to create error file for device %s\n",
|
||||
__func__, dev_name(&sdp->sdev_gendev));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -5169,15 +5493,27 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
|
|||
{
|
||||
struct sdebug_dev_info *devip =
|
||||
(struct sdebug_dev_info *)sdp->hostdata;
|
||||
struct sdebug_err_inject *err;
|
||||
|
||||
if (sdebug_verbose)
|
||||
pr_info("slave_destroy <%u %u %u %llu>\n",
|
||||
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
|
||||
if (devip) {
|
||||
/* make this slot available for re-use */
|
||||
devip->used = false;
|
||||
sdp->hostdata = NULL;
|
||||
|
||||
if (!devip)
|
||||
return;
|
||||
|
||||
spin_lock(&devip->list_lock);
|
||||
list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
|
||||
list_del_rcu(&err->list);
|
||||
call_rcu(&err->rcu, sdebug_err_free);
|
||||
}
|
||||
spin_unlock(&devip->list_lock);
|
||||
|
||||
debugfs_remove(devip->debugfs_entry);
|
||||
|
||||
/* make this slot available for re-use */
|
||||
devip->used = false;
|
||||
sdp->hostdata = NULL;
|
||||
}
|
||||
|
||||
/* Returns true if we require the queued memory to be freed by the caller. */
|
||||
|
@ -5271,9 +5607,39 @@ static void stop_all_queued(void)
|
|||
mutex_unlock(&sdebug_host_list_mutex);
|
||||
}
|
||||
|
||||
static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
|
||||
{
|
||||
struct scsi_device *sdp = cmnd->device;
|
||||
struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
|
||||
struct sdebug_err_inject *err;
|
||||
unsigned char *cmd = cmnd->cmnd;
|
||||
int ret = 0;
|
||||
|
||||
if (devip == NULL)
|
||||
return 0;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
|
||||
if (err->type == ERR_ABORT_CMD_FAILED &&
|
||||
(err->cmd == cmd[0] || err->cmd == 0xff)) {
|
||||
ret = !!err->cnt;
|
||||
if (err->cnt < 0)
|
||||
err->cnt++;
|
||||
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
|
||||
{
|
||||
bool ok = scsi_debug_abort_cmnd(SCpnt);
|
||||
u8 *cmd = SCpnt->cmnd;
|
||||
u8 opcode = cmd[0];
|
||||
|
||||
++num_aborts;
|
||||
|
||||
|
@ -5282,6 +5648,12 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
|
|||
"%s: command%s found\n", __func__,
|
||||
ok ? "" : " not");
|
||||
|
||||
if (sdebug_fail_abort(SCpnt)) {
|
||||
scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
|
||||
opcode);
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -5305,10 +5677,40 @@ static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
|
|||
scsi_debug_stop_all_queued_iter, sdp);
|
||||
}
|
||||
|
||||
static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
|
||||
{
|
||||
struct scsi_device *sdp = cmnd->device;
|
||||
struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
|
||||
struct sdebug_err_inject *err;
|
||||
unsigned char *cmd = cmnd->cmnd;
|
||||
int ret = 0;
|
||||
|
||||
if (devip == NULL)
|
||||
return 0;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
|
||||
if (err->type == ERR_LUN_RESET_FAILED &&
|
||||
(err->cmd == cmd[0] || err->cmd == 0xff)) {
|
||||
ret = !!err->cnt;
|
||||
if (err->cnt < 0)
|
||||
err->cnt++;
|
||||
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
|
||||
{
|
||||
struct scsi_device *sdp = SCpnt->device;
|
||||
struct sdebug_dev_info *devip = sdp->hostdata;
|
||||
u8 *cmd = SCpnt->cmnd;
|
||||
u8 opcode = cmd[0];
|
||||
|
||||
++num_dev_resets;
|
||||
|
||||
|
@ -5319,14 +5721,33 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
|
|||
if (devip)
|
||||
set_bit(SDEBUG_UA_POR, devip->uas_bm);
|
||||
|
||||
if (sdebug_fail_lun_reset(SCpnt)) {
|
||||
scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
|
||||
{
|
||||
struct scsi_target *starget = scsi_target(cmnd->device);
|
||||
struct sdebug_target_info *targetip =
|
||||
(struct sdebug_target_info *)starget->hostdata;
|
||||
|
||||
if (targetip)
|
||||
return targetip->reset_fail;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
|
||||
{
|
||||
struct scsi_device *sdp = SCpnt->device;
|
||||
struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
|
||||
struct sdebug_dev_info *devip;
|
||||
u8 *cmd = SCpnt->cmnd;
|
||||
u8 opcode = cmd[0];
|
||||
int k = 0;
|
||||
|
||||
++num_target_resets;
|
||||
|
@ -5344,6 +5765,12 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
|
|||
sdev_printk(KERN_INFO, sdp,
|
||||
"%s: %d device(s) found in target\n", __func__, k);
|
||||
|
||||
if (sdebug_fail_target_reset(SCpnt)) {
|
||||
scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
|
||||
opcode);
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -5771,6 +6198,7 @@ module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
|
|||
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
|
||||
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
|
||||
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
|
||||
module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
|
||||
|
||||
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
|
||||
MODULE_DESCRIPTION("SCSI debug adapter driver");
|
||||
|
@ -5843,6 +6271,7 @@ MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
|
|||
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
|
||||
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
|
||||
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
|
||||
MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
|
||||
|
||||
#define SDEBUG_INFO_LEN 256
|
||||
static char sdebug_info[SDEBUG_INFO_LEN];
|
||||
|
@ -7010,6 +7439,10 @@ static int __init scsi_debug_init(void)
|
|||
goto driver_unreg;
|
||||
}
|
||||
|
||||
sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
|
||||
if (IS_ERR_OR_NULL(sdebug_debugfs_root))
|
||||
pr_info("%s: failed to create initial debugfs directory\n", __func__);
|
||||
|
||||
for (k = 0; k < hosts_to_add; k++) {
|
||||
if (want_store && k == 0) {
|
||||
ret = sdebug_add_host_helper(idx);
|
||||
|
@ -7056,6 +7489,7 @@ static void __exit scsi_debug_exit(void)
|
|||
|
||||
sdebug_erase_all_stores(false);
|
||||
xa_destroy(per_store_ap);
|
||||
debugfs_remove(sdebug_debugfs_root);
|
||||
}
|
||||
|
||||
device_initcall(scsi_debug_init);
|
||||
|
@ -7495,6 +7929,104 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
|
|||
return num_entries;
|
||||
}
|
||||
|
||||
static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
|
||||
{
|
||||
struct scsi_device *sdp = cmnd->device;
|
||||
struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
|
||||
struct sdebug_err_inject *err;
|
||||
unsigned char *cmd = cmnd->cmnd;
|
||||
int ret = 0;
|
||||
|
||||
if (devip == NULL)
|
||||
return 0;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
|
||||
if (err->type == ERR_TMOUT_CMD &&
|
||||
(err->cmd == cmd[0] || err->cmd == 0xff)) {
|
||||
ret = !!err->cnt;
|
||||
if (err->cnt < 0)
|
||||
err->cnt++;
|
||||
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
|
||||
{
|
||||
struct scsi_device *sdp = cmnd->device;
|
||||
struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
|
||||
struct sdebug_err_inject *err;
|
||||
unsigned char *cmd = cmnd->cmnd;
|
||||
int ret = 0;
|
||||
|
||||
if (devip == NULL)
|
||||
return 0;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
|
||||
if (err->type == ERR_FAIL_QUEUE_CMD &&
|
||||
(err->cmd == cmd[0] || err->cmd == 0xff)) {
|
||||
ret = err->cnt ? err->queuecmd_ret : 0;
|
||||
if (err->cnt < 0)
|
||||
err->cnt++;
|
||||
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
|
||||
struct sdebug_err_inject *info)
|
||||
{
|
||||
struct scsi_device *sdp = cmnd->device;
|
||||
struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
|
||||
struct sdebug_err_inject *err;
|
||||
unsigned char *cmd = cmnd->cmnd;
|
||||
int ret = 0;
|
||||
int result;
|
||||
|
||||
if (devip == NULL)
|
||||
return 0;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
|
||||
if (err->type == ERR_FAIL_CMD &&
|
||||
(err->cmd == cmd[0] || err->cmd == 0xff)) {
|
||||
if (!err->cnt) {
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = !!err->cnt;
|
||||
rcu_read_unlock();
|
||||
goto out_handle;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
|
||||
out_handle:
|
||||
if (err->cnt < 0)
|
||||
err->cnt++;
|
||||
mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
|
||||
result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
|
||||
*info = *err;
|
||||
*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
|
||||
struct scsi_cmnd *scp)
|
||||
{
|
||||
|
@ -7514,6 +8046,8 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
|
|||
u8 opcode = cmd[0];
|
||||
bool has_wlun_rl;
|
||||
bool inject_now;
|
||||
int ret = 0;
|
||||
struct sdebug_err_inject err;
|
||||
|
||||
scsi_set_resid(scp, 0);
|
||||
if (sdebug_statistics) {
|
||||
|
@ -7553,6 +8087,29 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
|
|||
if (NULL == devip)
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
if (sdebug_timeout_cmd(scp)) {
|
||||
scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = sdebug_fail_queue_cmd(scp);
|
||||
if (ret) {
|
||||
scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
|
||||
opcode, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (sdebug_fail_cmd(scp, &ret, &err)) {
|
||||
scmd_printk(KERN_INFO, scp,
|
||||
"fail command 0x%x with hostbyte=0x%x, "
|
||||
"driverbyte=0x%x, statusbyte=0x%x, "
|
||||
"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
|
||||
opcode, err.host_byte, err.driver_byte,
|
||||
err.status_byte, err.sense_key, err.asc, err.asq);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
|
||||
atomic_set(&sdeb_inject_pending, 1);
|
||||
|
||||
|
@ -7671,7 +8228,6 @@ static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static struct scsi_host_template sdebug_driver_template = {
|
||||
.show_info = scsi_debug_show_info,
|
||||
.write_info = scsi_debug_write_info,
|
||||
|
@ -7701,6 +8257,8 @@ static struct scsi_host_template sdebug_driver_template = {
|
|||
.track_queue_depth = 1,
|
||||
.cmd_size = sizeof(struct sdebug_scsi_cmd),
|
||||
.init_cmd_priv = sdebug_init_cmd_priv,
|
||||
.target_alloc = sdebug_target_alloc,
|
||||
.target_destroy = sdebug_target_destroy,
|
||||
};
|
||||
|
||||
static int sdebug_driver_probe(struct device *dev)
|
||||
|
@ -7847,7 +8405,7 @@ static void sdebug_driver_remove(struct device *dev)
|
|||
scsi_host_put(sdbg_host->shost);
|
||||
}
|
||||
|
||||
static struct bus_type pseudo_lld_bus = {
|
||||
static const struct bus_type pseudo_lld_bus = {
|
||||
.name = "pseudo",
|
||||
.probe = sdebug_driver_probe,
|
||||
.remove = sdebug_driver_remove,
|
||||
|
|
|
@@ -551,9 +551,9 @@ static int scsi_dev_info_list_add_str(char *dev_list)
 		if (model)
 			strflags = strsep(&next, next_check);
 		if (!model || !strflags) {
-			printk(KERN_ERR "%s: bad dev info string '%s' '%s'"
-			       " '%s'\n", __func__, vendor, model,
-			       strflags);
+			pr_err("%s: bad dev info string '%s' '%s' '%s'\n",
+			       __func__, vendor, model ? model : "",
+			       strflags ? strflags : "");
 			res = -EINVAL;
 		} else
 			res = scsi_dev_info_list_add(0 /* compatible */, vendor,
@@ -302,6 +302,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
 	int ret;

 	WARN_ON_ONCE(!shost->ehandler);
+	WARN_ON_ONCE(!test_bit(SCMD_STATE_INFLIGHT, &scmd->state));

 	spin_lock_irqsave(shost->host_lock, flags);
 	if (scsi_host_set_state(shost, SHOST_RECOVERY)) {
@@ -318,7 +319,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
 	 * Ensure that all tasks observe the host state change before the
 	 * host_failed change.
 	 */
-	call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
+	call_rcu_hurry(&scmd->rcu, scsi_eh_inc_host_failed);
 }

 /**
@@ -537,6 +538,7 @@ static inline void set_scsi_ml_byte(struct scsi_cmnd *cmd, u8 status)
  */
 enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
 {
+	struct request *req = scsi_cmd_to_rq(scmd);
 	struct scsi_device *sdev = scmd->device;
 	struct scsi_sense_hdr sshdr;

@@ -596,6 +598,22 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
 		if (sshdr.asc == 0x10) /* DIF */
 			return SUCCESS;

+		/*
+		 * Check aborts due to command duration limit policy:
+		 * ABORTED COMMAND additional sense code with the
+		 * COMMAND TIMEOUT BEFORE PROCESSING or
+		 * COMMAND TIMEOUT DURING PROCESSING or
+		 * COMMAND TIMEOUT DURING PROCESSING DUE TO ERROR RECOVERY
+		 * additional sense code qualifiers.
+		 */
+		if (sshdr.asc == 0x2e &&
+		    sshdr.ascq >= 0x01 && sshdr.ascq <= 0x03) {
+			set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT);
+			req->cmd_flags |= REQ_FAILFAST_DEV;
+			req->rq_flags |= RQF_QUIET;
+			return SUCCESS;
+		}
+
 		if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF)
 			return ADD_TO_MLQUEUE;
 		if (sshdr.asc == 0xc1 && sshdr.ascq == 0x01 &&
@ -692,6 +710,14 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
|
|||
}
|
||||
return SUCCESS;
|
||||
|
||||
case COMPLETED:
|
||||
if (sshdr.asc == 0x55 && sshdr.ascq == 0x0a) {
|
||||
set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT);
|
||||
req->cmd_flags |= REQ_FAILFAST_DEV;
|
||||
req->rq_flags |= RQF_QUIET;
|
||||
}
|
||||
return SUCCESS;
|
||||
|
||||
default:
|
||||
return SUCCESS;
|
||||
}
|
||||
|
@ -786,6 +812,14 @@ static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd)
|
|||
switch (get_status_byte(scmd)) {
|
||||
case SAM_STAT_GOOD:
|
||||
scsi_handle_queue_ramp_up(scmd->device);
|
||||
if (scmd->sense_buffer && SCSI_SENSE_VALID(scmd))
|
||||
/*
|
||||
* If we have sense data, call scsi_check_sense() in
|
||||
* order to set the correct SCSI ML byte (if any).
|
||||
* No point in checking the return value, since the
|
||||
* command has already completed successfully.
|
||||
*/
|
||||
scsi_check_sense(scmd);
|
||||
fallthrough;
|
||||
case SAM_STAT_COMMAND_TERMINATED:
|
||||
return SUCCESS;
|
||||
|
@@ -1811,6 +1845,10 @@ bool scsi_noretry_cmd(struct scsi_cmnd *scmd)
return !!(req->cmd_flags & REQ_FAILFAST_DRIVER);
}

/* Never retry commands aborted due to a duration limit timeout */
if (scsi_ml_byte(scmd->result) == SCSIML_STAT_DL_TIMEOUT)
return true;

if (!scsi_status_is_check_condition(scmd->result))
return false;

@@ -1970,6 +2008,14 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
if (scmd->cmnd[0] == REPORT_LUNS)
scmd->device->sdev_target->expecting_lun_change = 0;
scsi_handle_queue_ramp_up(scmd->device);
if (scmd->sense_buffer && SCSI_SENSE_VALID(scmd))
/*
* If we have sense data, call scsi_check_sense() in
* order to set the correct SCSI ML byte (if any).
* No point in checking the return value, since the
* command has already completed successfully.
*/
scsi_check_sense(scmd);
fallthrough;
case SAM_STAT_COMMAND_TERMINATED:
return SUCCESS;

@@ -2154,15 +2200,18 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
struct scsi_cmnd *scmd, *next;

list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
struct scsi_device *sdev = scmd->device;

list_del_init(&scmd->eh_entry);
if (scsi_device_online(scmd->device) &&
!scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) &&
scsi_eh_should_retry_cmd(scmd)) {
if (scsi_device_online(sdev) && !scsi_noretry_cmd(scmd) &&
scsi_cmd_retry_allowed(scmd) &&
scsi_eh_should_retry_cmd(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"%s: flush retry cmd\n",
current->comm));
scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
blk_mq_kick_requeue_list(sdev->request_queue);
} else {
/*
* If just we got sense for the device (called

@@ -383,7 +383,7 @@ static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
* fill in all the output members
*/
hdr->status = req->result & 0xff;
hdr->masked_status = status_byte(req->result);
hdr->masked_status = sg_status_byte(req->result);
hdr->msg_status = COMMAND_COMPLETE;
hdr->host_status = host_byte(req->result);
hdr->driver_status = 0;

@@ -214,6 +214,92 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
}
EXPORT_SYMBOL(__scsi_execute);

void scsi_failures_reset_retries(struct scsi_failures *failures)
{
struct scsi_failure *failure;

failures->total_retries = 0;

for (failure = failures->failure_definitions; failure->result;
failure++)
failure->retries = 0;
}
EXPORT_SYMBOL_GPL(scsi_failures_reset_retries);

/**
* scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry.
* @scmd: scsi_cmnd to check.
* @failures: scsi_failures struct that lists failures to check for.
*
* Returns -EAGAIN if the caller should retry else 0.
*/
static int scsi_check_passthrough(struct scsi_cmnd *scmd,
struct scsi_failures *failures)
{
struct scsi_failure *failure;
struct scsi_sense_hdr sshdr;
enum sam_status status;

if (!failures)
return 0;

for (failure = failures->failure_definitions; failure->result;
failure++) {
if (failure->result == SCMD_FAILURE_RESULT_ANY)
goto maybe_retry;

if (host_byte(scmd->result) &&
host_byte(scmd->result) == host_byte(failure->result))
goto maybe_retry;

status = status_byte(scmd->result);
if (!status)
continue;

if (failure->result == SCMD_FAILURE_STAT_ANY &&
!scsi_status_is_good(scmd->result))
goto maybe_retry;

if (status != status_byte(failure->result))
continue;

if (status_byte(failure->result) != SAM_STAT_CHECK_CONDITION ||
failure->sense == SCMD_FAILURE_SENSE_ANY)
goto maybe_retry;

if (!scsi_command_normalize_sense(scmd, &sshdr))
return 0;

if (failure->sense != sshdr.sense_key)
continue;

if (failure->asc == SCMD_FAILURE_ASC_ANY)
goto maybe_retry;

if (failure->asc != sshdr.asc)
continue;

if (failure->ascq == SCMD_FAILURE_ASCQ_ANY ||
failure->ascq == sshdr.ascq)
goto maybe_retry;
}

return 0;

maybe_retry:
if (failure->allowed) {
if (failure->allowed == SCMD_FAILURE_NO_LIMIT ||
++failure->retries <= failure->allowed)
return -EAGAIN;
} else {
if (failures->total_allowed == SCMD_FAILURE_NO_LIMIT ||
++failures->total_retries <= failures->total_allowed)
return -EAGAIN;
}

return 0;
}
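
For callers, a scsi_failures table replaces the open-coded retry loops that several drivers used around command submission. The fragment below is a minimal sketch of that pattern; the function name and retry counts are invented for the example, and the usual SCSI headers are assumed to be included as in the surrounding file. It mirrors the conversions made later in this series.

/*
 * Illustrative only: retry TEST UNIT READY on UNIT ATTENTION up to five
 * times, and on any other failure up to three times in total, letting
 * scsi_execute_cmd()/scsi_check_passthrough() drive the retries.
 */
static int example_test_unit_ready(struct scsi_device *sdev)
{
        static const u8 cmd[6] = { TEST_UNIT_READY };
        struct scsi_failure failure_defs[] = {
                {
                        .sense = UNIT_ATTENTION,
                        .asc = SCMD_FAILURE_ASC_ANY,
                        .ascq = SCMD_FAILURE_ASCQ_ANY,
                        .allowed = 5,
                        .result = SAM_STAT_CHECK_CONDITION,
                },
                {
                        .result = SCMD_FAILURE_RESULT_ANY,
                        .allowed = 3,
                },
                {}
        };
        struct scsi_failures failures = {
                .failure_definitions = failure_defs,
        };
        const struct scsi_exec_args exec_args = {
                .failures = &failures,
        };

        return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
                                10 * HZ, 1, &exec_args);
}
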
/**
* scsi_execute_cmd - insert request and wait for the result
* @sdev: scsi_device

@@ -222,7 +308,7 @@ EXPORT_SYMBOL(__scsi_execute);
* @buffer: data buffer
* @bufflen: len of buffer
* @timeout: request timeout in HZ
* @retries: number of times to retry request
* @ml_retries: number of times SCSI midlayer will retry request
* @args: Optional args. See struct definition for field descriptions
*
* Returns the scsi_cmnd result field if a command was executed, or a negative

@@ -230,7 +316,7 @@ EXPORT_SYMBOL(__scsi_execute);
*/
int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
blk_opf_t opf, void *buffer, unsigned int bufflen,
int timeout, int retries,
int timeout, int ml_retries,
const struct scsi_exec_args *args)
{
static const struct scsi_exec_args default_args;

@@ -245,6 +331,7 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
args->sense_len != SCSI_SENSE_BUFFERSIZE))
return -EINVAL;

retry:
req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
if (IS_ERR(req))
return PTR_ERR(req);

@@ -259,7 +346,7 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
}
rq->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(rq->cmd, cmd, rq->cmd_len);
rq->retries = retries;
rq->retries = ml_retries;
scmd = blk_mq_rq_to_pdu(req);
scmd->flags |= args->scmd_flags;
req->timeout = timeout;

@@ -270,6 +357,11 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
*/
blk_execute_rq(req, true);

if (scsi_check_passthrough(scmd, args->failures) == -EAGAIN) {
blk_mq_free_request(req);
goto retry;
}

/*
* Some devices (USB mass-storage in particular) may transfer
* garbage data together with a residue indicating that the data
@ -577,10 +669,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
|
|||
if (blk_queue_add_random(q))
|
||||
add_disk_randomness(req->q->disk);
|
||||
|
||||
if (!blk_rq_is_passthrough(req)) {
|
||||
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
|
||||
cmd->flags &= ~SCMD_INITIALIZED;
|
||||
}
|
||||
WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
|
||||
!(cmd->flags & SCMD_INITIALIZED));
|
||||
cmd->flags = 0;
|
||||
|
||||
/*
|
||||
* Calling rcu_barrier() is not necessary here because the
|
||||
|
@ -613,11 +704,6 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
|
|||
return false;
|
||||
}
|
||||
|
||||
static inline u8 get_scsi_ml_byte(int result)
|
||||
{
|
||||
return (result >> 8) & 0xff;
|
||||
}
|
||||
|
||||
/**
|
||||
* scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
|
||||
* @result: scsi error code
|
||||
|
@@ -630,7 +716,7 @@ static blk_status_t scsi_result_to_blk_status(int result)
* Check the scsi-ml byte first in case we converted a host or status
* byte.
*/
switch (get_scsi_ml_byte(result)) {
switch (scsi_ml_byte(result)) {
case SCSIML_STAT_OK:
break;
case SCSIML_STAT_RESV_CONFLICT:

@@ -641,6 +727,8 @@ static blk_status_t scsi_result_to_blk_status(int result)
return BLK_STS_MEDIUM;
case SCSIML_STAT_TGT_FAILURE:
return BLK_STS_TARGET;
case SCSIML_STAT_DL_TIMEOUT:
return BLK_STS_DURATION_LIMIT;
}

switch (host_byte(result)) {
@@ -813,6 +901,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x1b: /* sanitize in progress */
case 0x1d: /* configuration in progress */
case 0x24: /* depopulation in progress */
case 0x25: /* depopulation restore in progress */
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */

@@ -838,6 +927,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
}
break;
case COMPLETED:
fallthrough;
default:
action = ACTION_FAIL;
break;
@ -1330,28 +1421,26 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
|
|||
int token;
|
||||
|
||||
token = sbitmap_get(&sdev->budget_map);
|
||||
if (atomic_read(&sdev->device_blocked)) {
|
||||
if (token < 0)
|
||||
goto out;
|
||||
if (token < 0)
|
||||
return -1;
|
||||
|
||||
if (scsi_device_busy(sdev) > 1)
|
||||
goto out_dec;
|
||||
if (!atomic_read(&sdev->device_blocked))
|
||||
return token;
|
||||
|
||||
/*
|
||||
* unblock after device_blocked iterates to zero
|
||||
*/
|
||||
if (atomic_dec_return(&sdev->device_blocked) > 0)
|
||||
goto out_dec;
|
||||
SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
|
||||
"unblocking device at zero depth\n"));
|
||||
/*
|
||||
* Only unblock if no other commands are pending and
|
||||
* if device_blocked has decreased to zero
|
||||
*/
|
||||
if (scsi_device_busy(sdev) > 1 ||
|
||||
atomic_dec_return(&sdev->device_blocked) > 0) {
|
||||
sbitmap_put(&sdev->budget_map, token);
|
||||
return -1;
|
||||
}
|
||||
|
||||
SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
|
||||
"unblocking device at zero depth\n"));
|
||||
|
||||
return token;
|
||||
out_dec:
|
||||
if (token >= 0)
|
||||
sbitmap_put(&sdev->budget_map, token);
|
||||
out:
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2212,6 +2301,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
|
|||
* @sdev: SCSI device to be queried
|
||||
* @dbd: set to prevent mode sense from returning block descriptors
|
||||
* @modepage: mode page being requested
|
||||
* @subpage: sub-page of the mode page being requested
|
||||
* @buffer: request buffer (may not be smaller than eight bytes)
|
||||
* @len: length of request buffer.
|
||||
* @timeout: command timeout
|
||||
|
@ -2223,18 +2313,32 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
|
|||
* Returns zero if successful, or a negative error number on failure
|
||||
*/
|
||||
int
|
||||
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
|
||||
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
|
||||
unsigned char *buffer, int len, int timeout, int retries,
|
||||
struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
|
||||
{
|
||||
unsigned char cmd[12];
|
||||
int use_10_for_ms;
|
||||
int header_length;
|
||||
int result, retry_count = retries;
|
||||
int result;
|
||||
struct scsi_sense_hdr my_sshdr;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = SCMD_FAILURE_ASC_ANY,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.allowed = retries,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
/* caller might not be interested in sense, but we need it */
|
||||
.sshdr = sshdr ? : &my_sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
memset(data, 0, sizeof(*data));
|
||||
|
@ -2243,6 +2347,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
|
|||
dbd = sdev->set_dbd_for_ms ? 8 : dbd;
|
||||
cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
|
||||
cmd[2] = modepage;
|
||||
cmd[3] = subpage;
|
||||
|
||||
sshdr = exec_args.sshdr;
|
||||
|
||||
|
@ -2295,12 +2400,6 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
|
|||
goto retry;
|
||||
}
|
||||
}
|
||||
if (scsi_status_is_check_condition(result) &&
|
||||
sshdr->sense_key == UNIT_ATTENTION &&
|
||||
retry_count) {
|
||||
retry_count--;
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -2357,10 +2456,10 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
|
|||
do {
|
||||
result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
|
||||
timeout, 1, &exec_args);
|
||||
if (sdev->removable && scsi_sense_valid(sshdr) &&
|
||||
if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) &&
|
||||
sshdr->sense_key == UNIT_ATTENTION)
|
||||
sdev->changed = 1;
|
||||
} while (scsi_sense_valid(sshdr) &&
|
||||
} while (result > 0 && scsi_sense_valid(sshdr) &&
|
||||
sshdr->sense_key == UNIT_ATTENTION && --retries);
|
||||
|
||||
return result;
|
||||
|
|
|
@@ -27,8 +27,14 @@ enum scsi_ml_status {
SCSIML_STAT_NOSPC = 0x02, /* Space allocation on the dev failed */
SCSIML_STAT_MED_ERROR = 0x03, /* Medium error */
SCSIML_STAT_TGT_FAILURE = 0x04, /* Permanent target failure */
SCSIML_STAT_DL_TIMEOUT = 0x05, /* Command Duration Limit timeout */
};

static inline u8 scsi_ml_byte(int result)
{
return (result >> 8) & 0xff;
}

/*
* Scsi Error Handler Flags
*/
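
The new scsi_ml_byte() helper reads the midlayer status out of bits 8-15 of the result word. The standalone sketch below illustrates the encoding and decoding, assuming the usual result-word layout (status byte in bits 0-7, midlayer byte in bits 8-15); the constants simply mirror the values defined above.

#include <stdio.h>

#define SAM_STAT_CHECK_CONDITION        0x02
#define SCSIML_STAT_DL_TIMEOUT          0x05

static unsigned char scsi_ml_byte(int result)
{
        return (result >> 8) & 0xff;
}

int main(void)
{
        /* a check condition whose midlayer byte marks a CDL timeout */
        int result = SAM_STAT_CHECK_CONDITION | (SCSIML_STAT_DL_TIMEOUT << 8);

        printf("ml byte = 0x%02x\n", scsi_ml_byte(result));     /* 0x05 */
        return 0;
}
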
@@ -151,7 +157,7 @@ extern void scsi_sysfs_device_initialize(struct scsi_device *);
extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);

extern struct bus_type scsi_bus_type;
extern const struct bus_type scsi_bus_type;
extern const struct attribute_group *scsi_sysfs_shost_attr_groups[];

/* scsi_netlink.c */

@ -412,7 +412,7 @@ static void scsi_target_dev_release(struct device *dev)
|
|||
put_device(parent);
|
||||
}
|
||||
|
||||
static struct device_type scsi_target_type = {
|
||||
static const struct device_type scsi_target_type = {
|
||||
.name = "scsi_target",
|
||||
.release = scsi_target_dev_release,
|
||||
};
|
||||
|
@ -626,6 +626,7 @@ void scsi_sanitize_inquiry_string(unsigned char *s, int len)
|
|||
}
|
||||
EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
|
||||
|
||||
|
||||
/**
|
||||
* scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
|
||||
* @sdev: scsi_device to probe
|
||||
|
@ -647,10 +648,36 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
|
|||
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
|
||||
int response_len = 0;
|
||||
int pass, count, result, resid;
|
||||
struct scsi_sense_hdr sshdr;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
/*
|
||||
* not-ready to ready transition [asc/ascq=0x28/0x0] or
|
||||
* power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
|
||||
* should not yield UNIT_ATTENTION but many buggy devices do
|
||||
* so anyway.
|
||||
*/
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = 0x28,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = 0x29,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{
|
||||
.allowed = 1,
|
||||
.result = DID_TIME_OUT << 16,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.total_allowed = 3,
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.sshdr = &sshdr,
|
||||
.resid = &resid,
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
*bflags = 0;
|
||||
|
@ -668,6 +695,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
|
|||
pass, try_inquiry_len));
|
||||
|
||||
/* Each pass gets up to three chances to ignore Unit Attention */
|
||||
scsi_failures_reset_retries(&failures);
|
||||
|
||||
for (count = 0; count < 3; ++count) {
|
||||
memset(scsi_cmd, 0, 6);
|
||||
scsi_cmd[0] = INQUIRY;
|
||||
|
@ -684,22 +713,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
|
|||
"scsi scan: INQUIRY %s with code 0x%x\n",
|
||||
result ? "failed" : "successful", result));
|
||||
|
||||
if (result > 0) {
|
||||
/*
|
||||
* not-ready to ready transition [asc/ascq=0x28/0x0]
|
||||
* or power-on, reset [asc/ascq=0x29/0x0], continue.
|
||||
* INQUIRY should not yield UNIT_ATTENTION
|
||||
* but many buggy devices do so anyway.
|
||||
*/
|
||||
if (scsi_status_is_check_condition(result) &&
|
||||
scsi_sense_valid(&sshdr)) {
|
||||
if ((sshdr.sense_key == UNIT_ATTENTION) &&
|
||||
((sshdr.asc == 0x28) ||
|
||||
(sshdr.asc == 0x29)) &&
|
||||
(sshdr.ascq == 0))
|
||||
continue;
|
||||
}
|
||||
} else if (result == 0) {
|
||||
if (result == 0) {
|
||||
/*
|
||||
* if nothing was transferred, we try
|
||||
* again. It's a workaround for some USB
|
||||
|
@ -822,7 +836,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
|
|||
* device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
|
||||
* non-zero LUNs can be scanned.
|
||||
*/
|
||||
sdev->scsi_level = inq_result[2] & 0x07;
|
||||
sdev->scsi_level = inq_result[2] & 0x0f;
|
||||
if (sdev->scsi_level >= 2 ||
|
||||
(sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
|
||||
sdev->scsi_level++;
|
||||
|
@ -1087,6 +1101,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
|
|||
if (sdev->scsi_level >= SCSI_3)
|
||||
scsi_attach_vpd(sdev);
|
||||
|
||||
scsi_cdl_check(sdev);
|
||||
|
||||
sdev->max_queue_depth = sdev->queue_depth;
|
||||
WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
|
||||
sdev->sdev_bflags = *bflags;
|
||||
|
@ -1400,14 +1416,34 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
|
|||
unsigned int length;
|
||||
u64 lun;
|
||||
unsigned int num_luns;
|
||||
unsigned int retries;
|
||||
int result;
|
||||
struct scsi_lun *lunp, *lun_data;
|
||||
struct scsi_sense_hdr sshdr;
|
||||
struct scsi_device *sdev;
|
||||
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = SCMD_FAILURE_ASC_ANY,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
/* Fail all CCs except the UA above */
|
||||
{
|
||||
.sense = SCMD_FAILURE_SENSE_ANY,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
/* Retry any other errors not listed above */
|
||||
{
|
||||
.result = SCMD_FAILURE_RESULT_ANY,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.total_allowed = 3,
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.sshdr = &sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
int ret = 0;
|
||||
|
||||
|
@ -1478,29 +1514,18 @@ retry:
|
|||
* should come through as a check condition, and will not generate
|
||||
* a retry.
|
||||
*/
|
||||
for (retries = 0; retries < 3; retries++) {
|
||||
SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
|
||||
"scsi scan: Sending REPORT LUNS to (try %d)\n",
|
||||
retries));
|
||||
scsi_failures_reset_retries(&failures);
|
||||
|
||||
result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
|
||||
lun_data, length,
|
||||
SCSI_REPORT_LUNS_TIMEOUT, 3,
|
||||
&exec_args);
|
||||
SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
|
||||
"scsi scan: Sending REPORT LUNS\n"));
|
||||
|
||||
SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
|
||||
"scsi scan: REPORT LUNS"
|
||||
" %s (try %d) result 0x%x\n",
|
||||
result ? "failed" : "successful",
|
||||
retries, result));
|
||||
if (result == 0)
|
||||
break;
|
||||
else if (scsi_sense_valid(&sshdr)) {
|
||||
if (sshdr.sense_key != UNIT_ATTENTION)
|
||||
break;
|
||||
}
|
||||
}
|
||||
result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
|
||||
length, SCSI_REPORT_LUNS_TIMEOUT, 3,
|
||||
&exec_args);
|
||||
|
||||
SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
|
||||
"scsi scan: REPORT LUNS %s result 0x%x\n",
|
||||
result ? "failed" : "successful", result));
|
||||
if (result) {
|
||||
/*
|
||||
* The device probably does not support a REPORT LUN command
|
||||
|
@ -1637,6 +1662,7 @@ int scsi_rescan_device_rh(struct device *dev)
|
|||
}
|
||||
|
||||
scsi_attach_vpd(sdev);
|
||||
scsi_cdl_check(sdev);
|
||||
|
||||
if (sdev->handler && sdev->handler->rescan)
|
||||
sdev->handler->rescan(sdev);
|
||||
|
|
|
@@ -21,25 +21,11 @@ static struct ctl_table scsi_table[] = {
{ }
};

static struct ctl_table scsi_dir_table[] = {
{ .procname = "scsi",
.mode = 0555,
.child = scsi_table },
{ }
};

static struct ctl_table scsi_root_table[] = {
{ .procname = "dev",
.mode = 0555,
.child = scsi_dir_table },
{ }
};

static struct ctl_table_header *scsi_table_header;

int __init scsi_init_sysctl(void)
{
scsi_table_header = register_sysctl_table(scsi_root_table);
scsi_table_header = register_sysctl("dev/scsi", scsi_table);
if (!scsi_table_header)
return -ENOMEM;
return 0;
@ -27,7 +27,7 @@
|
|||
#include "scsi_priv.h"
|
||||
#include "scsi_logging.h"
|
||||
|
||||
static struct device_type scsi_dev_type;
|
||||
static const struct device_type scsi_dev_type;
|
||||
|
||||
static const struct {
|
||||
enum scsi_device_state value;
|
||||
|
@ -565,7 +565,7 @@ static int scsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct bus_type scsi_bus_type = {
|
||||
const struct bus_type scsi_bus_type = {
|
||||
.name = "scsi",
|
||||
.match = scsi_bus_match,
|
||||
.uevent = scsi_bus_uevent,
|
||||
|
@@ -687,6 +687,7 @@ sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
sdev_rd_attr (cdl_supported, "%d\n");

static ssize_t
sdev_show_device_busy(struct device *dev, struct device_attribute *attr,

@@ -1238,6 +1239,33 @@ static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
sdev_show_queue_ramp_up_period,
sdev_store_queue_ramp_up_period);

static ssize_t sdev_show_cdl_enable(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);

return sysfs_emit(buf, "%d\n", (int)sdev->cdl_enable);
}

static ssize_t sdev_store_cdl_enable(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
bool v;

if (kstrtobool(buf, &v))
return -EINVAL;

ret = scsi_cdl_enable(to_scsi_device(dev), v);
if (ret)
return ret;

return count;
}
static DEVICE_ATTR(cdl_enable, S_IRUGO | S_IWUSR,
sdev_show_cdl_enable, sdev_store_cdl_enable);
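
From userspace the new attribute behaves like any other boolean sysfs file: kstrtobool() accepts "1"/"0" (and "y"/"n"), and scsi_cdl_enable() propagates the setting to the device. A minimal illustration, assuming the usual scsi_device sysfs layout and a disk named sda:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* path is an assumption for the example; adjust the disk name */
        const char *path = "/sys/block/sda/device/cdl_enable";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror(path);
                return 1;
        }
        if (write(fd, "1", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}
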
static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
|
||||
struct attribute *attr, int i)
|
||||
{
|
||||
|
@ -1317,6 +1345,8 @@ static struct attribute *scsi_sdev_attrs[] = {
|
|||
&dev_attr_preferred_path.attr,
|
||||
#endif
|
||||
&dev_attr_queue_ramp_up_period.attr,
|
||||
&dev_attr_cdl_supported.attr,
|
||||
&dev_attr_cdl_enable.attr,
|
||||
REF_EVT(media_change),
|
||||
REF_EVT(inquiry_change_reported),
|
||||
REF_EVT(capacity_change_reported),
|
||||
|
@ -1646,7 +1676,7 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct device_type scsi_dev_type = {
|
||||
static const struct device_type scsi_dev_type = {
|
||||
.name = "scsi_device",
|
||||
.release = scsi_device_dev_release,
|
||||
.groups = scsi_sdev_attr_groups,
|
||||
|
|
|
@ -1201,7 +1201,7 @@ static const struct device_type iscsi_flashnode_conn_dev_type = {
|
|||
.release = iscsi_flashnode_conn_release,
|
||||
};
|
||||
|
||||
static struct bus_type iscsi_flashnode_bus;
|
||||
static const struct bus_type iscsi_flashnode_bus;
|
||||
|
||||
int iscsi_flashnode_bus_match(struct device *dev,
|
||||
struct device_driver *drv)
|
||||
|
@ -1212,7 +1212,7 @@ int iscsi_flashnode_bus_match(struct device *dev,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
|
||||
|
||||
static struct bus_type iscsi_flashnode_bus = {
|
||||
static const struct bus_type iscsi_flashnode_bus = {
|
||||
.name = "iscsi_flashnode",
|
||||
.match = &iscsi_flashnode_bus_match,
|
||||
};
|
||||
|
|
|
@ -1246,7 +1246,7 @@ int sas_read_port_mode_page(struct scsi_device *sdev)
|
|||
if (!buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
error = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
|
||||
error = scsi_mode_sense(sdev, 1, 0x19, 0, buffer, BUF_SIZE, 30*HZ, 3,
|
||||
&mode_data, NULL);
|
||||
|
||||
if (error)
|
||||
|
|
|
@ -108,29 +108,30 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
|
|||
enum req_op op, void *buffer, unsigned int bufflen,
|
||||
struct scsi_sense_hdr *sshdr)
|
||||
{
|
||||
int i, result;
|
||||
struct scsi_sense_hdr sshdr_tmp;
|
||||
blk_opf_t opf = op | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
|
||||
REQ_FAILFAST_DRIVER;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = SCMD_FAILURE_ASC_ANY,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.allowed = DV_RETRIES,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
/* bypass the SDEV_QUIESCE state with BLK_MQ_REQ_PM */
|
||||
.req_flags = BLK_MQ_REQ_PM,
|
||||
.sshdr = sshdr ? : &sshdr_tmp,
|
||||
.sshdr = sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
sshdr = exec_args.sshdr;
|
||||
|
||||
for(i = 0; i < DV_RETRIES; i++) {
|
||||
/*
|
||||
* The purpose of the RQF_PM flag below is to bypass the
|
||||
* SDEV_QUIESCE state.
|
||||
*/
|
||||
result = scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen,
|
||||
DV_TIMEOUT, 1, &exec_args);
|
||||
if (result < 0 || !scsi_sense_valid(sshdr) ||
|
||||
sshdr->sense_key != UNIT_ATTENTION)
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
return scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen, DV_TIMEOUT, 1,
|
||||
&exec_args);
|
||||
}
|
||||
|
||||
static struct {
|
||||
|
@ -676,10 +677,10 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
|
|||
for (r = 0; r < retries; r++) {
|
||||
result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT,
|
||||
buffer, len, &sshdr);
|
||||
if(result || !scsi_device_online(sdev)) {
|
||||
if (result || !scsi_device_online(sdev)) {
|
||||
|
||||
scsi_device_set_state(sdev, SDEV_QUIESCE);
|
||||
if (scsi_sense_valid(&sshdr)
|
||||
if (result > 0 && scsi_sense_valid(&sshdr)
|
||||
&& sshdr.sense_key == ILLEGAL_REQUEST
|
||||
/* INVALID FIELD IN CDB */
|
||||
&& sshdr.asc == 0x24 && sshdr.ascq == 0x00)
|
||||
|
|
|
@ -150,7 +150,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
|
|||
struct scsi_mode_data data;
|
||||
struct scsi_sense_hdr sshdr;
|
||||
static const char temp[] = "temporary ";
|
||||
int len;
|
||||
int len, ret;
|
||||
|
||||
if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
|
||||
/* no cache control on RBC devices; theoretically they
|
||||
|
@ -179,7 +179,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
|
|||
return count;
|
||||
}
|
||||
|
||||
if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
|
||||
if (scsi_mode_sense(sdp, 0x08, 8, 0, buffer, sizeof(buffer), SD_TIMEOUT,
|
||||
sdkp->max_retries, &data, NULL))
|
||||
return -EINVAL;
|
||||
len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
|
||||
|
@ -197,9 +197,10 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
|
|||
*/
|
||||
data.device_specific = 0;
|
||||
|
||||
if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
|
||||
sdkp->max_retries, &data, &sshdr)) {
|
||||
if (scsi_sense_valid(&sshdr))
|
||||
ret = scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
|
||||
sdkp->max_retries, &data, &sshdr);
|
||||
if (ret) {
|
||||
if (ret > 0 && scsi_sense_valid(&sshdr))
|
||||
sd_print_sense_hdr(sdkp, &sshdr);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@@ -1010,7 +1011,7 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)

static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
sector_t lba, unsigned int nr_blocks,
unsigned char flags)
unsigned char flags, unsigned int dld)
{
cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
if (unlikely(cmd->cmnd == NULL))

@@ -1023,6 +1024,7 @@ static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
cmd->cmnd[7] = 0x18; /* Additional CDB len */
cmd->cmnd[9] = write ? WRITE_32 : READ_32;
cmd->cmnd[10] = flags;
cmd->cmnd[11] = dld & 0x07;
put_unaligned_be64(lba, &cmd->cmnd[12]);
put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

@@ -1032,12 +1034,12 @@ static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,

static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
sector_t lba, unsigned int nr_blocks,
unsigned char flags)
unsigned char flags, unsigned int dld)
{
cmd->cmd_len = 16;
cmd->cmnd[0] = write ? WRITE_16 : READ_16;
cmd->cmnd[1] = flags;
cmd->cmnd[14] = 0;
cmd->cmnd[1] = flags | ((dld >> 2) & 0x01);
cmd->cmnd[14] = (dld & 0x03) << 6;
cmd->cmnd[15] = 0;
put_unaligned_be64(lba, &cmd->cmnd[2]);
put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
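
The READ(16)/WRITE(16) path above splits the 3-bit limit descriptor index across two CDB bytes: bit 2 into byte 1 bit 0, bits 1:0 into byte 14 bits 7:6. A standalone sketch of just that bit manipulation (the helper name is invented for the example):

#include <stdio.h>

static void encode_rw16_dld(unsigned char *cdb, unsigned int dld)
{
        cdb[1] |= (dld >> 2) & 0x01;
        cdb[14] |= (dld & 0x03) << 6;
}

int main(void)
{
        unsigned char cdb[16] = { 0x88 };       /* READ(16) opcode */

        encode_rw16_dld(cdb, 5);                /* descriptor 5 = 0b101 */
        printf("byte1=0x%02x byte14=0x%02x\n", cdb[1], cdb[14]);
        return 0;
}
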
@@ -1089,6 +1091,31 @@ static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
return BLK_STS_OK;
}

/*
* Check if a command has a duration limit set. If it does, and the target
* device supports CDL and the feature is enabled, return the limit
* descriptor index to use. Return 0 (no limit) otherwise.
*/
static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd)
{
struct scsi_device *sdp = sdkp->device;
int hint;

if (!sdp->cdl_supported || !sdp->cdl_enable)
return 0;

/*
* Use "no limit" if the request ioprio does not specify a duration
* limit hint.
*/
hint = IOPRIO_PRIO_HINT(req_get_ioprio(scsi_cmd_to_rq(scmd)));
if (hint < IOPRIO_HINT_DEV_DURATION_LIMIT_1 ||
hint > IOPRIO_HINT_DEV_DURATION_LIMIT_7)
return 0;

return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1;
}
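
sd_cdl_dld() maps an I/O priority hint to a CDL limit descriptor index: hints DEV_DURATION_LIMIT_1..7 select descriptors 1..7, anything else means "no limit". A standalone sketch of the mapping, with the two hint constants assumed to mirror the uapi <linux/ioprio.h> values:

#include <stdio.h>

#define IOPRIO_HINT_DEV_DURATION_LIMIT_1        1
#define IOPRIO_HINT_DEV_DURATION_LIMIT_7        7

static int cdl_dld_from_hint(int hint)
{
        if (hint < IOPRIO_HINT_DEV_DURATION_LIMIT_1 ||
            hint > IOPRIO_HINT_DEV_DURATION_LIMIT_7)
                return 0;       /* no duration limit */
        return hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1 + 1;
}

int main(void)
{
        for (int hint = 0; hint <= 8; hint++)
                printf("hint %d -> dld %d\n", hint, cdl_dld_from_hint(hint));
        return 0;
}
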
static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct request *rq = scsi_cmd_to_rq(cmd);
|
||||
|
@ -1100,6 +1127,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
|
|||
unsigned int mask = logical_to_sectors(sdp, 1) - 1;
|
||||
bool write = rq_data_dir(rq) == WRITE;
|
||||
unsigned char protect, fua;
|
||||
unsigned int dld;
|
||||
blk_status_t ret;
|
||||
unsigned int dif;
|
||||
bool dix;
|
||||
|
@ -1149,6 +1177,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
|
|||
fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
|
||||
dix = scsi_prot_sg_count(cmd);
|
||||
dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
|
||||
dld = sd_cdl_dld(sdkp, cmd);
|
||||
|
||||
if (dif || dix)
|
||||
protect = sd_setup_protect_cmnd(cmd, dix, dif);
|
||||
|
@ -1157,10 +1186,10 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
|
|||
|
||||
if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
|
||||
ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
|
||||
protect | fua);
|
||||
protect | fua, dld);
|
||||
} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
|
||||
ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
|
||||
protect | fua);
|
||||
protect | fua, dld);
|
||||
} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
|
||||
sdp->use_10_for_rw || protect) {
|
||||
ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
|
||||
|
@ -1557,41 +1586,37 @@ out:
|
|||
return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
|
||||
}
|
||||
|
||||
static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
|
||||
static int sd_sync_cache(struct scsi_disk *sdkp)
|
||||
{
|
||||
int retries, res;
|
||||
int res;
|
||||
struct scsi_device *sdp = sdkp->device;
|
||||
const int timeout = sdp->request_queue->rq_timeout
|
||||
* SD_FLUSH_TIMEOUT_MULTIPLIER;
|
||||
struct scsi_sense_hdr my_sshdr;
|
||||
/* Leave the rest of the command zero to indicate flush everything. */
|
||||
const unsigned char cmd[16] = { sdp->use_16_for_sync ?
|
||||
SYNCHRONIZE_CACHE_16 : SYNCHRONIZE_CACHE };
|
||||
struct scsi_sense_hdr sshdr;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
.allowed = 3,
|
||||
.result = SCMD_FAILURE_RESULT_ANY,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.req_flags = BLK_MQ_REQ_PM,
|
||||
/* caller might not be interested in sense, but we need it */
|
||||
.sshdr = sshdr ? : &my_sshdr,
|
||||
.sshdr = &sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
if (!scsi_device_online(sdp))
|
||||
return -ENODEV;
|
||||
|
||||
sshdr = exec_args.sshdr;
|
||||
|
||||
for (retries = 3; retries > 0; --retries) {
|
||||
unsigned char cmd[16] = { 0 };
|
||||
|
||||
if (sdp->use_16_for_sync)
|
||||
cmd[0] = SYNCHRONIZE_CACHE_16;
|
||||
else
|
||||
cmd[0] = SYNCHRONIZE_CACHE;
|
||||
/*
|
||||
* Leave the rest of the command zero to indicate
|
||||
* flush everything.
|
||||
*/
|
||||
res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
|
||||
timeout, sdkp->max_retries, &exec_args);
|
||||
if (res == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, timeout,
|
||||
sdkp->max_retries, &exec_args);
|
||||
if (res) {
|
||||
sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
|
||||
|
||||
|
@ -1599,15 +1624,23 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
|
|||
return res;
|
||||
|
||||
if (scsi_status_is_check_condition(res) &&
|
||||
scsi_sense_valid(sshdr)) {
|
||||
sd_print_sense_hdr(sdkp, sshdr);
|
||||
scsi_sense_valid(&sshdr)) {
|
||||
sd_print_sense_hdr(sdkp, &sshdr);
|
||||
|
||||
/* we need to evaluate the error return */
|
||||
if (sshdr->asc == 0x3a || /* medium not present */
|
||||
sshdr->asc == 0x20 || /* invalid command */
|
||||
(sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */
|
||||
if (sshdr.asc == 0x3a || /* medium not present */
|
||||
sshdr.asc == 0x20 || /* invalid command */
|
||||
(sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */
|
||||
/* this is no error here */
|
||||
return 0;
|
||||
/*
|
||||
* This drive doesn't support sync and there's not much
|
||||
* we can do because this is called during shutdown
|
||||
* or suspend so just return success so those operations
|
||||
* can proceed.
|
||||
*/
|
||||
if (sshdr.sense_key == ILLEGAL_REQUEST)
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch (host_byte(res)) {
|
||||
|
@ -1693,6 +1726,36 @@ static char sd_pr_type(enum pr_type type)
|
|||
}
|
||||
};
|
||||
|
||||
static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
|
||||
{
|
||||
switch (host_byte(result)) {
|
||||
case DID_TRANSPORT_MARGINAL:
|
||||
case DID_TRANSPORT_DISRUPTED:
|
||||
case DID_BUS_BUSY:
|
||||
return PR_STS_RETRY_PATH_FAILURE;
|
||||
case DID_NO_CONNECT:
|
||||
return PR_STS_PATH_FAILED;
|
||||
case DID_TRANSPORT_FAILFAST:
|
||||
return PR_STS_PATH_FAST_FAILED;
|
||||
}
|
||||
|
||||
switch (status_byte(result)) {
|
||||
case SAM_STAT_RESERVATION_CONFLICT:
|
||||
return PR_STS_RESERVATION_CONFLICT;
|
||||
case SAM_STAT_CHECK_CONDITION:
|
||||
if (!scsi_sense_valid(sshdr))
|
||||
return PR_STS_IOERR;
|
||||
|
||||
if (sshdr->sense_key == ILLEGAL_REQUEST &&
|
||||
(sshdr->asc == 0x26 || sshdr->asc == 0x24))
|
||||
return -EINVAL;
|
||||
|
||||
fallthrough;
|
||||
default:
|
||||
return PR_STS_IOERR;
|
||||
}
|
||||
}
|
||||
|
||||
static int sd_pr_command(struct block_device *bdev, u8 sa,
|
||||
u64 key, u64 sa_key, u8 type, u8 flags)
|
||||
{
|
||||
|
@ -1725,7 +1788,10 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
|
|||
scsi_print_sense_hdr(sdev, NULL, &sshdr);
|
||||
}
|
||||
|
||||
return result;
|
||||
if (result <= 0)
|
||||
return result;
|
||||
|
||||
return sd_scsi_to_pr_err(&sshdr, result);
|
||||
}
|
||||
|
||||
static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
|
||||
|
@ -2038,53 +2104,68 @@ static int sd_done(struct scsi_cmnd *SCpnt)
|
|||
static void
|
||||
sd_spinup_disk(struct scsi_disk *sdkp)
|
||||
{
|
||||
unsigned char cmd[10];
|
||||
static const u8 cmd[10] = { TEST_UNIT_READY };
|
||||
unsigned long spintime_expire = 0;
|
||||
int retries, spintime;
|
||||
int spintime, sense_valid = 0;
|
||||
unsigned int the_result;
|
||||
struct scsi_sense_hdr sshdr;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
/* Do not retry Medium Not Present */
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = 0x3A,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{
|
||||
.sense = NOT_READY,
|
||||
.asc = 0x3A,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
/* Retry when scsi_status_is_good would return false 3 times */
|
||||
{
|
||||
.result = SCMD_FAILURE_STAT_ANY,
|
||||
.allowed = 3,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.sshdr = &sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
int sense_valid = 0;
|
||||
|
||||
spintime = 0;
|
||||
|
||||
/* Spin up drives, as required. Only do this at boot time */
|
||||
/* Spinup needs to be done for module loads too. */
|
||||
do {
|
||||
retries = 0;
|
||||
bool media_was_present = sdkp->media_present;
|
||||
|
||||
do {
|
||||
bool media_was_present = sdkp->media_present;
|
||||
scsi_failures_reset_retries(&failures);
|
||||
|
||||
cmd[0] = TEST_UNIT_READY;
|
||||
memset((void *) &cmd[1], 0, 9);
|
||||
the_result = scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN,
|
||||
NULL, 0, SD_TIMEOUT,
|
||||
sdkp->max_retries, &exec_args);
|
||||
|
||||
the_result = scsi_execute_cmd(sdkp->device, cmd,
|
||||
REQ_OP_DRV_IN, NULL, 0,
|
||||
SD_TIMEOUT,
|
||||
sdkp->max_retries,
|
||||
&exec_args);
|
||||
|
||||
if (the_result > 0) {
|
||||
/*
|
||||
* If the drive has indicated to us that it
|
||||
* doesn't have any media in it, don't bother
|
||||
* with any more polling.
|
||||
* If the drive has indicated to us that it doesn't
|
||||
* have any media in it, don't bother with any more
|
||||
* polling.
|
||||
*/
|
||||
if (media_not_present(sdkp, &sshdr)) {
|
||||
if (media_was_present)
|
||||
sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
|
||||
sd_printk(KERN_NOTICE, sdkp,
|
||||
"Media removed, stopped polling\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (the_result)
|
||||
sense_valid = scsi_sense_valid(&sshdr);
|
||||
retries++;
|
||||
} while (retries < 3 &&
|
||||
(!scsi_status_is_good(the_result) ||
|
||||
(scsi_status_is_check_condition(the_result) &&
|
||||
sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
|
||||
sense_valid = scsi_sense_valid(&sshdr);
|
||||
}
|
||||
|
||||
if (!scsi_status_is_check_condition(the_result)) {
|
||||
/* no sense, TUR either succeeded or failed
|
||||
|
@ -2111,18 +2192,24 @@ sd_spinup_disk(struct scsi_disk *sdkp)
|
|||
break; /* unavailable */
|
||||
if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
|
||||
break; /* sanitize in progress */
|
||||
if (sshdr.asc == 4 && sshdr.ascq == 0x24)
|
||||
break; /* depopulation in progress */
|
||||
if (sshdr.asc == 4 && sshdr.ascq == 0x25)
|
||||
break; /* depopulation restoration in progress */
|
||||
/*
|
||||
* Issue command to spin up drive when not ready
|
||||
*/
|
||||
if (!spintime) {
|
||||
/* Return immediately and start spin cycle */
|
||||
const u8 start_cmd[10] = {
|
||||
[0] = START_STOP,
|
||||
[1] = 1,
|
||||
[4] = sdkp->device->start_stop_pwr_cond ?
|
||||
0x11 : 1,
|
||||
};
|
||||
|
||||
sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
|
||||
cmd[0] = START_STOP;
|
||||
cmd[1] = 1; /* Return immediately */
|
||||
memset((void *) &cmd[2], 0, 8);
|
||||
cmd[4] = 1; /* Start spin cycle */
|
||||
if (sdkp->device->start_stop_pwr_cond)
|
||||
cmd[4] |= 1 << 4;
|
||||
scsi_execute_cmd(sdkp->device, cmd,
|
||||
scsi_execute_cmd(sdkp->device, start_cmd,
|
||||
REQ_OP_DRV_IN, NULL, 0,
|
||||
SD_TIMEOUT, sdkp->max_retries,
|
||||
&exec_args);
|
||||
|
@ -2275,11 +2362,10 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
|
|||
the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN,
|
||||
buffer, RC16_LEN, SD_TIMEOUT,
|
||||
sdkp->max_retries, &exec_args);
|
||||
|
||||
if (media_not_present(sdkp, &sshdr))
|
||||
return -ENODEV;
|
||||
|
||||
if (the_result > 0) {
|
||||
if (media_not_present(sdkp, &sshdr))
|
||||
return -ENODEV;
|
||||
|
||||
sense_valid = scsi_sense_valid(&sshdr);
|
||||
if (sense_valid &&
|
||||
sshdr.sense_key == ILLEGAL_REQUEST &&
|
||||
|
@ -2344,42 +2430,58 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
|
|||
static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
|
||||
unsigned char *buffer)
|
||||
{
|
||||
unsigned char cmd[16];
|
||||
static const u8 cmd[10] = { READ_CAPACITY };
|
||||
struct scsi_sense_hdr sshdr;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
/* Do not retry Medium Not Present */
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = 0x3A,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{
|
||||
.sense = NOT_READY,
|
||||
.asc = 0x3A,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
/* Device reset might occur several times so retry a lot */
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = 0x29,
|
||||
.allowed = READ_CAPACITY_RETRIES_ON_RESET,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
/* Any other error not listed above retry 3 times */
|
||||
{
|
||||
.result = SCMD_FAILURE_RESULT_ANY,
|
||||
.allowed = 3,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.sshdr = &sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
int sense_valid = 0;
|
||||
int the_result;
|
||||
int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
|
||||
sector_t lba;
|
||||
unsigned sector_size;
|
||||
|
||||
do {
|
||||
cmd[0] = READ_CAPACITY;
|
||||
memset(&cmd[1], 0, 9);
|
||||
memset(buffer, 0, 8);
|
||||
memset(buffer, 0, 8);
|
||||
|
||||
the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
|
||||
8, SD_TIMEOUT, sdkp->max_retries,
|
||||
&exec_args);
|
||||
the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
|
||||
8, SD_TIMEOUT, sdkp->max_retries,
|
||||
&exec_args);
|
||||
|
||||
if (the_result > 0) {
|
||||
sense_valid = scsi_sense_valid(&sshdr);
|
||||
|
||||
if (media_not_present(sdkp, &sshdr))
|
||||
return -ENODEV;
|
||||
|
||||
if (the_result > 0) {
|
||||
sense_valid = scsi_sense_valid(&sshdr);
|
||||
if (sense_valid &&
|
||||
sshdr.sense_key == UNIT_ATTENTION &&
|
||||
sshdr.asc == 0x29 && sshdr.ascq == 0x00)
|
||||
/* Device reset might occur several times,
|
||||
* give it one more chance */
|
||||
if (--reset_retries > 0)
|
||||
continue;
|
||||
}
|
||||
retries--;
|
||||
|
||||
} while (the_result && retries);
|
||||
}
|
||||
|
||||
if (the_result) {
|
||||
sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
|
||||
|
@ -2558,9 +2660,8 @@ sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
|
|||
if (sdkp->device->use_10_for_ms && len < 8)
|
||||
len = 8;
|
||||
|
||||
return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
|
||||
SD_TIMEOUT, sdkp->max_retries, data,
|
||||
sshdr);
|
||||
return scsi_mode_sense(sdkp->device, dbd, modepage, 0, buffer, len,
|
||||
SD_TIMEOUT, sdkp->max_retries, data, sshdr);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2777,7 +2878,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
|
|||
}
|
||||
|
||||
bad_sense:
|
||||
if (scsi_sense_valid(&sshdr) &&
|
||||
if (res == -EIO && scsi_sense_valid(&sshdr) &&
|
||||
sshdr.sense_key == ILLEGAL_REQUEST &&
|
||||
sshdr.asc == 0x24 && sshdr.ascq == 0x0)
|
||||
/* Invalid field in CDB */
|
||||
|
@ -2817,7 +2918,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
|
|||
if (sdkp->protection_type == 0)
|
||||
return;
|
||||
|
||||
res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
|
||||
res = scsi_mode_sense(sdp, 1, 0x0a, 0, buffer, 36, SD_TIMEOUT,
|
||||
sdkp->max_retries, &data, &sshdr);
|
||||
|
||||
if (res < 0 || !data.header_length ||
|
||||
|
@ -2825,7 +2926,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
|
|||
sd_first_printk(KERN_WARNING, sdkp,
|
||||
"getting Control mode page failed, assume no ATO\n");
|
||||
|
||||
if (scsi_sense_valid(&sshdr))
|
||||
if (res == -EIO && scsi_sense_valid(&sshdr))
|
||||
sd_print_sense_hdr(sdkp, &sshdr);
|
||||
|
||||
return;
|
||||
|
@ -3003,7 +3104,7 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
|
|||
return;
|
||||
}
|
||||
|
||||
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
|
||||
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY, 0) < 0) {
|
||||
struct scsi_vpd *vpd;
|
||||
|
||||
sdev->no_report_opcodes = 1;
|
||||
|
@ -3019,10 +3120,10 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
|
|||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
|
||||
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16, 0) == 1)
|
||||
sdkp->ws16 = 1;
|
||||
|
||||
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
|
||||
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME, 0) == 1)
|
||||
sdkp->ws10 = 1;
|
||||
}
|
||||
|
||||
|
@ -3034,9 +3135,9 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
|
|||
return;
|
||||
|
||||
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
|
||||
SECURITY_PROTOCOL_IN) == 1 &&
|
||||
SECURITY_PROTOCOL_IN, 0) == 1 &&
|
||||
scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
|
||||
SECURITY_PROTOCOL_OUT) == 1)
|
||||
SECURITY_PROTOCOL_OUT, 0) == 1)
|
||||
sdkp->security = 1;
|
||||
}
|
||||
|
||||
|
@ -3567,7 +3668,7 @@ static int sd_probe(struct device *dev)
|
|||
|
||||
error = device_add_disk(dev, gd, NULL);
|
||||
if (error) {
|
||||
put_device(&sdkp->disk_dev);
|
||||
device_unregister(&sdkp->disk_dev);
|
||||
put_disk(gd);
|
||||
goto out;
|
||||
}
|
||||
|
@ -3688,7 +3789,7 @@ static void sd_shutdown(struct device *dev)
|
|||
|
||||
if (sdkp->WCE && sdkp->media_present) {
|
||||
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
|
||||
sd_sync_cache(sdkp, NULL);
|
||||
sd_sync_cache(sdkp);
|
||||
}
|
||||
|
||||
if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
|
||||
|
@ -3700,7 +3801,6 @@ static void sd_shutdown(struct device *dev)
|
|||
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
|
||||
{
|
||||
struct scsi_disk *sdkp = dev_get_drvdata(dev);
|
||||
struct scsi_sense_hdr sshdr;
|
||||
int ret = 0;
|
||||
|
||||
if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
|
||||
|
@ -3709,24 +3809,13 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
|
|||
if (sdkp->WCE && sdkp->media_present) {
|
||||
if (!sdkp->device->silence_suspend)
|
||||
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
|
||||
ret = sd_sync_cache(sdkp, &sshdr);
|
||||
ret = sd_sync_cache(sdkp);
|
||||
/* ignore OFFLINE device */
|
||||
if (ret == -ENODEV)
|
||||
return 0;
|
||||
|
||||
if (ret) {
|
||||
/* ignore OFFLINE device */
|
||||
if (ret == -ENODEV)
|
||||
return 0;
|
||||
|
||||
if (!scsi_sense_valid(&sshdr) ||
|
||||
sshdr.sense_key != ILLEGAL_REQUEST)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* sshdr.sense_key == ILLEGAL_REQUEST means this drive
|
||||
* doesn't support sync. There's not much to do and
|
||||
* suspend shouldn't fail.
|
||||
*/
|
||||
ret = 0;
|
||||
}
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (sdkp->device->manage_start_stop) {
|
||||
|
|
|
@ -87,19 +87,32 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
|
|||
0
|
||||
};
|
||||
unsigned char recv_page_code;
|
||||
unsigned int retries = SES_RETRIES;
|
||||
struct scsi_sense_hdr sshdr;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = 0x29,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.allowed = SES_RETRIES,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{
|
||||
.sense = NOT_READY,
|
||||
.asc = SCMD_FAILURE_ASC_ANY,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.allowed = SES_RETRIES,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.sshdr = &sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
do {
|
||||
ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen,
|
||||
SES_TIMEOUT, 1, &exec_args);
|
||||
} while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
|
||||
(sshdr.sense_key == NOT_READY ||
|
||||
(sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
|
||||
|
||||
ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen,
|
||||
SES_TIMEOUT, 1, &exec_args);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
|
@ -131,19 +144,32 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
|
|||
bufflen & 0xff,
|
||||
0
|
||||
};
|
||||
struct scsi_sense_hdr sshdr;
|
||||
unsigned int retries = SES_RETRIES;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
.sense = UNIT_ATTENTION,
|
||||
.asc = 0x29,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.allowed = SES_RETRIES,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{
|
||||
.sense = NOT_READY,
|
||||
.asc = SCMD_FAILURE_ASC_ANY,
|
||||
.ascq = SCMD_FAILURE_ASCQ_ANY,
|
||||
.allowed = SES_RETRIES,
|
||||
.result = SAM_STAT_CHECK_CONDITION,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.sshdr = &sshdr,
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
do {
|
||||
result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf,
|
||||
bufflen, SES_TIMEOUT, 1, &exec_args);
|
||||
} while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
|
||||
(sshdr.sense_key == NOT_READY ||
|
||||
(sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
|
||||
|
||||
result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf, bufflen,
|
||||
SES_TIMEOUT, 1, &exec_args);
|
||||
if (result)
|
||||
sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
|
||||
result);
|
||||
|
|
|
@ -1354,7 +1354,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
|
|||
struct scsi_sense_hdr sshdr;
|
||||
|
||||
srp->header.status = 0xff & result;
|
||||
srp->header.masked_status = status_byte(result);
|
||||
srp->header.masked_status = sg_status_byte(result);
|
||||
srp->header.msg_status = COMMAND_COMPLETE;
|
||||
srp->header.host_status = host_byte(result);
|
||||
srp->header.driver_status = driver_byte(result);
|
||||
|
@ -1429,7 +1429,9 @@ static const struct file_operations sg_fops = {
|
|||
.llseek = no_llseek,
|
||||
};
|
||||
|
||||
static struct class *sg_sysfs_class;
|
||||
static const struct class sg_sysfs_class = {
|
||||
.name = "scsi_generic"
|
||||
};
|
||||
|
||||
static int sg_sysfs_valid = 0;
|
||||
|
||||
|
@ -1531,7 +1533,7 @@ sg_add_device(struct device *cl_dev)
|
|||
if (sg_sysfs_valid) {
|
||||
struct device *sg_class_member;
|
||||
|
||||
sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
|
||||
sg_class_member = device_create(&sg_sysfs_class, cl_dev->parent,
|
||||
MKDEV(SCSI_GENERIC_MAJOR,
|
||||
sdp->index),
|
||||
sdp, "%s", sdp->name);
|
||||
|
@ -1621,7 +1623,7 @@ sg_remove_device(struct device *cl_dev)
|
|||
read_unlock_irqrestore(&sdp->sfd_lock, iflags);
|
||||
|
||||
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
|
||||
device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
|
||||
device_destroy(&sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
|
||||
cdev_del(sdp->cdev);
|
||||
sdp->cdev = NULL;
|
||||
|
||||
|
@ -1662,11 +1664,9 @@ init_sg(void)
|
|||
SG_MAX_DEVS, "sg");
|
||||
if (rc)
|
||||
return rc;
|
||||
sg_sysfs_class = class_create("scsi_generic");
|
||||
if ( IS_ERR(sg_sysfs_class) ) {
|
||||
rc = PTR_ERR(sg_sysfs_class);
|
||||
rc = class_register(&sg_sysfs_class);
|
||||
if (rc)
|
||||
goto err_out;
|
||||
}
|
||||
sg_sysfs_valid = 1;
|
||||
rc = scsi_register_interface(&sg_interface);
|
||||
if (0 == rc) {
|
||||
|
@ -1675,7 +1675,7 @@ init_sg(void)
|
|||
#endif /* CONFIG_SCSI_PROC_FS */
|
||||
return 0;
|
||||
}
|
||||
class_destroy(sg_sysfs_class);
|
||||
class_unregister(&sg_sysfs_class);
|
||||
err_out:
|
||||
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
|
||||
return rc;
|
||||
|
@ -1688,7 +1688,7 @@ exit_sg(void)
|
|||
remove_proc_subtree("scsi/sg", NULL);
|
||||
#endif /* CONFIG_SCSI_PROC_FS */
|
||||
scsi_unregister_interface(&sg_interface);
|
||||
class_destroy(sg_sysfs_class);
|
||||
class_unregister(&sg_sysfs_class);
|
||||
sg_sysfs_valid = 0;
|
||||
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
|
||||
SG_MAX_DEVS);
|
||||
|
|
|
@ -177,7 +177,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
|
|||
|
||||
result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, sizeof(buf),
|
||||
SR_TIMEOUT, MAX_RETRIES, &exec_args);
|
||||
if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
|
||||
if (result > 0 && scsi_sense_valid(&sshdr) &&
|
||||
sshdr.sense_key == UNIT_ATTENTION)
|
||||
return DISK_EVENT_MEDIA_CHANGE;
|
||||
|
||||
if (result || be16_to_cpu(eh->data_len) < sizeof(*med))
|
||||
|
@ -716,27 +717,29 @@ fail:
|
|||
|
||||
static void get_sectorsize(struct scsi_cd *cd)
|
||||
{
|
||||
unsigned char cmd[10];
|
||||
unsigned char buffer[8];
|
||||
int the_result, retries = 3;
|
||||
static const u8 cmd[10] = { READ_CAPACITY };
|
||||
unsigned char buffer[8] = { };
|
||||
int the_result;
|
||||
int sector_size;
|
||||
struct request_queue *queue;
|
||||
struct scsi_failure failure_defs[] = {
|
||||
{
|
||||
.result = SCMD_FAILURE_RESULT_ANY,
|
||||
.allowed = 3,
|
||||
},
|
||||
{}
|
||||
};
|
||||
struct scsi_failures failures = {
|
||||
.failure_definitions = failure_defs,
|
||||
};
|
||||
const struct scsi_exec_args exec_args = {
|
||||
.failures = &failures,
|
||||
};
|
||||
|
||||
do {
|
||||
cmd[0] = READ_CAPACITY;
|
||||
memset((void *) &cmd[1], 0, 9);
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
|
||||
/* Do the command and wait.. */
|
||||
the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN,
|
||||
buffer, sizeof(buffer),
|
||||
SR_TIMEOUT, MAX_RETRIES, NULL);
|
||||
|
||||
retries--;
|
||||
|
||||
} while (the_result && retries);
|
||||
|
||||
|
||||
/* Do the command and wait.. */
|
||||
the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer,
|
||||
sizeof(buffer), SR_TIMEOUT, MAX_RETRIES,
|
||||
&exec_args);
|
||||
if (the_result) {
|
||||
cd->capacity = 0x1fffff;
|
||||
sector_size = 2048; /* A guess, just in case */
|
||||
|
@ -825,7 +828,7 @@ static int get_capabilities(struct scsi_cd *cd)
|
|||
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
|
||||
|
||||
/* ask for mode page 0x2a */
|
||||
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
|
||||
rc = scsi_mode_sense(cd->device, 0, 0x2a, 0, buffer, ms_len,
|
||||
SR_TIMEOUT, 3, &data, NULL);
|
||||
|
||||
if (rc < 0 || data.length > ms_len ||
|
||||
|
|
|
@@ -87,7 +87,7 @@ static int try_rdio = 1;
static int try_wdio = 1;
static int debug_flag;
static struct class st_sysfs_class;
static const struct class st_sysfs_class;
static const struct attribute_group *st_dev_groups[];
static const struct attribute_group *st_drv_groups[];

@@ -4250,11 +4250,10 @@ static int st_probe(struct device *dev)
struct st_partstat *STps;
struct st_buffer *buffer;
int i, error;
char *stp;
if (SDp->type != TYPE_TAPE)
return -ENODEV;
if ((stp = st_incompatible(SDp))) {
if (st_incompatible(SDp)) {
sdev_printk(KERN_INFO, SDp,
"OnStream tapes are no longer supported;\n");
sdev_printk(KERN_INFO, SDp,

@@ -4439,7 +4438,7 @@ static void scsi_tape_release(struct kref *kref)
return;
}
static struct class st_sysfs_class = {
static const struct class st_sysfs_class = {
.name = "scsi_tape",
.dev_groups = st_dev_groups,
};

@@ -9262,7 +9262,17 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev,
struct scsi_sense_hdr *sshdr)
{
const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
struct scsi_failure failure_defs[] = {
{
.allowed = 2,
.result = SCMD_FAILURE_RESULT_ANY,
},
};
struct scsi_failures failures = {
.failure_definitions = failure_defs,
};
const struct scsi_exec_args args = {
.failures = &failures,
.sshdr = sshdr,
.req_flags = BLK_MQ_REQ_PM,
.scmd_flags = SCMD_FAIL_IF_RECOVERING,

@@ -9287,7 +9297,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_sense_hdr sshdr;
struct scsi_device *sdp;
unsigned long flags;
int ret, retries;
int ret;
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;

@@ -9318,15 +9328,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended childs.
*/
for (retries = 3; retries > 0; --retries) {
ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
/*
* scsi_execute() only returns a negative value if the request
* queue is dying.
*/
if (ret <= 0)
break;
}
ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",

@@ -176,6 +176,12 @@ typedef u16 blk_short_t;
*/
#define BLK_STS_OFFLINE ((__force blk_status_t)17)
/*
* BLK_STS_DURATION_LIMIT is returned from the driver when the target device
* aborted the command because it exceeded one of its Command Duration Limits.
*/
#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)18)
/**
* blk_path_error - returns true if error may be path related
* @error: status the request was completed with

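As a rough illustration only (not part of this series; the function name is invented), a driver that detects a device-side duration-limit abort would complete the request with the new status, which the block layer then maps to -ETIME for upper layers:

#include <linux/blk-mq.h>

/*
 * Illustrative sketch, not from this diff: finish a request whose command
 * the device aborted because one of its Command Duration Limits expired.
 */
static void example_end_duration_limited(struct request *rq)
{
	blk_mq_end_request(rq, BLK_STS_DURATION_LIMIT);
}
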
@@ -121,6 +121,7 @@ enum scsi_disposition {
* msg_byte (unused)
* host_byte = set by low-level driver to indicate status.
*/
#define status_byte(result) (result & 0xff)
#define host_byte(result) (((result) >> 16) & 0xff)
#define sense_class(sense) (((sense) >> 4) & 0x7)

@@ -156,6 +157,9 @@ enum scsi_disposition {
#define SCSI_3 4 /* SPC */
#define SCSI_SPC_2 5
#define SCSI_SPC_3 6
#define SCSI_SPC_4 7
#define SCSI_SPC_5 8
#define SCSI_SPC_6 14
/*
* INQ PERIPHERAL QUALIFIERS

@@ -103,10 +103,6 @@ struct scsi_vpd {
unsigned char data[];
};
enum scsi_vpd_parameters {
SCSI_VPD_HEADER_SIZE = 4,
};
struct scsi_device {
struct Scsi_Host *host;
struct request_queue *request_queue;

@@ -220,6 +216,9 @@ struct scsi_device {
RH_KABI_FILL_HOLE(unsigned use_16_for_sync:1) /* Use sync (16) over sync (10) */
RH_KABI_FILL_HOLE(unsigned no_vpd_size:1) /* No VPD size reported in header */
RH_KABI_FILL_HOLE(unsigned cdl_supported:1) /* Command duration limits supported */
RH_KABI_FILL_HOLE(unsigned cdl_enable:1) /* Enable/disable Command duration limits */
unsigned int queue_stopped; /* request queue is quiesced */
bool offline_already; /* Device offline message logged */

@@ -391,6 +390,8 @@ extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh);
extern void scsi_remove_device(struct scsi_device *);
extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
void scsi_attach_vpd(struct scsi_device *sdev);
void scsi_cdl_check(struct scsi_device *sdev);
int scsi_cdl_enable(struct scsi_device *sdev, bool enable);
extern struct scsi_device *scsi_device_from_queue(struct request_queue *q);
extern int __must_check scsi_device_get(struct scsi_device *);

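A minimal sketch of how the two new helpers and the new scsi_device bits fit together (the wrapper below is invented for illustration; upstream, scsi_cdl_check() runs during device scan and scsi_cdl_enable() is driven by the cdl_enable sysfs attribute):

#include <scsi/scsi_device.h>

/*
 * Illustrative sketch, not from this diff: probe a device for Command
 * Duration Limits support and, when it is reported, turn the feature on.
 */
static void example_setup_cdl(struct scsi_device *sdev)
{
	scsi_cdl_check(sdev);		/* fills in sdev->cdl_supported */
	if (sdev->cdl_supported && scsi_cdl_enable(sdev, true))
		sdev_printk(KERN_INFO, sdev, "enabling CDL failed\n");
}
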
@@ -448,10 +449,10 @@ extern int scsi_track_queue_full(struct scsi_device *, int);
extern int scsi_set_medium_removal(struct scsi_device *, char);
extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
unsigned char *buffer, int len, int timeout,
int retries, struct scsi_mode_data *data,
struct scsi_sense_hdr *);
int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
int subpage, unsigned char *buffer, int len, int timeout,
int retries, struct scsi_mode_data *data,
struct scsi_sense_hdr *);
extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
unsigned char *buffer, int len, int timeout,
int retries, struct scsi_mode_data *data,

@@ -460,8 +461,9 @@ extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
int retries, struct scsi_sense_hdr *sshdr);
extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf,
int buf_len);
extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
unsigned int len, unsigned char opcode);
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
unsigned int len, unsigned char opcode,
unsigned short sa);
extern int scsi_device_set_state(struct scsi_device *sdev,
enum scsi_device_state state);
extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,

@@ -490,6 +492,52 @@ extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int timeout, int retries, blk_opf_t flags,
req_flags_t rq_flags, int *resid);
/*
* scsi_execute_cmd users can set scsi_failure.result to have
* scsi_check_passthrough fail/retry a command. scsi_failure.result can be a
* specific host byte or message code, or SCMD_FAILURE_RESULT_ANY can be used
* to match any host or message code.
*/
#define SCMD_FAILURE_RESULT_ANY 0x7fffffff
/*
* Set scsi_failure.result to SCMD_FAILURE_STAT_ANY to fail/retry any failure
* scsi_status_is_good returns false for.
*/
#define SCMD_FAILURE_STAT_ANY 0xff
/*
* The following can be set to the scsi_failure sense, asc and ascq fields to
* match on any sense, ASC, or ASCQ value.
*/
#define SCMD_FAILURE_SENSE_ANY 0xff
#define SCMD_FAILURE_ASC_ANY 0xff
#define SCMD_FAILURE_ASCQ_ANY 0xff
/* Always retry a matching failure. */
#define SCMD_FAILURE_NO_LIMIT -1
struct scsi_failure {
int result;
u8 sense;
u8 asc;
u8 ascq;
/*
* Number of times scsi_execute_cmd will retry the failure. It does
* not count for the total_allowed.
*/
s8 allowed;
/* Number of times the failure has been retried. */
s8 retries;
};
struct scsi_failures {
/*
* If a scsi_failure does not have a retry limit setup this limit will
* be used.
*/
int total_allowed;
int total_retries;
struct scsi_failure *failure_definitions;
};
/* Optional arguments to scsi_execute_cmd */
struct scsi_exec_args {
unsigned char *sense; /* sense buffer */

@@ -498,12 +546,14 @@ struct scsi_exec_args {
blk_mq_req_flags_t req_flags; /* BLK_MQ_REQ flags */
int scmd_flags; /* SCMD flags */
int *resid; /* residual length */
struct scsi_failures *failures; /* failures to retry */
};
int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
blk_opf_t opf, void *buffer, unsigned int bufflen,
int timeout, int retries,
const struct scsi_exec_args *args);
void scsi_failures_reset_retries(struct scsi_failures *failures);
/* Make sure any sense buffer is the correct size. */
#define scsi_execute(_sdev, _cmd, _data_dir, _buffer, _bufflen, _sense, \

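The sr and UFS hunks above use this interface; as a condensed, self-contained sketch (the command, timeout and limits below are placeholders, not taken from this series), a caller that wants scsi_execute_cmd() to retry any failed completion up to three times would write roughly:

#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/*
 * Illustrative sketch, not from this diff: issue TEST UNIT READY with up to
 * three retries of any failure, described via a scsi_failures table.
 */
static int example_tur_with_retries(struct scsi_device *sdev)
{
	static const unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_failure failure_defs[] = {
		{
			.result = SCMD_FAILURE_RESULT_ANY,
			.allowed = 3,
		},
		{}
	};
	struct scsi_failures failures = {
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.failures = &failures,
	};

	return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
				10 * HZ, 1, &exec_args);
}
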
@@ -247,6 +247,9 @@ struct scsi_host_template {
* midlayer calls this point so that the driver may deallocate
* and terminate any references to the target.
*
* Note: This callback is called with the host lock held and hence
* must not sleep.
*
* Status: OPTIONAL
*/
void (* target_destroy)(struct scsi_target *);

@@ -163,7 +163,7 @@ struct compat_sg_io_hdr {
#define TASK_ABORTED 0x20
/* Obsolete status_byte() declaration */
#define status_byte(result) (((result) >> 1) & 0x7f)
#define sg_status_byte(result) (((result) >> 1) & 0x7f)
typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */
int host_no; /* as in "scsi<n>" where 'n' is one of 0, 1, 2 etc */

@@ -2,22 +2,23 @@
#ifndef _UAPI_LINUX_IOPRIO_H
#define _UAPI_LINUX_IOPRIO_H
#include <linux/stddef.h>
#include <linux/types.h>
/*
* Gives us 8 prio classes with 13-bits of data for each class
*/
#define IOPRIO_CLASS_SHIFT 13
#define IOPRIO_CLASS_MASK 0x07
#define IOPRIO_NR_CLASSES 8
#define IOPRIO_CLASS_MASK (IOPRIO_NR_CLASSES - 1)
#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
#define IOPRIO_PRIO_CLASS(ioprio) \
(((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK)
#define IOPRIO_PRIO_DATA(ioprio) ((ioprio) & IOPRIO_PRIO_MASK)
#define IOPRIO_PRIO_VALUE(class, data) \
((((class) & IOPRIO_CLASS_MASK) << IOPRIO_CLASS_SHIFT) | \
((data) & IOPRIO_PRIO_MASK))
/*
* These are the io priority groups as implemented by the BFQ and mq-deadline
* These are the io priority classes as implemented by the BFQ and mq-deadline
* schedulers. RT is the realtime class, it always gets premium service. For
* ATA disks supporting NCQ IO priority, RT class IOs will be processed using
* high priority NCQ commands. BE is the best-effort scheduling class, the

@@ -25,18 +26,30 @@
* served when no one else is using the disk.
*/
enum {
IOPRIO_CLASS_NONE,
IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE,
IOPRIO_CLASS_IDLE,
IOPRIO_CLASS_NONE = 0,
IOPRIO_CLASS_RT = 1,
IOPRIO_CLASS_BE = 2,
IOPRIO_CLASS_IDLE = 3,
/* Special class to indicate an invalid ioprio value */
IOPRIO_CLASS_INVALID = 7,
};
/*
* The RT and BE priority classes both support up to 8 priority levels.
* The RT and BE priority classes both support up to 8 priority levels that
* can be specified using the lower 3-bits of the priority data.
*/
#define IOPRIO_NR_LEVELS 8
#define IOPRIO_BE_NR IOPRIO_NR_LEVELS
#define IOPRIO_LEVEL_NR_BITS 3
#define IOPRIO_NR_LEVELS (1 << IOPRIO_LEVEL_NR_BITS)
#define IOPRIO_LEVEL_MASK (IOPRIO_NR_LEVELS - 1)
#define IOPRIO_PRIO_LEVEL(ioprio) ((ioprio) & IOPRIO_LEVEL_MASK)
#define IOPRIO_BE_NR IOPRIO_NR_LEVELS
/*
* Possible values for the "which" argument of the ioprio_get() and
* ioprio_set() system calls (see "man ioprio_set").
*/
enum {
IOPRIO_WHO_PROCESS = 1,
IOPRIO_WHO_PGRP,

@@ -44,9 +57,71 @@ enum {
};
/*
* Fallback BE priority level.
* Fallback BE class priority level.
*/
#define IOPRIO_NORM 4
#define IOPRIO_BE_NORM IOPRIO_NORM
/*
* The 10 bits between the priority class and the priority level are used to
* optionally define I/O hints for any combination of I/O priority class and
* level. Depending on the kernel configuration, I/O scheduler being used and
* the target I/O device being used, hints can influence how I/Os are processed
* without affecting the I/O scheduling ordering defined by the I/O priority
* class and level.
*/
#define IOPRIO_HINT_SHIFT IOPRIO_LEVEL_NR_BITS
#define IOPRIO_HINT_NR_BITS 10
#define IOPRIO_NR_HINTS (1 << IOPRIO_HINT_NR_BITS)
#define IOPRIO_HINT_MASK (IOPRIO_NR_HINTS - 1)
#define IOPRIO_PRIO_HINT(ioprio) \
(((ioprio) >> IOPRIO_HINT_SHIFT) & IOPRIO_HINT_MASK)
/*
* I/O hints.
*/
enum {
/* No hint */
IOPRIO_HINT_NONE = 0,
/*
* Device command duration limits: indicate to the device a desired
* duration limit for the commands that will be used to process an I/O.
* These will currently only be effective for SCSI and ATA devices that
* support the command duration limits feature. If this feature is
* enabled, then the commands issued to the device to process an I/O with
* one of these hints set will have the duration limit index (dld field)
* set to the value of the hint.
*/
IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1,
IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2,
IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3,
IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4,
IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5,
IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6,
IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7,
};
#define IOPRIO_BAD_VALUE(val, max) ((val) < 0 || (val) >= (max))
/*
* Return an I/O priority value based on a class, a level and a hint.
*/
static __always_inline __u16 ioprio_value(int prioclass, int priolevel,
int priohint)
{
if (IOPRIO_BAD_VALUE(prioclass, IOPRIO_NR_CLASSES) ||
IOPRIO_BAD_VALUE(priolevel, IOPRIO_NR_LEVELS) ||
IOPRIO_BAD_VALUE(priohint, IOPRIO_NR_HINTS))
return IOPRIO_CLASS_INVALID << IOPRIO_CLASS_SHIFT;
return (prioclass << IOPRIO_CLASS_SHIFT) |
(priohint << IOPRIO_HINT_SHIFT) | priolevel;
}
#define IOPRIO_PRIO_VALUE(prioclass, priolevel) \
ioprio_value(prioclass, priolevel, IOPRIO_HINT_NONE)
#define IOPRIO_PRIO_VALUE_HINT(prioclass, priolevel, priohint) \
ioprio_value(prioclass, priolevel, priohint)
#endif /* _UAPI_LINUX_IOPRIO_H */

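A small user-space sketch tying the pieces together (assumptions: glibc provides no wrapper for ioprio_set(), so the raw syscall is used, and the target device has CDL enabled; the function name is invented):

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/ioprio.h>

/*
 * Illustrative sketch, not from this diff: request best-effort level 0 with
 * device duration-limit hint 1 for all I/O issued by the calling process.
 * Commands sent to a CDL-enabled SCSI/ATA device then carry dld index 1.
 */
static int example_set_cdl_hint(void)
{
	int ioprio = IOPRIO_PRIO_VALUE_HINT(IOPRIO_CLASS_BE, 0,
					    IOPRIO_HINT_DEV_DURATION_LIMIT_1);

	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio);
}
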