Diffstat (limited to 'drivers')
 drivers/target/iscsi/iscsi_target.c      | 12
 drivers/target/iscsi/iscsi_target_erl2.c | 41
 drivers/target/target_core_transport.c   | 82
 3 files changed, 50 insertions(+), 85 deletions(-)
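This series replaces the per-command se_cmd->transport_wait_for_tasks() function pointer with two core entry points: transport_wait_for_tasks(), now exported directly, and transport_generic_free_cmd(), which performs the wait itself when asked. Below is a minimal, self-contained model of the new dispatch, using stub types and printf placeholders rather than the kernel structures; the real function in the target_core_transport.c hunks further down additionally drops the ACL count, detaches the LUN, and frees device tasks.

#include <stdio.h>

#define SCF_SE_LUN_CMD 0x01		/* illustrative value only */

struct se_cmd {
	unsigned int se_cmd_flags;
	void *se_tmr_req;		/* non-NULL for TMR commands */
};

static void transport_wait_for_tasks(struct se_cmd *cmd)
{
	printf("quiesce outstanding tasks for cmd %p\n", (void *)cmd);
}

static void transport_release_cmd(struct se_cmd *cmd)
{
	printf("release cmd %p (never reached a LUN)\n", (void *)cmd);
}

static void transport_put_cmd(struct se_cmd *cmd)
{
	printf("drop final reference on cmd %p\n", (void *)cmd);
}

/* Mirrors the new branch structure of transport_generic_free_cmd() below. */
static void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && cmd->se_tmr_req)
			transport_wait_for_tasks(cmd);
		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);
		transport_put_cmd(cmd);
	}
}

int main(void)
{
	struct se_cmd tmr = { .se_cmd_flags = 0, .se_tmr_req = &tmr };
	struct se_cmd io  = { .se_cmd_flags = SCF_SE_LUN_CMD, .se_tmr_req = NULL };

	transport_generic_free_cmd(&tmr, 1);	/* waits (TMR), then releases */
	transport_generic_free_cmd(&io, 1);	/* waits, then puts */
	return 0;
}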
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 354a8339a3f9..e4b9ba296dcf 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3940,7 +3940,6 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
struct iscsi_session *sess = conn->sess;
- struct se_cmd *se_cmd;
/*
* We expect this function to only ever be called from either RX or TX
* thread context via iscsit_close_connection() once the other context
@@ -3953,16 +3952,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
- se_cmd = &cmd->se_cmd;
/*
* Special cases for active iSCSI TMR, and
* transport_lookup_cmd_lun() failing from
* iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
*/
- if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
- se_cmd->transport_wait_for_tasks(se_cmd, 1);
- else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
- transport_release_cmd(se_cmd);
+ if (cmd->tmr_req)
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
else
iscsit_release_cmd(cmd);
@@ -3973,10 +3969,8 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
- se_cmd = &cmd->se_cmd;
- if (se_cmd->transport_wait_for_tasks)
- se_cmd->transport_wait_for_tasks(se_cmd, 1);
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock_bh(&conn->cmd_lock);
}
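On the iSCSI fabric side, the two hunks above reduce connection teardown to one question per command: commands backed by an active TMR go through the core via transport_generic_free_cmd(&cmd->se_cmd, 1), while everything else (including commands for which transport_lookup_cmd_lun() failed) stays on iscsit_release_cmd(); the pending-command loop now calls transport_generic_free_cmd() unconditionally instead of testing the old callback for NULL. Since the core wait path sleeps on a completion, both call sites keep dropping conn->cmd_lock around the call, as the hunks show. A hedged sketch of that per-command decision, with a demo_ stand-in type rather than the iSCSI structures:

#include <stdbool.h>
#include <stdio.h>

struct demo_cmd {
	bool tmr_req;		/* stands in for cmd->tmr_req */
};

/* Models the decision in iscsit_release_commands_from_conn() after this patch. */
static void demo_release_from_conn(struct demo_cmd *cmd)
{
	if (cmd->tmr_req)
		puts("transport_generic_free_cmd(&cmd->se_cmd, 1)");
	else
		puts("iscsit_release_cmd(cmd)");
}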
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 000356baf0b5..c3803b2fdd65 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -143,12 +143,10 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_del(&cmd->i_list);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1);
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -170,12 +168,10 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_del(&cmd->i_list);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1);
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -260,12 +256,10 @@ void iscsit_discard_cr_cmds_by_expstatsn(
iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1);
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -319,12 +313,10 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1);
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock_bh(&conn->cmd_lock);
}
spin_unlock_bh(&conn->cmd_lock);
@@ -378,12 +370,10 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1);
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -404,12 +394,10 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1);
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -434,9 +422,8 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
iscsit_free_all_datain_reqs(cmd);
- if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
- cmd->se_cmd.transport_wait_for_tasks)
- cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, 0);
+ if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
+ transport_wait_for_tasks(&cmd->se_cmd);
/*
* Add the struct iscsi_cmd to the connection recovery cmd list
*/
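The error-recovery paths above collapse to two idioms: commands that made it into the LUN layer (SCF_SE_LUN_CMD set) are freed through transport_generic_free_cmd(&cmd->se_cmd, 1), everything else through iscsit_release_cmd(); the reallegiance path additionally uses the now-exported transport_wait_for_tasks() to quiesce a command it intends to keep on the connection recovery list. A small self-contained sketch of those two idioms, with an illustrative flag value and demo_ stand-ins for the real structures:

#include <stdio.h>

#define DEMO_SCF_SE_LUN_CMD 0x01	/* illustrative, not the kernel value */

struct demo_cmd {
	unsigned int se_cmd_flags;
};

/* Free path used by the recovery-entry and discard loops above. */
static void demo_recovery_free(struct demo_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & DEMO_SCF_SE_LUN_CMD))
		puts("iscsit_release_cmd(cmd)");
	else
		puts("transport_generic_free_cmd(&cmd->se_cmd, 1)");
}

/* Quiesce-only path used before parking a command for reallegiance. */
static void demo_prepare_cmds_for_reallegiance(struct demo_cmd *cmd)
{
	if (cmd->se_cmd_flags & DEMO_SCF_SE_LUN_CMD)
		puts("transport_wait_for_tasks(&cmd->se_cmd)");
	/* the command is then moved to the recovery list, not freed */
}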
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ae1efc7c9ec6..f8de042e532e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1638,8 +1638,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
return 0;
}
-static void transport_generic_wait_for_tasks(struct se_cmd *, int);
-
/* transport_generic_allocate_tasks():
*
* Called from fabric RX Thread.
@@ -1651,12 +1649,6 @@ int transport_generic_allocate_tasks(
int ret;
transport_generic_prepare_cdb(cdb);
-
- /*
- * This is needed for early exceptions.
- */
- cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
-
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
@@ -1739,7 +1731,7 @@ int transport_handle_cdb_direct(
* Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
* transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
* in existing usage to ensure that outstanding descriptors are handled
- * correctly during shutdown via transport_generic_wait_for_tasks()
+ * correctly during shutdown via transport_wait_for_tasks()
*
* Also, we don't take cmd->t_state_lock here as we only expect
* this to be called for initial descriptor submission.
@@ -1819,11 +1811,6 @@ EXPORT_SYMBOL(transport_generic_handle_data);
int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
- /*
- * This is needed for early exceptions.
- */
- cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
-
transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
return 0;
}
@@ -2504,8 +2491,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
-static void transport_nop_wait_for_tasks(struct se_cmd *, int);
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2780,7 +2765,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
- cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
@@ -2881,8 +2865,6 @@ static int transport_generic_cmd_sequencer(
* Check for an existing UNIT ATTENTION condition
*/
if (core_scsi3_ua_check(cmd, cdb) < 0) {
- cmd->transport_wait_for_tasks =
- &transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
return -EINVAL;
@@ -2892,7 +2874,6 @@ static int transport_generic_cmd_sequencer(
*/
ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) {
- cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
/*
* Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
* The ALUA additional sense code qualifier (ASCQ) is determined
@@ -3396,7 +3377,6 @@ static int transport_generic_cmd_sequencer(
pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
cmd->se_tfo->get_fabric_name(), cdb[0]);
- cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
goto out_unsupported_cdb;
}
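With the function pointer gone, the exception paths in transport_generic_cmd_sequencer() and transport_handle_reservation_conflict() above no longer need to install transport_nop_wait_for_tasks(); instead, transport_wait_for_tasks() itself (see the hunks further down) returns early for commands that never became LUN/TMR commands or never passed SCF_SUPPORTED_SAM_OPCODE. A sketch of that guard, with illustrative flag values rather than the kernel's:

#include <stdbool.h>

#define DEMO_SCF_SE_LUN_CMD           0x01	/* illustrative values only */
#define DEMO_SCF_SUPPORTED_SAM_OPCODE 0x02

/* Returns false in the cases the old nop callback used to swallow. */
static bool demo_should_wait(unsigned int se_cmd_flags, bool has_tmr_req)
{
	if (!(se_cmd_flags & DEMO_SCF_SE_LUN_CMD) && !has_tmr_req)
		return false;	/* command never reached the LUN/TMR stage */
	if (!(se_cmd_flags & DEMO_SCF_SUPPORTED_SAM_OPCODE) && !has_tmr_req)
		return false;	/* rejected by the CDB sequencer */
	return true;
}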
@@ -4304,17 +4284,20 @@ EXPORT_SYMBOL(transport_release_cmd);
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+ if (wait_for_tasks && cmd->se_tmr_req)
+ transport_wait_for_tasks(cmd);
+
transport_release_cmd(cmd);
- else {
+ } else {
+ if (wait_for_tasks)
+ transport_wait_for_tasks(cmd);
+
core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
- if (wait_for_tasks && cmd->transport_wait_for_tasks)
- cmd->transport_wait_for_tasks(cmd, 0);
-
transport_free_dev_tasks(cmd);
transport_put_cmd(cmd);
@@ -4322,13 +4305,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
}
EXPORT_SYMBOL(transport_generic_free_cmd);
-static void transport_nop_wait_for_tasks(
- struct se_cmd *cmd,
- int remove_cmd)
-{
- return;
-}
-
/* transport_lun_wait_for_tasks():
*
* Called from ConfigFS context to stop the passed struct se_cmd to allow
@@ -4498,21 +4474,30 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
return 0;
}
-/* transport_generic_wait_for_tasks():
+/**
+ * transport_wait_for_tasks - wait for completion to occur
+ * @cmd: command to wait
*
- * Called from frontend or passthrough context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
+ * Called from frontend fabric context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
*/
-static void transport_generic_wait_for_tasks(
- struct se_cmd *cmd,
- int remove_cmd)
+void transport_wait_for_tasks(struct se_cmd *cmd)
{
unsigned long flags;
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
- return;
-
spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
+ /*
+ * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
+ * has been set in transport_set_supported_SAM_opcode().
+ */
+ if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
* sleep until the connection can have the passed struct se_cmd back.
@@ -4552,8 +4537,10 @@ static void transport_generic_wait_for_tasks(
atomic_set(&cmd->transport_lun_stop, 0);
}
if (!atomic_read(&cmd->t_transport_active) ||
- atomic_read(&cmd->t_transport_aborted))
- goto remove;
+ atomic_read(&cmd->t_transport_aborted)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
atomic_set(&cmd->t_transport_stop, 1);
@@ -4576,13 +4563,10 @@ static void transport_generic_wait_for_tasks(
pr_debug("wait_for_tasks: Stopped wait_for_compltion("
"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
cmd->se_tfo->get_task_tag(cmd));
-remove:
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (!remove_cmd)
- return;
- transport_generic_free_cmd(cmd, 0);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+EXPORT_SYMBOL(transport_wait_for_tasks);
static int transport_get_sense_codes(
struct se_cmd *cmd,