author		Sara Sharon	2018-10-25 19:11:51 +0200
committer	Luca Coelho	2019-01-29 15:10:30 +0100
commit		fba8248e7e67b7e1098e69284aeccbcb2110fa86 (patch)
tree		df2f14d8f0f460e29e6e6eb3c0c4a7af2d579ec3	/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
parent		iwlwifi: mvm: fix A-MPDU reference assignment (diff)
iwlwifi: mvm: get rid of tx_path_lock
The TX path lock was introduced in order to prevent out-of-order invocations of TX. This can happen in the following flow:

TX path invoked from net dev
	Packet dequeued
				TX path invoked from RX path
				Packet dequeued
				Packet TXed
	Packet TXed

However, we don't really need a lock. If the TX path is already invoked from some location, other paths can simply abort their execution instead of waiting for the first path to finish, only to then discover that the queue is (likely) empty or stopped.

Replace the lock with an atomic variable that tracks TX ownership. This simplifies the locking dependencies between the RX and TX paths, and should improve performance.

Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
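The diff below only initializes the new counter; the consumer side lives in iwl_mvm_mac_itxq_xmit(), outside the file shown here. As a rough illustration of the ownership scheme described above (txq_example and example_itxq_xmit are made-up names for this sketch, not the driver's), the pattern with standard kernel atomics could look like this:

	/*
	 * Sketch only: the first caller to raise tx_request from 0 owns the
	 * TX path; later callers bump it to at most 2 and bail out, which
	 * tells the owner to walk the queue one more time before releasing
	 * ownership, so no frame is left pending.
	 */
	#include <linux/atomic.h>

	struct txq_example {
		atomic_t tx_request;	/* 0: idle, 1: TX in progress, 2: re-check requested */
	};

	static void example_itxq_xmit(struct txq_example *q)
	{
		/* Someone else is already transmitting: record the request and leave. */
		if (atomic_fetch_add_unless(&q->tx_request, 1, 2))
			return;

		do {
			/* dequeue and transmit frames from the queue here */
		} while (atomic_dec_return(&q->tx_request));
	}

Compared with the old tx_path_lock, a caller that loses the race no longer spins with bottom halves disabled; it simply returns, and the current owner is guaranteed to re-scan the queue before dropping ownership.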
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/sta.c')
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/sta.c	4
1 file changed, 1 insertion, 3 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 5f42897dcd55..c5a01470a3bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1403,9 +1403,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 		iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
 		list_del_init(&mvmtxq->list);
 
-		local_bh_disable();
 		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
-		local_bh_enable();
 	}
 
 	mutex_unlock(&mvm->mutex);
@@ -1646,7 +1644,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 
 		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
 		INIT_LIST_HEAD(&mvmtxq->list);
-		spin_lock_init(&mvmtxq->tx_path_lock);
+		atomic_set(&mvmtxq->tx_request, 0);
 	}
 
 	mvm_sta->agg_tids = 0;