Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c  255
1 file changed, 146 insertions(+), 109 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 2e89040e63be..a7422e52d883 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -141,7 +141,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- len = sizeof(struct iwl_cmd) * q->n_window;
+ len = sizeof(struct iwl_device_cmd) * q->n_window;
/* De-alloc array of command/tx buffers */
for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -156,6 +156,12 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
kfree(txq->txb);
txq->txb = NULL;
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
}
@@ -179,7 +185,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
if (q->n_bd == 0)
return;
- len = sizeof(struct iwl_cmd) * q->n_window;
+ len = sizeof(struct iwl_device_cmd) * q->n_window;
len += IWL_MAX_SCAN_SIZE;
/* De-alloc array of command/tx buffers */
@@ -318,6 +324,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
{
int i, len;
int ret;
+ int actual_slots = slots_num;
/*
* Alloc buffer array for commands (Tx or other types of commands).
@@ -327,14 +334,22 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
* For normal Tx queues (all other queues), no super-size command
* space is needed.
*/
- len = sizeof(struct iwl_cmd);
- for (i = 0; i <= slots_num; i++) {
- if (i == slots_num) {
- if (txq_id == IWL_CMD_QUEUE_NUM)
- len += IWL_MAX_SCAN_SIZE;
- else
- continue;
- }
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ actual_slots++;
+
+ txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+ GFP_KERNEL);
+ txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+ GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto out_free_arrays;
+
+ len = sizeof(struct iwl_device_cmd);
+ for (i = 0; i < actual_slots; i++) {
+ /* only happens for cmd queue */
+ if (i == slots_num)
+ len += IWL_MAX_SCAN_SIZE;
txq->cmd[i] = kmalloc(len, GFP_KERNEL);
if (!txq->cmd[i])
@@ -348,6 +363,10 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
txq->need_update = 0;
+ /* aggregation TX queues will get their ID when aggregation begins */
+ if (txq_id <= IWL_TX_FIFO_AC3)
+ txq->swq_id = txq_id;
+
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
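A side note on the BUILD_BUG_ON just above, since both wrap helpers depend on it: with a power-of-two queue size, wrapping an index is a single mask. Sketch only; the helper body below is an assumption modeled on the usual idiom, not quoted from this patch:

/* x & (x - 1) == 0 exactly when x is a power of two, which is what the
 * BUILD_BUG_ON enforces at compile time.  Given that, the wrap helpers
 * can mask instead of taking a modulo: */
static inline int queue_inc_wrap(int index, int n_bd)
{
	return ++index & (n_bd - 1);	/* correct only when n_bd is 2^k */
}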
@@ -360,15 +379,12 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
return 0;
err:
- for (i = 0; i < slots_num; i++) {
+ for (i = 0; i < actual_slots; i++)
kfree(txq->cmd[i]);
- txq->cmd[i] = NULL;
- }
- if (txq_id == IWL_CMD_QUEUE_NUM) {
- kfree(txq->cmd[slots_num]);
- txq->cmd[slots_num] = NULL;
- }
+out_free_arrays:
+ kfree(txq->meta);
+ kfree(txq->cmd);
return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);
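On the swq_id lines added above: a normal queue's software queue ID is simply its own number, fixed at init time, while an aggregation queue only gets one when aggregation starts (see the iwl_tx_agg_start hunk further down), where iwl_virtual_agg_queue_num() packs the hardware queue together with the access class. The packing itself is not shown in this diff; a plausible sketch, assuming a low-two-bits-AC encoding:

/* Assumed encoding (helper body not part of this patch): bits [1:0]
 * carry the AC so the TX path can recover the mac80211 queue, the
 * upper bits carry the hardware aggregation queue number. */
static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq)
{
	return (hwq << 2) | (ac & 0x3);
}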
@@ -550,62 +566,81 @@ static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
- __le16 fc, int sta_id,
- int is_hcca)
+ __le16 fc, int is_hcca)
{
- u32 rate_flags = 0;
+ u32 rate_flags;
int rate_idx;
- u8 rts_retry_limit = 0;
- u8 data_retry_limit = 0;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
u8 rate_plcp;
- rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
- IWL_RATE_COUNT - 1);
-
- rate_plcp = iwl_rates[rate_idx].plcp;
-
- rts_retry_limit = (is_hcca) ?
- RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
-
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
-
- if (ieee80211_is_probe_resp(fc)) {
- data_retry_limit = 3;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- } else
- data_retry_limit = IWL_DEFAULT_TX_RETRY;
-
+ /* Set retry limit on DATA packets and Probe Responses*/
if (priv->data_retry_limit != -1)
data_retry_limit = priv->data_retry_limit;
+ else if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IWL_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
+ RTS_DFAULT_RETRY_LIMIT;
+ if (data_retry_limit < rts_retry_limit)
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
- } else {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
- tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
- }
- break;
- default:
- break;
- }
+ return;
+ }
- priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
- rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+ /**
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * index is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+ (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+ rate_idx = rate_lowest_index(&priv->bands[info->band],
+ info->control.sta);
+ /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up RTS and CTS flags for certain packets */
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
+ }
+ break;
+ default:
+ break;
}
- tx_cmd->rts_retry_limit = rts_retry_limit;
- tx_cmd->data_retry_limit = data_retry_limit;
+ /* Set up antennas */
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+ rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+ /* Set the rate in the TX cmd */
tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
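One gloss on the new rate fallback above: mac80211 hands over a band-local rate index, while iwl_rates[] is a single table with the CCK rates ahead of the OFDM rates, so 5 GHz indices need shifting before the PLCP lookup. Illustrative only, assuming the conventional layout (CCK at driver indices 0..3, IWL_FIRST_OFDM_RATE == 4); the helper name is made up for illustration:

/* Example: in the 5 GHz band, mac80211 index 0 means the first OFDM
 * rate (6 Mbps).  With the assumed layout the remap gives
 *     driver_idx = 0 + IWL_FIRST_OFDM_RATE = 4,
 * and iwl_rates[4].plcp is then the 6 Mbps PLCP value. */
static inline int iwl_5ghz_to_driver_idx(int band_idx)
{
	return band_idx + IWL_FIRST_OFDM_RATE;	/* band-local -> driver table */
}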
@@ -652,14 +687,6 @@ static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
}
}
-static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
-{
- /* 0 - mgmt, 1 - cnt, 2 - data */
- int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
- priv->tx_stats[idx].cnt++;
- priv->tx_stats[idx].bytes += len;
-}
-
/*
* start REPLY_TX command process
*/
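The dropped helper deserves one note, since its replacement hides the trick: the 802.11 frame-type field sits in bits 2-3 of frame_control, so a two-bit shift turns it straight into an array index. Worked out with the mac80211 constants:

/* IEEE80211_FCTL_FTYPE == 0x000c, and the three frame types are
 *   IEEE80211_FTYPE_MGMT == 0x0000, IEEE80211_FTYPE_CTL == 0x0004,
 *   IEEE80211_FTYPE_DATA == 0x0008,
 * so the old helper's index evaluated to 0, 1 or 2 (the "cnt" in its
 * comment meant "ctl"): */
int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;	/* 0 - mgmt, 1 - ctl, 2 - data */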
@@ -669,7 +696,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_tx_queue *txq;
struct iwl_queue *q;
- struct iwl_cmd *out_cmd;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
struct iwl_tx_cmd *tx_cmd;
int swq_id, txq_id;
dma_addr_t phys_addr;
@@ -692,12 +720,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
goto drop_unlock;
}
- if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
- IWL_INVALID_RATE) {
- IWL_ERR(priv, "ERROR: No TX rate available.\n");
- goto drop_unlock;
- }
-
fc = hdr->frame_control;
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -709,10 +731,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
- /* drop all data frame if we are not associated */
+ /* drop all non-injected data frame if we are not associated */
if (ieee80211_is_data(fc) &&
- (!iwl_is_monitor_mode(priv) ||
- !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(!iwl_is_associated(priv) ||
((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
!priv->assoc_station_added)) {
@@ -723,7 +744,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find (or create) index into station table for destination station */
- sta_id = iwl_get_sta_id(priv, hdr);
+ if (info->flags & IEEE80211_TX_CTL_INJECTED)
+ sta_id = priv->hw_params.bcast_sta_id;
+ else
+ sta_id = iwl_get_sta_id(priv, hdr);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -732,11 +756,12 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
- swq_id = skb_get_queue_mapping(skb);
- txq_id = swq_id;
+ txq_id = skb_get_queue_mapping(skb);
if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (unlikely(tid >= MAX_TID_COUNT))
+ goto drop_unlock;
seq_number = priv->stations[sta_id].tid[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl = hdr->seq_ctrl &
@@ -744,15 +769,13 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
- }
}
txq = &priv->txq[txq_id];
+ swq_id = txq->swq_id;
q = &txq->q;
- txq->swq_id = swq_id;
if (unlikely(iwl_queue_space(q) < q->high_mark))
goto drop_unlock;
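On the sequence-number handling kept in context above: seq_ctrl packs a 4-bit fragment number below a 12-bit sequence number, which is why the per-TID counter advances by 0x10 per frame. A sketch with the mac80211 masks (the masking line is reconstructed here, since the hunk boundary cuts it off):

/* seq_ctrl layout: bits [15:4] sequence number, bits [3:0] fragment
 * number (IEEE80211_SCTL_SEQ == 0xfff0, IEEE80211_SCTL_FRAG == 0x000f).
 * The stored counter keeps only sequence bits, the frame's fragment
 * bits are merged back in, and += 0x10 bumps the sequence field by one. */
hdr->seq_ctrl = (hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) |
		cpu_to_le16(seq_number);
seq_number += 0x10;	/* next frame: sequence number + 1 */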
@@ -766,6 +789,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
tx_cmd = &out_cmd->cmd.tx;
memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -793,12 +817,12 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* TODO need this for burst mode later on */
iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+ iwl_dbg_log_tx_data_frame(priv, len, hdr);
/* set is_hcca to 0; it probably will never be implemented */
- iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
-
- iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
+ iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
+ iwl_update_stats(priv, true, fc, len);
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -828,8 +852,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txcmd_phys = pci_map_single(priv->pci_dev,
&out_cmd->hdr, len,
PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
- pci_unmap_len_set(&out_cmd->meta, len, len);
+ pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ pci_unmap_len_set(out_meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
@@ -922,7 +946,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
- struct iwl_cmd *out_cmd;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
dma_addr_t phys_addr;
unsigned long flags;
int len, ret;
@@ -936,25 +961,32 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
* the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
* we will need to increase the size of the TFD entries */
BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
- !(cmd->meta.flags & CMD_SIZE_HUGE));
+ !(cmd->flags & CMD_SIZE_HUGE));
if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_INFO(priv, "Not sending command - RF KILL");
+ IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
return -EIO;
}
- if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
IWL_ERR(priv, "No space for Tx\n");
return -ENOSPC;
}
spin_lock_irqsave(&priv->hcmd_lock, flags);
- idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
+ idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ out_meta->flags = cmd->flags;
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+ if (cmd->flags & CMD_ASYNC)
+ out_meta->callback = cmd->callback;
out_cmd->hdr.cmd = cmd->id;
- memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
/* At this point, the out_cmd now has all of the incoming cmd
@@ -963,9 +995,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
out_cmd->hdr.flags = 0;
out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
INDEX_TO_SEQ(q->write_ptr));
- if (out_cmd->meta.flags & CMD_SIZE_HUGE)
+ if (cmd->flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
- len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta);
+ len = sizeof(struct iwl_device_cmd);
len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
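The CMD_SIZE_HUGE paths above funnel through get_cmd_index(). Only its use is visible in this patch; a sketch of the helper under the usual convention (the body is an assumption): the huge command always lands in the single extra slot allocated past the normal window, which is why len grows by IWL_MAX_SCAN_SIZE exactly when idx == TFD_CMD_SLOTS.

static inline int get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
{
	if (is_huge)
		return q->n_window;	/* the one oversized slot */
	return index & (q->n_window - 1);	/* n_window assumed 2^k */
}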
@@ -997,8 +1029,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
fix_size, PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
- pci_unmap_len_set(&out_cmd->meta, len, fix_size);
+ pci_unmap_addr_set(out_meta, mapping, phys_addr);
+ pci_unmap_len_set(out_meta, len, fix_size);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, fix_size, 1,
@@ -1067,8 +1099,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
}
pci_unmap_single(priv->pci_dev,
- pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
- pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
+ pci_unmap_addr(&txq->meta[cmd_idx], mapping),
+ pci_unmap_len(&txq->meta[cmd_idx], len),
PCI_DMA_BIDIRECTIONAL);
for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
@@ -1099,7 +1131,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
int index = SEQ_TO_INDEX(sequence);
int cmd_index;
bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
- struct iwl_cmd *cmd;
+ struct iwl_device_cmd *cmd;
+ struct iwl_cmd_meta *meta;
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
@@ -1109,24 +1142,24 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
txq_id, sequence,
priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
- iwl_print_hex_dump(priv, IWL_DL_INFO , rxb, 32);
+ iwl_print_hex_error(priv, pkt, 32);
return;
}
cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
+ meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
/* Input error checking is done when commands are added to queue. */
- if (cmd->meta.flags & CMD_WANT_SKB) {
- cmd->meta.source->u.skb = rxb->skb;
- rxb->skb = NULL;
- } else if (cmd->meta.u.callback &&
- !cmd->meta.u.callback(priv, cmd, rxb->skb))
+ if (meta->flags & CMD_WANT_SKB) {
+ meta->source->reply_skb = rxb->skb;
rxb->skb = NULL;
+ } else if (meta->callback)
+ meta->callback(priv, cmd, rxb->skb);
iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
- if (!(cmd->meta.flags & CMD_ASYNC)) {
+ if (!(meta->flags & CMD_ASYNC)) {
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
}
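The completion path now gets everything driver-side from txq->meta[] instead of the DMA-visible command. Pieced together from the fields this diff touches, the meta entry presumably looks roughly like this (a reconstruction from usage, not a quote from iwl-dev.h):

/* Reconstructed from usage in this patch; the real layout lives in
 * iwl-dev.h. */
struct iwl_cmd_meta {
	struct iwl_host_cmd *source;		/* set when CMD_WANT_SKB */
	void (*callback)(struct iwl_priv *priv,
			 struct iwl_device_cmd *cmd,
			 struct sk_buff *skb);	/* set when CMD_ASYNC */
	u32 flags;				/* copied from cmd->flags */
	DECLARE_PCI_UNMAP_ADDR(mapping);	/* for pci_unmap_addr_set() */
	DECLARE_PCI_UNMAP_LEN(len);
};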
@@ -1189,6 +1222,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
tid_data = &priv->stations[sta_id].tid[tid];
*ssn = SEQ_TO_SN(tid_data->seq_number);
tid_data->agg.txq_id = txq_id;
+ priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
spin_unlock_irqrestore(&priv->sta_lock, flags);
ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
@@ -1221,6 +1255,9 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
return -EINVAL;
}
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
tx_fifo_id = default_tid_to_tx_fifo[tid];
else