author	David S. Miller	2017-08-07 20:34:41 +0200
committer	David S. Miller	2017-08-07 20:37:47 +0200
commit	46d4b68f891bee5d83a32508bfbd9778be6b1b63 (patch)
tree	3c05e6c6285102eef6f6756bd692e86ae86f9b30 /drivers/net/wireless/intel/iwlwifi/mvm/fw.c
parent	hns3: fix unused function warning (diff)
parent	rtlwifi: Replace hardcode value with macro (diff)
Merge tag 'wireless-drivers-next-for-davem-2017-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next
Kalle Valo says:

====================
wireless-drivers-next patches for 4.14

The first wireless-drivers-next pull request for 4.14. I'm submitting this unusually late in the cycle as my vacation postponed it. But even though it's late, there still aren't that many new features; it's mostly cleanups and fixes.

Major changes:

ath10k
* preparation for wcn3990 support

iwlwifi
* Reorganization of the code into separate directories continues

qtnfmac
* regulatory support updates
* add get_channel, dump_survey and channel_switch cfg80211 handlers
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/fw.c')
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/fw.c	484
1 file changed, 23 insertions, 461 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 79e7a7a285dc..875cf3a60adb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -78,7 +78,7 @@
#include "iwl-eeprom-parse.h"
#include "mvm.h"
-#include "fw-dbg.h"
+#include "fw/dbg.h"
#include "iwl-phy-db.h"
#define MVM_UCODE_ALIVE_TIMEOUT HZ
@@ -144,134 +144,6 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
return ret;
}
-void iwl_free_fw_paging(struct iwl_mvm *mvm)
-{
- int i;
-
- if (!mvm->fw_paging_db[0].fw_paging_block)
- return;
-
- for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
- struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];
-
- if (!paging->fw_paging_block) {
- IWL_DEBUG_FW(mvm,
- "Paging: block %d already freed, continue to next page\n",
- i);
-
- continue;
- }
- dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
- paging->fw_paging_size, DMA_BIDIRECTIONAL);
-
- __free_pages(paging->fw_paging_block,
- get_order(paging->fw_paging_size));
- paging->fw_paging_block = NULL;
- }
- kfree(mvm->trans->paging_download_buf);
- mvm->trans->paging_download_buf = NULL;
- mvm->trans->paging_db = NULL;
-
- memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
-}
-
-static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
-{
- int sec_idx, idx;
- u32 offset = 0;
-
- /*
- * find the paging image start point:
- * if CPU2 exists and is in paging format, then the image looks like:
- * CPU1 sections (2 or more)
- * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
- * CPU2 sections (not paged)
- * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
- * CPU2 sections from the CPU2 paging section
- * CPU2 paging CSS
- * CPU2 paging image (including instructions and data)
- */
- for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
- if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
- sec_idx++;
- break;
- }
- }
-
- /*
- * If paging is enabled there should be at least 2 more sections left
- * (one for CSS and one for Paging data)
- */
- if (sec_idx >= image->num_sec - 1) {
- IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
- iwl_free_fw_paging(mvm);
- return -EINVAL;
- }
-
- /* copy the CSS block to the dram */
- IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
- sec_idx);
-
- memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
- image->sec[sec_idx].data,
- mvm->fw_paging_db[0].fw_paging_size);
- dma_sync_single_for_device(mvm->trans->dev,
- mvm->fw_paging_db[0].fw_paging_phys,
- mvm->fw_paging_db[0].fw_paging_size,
- DMA_BIDIRECTIONAL);
-
- IWL_DEBUG_FW(mvm,
- "Paging: copied %d CSS bytes to first block\n",
- mvm->fw_paging_db[0].fw_paging_size);
-
- sec_idx++;
-
- /*
- * copy the paging blocks to the dram
- * the loop index starts from 1 since the CSS block was already copied
- * to dram and sits at index 0.
- * the loop stops before num_of_paging_blk since the last block may not
- * be full; it is copied separately below.
- */
- for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
- struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
-
- memcpy(page_address(block->fw_paging_block),
- image->sec[sec_idx].data + offset,
- block->fw_paging_size);
- dma_sync_single_for_device(mvm->trans->dev,
- block->fw_paging_phys,
- block->fw_paging_size,
- DMA_BIDIRECTIONAL);
-
-
- IWL_DEBUG_FW(mvm,
- "Paging: copied %d paging bytes to block %d\n",
- mvm->fw_paging_db[idx].fw_paging_size,
- idx);
-
- offset += mvm->fw_paging_db[idx].fw_paging_size;
- }
-
- /* copy the last paging block */
- if (mvm->num_of_pages_in_last_blk > 0) {
- struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
-
- memcpy(page_address(block->fw_paging_block),
- image->sec[sec_idx].data + offset,
- FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
- dma_sync_single_for_device(mvm->trans->dev,
- block->fw_paging_phys,
- block->fw_paging_size,
- DMA_BIDIRECTIONAL);
-
- IWL_DEBUG_FW(mvm,
- "Paging: copied %d pages in the last block %d\n",
- mvm->num_of_pages_in_last_blk, idx);
- }
-
- return 0;
-}
-
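To make the section scan in the deleted iwl_fill_paging_mem() above concrete, here is a minimal userspace sketch. The separator values below mirror those in the iwlwifi headers, and struct fw_sec is a simplified stand-in for the real section descriptor:

    #include <stdio.h>

    #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
    #define PAGING_SEPARATOR_SECTION    0xAAAABBBB

    struct fw_sec { unsigned int offset; }; /* simplified stand-in */

    /* return the index of the paging CSS section, or -1 if paging is absent */
    static int find_paging_start(const struct fw_sec *sec, int num_sec)
    {
        int sec_idx;

        for (sec_idx = 0; sec_idx < num_sec; sec_idx++) {
            if (sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
                sec_idx++;
                break;
            }
        }

        /* paging needs at least two more sections: one CSS, one data */
        if (sec_idx >= num_sec - 1)
            return -1;

        return sec_idx;
    }

    int main(void)
    {
        /* CPU1, separator, CPU2, paging separator, CSS, paging data */
        struct fw_sec img[] = {
            { 0x0 }, { CPU1_CPU2_SEPARATOR_SECTION }, { 0x40000 },
            { PAGING_SEPARATOR_SECTION }, { 0x0 }, { 0x1000 },
        };

        printf("paging CSS at section %d\n", find_paging_start(img, 6));
        return 0; /* prints: paging CSS at section 4 */
    }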
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -293,178 +165,6 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
le32_to_cpu(dump_data[i]));
}
-static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
- const struct fw_img *image)
-{
- struct page *block;
- dma_addr_t phys = 0;
- int blk_idx, order, num_of_pages, size, dma_enabled;
-
- if (mvm->fw_paging_db[0].fw_paging_block)
- return 0;
-
- dma_enabled = is_device_dma_capable(mvm->trans->dev);
-
- /* ensure PAGING_BLOCK_SIZE is 2^BLOCK_2_EXP_SIZE */
- BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
-
- num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
- mvm->num_of_paging_blk =
- DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
- mvm->num_of_pages_in_last_blk =
- num_of_pages -
- NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
-
- IWL_DEBUG_FW(mvm,
- "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
- mvm->num_of_paging_blk,
- mvm->num_of_pages_in_last_blk);
-
- /*
- * Allocate CSS and paging blocks in dram.
- */
- for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
- /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
- size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
- order = get_order(size);
- block = alloc_pages(GFP_KERNEL, order);
- if (!block) {
- /* free all the previous pages since we failed */
- iwl_free_fw_paging(mvm);
- return -ENOMEM;
- }
-
- mvm->fw_paging_db[blk_idx].fw_paging_block = block;
- mvm->fw_paging_db[blk_idx].fw_paging_size = size;
-
- if (dma_enabled) {
- phys = dma_map_page(mvm->trans->dev, block, 0,
- PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(mvm->trans->dev, phys)) {
- /*
- * free the previous pages and the current one
- * since we failed to map_page.
- */
- iwl_free_fw_paging(mvm);
- return -ENOMEM;
- }
- mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
- } else {
- mvm->fw_paging_db[blk_idx].fw_paging_phys =
- PAGING_ADDR_SIG |
- blk_idx << BLOCK_2_EXP_SIZE;
- }
-
- if (!blk_idx)
- IWL_DEBUG_FW(mvm,
- "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
- order);
- else
- IWL_DEBUG_FW(mvm,
- "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
- order);
- }
-
- return 0;
-}
-
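A worked example of the block-count arithmetic in the deleted iwl_alloc_fw_paging_mem() above; a standalone sketch whose constants mirror the iwlwifi headers (FW_PAGING_SIZE = 4 KiB, NUM_OF_PAGE_PER_GROUP = 8 pages per 32 KiB block) with a hypothetical paging image size:

    #include <stdio.h>

    #define FW_PAGING_SIZE        (1 << 12) /* 4 KiB per firmware page */
    #define NUM_OF_PAGE_PER_GROUP 8         /* pages per 32 KiB block */
    #define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int paging_mem_size = 340 * 1024; /* hypothetical image size */

        unsigned int num_of_pages = paging_mem_size / FW_PAGING_SIZE;
        unsigned int num_of_paging_blk =
            DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
        unsigned int num_of_pages_in_last_blk =
            num_of_pages - NUM_OF_PAGE_PER_GROUP * (num_of_paging_blk - 1);

        /* 85 pages -> 11 blocks, with 5 pages in the last block */
        printf("pages=%u blocks=%u last=%u\n", num_of_pages,
               num_of_paging_blk, num_of_pages_in_last_blk);
        return 0;
    }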
-static int iwl_save_fw_paging(struct iwl_mvm *mvm,
- const struct fw_img *fw)
-{
- int ret;
-
- ret = iwl_alloc_fw_paging_mem(mvm, fw);
- if (ret)
- return ret;
-
- return iwl_fill_paging_mem(mvm, fw);
-}
-
-/* send paging cmd to FW in case CPU2 has paging image */
-static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
-{
- struct iwl_fw_paging_cmd paging_cmd = {
- .flags = cpu_to_le32(PAGING_CMD_IS_SECURED |
- PAGING_CMD_IS_ENABLED |
- (mvm->num_of_pages_in_last_blk <<
- PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
- .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
- .block_num = cpu_to_le32(mvm->num_of_paging_blk),
- };
- int blk_idx;
-
- /* loop over all paging blocks + the CSS block */
- for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
- dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
- __le32 phy_addr;
-
- addr = addr >> PAGE_2_EXP_SIZE;
- phy_addr = cpu_to_le32(addr);
- paging_cmd.device_phy_addr[blk_idx] = phy_addr;
- }
-
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
- IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(paging_cmd), &paging_cmd);
-}
-
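The address packing in the deleted iwl_send_paging_cmd() above is a plain right shift: the firmware takes block addresses in page units, so only the bits above PAGE_2_EXP_SIZE survive. A standalone sketch with a hypothetical DMA address (PAGE_2_EXP_SIZE = 12 mirrors the iwlwifi headers; the cpu_to_le32() byte-swap is omitted here):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_2_EXP_SIZE 12 /* firmware addresses are in 4 KiB units */

    int main(void)
    {
        /* hypothetical page-aligned DMA address of one paging block */
        uint64_t fw_paging_phys = 0x00000001f3a40000ULL;

        /* drop the low 12 bits; they must be zero for an aligned block */
        uint32_t device_phy_addr = (uint32_t)(fw_paging_phys >> PAGE_2_EXP_SIZE);

        printf("cpu 0x%llx -> fw 0x%x\n",
               (unsigned long long)fw_paging_phys, device_phy_addr);
        return 0;
    }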
-/*
- * Send the paging item cmd (FW_GET_ITEM_CMD) to the FW to retrieve the
- * paging SMEM address, in case CPU2 has a paging image
- */
-static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
-{
- int ret;
- struct iwl_fw_get_item_cmd fw_get_item_cmd = {
- .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
- };
-
- struct iwl_fw_get_item_resp *item_resp;
- struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
- .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
- .data = { &fw_get_item_cmd, },
- };
-
- cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- if (ret) {
- IWL_ERR(mvm,
- "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
- ret);
- return ret;
- }
-
- item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
- if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
- IWL_ERR(mvm,
- "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
- le32_to_cpu(item_resp->item_id));
- ret = -EIO;
- goto exit;
- }
-
- /* Add an extra page for headers */
- mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
- FW_PAGING_SIZE,
- GFP_KERNEL);
- if (!mvm->trans->paging_download_buf) {
- ret = -ENOMEM;
- goto exit;
- }
- mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
- mvm->trans->paging_db = mvm->fw_paging_db;
- IWL_DEBUG_FW(mvm,
- "Paging: got paging request address (paging_req_addr 0x%08x)\n",
- mvm->trans->paging_req_addr);
-
-exit:
- iwl_free_resp(&cmd);
-
- return ret;
-}
-
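The response handling in the deleted iwl_trans_get_paging_item() above follows a check-the-echo pattern: reject the reply unless it names the item that was requested. A userspace sketch with a simplified stand-in for iwl_fw_get_item_resp (the item-id value and struct layout here are illustrative only, and endianness conversion is omitted):

    #include <stdint.h>
    #include <stdio.h>

    #define FW_ITEM_ID_PAGING 3 /* hypothetical value, for illustration */

    struct fw_get_item_resp {   /* simplified stand-in */
        uint32_t item_id;
        uint32_t item_val;      /* SMEM address used as paging_req_addr */
    };

    /* trust item_val only after the firmware echoes the requested item */
    static int parse_paging_item(const struct fw_get_item_resp *resp,
                                 uint32_t *req_addr)
    {
        if (resp->item_id != FW_ITEM_ID_PAGING)
            return -1; /* the driver returns -EIO in this case */

        *req_addr = resp->item_val;
        return 0;
    }

    int main(void)
    {
        struct fw_get_item_resp resp = { FW_ITEM_ID_PAGING, 0x00800000 };
        uint32_t addr;

        if (!parse_paging_item(&resp, &addr))
            printf("paging_req_addr 0x%08x\n", addr);
        return 0;
    }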
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
@@ -544,48 +244,6 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return false;
}
-static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
-{
- const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
- int ret;
-
- /*
- * Configure and operate fw paging mechanism.
- * The driver configures the paging flow only once.
- * The CPU2 paging image is included in the IWL_UCODE_INIT image.
- */
- if (!fw->paging_mem_size)
- return 0;
-
- /*
- * When dma is not enabled, the driver needs to copy / write
- * the downloaded / uploaded page to / from the smem.
- * This gets the location where the pages are stored.
- */
- if (!is_device_dma_capable(mvm->trans->dev)) {
- ret = iwl_trans_get_paging_item(mvm);
- if (ret) {
- IWL_ERR(mvm, "failed to get FW paging item\n");
- return ret;
- }
- }
-
- ret = iwl_save_fw_paging(mvm, fw);
- if (ret) {
- IWL_ERR(mvm, "failed to save the FW paging image\n");
- return ret;
- }
-
- ret = iwl_send_paging_cmd(mvm, fw);
- if (ret) {
- IWL_ERR(mvm, "failed to send the paging cmd\n");
- iwl_free_fw_paging(mvm);
- return ret;
- }
-
- return 0;
-}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
@@ -593,7 +251,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
struct iwl_mvm_alive_data alive_data;
const struct fw_img *fw;
int ret, i;
- enum iwl_ucode_type old_type = mvm->cur_ucode;
+ enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
struct iwl_sf_region st_fwrd_space;
@@ -606,7 +264,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
fw = iwl_get_ucode_image(mvm->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
- mvm->cur_ucode = ucode_type;
+ iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
@@ -615,7 +273,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
if (ret) {
- mvm->cur_ucode = old_type;
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
iwl_remove_notification(&mvm->notif_wait, &alive_wait);
return ret;
}
@@ -639,13 +297,13 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, SB_CPU_1_STATUS),
iwl_read_prph(trans, SB_CPU_2_STATUS));
- mvm->cur_ucode = old_type;
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
return ret;
}
if (!alive_data.valid) {
IWL_ERR(mvm, "Loaded ucode is not valid!\n");
- mvm->cur_ucode = old_type;
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
return -EIO;
}
@@ -673,10 +331,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
- if (iwl_mvm_is_dqa_supported(mvm))
- mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
- else
- mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
+ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@ -774,7 +429,7 @@ error:
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
struct iwl_phy_cfg_cmd phy_cfg_cmd;
- enum iwl_ucode_type ucode_type = mvm->cur_ucode;
+ enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
/* Set parameters */
phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
@@ -799,7 +454,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
};
int ret;
- if (iwl_mvm_has_new_tx_api(mvm))
+ if (iwl_mvm_has_unified_ucode(mvm))
return iwl_run_unified_mvm_ucode(mvm, true);
lockdep_assert_held(&mvm->mutex);
@@ -910,95 +565,6 @@ out:
return ret;
}
-static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
- struct iwl_rx_packet *pkt)
-{
- struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
- int i, lmac;
- int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
-
- if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
- return;
-
- mvm->smem_cfg.num_lmacs = lmac_num;
- mvm->smem_cfg.num_txfifo_entries =
- ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
- mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
-
- for (lmac = 0; lmac < lmac_num; lmac++) {
- struct iwl_shared_mem_lmac_cfg *lmac_cfg =
- &mem_cfg->lmac_smem[lmac];
-
- for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
- mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
- le32_to_cpu(lmac_cfg->txfifo_size[i]);
- mvm->smem_cfg.lmac[lmac].rxfifo1_size =
- le32_to_cpu(lmac_cfg->rxfifo1_size);
- }
-}
-
-static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
- struct iwl_rx_packet *pkt)
-{
- struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data;
- int i;
-
- mvm->smem_cfg.num_lmacs = 1;
-
- mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
- for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
- mvm->smem_cfg.lmac[0].txfifo_size[i] =
- le32_to_cpu(mem_cfg->txfifo_size[i]);
-
- mvm->smem_cfg.lmac[0].rxfifo1_size =
- le32_to_cpu(mem_cfg->rxfifo_size[0]);
- mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
-
- /* new API has more data, from rxfifo_addr field and on */
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
- BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
- sizeof(mem_cfg->internal_txfifo_size));
-
- for (i = 0;
- i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
- i++)
- mvm->smem_cfg.internal_txfifo_size[i] =
- le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
- }
-}
-
-static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
-{
- struct iwl_host_cmd cmd = {
- .flags = CMD_WANT_SKB,
- .data = { NULL, },
- .len = { 0, },
- };
- struct iwl_rx_packet *pkt;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
- cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
- else
- cmd.id = SHARED_MEM_CFG;
-
- if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
- return;
-
- pkt = cmd.resp_pkt;
- if (iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_parse_shared_mem_a000(mvm, pkt);
- else
- iwl_mvm_parse_shared_mem(mvm, pkt);
-
- IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
-
- iwl_free_resp(&cmd);
-}
-
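One detail worth noting in the deleted iwl_mvm_parse_shared_mem_a000() above: lmac_num arrives from the firmware, so it is bounds-checked before it becomes a loop limit. A standalone sketch of that defensive pattern, with simplified stand-in types (MAX_LMACS here plays the role of ARRAY_SIZE(mem_cfg->lmac_smem)):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_LMACS 2 /* stand-in for ARRAY_SIZE(mem_cfg->lmac_smem) */

    struct lmac_cfg { uint32_t rxfifo1_size; }; /* simplified stand-in */

    /* never let a firmware-supplied count exceed the local array bound */
    static int parse_lmacs(const struct lmac_cfg *lmacs, uint32_t lmac_num)
    {
        uint32_t i;

        if (lmac_num > MAX_LMACS)
            return -1; /* the driver WARNs and bails out here */

        for (i = 0; i < lmac_num; i++)
            printf("lmac %u: rxfifo1=%u\n", i, lmacs[i].rxfifo1_size);
        return 0;
    }

    int main(void)
    {
        struct lmac_cfg lmacs[MAX_LMACS] = { { 8192 }, { 8192 } };

        return parse_lmacs(lmacs, 2);
    }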
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
struct iwl_ltr_config_cmd cmd = {
@@ -1048,8 +614,8 @@ static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
union acpi_object *data,
int data_size)
{
+ union acpi_object *wifi_pkg = NULL;
int i;
- union acpi_object *wifi_pkg;
/*
* We need at least two packages, one for the revision and one
@@ -1465,7 +1031,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
int ret;
- if (iwl_mvm_has_new_tx_api(mvm))
+ if (iwl_mvm_has_unified_ucode(mvm))
return iwl_run_unified_mvm_ucode(mvm, false);
ret = iwl_run_init_mvm_ucode(mvm, false);
@@ -1495,7 +1061,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (ret)
return ret;
- return iwl_mvm_init_paging(mvm);
+ return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
}
int iwl_mvm_up(struct iwl_mvm *mvm)
@@ -1516,24 +1082,24 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- iwl_mvm_get_shared_mem_conf(mvm);
+ iwl_get_shared_mem_conf(&mvm->fwrt);
ret = iwl_mvm_sf_update(mvm, NULL, false);
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
- mvm->fw_dbg_conf = FW_DBG_INVALID;
+ mvm->fwrt.dump.conf = FW_DBG_INVALID;
/* if we have a destination, assume EARLY START */
if (mvm->fw->dbg_dest_tlv)
- mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
- iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
+ mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
+ iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
if (ret)
goto error;
- /* Send phy db control command and then phy db calibration*/
- if (!iwl_mvm_has_new_tx_api(mvm)) {
+ if (!iwl_mvm_has_unified_ucode(mvm)) {
+ /* Send phy db control command and then phy db calibration */
ret = iwl_send_phy_db_data(mvm->phy_db);
if (ret)
goto error;
@@ -1549,7 +1115,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* Init RSS configuration */
/* TODO - remove a000 disablement when we have RXQ config API */
- if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_has_new_rx_api(mvm) &&
+ mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1567,14 +1134,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
- /* Enable DQA-mode if required */
- if (iwl_mvm_is_dqa_supported(mvm)) {
- ret = iwl_mvm_send_dqa_cmd(mvm);
- if (ret)
- goto error;
- } else {
- IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
- }
+ ret = iwl_mvm_send_dqa_cmd(mvm);
+ if (ret)
+ goto error;
/* Add auxiliary station for scanning */
ret = iwl_mvm_add_aux_sta(mvm);