Diffstat (limited to 'drivers/net/ethernet/qlogic/qede/qede_main.c')
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 541
1 file changed, 453 insertions(+), 88 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 6237f10b5119..5f15e23a0f7d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -330,15 +330,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
struct eth_tx_3rd_bd *third_bd)
{
u8 l4_proto;
- u16 bd2_bits = 0, bd2_bits2 = 0;
+ u16 bd2_bits1 = 0, bd2_bits2 = 0;
- bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+ bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
- bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
<< ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
- bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
@@ -347,16 +347,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
l4_proto = ip_hdr(skb)->protocol;
if (l4_proto == IPPROTO_UDP)
- bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+ bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
- if (third_bd) {
+ if (third_bd)
third_bd->data.bitfields |=
- ((tcp_hdrlen(skb) / 4) &
- ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
- ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
- }
+ cpu_to_le16(((tcp_hdrlen(skb) / 4) &
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
- second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+ second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}
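As an aside on the bitfield split above, here is a minimal standalone sketch of how the two 16-bit second-BD fields are packed before the little-endian conversion. The shift/mask values below are placeholders for illustration only, not the real ETH_TX_DATA_2ND_BD_* firmware definitions.

#include <stdint.h>
#include <stdio.h>

/* placeholder bit positions, NOT the firmware HSI values */
#define BD2_IPV6_EXT_SHIFT		0
#define BD2_L4_UDP_SHIFT		1
#define BD2_L4_HDR_OFFSET_W_MASK	0xff
#define BD2_L4_HDR_OFFSET_W_SHIFT	8

int main(void)
{
	uint16_t bd2_bits1 = 0, bd2_bits2 = 0;
	unsigned int l4_offset_bytes = 54;	/* e.g. ETH + IPv6 + one ext header */
	int is_udp = 0;

	bd2_bits1 |= 1 << BD2_IPV6_EXT_SHIFT;
	if (is_udp)
		bd2_bits1 |= 1 << BD2_L4_UDP_SHIFT;

	/* the L4 header start is expressed in 16-bit words, hence the >> 1 */
	bd2_bits2 |= ((l4_offset_bytes >> 1) & BD2_L4_HDR_OFFSET_W_MASK)
			<< BD2_L4_HDR_OFFSET_W_SHIFT;

	/* the driver stores each field with cpu_to_le16(), since the device
	 * reads the BD as little-endian */
	printf("bitfields1=0x%04x bitfields2=0x%04x\n", bd2_bits1, bd2_bits2);
	return 0;
}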
@@ -464,12 +463,16 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Fill the parsing flags & params according to the requested offload */
if (xmit_type & XMIT_L4_CSUM) {
+ u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
+
/* We don't re-calculate IP checksum as it is already done by
* the upper stack
*/
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ first_bd->data.bitfields |= cpu_to_le16(temp);
+
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass few params, since the device cracker doesn't
* support parsing IPv6 with extension header/s.
@@ -491,7 +494,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* @@@TBD - if will not be removed need to check */
third_bd->data.bitfields |=
- (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
/* Make life easier for FW guys who can't deal with header and
* data on same BD. If we need to split, use the second bd...
@@ -719,26 +722,52 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
return false;
}
-/* This function copies the Rx buffer from the CONS position to the PROD
- * position, since we failed to allocate a new Rx buffer.
+/* This function reuses the buffer (from an offset) from the
+ * consumer index to the producer index in the bd ring
*/
-static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+static inline void qede_reuse_page(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
{
- struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
- struct sw_rx_data *sw_rx_data_cons =
- &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
- struct sw_rx_data *sw_rx_data_prod =
- &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ struct sw_rx_data *curr_prod;
+ dma_addr_t new_mapping;
- dma_unmap_addr_set(sw_rx_data_prod, mapping,
- dma_unmap_addr(sw_rx_data_cons, mapping));
+ curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ *curr_prod = *curr_cons;
- sw_rx_data_prod->data = sw_rx_data_cons->data;
- memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+ new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
- rxq->sw_rx_cons++;
rxq->sw_rx_prod++;
+ curr_cons->data = NULL;
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
+{
+ /* Move to the next segment in the page */
+ curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+ if (curr_cons->page_offset == PAGE_SIZE) {
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ return -ENOMEM;
+
+ dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ } else {
+ /* Increment refcount of the page as we don't want
+ * network stack to take the ownership of the page
+ * which can be recycled multiple times by the driver.
+ */
+ atomic_inc(&curr_cons->data->_count);
+ qede_reuse_page(edev, rxq, curr_cons);
+ }
+
+ return 0;
}
static inline void qede_update_rx_prod(struct qede_dev *edev,
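A standalone model of the reuse scheme these helpers implement, assuming a 4K page carved into 2K segments: the page is handed out segment by segment, and only once every segment has been consumed does the driver unmap it and allocate a fresh one. The BD re-posting and page refcounting are only noted in comments here.

#include <stdio.h>

#define PAGE_SIZE	4096
#define RX_BUF_SEG_SIZE	2048	/* assumed roundup_pow_of_two(buffer size) */

struct sw_rx_buf {
	unsigned int page_offset;
};

/* returns 1 when the page is exhausted and a new page must be mapped */
static int consume_segment(struct sw_rx_buf *buf)
{
	buf->page_offset += RX_BUF_SEG_SIZE;
	return buf->page_offset == PAGE_SIZE;
}

int main(void)
{
	struct sw_rx_buf buf = { .page_offset = 0 };
	int pkt;

	for (pkt = 0; pkt < 4; pkt++) {
		if (consume_segment(&buf)) {
			/* real driver: qede_alloc_rx_buffer() + dma_unmap_page() */
			printf("pkt %d: page exhausted, allocate and map a new page\n", pkt);
			buf.page_offset = 0;
		} else {
			/* real driver: take a page reference, qede_reuse_page() */
			printf("pkt %d: reuse same page at offset %u\n",
			       pkt, buf.page_offset);
		}
	}
	return 0;
}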
@@ -857,9 +886,10 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
struct sk_buff *skb;
+ struct page *data;
+ __le16 flags;
u16 len, pad;
u32 rx_hash;
- u8 *data;
/* Get the CQE from the completion ring */
cqe = (union eth_rx_cqe *)
@@ -879,56 +909,110 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
data = sw_rx_data->data;
fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->pkt_len);
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
pad = fp_cqe->placement_offset;
+ flags = cqe->fast_path_regular.pars_flags.flags;
- /* For every Rx BD consumed, we allocate a new BD so the BD ring
- * is always with a fixed size. If allocation fails, we take the
- * consumed BD and return it to the ring in the PROD position.
- * The packet that was received on that BD will be dropped (and
- * not passed to the upper stack).
- */
- if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
- dma_unmap_single(&edev->pdev->dev,
- dma_unmap_addr(sw_rx_data, mapping),
- rxq->rx_buf_size, DMA_FROM_DEVICE);
-
- /* If this is an error packet then drop it */
- parse_flag =
- le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
- csum_flag = qede_check_csum(parse_flag);
- if (csum_flag == QEDE_CSUM_ERROR) {
- DP_NOTICE(edev,
- "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
- sw_comp_cons, parse_flag);
- rxq->rx_hw_errors++;
- kfree(data);
- goto next_rx;
- }
-
- skb = build_skb(data, 0);
-
- if (unlikely(!skb)) {
- DP_NOTICE(edev,
- "Build_skb failed, dropping incoming packet\n");
- kfree(data);
- rxq->rx_alloc_errors++;
- goto next_rx;
- }
+ /* If this is an error packet then drop it */
+ parse_flag = le16_to_cpu(flags);
- skb_reserve(skb, pad);
+ csum_flag = qede_check_csum(parse_flag);
+ if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ DP_NOTICE(edev,
+ "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+ sw_comp_cons, parse_flag);
+ rxq->rx_hw_errors++;
+ qede_reuse_page(edev, rxq, sw_rx_data);
+ goto next_rx;
+ }
- } else {
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
DP_NOTICE(edev,
- "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
- qede_reuse_rx_data(rxq);
+ "Build_skb failed, dropping incoming packet\n");
+ qede_reuse_page(edev, rxq, sw_rx_data);
rxq->rx_alloc_errors++;
- goto next_cqe;
+ goto next_rx;
+ }
+
+ /* Copy data into SKB */
+ if (len + pad <= QEDE_RX_HDR_SIZE) {
+ memcpy(skb_put(skb, len),
+ page_address(data) + pad +
+ sw_rx_data->page_offset, len);
+ qede_reuse_page(edev, rxq, sw_rx_data);
+ } else {
+ struct skb_frag_struct *frag;
+ unsigned int pull_len;
+ unsigned char *va;
+
+ frag = &skb_shinfo(skb)->frags[0];
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
+ pad + sw_rx_data->page_offset,
+ len, rxq->rx_buf_seg_size);
+
+ va = skb_frag_address(frag);
+ pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+ /* Align the pull_len to optimize memcpy */
+ memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+
+ if (unlikely(qede_realloc_rx_buffer(edev, rxq,
+ sw_rx_data))) {
+ DP_ERR(edev, "Failed to allocate rx buffer\n");
+ rxq->rx_alloc_errors++;
+ goto next_cqe;
+ }
}
- sw_rx_data->data = NULL;
+ if (fp_cqe->bd_num != 1) {
+ u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
+ u8 num_frags;
+
+ pkt_len -= len;
+
+ for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
+ num_frags--) {
+ u16 cur_size = pkt_len > rxq->rx_buf_size ?
+ rxq->rx_buf_size : pkt_len;
+
+ WARN_ONCE(!cur_size,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ goto next_cqe;
+
+ rxq->sw_rx_cons++;
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ qed_chain_consume(&rxq->rx_bd_ring);
+ dma_unmap_page(&edev->pdev->dev,
+ sw_rx_data->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb_fill_page_desc(skb,
+ skb_shinfo(skb)->nr_frags++,
+ sw_rx_data->data, 0,
+ cur_size);
+
+ skb->truesize += PAGE_SIZE;
+ skb->data_len += cur_size;
+ skb->len += cur_size;
+ pkt_len -= cur_size;
+ }
- skb_put(skb, len);
+ if (pkt_len)
+ DP_ERR(edev,
+ "Mapped all BDs of jumbo, but still have %d bytes\n",
+ pkt_len);
+ }
skb->protocol = eth_type_trans(skb, edev->ndev);
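A hedged userspace model of the copy-versus-frag decision in the reworked Rx completion path above; the QEDE_RX_HDR_SIZE value and the fixed pull length are assumptions standing in for the real constant and eth_get_headlen().

#include <stdio.h>

#define QEDE_RX_HDR_SIZE 256	/* assumed linear-area budget */

static void receive(unsigned int len, unsigned int pad)
{
	if (len + pad <= QEDE_RX_HDR_SIZE) {
		/* small frame: copy everything into the skb head and
		 * recycle the page segment immediately */
		printf("len=%u: copy %u bytes into the linear area, reuse page\n",
		       len, len);
	} else {
		/* large frame: attach the page segment as frag[0], then pull
		 * only the protocol headers into the linear area so the
		 * stack can parse them; the buffer itself is re-allocated */
		unsigned int pull = QEDE_RX_HDR_SIZE;	/* upper bound on headers */

		printf("len=%u: add page frag, pull up to %u header bytes\n",
		       len, pull);
	}
}

int main(void)
{
	receive(64, 2);		/* e.g. a bare TCP ACK */
	receive(1500, 2);	/* a full MTU-sized frame */
	return 0;
}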
@@ -1056,6 +1140,21 @@ static int qede_set_ucast_rx_mac(struct qede_dev *edev,
return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
+static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ u16 vid)
+{
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.vlan_valid = 1;
+ filter_cmd.filter.ucast.vlan = vid;
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
struct qed_eth_stats stats;
@@ -1168,6 +1267,247 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats;
}
+static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
+{
+ struct qed_update_vport_params params;
+ int rc;
+
+ /* Proceed only if action actually needs to be performed */
+ if (edev->accept_any_vlan == action)
+ return;
+
+ memset(&params, 0, sizeof(params));
+
+ params.vport_id = 0;
+ params.accept_any_vlan = action;
+ params.update_accept_any_vlan_flg = 1;
+
+ rc = edev->ops->vport_update(edev->cdev, &params);
+ if (rc) {
+ DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+ action ? "enable" : "disable");
+ } else {
+ DP_INFO(edev, "%s accept-any-vlan\n",
+ action ? "enabled" : "disabled");
+ edev->accept_any_vlan = action;
+ }
+}
+
+static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan, *tmp;
+ int rc;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan) {
+ DP_INFO(edev, "Failed to allocate struct for vlan\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&vlan->list);
+ vlan->vid = vid;
+ vlan->configured = false;
+
+ /* Verify vlan isn't already configured */
+ list_for_each_entry(tmp, &edev->vlan_list, list) {
+ if (tmp->vid == vlan->vid) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "vlan already configured\n");
+ kfree(vlan);
+ return -EEXIST;
+ }
+ }
+
+ /* If interface is down, cache this VLAN ID and return */
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, VLAN %d will be configured when interface is up\n",
+ vid);
+ if (vid != 0)
+ edev->non_configured_vlans++;
+ list_add(&vlan->list, &edev->vlan_list);
+
+ return 0;
+ }
+
+ /* Check for the filter limit.
+ * Note - vlan0 has a reserved filter and can be added without
+ * worrying about quota
+ */
+ if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
+ (vlan->vid == 0)) {
+ rc = qede_set_ucast_rx_vlan(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %d\n",
+ vlan->vid);
+ kfree(vlan);
+ return -EINVAL;
+ }
+ vlan->configured = true;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0)
+ edev->configured_vlans++;
+ } else {
+ /* Out of quota; Activate accept-any-VLAN mode */
+ if (!edev->non_configured_vlans)
+ qede_config_accept_any_vlan(edev, true);
+
+ edev->non_configured_vlans++;
+ }
+
+ list_add(&vlan->list, &edev->vlan_list);
+
+ return 0;
+}
+
+static void qede_del_vlan_from_list(struct qede_dev *edev,
+ struct qede_vlan *vlan)
+{
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ if (vlan->configured)
+ edev->configured_vlans--;
+ else
+ edev->non_configured_vlans--;
+ }
+
+ list_del(&vlan->list);
+ kfree(vlan);
+}
+
+static int qede_configure_vlan_filters(struct qede_dev *edev)
+{
+ int rc = 0, real_rc = 0, accept_any_vlan = 0;
+ struct qed_dev_eth_info *dev_info;
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return 0;
+
+ dev_info = &edev->dev_info;
+
+ /* Configure non-configured vlans */
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (vlan->configured)
+ continue;
+
+ /* We have used all our credits, now enable accept_any_vlan */
+ if ((vlan->vid != 0) &&
+ (edev->configured_vlans == dev_info->num_vlan_filters)) {
+ accept_any_vlan = 1;
+ continue;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
+
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %u\n",
+ vlan->vid);
+ real_rc = rc;
+ continue;
+ }
+
+ vlan->configured = true;
+ /* vlan0 filter doesn't consume our VLAN filter's quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans--;
+ edev->configured_vlans++;
+ }
+ }
+
+ /* enable accept_any_vlan mode if we have more VLANs than credits,
+ * or remove accept_any_vlan mode if we've actually removed
+ * a non-configured vlan, and all remaining vlans are truly configured.
+ */
+
+ if (accept_any_vlan)
+ qede_config_accept_any_vlan(edev, true);
+ else if (!edev->non_configured_vlans)
+ qede_config_accept_any_vlan(edev, false);
+
+ return real_rc;
+}
+
+static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan = NULL;
+ int rc;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
+
+ /* Find whether entry exists */
+ list_for_each_entry(vlan, &edev->vlan_list, list)
+ if (vlan->vid == vid)
+ break;
+
+ if (!vlan || (vlan->vid != vid)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Vlan isn't configured\n");
+ return 0;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ /* As interface is already down, we don't have a VPORT
+ * instance to remove vlan filter. So just update vlan list
+ */
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, removing VLAN from list only\n");
+ qede_del_vlan_from_list(edev, vlan);
+ return 0;
+ }
+
+ /* Remove vlan */
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+ return -EINVAL;
+ }
+
+ qede_del_vlan_from_list(edev, vlan);
+
+ /* We have removed a VLAN - try to see if we can
+ * configure non-configured VLAN from the list.
+ */
+ rc = qede_configure_vlan_filters(edev);
+
+ return rc;
+}
+
+static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
+{
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return;
+
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (!vlan->configured)
+ continue;
+
+ vlan->configured = false;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans++;
+ edev->configured_vlans--;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "marked vlan %d as non-configured\n",
+ vlan->vid);
+ }
+
+ edev->accept_any_vlan = false;
+}
+
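The sketch below models the bookkeeping these VLAN handlers implement: every non-zero vid consumes one hardware filter credit, and once the quota is exhausted the driver switches to accept-any-vlan until enough vids are removed. The quota value and the simplified retry-on-removal are assumptions of this model, not driver code.

#include <stdbool.h>
#include <stdio.h>

#define NUM_VLAN_FILTERS 2	/* assumed hardware filter quota */

struct vlan_state {
	unsigned int configured;	/* vids holding a hardware filter */
	unsigned int non_configured;	/* vids waiting for a free filter */
	bool accept_any_vlan;
};

static void add_vid(struct vlan_state *s, unsigned short vid)
{
	if (vid == 0 || s->configured < NUM_VLAN_FILTERS) {
		if (vid != 0)
			s->configured++;	/* vlan0 has a reserved filter */
		printf("vid %u: programmed a hardware filter\n", vid);
	} else {
		if (!s->non_configured)
			s->accept_any_vlan = true;
		s->non_configured++;
		printf("vid %u: out of credits, accept-any-vlan enabled\n", vid);
	}
}

static void kill_vid(struct vlan_state *s, unsigned short vid, bool was_configured)
{
	if (vid != 0) {
		if (was_configured)
			s->configured--;
		else
			s->non_configured--;
	}
	/* the driver then re-runs qede_configure_vlan_filters(); once no vid
	 * is left unprogrammed, accept-any-vlan can be switched off again */
	if (!s->non_configured)
		s->accept_any_vlan = false;
	printf("vid %u removed, accept-any-vlan %s\n", vid,
	       s->accept_any_vlan ? "still on" : "off");
}

int main(void)
{
	struct vlan_state s = { .configured = 0, .non_configured = 0,
				.accept_any_vlan = false };

	add_vid(&s, 10);
	add_vid(&s, 20);
	add_vid(&s, 30);		/* exceeds the two-filter quota */
	kill_vid(&s, 30, false);	/* relieves the pressure again */
	return 0;
}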
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -1176,6 +1516,8 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
.ndo_get_stats64 = qede_get_stats64,
};
@@ -1220,6 +1562,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->num_tc = edev->dev_info.num_tc;
+ INIT_LIST_HEAD(&edev->vlan_list);
+
return edev;
}
@@ -1251,7 +1595,7 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_HIGHDMA;
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
ndev->hw_features = hw_features;
@@ -1566,17 +1910,17 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
struct sw_rx_data *rx_buf;
- u8 *data;
+ struct page *data;
rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
data = rx_buf->data;
- dma_unmap_single(&edev->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- rxq->rx_buf_size, DMA_FROM_DEVICE);
+ dma_unmap_page(&edev->pdev->dev,
+ rx_buf->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rx_buf->data = NULL;
- kfree(data);
+ __free_page(data);
}
}
@@ -1600,29 +1944,32 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct sw_rx_data *sw_rx_data;
struct eth_rx_bd *rx_bd;
dma_addr_t mapping;
+ struct page *data;
u16 rx_buf_size;
- u8 *data;
rx_buf_size = rxq->rx_buf_size;
- data = kmalloc(rx_buf_size, GFP_ATOMIC);
+ data = alloc_pages(GFP_ATOMIC, 0);
if (unlikely(!data)) {
- DP_NOTICE(edev, "Failed to allocate Rx data\n");
+ DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
return -ENOMEM;
}
- mapping = dma_map_single(&edev->pdev->dev, data,
- rx_buf_size, DMA_FROM_DEVICE);
+ /* Map the entire page, as it will be used for
+ * multiple Rx buffer segments.
+ */
+ mapping = dma_map_page(&edev->pdev->dev, data, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
- kfree(data);
+ __free_page(data);
DP_NOTICE(edev, "Failed to map Rx buffer\n");
return -ENOMEM;
}
sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->page_offset = 0;
sw_rx_data->data = data;
-
- dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+ sw_rx_data->mapping = mapping;
/* Advance PROD and get BD pointer */
rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
@@ -1643,13 +1990,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rxq->num_rx_buffers = edev->q_num_rx_buffers;
- rxq->rx_buf_size = NET_IP_ALIGN +
- ETH_OVERHEAD +
- edev->ndev->mtu +
- QEDE_FW_RX_ALIGN_END;
+ rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
+ edev->ndev->mtu;
+ if (rxq->rx_buf_size > PAGE_SIZE)
+ rxq->rx_buf_size = PAGE_SIZE;
+
+ /* Segment size to split a page into multiple equal parts */
+ rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
/* Allocate the parallel driver ring for Rx buffers */
- size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+ size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
if (!rxq->sw_rx_ring) {
DP_ERR(edev, "Rx buffers ring allocation failed\n");
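A small model of the sizing arithmetic above, assuming a 4K page and a rough stand-in for NET_IP_ALIGN + ETH_OVERHEAD; it only demonstrates the cap at PAGE_SIZE and the roundup_pow_of_two() segment rounding.

#include <stdio.h>

#define PAGE_SIZE		4096
#define ASSUMED_OVERHEAD	22	/* stand-in for NET_IP_ALIGN + ETH_OVERHEAD */

static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int mtus[] = { 1500, 9000 };
	unsigned int mtu, buf_size, seg_size;
	int i;

	for (i = 0; i < 2; i++) {
		mtu = mtus[i];
		buf_size = ASSUMED_OVERHEAD + mtu;
		if (buf_size > PAGE_SIZE)
			buf_size = PAGE_SIZE;
		seg_size = roundup_pow_of_two(buf_size);
		printf("mtu=%u: rx_buf_size=%u, rx_buf_seg_size=%u, %u segment(s) per page\n",
		       mtu, buf_size, seg_size, PAGE_SIZE / seg_size);
	}
	return 0;
}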
@@ -1660,7 +2010,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR,
- NUM_RX_BDS_MAX,
+ RX_RING_SIZE,
sizeof(struct eth_rx_bd),
&rxq->rx_bd_ring);
@@ -1671,7 +2021,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME,
QED_CHAIN_MODE_PBL,
- NUM_RX_BDS_MAX,
+ RX_RING_SIZE,
sizeof(union eth_rx_cqe),
&rxq->rx_comp_ring);
if (rc)
@@ -2252,6 +2602,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
DP_INFO(edev, "Stopped Queues\n");
+ qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
/* Release the interrupts */
@@ -2320,6 +2671,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
edev->state = QEDE_STATE_OPEN;
mutex_unlock(&edev->qede_lock);
+ /* Program un-configured VLANs */
+ qede_configure_vlan_filters(edev);
+
/* Ask for link-up using current configuration */
memset(&link_params, 0, sizeof(link_params));
link_params.link_up = true;
@@ -2580,6 +2934,17 @@ static void qede_config_rx_mode(struct net_device *ndev)
goto out;
}
+ /* take care of VLAN mode */
+ if (ndev->flags & IFF_PROMISC) {
+ qede_config_accept_any_vlan(edev, true);
+ } else if (!edev->non_configured_vlans) {
+ /* It's possible that accept_any_vlan mode is set due to a
+ * previous setting of IFF_PROMISC. If vlan credits are
+ * sufficient, disable accept_any_vlan.
+ */
+ qede_config_accept_any_vlan(edev, false);
+ }
+
rx_mode.filter.accept_flags = accept_flags;
edev->ops->filter_config(edev->cdev, &rx_mode);
out:
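Finally, a tiny model of the rx-mode interaction added here: promiscuous mode forces accept-any-vlan on, and it is dropped again only when no requested vid is left without a hardware filter. This is a simplification of the driver flow, not its code.

#include <stdbool.h>
#include <stdio.h>

static bool want_accept_any_vlan(bool promisc, unsigned int non_configured_vlans)
{
	if (promisc)
		return true;
	return non_configured_vlans != 0;
}

int main(void)
{
	printf("%d\n", want_accept_any_vlan(true, 0));	/* 1: promiscuous */
	printf("%d\n", want_accept_any_vlan(false, 2));	/* 1: vids over quota */
	printf("%d\n", want_accept_any_vlan(false, 0));	/* 0: can be disabled */
	return 0;
}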