Diffstat (limited to 'hw/net/vmxnet3.c')
-rw-r--r-- | hw/net/vmxnet3.c | 155 |
1 files changed, 81 insertions, 74 deletions
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 20f26b7d89..16645e6c23 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -30,8 +30,8 @@
 #include "vmxnet3.h"
 #include "vmxnet_debug.h"
 #include "vmware_utils.h"
-#include "vmxnet_tx_pkt.h"
-#include "vmxnet_rx_pkt.h"
+#include "net_tx_pkt.h"
+#include "net_rx_pkt.h"
 
 #define PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION 0x1
 #define VMXNET3_MSIX_BAR_SIZE 0x2000
@@ -314,13 +314,13 @@ typedef struct {
     bool peer_has_vhdr;
 
     /* TX packets to QEMU interface */
-    struct VmxnetTxPkt *tx_pkt;
+    struct NetTxPkt *tx_pkt;
     uint32_t offload_mode;
     uint32_t cso_or_gso_size;
     uint16_t tci;
     bool needs_vlan;
 
-    struct VmxnetRxPkt *rx_pkt;
+    struct NetRxPkt *rx_pkt;
 
     bool tx_sop;
     bool skip_current_tx_pkt;
@@ -474,7 +474,7 @@ static void vmxnet3_set_variable_mac(VMXNET3State *s, uint32_t h, uint32_t l)
     s->conf.macaddr.a[4] = VMXNET3_GET_BYTE(h, 0);
     s->conf.macaddr.a[5] = VMXNET3_GET_BYTE(h, 1);
 
-    VMW_CFPRN("Variable MAC: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));
+    VMW_CFPRN("Variable MAC: " MAC_FMT, MAC_ARG(s->conf.macaddr.a));
 
     qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
 }
@@ -546,18 +546,18 @@ vmxnet3_setup_tx_offloads(VMXNET3State *s)
 {
     switch (s->offload_mode) {
     case VMXNET3_OM_NONE:
-        vmxnet_tx_pkt_build_vheader(s->tx_pkt, false, false, 0);
+        net_tx_pkt_build_vheader(s->tx_pkt, false, false, 0);
         break;
 
     case VMXNET3_OM_CSUM:
-        vmxnet_tx_pkt_build_vheader(s->tx_pkt, false, true, 0);
+        net_tx_pkt_build_vheader(s->tx_pkt, false, true, 0);
         VMW_PKPRN("L4 CSO requested\n");
         break;
 
     case VMXNET3_OM_TSO:
-        vmxnet_tx_pkt_build_vheader(s->tx_pkt, true, true,
+        net_tx_pkt_build_vheader(s->tx_pkt, true, true,
             s->cso_or_gso_size);
-        vmxnet_tx_pkt_update_ip_checksums(s->tx_pkt);
+        net_tx_pkt_update_ip_checksums(s->tx_pkt);
         VMW_PKPRN("GSO offload requested.");
         break;
@@ -590,12 +590,12 @@ static void
 vmxnet3_on_tx_done_update_stats(VMXNET3State *s, int qidx,
                                 Vmxnet3PktStatus status)
 {
-    size_t tot_len = vmxnet_tx_pkt_get_total_len(s->tx_pkt);
+    size_t tot_len = net_tx_pkt_get_total_len(s->tx_pkt);
     struct UPT1_TxStats *stats = &s->txq_descr[qidx].txq_stats;
 
     switch (status) {
     case VMXNET3_PKT_STATUS_OK:
-        switch (vmxnet_tx_pkt_get_packet_type(s->tx_pkt)) {
+        switch (net_tx_pkt_get_packet_type(s->tx_pkt)) {
         case ETH_PKT_BCAST:
             stats->bcastPktsTxOK++;
             stats->bcastBytesTxOK += tot_len;
@@ -643,7 +643,7 @@ vmxnet3_on_rx_done_update_stats(VMXNET3State *s,
                                 Vmxnet3PktStatus status)
 {
     struct UPT1_RxStats *stats = &s->rxq_descr[qidx].rxq_stats;
-    size_t tot_len = vmxnet_rx_pkt_get_total_len(s->rx_pkt);
+    size_t tot_len = net_rx_pkt_get_total_len(s->rx_pkt);
 
     switch (status) {
     case VMXNET3_PKT_STATUS_OUT_OF_BUF:
@@ -654,7 +654,7 @@ vmxnet3_on_rx_done_update_stats(VMXNET3State *s,
         stats->pktsRxError++;
         break;
     case VMXNET3_PKT_STATUS_OK:
-        switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) {
+        switch (net_rx_pkt_get_packet_type(s->rx_pkt)) {
         case ETH_PKT_BCAST:
             stats->bcastPktsRxOK++;
             stats->bcastBytesRxOK += tot_len;
@@ -715,10 +715,10 @@ vmxnet3_send_packet(VMXNET3State *s, uint32_t qidx)
     }
 
     /* debug prints */
-    vmxnet3_dump_virt_hdr(vmxnet_tx_pkt_get_vhdr(s->tx_pkt));
-    vmxnet_tx_pkt_dump(s->tx_pkt);
+    vmxnet3_dump_virt_hdr(net_tx_pkt_get_vhdr(s->tx_pkt));
+    net_tx_pkt_dump(s->tx_pkt);
 
-    if (!vmxnet_tx_pkt_send(s->tx_pkt, qemu_get_queue(s->nic))) {
+    if (!net_tx_pkt_send(s->tx_pkt, qemu_get_queue(s->nic))) {
         status = VMXNET3_PKT_STATUS_DISCARD;
         goto func_exit;
     }
@@ -746,7 +746,7 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
         data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE;
         data_pa = le64_to_cpu(txd.addr);
 
-        if (!vmxnet_tx_pkt_add_raw_fragment(s->tx_pkt,
+        if (!net_tx_pkt_add_raw_fragment(s->tx_pkt,
                                             data_pa,
                                             data_len)) {
             s->skip_current_tx_pkt = true;
@@ -759,9 +759,9 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
         }
 
         if (txd.eop) {
-            if (!s->skip_current_tx_pkt && vmxnet_tx_pkt_parse(s->tx_pkt)) {
+            if (!s->skip_current_tx_pkt && net_tx_pkt_parse(s->tx_pkt)) {
                 if (s->needs_vlan) {
-                    vmxnet_tx_pkt_setup_vlan_header(s->tx_pkt, s->tci);
+                    net_tx_pkt_setup_vlan_header(s->tx_pkt, s->tci);
                 }
 
                 vmxnet3_send_packet(s, qidx);
@@ -773,7 +773,7 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
             vmxnet3_complete_packet(s, qidx, txd_idx);
             s->tx_sop = true;
             s->skip_current_tx_pkt = false;
-            vmxnet_tx_pkt_reset(s->tx_pkt);
+            net_tx_pkt_reset(s->tx_pkt);
         }
     }
 }
@@ -802,7 +802,9 @@ vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen)
     hwaddr daddr =
         vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);
 
-    cpu_physical_memory_read(daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
+    pci_dma_read(PCI_DEVICE(s), daddr,
+                 &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
+
     ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);
 
     if (rxcd.gen != ring_gen) {
@@ -928,7 +930,7 @@ vmxnet3_get_next_rx_descr(VMXNET3State *s, bool is_head,
  * in the case the host OS performs forwarding, it will forward an
  * incorrectly checksummed packet.
  */
-static void vmxnet3_rx_need_csum_calculate(struct VmxnetRxPkt *pkt,
+static void vmxnet3_rx_need_csum_calculate(struct NetRxPkt *pkt,
                                            const void *pkt_data,
                                            size_t pkt_len)
 {
@@ -937,16 +939,16 @@ static void vmxnet3_rx_need_csum_calculate(struct VmxnetRxPkt *pkt,
     uint8_t *data;
     int len;
 
-    if (!vmxnet_rx_pkt_has_virt_hdr(pkt)) {
+    if (!net_rx_pkt_has_virt_hdr(pkt)) {
         return;
     }
 
-    vhdr = vmxnet_rx_pkt_get_vhdr(pkt);
+    vhdr = net_rx_pkt_get_vhdr(pkt);
     if (!VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
         return;
     }
 
-    vmxnet_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
     if (!(isip4 || isip6) || !(istcp || isudp)) {
         return;
     }
@@ -970,7 +972,7 @@ static void vmxnet3_rx_need_csum_calculate(struct VmxnetRxPkt *pkt,
     vhdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
 }
 
-static void vmxnet3_rx_update_descr(struct VmxnetRxPkt *pkt,
+static void vmxnet3_rx_update_descr(struct NetRxPkt *pkt,
     struct Vmxnet3_RxCompDesc *rxcd)
 {
     int csum_ok, is_gso;
@@ -978,16 +980,16 @@ static void vmxnet3_rx_update_descr(struct VmxnetRxPkt *pkt,
     struct virtio_net_hdr *vhdr;
     uint8_t offload_type;
 
-    if (vmxnet_rx_pkt_is_vlan_stripped(pkt)) {
+    if (net_rx_pkt_is_vlan_stripped(pkt)) {
         rxcd->ts = 1;
-        rxcd->tci = vmxnet_rx_pkt_get_vlan_tag(pkt);
+        rxcd->tci = net_rx_pkt_get_vlan_tag(pkt);
     }
 
-    if (!vmxnet_rx_pkt_has_virt_hdr(pkt)) {
+    if (!net_rx_pkt_has_virt_hdr(pkt)) {
         goto nocsum;
     }
 
-    vhdr = vmxnet_rx_pkt_get_vhdr(pkt);
+    vhdr = net_rx_pkt_get_vhdr(pkt);
     /*
      * Checksum is valid when lower level tell so or when lower level
      * requires checksum offload telling that packet produced/bridged
@@ -1004,7 +1006,7 @@ static void vmxnet3_rx_update_descr(struct VmxnetRxPkt *pkt,
         goto nocsum;
     }
 
-    vmxnet_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
     if ((!istcp && !isudp) || (!isip4 && !isip6)) {
         goto nocsum;
     }
@@ -1023,10 +1025,11 @@ nocsum:
 }
 
 static void
-vmxnet3_physical_memory_writev(const struct iovec *iov,
-                               size_t start_iov_off,
-                               hwaddr target_addr,
-                               size_t bytes_to_copy)
+vmxnet3_pci_dma_writev(PCIDevice *pci_dev,
+                       const struct iovec *iov,
+                       size_t start_iov_off,
+                       hwaddr target_addr,
+                       size_t bytes_to_copy)
 {
     size_t curr_off = 0;
     size_t copied = 0;
@@ -1036,9 +1039,9 @@ vmxnet3_physical_memory_writev(const struct iovec *iov,
             size_t chunk_len =
                 MIN((curr_off + iov->iov_len) - start_iov_off, bytes_to_copy);
 
-            cpu_physical_memory_write(target_addr + copied,
-                                      iov->iov_base + start_iov_off - curr_off,
-                                      chunk_len);
+            pci_dma_write(pci_dev, target_addr + copied,
+                          iov->iov_base + start_iov_off - curr_off,
+                          chunk_len);
 
             copied += chunk_len;
             start_iov_off += chunk_len;
@@ -1063,13 +1066,13 @@ vmxnet3_indicate_packet(VMXNET3State *s)
     uint32_t new_rxcd_gen = VMXNET3_INIT_GEN;
     hwaddr new_rxcd_pa = 0;
     hwaddr ready_rxcd_pa = 0;
-    struct iovec *data = vmxnet_rx_pkt_get_iovec(s->rx_pkt);
+    struct iovec *data = net_rx_pkt_get_iovec(s->rx_pkt);
     size_t bytes_copied = 0;
-    size_t bytes_left = vmxnet_rx_pkt_get_total_len(s->rx_pkt);
+    size_t bytes_left = net_rx_pkt_get_total_len(s->rx_pkt);
     uint16_t num_frags = 0;
     size_t chunk_size;
 
-    vmxnet_rx_pkt_dump(s->rx_pkt);
+    net_rx_pkt_dump(s->rx_pkt);
 
     while (bytes_left > 0) {
@@ -1088,15 +1091,15 @@ vmxnet3_indicate_packet(VMXNET3State *s)
         }
 
         chunk_size = MIN(bytes_left, rxd.len);
-        vmxnet3_physical_memory_writev(data, bytes_copied,
-                                       le64_to_cpu(rxd.addr), chunk_size);
+        vmxnet3_pci_dma_writev(PCI_DEVICE(s), data, bytes_copied,
+                               le64_to_cpu(rxd.addr), chunk_size);
         bytes_copied += chunk_size;
         bytes_left -= chunk_size;
 
         vmxnet3_dump_rx_descr(&rxd);
 
         if (ready_rxcd_pa != 0) {
-            cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));
+            pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
         }
 
         memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
@@ -1127,7 +1130,8 @@ vmxnet3_indicate_packet(VMXNET3State *s)
     if (ready_rxcd_pa != 0) {
         rxcd.eop = 1;
         rxcd.err = (bytes_left != 0);
-        cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));
+
+        pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
 
         /* Flush RX descriptor changes */
         smp_wmb();
@@ -1219,16 +1223,16 @@ static void vmxnet3_reset_interrupt_states(VMXNET3State *s)
 static void vmxnet3_reset_mac(VMXNET3State *s)
 {
     memcpy(&s->conf.macaddr.a, &s->perm_mac.a, sizeof(s->perm_mac.a));
-    VMW_CFPRN("MAC address set to: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));
+    VMW_CFPRN("MAC address set to: " MAC_FMT, MAC_ARG(s->conf.macaddr.a));
 }
 
 static void vmxnet3_deactivate_device(VMXNET3State *s)
 {
     if (s->device_active) {
         VMW_CBPRN("Deactivating vmxnet3...");
-        vmxnet_tx_pkt_reset(s->tx_pkt);
-        vmxnet_tx_pkt_uninit(s->tx_pkt);
-        vmxnet_rx_pkt_uninit(s->rx_pkt);
+        net_tx_pkt_reset(s->tx_pkt);
+        net_tx_pkt_uninit(s->tx_pkt);
+        net_rx_pkt_uninit(s->rx_pkt);
         s->device_active = false;
     }
 }
@@ -1298,10 +1302,11 @@ static void vmxnet3_update_mcast_filters(VMXNET3State *s)
             VMXNET3_READ_DRV_SHARED64(s->drv_shmem,
                                       devRead.rxFilterConf.mfTablePA);
 
-        cpu_physical_memory_read(mcast_list_pa, s->mcast_list, list_bytes);
+        pci_dma_read(PCI_DEVICE(s), mcast_list_pa, s->mcast_list, list_bytes);
+
         VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
         for (i = 0; i < s->mcast_list_len; i++) {
-            VMW_CFPRN("\t" VMXNET_MF, VMXNET_MA(s->mcast_list[i].a));
+            VMW_CFPRN("\t" MAC_FMT, MAC_ARG(s->mcast_list[i].a));
         }
     }
 }
@@ -1328,15 +1333,17 @@ static void vmxnet3_fill_stats(VMXNET3State *s)
         return;
 
     for (i = 0; i < s->txq_num; i++) {
-        cpu_physical_memory_write(s->txq_descr[i].tx_stats_pa,
-                                  &s->txq_descr[i].txq_stats,
-                                  sizeof(s->txq_descr[i].txq_stats));
+        pci_dma_write(PCI_DEVICE(s),
+                      s->txq_descr[i].tx_stats_pa,
+                      &s->txq_descr[i].txq_stats,
+                      sizeof(s->txq_descr[i].txq_stats));
     }
 
     for (i = 0; i < s->rxq_num; i++) {
-        cpu_physical_memory_write(s->rxq_descr[i].rx_stats_pa,
-                                  &s->rxq_descr[i].rxq_stats,
-                                  sizeof(s->rxq_descr[i].rxq_stats));
+        pci_dma_write(PCI_DEVICE(s),
+                      s->rxq_descr[i].rx_stats_pa,
+                      &s->rxq_descr[i].rxq_stats,
+                      sizeof(s->rxq_descr[i].rxq_stats));
     }
 }
@@ -1558,8 +1565,9 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 
     /* Preallocate TX packet wrapper */
     VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags);
-    vmxnet_tx_pkt_init(&s->tx_pkt, s->max_tx_frags, s->peer_has_vhdr);
-    vmxnet_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
+    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s),
+                    s->max_tx_frags, s->peer_has_vhdr);
+    net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
 
     /* Read rings memory locations for RX queues */
     for (i = 0; i < s->rxq_num; i++) {
@@ -1965,7 +1973,7 @@ vmxnet3_rx_filter_may_indicate(VMXNET3State *s, const void *data,
         return false;
     }
 
-    switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) {
+    switch (net_rx_pkt_get_packet_type(s->rx_pkt)) {
     case ETH_PKT_UCAST:
         if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_UCAST)) {
             return false;
@@ -2013,7 +2021,7 @@ vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size)
     }
 
     if (s->peer_has_vhdr) {
-        vmxnet_rx_pkt_set_vhdr(s->rx_pkt, (struct virtio_net_hdr *)buf);
+        net_rx_pkt_set_vhdr(s->rx_pkt, (struct virtio_net_hdr *)buf);
         buf += sizeof(struct virtio_net_hdr);
         size -= sizeof(struct virtio_net_hdr);
     }
@@ -2026,13 +2034,13 @@ vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size)
         size = sizeof(min_buf);
     }
 
-    vmxnet_rx_pkt_set_packet_type(s->rx_pkt,
+    net_rx_pkt_set_packet_type(s->rx_pkt,
         get_eth_packet_type(PKT_GET_ETH_HDR(buf)));
 
     if (vmxnet3_rx_filter_may_indicate(s, buf, size)) {
-        vmxnet_rx_pkt_set_protocols(s->rx_pkt, buf, size);
+        net_rx_pkt_set_protocols(s->rx_pkt, buf, size);
         vmxnet3_rx_need_csum_calculate(s->rx_pkt, buf, size);
-        vmxnet_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping);
+        net_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping);
         bytes_indicated = vmxnet3_indicate_packet(s) ? size : -1;
         if (bytes_indicated < size) {
             VMW_PKPRN("RX: %zu of %zu bytes indicated", bytes_indicated, size);
@@ -2102,7 +2110,7 @@ static void vmxnet3_net_init(VMXNET3State *s)
 
     s->link_status_and_speed = VMXNET3_LINK_SPEED | VMXNET3_LINK_STATUS_UP;
 
-    VMW_CFPRN("Permanent MAC: " VMXNET_MF, VMXNET_MA(s->perm_mac.a));
+    VMW_CFPRN("Permanent MAC: " MAC_FMT, MAC_ARG(s->perm_mac.a));
 
     s->nic = qemu_new_nic(&net_vmxnet3_info, &s->conf,
                           object_get_typename(OBJECT(s)),
@@ -2255,9 +2263,9 @@ static const MemoryRegionOps b1_ops = {
     },
 };
 
-static uint8_t *vmxnet3_device_serial_num(VMXNET3State *s)
+static uint64_t vmxnet3_device_serial_num(VMXNET3State *s)
 {
-    static uint64_t dsn_payload;
+    uint64_t dsn_payload;
     uint8_t *dsnp = (uint8_t *)&dsn_payload;
 
     dsnp[0] = 0xfe;
@@ -2268,7 +2276,7 @@ static uint8_t *vmxnet3_device_serial_num(VMXNET3State *s)
     dsnp[5] = s->conf.macaddr.a[1];
     dsnp[6] = s->conf.macaddr.a[2];
     dsnp[7] = 0xff;
-    return dsnp;
+    return dsn_payload;
 }
@@ -2313,10 +2321,8 @@ static void vmxnet3_pci_realize(PCIDevice *pci_dev, Error **errp)
             pcie_endpoint_cap_init(pci_dev, VMXNET3_EXP_EP_OFFSET);
         }
 
-        pcie_add_capability(pci_dev, PCI_EXT_CAP_ID_DSN, 0x1,
-                            VMXNET3_DSN_OFFSET, PCI_EXT_CAP_DSN_SIZEOF);
-        memcpy(pci_dev->config + VMXNET3_DSN_OFFSET + 4,
-               vmxnet3_device_serial_num(s), sizeof(uint64_t));
+        pcie_dev_ser_num_init(pci_dev, VMXNET3_DSN_OFFSET,
+                              vmxnet3_device_serial_num(s));
     }
 
     register_savevm(dev, "vmxnet3-msix", -1, 1,
@@ -2538,8 +2544,9 @@ static int vmxnet3_post_load(void *opaque, int version_id)
     VMXNET3State *s = opaque;
    PCIDevice *d = PCI_DEVICE(s);
 
-    vmxnet_tx_pkt_init(&s->tx_pkt, s->max_tx_frags, s->peer_has_vhdr);
-    vmxnet_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
+    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s),
+                    s->max_tx_frags, s->peer_has_vhdr);
+    net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
 
     if (s->msix_used) {
         if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) {
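For readers outside the QEMU tree: the renamed vmxnet3_pci_dma_writev() keeps the iovec-walking logic of the old vmxnet3_physical_memory_writev() and only swaps the copy primitive from cpu_physical_memory_write() to pci_dma_write(), so the descriptor and stats writes go through the device's PCI DMA address space. The standalone sketch below reproduces that walk with memcpy() standing in for the DMA call so it compiles outside QEMU; the helper name and its main() driver are illustrative only and are not part of this patch.

/*
 * Sketch of the iovec walk in vmxnet3_pci_dma_writev(): copy
 * `bytes_to_copy` bytes, starting at logical offset `start_iov_off`
 * within the iovec chain, into `target`.  memcpy() takes the place of
 * pci_dma_write(); the loop structure mirrors the hunk at line 1036.
 */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void iov_writev_sketch(const struct iovec *iov,
                              size_t start_iov_off,
                              void *target,
                              size_t bytes_to_copy)
{
    size_t curr_off = 0;
    size_t copied = 0;

    while (bytes_to_copy) {
        if (start_iov_off < (curr_off + iov->iov_len)) {
            size_t chunk_len =
                MIN((curr_off + iov->iov_len) - start_iov_off, bytes_to_copy);

            /* stands in for pci_dma_write(pci_dev, target_addr + copied, ...) */
            memcpy((char *)target + copied,
                   (char *)iov->iov_base + (start_iov_off - curr_off),
                   chunk_len);

            copied += chunk_len;
            start_iov_off += chunk_len;
            curr_off = start_iov_off;
            bytes_to_copy -= chunk_len;
        } else {
            curr_off += iov->iov_len;
        }
        iov++;
    }
}

int main(void)
{
    char a[] = "hello ", b[] = "world";
    struct iovec iov[2] = {
        { .iov_base = a, .iov_len = 6 },
        { .iov_base = b, .iov_len = 5 },
    };
    char out[16] = { 0 };

    /* skip the first 4 logical bytes, copy the next 7: "o world" */
    iov_writev_sketch(iov, 4, out, 7);
    printf("%s\n", out);
    return 0;
}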