Diffstat (limited to 'hw/net')
-rw-r--r--  hw/net/cadence_gem.c      557
-rw-r--r--  hw/net/e1000e.c             2
-rw-r--r--  hw/net/e1000e_core.c       34
-rw-r--r--  hw/net/e1000e_core.h        3
-rw-r--r--  hw/net/fsl_etsec/etsec.c    2
-rw-r--r--  hw/net/imx_fec.c            2
-rw-r--r--  hw/net/lan9118.c            2
-rw-r--r--  hw/net/mcf_fec.c            7
-rw-r--r--  hw/net/spapr_llan.c        91
-rw-r--r--  hw/net/trace-events        18
-rw-r--r--  hw/net/virtio-net.c       126
11 files changed, 596 insertions(+), 248 deletions(-)
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index db1b301e7f..7915732f74 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -26,6 +26,8 @@
#include <zlib.h> /* For crc32 */
#include "hw/net/cadence_gem.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
#include "net/checksum.h"
#ifdef CADENCE_GEM_ERR_DEBUG
@@ -141,6 +143,55 @@
#define GEM_DESCONF6 (0x00000294/4)
#define GEM_DESCONF7 (0x00000298/4)
+#define GEM_INT_Q1_STATUS (0x00000400 / 4)
+
+#define GEM_TRANSMIT_Q1_PTR (0x00000440 / 4)
+#define GEM_TRANSMIT_Q7_PTR (GEM_TRANSMIT_Q1_PTR + 6)
+
+#define GEM_RECEIVE_Q1_PTR (0x00000480 / 4)
+#define GEM_RECEIVE_Q7_PTR (GEM_RECEIVE_Q1_PTR + 6)
+
+#define GEM_INT_Q1_ENABLE (0x00000600 / 4)
+#define GEM_INT_Q7_ENABLE (GEM_INT_Q1_ENABLE + 6)
+
+#define GEM_INT_Q1_DISABLE (0x00000620 / 4)
+#define GEM_INT_Q7_DISABLE (GEM_INT_Q1_DISABLE + 6)
+
+#define GEM_INT_Q1_MASK (0x00000640 / 4)
+#define GEM_INT_Q7_MASK (GEM_INT_Q1_MASK + 6)
+
+#define GEM_SCREENING_TYPE1_REGISTER_0 (0x00000500 / 4)
+
+#define GEM_ST1R_UDP_PORT_MATCH_ENABLE (1 << 29)
+#define GEM_ST1R_DSTC_ENABLE (1 << 28)
+#define GEM_ST1R_UDP_PORT_MATCH_SHIFT (12)
+#define GEM_ST1R_UDP_PORT_MATCH_WIDTH (27 - GEM_ST1R_UDP_PORT_MATCH_SHIFT + 1)
+#define GEM_ST1R_DSTC_MATCH_SHIFT (4)
+#define GEM_ST1R_DSTC_MATCH_WIDTH (11 - GEM_ST1R_DSTC_MATCH_SHIFT + 1)
+#define GEM_ST1R_QUEUE_SHIFT (0)
+#define GEM_ST1R_QUEUE_WIDTH (3 - GEM_ST1R_QUEUE_SHIFT + 1)
+
+#define GEM_SCREENING_TYPE2_REGISTER_0 (0x00000540 / 4)
+
+#define GEM_ST2R_COMPARE_A_ENABLE (1 << 18)
+#define GEM_ST2R_COMPARE_A_SHIFT (13)
+#define GEM_ST2R_COMPARE_WIDTH (17 - GEM_ST2R_COMPARE_A_SHIFT + 1)
+#define GEM_ST2R_ETHERTYPE_ENABLE (1 << 12)
+#define GEM_ST2R_ETHERTYPE_INDEX_SHIFT (9)
+#define GEM_ST2R_ETHERTYPE_INDEX_WIDTH (11 - GEM_ST2R_ETHERTYPE_INDEX_SHIFT \
+ + 1)
+#define GEM_ST2R_QUEUE_SHIFT (0)
+#define GEM_ST2R_QUEUE_WIDTH (3 - GEM_ST2R_QUEUE_SHIFT + 1)
+
+#define GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 (0x000006e0 / 4)
+#define GEM_TYPE2_COMPARE_0_WORD_0 (0x00000700 / 4)
+
+#define GEM_T2CW1_COMPARE_OFFSET_SHIFT (7)
+#define GEM_T2CW1_COMPARE_OFFSET_WIDTH (8 - GEM_T2CW1_COMPARE_OFFSET_SHIFT + 1)
+#define GEM_T2CW1_OFFSET_VALUE_SHIFT (0)
+#define GEM_T2CW1_OFFSET_VALUE_WIDTH (6 - GEM_T2CW1_OFFSET_VALUE_SHIFT + 1)
+
/*****************************************/
#define GEM_NWCTRL_TXSTART 0x00000200 /* Transmit Enable */
#define GEM_NWCTRL_TXENA 0x00000008 /* Transmit Enable */
@@ -284,9 +335,9 @@ static inline unsigned tx_desc_get_length(unsigned *desc)
return desc[1] & DESC_1_LENGTH;
}
-static inline void print_gem_tx_desc(unsigned *desc)
+static inline void print_gem_tx_desc(unsigned *desc, uint8_t queue)
{
- DB_PRINT("TXDESC:\n");
+ DB_PRINT("TXDESC (queue %" PRId8 "):\n", queue);
DB_PRINT("bufaddr: 0x%08x\n", *desc);
DB_PRINT("used_hw: %d\n", tx_desc_get_used(desc));
DB_PRINT("wrap: %d\n", tx_desc_get_wrap(desc));
@@ -416,6 +467,7 @@ static void phy_update_link(CadenceGEMState *s)
static int gem_can_receive(NetClientState *nc)
{
CadenceGEMState *s;
+ int i;
s = qemu_get_nic_opaque(nc);
@@ -428,18 +480,20 @@ static int gem_can_receive(NetClientState *nc)
return 0;
}
- if (rx_desc_get_ownership(s->rx_desc) == 1) {
- if (s->can_rx_state != 2) {
- s->can_rx_state = 2;
- DB_PRINT("can't receive - busy buffer descriptor 0x%x\n",
- s->rx_desc_addr);
+ for (i = 0; i < s->num_priority_queues; i++) {
+ if (rx_desc_get_ownership(s->rx_desc[i]) == 1) {
+ if (s->can_rx_state != 2) {
+ s->can_rx_state = 2;
+ DB_PRINT("can't receive - busy buffer descriptor (q%d) 0x%x\n",
+ i, s->rx_desc_addr[i]);
+ }
+ return 0;
}
- return 0;
}
if (s->can_rx_state != 0) {
s->can_rx_state = 0;
- DB_PRINT("can receive 0x%x\n", s->rx_desc_addr);
+ DB_PRINT("can receive\n");
}
return 1;
}
@@ -450,9 +504,20 @@ static int gem_can_receive(NetClientState *nc)
*/
static void gem_update_int_status(CadenceGEMState *s)
{
- if (s->regs[GEM_ISR]) {
- DB_PRINT("asserting int. (0x%08x)\n", s->regs[GEM_ISR]);
- qemu_set_irq(s->irq, 1);
+ int i;
+
+ if ((s->num_priority_queues == 1) && s->regs[GEM_ISR]) {
+ /* Only the single default queue enabled: just trigger the interrupt */
+ DB_PRINT("asserting int.\n");
+ qemu_set_irq(s->irq[0], 1);
+ return;
+ }
+
+ for (i = 0; i < s->num_priority_queues; ++i) {
+ if (s->regs[GEM_INT_Q1_STATUS + i]) {
+ DB_PRINT("asserting int. (q=%d)\n", i);
+ qemu_set_irq(s->irq[i], 1);
+ }
}
}
@@ -601,17 +666,137 @@ static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet)
return GEM_RX_REJECT;
}
-static void gem_get_rx_desc(CadenceGEMState *s)
+/* Figure out which queue the received data should be sent to */
+static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
+ unsigned rxbufsize)
{
- DB_PRINT("read descriptor 0x%x\n", (unsigned)s->rx_desc_addr);
+ uint32_t reg;
+ bool matched, mismatched;
+ int i, j;
+
+ for (i = 0; i < s->num_type1_screeners; i++) {
+ reg = s->regs[GEM_SCREENING_TYPE1_REGISTER_0 + i];
+ matched = false;
+ mismatched = false;
+
+ /* Screening is based on UDP Port */
+ if (reg & GEM_ST1R_UDP_PORT_MATCH_ENABLE) {
+ uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23];
+ if (udp_port == extract32(reg, GEM_ST1R_UDP_PORT_MATCH_SHIFT,
+ GEM_ST1R_UDP_PORT_MATCH_WIDTH)) {
+ matched = true;
+ } else {
+ mismatched = true;
+ }
+ }
+
+ /* Screening is based on DS/TC */
+ if (reg & GEM_ST1R_DSTC_ENABLE) {
+ uint8_t dscp = rxbuf_ptr[14 + 1];
+ if (dscp == extract32(reg, GEM_ST1R_DSTC_MATCH_SHIFT,
+ GEM_ST1R_DSTC_MATCH_WIDTH)) {
+ matched = true;
+ } else {
+ mismatched = true;
+ }
+ }
+
+ if (matched && !mismatched) {
+ return extract32(reg, GEM_ST1R_QUEUE_SHIFT, GEM_ST1R_QUEUE_WIDTH);
+ }
+ }
+
+ for (i = 0; i < s->num_type2_screeners; i++) {
+ reg = s->regs[GEM_SCREENING_TYPE2_REGISTER_0 + i];
+ matched = false;
+ mismatched = false;
+
+ if (reg & GEM_ST2R_ETHERTYPE_ENABLE) {
+ uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13];
+ int et_idx = extract32(reg, GEM_ST2R_ETHERTYPE_INDEX_SHIFT,
+ GEM_ST2R_ETHERTYPE_INDEX_WIDTH);
+
+ if (et_idx >= s->num_type2_screeners) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype "
+ "register index: %d\n", et_idx);
+ }
+ if (type == s->regs[GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 +
+ et_idx]) {
+ matched = true;
+ } else {
+ mismatched = true;
+ }
+ }
+
+ /* Compare A, B, C */
+ for (j = 0; j < 3; j++) {
+ uint32_t cr0, cr1, mask;
+ uint16_t rx_cmp;
+ int offset;
+ int cr_idx = extract32(reg, GEM_ST2R_COMPARE_A_SHIFT + j * 6,
+ GEM_ST2R_COMPARE_WIDTH);
+
+ if (!(reg & (GEM_ST2R_COMPARE_A_ENABLE << (j * 6)))) {
+ continue;
+ }
+ if (cr_idx >= s->num_type2_screeners) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare "
+ "register index: %d\n", cr_idx);
+ }
+
+ cr0 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2];
+ cr1 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2 + 1];
+ offset = extract32(cr1, GEM_T2CW1_OFFSET_VALUE_SHIFT,
+ GEM_T2CW1_OFFSET_VALUE_WIDTH);
+
+ switch (extract32(cr1, GEM_T2CW1_COMPARE_OFFSET_SHIFT,
+ GEM_T2CW1_COMPARE_OFFSET_WIDTH)) {
+ case 3: /* Skip UDP header */
+ qemu_log_mask(LOG_UNIMP, "TCP compare offsets "
+ "unimplemented - assuming UDP\n");
+ offset += 8;
+ /* Fallthrough */
+ case 2: /* skip the IP header */
+ offset += 20;
+ /* Fallthrough */
+ case 1: /* Count from after the ethertype */
+ offset += 14;
+ break;
+ case 0:
+ /* Offset from start of frame */
+ break;
+ }
+
+ rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset + 1];
+ mask = extract32(cr0, 0, 16);
+
+ if ((rx_cmp & mask) == (extract32(cr0, 16, 16) & mask)) {
+ matched = true;
+ } else {
+ mismatched = true;
+ }
+ }
+
+ if (matched && !mismatched) {
+ return extract32(reg, GEM_ST2R_QUEUE_SHIFT, GEM_ST2R_QUEUE_WIDTH);
+ }
+ }
+
+ /* We made it here, assume it's queue 0 */
+ return 0;
+}
+
+static void gem_get_rx_desc(CadenceGEMState *s, int q)
+{
+ DB_PRINT("read descriptor 0x%x\n", (unsigned)s->rx_desc_addr[q]);
/* read current descriptor */
- cpu_physical_memory_read(s->rx_desc_addr,
- (uint8_t *)s->rx_desc, sizeof(s->rx_desc));
+ cpu_physical_memory_read(s->rx_desc_addr[q],
+ (uint8_t *)s->rx_desc[q], sizeof(s->rx_desc[q]));
/* Descriptor owned by software ? */
- if (rx_desc_get_ownership(s->rx_desc) == 1) {
+ if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
DB_PRINT("descriptor 0x%x owned by sw.\n",
- (unsigned)s->rx_desc_addr);
+ (unsigned)s->rx_desc_addr[q]);
s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF;
s->regs[GEM_ISR] |= GEM_INT_RXUSED & ~(s->regs[GEM_IMR]);
/* Handle interrupt consequences */
@@ -632,6 +817,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
uint8_t *rxbuf_ptr;
bool first_desc = true;
int maf;
+ int q = 0;
s = qemu_get_nic_opaque(nc);
@@ -710,6 +896,9 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
DB_PRINT("config bufsize: %d packet size: %ld\n", rxbufsize, size);
+ /* Find which queue we are targeting */
+ q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);
+
while (bytes_to_copy) {
/* Do nothing if receive is not enabled. */
if (!gem_can_receive(nc)) {
@@ -718,56 +907,59 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
}
DB_PRINT("copy %d bytes to 0x%x\n", MIN(bytes_to_copy, rxbufsize),
- rx_desc_get_buffer(s->rx_desc));
+ rx_desc_get_buffer(s->rx_desc[q]));
/* Copy packet data to emulated DMA buffer */
- cpu_physical_memory_write(rx_desc_get_buffer(s->rx_desc) + rxbuf_offset,
+ cpu_physical_memory_write(rx_desc_get_buffer(s->rx_desc[q]) +
+ rxbuf_offset,
rxbuf_ptr, MIN(bytes_to_copy, rxbufsize));
rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
bytes_to_copy -= MIN(bytes_to_copy, rxbufsize);
/* Update the descriptor. */
if (first_desc) {
- rx_desc_set_sof(s->rx_desc);
+ rx_desc_set_sof(s->rx_desc[q]);
first_desc = false;
}
if (bytes_to_copy == 0) {
- rx_desc_set_eof(s->rx_desc);
- rx_desc_set_length(s->rx_desc, size);
+ rx_desc_set_eof(s->rx_desc[q]);
+ rx_desc_set_length(s->rx_desc[q], size);
}
- rx_desc_set_ownership(s->rx_desc);
+ rx_desc_set_ownership(s->rx_desc[q]);
switch (maf) {
case GEM_RX_PROMISCUOUS_ACCEPT:
break;
case GEM_RX_BROADCAST_ACCEPT:
- rx_desc_set_broadcast(s->rx_desc);
+ rx_desc_set_broadcast(s->rx_desc[q]);
break;
case GEM_RX_UNICAST_HASH_ACCEPT:
- rx_desc_set_unicast_hash(s->rx_desc);
+ rx_desc_set_unicast_hash(s->rx_desc[q]);
break;
case GEM_RX_MULTICAST_HASH_ACCEPT:
- rx_desc_set_multicast_hash(s->rx_desc);
+ rx_desc_set_multicast_hash(s->rx_desc[q]);
break;
case GEM_RX_REJECT:
abort();
default: /* SAR */
- rx_desc_set_sar(s->rx_desc, maf);
+ rx_desc_set_sar(s->rx_desc[q], maf);
}
/* Descriptor write-back. */
- cpu_physical_memory_write(s->rx_desc_addr,
- (uint8_t *)s->rx_desc, sizeof(s->rx_desc));
+ cpu_physical_memory_write(s->rx_desc_addr[q],
+ (uint8_t *)s->rx_desc[q],
+ sizeof(s->rx_desc[q]));
/* Next descriptor */
- if (rx_desc_get_wrap(s->rx_desc)) {
+ if (rx_desc_get_wrap(s->rx_desc[q])) {
DB_PRINT("wrapping RX descriptor list\n");
- s->rx_desc_addr = s->regs[GEM_RXQBASE];
+ s->rx_desc_addr[q] = s->regs[GEM_RXQBASE];
} else {
DB_PRINT("incrementing RX descriptor list\n");
- s->rx_desc_addr += 8;
+ s->rx_desc_addr[q] += 8;
}
- gem_get_rx_desc(s);
+
+ gem_get_rx_desc(s, q);
}
/* Count it */
@@ -839,6 +1031,7 @@ static void gem_transmit(CadenceGEMState *s)
uint8_t tx_packet[2048];
uint8_t *p;
unsigned total_bytes;
+ int q = 0;
/* Do nothing if transmit is not enabled. */
if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
@@ -854,110 +1047,123 @@ static void gem_transmit(CadenceGEMState *s)
p = tx_packet;
total_bytes = 0;
- /* read current descriptor */
- packet_desc_addr = s->tx_desc_addr;
-
- DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
- cpu_physical_memory_read(packet_desc_addr,
- (uint8_t *)desc, sizeof(desc));
- /* Handle all descriptors owned by hardware */
- while (tx_desc_get_used(desc) == 0) {
-
- /* Do nothing if transmit is not enabled. */
- if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
- return;
- }
- print_gem_tx_desc(desc);
-
- /* The real hardware would eat this (and possibly crash).
- * For QEMU let's lend a helping hand.
- */
- if ((tx_desc_get_buffer(desc) == 0) ||
- (tx_desc_get_length(desc) == 0)) {
- DB_PRINT("Invalid TX descriptor @ 0x%x\n",
- (unsigned)packet_desc_addr);
- break;
- }
-
- if (tx_desc_get_length(desc) > sizeof(tx_packet) - (p - tx_packet)) {
- DB_PRINT("TX descriptor @ 0x%x too large: size 0x%x space 0x%x\n",
- (unsigned)packet_desc_addr,
- (unsigned)tx_desc_get_length(desc),
- sizeof(tx_packet) - (p - tx_packet));
- break;
- }
+ for (q = s->num_priority_queues - 1; q >= 0; q--) {
+ /* read current descriptor */
+ packet_desc_addr = s->tx_desc_addr[q];
- /* Gather this fragment of the packet from "dma memory" to our contig.
- * buffer.
- */
- cpu_physical_memory_read(tx_desc_get_buffer(desc), p,
- tx_desc_get_length(desc));
- p += tx_desc_get_length(desc);
- total_bytes += tx_desc_get_length(desc);
+ DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
+ cpu_physical_memory_read(packet_desc_addr,
+ (uint8_t *)desc, sizeof(desc));
+ /* Handle all descriptors owned by hardware */
+ while (tx_desc_get_used(desc) == 0) {
- /* Last descriptor for this packet; hand the whole thing off */
- if (tx_desc_get_last(desc)) {
- unsigned desc_first[2];
+ /* Do nothing if transmit is not enabled. */
+ if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
+ return;
+ }
+ print_gem_tx_desc(desc, q);
- /* Modify the 1st descriptor of this packet to be owned by
- * the processor.
+ /* The real hardware would eat this (and possibly crash).
+ * For QEMU let's lend a helping hand.
*/
- cpu_physical_memory_read(s->tx_desc_addr, (uint8_t *)desc_first,
- sizeof(desc_first));
- tx_desc_set_used(desc_first);
- cpu_physical_memory_write(s->tx_desc_addr, (uint8_t *)desc_first,
- sizeof(desc_first));
- /* Advance the hardware current descriptor past this packet */
- if (tx_desc_get_wrap(desc)) {
- s->tx_desc_addr = s->regs[GEM_TXQBASE];
- } else {
- s->tx_desc_addr = packet_desc_addr + 8;
+ if ((tx_desc_get_buffer(desc) == 0) ||
+ (tx_desc_get_length(desc) == 0)) {
+ DB_PRINT("Invalid TX descriptor @ 0x%x\n",
+ (unsigned)packet_desc_addr);
+ break;
}
- DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr);
-
- s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
- s->regs[GEM_ISR] |= GEM_INT_TXCMPL & ~(s->regs[GEM_IMR]);
-
- /* Handle interrupt consequences */
- gem_update_int_status(s);
- /* Is checksum offload enabled? */
- if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) {
- net_checksum_calculate(tx_packet, total_bytes);
+ if (tx_desc_get_length(desc) > sizeof(tx_packet) -
+ (p - tx_packet)) {
+ DB_PRINT("TX descriptor @ 0x%x too large: size 0x%x space " \
+ "0x%x\n", (unsigned)packet_desc_addr,
+ (unsigned)tx_desc_get_length(desc),
+ sizeof(tx_packet) - (p - tx_packet));
+ break;
}
- /* Update MAC statistics */
- gem_transmit_updatestats(s, tx_packet, total_bytes);
+ /* Gather this fragment of the packet from "dma memory" to our
+ * contig buffer.
+ */
+ cpu_physical_memory_read(tx_desc_get_buffer(desc), p,
+ tx_desc_get_length(desc));
+ p += tx_desc_get_length(desc);
+ total_bytes += tx_desc_get_length(desc);
+
+ /* Last descriptor for this packet; hand the whole thing off */
+ if (tx_desc_get_last(desc)) {
+ unsigned desc_first[2];
+
+ /* Modify the 1st descriptor of this packet to be owned by
+ * the processor.
+ */
+ cpu_physical_memory_read(s->tx_desc_addr[q],
+ (uint8_t *)desc_first,
+ sizeof(desc_first));
+ tx_desc_set_used(desc_first);
+ cpu_physical_memory_write(s->tx_desc_addr[q],
+ (uint8_t *)desc_first,
+ sizeof(desc_first));
+ /* Advance the hardware current descriptor past this packet */
+ if (tx_desc_get_wrap(desc)) {
+ s->tx_desc_addr[q] = s->regs[GEM_TXQBASE];
+ } else {
+ s->tx_desc_addr[q] = packet_desc_addr + 8;
+ }
+ DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);
+
+ s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
+ s->regs[GEM_ISR] |= GEM_INT_TXCMPL & ~(s->regs[GEM_IMR]);
+
+ /* Update queue interrupt status */
+ if (s->num_priority_queues > 1) {
+ s->regs[GEM_INT_Q1_STATUS + q] |=
+ GEM_INT_TXCMPL & ~(s->regs[GEM_INT_Q1_MASK + q]);
+ }
+
+ /* Handle interrupt consequences */
+ gem_update_int_status(s);
+
+ /* Is checksum offload enabled? */
+ if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) {
+ net_checksum_calculate(tx_packet, total_bytes);
+ }
+
+ /* Update MAC statistics */
+ gem_transmit_updatestats(s, tx_packet, total_bytes);
+
+ /* Send the packet somewhere */
+ if (s->phy_loop || (s->regs[GEM_NWCTRL] &
+ GEM_NWCTRL_LOCALLOOP)) {
+ gem_receive(qemu_get_queue(s->nic), tx_packet,
+ total_bytes);
+ } else {
+ qemu_send_packet(qemu_get_queue(s->nic), tx_packet,
+ total_bytes);
+ }
+
+ /* Prepare for next packet */
+ p = tx_packet;
+ total_bytes = 0;
+ }
- /* Send the packet somewhere */
- if (s->phy_loop || (s->regs[GEM_NWCTRL] & GEM_NWCTRL_LOCALLOOP)) {
- gem_receive(qemu_get_queue(s->nic), tx_packet, total_bytes);
+ /* read next descriptor */
+ if (tx_desc_get_wrap(desc)) {
+ tx_desc_set_last(desc);
+ packet_desc_addr = s->regs[GEM_TXQBASE];
} else {
- qemu_send_packet(qemu_get_queue(s->nic), tx_packet,
- total_bytes);
+ packet_desc_addr += 8;
}
-
- /* Prepare for next packet */
- p = tx_packet;
- total_bytes = 0;
+ DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
+ cpu_physical_memory_read(packet_desc_addr,
+ (uint8_t *)desc, sizeof(desc));
}
- /* read next descriptor */
- if (tx_desc_get_wrap(desc)) {
- tx_desc_set_last(desc);
- packet_desc_addr = s->regs[GEM_TXQBASE];
- } else {
- packet_desc_addr += 8;
+ if (tx_desc_get_used(desc)) {
+ s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED;
+ s->regs[GEM_ISR] |= GEM_INT_TXUSED & ~(s->regs[GEM_IMR]);
+ gem_update_int_status(s);
}
- DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
- cpu_physical_memory_read(packet_desc_addr,
- (uint8_t *)desc, sizeof(desc));
- }
-
- if (tx_desc_get_used(desc)) {
- s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED;
- s->regs[GEM_ISR] |= GEM_INT_TXUSED & ~(s->regs[GEM_IMR]);
- gem_update_int_status(s);
}
}
@@ -1065,7 +1271,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
{
CadenceGEMState *s;
uint32_t retval;
-
+ int i;
s = (CadenceGEMState *)opaque;
offset >>= 2;
@@ -1075,8 +1281,10 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
switch (offset) {
case GEM_ISR:
- DB_PRINT("lowering irq on ISR read\n");
- qemu_set_irq(s->irq, 0);
+ DB_PRINT("lowering irqs on ISR read\n");
+ for (i = 0; i < s->num_priority_queues; ++i) {
+ qemu_set_irq(s->irq[i], 0);
+ }
break;
case GEM_PHYMNTNC:
if (retval & GEM_PHYMNTNC_OP_R) {
@@ -1101,6 +1309,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
retval &= ~(s->regs_wo[offset]);
DB_PRINT("0x%08x\n", retval);
+ gem_update_int_status(s);
return retval;
}
@@ -1113,6 +1322,7 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
{
CadenceGEMState *s = (CadenceGEMState *)opaque;
uint32_t readonly;
+ int i;
DB_PRINT("offset: 0x%04x write: 0x%08x ", (unsigned)offset, (unsigned)val);
offset >>= 2;
@@ -1132,14 +1342,18 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
switch (offset) {
case GEM_NWCTRL:
if (val & GEM_NWCTRL_RXENA) {
- gem_get_rx_desc(s);
+ for (i = 0; i < s->num_priority_queues; ++i) {
+ gem_get_rx_desc(s, i);
+ }
}
if (val & GEM_NWCTRL_TXSTART) {
gem_transmit(s);
}
if (!(val & GEM_NWCTRL_TXENA)) {
/* Reset to start of Q when transmit disabled. */
- s->tx_desc_addr = s->regs[GEM_TXQBASE];
+ for (i = 0; i < s->num_priority_queues; i++) {
+ s->tx_desc_addr[i] = s->regs[GEM_TXQBASE];
+ }
}
if (gem_can_receive(qemu_get_queue(s->nic))) {
qemu_flush_queued_packets(qemu_get_queue(s->nic));
@@ -1150,10 +1364,16 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
gem_update_int_status(s);
break;
case GEM_RXQBASE:
- s->rx_desc_addr = val;
+ s->rx_desc_addr[0] = val;
+ break;
+ case GEM_RECEIVE_Q1_PTR ... GEM_RECEIVE_Q7_PTR:
+ s->rx_desc_addr[offset - GEM_RECEIVE_Q1_PTR + 1] = val;
break;
case GEM_TXQBASE:
- s->tx_desc_addr = val;
+ s->tx_desc_addr[0] = val;
+ break;
+ case GEM_TRANSMIT_Q1_PTR ... GEM_TRANSMIT_Q7_PTR:
+ s->tx_desc_addr[offset - GEM_TRANSMIT_Q1_PTR + 1] = val;
break;
case GEM_RXSTATUS:
gem_update_int_status(s);
@@ -1162,10 +1382,18 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
s->regs[GEM_IMR] &= ~val;
gem_update_int_status(s);
break;
+ case GEM_INT_Q1_ENABLE ... GEM_INT_Q7_ENABLE:
+ s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_ENABLE] &= ~val;
+ gem_update_int_status(s);
+ break;
case GEM_IDR:
s->regs[GEM_IMR] |= val;
gem_update_int_status(s);
break;
+ case GEM_INT_Q1_DISABLE ... GEM_INT_Q7_DISABLE:
+ s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_DISABLE] |= val;
+ gem_update_int_status(s);
+ break;
case GEM_SPADDR1LO:
case GEM_SPADDR2LO:
case GEM_SPADDR3LO:
@@ -1202,8 +1430,11 @@ static const MemoryRegionOps gem_ops = {
static void gem_set_link(NetClientState *nc)
{
+ CadenceGEMState *s = qemu_get_nic_opaque(nc);
+
DB_PRINT("\n");
- phy_update_link(qemu_get_nic_opaque(nc));
+ phy_update_link(s);
+ gem_update_int_status(s);
}
static NetClientInfo net_gem_info = {
@@ -1214,36 +1445,62 @@ static NetClientInfo net_gem_info = {
.link_status_changed = gem_set_link,
};
-static int gem_init(SysBusDevice *sbd)
+static void gem_realize(DeviceState *dev, Error **errp)
{
- DeviceState *dev = DEVICE(sbd);
CadenceGEMState *s = CADENCE_GEM(dev);
+ int i;
+
+ if (s->num_priority_queues == 0 ||
+ s->num_priority_queues > MAX_PRIORITY_QUEUES) {
+ error_setg(errp, "Invalid num-priority-queues value: %" PRIx8,
+ s->num_priority_queues);
+ return;
+ } else if (s->num_type1_screeners > MAX_TYPE1_SCREENERS) {
+ error_setg(errp, "Invalid num-type1-screeners value: %" PRIx8,
+ s->num_type1_screeners);
+ return;
+ } else if (s->num_type2_screeners > MAX_TYPE2_SCREENERS) {
+ error_setg(errp, "Invalid num-type2-screeners value: %" PRIx8,
+ s->num_type2_screeners);
+ return;
+ }
+
+ for (i = 0; i < s->num_priority_queues; ++i) {
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
+ }
+
+ qemu_macaddr_default_if_unset(&s->conf.macaddr);
+
+ s->nic = qemu_new_nic(&net_gem_info, &s->conf,
+ object_get_typename(OBJECT(dev)), dev->id, s);
+}
+
+static void gem_init(Object *obj)
+{
+ CadenceGEMState *s = CADENCE_GEM(obj);
+ DeviceState *dev = DEVICE(obj);
DB_PRINT("\n");
gem_init_register_masks(s);
memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s,
"enet", sizeof(s->regs));
- sysbus_init_mmio(sbd, &s->iomem);
- sysbus_init_irq(sbd, &s->irq);
- qemu_macaddr_default_if_unset(&s->conf.macaddr);
- s->nic = qemu_new_nic(&net_gem_info, &s->conf,
- object_get_typename(OBJECT(dev)), dev->id, s);
-
- return 0;
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}
static const VMStateDescription vmstate_cadence_gem = {
.name = "cadence_gem",
- .version_id = 2,
- .minimum_version_id = 2,
+ .version_id = 4,
+ .minimum_version_id = 4,
.fields = (VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG),
VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32),
VMSTATE_UINT8(phy_loop, CadenceGEMState),
- VMSTATE_UINT32(rx_desc_addr, CadenceGEMState),
- VMSTATE_UINT32(tx_desc_addr, CadenceGEMState),
+ VMSTATE_UINT32_ARRAY(rx_desc_addr, CadenceGEMState,
+ MAX_PRIORITY_QUEUES),
+ VMSTATE_UINT32_ARRAY(tx_desc_addr, CadenceGEMState,
+ MAX_PRIORITY_QUEUES),
VMSTATE_BOOL_ARRAY(sar_active, CadenceGEMState, 4),
VMSTATE_END_OF_LIST(),
}
@@ -1251,15 +1508,20 @@ static const VMStateDescription vmstate_cadence_gem = {
static Property gem_properties[] = {
DEFINE_NIC_PROPERTIES(CadenceGEMState, conf),
+ DEFINE_PROP_UINT8("num-priority-queues", CadenceGEMState,
+ num_priority_queues, 1),
+ DEFINE_PROP_UINT8("num-type1-screeners", CadenceGEMState,
+ num_type1_screeners, 4),
+ DEFINE_PROP_UINT8("num-type2-screeners", CadenceGEMState,
+ num_type2_screeners, 4),
DEFINE_PROP_END_OF_LIST(),
};
static void gem_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
- sdc->init = gem_init;
+ dc->realize = gem_realize;
dc->props = gem_properties;
dc->vmsd = &vmstate_cadence_gem;
dc->reset = gem_reset;
@@ -1269,6 +1531,7 @@ static const TypeInfo gem_info = {
.name = TYPE_CADENCE_GEM,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(CadenceGEMState),
+ .instance_init = gem_init,
.class_init = gem_class_init,
};
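
For illustration, here is how a bare-metal guest could use the type-1 screeners added above to steer traffic into a priority queue. This is a minimal sketch, not part of the patch: the MMIO base address is an assumption, and the field positions simply restate the GEM_ST1R_* defines.

/* Hypothetical guest-side sketch: steer UDP port 5004 to priority queue 1. */
#include <stdint.h>

#define GEM_BASE   0xff0b0000u   /* assumed MMIO base for the GEM instance */
#define GEM_ST1R0  0x500u        /* Screening Type 1 Register 0 */

static void gem_steer_udp_to_q1(void)
{
    volatile uint32_t *gem = (volatile uint32_t *)GEM_BASE;
    uint32_t v = 0;

    v |= 1u << 29;       /* UDP port match enable (GEM_ST1R_UDP_PORT_MATCH_ENABLE) */
    v |= 5004u << 12;    /* UDP destination port, bits [27:12] */
    v |= 1u << 0;        /* route matches to queue 1, bits [3:0] */
    gem[GEM_ST1R0 / 4] = v;
}

With a register programmed like this, get_queue_from_screen() returns 1 for matching frames, and the descriptor fetch, write-back, and per-queue interrupt all use index 1.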
diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c
index bad43f474e..4994e1ca00 100644
--- a/hw/net/e1000e.c
+++ b/hw/net/e1000e.c
@@ -400,7 +400,7 @@ static void e1000e_write_config(PCIDevice *pci_dev, uint32_t address,
if (range_covers_byte(address, len, PCI_COMMAND) &&
(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
- qemu_flush_queued_packets(qemu_get_queue(s->nic));
+ e1000e_start_recv(&s->core);
}
}
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
index badb1feb7d..6505983c12 100644
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -52,7 +52,7 @@
second according to spec 10.2.4.2 */
#define E1000E_MAX_TX_FRAGS (64)
-static void
+static inline void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val);
static inline void
@@ -953,7 +953,7 @@ e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r,
core->rx_desc_buf_size;
}
-static inline void
+void
e1000e_start_recv(E1000ECore *core)
{
int i;
@@ -1710,7 +1710,8 @@ e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt)
}
/* Perform ACK receive detection */
- if (e1000e_is_tcp_ack(core, core->rx_pkt)) {
+ if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) &&
+ (e1000e_is_tcp_ack(core, core->rx_pkt))) {
n |= E1000_ICS_ACK;
}
@@ -1807,6 +1808,7 @@ e1000e_core_set_link_status(E1000ECore *core)
core->autoneg_timer);
} else {
e1000x_update_regs_on_link_up(core->mac, core->phy[0]);
+ e1000e_start_recv(core);
}
}
@@ -2007,19 +2009,23 @@ e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
}
if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_EIAME) {
- trace_e1000e_irq_ims_clear_eiame(core->mac[IAM], cause);
- e1000e_clear_ims_bits(core, core->mac[IAM] & cause);
+ trace_e1000e_irq_iam_clear_eiame(core->mac[IAM], cause);
+ core->mac[IAM] &= ~cause;
}
trace_e1000e_irq_icr_clear_eiac(core->mac[ICR], core->mac[EIAC]);
- if (core->mac[EIAC] & E1000_ICR_OTHER) {
- effective_eiac = (core->mac[EIAC] & E1000_EIAC_MASK) |
- E1000_ICR_OTHER_CAUSES;
- } else {
- effective_eiac = core->mac[EIAC] & E1000_EIAC_MASK;
+ effective_eiac = core->mac[EIAC] & cause;
+
+ if (effective_eiac == E1000_ICR_OTHER) {
+ effective_eiac |= E1000_ICR_OTHER_CAUSES;
}
+
core->mac[ICR] &= ~effective_eiac;
+
+ if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
+ core->mac[IMS] &= ~effective_eiac;
+ }
}
static void
@@ -2130,7 +2136,7 @@ e1000e_update_interrupt_state(E1000ECore *core)
/* Set ICR[OTHER] for MSI-X */
if (is_msix) {
- if (core->mac[ICR] & core->mac[IMS] & E1000_ICR_OTHER_CAUSES) {
+ if (core->mac[ICR] & E1000_ICR_OTHER_CAUSES) {
core->mac[ICR] |= E1000_ICR_OTHER;
trace_e1000e_irq_add_msi_other(core->mac[ICR]);
}
@@ -2168,7 +2174,7 @@ e1000e_update_interrupt_state(E1000ECore *core)
}
}
-static inline void
+static void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val)
{
trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]);
@@ -2187,6 +2193,8 @@ e1000e_autoneg_timer(void *opaque)
E1000ECore *core = opaque;
if (!qemu_get_queue(core->owner_nic)->link_down) {
e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]);
+ e1000e_start_recv(core);
+
e1000e_update_flowctl_status(core);
/* signal link status change to the guest */
e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
@@ -2344,7 +2352,7 @@ e1000e_set_pbaclr(E1000ECore *core, int index, uint32_t val)
core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;
- if (msix_enabled(core->owner)) {
+ if (!msix_enabled(core->owner)) {
return;
}
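
The MSI-X rework above is subtle, so a compact model of the new auto-clear behavior may help; this is a sketch, and it omits the EIAME/IAM handling and the OTHER-cause expansion for brevity.

#include <stdint.h>

/* Minimal model of the reworked EIAC handling: only the asserted cause is
 * auto-cleared from ICR, and IMS is auto-masked unless CTRL_EXT.IAME is set. */
static void msix_auto_clear(uint32_t *icr, uint32_t *ims,
                            uint32_t eiac, uint32_t cause, int iame)
{
    uint32_t effective = eiac & cause;   /* old code used all of EIAC */

    *icr &= ~effective;
    if (!iame) {
        *ims &= ~effective;
    }
}

The old code cleared every EIAC-selected bit on any MSI-X delivery regardless of which cause fired; the new code scopes both the ICR clear and the IMS auto-mask to the vector's own cause.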
diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h
index 5f413a9e08..1ff6978ca1 100644
--- a/hw/net/e1000e_core.h
+++ b/hw/net/e1000e_core.h
@@ -144,3 +144,6 @@ e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size);
ssize_t
e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt);
+
+void
+e1000e_start_recv(E1000ECore *core);
diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c
index b5c777fbf6..951c5f0038 100644
--- a/hw/net/fsl_etsec/etsec.c
+++ b/hw/net/fsl_etsec/etsec.c
@@ -387,7 +387,7 @@ static void etsec_realize(DeviceState *dev, Error **errp)
etsec->bh = qemu_bh_new(etsec_timer_hit, etsec);
- etsec->ptimer = ptimer_init(etsec->bh);
+ etsec->ptimer = ptimer_init(etsec->bh, PTIMER_POLICY_DEFAULT);
ptimer_set_freq(etsec->ptimer, 100);
}
diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
index 1c415ab3b1..50c75642c6 100644
--- a/hw/net/imx_fec.c
+++ b/hw/net/imx_fec.c
@@ -429,7 +429,7 @@ static void imx_fec_do_tx(IMXFECState *s)
frame_size += len;
if (bd.flags & ENET_BD_L) {
/* Last buffer in frame. */
- qemu_send_packet(qemu_get_queue(s->nic), frame, len);
+ qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
ptr = frame;
frame_size = 0;
s->regs[ENET_EIR] |= ENET_INT_TXF;
diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c
index 4615d873b1..3db8937cac 100644
--- a/hw/net/lan9118.c
+++ b/hw/net/lan9118.c
@@ -1345,7 +1345,7 @@ static int lan9118_init1(SysBusDevice *sbd)
s->txp = &s->tx_packet;
bh = qemu_bh_new(lan9118_tick, s);
- s->timer = ptimer_init(bh);
+ s->timer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
ptimer_set_freq(s->timer, 10000);
ptimer_set_limit(s->timer, 0xffff, 1);
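
The etsec and lan9118 hunks above are mechanical adaptations to the ptimer API, which now takes a policy mask; PTIMER_POLICY_DEFAULT requests the historical behavior. A sketch of the updated call pattern inside QEMU (callback name and opaque are placeholders):

#include "qemu/osdep.h"
#include "hw/ptimer.h"

static void my_tick(void *opaque)
{
    /* placeholder timer callback */
}

static ptimer_state *init_100hz_ptimer(void *opaque)
{
    QEMUBH *bh = qemu_bh_new(my_tick, opaque);
    ptimer_state *pt = ptimer_init(bh, PTIMER_POLICY_DEFAULT);

    ptimer_set_freq(pt, 100);   /* 100 Hz, as in etsec_realize() */
    return pt;
}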
diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c
index 0ee8ad9d66..dc61bac2fc 100644
--- a/hw/net/mcf_fec.c
+++ b/hw/net/mcf_fec.c
@@ -23,6 +23,7 @@ do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#define DPRINTF(fmt, ...) do {} while(0)
#endif
+#define FEC_MAX_DESC 1024
#define FEC_MAX_FRAME_SIZE 2032
typedef struct {
@@ -149,7 +150,7 @@ static void mcf_fec_do_tx(mcf_fec_state *s)
uint32_t addr;
mcf_fec_bd bd;
int frame_size;
- int len;
+ int len, descnt = 0;
uint8_t frame[FEC_MAX_FRAME_SIZE];
uint8_t *ptr;
@@ -157,7 +158,7 @@ static void mcf_fec_do_tx(mcf_fec_state *s)
ptr = frame;
frame_size = 0;
addr = s->tx_descriptor;
- while (1) {
+ while (descnt++ < FEC_MAX_DESC) {
mcf_fec_read_bd(&bd, addr);
DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
addr, bd.flags, bd.length, bd.data);
@@ -176,7 +177,7 @@ static void mcf_fec_do_tx(mcf_fec_state *s)
if (bd.flags & FEC_BD_L) {
/* Last buffer in frame. */
DPRINTF("Sending packet\n");
- qemu_send_packet(qemu_get_queue(s->nic), frame, len);
+ qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
ptr = frame;
frame_size = 0;
s->eir |= FEC_INT_TXF;
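
Both FEC models above share the same bug and the same fix: a frame that spans several buffer descriptors must be transmitted with the accumulated frame_size, not the length of the last fragment. mcf_fec additionally caps the descriptor walk at FEC_MAX_DESC so a ring that never sets the "last" flag cannot hang QEMU. A standalone model of the corrected gather loop (types and names are hypothetical):

#include <stddef.h>
#include <string.h>

struct bd { const void *data; size_t len; int last; };

#define MAX_DESC 1024   /* mirrors FEC_MAX_DESC: bound on the descriptor walk */

static size_t gather_frame(const struct bd *bds, size_t n,
                           unsigned char *frame, size_t cap)
{
    size_t frame_size = 0;

    for (size_t i = 0; i < n && i < MAX_DESC; i++) {
        if (frame_size + bds[i].len > cap) {
            break;                            /* oversized frame: stop */
        }
        memcpy(frame + frame_size, bds[i].data, bds[i].len);
        frame_size += bds[i].len;             /* accumulate across fragments */
        if (bds[i].last) {
            return frame_size;                /* send this, not bds[i].len */
        }
    }
    return 0;                                 /* no complete frame found */
}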
diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c
index b273eda933..01ecb02773 100644
--- a/hw/net/spapr_llan.c
+++ b/hw/net/spapr_llan.c
@@ -34,20 +34,13 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "sysemu/sysemu.h"
+#include "trace.h"
#include <libfdt.h>
#define ETH_ALEN 6
#define MAX_PACKET_SIZE 65536
-/*#define DEBUG*/
-
-#ifdef DEBUG
-#define DPRINTF(fmt...) do { fprintf(stderr, fmt); } while (0)
-#else
-#define DPRINTF(fmt...)
-#endif
-
/* Compatibility flags for migration */
#define SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT 0
#define SPAPRVLAN_FLAG_RX_BUF_POOLS (1 << SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT)
@@ -106,6 +99,7 @@ typedef struct VIOsPAPRVLANDevice {
VIOsPAPRDevice sdev;
NICConf nicconf;
NICState *nic;
+ MACAddr perm_mac;
bool isopen;
hwaddr buf_list;
uint32_t add_buf_ptr, use_buf_ptr, rx_bufs;
@@ -157,8 +151,10 @@ static vlan_bd_t spapr_vlan_get_rx_bd_from_pool(VIOsPAPRVLANDevice *dev,
return 0;
}
- DPRINTF("Found buffer: pool=%d count=%d rxbufs=%d\n", pool,
- dev->rx_pool[pool]->count, dev->rx_bufs);
+
+ trace_spapr_vlan_get_rx_bd_from_pool_found(pool,
+ dev->rx_pool[pool]->count,
+ dev->rx_bufs);
/* Remove the buffer from the pool */
dev->rx_pool[pool]->count--;
@@ -185,8 +181,8 @@ static vlan_bd_t spapr_vlan_get_rx_bd_from_page(VIOsPAPRVLANDevice *dev,
}
bd = vio_ldq(&dev->sdev, dev->buf_list + buf_ptr);
- DPRINTF("use_buf_ptr=%d bd=0x%016llx\n",
- buf_ptr, (unsigned long long)bd);
+
+ trace_spapr_vlan_get_rx_bd_from_page(buf_ptr, (uint64_t)bd);
} while ((!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8)
&& buf_ptr != dev->use_buf_ptr);
@@ -199,7 +195,7 @@ static vlan_bd_t spapr_vlan_get_rx_bd_from_page(VIOsPAPRVLANDevice *dev,
dev->use_buf_ptr = buf_ptr;
vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0);
- DPRINTF("Found buffer: ptr=%d rxbufs=%d\n", dev->use_buf_ptr, dev->rx_bufs);
+ trace_spapr_vlan_get_rx_bd_from_page_found(dev->use_buf_ptr, dev->rx_bufs);
return bd;
}
@@ -214,8 +210,7 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
uint64_t handle;
uint8_t control;
- DPRINTF("spapr_vlan_receive() [%s] rx_bufs=%d\n", sdev->qdev.id,
- dev->rx_bufs);
+ trace_spapr_vlan_receive(sdev->qdev.id, dev->rx_bufs);
if (!dev->isopen) {
return -1;
@@ -243,7 +238,7 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
return -1;
}
- DPRINTF("spapr_vlan_receive: DMA write completed\n");
+ trace_spapr_vlan_receive_dma_completed();
/* Update the receive queue */
control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID;
@@ -257,12 +252,11 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);
- DPRINTF("wrote rxq entry (ptr=0x%llx): 0x%016llx 0x%016llx\n",
- (unsigned long long)dev->rxq_ptr,
- (unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
- dev->rxq_ptr),
- (unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
- dev->rxq_ptr + 8));
+ trace_spapr_vlan_receive_wrote(dev->rxq_ptr,
+ vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
+ dev->rxq_ptr),
+ vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
+ dev->rxq_ptr + 8));
dev->rxq_ptr += 16;
if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
@@ -316,6 +310,10 @@ static void spapr_vlan_reset(VIOsPAPRDevice *sdev)
spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
}
}
+
+ memcpy(&dev->nicconf.macaddr.a, &dev->perm_mac.a,
+ sizeof(dev->nicconf.macaddr.a));
+ qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
}
static void spapr_vlan_realize(VIOsPAPRDevice *sdev, Error **errp)
@@ -324,6 +322,8 @@ static void spapr_vlan_realize(VIOsPAPRDevice *sdev, Error **errp)
qemu_macaddr_default_if_unset(&dev->nicconf.macaddr);
+ memcpy(&dev->perm_mac.a, &dev->nicconf.macaddr.a, sizeof(dev->perm_mac.a));
+
dev->nic = qemu_new_nic(&net_spapr_vlan_info, &dev->nicconf,
object_get_typename(OBJECT(sdev)), sdev->qdev.id, dev);
qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
@@ -573,8 +573,8 @@ static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev,
qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]),
rx_pool_size_compare);
pool = spapr_vlan_get_rx_pool_id(dev, size);
- DPRINTF("created RX pool %d for size %lld\n", pool,
- VLAN_BD_LEN(buf));
+ trace_spapr_vlan_add_rxbuf_to_pool_create(pool,
+ VLAN_BD_LEN(buf));
break;
}
}
@@ -584,8 +584,8 @@ static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev,
return H_RESOURCE;
}
- DPRINTF("h_add_llan_buf(): Add buf using pool %i (size %lli, count=%i)\n",
- pool, VLAN_BD_LEN(buf), dev->rx_pool[pool]->count);
+ trace_spapr_vlan_add_rxbuf_to_pool(pool, VLAN_BD_LEN(buf),
+ dev->rx_pool[pool]->count);
dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf;
@@ -616,8 +616,7 @@ static target_long spapr_vlan_add_rxbuf_to_page(VIOsPAPRVLANDevice *dev,
vio_stq(&dev->sdev, dev->buf_list + dev->add_buf_ptr, buf);
- DPRINTF("h_add_llan_buf(): Added buf ptr=%d rx_bufs=%d bd=0x%016llx\n",
- dev->add_buf_ptr, dev->rx_bufs, (unsigned long long)buf);
+ trace_spapr_vlan_add_rxbuf_to_page(dev->add_buf_ptr, dev->rx_bufs, buf);
return 0;
}
@@ -633,8 +632,7 @@ static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
target_long ret;
- DPRINTF("H_ADD_LOGICAL_LAN_BUFFER(0x" TARGET_FMT_lx
- ", 0x" TARGET_FMT_lx ")\n", reg, buf);
+ trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf);
if (!sdev) {
hcall_dprintf("Bad device\n");
@@ -687,14 +685,13 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
int i, nbufs;
int ret;
- DPRINTF("H_SEND_LOGICAL_LAN(0x" TARGET_FMT_lx ", <bufs>, 0x"
- TARGET_FMT_lx ")\n", reg, continue_token);
+ trace_spapr_vlan_h_send_logical_lan(reg, continue_token);
if (!sdev) {
return H_PARAMETER;
}
- DPRINTF("rxbufs = %d\n", dev->rx_bufs);
+ trace_spapr_vlan_h_send_logical_lan_rxbufs(dev->rx_bufs);
if (!dev->isopen) {
return H_DROPPED;
@@ -706,7 +703,7 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
total_len = 0;
for (i = 0; i < 6; i++) {
- DPRINTF(" buf desc: 0x" TARGET_FMT_lx "\n", bufs[i]);
+ trace_spapr_vlan_h_send_logical_lan_buf_desc(bufs[i]);
if (!(bufs[i] & VLAN_BD_VALID)) {
break;
}
@@ -714,8 +711,7 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
}
nbufs = i;
- DPRINTF("h_send_logical_lan() %d buffers, total length 0x%x\n",
- nbufs, total_len);
+ trace_spapr_vlan_h_send_logical_lan_total(nbufs, total_len);
if (total_len == 0) {
return H_SUCCESS;
@@ -756,6 +752,27 @@ static target_ulong h_multicast_ctrl(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return H_SUCCESS;
}
+static target_ulong h_change_logical_lan_mac(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ target_ulong reg = args[0];
+ target_ulong macaddr = args[1];
+ VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+ VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
+ int i;
+
+ if (!dev) {
+ return H_PARAMETER;
+ }
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->nicconf.macaddr.a[ETH_ALEN - i - 1] = macaddr & 0xff;
+ macaddr >>= 8;
+ }
+
+ qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
+
+ return H_SUCCESS;
+}
+
static Property spapr_vlan_properties[] = {
DEFINE_SPAPR_PROPERTIES(VIOsPAPRVLANDevice, sdev),
DEFINE_NIC_PROPERTIES(VIOsPAPRVLANDevice, nicconf),
@@ -854,6 +871,8 @@ static void spapr_vlan_register_types(void)
spapr_register_hypercall(H_ADD_LOGICAL_LAN_BUFFER,
h_add_logical_lan_buffer);
spapr_register_hypercall(H_MULTICAST_CTRL, h_multicast_ctrl);
+ spapr_register_hypercall(H_CHANGE_LOGICAL_LAN_MAC,
+ h_change_logical_lan_mac);
type_register_static(&spapr_vlan_info);
}
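
H_CHANGE_LOGICAL_LAN_MAC passes the new MAC address in the low 48 bits of the second hypercall argument, most significant byte first; the loop above peels bytes off the low end and stores them back to front. A self-contained worked example (the address value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
    uint64_t macaddr = 0x025203abcdefULL;   /* illustrative argument value */
    uint8_t a[ETH_ALEN];
    int i;

    for (i = 0; i < ETH_ALEN; i++) {
        a[ETH_ALEN - i - 1] = macaddr & 0xff;
        macaddr >>= 8;
    }
    /* prints 02:52:03:ab:cd:ef */
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           a[0], a[1], a[2], a[3], a[4], a[5]);
    return 0;
}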
diff --git a/hw/net/trace-events b/hw/net/trace-events
index 8d38d7724d..1a5c909939 100644
--- a/hw/net/trace-events
+++ b/hw/net/trace-events
@@ -223,7 +223,7 @@ e1000e_irq_icr_read_entry(uint32_t icr) "Starting ICR read. Current ICR: 0x%x"
e1000e_irq_icr_read_exit(uint32_t icr) "Ending ICR read. Current ICR: 0x%x"
e1000e_irq_icr_clear_zero_ims(void) "Clearing ICR on read due to zero IMS"
e1000e_irq_icr_clear_iame(void) "Clearing ICR on read due to IAME"
-e1000e_irq_ims_clear_eiame(uint32_t iam, uint32_t cause) "Clearing IMS due to EIAME, IAM: 0x%X, cause: 0x%X"
+e1000e_irq_iam_clear_eiame(uint32_t iam, uint32_t cause) "Clearing IMS due to EIAME, IAM: 0x%X, cause: 0x%X"
e1000e_irq_icr_clear_eiac(uint32_t icr, uint32_t eiac) "Clearing ICR bits due to EIAC, ICR: 0x%X, EIAC: 0x%X"
e1000e_irq_ims_clear_set_imc(uint32_t val) "Clearing IMS bits due to IMC write 0x%x"
e1000e_irq_fire_delayed_interrupts(void) "Firing delayed interrupts"
@@ -270,3 +270,19 @@ e1000e_cfg_support_virtio(bool support) "Virtio header supported: %d"
e1000e_vm_state_running(void) "VM state is running"
e1000e_vm_state_stopped(void) "VM state is stopped"
+
+# hw/net/spapr_llan.c
+spapr_vlan_get_rx_bd_from_pool_found(int pool, int32_t count, uint32_t rx_bufs) "pool=%d count=%"PRId32" rxbufs=%"PRIu32
+spapr_vlan_get_rx_bd_from_page(int buf_ptr, uint64_t bd) "use_buf_ptr=%d bd=0x%016"PRIx64
+spapr_vlan_get_rx_bd_from_page_found(uint32_t use_buf_ptr, uint32_t rx_bufs) "ptr=%"PRIu32" rxbufs=%"PRIu32
+spapr_vlan_receive(const char *id, uint32_t rx_bufs) "[%s] rx_bufs=%"PRIu32
+spapr_vlan_receive_dma_completed(void) "DMA write completed"
+spapr_vlan_receive_wrote(uint64_t ptr, uint64_t hi, uint64_t lo) "rxq entry (ptr=0x%"PRIx64"): 0x%016"PRIx64" 0x%016"PRIx64
+spapr_vlan_add_rxbuf_to_pool_create(int pool, uint64_t len) "created RX pool %d for size %"PRIu64
+spapr_vlan_add_rxbuf_to_pool(int pool, uint64_t len, int32_t count) "add buf using pool %d (size %"PRIu64", count=%"PRId32")"
+spapr_vlan_add_rxbuf_to_page(uint32_t ptr, uint32_t rx_bufs, uint64_t bd) "added buf ptr=%"PRIu32" rx_bufs=%"PRIu32" bd=0x%016"PRIx64
+spapr_vlan_h_add_logical_lan_buffer(uint64_t reg, uint64_t buf) "H_ADD_LOGICAL_LAN_BUFFER(0x%"PRIx64", 0x%"PRIx64")"
+spapr_vlan_h_send_logical_lan(uint64_t reg, uint64_t continue_token) "H_SEND_LOGICAL_LAN(0x%"PRIx64", <bufs>, 0x%"PRIx64")"
+spapr_vlan_h_send_logical_lan_rxbufs(uint32_t rx_bufs) "rxbufs = %"PRIu32
+spapr_vlan_h_send_logical_lan_buf_desc(uint64_t buf) " buf desc: 0x%"PRIx64
+spapr_vlan_h_send_logical_lan_total(int nbufs, unsigned total_len) "%d buffers, total length 0x%x"
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 01f1351554..06bfe4bcc9 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -31,6 +31,11 @@
#define MAC_TABLE_ENTRIES 64
#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
+/* previously fixed value */
+#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
+/* for now, only allow larger queues; with virtio-1, guest can downsize */
+#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
+
/*
* Calculate the number of bytes up to and including the given 'field' of
* 'container'.
@@ -875,6 +880,7 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
return VIRTIO_NET_OK;
}
+
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIONet *n = VIRTIO_NET(vdev);
@@ -892,8 +898,10 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
}
if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
- error_report("virtio-net ctrl missing headers");
- exit(1);
+ virtio_error(vdev, "virtio-net ctrl missing headers");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ break;
}
iov_cnt = elem->out_num;
@@ -1122,21 +1130,24 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
if (!elem) {
- if (i == 0)
- return -1;
- error_report("virtio-net unexpected empty queue: "
- "i %zd mergeable %d offset %zd, size %zd, "
- "guest hdr len %zd, host hdr len %zd "
- "guest features 0x%" PRIx64,
- i, n->mergeable_rx_bufs, offset, size,
- n->guest_hdr_len, n->host_hdr_len,
- vdev->guest_features);
- exit(1);
+ if (i) {
+ virtio_error(vdev, "virtio-net unexpected empty queue: "
+ "i %zd mergeable %d offset %zd, size %zd, "
+ "guest hdr len %zd, host hdr len %zd "
+ "guest features 0x%" PRIx64,
+ i, n->mergeable_rx_bufs, offset, size,
+ n->guest_hdr_len, n->host_hdr_len,
+ vdev->guest_features);
+ }
+ return -1;
}
if (elem->in_num < 1) {
- error_report("virtio-net receive queue contains no in buffers");
- exit(1);
+ virtio_error(vdev,
+ "virtio-net receive queue contains no in buffers");
+ virtqueue_detach_element(q->rx_vq, elem, 0);
+ g_free(elem);
+ return -1;
}
sg = elem->in_sg;
@@ -1238,15 +1249,19 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
out_num = elem->out_num;
out_sg = elem->out_sg;
if (out_num < 1) {
- error_report("virtio-net header not in first element");
- exit(1);
+ virtio_error(vdev, "virtio-net header not in first element");
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+ g_free(elem);
+ return -EINVAL;
}
if (n->has_vnet_hdr) {
if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
n->guest_hdr_len) {
- error_report("virtio-net header incorrect");
- exit(1);
+ virtio_error(vdev, "virtio-net header incorrect");
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+ g_free(elem);
+ return -EINVAL;
}
if (n->needs_vnet_hdr_swap) {
virtio_net_hdr_swap(vdev, (void *) &mhdr);
@@ -1314,7 +1329,9 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
virtio_queue_set_notification(vq, 1);
timer_del(q->tx_timer);
q->tx_waiting = 0;
- virtio_net_flush_tx(q);
+ if (virtio_net_flush_tx(q) == -EINVAL) {
+ return;
+ }
} else {
timer_mod(q->tx_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
@@ -1385,8 +1402,9 @@ static void virtio_net_tx_bh(void *opaque)
}
ret = virtio_net_flush_tx(q);
- if (ret == -EBUSY) {
- return; /* Notification re-enable handled by tx_complete */
+ if (ret == -EBUSY || ret == -EINVAL) {
+ return; /* Notification re-enable handled by tx_complete, or the
+ * device is broken */
}
/* If we flush a full burst of packets, assume there are
@@ -1401,7 +1419,10 @@ static void virtio_net_tx_bh(void *opaque)
* anything that may have come in while we weren't looking. If
* we find something, assume the guest is still active and reschedule */
virtio_queue_set_notification(q->tx_vq, 1);
- if (virtio_net_flush_tx(q) > 0) {
+ ret = virtio_net_flush_tx(q);
+ if (ret == -EINVAL) {
+ return;
+ } else if (ret > 0) {
virtio_queue_set_notification(q->tx_vq, 0);
qemu_bh_schedule(q->tx_bh);
q->tx_waiting = 1;
@@ -1412,7 +1433,8 @@ static void virtio_net_add_queue(VirtIONet *n, int index)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+ n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
+ virtio_net_handle_rx);
if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
n->vqs[index].tx_vq =
virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
@@ -1492,17 +1514,6 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
virtio_net_set_queues(n);
}
-static void virtio_net_save(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIONet *n = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(n);
-
- /* At this point, backend must be stopped, otherwise
- * it might keep writing to memory. */
- assert(!n->vhost_started);
- virtio_save(vdev, f);
-}
-
static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
VirtIONet *n = VIRTIO_NET(vdev);
@@ -1538,14 +1549,6 @@ static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
}
}
-static int virtio_net_load(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIONet *n = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(n);
-
- return virtio_load(vdev, f, VIRTIO_NET_VM_VERSION);
-}
-
static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
@@ -1720,6 +1723,22 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
virtio_net_set_config_size(n, n->host_features);
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
+ /*
+ * We set a lower limit on RX queue size to what it always was.
+ * Guests that want a smaller ring can always resize it without
+ * help from us (using virtio 1 and up).
+ */
+ if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
+ n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
+ (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
+ error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
+ "must be a power of 2 between %d and %d.",
+ n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
+ VIRTQUEUE_MAX_SIZE);
+ virtio_cleanup(vdev);
+ return;
+ }
+
n->max_queues = MAX(n->nic_conf.peers.queues, 1);
if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
@@ -1832,8 +1851,25 @@ static void virtio_net_instance_init(Object *obj)
DEVICE(n), NULL);
}
-VMSTATE_VIRTIO_DEVICE(net, VIRTIO_NET_VM_VERSION, virtio_net_load,
- virtio_net_save);
+static void virtio_net_pre_save(void *opaque)
+{
+ VirtIONet *n = opaque;
+
+ /* At this point, backend must be stopped, otherwise
+ * it might keep writing to memory. */
+ assert(!n->vhost_started);
+}
+
+static const VMStateDescription vmstate_virtio_net = {
+ .name = "virtio-net",
+ .minimum_version_id = VIRTIO_NET_VM_VERSION,
+ .version_id = VIRTIO_NET_VM_VERSION,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+ .pre_save = virtio_net_pre_save,
+};
static Property virtio_net_properties[] = {
DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
@@ -1880,6 +1916,8 @@ static Property virtio_net_properties[] = {
TX_TIMER_INTERVAL),
DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
+ DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
+ VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
DEFINE_PROP_END_OF_LIST(),
};
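
The new rx_queue_size property accepts a power of two between 256 (the previous fixed ring size) and VIRTQUEUE_MAX_SIZE, e.g. -device virtio-net-pci,rx_queue_size=1024 on the command line. A standalone restatement of the realize() check (the 1024 upper bound assumes VIRTQUEUE_MAX_SIZE of this era):

#include <stdbool.h>
#include <stdint.h>

static bool rx_queue_size_valid(uint16_t s)
{
    /* power of two in [256, 1024]; 256 is VIRTIO_NET_RX_QUEUE_MIN_SIZE */
    return s >= 256 && s <= 1024 && (s & (s - 1)) == 0;
}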