 accel/tcg/cputlb.c      | 138
 hw/display/cirrus_vga.c |  12
 hw/display/virtio-gpu.c |   5
 softmmu/cpus.c          |  11
 tcg/tcg-op-gvec.c       |  61
 ui/gtk-gl-area.c        |  11
 ui/gtk.c                |  52
 ui/spice-input.c        |   2
 ui/vnc-auth-sasl.c      |   1
 9 files changed, 196 insertions(+), 97 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 2d48281942..6489abbf8c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2009,6 +2009,80 @@ store_memop(void *haddr, uint64_t val, MemOp op)
}
}
+static void __attribute__((noinline))
+store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
+ uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
+ bool big_endian)
+{
+ const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
+ uintptr_t index, index2;
+ CPUTLBEntry *entry, *entry2;
+ target_ulong page2, tlb_addr, tlb_addr2;
+ TCGMemOpIdx oi;
+ size_t size2;
+ int i;
+
+ /*
+ * Ensure the second page is in the TLB. Note that the first page
+ * is already guaranteed to be filled, and that the second page
+ * cannot evict the first.
+ */
+ page2 = (addr + size) & TARGET_PAGE_MASK;
+ size2 = (addr + size) & ~TARGET_PAGE_MASK;
+ index2 = tlb_index(env, mmu_idx, page2);
+ entry2 = tlb_entry(env, mmu_idx, page2);
+
+ tlb_addr2 = tlb_addr_write(entry2);
+ if (!tlb_hit_page(tlb_addr2, page2)) {
+ if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
+ tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ index2 = tlb_index(env, mmu_idx, page2);
+ entry2 = tlb_entry(env, mmu_idx, page2);
+ }
+ tlb_addr2 = tlb_addr_write(entry2);
+ }
+
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+ tlb_addr = tlb_addr_write(entry);
+
+ /*
+ * Handle watchpoints. Since this may trap, all checks
+ * must happen before any store.
+ */
+ if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+ cpu_check_watchpoint(env_cpu(env), addr, size - size2,
+ env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
+ BP_MEM_WRITE, retaddr);
+ }
+ if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
+ cpu_check_watchpoint(env_cpu(env), page2, size2,
+ env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
+ BP_MEM_WRITE, retaddr);
+ }
+
+ /*
+ * XXX: not efficient, but simple.
+ * This loop must go in the forward direction to avoid issues
+ * with self-modifying code in Windows 64-bit.
+ */
+ oi = make_memop_idx(MO_UB, mmu_idx);
+ if (big_endian) {
+ for (i = 0; i < size; ++i) {
+ /* Big-endian extract. */
+ uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
+ helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
+ }
+ } else {
+ for (i = 0; i < size; ++i) {
+ /* Little-endian extract. */
+ uint8_t val8 = val >> (i * 8);
+ helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
+ }
+ }
+}
+
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
@@ -2097,64 +2171,9 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
if (size > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
>= TARGET_PAGE_SIZE)) {
- int i;
- uintptr_t index2;
- CPUTLBEntry *entry2;
- target_ulong page2, tlb_addr2;
- size_t size2;
-
do_unaligned_access:
- /*
- * Ensure the second page is in the TLB. Note that the first page
- * is already guaranteed to be filled, and that the second page
- * cannot evict the first.
- */
- page2 = (addr + size) & TARGET_PAGE_MASK;
- size2 = (addr + size) & ~TARGET_PAGE_MASK;
- index2 = tlb_index(env, mmu_idx, page2);
- entry2 = tlb_entry(env, mmu_idx, page2);
- tlb_addr2 = tlb_addr_write(entry2);
- if (!tlb_hit_page(tlb_addr2, page2)) {
- if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
- tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
- mmu_idx, retaddr);
- index2 = tlb_index(env, mmu_idx, page2);
- entry2 = tlb_entry(env, mmu_idx, page2);
- }
- tlb_addr2 = tlb_addr_write(entry2);
- }
-
- /*
- * Handle watchpoints. Since this may trap, all checks
- * must happen before any store.
- */
- if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
- cpu_check_watchpoint(env_cpu(env), addr, size - size2,
- env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
- BP_MEM_WRITE, retaddr);
- }
- if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
- cpu_check_watchpoint(env_cpu(env), page2, size2,
- env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
- BP_MEM_WRITE, retaddr);
- }
-
- /*
- * XXX: not efficient, but simple.
- * This loop must go in the forward direction to avoid issues
- * with self-modifying code in Windows 64-bit.
- */
- for (i = 0; i < size; ++i) {
- uint8_t val8;
- if (memop_big_endian(op)) {
- /* Big-endian extract. */
- val8 = val >> (((size - 1) * 8) - (i * 8));
- } else {
- /* Little-endian extract. */
- val8 = val >> (i * 8);
- }
- helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
- }
+ store_helper_unaligned(env, addr, val, retaddr, size,
+ mmu_idx, memop_big_endian(op));
return;
}
@@ -2162,8 +2181,9 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
store_memop(haddr, val, op);
}
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+void __attribute__((noinline))
+helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_UB);
}
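
The unaligned slow path above ends up writing the value one byte at a time via helper_ret_stb_mmu(), so a store that straddles a page boundary never touches memory before both pages have passed their TLB and watchpoint checks. A standalone sketch of just the byte extraction (plain C, store_bytes() is a made-up name, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Emit 'val' into 'dst' one byte at a time, as the unaligned path does. */
static void store_bytes(uint8_t *dst, uint64_t val, size_t size, int big_endian)
{
    for (size_t i = 0; i < size; ++i) {
        uint8_t val8;
        if (big_endian) {
            /* Big-endian extract: most significant byte first. */
            val8 = val >> (((size - 1) * 8) - (i * 8));
        } else {
            /* Little-endian extract: least significant byte first. */
            val8 = val >> (i * 8);
        }
        dst[i] = val8;
    }
}

int main(void)
{
    uint8_t buf[8];

    store_bytes(buf, 0x1122334455667788ull, 8, 1);
    for (int i = 0; i < 8; i++) {
        printf("%02x ", buf[i]);    /* prints: 11 22 33 44 55 66 77 88 */
    }
    printf("\n");
    return 0;
}
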
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 02d9ed0bd4..41e71af08a 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -640,10 +640,16 @@ static void cirrus_invalidate_region(CirrusVGAState * s, int off_begin,
}
for (y = 0; y < lines; y++) {
- off_cur = off_begin;
+ off_cur = off_begin & s->cirrus_addr_mask;
off_cur_end = ((off_cur + bytesperline - 1) & s->cirrus_addr_mask) + 1;
- assert(off_cur_end >= off_cur);
- memory_region_set_dirty(&s->vga.vram, off_cur, off_cur_end - off_cur);
+ if (off_cur_end >= off_cur) {
+ memory_region_set_dirty(&s->vga.vram, off_cur, off_cur_end - off_cur);
+ } else {
+ /* wraparound */
+ memory_region_set_dirty(&s->vga.vram, off_cur,
+ s->cirrus_addr_mask + 1 - off_cur);
+ memory_region_set_dirty(&s->vga.vram, 0, off_cur_end);
+ }
off_begin += off_pitch;
}
}
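
With off_cur now masked by cirrus_addr_mask, a scanline region can legitimately wrap past the end of video memory, so the old assertion is replaced by a two-range dirty update. A minimal sketch of that split, assuming the VRAM size equals cirrus_addr_mask + 1 (mark_dirty()/invalidate() are stand-in names, not QEMU code):

#include <stdint.h>
#include <stdio.h>

static void mark_dirty(uint32_t start, uint32_t len)
{
    printf("dirty: start=0x%x len=0x%x\n", start, len);
}

static void invalidate(uint32_t off_cur, uint32_t off_cur_end, uint32_t vram_size)
{
    if (off_cur_end >= off_cur) {
        mark_dirty(off_cur, off_cur_end - off_cur);
    } else {
        /* wraparound: the tail of VRAM, then the wrapped head */
        mark_dirty(off_cur, vram_size - off_cur);
        mark_dirty(0, off_cur_end);
    }
}

int main(void)
{
    invalidate(0x3fff80, 0x80, 0x400000);   /* region crosses the end of a 4 MiB VRAM */
    return 0;
}
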
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 5f0dd7c150..90be4e3ed7 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -646,9 +646,9 @@ int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
uint64_t a = le64_to_cpu(ents[i].addr);
uint32_t l = le32_to_cpu(ents[i].length);
hwaddr len = l;
- (*iov)[i].iov_len = l;
(*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
a, &len, DMA_DIRECTION_TO_DEVICE);
+ (*iov)[i].iov_len = len;
if (addr) {
(*addr)[i] = a;
}
@@ -656,6 +656,9 @@ int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
" resource %d element %d\n",
__func__, ab->resource_id, i);
+ if ((*iov)[i].iov_base) {
+ i++; /* cleanup the 'i'th map */
+ }
virtio_gpu_cleanup_mapping_iov(g, *iov, i);
g_free(ents);
*iov = NULL;
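
Two fixes land in this hunk: the iov length is taken from the length dma_memory_map() actually mapped, which may be shorter than what was asked for, and on failure the partially mapped entry itself is counted into the cleanup pass. A self-contained sketch of that accounting, with fake_map()/fake_unmap() standing in for the real DMA API:

#include <stdint.h>
#include <stdio.h>

struct iovec_s { void *iov_base; size_t iov_len; };

/* Stand-in for dma_memory_map(): never maps more than 0x1000 bytes at once. */
static void *fake_map(uint64_t addr, uint64_t *len)
{
    if (*len > 0x1000) {
        *len = 0x1000;                    /* mapping truncated */
    }
    return (void *)(uintptr_t)addr;
}

static void fake_unmap(struct iovec_s *iov, int count)
{
    for (int i = 0; i < count; i++) {
        printf("unmap entry %d (len=0x%zx)\n", i, iov[i].iov_len);
    }
}

int main(void)
{
    struct iovec_s iov[2];
    uint64_t addrs[2] = { 0x1000, 0x3000 };
    uint64_t lens[2]  = { 0x1000, 0x2000 };

    for (int i = 0; i < 2; i++) {
        uint64_t len = lens[i];

        iov[i].iov_base = fake_map(addrs[i], &len);
        iov[i].iov_len = len;             /* mapped length, not requested length */
        if (!iov[i].iov_base || len != lens[i]) {
            int cleanup = i;
            if (iov[i].iov_base) {
                cleanup++;                /* include the partial mapping itself */
            }
            fake_unmap(iov, cleanup);
            return 1;
        }
    }
    return 0;
}
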
diff --git a/softmmu/cpus.c b/softmmu/cpus.c
index a802e899ab..e3b98065c9 100644
--- a/softmmu/cpus.c
+++ b/softmmu/cpus.c
@@ -1895,6 +1895,16 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
if (!tcg_region_inited) {
tcg_region_inited = 1;
tcg_region_init();
+ /*
+ * If MTTCG, and we will create multiple cpus,
+ * then we will have cpus running in parallel.
+ */
+ if (qemu_tcg_mttcg_enabled()) {
+ MachineState *ms = MACHINE(qdev_get_machine());
+ if (ms->smp.max_cpus > 1) {
+ parallel_cpus = true;
+ }
+ }
}
if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
@@ -1904,7 +1914,6 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
if (qemu_tcg_mttcg_enabled()) {
/* create a thread per vCPU with TCG (MTTCG) */
- parallel_cpus = true;
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
cpu->cpu_index);
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 3707c0effb..7ebd9e8298 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -1570,18 +1570,16 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
tcg_temp_free_i64(in);
}
- } else {
+ } else if (vece == 4) {
/* 128-bit duplicate. */
- /* ??? Dup to 256-bit vector. */
int i;
- tcg_debug_assert(vece == 4);
tcg_debug_assert(oprsz >= 16);
if (TCG_TARGET_HAS_v128) {
TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);
tcg_gen_ld_vec(in, cpu_env, aofs);
- for (i = 0; i < oprsz; i += 16) {
+ for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
tcg_gen_st_vec(in, cpu_env, dofs + i);
}
tcg_temp_free_vec(in);
@@ -1591,7 +1589,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
tcg_gen_ld_i64(in0, cpu_env, aofs);
tcg_gen_ld_i64(in1, cpu_env, aofs + 8);
- for (i = 0; i < oprsz; i += 16) {
+ for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
tcg_gen_st_i64(in0, cpu_env, dofs + i);
tcg_gen_st_i64(in1, cpu_env, dofs + i + 8);
}
@@ -1601,6 +1599,54 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
if (oprsz < maxsz) {
expand_clr(dofs + oprsz, maxsz - oprsz);
}
+ } else if (vece == 5) {
+ /* 256-bit duplicate. */
+ int i;
+
+ tcg_debug_assert(oprsz >= 32);
+ tcg_debug_assert(oprsz % 32 == 0);
+ if (TCG_TARGET_HAS_v256) {
+ TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V256);
+
+ tcg_gen_ld_vec(in, cpu_env, aofs);
+ for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
+ tcg_gen_st_vec(in, cpu_env, dofs + i);
+ }
+ tcg_temp_free_vec(in);
+ } else if (TCG_TARGET_HAS_v128) {
+ TCGv_vec in0 = tcg_temp_new_vec(TCG_TYPE_V128);
+ TCGv_vec in1 = tcg_temp_new_vec(TCG_TYPE_V128);
+
+ tcg_gen_ld_vec(in0, cpu_env, aofs);
+ tcg_gen_ld_vec(in1, cpu_env, aofs + 16);
+ for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
+ tcg_gen_st_vec(in0, cpu_env, dofs + i);
+ tcg_gen_st_vec(in1, cpu_env, dofs + i + 16);
+ }
+ tcg_temp_free_vec(in0);
+ tcg_temp_free_vec(in1);
+ } else {
+ TCGv_i64 in[4];
+ int j;
+
+ for (j = 0; j < 4; ++j) {
+ in[j] = tcg_temp_new_i64();
+ tcg_gen_ld_i64(in[j], cpu_env, aofs + j * 8);
+ }
+ for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
+ for (j = 0; j < 4; ++j) {
+ tcg_gen_st_i64(in[j], cpu_env, dofs + i + j * 8);
+ }
+ }
+ for (j = 0; j < 4; ++j) {
+ tcg_temp_free_i64(in[j]);
+ }
+ }
+ if (oprsz < maxsz) {
+ expand_clr(dofs + oprsz, maxsz - oprsz);
+ }
+ } else {
+ g_assert_not_reached();
}
}
@@ -2264,12 +2310,13 @@ static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
/*
- * Invert (via xor -1) and add one (via sub -1).
+ * Invert (via xor -1) and add one.
* Because of the ordering the msb is cleared,
* so we never have carry into the next element.
*/
tcg_gen_xor_i64(d, b, t);
- tcg_gen_sub_i64(d, d, t);
+ tcg_gen_andi_i64(t, t, dup_const(vece, 1));
+ tcg_gen_add_i64(d, d, t);
tcg_temp_free_i64(t);
}
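
The gen_absv_mask() change is the subtle one: t holds all-ones for every negative element, so the old tcg_gen_sub_i64(d, d, t) let the per-element subtraction borrow into the neighbouring element, whereas adding (t & dup_const(vece, 1)) adds exactly one per negative element and, because the preceding xor cleared each element's msb, can never carry out of an element. A small sketch of the fixed trick on four bytes packed in a uint32_t (plain C, not generated TCG ops):

#include <stdint.h>
#include <stdio.h>

/* Absolute value of four int8_t packed in a uint32_t, as the fixed code
 * generates it: build a mask of the negative elements, invert them, then
 * add one per negative element without crossing element boundaries. */
static uint32_t abs_packed_bytes(uint32_t b)
{
    uint32_t t = (b >> 7) & 0x01010101u;   /* sign bit of each byte */
    t *= 0xff;                             /* 0x00 or 0xff per byte */
    uint32_t d = b ^ t;                    /* invert the negative bytes */
    return d + (t & 0x01010101u);          /* +1 per negative byte, no carry out */
}

int main(void)
{
    /* bytes 1, -1, -128, 127 -> 1, 1, 128, 127 */
    printf("%08x\n", abs_packed_bytes(0x01ff807fu));   /* prints: 0101807f */
    return 0;
}
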
diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c
index 85f9d14c51..98c22d23f5 100644
--- a/ui/gtk-gl-area.c
+++ b/ui/gtk-gl-area.c
@@ -147,10 +147,21 @@ QEMUGLContext gd_gl_area_create_context(DisplayChangeListener *dcl,
gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area));
window = gtk_widget_get_window(vc->gfx.drawing_area);
ctx = gdk_window_create_gl_context(window, &err);
+ if (err) {
+ g_printerr("Create gdk gl context failed: %s\n", err->message);
+ g_error_free(err);
+ return NULL;
+ }
gdk_gl_context_set_required_version(ctx,
params->major_ver,
params->minor_ver);
gdk_gl_context_realize(ctx, &err);
+ if (err) {
+ g_printerr("Realize gdk gl context failed: %s\n", err->message);
+ g_error_free(err);
+ g_clear_object(&ctx);
+ return NULL;
+ }
return ctx;
}
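
The added checks follow the usual GLib discipline: any call that takes a GError ** must be tested, and the error message consumed and freed, before the result is used or err is reused. A minimal sketch of the same shape with plain GLib (g_file_get_contents() is just an unrelated example call, not what the QEMU code uses):

#include <glib.h>

int main(void)
{
    GError *err = NULL;
    gchar *contents = NULL;

    if (!g_file_get_contents("/no/such/file", &contents, NULL, &err)) {
        g_printerr("read failed: %s\n", err->message);
        g_error_free(err);
        return 1;
    }
    g_free(contents);
    return 0;
}
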
diff --git a/ui/gtk.c b/ui/gtk.c
index b0cc08ad6d..7a717ce8e5 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -744,6 +744,25 @@ static void gd_resize_event(GtkGLArea *area,
#endif
+/*
+ * If available, return the refresh rate of the display in milli-Hertz,
+ * else return 0.
+ */
+static int gd_refresh_rate_millihz(GtkWidget *window)
+{
+#ifdef GDK_VERSION_3_22
+ GdkWindow *win = gtk_widget_get_window(window);
+
+ if (win) {
+ GdkDisplay *dpy = gtk_widget_get_display(window);
+ GdkMonitor *monitor = gdk_display_get_monitor_at_window(dpy, win);
+
+ return gdk_monitor_get_refresh_rate(monitor);
+ }
+#endif
+ return 0;
+}
+
static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque)
{
VirtualConsole *vc = opaque;
@@ -751,6 +770,7 @@ static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque)
int mx, my;
int ww, wh;
int fbw, fbh;
+ int refresh_rate_millihz;
#if defined(CONFIG_OPENGL)
if (vc->gfx.gls) {
@@ -771,6 +791,12 @@ static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque)
return FALSE;
}
+ refresh_rate_millihz = gd_refresh_rate_millihz(vc->window ?
+ vc->window : s->window);
+ if (refresh_rate_millihz) {
+ vc->gfx.dcl.update_interval = MILLISEC_PER_SEC / refresh_rate_millihz;
+ }
+
fbw = surface_width(vc->gfx.ds);
fbh = surface_height(vc->gfx.ds);
@@ -1949,31 +1975,11 @@ static GtkWidget *gd_create_menu_machine(GtkDisplayState *s)
return machine_menu;
}
-/*
- * If available, return the refresh rate of the display in milli-Hertz,
- * else return 0.
- */
-static int gd_refresh_rate_millihz(GtkWidget *window)
-{
-#ifdef GDK_VERSION_3_22
- GdkWindow *win = gtk_widget_get_window(window);
-
- if (win) {
- GdkDisplay *dpy = gtk_widget_get_display(window);
- GdkMonitor *monitor = gdk_display_get_monitor_at_window(dpy, win);
-
- return gdk_monitor_get_refresh_rate(monitor);
- }
-#endif
- return 0;
-}
-
static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc,
QemuConsole *con, int idx,
GSList *group, GtkWidget *view_menu)
{
bool zoom_to_fit = false;
- int refresh_rate_millihz;
vc->label = qemu_console_get_label(con);
vc->s = s;
@@ -2031,12 +2037,6 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc,
vc->gfx.kbd = qkbd_state_init(con);
vc->gfx.dcl.con = con;
- refresh_rate_millihz = gd_refresh_rate_millihz(vc->window ?
- vc->window : s->window);
- if (refresh_rate_millihz) {
- vc->gfx.dcl.update_interval = MILLISEC_PER_SEC / refresh_rate_millihz;
- }
-
register_displaychangelistener(&vc->gfx.dcl);
gd_connect_vc_gfx_signals(vc);
diff --git a/ui/spice-input.c b/ui/spice-input.c
index cd4bb0043f..d5bba231c9 100644
--- a/ui/spice-input.c
+++ b/ui/spice-input.c
@@ -123,6 +123,8 @@ static void spice_update_buttons(QemuSpicePointer *pointer,
[INPUT_BUTTON_RIGHT] = 0x02,
[INPUT_BUTTON_WHEEL_UP] = 0x10,
[INPUT_BUTTON_WHEEL_DOWN] = 0x20,
+ [INPUT_BUTTON_SIDE] = 0x40,
+ [INPUT_BUTTON_EXTRA] = 0x80,
};
if (wheel < 0) {
diff --git a/ui/vnc-auth-sasl.c b/ui/vnc-auth-sasl.c
index 7b2b09f242..0517b2ead9 100644
--- a/ui/vnc-auth-sasl.c
+++ b/ui/vnc-auth-sasl.c
@@ -522,6 +522,7 @@ vnc_socket_ip_addr_string(QIOChannelSocket *ioc,
if (addr->type != SOCKET_ADDRESS_TYPE_INET) {
error_setg(errp, "Not an inet socket type");
+ qapi_free_SocketAddress(addr);
return NULL;
}
ret = g_strdup_printf("%s;%s", addr->u.inet.host, addr->u.inet.port);