-rw-r--r--   bsd-user/bsdload.c                                4
-rw-r--r--   bsd-user/elfload.c                              328
-rw-r--r--   bsd-user/i386/target_signal.h                    20
-rw-r--r--   bsd-user/main.c                                 290
-rw-r--r--   bsd-user/mmap.c                                  22
-rw-r--r--   bsd-user/qemu.h                                 127
-rw-r--r--   bsd-user/signal.c                                 1
-rw-r--r--   bsd-user/sparc/target_signal.h                   27
-rw-r--r--   bsd-user/sparc64/target_signal.h                 27
-rw-r--r--   bsd-user/strace.c                                 8
-rw-r--r--   bsd-user/syscall.c                               18
-rw-r--r--   bsd-user/uaccess.c                                2
-rw-r--r--   bsd-user/x86_64/target_signal.h                  19
-rw-r--r--   default-configs/targets/sparc-bsd-user.mak        3
-rw-r--r--   default-configs/targets/sparc64-bsd-user.mak      4
-rw-r--r--   target/i386/cpu.h                                 8
-rw-r--r--   target/i386/helper.h                             43
-rw-r--r--   target/i386/tcg/bpt_helper.c                      2
-rw-r--r--   target/i386/tcg/excp_helper.c                    18
-rw-r--r--   target/i386/tcg/helper-tcg.h                      5
-rw-r--r--   target/i386/tcg/misc_helper.c                    79
-rw-r--r--   target/i386/tcg/seg_helper.c                     43
-rw-r--r--   target/i386/tcg/sysemu/misc_helper.c             52
-rw-r--r--   target/i386/tcg/sysemu/seg_helper.c              29
-rw-r--r--   target/i386/tcg/sysemu/svm_helper.c              30
-rw-r--r--   target/i386/tcg/translate.c                     883
-rw-r--r--   target/i386/tcg/user/meson.build                  2
-rw-r--r--   target/i386/tcg/user/misc_stubs.c                75
-rw-r--r--   target/i386/tcg/user/svm_stubs.c                 76
29 files changed, 940 insertions(+), 1305 deletions(-)
diff --git a/bsd-user/bsdload.c b/bsd-user/bsdload.c
index e1ed3b7b60..8d83f21eda 100644
--- a/bsd-user/bsdload.c
+++ b/bsd-user/bsdload.c
@@ -32,7 +32,7 @@ static int count(char **vec)
return i;
}
-static int prepare_binprm(struct linux_binprm *bprm)
+static int prepare_binprm(struct bsd_binprm *bprm)
{
struct stat st;
int mode;
@@ -127,7 +127,7 @@ abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
int loader_exec(const char *filename, char **argv, char **envp,
struct target_pt_regs *regs, struct image_info *infop)
{
- struct linux_binprm bprm;
+ struct bsd_binprm bprm;
int retval;
int i;
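
Note: the rename above is mechanical -- "struct linux_binprm" becomes "struct bsd_binprm" and loader_exec() keeps the same shape. A minimal call sketch (declarations as used later in main.c in this series; error handling trimmed):

    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;

    /* loader_exec() builds a bsd_binprm internally, maps the ELF image and
     * fills *regs and *info for cpu_loop(). */
    if (loader_exec(filename, argv + optind, target_environ, regs, info) != 0) {
        printf("Error loading %s\n", filename);
        _exit(1);
    }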
diff --git a/bsd-user/elfload.c b/bsd-user/elfload.c
index 5f4d824d78..6edceb3ea6 100644
--- a/bsd-user/elfload.c
+++ b/bsd-user/elfload.c
@@ -111,7 +111,7 @@ static uint32_t get_elf_hwcap(void)
#ifdef TARGET_X86_64
#define ELF_START_MMAP 0x2aaaaab000ULL
-#define elf_check_arch(x) ( ((x) == ELF_ARCH) )
+#define elf_check_arch(x) (((x) == ELF_ARCH))
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2LSB
@@ -134,7 +134,7 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
+#define elf_check_arch(x) (((x) == EM_386) || ((x) == EM_486))
/*
* These are used to set parameters in the core dumps.
@@ -168,7 +168,7 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
#define ELF_START_MMAP 0x80000000
-#define elf_check_arch(x) ( (x) == EM_ARM )
+#define elf_check_arch(x) ((x) == EM_ARM)
#define ELF_CLASS ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
@@ -184,7 +184,7 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
memset(regs, 0, sizeof(*regs));
regs->ARM_cpsr = 0x10;
if (infop->entry & 1)
- regs->ARM_cpsr |= CPSR_T;
+ regs->ARM_cpsr |= CPSR_T;
regs->ARM_pc = infop->entry & 0xfffffffe;
regs->ARM_sp = infop->start_stack;
/* FIXME - what to for failure of get_user()? */
@@ -224,9 +224,9 @@ enum
#define ELF_START_MMAP 0x80000000
#ifndef TARGET_ABI32
-#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
+#define elf_check_arch(x) ((x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS)
#else
-#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
+#define elf_check_arch(x) ((x) == EM_SPARC32PLUS || (x) == EM_SPARC)
#endif
#define ELF_CLASS ELFCLASS64
@@ -261,7 +261,7 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
#else
#define ELF_START_MMAP 0x80000000
-#define elf_check_arch(x) ( (x) == EM_SPARC )
+#define elf_check_arch(x) ((x) == EM_SPARC)
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
@@ -285,13 +285,13 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
-#define elf_check_arch(x) ( (x) == EM_PPC64 )
+#define elf_check_arch(x) ((x) == EM_PPC64)
#define ELF_CLASS ELFCLASS64
#else
-#define elf_check_arch(x) ( (x) == EM_PPC )
+#define elf_check_arch(x) ((x) == EM_PPC)
#define ELF_CLASS ELFCLASS32
@@ -376,7 +376,7 @@ static inline void init_thread(struct target_pt_regs *_regs, struct image_info *
#define ELF_START_MMAP 0x80000000
-#define elf_check_arch(x) ( (x) == EM_MIPS )
+#define elf_check_arch(x) ((x) == EM_MIPS)
#ifdef TARGET_MIPS64
#define ELF_CLASS ELFCLASS64
@@ -406,7 +406,7 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
#define ELF_START_MMAP 0x80000000
-#define elf_check_arch(x) ( (x) == EM_SH )
+#define elf_check_arch(x) ((x) == EM_SH)
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
@@ -428,7 +428,7 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
#define ELF_START_MMAP 0x80000000
-#define elf_check_arch(x) ( (x) == EM_CRIS )
+#define elf_check_arch(x) ((x) == EM_CRIS)
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
@@ -448,7 +448,7 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
#define ELF_START_MMAP 0x80000000
-#define elf_check_arch(x) ( (x) == EM_68K )
+#define elf_check_arch(x) ((x) == EM_68K)
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
@@ -473,7 +473,7 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
#define ELF_START_MMAP (0x30000000000ULL)
-#define elf_check_arch(x) ( (x) == ELF_ARCH )
+#define elf_check_arch(x) ((x) == ELF_ARCH)
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2MSB
@@ -538,8 +538,8 @@ struct exec
/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
-#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
-#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
+#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1))
+#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1))
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
@@ -547,12 +547,12 @@ struct exec
#define DLINFO_ITEMS 12
-static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
+static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
memcpy(to, from, n);
}
-static int load_aout_interp(void * exptr, int interp_fd);
+static int load_aout_interp(void *exptr, int interp_fd);
#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
@@ -613,7 +613,7 @@ static void bswap_sym(struct elf_sym *sym)
* to be put directly into the top of new user memory.
*
*/
-static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
+static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
abi_ulong p)
{
char *tmp, *tmp1, *pag = NULL;
@@ -638,10 +638,10 @@ static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
--p; --tmp; --len;
if (--offset < 0) {
offset = p % TARGET_PAGE_SIZE;
- pag = (char *)page[p/TARGET_PAGE_SIZE];
+ pag = (char *)page[p / TARGET_PAGE_SIZE];
if (!pag) {
pag = g_try_malloc0(TARGET_PAGE_SIZE);
- page[p/TARGET_PAGE_SIZE] = pag;
+ page[p / TARGET_PAGE_SIZE] = pag;
if (!pag)
return 0;
}
@@ -662,7 +662,7 @@ static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
return p;
}
-static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
+static abi_ulong setup_arg_pages(abi_ulong p, struct bsd_binprm *bprm,
struct image_info *info)
{
abi_ulong stack_base, size, error;
@@ -672,8 +672,8 @@ static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
* it for args, we'll use it for something else...
*/
size = x86_stack_size;
- if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
- size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
+ if (size < MAX_ARG_PAGES * TARGET_PAGE_SIZE)
+ size = MAX_ARG_PAGES * TARGET_PAGE_SIZE;
error = target_mmap(0,
size + qemu_host_page_size,
PROT_READ | PROT_WRITE,
@@ -686,7 +686,7 @@ static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
/* we reserve one extra page at the top of the stack as guard */
target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
- stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
+ stack_base = error + size - MAX_ARG_PAGES * TARGET_PAGE_SIZE;
p += stack_base;
for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
@@ -708,7 +708,7 @@ static void set_brk(abi_ulong start, abi_ulong end)
end = HOST_PAGE_ALIGN(end);
if (end <= start)
return;
- if(target_mmap(start, end - start,
+ if (target_mmap(start, end - start,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
perror("cannot mmap brk");
@@ -738,12 +738,12 @@ static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
end_addr = HOST_PAGE_ALIGN(elf_bss);
if (end_addr1 < end_addr) {
mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
}
}
- nbyte = elf_bss & (qemu_host_page_size-1);
+ nbyte = elf_bss & (qemu_host_page_size - 1);
if (nbyte) {
nbyte = qemu_host_page_size - nbyte;
do {
@@ -781,10 +781,10 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
/*
* Force 16 byte _final_ alignment here for generality.
*/
- sp = sp &~ (abi_ulong)15;
+ sp = sp & ~(abi_ulong)15;
size = (DLINFO_ITEMS + 1) * 2;
if (k_platform)
- size += 2;
+ size += 2;
#ifdef DLINFO_ARCH_ITEMS
size += DLINFO_ARCH_ITEMS * 2;
#endif
@@ -792,7 +792,7 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
size += (!ibcs ? 3 : 1); /* argc itself */
size *= n;
if (size & 15)
- sp -= 16 - (size & 15);
+ sp -= 16 - (size & 15);
/* This is correct because Linux defines
* elf_addr_t as Elf32_Off / Elf64_Off
@@ -800,13 +800,13 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
#define NEW_AUX_ENT(id, val) do { \
sp -= n; put_user_ual(val, sp); \
sp -= n; put_user_ual(id, sp); \
- } while(0)
+ } while (0)
- NEW_AUX_ENT (AT_NULL, 0);
+ NEW_AUX_ENT(AT_NULL, 0);
/* There must be exactly DLINFO_ITEMS entries here. */
NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
- NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
+ NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof(struct elf_phdr)));
NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
@@ -834,90 +834,90 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
}
-static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
+static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
int interpreter_fd,
abi_ulong *interp_load_addr)
{
- struct elf_phdr *elf_phdata = NULL;
- struct elf_phdr *eppnt;
- abi_ulong load_addr = 0;
- int load_addr_set = 0;
- int retval;
- abi_ulong last_bss, elf_bss;
- abi_ulong error;
- int i;
+ struct elf_phdr *elf_phdata = NULL;
+ struct elf_phdr *eppnt;
+ abi_ulong load_addr = 0;
+ int load_addr_set = 0;
+ int retval;
+ abi_ulong last_bss, elf_bss;
+ abi_ulong error;
+ int i;
- elf_bss = 0;
- last_bss = 0;
- error = 0;
+ elf_bss = 0;
+ last_bss = 0;
+ error = 0;
#ifdef BSWAP_NEEDED
- bswap_ehdr(interp_elf_ex);
+ bswap_ehdr(interp_elf_ex);
#endif
- /* First of all, some simple consistency checks */
- if ((interp_elf_ex->e_type != ET_EXEC &&
- interp_elf_ex->e_type != ET_DYN) ||
- !elf_check_arch(interp_elf_ex->e_machine)) {
- return ~((abi_ulong)0UL);
- }
+ /* First of all, some simple consistency checks */
+ if ((interp_elf_ex->e_type != ET_EXEC &&
+ interp_elf_ex->e_type != ET_DYN) ||
+ !elf_check_arch(interp_elf_ex->e_machine)) {
+ return ~((abi_ulong)0UL);
+ }
- /* Now read in all of the header information */
+ /* Now read in all of the header information */
- if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
- return ~(abi_ulong)0UL;
+ if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
+ return ~(abi_ulong)0UL;
- elf_phdata = (struct elf_phdr *)
- malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
+ elf_phdata = (struct elf_phdr *)
+ malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
- if (!elf_phdata)
- return ~((abi_ulong)0UL);
+ if (!elf_phdata)
+ return ~((abi_ulong)0UL);
- /*
- * If the size of this structure has changed, then punt, since
- * we will be doing the wrong thing.
- */
- if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
- free(elf_phdata);
- return ~((abi_ulong)0UL);
- }
+ /*
+ * If the size of this structure has changed, then punt, since
+ * we will be doing the wrong thing.
+ */
+ if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
+ free(elf_phdata);
+ return ~((abi_ulong)0UL);
+ }
- retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
- if(retval >= 0) {
- retval = read(interpreter_fd,
- (char *) elf_phdata,
- sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
- }
- if (retval < 0) {
- perror("load_elf_interp");
- exit(-1);
- free (elf_phdata);
- return retval;
- }
+ retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
+ if (retval >= 0) {
+ retval = read(interpreter_fd,
+ (char *) elf_phdata,
+ sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
+ }
+ if (retval < 0) {
+ perror("load_elf_interp");
+ exit(-1);
+ free(elf_phdata);
+ return retval;
+ }
#ifdef BSWAP_NEEDED
- eppnt = elf_phdata;
- for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
- bswap_phdr(eppnt);
- }
+ eppnt = elf_phdata;
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+ bswap_phdr(eppnt);
+ }
#endif
- if (interp_elf_ex->e_type == ET_DYN) {
- /* in order to avoid hardcoding the interpreter load
- address in qemu, we allocate a big enough memory zone */
- error = target_mmap(0, INTERP_MAP_SIZE,
- PROT_NONE, MAP_PRIVATE | MAP_ANON,
- -1, 0);
- if (error == -1) {
- perror("mmap");
- exit(-1);
- }
- load_addr = error;
- load_addr_set = 1;
+ if (interp_elf_ex->e_type == ET_DYN) {
+ /* in order to avoid hardcoding the interpreter load
+ address in qemu, we allocate a big enough memory zone */
+ error = target_mmap(0, INTERP_MAP_SIZE,
+ PROT_NONE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
+ if (error == -1) {
+ perror("mmap");
+ exit(-1);
}
+ load_addr = error;
+ load_addr_set = 1;
+ }
- eppnt = elf_phdata;
- for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
- if (eppnt->p_type == PT_LOAD) {
+ eppnt = elf_phdata;
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
+ if (eppnt->p_type == PT_LOAD) {
int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
int elf_prot = 0;
abi_ulong vaddr = 0;
@@ -930,23 +930,23 @@ static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
elf_type |= MAP_FIXED;
vaddr = eppnt->p_vaddr;
}
- error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
- eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
- elf_prot,
- elf_type,
- interpreter_fd,
- eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
+ error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr),
+ eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
+ elf_prot,
+ elf_type,
+ interpreter_fd,
+ eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
if (error == -1) {
- /* Real error */
- close(interpreter_fd);
- free(elf_phdata);
- return ~((abi_ulong)0UL);
+ /* Real error */
+ close(interpreter_fd);
+ free(elf_phdata);
+ return ~((abi_ulong)0UL);
}
if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
- load_addr = error;
- load_addr_set = 1;
+ load_addr = error;
+ load_addr_set = 1;
}
/*
@@ -962,31 +962,31 @@ static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
*/
k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
if (k > last_bss) last_bss = k;
- }
+ }
- /* Now use mmap to map the library into memory. */
+ /* Now use mmap to map the library into memory. */
- close(interpreter_fd);
+ close(interpreter_fd);
- /*
- * Now fill out the bss section. First pad the last page up
- * to the page boundary, and then perform a mmap to make sure
- * that there are zeromapped pages up to and including the last
- * bss page.
- */
- padzero(elf_bss, last_bss);
- elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
-
- /* Map the last of the bss segment */
- if (last_bss > elf_bss) {
- target_mmap(elf_bss, last_bss-elf_bss,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
- }
- free(elf_phdata);
+ /*
+ * Now fill out the bss section. First pad the last page up
+ * to the page boundary, and then perform a mmap to make sure
+ * that there are zeromapped pages up to and including the last
+ * bss page.
+ */
+ padzero(elf_bss, last_bss);
+ elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
+
+ /* Map the last of the bss segment */
+ if (last_bss > elf_bss) {
+ target_mmap(elf_bss, last_bss - elf_bss,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
+ }
+ free(elf_phdata);
- *interp_load_addr = load_addr;
- return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
+ *interp_load_addr = load_addr;
+ return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
}
static int symfind(const void *s0, const void *s1)
@@ -1102,7 +1102,7 @@ static void load_symbols(struct elfhdr *hdr, int fd)
}
continue;
}
-#if defined(TARGET_ARM) || defined (TARGET_MIPS)
+#if defined(TARGET_ARM) || defined(TARGET_MIPS)
/* The bottom address bit marks a Thumb or MIPS16 symbol. */
syms[i].st_value &= ~(target_ulong)1;
#endif
@@ -1143,8 +1143,8 @@ static void load_symbols(struct elfhdr *hdr, int fd)
syminfos = s;
}
-int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
- struct image_info * info)
+int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
+ struct image_info *info)
{
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
@@ -1178,13 +1178,13 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
/* First of all, some simple consistency checks */
if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
- (! elf_check_arch(elf_ex.e_machine))) {
+ (!elf_check_arch(elf_ex.e_machine))) {
return -ENOEXEC;
}
bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
- bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
- bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
+ bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
+ bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
if (!bprm->p) {
retval = -E2BIG;
}
@@ -1196,21 +1196,21 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
}
retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
- if(retval > 0) {
- retval = read(bprm->fd, (char *) elf_phdata,
+ if (retval > 0) {
+ retval = read(bprm->fd, (char *)elf_phdata,
elf_ex.e_phentsize * elf_ex.e_phnum);
}
if (retval < 0) {
perror("load_elf_binary");
exit(-1);
- free (elf_phdata);
+ free(elf_phdata);
return -errno;
}
#ifdef BSWAP_NEEDED
elf_ppnt = elf_phdata;
- for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
+ for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
bswap_phdr(elf_ppnt);
}
#endif
@@ -1227,11 +1227,11 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
end_data = 0;
interp_ex.a_info = 0;
- for(i=0;i < elf_ex.e_phnum; i++) {
+ for (i = 0; i < elf_ex.e_phnum; i++) {
if (elf_ppnt->p_type == PT_INTERP) {
- if ( elf_interpreter != NULL )
+ if (elf_interpreter != NULL)
{
- free (elf_phdata);
+ free(elf_phdata);
free(elf_interpreter);
close(bprm->fd);
return -EINVAL;
@@ -1245,16 +1245,16 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
if (elf_interpreter == NULL) {
- free (elf_phdata);
+ free(elf_phdata);
close(bprm->fd);
return -ENOMEM;
}
retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
- if(retval >= 0) {
+ if (retval >= 0) {
retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
}
- if(retval < 0) {
+ if (retval < 0) {
perror("load_elf_binary2");
exit(-1);
}
@@ -1265,8 +1265,8 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
/* JRP - Need to add X86 lib dir stuff here... */
- if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
- strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
+ if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
+ strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
ibcs2_interpreter = 1;
}
@@ -1275,7 +1275,7 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
#endif
if (retval >= 0) {
retval = open(path(elf_interpreter), O_RDONLY);
- if(retval >= 0) {
+ if (retval >= 0) {
interpreter_fd = retval;
}
else {
@@ -1287,8 +1287,8 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
if (retval >= 0) {
retval = lseek(interpreter_fd, 0, SEEK_SET);
- if(retval >= 0) {
- retval = read(interpreter_fd,bprm->buf,128);
+ if (retval >= 0) {
+ retval = read(interpreter_fd, bprm->buf, 128);
}
}
if (retval >= 0) {
@@ -1298,7 +1298,7 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
if (retval < 0) {
perror("load_elf_binary3");
exit(-1);
- free (elf_phdata);
+ free(elf_phdata);
free(elf_interpreter);
close(bprm->fd);
return retval;
@@ -1308,17 +1308,17 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
}
/* Some simple consistency checks for the interpreter */
- if (elf_interpreter){
+ if (elf_interpreter) {
interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
/* Now figure out which format our binary is */
if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
(N_MAGIC(interp_ex) != QMAGIC)) {
- interpreter_type = INTERPRETER_ELF;
+ interpreter_type = INTERPRETER_ELF;
}
if (interp_elf_ex.e_ident[0] != 0x7f ||
- strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
+ strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
interpreter_type &= ~INTERPRETER_ELF;
}
@@ -1334,20 +1334,20 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
and then start this sucker up */
{
- char * passed_p;
+ char *passed_p;
if (interpreter_type == INTERPRETER_AOUT) {
snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
passed_p = passed_fileno;
if (elf_interpreter) {
- bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
+ bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
bprm->argc++;
}
}
if (!bprm->p) {
free(elf_interpreter);
- free (elf_phdata);
+ free(elf_phdata);
close(bprm->fd);
return -E2BIG;
}
@@ -1393,7 +1393,7 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
* address.
*/
- for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
+ for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0;
int elf_flags = 0;
abi_ulong error;
@@ -1538,7 +1538,7 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
printf("(brk) %x\n" , info->brk);
#endif
- if ( info->personality == PER_SVR4 )
+ if (info->personality == PER_SVR4)
{
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
@@ -1553,7 +1553,7 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
return 0;
}
-static int load_aout_interp(void * exptr, int interp_fd)
+static int load_aout_interp(void *exptr, int interp_fd)
{
printf("a.out interpreter not yet supported\n");
return(0);
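
Note: the TARGET_ELF_PAGESTART/TARGET_ELF_PAGEOFFSET hunks above only change whitespace. A worked example of what the macros compute, assuming a 4 KiB TARGET_ELF_EXEC_PAGESIZE:

    /* TARGET_ELF_PAGESTART(_v)  == (_v) & ~(TARGET_ELF_EXEC_PAGESIZE - 1)
     * TARGET_ELF_PAGEOFFSET(_v) == (_v) &  (TARGET_ELF_EXEC_PAGESIZE - 1)
     *
     * With a 4 KiB page:
     *   TARGET_ELF_PAGESTART(0x0804a123)  == 0x0804a000
     *   TARGET_ELF_PAGEOFFSET(0x0804a123) == 0x123
     *
     * load_elf_interp() uses these to turn p_vaddr/p_offset into
     * page-aligned arguments for target_mmap(). */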
diff --git a/bsd-user/i386/target_signal.h b/bsd-user/i386/target_signal.h
deleted file mode 100644
index 2ef36d1f98..0000000000
--- a/bsd-user/i386/target_signal.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef TARGET_SIGNAL_H
-#define TARGET_SIGNAL_H
-
-#include "cpu.h"
-
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_long ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-static inline abi_ulong get_sp_from_cpustate(CPUX86State *state)
-{
- return state->regs[R_ESP];
-}
-
-#endif /* TARGET_SIGNAL_H */
diff --git a/bsd-user/main.c b/bsd-user/main.c
index 715129e624..9d370bc8f6 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -36,6 +36,7 @@
#include "tcg/tcg.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
+#include "qemu/cutils.h"
#include "exec/log.h"
#include "trace/control.h"
@@ -47,12 +48,13 @@ unsigned long reserved_va;
static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;
-extern char **environ;
enum BSDType bsd_type;
-/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
- we allocate a bigger stack. Need a better solution, for example
- by remapping the process stack directly at the right place */
+/*
+ * XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
+ * we allocate a bigger stack. Need a better solution, for example
+ * by remapping the process stack directly at the right place
+ */
unsigned long x86_stack_size = 512 * 1024;
void gemu_log(const char *fmt, ...)
@@ -147,15 +149,15 @@ void cpu_loop(CPUX86State *env)
CPUState *cs = env_cpu(env);
int trapnr;
abi_ulong pc;
- //target_siginfo_t info;
+ /* target_siginfo_t info; */
- for(;;) {
+ for (;;) {
cpu_exec_start(cs);
trapnr = cpu_exec(cs);
cpu_exec_end(cs);
process_queued_cpu_work(cs);
- switch(trapnr) {
+ switch (trapnr) {
case 0x80:
/* syscall from int $0x80 */
if (bsd_type == target_freebsd) {
@@ -196,7 +198,7 @@ void cpu_loop(CPUX86State *env)
arg6,
arg7,
arg8);
- } else { //if (bsd_type == target_openbsd)
+ } else { /* if (bsd_type == target_openbsd) */
env->regs[R_EAX] = do_openbsd_syscall(env,
env->regs[R_EAX],
env->regs[R_EBX],
@@ -216,7 +218,7 @@ void cpu_loop(CPUX86State *env)
#ifndef TARGET_ABI32
case EXCP_SYSCALL:
/* syscall from syscall instruction */
- if (bsd_type == target_freebsd)
+ if (bsd_type == target_freebsd) {
env->regs[R_EAX] = do_freebsd_syscall(env,
env->regs[R_EAX],
env->regs[R_EDI],
@@ -225,7 +227,7 @@ void cpu_loop(CPUX86State *env)
env->regs[R_ECX],
env->regs[8],
env->regs[9], 0, 0);
- else { //if (bsd_type == target_openbsd)
+ } else { /* if (bsd_type == target_openbsd) */
env->regs[R_EAX] = do_openbsd_syscall(env,
env->regs[R_EAX],
env->regs[R_EDI],
@@ -244,120 +246,13 @@ void cpu_loop(CPUX86State *env)
}
break;
#endif
-#if 0
- case EXCP0B_NOSEG:
- case EXCP0C_STACK:
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = TARGET_SI_KERNEL;
- info._sifields._sigfault._addr = 0;
- queue_signal(env, info.si_signo, &info);
- break;
- case EXCP0D_GPF:
- /* XXX: potential problem if ABI32 */
-#ifndef TARGET_X86_64
- if (env->eflags & VM_MASK) {
- handle_vm86_fault(env);
- } else
-#endif
- {
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SI_KERNEL;
- info._sifields._sigfault._addr = 0;
- queue_signal(env, info.si_signo, &info);
- }
- break;
- case EXCP0E_PAGE:
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- if (!(env->error_code & 1))
- info.si_code = TARGET_SEGV_MAPERR;
- else
- info.si_code = TARGET_SEGV_ACCERR;
- info._sifields._sigfault._addr = env->cr[2];
- queue_signal(env, info.si_signo, &info);
- break;
- case EXCP00_DIVZ:
-#ifndef TARGET_X86_64
- if (env->eflags & VM_MASK) {
- handle_vm86_trap(env, trapnr);
- } else
-#endif
- {
- /* division by zero */
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = TARGET_FPE_INTDIV;
- info._sifields._sigfault._addr = env->eip;
- queue_signal(env, info.si_signo, &info);
- }
- break;
- case EXCP01_DB:
- case EXCP03_INT3:
-#ifndef TARGET_X86_64
- if (env->eflags & VM_MASK) {
- handle_vm86_trap(env, trapnr);
- } else
-#endif
- {
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- if (trapnr == EXCP01_DB) {
- info.si_code = TARGET_TRAP_BRKPT;
- info._sifields._sigfault._addr = env->eip;
- } else {
- info.si_code = TARGET_SI_KERNEL;
- info._sifields._sigfault._addr = 0;
- }
- queue_signal(env, info.si_signo, &info);
- }
- break;
- case EXCP04_INTO:
- case EXCP05_BOUND:
-#ifndef TARGET_X86_64
- if (env->eflags & VM_MASK) {
- handle_vm86_trap(env, trapnr);
- } else
-#endif
- {
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = TARGET_SI_KERNEL;
- info._sifields._sigfault._addr = 0;
- queue_signal(env, info.si_signo, &info);
- }
- break;
- case EXCP06_ILLOP:
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = TARGET_ILL_ILLOPN;
- info._sifields._sigfault._addr = env->eip;
- queue_signal(env, info.si_signo, &info);
- break;
-#endif
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
-#if 0
- case EXCP_DEBUG:
- {
- int sig;
-
- sig = gdb_handlesig (env, TARGET_SIGTRAP);
- if (sig)
- {
- info.si_signo = sig;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, &info);
- }
- }
- break;
-#endif
default:
pc = env->segs[R_CS].base + env->eip;
- fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
+ fprintf(stderr,
+ "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
(long)pc, trapnr);
abort();
}
@@ -369,16 +264,21 @@ void cpu_loop(CPUX86State *env)
#ifdef TARGET_SPARC
#define SPARC64_STACK_BIAS 2047
-//#define DEBUG_WIN
-/* WARNING: dealing with register windows _is_ complicated. More info
- can be found at http://www.sics.se/~psm/sparcstack.html */
+/* #define DEBUG_WIN */
+/*
+ * WARNING: dealing with register windows _is_ complicated. More info
+ * can be found at http://www.sics.se/~psm/sparcstack.html
+ */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
index = (index + cwp * 16) % (16 * env->nwindows);
- /* wrap handling : if cwp is on the last window, then we use the
- registers 'after' the end */
- if (index < 8 && env->cwp == env->nwindows - 1)
+ /*
+ * wrap handling : if cwp is on the last window, then we use the
+ * registers 'after' the end
+ */
+ if (index < 8 && env->cwp == env->nwindows - 1) {
index += 16 * env->nwindows;
+ }
return index;
}
@@ -390,14 +290,15 @@ static inline void save_window_offset(CPUSPARCState *env, int cwp1)
sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
- if (sp_ptr & 3)
+ if (sp_ptr & 3) {
sp_ptr += SPARC64_STACK_BIAS;
+ }
#endif
#if defined(DEBUG_WIN)
printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
sp_ptr, cwp1);
#endif
- for(i = 0; i < 16; i++) {
+ for (i = 0; i < 16; i++) {
/* FIXME - what to do if put_user() fails? */
put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
sp_ptr += sizeof(abi_ulong);
@@ -440,22 +341,24 @@ static void restore_window(CPUSPARCState *env)
cwp1 = cpu_cwp_inc(env, env->cwp + 1);
sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
- if (sp_ptr & 3)
+ if (sp_ptr & 3) {
sp_ptr += SPARC64_STACK_BIAS;
+ }
#endif
#if defined(DEBUG_WIN)
printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
sp_ptr, cwp1);
#endif
- for(i = 0; i < 16; i++) {
+ for (i = 0; i < 16; i++) {
/* FIXME - what to do if get_user() fails? */
get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
sp_ptr += sizeof(abi_ulong);
}
#ifdef TARGET_SPARC64
env->canrestore++;
- if (env->cleanwin < env->nwindows - 1)
+ if (env->cleanwin < env->nwindows - 1) {
env->cleanwin++;
+ }
env->cansave--;
#else
env->wim = new_wim;
@@ -467,15 +370,17 @@ static void flush_windows(CPUSPARCState *env)
int offset, cwp1;
offset = 1;
- for(;;) {
+ for (;;) {
/* if restore would invoke restore_window(), then we can stop */
cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
- if (env->wim & (1 << cwp1))
+ if (env->wim & (1 << cwp1)) {
break;
+ }
#else
- if (env->canrestore == 0)
+ if (env->canrestore == 0) {
break;
+ }
env->cansave++;
env->canrestore--;
#endif
@@ -496,7 +401,7 @@ void cpu_loop(CPUSPARCState *env)
{
CPUState *cs = env_cpu(env);
int trapnr, ret, syscall_nr;
- //target_siginfo_t info;
+ /* target_siginfo_t info; */
while (1) {
cpu_exec_start(cs);
@@ -510,8 +415,9 @@ void cpu_loop(CPUSPARCState *env)
#else
/* FreeBSD uses 0x141 for syscalls too */
case 0x141:
- if (bsd_type != target_freebsd)
+ if (bsd_type != target_freebsd) {
goto badtrap;
+ }
/* fallthrough */
case 0x100:
#endif
@@ -520,13 +426,14 @@ void cpu_loop(CPUSPARCState *env)
ret = do_freebsd_syscall(env, syscall_nr,
env->regwptr[0], env->regwptr[1],
env->regwptr[2], env->regwptr[3],
- env->regwptr[4], env->regwptr[5], 0, 0);
+ env->regwptr[4], env->regwptr[5],
+ 0, 0);
else if (bsd_type == target_netbsd)
ret = do_netbsd_syscall(env, syscall_nr,
env->regwptr[0], env->regwptr[1],
env->regwptr[2], env->regwptr[3],
env->regwptr[4], env->regwptr[5]);
- else { //if (bsd_type == target_openbsd)
+ else { /* if (bsd_type == target_openbsd) */
#if defined(TARGET_SPARC64)
syscall_nr &= ~(TARGET_OPENBSD_SYSCALL_G7RFLAG |
TARGET_OPENBSD_SYSCALL_G2RFLAG);
@@ -588,16 +495,6 @@ void cpu_loop(CPUSPARCState *env)
break;
case TT_TFAULT:
case TT_DFAULT:
-#if 0
- {
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* XXX: check env->error_code */
- info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->mmuregs[4];
- queue_signal(env, info.si_signo, &info);
- }
-#endif
break;
#else
case TT_SPILL: /* window overflow */
@@ -608,19 +505,6 @@ void cpu_loop(CPUSPARCState *env)
break;
case TT_TFAULT:
case TT_DFAULT:
-#if 0
- {
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* XXX: check env->error_code */
- info.si_code = TARGET_SEGV_MAPERR;
- if (trapnr == TT_DFAULT)
- info._sifields._sigfault._addr = env->dmmuregs[4];
- else
- info._sifields._sigfault._addr = env->tsptr->tpc;
- //queue_signal(env, info.si_signo, &info);
- }
-#endif
break;
#endif
case EXCP_INTERRUPT:
@@ -628,30 +512,18 @@ void cpu_loop(CPUSPARCState *env)
break;
case EXCP_DEBUG:
{
-#if 0
- int sig =
-#endif
gdb_handlesig(cs, TARGET_SIGTRAP);
-#if 0
- if (sig)
- {
- info.si_signo = sig;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- //queue_signal(env, info.si_signo, &info);
- }
-#endif
}
break;
default:
#ifdef TARGET_SPARC64
badtrap:
#endif
- printf ("Unhandled trap: 0x%x\n", trapnr);
+ printf("Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(cs, stderr, 0);
- exit (1);
+ exit(1);
}
- process_pending_signals (env);
+ process_pending_signals(env);
}
}
@@ -741,15 +613,16 @@ int main(int argc, char **argv)
TaskState ts1, *ts = &ts1;
CPUArchState *env;
CPUState *cpu;
- int optind;
+ int optind, rv;
const char *r;
const char *gdbstub = NULL;
char **target_environ, **wrk;
envlist_t *envlist = NULL;
bsd_type = target_openbsd;
- if (argc <= 1)
+ if (argc <= 1) {
usage();
+ }
error_init(argv[0]);
module_call_init(MODULE_INIT_TRACE);
@@ -769,11 +642,13 @@ int main(int argc, char **argv)
optind = 1;
for (;;) {
- if (optind >= argc)
+ if (optind >= argc) {
break;
+ }
r = argv[optind];
- if (r[0] != '-')
+ if (r[0] != '-') {
break;
+ }
optind++;
r++;
if (!strcmp(r, "-")) {
@@ -790,24 +665,28 @@ int main(int argc, char **argv)
log_file = argv[optind++];
} else if (!strcmp(r, "E")) {
r = argv[optind++];
- if (envlist_setenv(envlist, r) != 0)
+ if (envlist_setenv(envlist, r) != 0) {
usage();
+ }
} else if (!strcmp(r, "ignore-environment")) {
envlist_free(envlist);
envlist = envlist_create();
} else if (!strcmp(r, "U")) {
r = argv[optind++];
- if (envlist_unsetenv(envlist, r) != 0)
+ if (envlist_unsetenv(envlist, r) != 0) {
usage();
+ }
} else if (!strcmp(r, "s")) {
r = argv[optind++];
- x86_stack_size = strtol(r, (char **)&r, 0);
- if (x86_stack_size <= 0)
+ rv = qemu_strtoul(r, &r, 0, &x86_stack_size);
+ if (rv < 0 || x86_stack_size <= 0) {
usage();
- if (*r == 'M')
+ }
+ if (*r == 'M') {
x86_stack_size *= MiB;
- else if (*r == 'k' || *r == 'K')
+ } else if (*r == 'k' || *r == 'K') {
x86_stack_size *= KiB;
+ }
} else if (!strcmp(r, "L")) {
interp_prefix = argv[optind++];
} else if (!strcmp(r, "p")) {
@@ -824,15 +703,18 @@ int main(int argc, char **argv)
} else if (!strcmp(r, "cpu")) {
cpu_model = argv[optind++];
if (is_help_option(cpu_model)) {
-/* XXX: implement xxx_cpu_list for targets that still miss it */
+ /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
- cpu_list();
+ cpu_list();
#endif
exit(1);
}
} else if (!strcmp(r, "B")) {
- guest_base = strtol(argv[optind++], NULL, 0);
- have_guest_base = true;
+ rv = qemu_strtoul(argv[optind++], NULL, 0, &guest_base);
+ if (rv < 0) {
+ usage();
+ }
+ have_guest_base = true;
} else if (!strcmp(r, "drop-ld-preload")) {
(void) envlist_unsetenv(envlist, "LD_PRELOAD");
} else if (!strcmp(r, "bsd")) {
@@ -947,17 +829,19 @@ int main(int argc, char **argv)
if (!have_guest_base) {
FILE *fp;
- if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
+ fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
+ if (fp != NULL) {
unsigned long tmp;
if (fscanf(fp, "%lu", &tmp) == 1) {
mmap_min_addr = tmp;
- qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);
+ qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n",
+ mmap_min_addr);
}
fclose(fp);
}
}
- if (loader_exec(filename, argv+optind, target_environ, regs, info) != 0) {
+ if (loader_exec(filename, argv + optind, target_environ, regs, info) != 0) {
printf("Error loading %s\n", filename);
_exit(1);
}
@@ -989,9 +873,11 @@ int main(int argc, char **argv)
syscall_init();
signal_init();
- /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
- generating the prologue until now so that the prologue can take
- the real value of GUEST_BASE into account. */
+ /*
+ * Now that we've loaded the binary, GUEST_BASE is fixed. Delay
+ * generating the prologue until now so that the prologue can take
+ * the real value of GUEST_BASE into account.
+ */
tcg_prologue_init(tcg_ctx);
tcg_region_init();
@@ -1052,8 +938,8 @@ int main(int argc, char **argv)
env->idt.limit = 255;
#endif
env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
- PROT_READ|PROT_WRITE,
- MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
idt_table = g2h_untagged(env->idt.base);
set_idt(0, 0);
set_idt(1, 0);
@@ -1081,8 +967,8 @@ int main(int argc, char **argv)
{
uint64_t *gdt_table;
env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
- PROT_READ|PROT_WRITE,
- MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
gdt_table = g2h_untagged(env->gdt.base);
#ifdef TARGET_ABI32
@@ -1122,10 +1008,12 @@ int main(int argc, char **argv)
env->pc = regs->pc;
env->npc = regs->npc;
env->y = regs->y;
- for(i = 0; i < 8; i++)
+ for (i = 0; i < 8; i++) {
env->gregs[i] = regs->u_regs[i];
- for(i = 0; i < 8; i++)
+ }
+ for (i = 0; i < 8; i++) {
env->regwptr[i] = regs->u_regs[i + 8];
+ }
}
#else
#error unsupported target CPU
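
Note: the option-parsing hunks replace strtol() with qemu_strtoul() from "qemu/cutils.h", which reports failure through its return value instead of silently yielding 0 on bad input. A sketch of the resulting "-s" (stack size) handling; the MiB/KiB constants are assumed to come from "qemu/units.h":

    const char *r = "16M";                            /* e.g. "-s 16M" */
    unsigned long x86_stack_size;

    int rv = qemu_strtoul(r, &r, 0, &x86_stack_size); /* rv < 0 on parse error */
    if (rv < 0 || x86_stack_size <= 0) {
        usage();
    }
    if (*r == 'M') {
        x86_stack_size *= MiB;               /* "16M" -> 16 * 1024 * 1024 bytes */
    } else if (*r == 'k' || *r == 'K') {
        x86_stack_size *= KiB;
    }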
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index 01ec808003..0ac1b92706 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -93,11 +93,11 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
if (start > host_start) {
/* handle host page containing start */
prot1 = prot;
- for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
+ for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
if (host_end == host_start + qemu_host_page_size) {
- for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
end = host_end;
@@ -110,7 +110,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
}
if (end < host_end) {
prot1 = prot;
- for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
@@ -148,7 +148,7 @@ static int mmap_frag(abi_ulong real_start,
/* get the protection of the target pages outside the mapping */
prot1 = 0;
- for(addr = real_start; addr < real_end; addr++) {
+ for (addr = real_start; addr < real_end; addr++) {
if (addr < start || addr >= end)
prot1 |= page_get_flags(addr);
}
@@ -225,9 +225,9 @@ static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
if (addr == 0)
addr = mmap_next_start;
addr_start = addr;
- for(;;) {
+ for (;;) {
prot = 0;
- for(addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
+ for (addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr1);
}
if (prot == 0)
@@ -262,7 +262,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
printf("MAP_FIXED ");
if (flags & MAP_ANON)
printf("MAP_ANON ");
- switch(flags & TARGET_BSD_MAP_FLAGMASK) {
+ switch (flags & TARGET_BSD_MAP_FLAGMASK) {
case MAP_PRIVATE:
printf("MAP_PRIVATE ");
break;
@@ -321,7 +321,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
end = start + len;
real_end = HOST_PAGE_ALIGN(end);
- for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
flg = page_get_flags(addr);
if (flg & PAGE_RESERVED) {
errno = ENXIO;
@@ -433,11 +433,11 @@ int target_munmap(abi_ulong start, abi_ulong len)
if (start > real_start) {
/* handle host page containing start */
prot = 0;
- for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
+ for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
if (real_end == real_start + qemu_host_page_size) {
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
end = real_end;
@@ -447,7 +447,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
}
if (end < real_end) {
prot = 0;
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
if (prot != 0)
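
Note: the loops reformatted above exist because host and target page sizes may differ, so a host-page-granular mprotect()/munmap() must preserve the flags of neighbouring target pages. An illustrative case, assuming 16 KiB host pages and 4 KiB target pages:

    /* target_mprotect(0x5000, 0x2000, prot):
     *   host_start = 0x4000, host_end = 0x8000 (a single 16 KiB host page)
     *   Target pages [0x4000,0x5000) and [0x7000,0x8000) share that host
     *   page but are not part of the request, so their page_get_flags()
     *   bits are OR'ed into prot1 before the one host mprotect() call,
     *   keeping their permissions intact. */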
diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h
index b836b603af..c02e8a5ca1 100644
--- a/bsd-user/qemu.h
+++ b/bsd-user/qemu.h
@@ -27,6 +27,8 @@
#include "exec/user/abitypes.h"
+extern char **environ;
+
enum BSDType {
target_freebsd,
target_netbsd,
@@ -36,7 +38,6 @@ extern enum BSDType bsd_type;
#include "syscall_defs.h"
#include "target_syscall.h"
-#include "target_signal.h"
#include "exec/gdbstub.h"
#if defined(CONFIG_USE_NPTL)
@@ -45,9 +46,10 @@ extern enum BSDType bsd_type;
#define THREAD
#endif
-/* This struct is used to hold certain information about the image.
- * Basically, it replicates in user space what would be certain
- * task_struct fields in the kernel
+/*
+ * This struct is used to hold certain information about the image. Basically,
+ * it replicates in user space what would be certain task_struct fields in the
+ * kernel
*/
struct image_info {
abi_ulong load_addr;
@@ -71,18 +73,18 @@ struct image_info {
struct sigqueue {
struct sigqueue *next;
- //target_siginfo_t info;
};
struct emulated_sigtable {
int pending; /* true if signal is pending */
struct sigqueue *first;
- struct sigqueue info; /* in order to always have memory for the
- first signal, we put it here */
+ /* in order to always have memory for the first signal, we put it here */
+ struct sigqueue info;
};
-/* NOTE: we force a big alignment so that the stack stored after is
- aligned too */
+/*
+ * NOTE: we force a big alignment so that the stack stored after is aligned too
+ */
typedef struct TaskState {
pid_t ts_tid; /* tid (or pid) of this task */
@@ -102,7 +104,6 @@ void init_task_state(TaskState *ts);
extern const char *qemu_uname_release;
extern unsigned long mmap_min_addr;
-/* ??? See if we can avoid exposing so much of the loader internals. */
/*
* MAX_ARG_PAGES defines the number of pages allocated for arguments
* and envelope for the new program. 32 should suffice, this gives
@@ -114,7 +115,7 @@ extern unsigned long mmap_min_addr;
* This structure is used to hold the arguments that are
* used when loading binaries.
*/
-struct linux_binprm {
+struct bsd_binprm {
char buf[128];
void *page[MAX_ARG_PAGES];
abi_ulong p;
@@ -123,19 +124,19 @@ struct linux_binprm {
int argc, envc;
char **argv;
char **envp;
- char * filename; /* Name of binary */
+ char *filename; /* Name of binary */
};
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop);
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
abi_ulong stringp, int push_ptr);
-int loader_exec(const char * filename, char ** argv, char ** envp,
- struct target_pt_regs * regs, struct image_info *infop);
+int loader_exec(const char *filename, char **argv, char **envp,
+ struct target_pt_regs *regs, struct image_info *infop);
-int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
- struct image_info * info);
-int load_flt_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
- struct image_info * info);
+int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
+ struct image_info *info);
+int load_flt_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
+ struct image_info *info);
abi_long memcpy_to_target(abi_ulong dest, const void *src,
unsigned long len);
@@ -193,9 +194,6 @@ extern int do_strace;
/* signal.c */
void process_pending_signals(CPUArchState *cpu_env);
void signal_init(void);
-//int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info);
-//void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
-//void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUArchState *env);
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
@@ -226,9 +224,11 @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
return page_check_range((target_ulong)addr, size, type) == 0;
}
-/* NOTE __get_user and __put_user use host pointers and don't check access. */
-/* These are usually used to access struct data members once the
- * struct has been locked - usually with lock_user_struct().
+/*
+ * NOTE __get_user and __put_user use host pointers and don't check access.
+ *
+ * These are usually used to access struct data members once the struct has been
+ * locked - usually with lock_user_struct().
*/
#define __put_user(x, hptr)\
({\
@@ -248,7 +248,7 @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
break;\
default:\
abort();\
- }\
+ } \
0;\
})
@@ -269,24 +269,26 @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
x = (typeof(*hptr))tswap64(*(uint64_t *)(hptr));\
break;\
default:\
- /* avoid warning */\
x = 0;\
abort();\
- }\
+ } \
0;\
})
-/* put_user()/get_user() take a guest address and check access */
-/* These are usually used to access an atomic data type, such as an int,
- * that has been passed by address. These internally perform locking
- * and unlocking on the data type.
+/*
+ * put_user()/get_user() take a guest address and check access
+ *
+ * These are usually used to access an atomic data type, such as an int, that
+ * has been passed by address. These internally perform locking and unlocking
+ * on the data type.
*/
#define put_user(x, gaddr, target_type) \
({ \
abi_ulong __gaddr = (gaddr); \
target_type *__hptr; \
abi_long __ret; \
- if ((__hptr = lock_user(VERIFY_WRITE, __gaddr, sizeof(target_type), 0))) { \
+ __hptr = lock_user(VERIFY_WRITE, __gaddr, sizeof(target_type), 0); \
+ if (__hptr) { \
__ret = __put_user((x), __hptr); \
unlock_user(__hptr, __gaddr, sizeof(target_type)); \
} else \
@@ -299,11 +301,11 @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
abi_ulong __gaddr = (gaddr); \
target_type *__hptr; \
abi_long __ret; \
- if ((__hptr = lock_user(VERIFY_READ, __gaddr, sizeof(target_type), 1))) { \
+ __hptr = lock_user(VERIFY_READ, __gaddr, sizeof(target_type), 1); \
+ if (__hptr) { \
__ret = __get_user((x), __hptr); \
unlock_user(__hptr, __gaddr, 0); \
} else { \
- /* avoid warning */ \
(x) = 0; \
__ret = -TARGET_EFAULT; \
} \
@@ -332,33 +334,41 @@ static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
#define get_user_u8(x, gaddr) get_user((x), (gaddr), uint8_t)
#define get_user_s8(x, gaddr) get_user((x), (gaddr), int8_t)
-/* copy_from_user() and copy_to_user() are usually used to copy data
+/*
+ * copy_from_user() and copy_to_user() are usually used to copy data
* buffers between the target and host. These internally perform
* locking/unlocking of the memory.
*/
abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len);
abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
-/* Functions for accessing guest memory. The tget and tput functions
- read/write single values, byteswapping as necessary. The lock_user function
- gets a pointer to a contiguous area of guest memory, but does not perform
- any byteswapping. lock_user may return either a pointer to the guest
- memory, or a temporary buffer. */
+/*
+ * Functions for accessing guest memory. The tget and tput functions
+ * read/write single values, byteswapping as necessary. The lock_user function
+ * gets a pointer to a contiguous area of guest memory, but does not perform
+ * any byteswapping. lock_user may return either a pointer to the guest
+ * memory, or a temporary buffer.
+ */
-/* Lock an area of guest memory into the host. If copy is true then the
- host area will have the same contents as the guest. */
-static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
+/*
+ * Lock an area of guest memory into the host. If copy is true then the
+ * host area will have the same contents as the guest.
+ */
+static inline void *lock_user(int type, abi_ulong guest_addr, long len,
+ int copy)
{
- if (!access_ok(type, guest_addr, len))
+ if (!access_ok(type, guest_addr, len)) {
return NULL;
+ }
#ifdef DEBUG_REMAP
{
void *addr;
addr = g_malloc(len);
- if (copy)
+ if (copy) {
memcpy(addr, g2h_untagged(guest_addr), len);
- else
+ } else {
memset(addr, 0, len);
+ }
return addr;
}
#else
@@ -366,26 +376,32 @@ static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy
#endif
}
-/* Unlock an area of guest memory. The first LEN bytes must be
- flushed back to guest memory. host_ptr = NULL is explicitly
- allowed and does nothing. */
+/*
+ * Unlock an area of guest memory. The first LEN bytes must be flushed back to
+ * guest memory. host_ptr = NULL is explicitly allowed and does nothing.
+ */
static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
long len)
{
#ifdef DEBUG_REMAP
- if (!host_ptr)
+ if (!host_ptr) {
return;
- if (host_ptr == g2h_untagged(guest_addr))
+ }
+ if (host_ptr == g2h_untagged(guest_addr)) {
return;
- if (len > 0)
+ }
+ if (len > 0) {
memcpy(g2h_untagged(guest_addr), host_ptr, len);
+ }
g_free(host_ptr);
#endif
}
-/* Return the length of a string in target memory or -TARGET_EFAULT if
- access error. */
+/*
+ * Return the length of a string in target memory or -TARGET_EFAULT if access
+ * error.
+ */
abi_long target_strlen(abi_ulong gaddr);
/* Like lock_user but for null terminated strings. */
@@ -393,8 +409,9 @@ static inline void *lock_user_string(abi_ulong guest_addr)
{
abi_long len;
len = target_strlen(guest_addr);
- if (len < 0)
+ if (len < 0) {
return NULL;
+ }
return lock_user(VERIFY_READ, guest_addr, (long)(len + 1), 1);
}
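
Note: as a usage reminder for the access macros touched above, put_user()/get_user() take a guest address plus a target type and evaluate to 0 or -TARGET_EFAULT, and the *_ual wrappers fix the type to abi_ulong. A short sketch (guest_sp is a placeholder for a valid, writable guest address):

    abi_ulong sp = guest_sp;            /* hypothetical guest address */
    abi_ulong val;

    if (get_user_ual(val, sp) == 0) {   /* locks, byteswaps, copies in */
        put_user_ual(val + 1, sp);      /* writes the value back out */
    }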
diff --git a/bsd-user/signal.c b/bsd-user/signal.c
index f6f7aa2427..ad6d935569 100644
--- a/bsd-user/signal.c
+++ b/bsd-user/signal.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "qemu.h"
-#include "target_signal.h"
void signal_init(void)
{
diff --git a/bsd-user/sparc/target_signal.h b/bsd-user/sparc/target_signal.h
deleted file mode 100644
index 5b2abba40f..0000000000
--- a/bsd-user/sparc/target_signal.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef TARGET_SIGNAL_H
-#define TARGET_SIGNAL_H
-
-#include "cpu.h"
-
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_long ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-#ifndef UREG_I6
-#define UREG_I6 6
-#endif
-#ifndef UREG_FP
-#define UREG_FP UREG_I6
-#endif
-
-static inline abi_ulong get_sp_from_cpustate(CPUSPARCState *state)
-{
- return state->regwptr[UREG_FP];
-}
-
-#endif /* TARGET_SIGNAL_H */
diff --git a/bsd-user/sparc64/target_signal.h b/bsd-user/sparc64/target_signal.h
deleted file mode 100644
index 5b2abba40f..0000000000
--- a/bsd-user/sparc64/target_signal.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef TARGET_SIGNAL_H
-#define TARGET_SIGNAL_H
-
-#include "cpu.h"
-
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_long ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-#ifndef UREG_I6
-#define UREG_I6 6
-#endif
-#ifndef UREG_FP
-#define UREG_FP UREG_I6
-#endif
-
-static inline abi_ulong get_sp_from_cpustate(CPUSPARCState *state)
-{
- return state->regwptr[UREG_FP];
-}
-
-#endif /* TARGET_SIGNAL_H */
diff --git a/bsd-user/strace.c b/bsd-user/strace.c
index 2c3b59caf0..be40b8a20c 100644
--- a/bsd-user/strace.c
+++ b/bsd-user/strace.c
@@ -128,14 +128,6 @@ static void print_syscall_ret_addr(const struct syscallname *name, abi_long ret)
}
}
-#if 0 /* currently unused */
-static void
-print_syscall_ret_raw(struct syscallname *name, abi_long ret)
-{
- gemu_log(" = 0x" TARGET_ABI_FMT_lx "\n", ret);
-}
-#endif
-
/*
* An array of all of the syscalls we know about
*/
diff --git a/bsd-user/syscall.c b/bsd-user/syscall.c
index 4abff796c7..7d986e9700 100644
--- a/bsd-user/syscall.c
+++ b/bsd-user/syscall.c
@@ -95,7 +95,7 @@ static abi_long do_freebsd_sysarch(CPUX86State *env, int op, abi_ulong parms)
abi_ulong val;
int idx;
- switch(op) {
+ switch (op) {
#ifdef TARGET_ABI32
case TARGET_FREEBSD_I386_SET_GSBASE:
case TARGET_FREEBSD_I386_SET_FSBASE:
@@ -272,7 +272,7 @@ static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
if (!target_vec)
return -TARGET_EFAULT;
- for(i = 0;i < count; i++) {
+ for (i = 0; i < count; i++) {
base = tswapl(target_vec[i].iov_base);
vec[i].iov_len = tswapl(target_vec[i].iov_len);
if (vec[i].iov_len != 0) {
@@ -298,7 +298,7 @@ static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
if (!target_vec)
return -TARGET_EFAULT;
- for(i = 0;i < count; i++) {
+ for (i = 0; i < count; i++) {
if (target_vec[i].iov_base) {
base = tswapl(target_vec[i].iov_base);
unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
@@ -326,10 +326,10 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1,
#endif
record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0);
- if(do_strace)
+ if (do_strace)
print_freebsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
- switch(num) {
+ switch (num) {
case TARGET_FREEBSD_NR_exit:
#ifdef CONFIG_GPROF
_mcleanup();
@@ -428,10 +428,10 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1,
record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0);
- if(do_strace)
+ if (do_strace)
print_netbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
- switch(num) {
+ switch (num) {
case TARGET_NETBSD_NR_exit:
#ifdef CONFIG_GPROF
_mcleanup();
@@ -507,10 +507,10 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1,
record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0);
- if(do_strace)
+ if (do_strace)
print_openbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
- switch(num) {
+ switch (num) {
case TARGET_OPENBSD_NR_exit:
#ifdef CONFIG_GPROF
_mcleanup();
diff --git a/bsd-user/uaccess.c b/bsd-user/uaccess.c
index 91e2067933..89163257f4 100644
--- a/bsd-user/uaccess.c
+++ b/bsd-user/uaccess.c
@@ -46,7 +46,7 @@ abi_long target_strlen(abi_ulong guest_addr1)
int max_len, len;
guest_addr = guest_addr1;
- for(;;) {
+ for (;;) {
max_len = TARGET_PAGE_SIZE - (guest_addr & ~TARGET_PAGE_MASK);
ptr = lock_user(VERIFY_READ, guest_addr, max_len, 1);
if (!ptr)
diff --git a/bsd-user/x86_64/target_signal.h b/bsd-user/x86_64/target_signal.h
deleted file mode 100644
index 659cd401b8..0000000000
--- a/bsd-user/x86_64/target_signal.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef TARGET_SIGNAL_H
-#define TARGET_SIGNAL_H
-
-#include "cpu.h"
-
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_long ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-static inline abi_ulong get_sp_from_cpustate(CPUX86State *state)
-{
- return state->regs[R_ESP];
-}
-
-#endif /* TARGET_SIGNAL_H */
diff --git a/default-configs/targets/sparc-bsd-user.mak b/default-configs/targets/sparc-bsd-user.mak
deleted file mode 100644
index 9ba3d7b07f..0000000000
--- a/default-configs/targets/sparc-bsd-user.mak
+++ /dev/null
@@ -1,3 +0,0 @@
-TARGET_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
-TARGET_WORDS_BIGENDIAN=y
diff --git a/default-configs/targets/sparc64-bsd-user.mak b/default-configs/targets/sparc64-bsd-user.mak
deleted file mode 100644
index 8dd3217800..0000000000
--- a/default-configs/targets/sparc64-bsd-user.mak
+++ /dev/null
@@ -1,4 +0,0 @@
-TARGET_ARCH=sparc64
-TARGET_BASE_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
-TARGET_WORDS_BIGENDIAN=y
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 324ef92beb..e6836393f7 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2146,8 +2146,16 @@ static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
void helper_lock_init(void);
/* svm_helper.c */
+#ifdef CONFIG_USER_ONLY
+static inline void
+cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
+ uint64_t param, uintptr_t retaddr)
+{ /* no-op */ }
+#else
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
uint64_t param, uintptr_t retaddr);
+#endif
+
/* apic.c */
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
diff --git a/target/i386/helper.h b/target/i386/helper.h
index 095520f81f..f3d8c3f949 100644
--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -42,9 +42,6 @@ DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl)
DEF_HELPER_2(iret_real, void, env, int)
DEF_HELPER_3(iret_protected, void, env, int, int)
DEF_HELPER_3(lret_protected, void, env, int, int)
-DEF_HELPER_2(read_crN, tl, env, int)
-DEF_HELPER_3(write_crN, void, env, int, tl)
-DEF_HELPER_2(lmsw, void, env, tl)
DEF_HELPER_1(clts, void, env)
#ifndef CONFIG_USER_ONLY
@@ -52,7 +49,6 @@ DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl)
#endif /* !CONFIG_USER_ONLY */
DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int)
-DEF_HELPER_2(invlpg, void, env, tl)
DEF_HELPER_1(sysenter, void, env)
DEF_HELPER_2(sysexit, void, env, int)
@@ -60,14 +56,11 @@ DEF_HELPER_2(sysexit, void, env, int)
DEF_HELPER_2(syscall, void, env, int)
DEF_HELPER_2(sysret, void, env, int)
#endif
-DEF_HELPER_2(hlt, void, env, int)
-DEF_HELPER_2(monitor, void, env, tl)
-DEF_HELPER_2(mwait, void, env, int)
-DEF_HELPER_2(pause, void, env, int)
-DEF_HELPER_1(debug, void, env)
+DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int)
+DEF_HELPER_FLAGS_1(debug, TCG_CALL_NO_WG, noreturn, env)
DEF_HELPER_1(reset_rf, void, env)
-DEF_HELPER_3(raise_interrupt, void, env, int, int)
-DEF_HELPER_2(raise_exception, void, env, int)
+DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int)
+DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int)
DEF_HELPER_1(cli, void, env)
DEF_HELPER_1(sti, void, env)
DEF_HELPER_1(clac, void, env)
@@ -86,30 +79,23 @@ DEF_HELPER_2(cmpxchg8b, void, env, tl)
DEF_HELPER_2(cmpxchg16b_unlocked, void, env, tl)
DEF_HELPER_2(cmpxchg16b, void, env, tl)
#endif
-DEF_HELPER_1(single_step, void, env)
+DEF_HELPER_FLAGS_1(single_step, TCG_CALL_NO_WG, noreturn, env)
DEF_HELPER_1(rechecking_single_step, void, env)
DEF_HELPER_1(cpuid, void, env)
DEF_HELPER_1(rdtsc, void, env)
DEF_HELPER_1(rdtscp, void, env)
-DEF_HELPER_1(rdpmc, void, env)
-DEF_HELPER_1(rdmsr, void, env)
-DEF_HELPER_1(wrmsr, void, env)
+DEF_HELPER_FLAGS_1(rdpmc, TCG_CALL_NO_WG, noreturn, env)
-DEF_HELPER_2(check_iob, void, env, i32)
-DEF_HELPER_2(check_iow, void, env, i32)
-DEF_HELPER_2(check_iol, void, env, i32)
+#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(outb, void, env, i32, i32)
DEF_HELPER_2(inb, tl, env, i32)
DEF_HELPER_3(outw, void, env, i32, i32)
DEF_HELPER_2(inw, tl, env, i32)
DEF_HELPER_3(outl, void, env, i32, i32)
DEF_HELPER_2(inl, tl, env, i32)
-
-#ifndef CONFIG_USER_ONLY
+DEF_HELPER_FLAGS_3(check_io, TCG_CALL_NO_WG, void, env, i32, i32)
DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl)
-#endif /* !CONFIG_USER_ONLY */
-
-DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64)
+DEF_HELPER_2(svm_check_intercept, void, env, i32)
DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32)
DEF_HELPER_3(vmrun, void, env, int, int)
DEF_HELPER_1(vmmcall, void, env)
@@ -117,8 +103,15 @@ DEF_HELPER_2(vmload, void, env, int)
DEF_HELPER_2(vmsave, void, env, int)
DEF_HELPER_1(stgi, void, env)
DEF_HELPER_1(clgi, void, env)
-DEF_HELPER_1(skinit, void, env)
-DEF_HELPER_2(invlpga, void, env, int)
+DEF_HELPER_FLAGS_2(flush_page, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(hlt, TCG_CALL_NO_WG, noreturn, env, int)
+DEF_HELPER_FLAGS_2(monitor, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(mwait, TCG_CALL_NO_WG, noreturn, env, int)
+DEF_HELPER_1(rdmsr, void, env)
+DEF_HELPER_1(wrmsr, void, env)
+DEF_HELPER_FLAGS_2(read_crN, TCG_CALL_NO_RWG, tl, env, int)
+DEF_HELPER_FLAGS_3(write_crN, TCG_CALL_NO_RWG, void, env, int, tl)
+#endif /* !CONFIG_USER_ONLY */
/* x86 FPU */
diff --git a/target/i386/tcg/bpt_helper.c b/target/i386/tcg/bpt_helper.c
index fb2a65ac9c..83cd89581e 100644
--- a/target/i386/tcg/bpt_helper.c
+++ b/target/i386/tcg/bpt_helper.c
@@ -22,7 +22,7 @@
#include "exec/helper-proto.h"
#include "helper-tcg.h"
-void helper_single_step(CPUX86State *env)
+void QEMU_NORETURN helper_single_step(CPUX86State *env)
{
#ifndef CONFIG_USER_ONLY
check_hw_breakpoints(env, true);
diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c
index 0183f3932e..bdae887d0a 100644
--- a/target/i386/tcg/excp_helper.c
+++ b/target/i386/tcg/excp_helper.c
@@ -25,12 +25,13 @@
#include "exec/helper-proto.h"
#include "helper-tcg.h"
-void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
+void QEMU_NORETURN helper_raise_interrupt(CPUX86State *env, int intno,
+ int next_eip_addend)
{
raise_interrupt(env, intno, 1, 0, next_eip_addend);
}
-void helper_raise_exception(CPUX86State *env, int exception_index)
+void QEMU_NORETURN helper_raise_exception(CPUX86State *env, int exception_index)
{
raise_exception(env, exception_index);
}
@@ -116,24 +117,25 @@ void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}
-void raise_exception_err(CPUX86State *env, int exception_index,
- int error_code)
+void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
+ int error_code)
{
raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}
-void raise_exception_err_ra(CPUX86State *env, int exception_index,
- int error_code, uintptr_t retaddr)
+void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
+ int error_code, uintptr_t retaddr)
{
raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}
-void raise_exception(CPUX86State *env, int exception_index)
+void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index)
{
raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}
-void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
+void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
+ uintptr_t retaddr)
{
raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index 97fb7a226a..2510cc244e 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -76,11 +76,14 @@ extern const uint8_t parity_table[256];
/* misc_helper.c */
void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask);
+void do_pause(CPUX86State *env) QEMU_NORETURN;
-/* svm_helper.c */
+/* sysemu/svm_helper.c */
+#ifndef CONFIG_USER_ONLY
void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
uint64_t exit_info_1, uintptr_t retaddr);
void do_vmexit(CPUX86State *env);
+#endif
/* seg_helper.c */
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c
index a30379283e..baffa5d7ba 100644
--- a/target/i386/tcg/misc_helper.c
+++ b/target/i386/tcg/misc_helper.c
@@ -60,22 +60,6 @@ void helper_cpuid(CPUX86State *env)
env->regs[R_EDX] = edx;
}
-void helper_lmsw(CPUX86State *env, target_ulong t0)
-{
- /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
- if already set to one. */
- t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
- helper_write_crN(env, 0, t0);
-}
-
-void helper_invlpg(CPUX86State *env, target_ulong addr)
-{
- X86CPU *cpu = env_archcpu(env);
-
- cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0, GETPC());
- tlb_flush_page(CPU(cpu), addr);
-}
-
void helper_rdtsc(CPUX86State *env)
{
uint64_t val;
@@ -96,7 +80,7 @@ void helper_rdtscp(CPUX86State *env)
env->regs[R_ECX] = (uint32_t)(env->tsc_aux);
}
-void helper_rdpmc(CPUX86State *env)
+void QEMU_NORETURN helper_rdpmc(CPUX86State *env)
{
if (((env->cr[4] & CR4_PCE_MASK) == 0 ) &&
((env->hflags & HF_CPL_MASK) != 0)) {
@@ -109,75 +93,24 @@ void helper_rdpmc(CPUX86State *env)
raise_exception_err(env, EXCP06_ILLOP, 0);
}
-static void do_pause(X86CPU *cpu)
+void QEMU_NORETURN do_pause(CPUX86State *env)
{
- CPUState *cs = CPU(cpu);
+ CPUState *cs = env_cpu(env);
/* Just let another CPU run. */
cs->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(cs);
}
-static void do_hlt(X86CPU *cpu)
-{
- CPUState *cs = CPU(cpu);
- CPUX86State *env = &cpu->env;
-
- env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
- cs->halted = 1;
- cs->exception_index = EXCP_HLT;
- cpu_loop_exit(cs);
-}
-
-void helper_hlt(CPUX86State *env, int next_eip_addend)
+void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend)
{
- X86CPU *cpu = env_archcpu(env);
-
- cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
- env->eip += next_eip_addend;
-
- do_hlt(cpu);
-}
-
-void helper_monitor(CPUX86State *env, target_ulong ptr)
-{
- if ((uint32_t)env->regs[R_ECX] != 0) {
- raise_exception_ra(env, EXCP0D_GPF, GETPC());
- }
- /* XXX: store address? */
- cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
-}
-
-void helper_mwait(CPUX86State *env, int next_eip_addend)
-{
- CPUState *cs = env_cpu(env);
- X86CPU *cpu = env_archcpu(env);
-
- if ((uint32_t)env->regs[R_ECX] != 0) {
- raise_exception_ra(env, EXCP0D_GPF, GETPC());
- }
- cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
- env->eip += next_eip_addend;
-
- /* XXX: not complete but not completely erroneous */
- if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
- do_pause(cpu);
- } else {
- do_hlt(cpu);
- }
-}
-
-void helper_pause(CPUX86State *env, int next_eip_addend)
-{
- X86CPU *cpu = env_archcpu(env);
-
cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC());
env->eip += next_eip_addend;
- do_pause(cpu);
+ do_pause(env);
}
-void helper_debug(CPUX86State *env)
+void QEMU_NORETURN helper_debug(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index cf3f051524..2f6cdc8239 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -2416,46 +2416,3 @@ void helper_verw(CPUX86State *env, target_ulong selector1)
}
CC_SRC = eflags | CC_Z;
}
-
-/* check if Port I/O is allowed in TSS */
-static inline void check_io(CPUX86State *env, int addr, int size,
- uintptr_t retaddr)
-{
- int io_offset, val, mask;
-
- /* TSS must be a valid 32 bit one */
- if (!(env->tr.flags & DESC_P_MASK) ||
- ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
- env->tr.limit < 103) {
- goto fail;
- }
- io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
- io_offset += (addr >> 3);
- /* Note: the check needs two bytes */
- if ((io_offset + 1) > env->tr.limit) {
- goto fail;
- }
- val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
- val >>= (addr & 7);
- mask = (1 << size) - 1;
- /* all bits must be zero to allow the I/O */
- if ((val & mask) != 0) {
- fail:
- raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
- }
-}
-
-void helper_check_iob(CPUX86State *env, uint32_t t0)
-{
- check_io(env, t0, 1, GETPC());
-}
-
-void helper_check_iow(CPUX86State *env, uint32_t t0)
-{
- check_io(env, t0, 2, GETPC());
-}
-
-void helper_check_iol(CPUX86State *env, uint32_t t0)
-{
- check_io(env, t0, 4, GETPC());
-}
diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c
index 66e7939537..0cef2f1a4c 100644
--- a/target/i386/tcg/sysemu/misc_helper.c
+++ b/target/i386/tcg/sysemu/misc_helper.c
@@ -65,7 +65,6 @@ target_ulong helper_read_crN(CPUX86State *env, int reg)
{
target_ulong val;
- cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC());
switch (reg) {
default:
val = env->cr[reg];
@@ -83,7 +82,6 @@ target_ulong helper_read_crN(CPUX86State *env, int reg)
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
- cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC());
switch (reg) {
case 0:
cpu_x86_update_cr0(env, t0);
@@ -440,3 +438,53 @@ void helper_rdmsr(CPUX86State *env)
env->regs[R_EAX] = (uint32_t)(val);
env->regs[R_EDX] = (uint32_t)(val >> 32);
}
+
+void helper_flush_page(CPUX86State *env, target_ulong addr)
+{
+ tlb_flush_page(env_cpu(env), addr);
+}
+
+static void QEMU_NORETURN do_hlt(CPUX86State *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+}
+
+void QEMU_NORETURN helper_hlt(CPUX86State *env, int next_eip_addend)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
+ env->eip += next_eip_addend;
+
+ do_hlt(env);
+}
+
+void helper_monitor(CPUX86State *env, target_ulong ptr)
+{
+ if ((uint32_t)env->regs[R_ECX] != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ /* XXX: store address? */
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
+}
+
+void QEMU_NORETURN helper_mwait(CPUX86State *env, int next_eip_addend)
+{
+ CPUState *cs = env_cpu(env);
+
+ if ((uint32_t)env->regs[R_ECX] != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
+ env->eip += next_eip_addend;
+
+ /* XXX: not complete but not completely erroneous */
+ if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
+ do_pause(env);
+ } else {
+ do_hlt(env);
+ }
+}
diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c
index e0d7b32b82..82c0856c41 100644
--- a/target/i386/tcg/sysemu/seg_helper.c
+++ b/target/i386/tcg/sysemu/seg_helper.c
@@ -23,6 +23,7 @@
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"
+#include "../seg_helper.h"
#ifdef TARGET_X86_64
void helper_syscall(CPUX86State *env, int next_eip_addend)
@@ -123,3 +124,31 @@ void x86_cpu_do_interrupt(CPUState *cs)
env->old_exception = -1;
}
}
+
+/* check if Port I/O is allowed in TSS */
+void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
+{
+ uintptr_t retaddr = GETPC();
+ uint32_t io_offset, val, mask;
+
+ /* TSS must be a valid 32 bit one */
+ if (!(env->tr.flags & DESC_P_MASK) ||
+ ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
+ env->tr.limit < 103) {
+ goto fail;
+ }
+ io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
+ io_offset += (addr >> 3);
+ /* Note: the check needs two bytes */
+ if ((io_offset + 1) > env->tr.limit) {
+ goto fail;
+ }
+ val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
+ val >>= (addr & 7);
+ mask = (1 << size) - 1;
+ /* all bits must be zero to allow the I/O */
+ if ((val & mask) != 0) {
+ fail:
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+ }
+}
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index c4e8e717a9..9d671297cf 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -412,31 +412,6 @@ void helper_clgi(CPUX86State *env)
env->hflags2 &= ~HF2_GIF_MASK;
}
-void helper_skinit(CPUX86State *env)
-{
- cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
- /* XXX: not implemented */
- raise_exception(env, EXCP06_ILLOP);
-}
-
-void helper_invlpga(CPUX86State *env, int aflag)
-{
- X86CPU *cpu = env_archcpu(env);
- target_ulong addr;
-
- cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());
-
- if (aflag == 2) {
- addr = env->regs[R_EAX];
- } else {
- addr = (uint32_t)env->regs[R_EAX];
- }
-
- /* XXX: could use the ASID to see if it is needed to do the
- flush */
- tlb_flush_page(CPU(cpu), addr);
-}
-
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
uint64_t param, uintptr_t retaddr)
{
@@ -513,10 +488,9 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
}
}
-void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
- uint64_t param)
+void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
- cpu_svm_check_intercept_param(env, type, param, GETPC());
+ cpu_svm_check_intercept_param(env, type, 0, GETPC());
}
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 3ab8c23855..834186bcae 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -39,16 +39,7 @@
#define PREFIX_DATA 0x08
#define PREFIX_ADR 0x10
#define PREFIX_VEX 0x20
-
-#ifdef TARGET_X86_64
-#define CODE64(s) ((s)->code64)
-#define REX_X(s) ((s)->rex_x)
-#define REX_B(s) ((s)->rex_b)
-#else
-#define CODE64(s) 0
-#define REX_X(s) 0
-#define REX_B(s) 0
-#endif
+#define PREFIX_REX 0x40
#ifdef TARGET_X86_64
# define ctztl ctz64
@@ -85,42 +76,38 @@ static TCGv_i64 cpu_bndu[4];
typedef struct DisasContext {
DisasContextBase base;
- /* current insn context */
- int override; /* -1 if no override */
- int prefix;
+ target_ulong pc; /* pc = eip + cs_base */
+ target_ulong pc_start; /* pc at TB entry */
+ target_ulong cs_base; /* base of CS segment */
+
MemOp aflag;
MemOp dflag;
- target_ulong pc_start;
- target_ulong pc; /* pc = eip + cs_base */
- /* current block context */
- target_ulong cs_base; /* base of CS segment */
- int pe; /* protected mode */
- int code32; /* 32 bit code segment */
-#ifdef TARGET_X86_64
- int lma; /* long mode active */
- int code64; /* 64 bit code segment */
- int rex_x, rex_b;
+
+ int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
+ uint8_t prefix;
+
+#ifndef CONFIG_USER_ONLY
+ uint8_t cpl; /* code priv level */
+ uint8_t iopl; /* i/o priv level */
#endif
- int vex_l; /* vex vector length */
- int vex_v; /* vex vvvv register, without 1's complement. */
- int ss32; /* 32 bit stack segment */
- CCOp cc_op; /* current CC operation */
- bool cc_op_dirty;
+ uint8_t vex_l; /* vex vector length */
+ uint8_t vex_v; /* vex vvvv register, without 1's complement. */
+ uint8_t popl_esp_hack; /* for correct popl with esp base handling */
+ uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
+
#ifdef TARGET_X86_64
- bool x86_64_hregs;
+ uint8_t rex_r;
+ uint8_t rex_x;
+ uint8_t rex_b;
+ bool rex_w;
#endif
- int addseg; /* non zero if either DS/ES/SS have a non zero base */
- int f_st; /* currently unused */
- int vm86; /* vm86 mode */
- int cpl;
- int iopl;
- int tf; /* TF cpu flag */
- int jmp_opt; /* use direct block chaining for direct jumps */
- int repz_opt; /* optimize jumps within repz instructions */
+ bool jmp_opt; /* use direct block chaining for direct jumps */
+ bool repz_opt; /* optimize jumps within repz instructions */
+ bool cc_op_dirty;
+
+ CCOp cc_op; /* current CC operation */
int mem_index; /* select memory access functions */
- uint64_t flags; /* all execution flags */
- int popl_esp_hack; /* for correct popl with esp base handling */
- int rip_offset; /* only used in x86_64, but left for simplicity */
+ uint32_t flags; /* all execution flags */
int cpuid_features;
int cpuid_ext_features;
int cpuid_ext2_features;
@@ -146,11 +133,96 @@ typedef struct DisasContext {
sigjmp_buf jmpbuf;
} DisasContext;
+/* The environment in which user-only runs is constrained. */
+#ifdef CONFIG_USER_ONLY
+#define PE(S) true
+#define CPL(S) 3
+#define IOPL(S) 0
+#define SVME(S) false
+#define GUEST(S) false
+#else
+#define PE(S) (((S)->flags & HF_PE_MASK) != 0)
+#define CPL(S) ((S)->cpl)
+#define IOPL(S) ((S)->iopl)
+#define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
+#define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
+#endif
+#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
+#define VM86(S) false
+#define CODE32(S) true
+#define SS32(S) true
+#define ADDSEG(S) false
+#else
+#define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
+#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
+#define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
+#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
+#endif
+#if !defined(TARGET_X86_64)
+#define CODE64(S) false
+#define LMA(S) false
+#elif defined(CONFIG_USER_ONLY)
+#define CODE64(S) true
+#define LMA(S) true
+#else
+#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
+#define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
+#endif
+
+#ifdef TARGET_X86_64
+#define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
+#define REX_W(S) ((S)->rex_w)
+#define REX_R(S) ((S)->rex_r + 0)
+#define REX_X(S) ((S)->rex_x + 0)
+#define REX_B(S) ((S)->rex_b + 0)
+#else
+#define REX_PREFIX(S) false
+#define REX_W(S) false
+#define REX_R(S) 0
+#define REX_X(S) 0
+#define REX_B(S) 0
+#endif
+
+/*
+ * Many sysemu-only helpers are not reachable for user-only.
+ * Define stub generators here, so that we need not either sprinkle
+ * ifdefs through the translator, nor provide the helper function.
+ */
+#define STUB_HELPER(NAME, ...) \
+ static inline void gen_helper_##NAME(__VA_ARGS__) \
+ { qemu_build_not_reached(); }
+
+#ifdef CONFIG_USER_ONLY
+STUB_HELPER(clgi, TCGv_env env)
+STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
+STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
+STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
+STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
+STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
+STUB_HELPER(monitor, TCGv_env env, TCGv addr)
+STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
+STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
+STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
+STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
+STUB_HELPER(rdmsr, TCGv_env env)
+STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
+STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
+STUB_HELPER(stgi, TCGv_env env)
+STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
+STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
+STUB_HELPER(vmmcall, TCGv_env env)
+STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
+STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
+STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
+STUB_HELPER(wrmsr, TCGv_env env)
+#endif
+
static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s, TCGv dest);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
+static void gen_exception_gpf(DisasContext *s);
/* i386 arith/logic operations */
enum {
@@ -309,14 +381,10 @@ static void gen_update_cc_op(DisasContext *s)
*/
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
- if (reg < 4) {
+ /* Any time the REX prefix is present, byte registers are uniform */
+ if (reg < 4 || REX_PREFIX(s)) {
return false;
}
-#ifdef TARGET_X86_64
- if (reg >= 8 || s->x86_64_hregs) {
- return false;
- }
-#endif
return true;
}
@@ -333,7 +401,7 @@ static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
/* Select the size of the stack pointer. */
static inline MemOp mo_stacksize(DisasContext *s)
{
- return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+ return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}
/* Select only size 64 else 32. Used for SSE operand sizes. */
@@ -466,7 +534,7 @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
#endif
case MO_32:
/* 32 bit address */
- if (ovr_seg < 0 && s->addseg) {
+ if (ovr_seg < 0 && ADDSEG(s)) {
ovr_seg = def_seg;
}
if (ovr_seg < 0) {
@@ -479,7 +547,7 @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
tcg_gen_ext16u_tl(s->A0, a0);
a0 = s->A0;
if (ovr_seg < 0) {
- if (s->addseg) {
+ if (ADDSEG(s)) {
ovr_seg = def_seg;
} else {
return;
@@ -612,37 +680,40 @@ static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
}
}
-static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip,
+/*
+ * Validate that access to [port, port + 1<<ot) is allowed.
+ * Raise #GP, or VMM exit if not.
+ */
+static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
uint32_t svm_flags)
{
- target_ulong next_eip;
-
- if (s->pe && (s->cpl > s->iopl || s->vm86)) {
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- switch (ot) {
- case MO_8:
- gen_helper_check_iob(cpu_env, s->tmp2_i32);
- break;
- case MO_16:
- gen_helper_check_iow(cpu_env, s->tmp2_i32);
- break;
- case MO_32:
- gen_helper_check_iol(cpu_env, s->tmp2_i32);
- break;
- default:
- tcg_abort();
- }
+#ifdef CONFIG_USER_ONLY
+ /*
+ * We do not implement the ioperm(2) syscall, so the TSS check
+ * will always fail.
+ */
+ gen_exception_gpf(s);
+ return false;
+#else
+ if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
+ gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot));
}
- if(s->flags & HF_GUEST_MASK) {
+ if (GUEST(s)) {
+ target_ulong cur_eip = s->base.pc_next - s->cs_base;
+ target_ulong next_eip = s->pc - s->cs_base;
+
gen_update_cc_op(s);
gen_jmp_im(s, cur_eip);
- svm_flags |= (1 << (4 + ot));
- next_eip = s->pc - s->cs_base;
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_svm_check_io(cpu_env, s->tmp2_i32,
- tcg_const_i32(svm_flags),
- tcg_const_i32(next_eip - cur_eip));
+ if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ svm_flags |= SVM_IOIO_REP_MASK;
+ }
+ svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
+ gen_helper_svm_check_io(cpu_env, port,
+ tcg_constant_i32(svm_flags),
+ tcg_constant_i32(next_eip - cur_eip));
}
+ return true;
+#endif
}
static inline void gen_movs(DisasContext *s, MemOp ot)
@@ -1276,6 +1347,42 @@ static void gen_illegal_opcode(DisasContext *s)
gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
}
+/* Generate #GP for the current instruction. */
+static void gen_exception_gpf(DisasContext *s)
+{
+ gen_exception(s, EXCP0D_GPF, s->pc_start - s->cs_base);
+}
+
+/* Check for cpl == 0; if not, raise #GP and return false. */
+static bool check_cpl0(DisasContext *s)
+{
+ if (CPL(s) == 0) {
+ return true;
+ }
+ gen_exception_gpf(s);
+ return false;
+}
+
+/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
+static bool check_vm86_iopl(DisasContext *s)
+{
+ if (!VM86(s) || IOPL(s) == 3) {
+ return true;
+ }
+ gen_exception_gpf(s);
+ return false;
+}
+
+/* Check for iopl allowing access; if not, raise #GP and return false. */
+static bool check_iopl(DisasContext *s)
+{
+ if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
+ return true;
+ }
+ gen_exception_gpf(s);
+ return false;
+}
+
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
@@ -2309,14 +2416,14 @@ static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
{
- if (s->pe && !s->vm86) {
+ if (PE(s) && !VM86(s)) {
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32);
/* abort translation because the addseg value may change or
because ss32 may change. For R_SS, translation must always
stop as a special handling must be done to disable hardware
interrupts for the next instruction */
- if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) {
+ if (seg_reg == R_SS || (CODE32(s) && seg_reg < R_FS)) {
s->base.is_jmp = DISAS_TOO_MANY;
}
} else {
@@ -2327,28 +2434,13 @@ static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
}
}
-static inline int svm_is_rep(int prefixes)
-{
- return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
-}
-
-static inline void
-gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
- uint32_t type, uint64_t param)
+static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
{
/* no SVM activated; fast case */
- if (likely(!(s->flags & HF_GUEST_MASK)))
+ if (likely(!GUEST(s))) {
return;
- gen_update_cc_op(s);
- gen_jmp_im(s, pc_start - s->cs_base);
- gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
- tcg_const_i64(param));
-}
-
-static inline void
-gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
-{
- gen_svm_check_intercept_param(s, pc_start, type, 0);
+ }
+ gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type));
}
static inline void gen_stack_update(DisasContext *s, int addend)
@@ -2367,7 +2459,7 @@ static void gen_push_v(DisasContext *s, TCGv val)
tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
if (!CODE64(s)) {
- if (s->addseg) {
+ if (ADDSEG(s)) {
new_esp = s->tmp4;
tcg_gen_mov_tl(new_esp, s->A0);
}
@@ -2396,12 +2488,12 @@ static inline void gen_pop_update(DisasContext *s, MemOp ot)
static inline void gen_stack_A0(DisasContext *s)
{
- gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
+ gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
static void gen_pusha(DisasContext *s)
{
- MemOp s_ot = s->ss32 ? MO_32 : MO_16;
+ MemOp s_ot = SS32(s) ? MO_32 : MO_16;
MemOp d_ot = s->dflag;
int size = 1 << d_ot;
int i;
@@ -2417,7 +2509,7 @@ static void gen_pusha(DisasContext *s)
static void gen_popa(DisasContext *s)
{
- MemOp s_ot = s->ss32 ? MO_32 : MO_16;
+ MemOp s_ot = SS32(s) ? MO_32 : MO_16;
MemOp d_ot = s->dflag;
int size = 1 << d_ot;
int i;
@@ -2439,7 +2531,7 @@ static void gen_popa(DisasContext *s)
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
MemOp d_ot = mo_pushpop(s, s->dflag);
- MemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+ MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
int size = 1 << d_ot;
/* Push BP; compute FrameTemp into T1. */
@@ -2522,10 +2614,10 @@ static void gen_interrupt(DisasContext *s, int intno,
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_debug(DisasContext *s, target_ulong cur_eip)
+static void gen_debug(DisasContext *s)
{
gen_update_cc_op(s);
- gen_jmp_im(s, cur_eip);
+ gen_jmp_im(s, s->base.pc_next - s->cs_base);
gen_helper_debug(cpu_env);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -2591,7 +2683,7 @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
} else if (recheck_tf) {
gen_helper_rechecking_single_step(cpu_env);
tcg_gen_exit_tb(NULL, 0);
- } else if (s->tf) {
+ } else if (s->flags & HF_TF_MASK) {
gen_helper_single_step(cpu_env);
} else if (jr) {
tcg_gen_lookup_and_goto_ptr();
@@ -3034,7 +3126,7 @@ static const struct SSEOpHelper_eppi sse_op_table7[256] = {
};
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
- target_ulong pc_start, int rex_r)
+ target_ulong pc_start)
{
int b1, op1_offset, op2_offset, is_xmm, val;
int modrm, mod, rm, reg;
@@ -3104,8 +3196,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7);
- if (is_xmm)
- reg |= rex_r;
+ if (is_xmm) {
+ reg |= REX_R(s);
+ }
mod = (modrm >> 6) & 3;
if (sse_fn_epp == SSE_SPECIAL) {
b |= (b1 << 8);
@@ -3639,7 +3732,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
tcg_gen_ld16u_tl(s->T0, cpu_env,
offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
}
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
gen_op_mov_reg_v(s, ot, reg, s->T0);
break;
case 0x1d6: /* movq ea, xmm */
@@ -3683,7 +3776,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
offsetof(CPUX86State, fpregs[rm].mmx));
gen_helper_pmovmskb_mmx(s->tmp2_i32, cpu_env, s->ptr0);
}
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
break;
@@ -3695,7 +3788,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
}
modrm = x86_ldub_code(env, s);
rm = modrm & 7;
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
if (b1 >= 2) {
goto unknown_op;
@@ -3771,7 +3864,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* Various integer extensions at 0f 38 f[0-f]. */
b = modrm | (b1 << 8);
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
switch (b) {
case 0x3f0: /* crc32 Gd,Eb */
@@ -4125,7 +4218,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
b = modrm;
modrm = x86_ldub_code(env, s);
rm = modrm & 7;
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
if (b1 >= 2) {
goto unknown_op;
@@ -4145,7 +4238,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
rm = (modrm & 7) | REX_B(s);
if (mod != 3)
gen_lea_modrm(env, s, modrm);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
val = x86_ldub_code(env, s);
switch (b) {
case 0x14: /* pextrb */
@@ -4314,7 +4407,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* Various integer extensions at 0f 3a f[0-f]. */
b = modrm | (b1 << 8);
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
switch (b) {
case 0x3f0: /* rorx Gy,Ey, Ib */
@@ -4488,27 +4581,25 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
MemOp ot, aflag, dflag;
int modrm, reg, rm, mod, op, opreg, val;
target_ulong next_eip, tval;
- int rex_w, rex_r;
target_ulong pc_start = s->base.pc_next;
s->pc_start = s->pc = pc_start;
s->override = -1;
#ifdef TARGET_X86_64
+ s->rex_w = false;
+ s->rex_r = 0;
s->rex_x = 0;
s->rex_b = 0;
- s->x86_64_hregs = false;
#endif
s->rip_offset = 0; /* for relative ip address */
s->vex_l = 0;
s->vex_v = 0;
if (sigsetjmp(s->jmpbuf, 0) != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ gen_exception_gpf(s);
return s->pc;
}
prefixes = 0;
- rex_w = -1;
- rex_r = 0;
next_byte:
b = x86_ldub_code(env, s);
@@ -4551,12 +4642,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x40 ... 0x4f:
if (CODE64(s)) {
/* REX prefix */
- rex_w = (b >> 3) & 1;
- rex_r = (b & 0x4) << 1;
+ prefixes |= PREFIX_REX;
+ s->rex_w = (b >> 3) & 1;
+ s->rex_r = (b & 0x4) << 1;
s->rex_x = (b & 0x2) << 2;
- REX_B(s) = (b & 0x1) << 3;
- /* select uniform byte register addressing */
- s->x86_64_hregs = true;
+ s->rex_b = (b & 0x1) << 3;
goto next_byte;
}
break;
@@ -4565,7 +4655,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xc4: /* 3-byte VEX */
/* VEX prefixes cannot be used except in 32-bit mode.
Otherwise the instruction is LES or LDS. */
- if (s->code32 && !s->vm86) {
+ if (CODE32(s) && !VM86(s)) {
static const int pp_prefix[4] = {
0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
};
@@ -4580,27 +4670,24 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
/* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
- | PREFIX_LOCK | PREFIX_DATA)) {
+ | PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) {
goto illegal_op;
}
#ifdef TARGET_X86_64
- if (s->x86_64_hregs) {
- goto illegal_op;
- }
+ s->rex_r = (~vex2 >> 4) & 8;
#endif
- rex_r = (~vex2 >> 4) & 8;
if (b == 0xc5) {
/* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */
vex3 = vex2;
b = x86_ldub_code(env, s) | 0x100;
} else {
/* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */
+ vex3 = x86_ldub_code(env, s);
#ifdef TARGET_X86_64
s->rex_x = (~vex2 >> 3) & 8;
s->rex_b = (~vex2 >> 2) & 8;
+ s->rex_w = (vex3 >> 7) & 1;
#endif
- vex3 = x86_ldub_code(env, s);
- rex_w = (vex3 >> 7) & 1;
switch (vex2 & 0x1f) {
case 0x01: /* Implied 0f leading opcode bytes. */
b = x86_ldub_code(env, s) | 0x100;
@@ -4627,18 +4714,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
/* In 64-bit mode, the default data size is 32-bit. Select 64-bit
data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
over 0x66 if both are present. */
- dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
+ dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
/* In 64-bit mode, 0x67 selects 32-bit addressing. */
aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
} else {
/* In 16/32-bit mode, 0x66 selects the opposite data size. */
- if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
+ if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
dflag = MO_32;
} else {
dflag = MO_16;
}
/* In 16/32-bit mode, 0x67 selects the opposite addressing. */
- if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
+ if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
aflag = MO_32;
} else {
aflag = MO_16;
@@ -4678,7 +4765,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
switch(f) {
case 0: /* OP Ev, Gv */
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
@@ -4700,7 +4787,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 1: /* OP Gv, Ev */
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
@@ -5023,7 +5110,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
/* operand size for jumps is 64 bit */
ot = MO_64;
} else if (op == 3 || op == 5) {
- ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
+ ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
} else if (op == 6) {
/* default push size is 64 bit */
ot = mo_pushpop(s, dflag);
@@ -5072,7 +5159,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_add_A0_im(s, 1 << ot);
gen_op_ld_v(s, MO_16, s->T0, s->A0);
do_lcall:
- if (s->pe && !s->vm86) {
+ if (PE(s) && !VM86(s)) {
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
tcg_const_i32(dflag - 1),
@@ -5102,7 +5189,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_add_A0_im(s, 1 << ot);
gen_op_ld_v(s, MO_16, s->T0, s->A0);
do_ljmp:
- if (s->pe && !s->vm86) {
+ if (PE(s) && !VM86(s)) {
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
tcg_const_tl(s->pc - s->cs_base));
@@ -5126,7 +5213,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_v_reg(s, ot, s->T1, reg);
@@ -5198,7 +5285,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x6b:
ot = dflag;
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
if (b == 0x69)
s->rip_offset = insn_const_size(ot);
else if (b == 0x6b)
@@ -5250,7 +5337,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x1c1: /* xadd Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
gen_op_mov_v_reg(s, ot, s->T0, reg);
if (mod == 3) {
@@ -5282,7 +5369,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
oldv = tcg_temp_new();
newv = tcg_temp_new();
@@ -5480,7 +5567,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
if (s->base.is_jmp) {
gen_jmp_im(s, s->pc - s->cs_base);
if (reg == R_SS) {
- s->tf = 0;
+ s->flags &= ~HF_TF_MASK;
gen_eob_inhibit_irq(s, true);
} else {
gen_eob(s);
@@ -5504,7 +5591,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x89: /* mov Gv, Ev */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
@@ -5530,7 +5617,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x8b: /* mov Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_reg_v(s, ot, reg, s->T0);
@@ -5546,7 +5633,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
if (s->base.is_jmp) {
gen_jmp_im(s, s->pc - s->cs_base);
if (reg == R_SS) {
- s->tf = 0;
+ s->flags &= ~HF_TF_MASK;
gen_eob_inhibit_irq(s, true);
} else {
gen_eob(s);
@@ -5580,7 +5667,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
s_ot = b & 8 ? MO_SIGN | ot : ot;
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
@@ -5619,7 +5706,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
{
AddressParts a = gen_lea_modrm_0(env, s, modrm);
TCGv ea = gen_lea_modrm_1(s, a);
@@ -5701,7 +5788,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x87: /* xchg Ev, Gv */
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
@@ -5738,7 +5825,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
do_lxx:
ot = dflag != MO_16 ? MO_32 : MO_16;
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
@@ -5821,7 +5908,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
opreg = OR_TMP0;
@@ -6399,9 +6486,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x6c: /* insS */
case 0x6d:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
- gen_check_io(s, ot, pc_start - s->cs_base,
- SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32,
+ SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
+ break;
+ }
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
@@ -6418,9 +6508,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x6e: /* outsS */
case 0x6f:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
- gen_check_io(s, ot, pc_start - s->cs_base,
- svm_is_rep(prefixes) | 4);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
+ break;
+ }
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
@@ -6442,13 +6534,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xe5:
ot = mo_b_d32(b, dflag);
val = x86_ldub_code(env, s);
- tcg_gen_movi_tl(s->T0, val);
- gen_check_io(s, ot, pc_start - s->cs_base,
- SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
+ tcg_gen_movi_i32(s->tmp2_i32, val);
+ if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
+ break;
+ }
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_movi_i32(s->tmp2_i32, val);
gen_helper_in_func(ot, s->T1, s->tmp2_i32);
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
gen_bpt_io(s, s->tmp2_i32, ot);
@@ -6460,15 +6552,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xe7:
ot = mo_b_d32(b, dflag);
val = x86_ldub_code(env, s);
- tcg_gen_movi_tl(s->T0, val);
- gen_check_io(s, ot, pc_start - s->cs_base,
- svm_is_rep(prefixes));
- gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
-
+ tcg_gen_movi_i32(s->tmp2_i32, val);
+ if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
+ break;
+ }
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_movi_i32(s->tmp2_i32, val);
+ gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
gen_bpt_io(s, s->tmp2_i32, ot);
@@ -6479,13 +6570,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xec:
case 0xed:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
- gen_check_io(s, ot, pc_start - s->cs_base,
- SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
+ break;
+ }
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
gen_helper_in_func(ot, s->T1, s->tmp2_i32);
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
gen_bpt_io(s, s->tmp2_i32, ot);
@@ -6496,15 +6588,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xee:
case 0xef:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
- gen_check_io(s, ot, pc_start - s->cs_base,
- svm_is_rep(prefixes));
- gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
-
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
+ break;
+ }
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
gen_bpt_io(s, s->tmp2_i32, ot);
@@ -6535,7 +6627,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xca: /* lret im */
val = x86_ldsw_code(env, s);
do_lret:
- if (s->pe && !s->vm86) {
+ if (PE(s) && !VM86(s)) {
gen_update_cc_op(s);
gen_jmp_im(s, pc_start - s->cs_base);
gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
@@ -6560,23 +6652,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
val = 0;
goto do_lret;
case 0xcf: /* iret */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
- if (!s->pe) {
- /* real mode */
- gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
- set_cc_op(s, CC_OP_EFLAGS);
- } else if (s->vm86) {
- if (s->iopl != 3) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
- set_cc_op(s, CC_OP_EFLAGS);
+ gen_svm_check_intercept(s, SVM_EXIT_IRET);
+ if (!PE(s) || VM86(s)) {
+ /* real mode or vm86 mode */
+ if (!check_vm86_iopl(s)) {
+ break;
}
+ gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
} else {
gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
- set_cc_op(s, CC_OP_EFLAGS);
}
+ set_cc_op(s, CC_OP_EFLAGS);
gen_eob(s);
break;
case 0xe8: /* call im */
@@ -6680,29 +6767,25 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
}
ot = dflag;
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
gen_cmovcc1(env, s, ot, b, modrm, reg);
break;
/************************/
/* flags */
case 0x9c: /* pushf */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
- if (s->vm86 && s->iopl != 3) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
+ gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
+ if (check_vm86_iopl(s)) {
gen_update_cc_op(s);
gen_helper_read_eflags(s->T0, cpu_env);
gen_push_v(s, s->T0);
}
break;
case 0x9d: /* popf */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
- if (s->vm86 && s->iopl != 3) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
+ gen_svm_check_intercept(s, SVM_EXIT_POPF);
+ if (check_vm86_iopl(s)) {
ot = gen_pop_T0(s);
- if (s->cpl == 0) {
+ if (CPL(s) == 0) {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, s->T0,
tcg_const_i32((TF_MASK | AC_MASK |
@@ -6717,7 +6800,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
& 0xffff));
}
} else {
- if (s->cpl <= s->iopl) {
+ if (CPL(s) <= IOPL(s)) {
if (dflag != MO_16) {
gen_helper_write_eflags(cpu_env, s->T0,
tcg_const_i32((TF_MASK |
@@ -6830,7 +6913,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
do_btx:
ot = dflag;
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(s, MO_32, s->T1, reg);
@@ -6935,7 +7018,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x1bd: /* bsr / lzcnt */
ot = dflag;
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, s->T0);
@@ -7060,9 +7143,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xcd: /* int N */
val = x86_ldub_code(env, s);
- if (s->vm86 && s->iopl != 3) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
+ if (check_vm86_iopl(s)) {
gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
}
break;
@@ -7075,33 +7156,21 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
#ifdef WANT_ICEBP
case 0xf1: /* icebp (undocumented, exits to external debugger) */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
- gen_debug(s, pc_start - s->cs_base);
+ gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
+ gen_debug(s);
break;
#endif
case 0xfa: /* cli */
- if (!s->vm86) {
- if (s->cpl <= s->iopl) {
- gen_helper_cli(cpu_env);
- } else {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- }
- } else {
- if (s->iopl == 3) {
- gen_helper_cli(cpu_env);
- } else {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- }
+ if (check_iopl(s)) {
+ gen_helper_cli(cpu_env);
}
break;
case 0xfb: /* sti */
- if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
+ if (check_iopl(s)) {
gen_helper_sti(cpu_env);
/* interruptions are enabled only the first insn after sti */
gen_jmp_im(s, s->pc - s->cs_base);
gen_eob_inhibit_irq(s, true);
- } else {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
break;
case 0x62: /* bound */
@@ -7193,15 +7262,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0x130: /* wrmsr */
case 0x132: /* rdmsr */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
+ if (check_cpl0(s)) {
gen_update_cc_op(s);
gen_jmp_im(s, pc_start - s->cs_base);
if (b & 2) {
gen_helper_rdmsr(cpu_env);
} else {
gen_helper_wrmsr(cpu_env);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
}
}
break;
@@ -7220,13 +7289,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_update_cc_op(s);
gen_jmp_im(s, pc_start - s->cs_base);
gen_helper_rdpmc(cpu_env);
+ s->base.is_jmp = DISAS_NORETURN;
break;
case 0x134: /* sysenter */
/* For Intel SYSENTER is valid on 64-bit */
if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
- if (!s->pe) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!PE(s)) {
+ gen_exception_gpf(s);
} else {
gen_helper_sysenter(cpu_env);
gen_eob(s);
@@ -7236,8 +7306,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
/* For Intel SYSEXIT is valid on 64-bit */
if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
- if (!s->pe) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!PE(s)) {
+ gen_exception_gpf(s);
} else {
gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
gen_eob(s);
@@ -7255,12 +7325,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_eob_worker(s, false, true);
break;
case 0x107: /* sysret */
- if (!s->pe) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!PE(s)) {
+ gen_exception_gpf(s);
} else {
gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
/* condition codes are modified only in long mode */
- if (s->lma) {
+ if (LMA(s)) {
set_cc_op(s, CC_OP_EFLAGS);
}
/* TF handling for the sysret insn is different. The TF bit is
@@ -7277,9 +7347,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_helper_cpuid(cpu_env);
break;
case 0xf4: /* hlt */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
+ if (check_cpl0(s)) {
gen_update_cc_op(s);
gen_jmp_im(s, pc_start - s->cs_base);
gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
@@ -7292,42 +7360,38 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
op = (modrm >> 3) & 7;
switch(op) {
case 0: /* sldt */
- if (!s->pe || s->vm86)
+ if (!PE(s) || VM86(s))
goto illegal_op;
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
+ gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
tcg_gen_ld32u_tl(s->T0, cpu_env,
offsetof(CPUX86State, ldt.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 2: /* lldt */
- if (!s->pe || s->vm86)
+ if (!PE(s) || VM86(s))
goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
+ if (check_cpl0(s)) {
+ gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
gen_helper_lldt(cpu_env, s->tmp2_i32);
}
break;
case 1: /* str */
- if (!s->pe || s->vm86)
+ if (!PE(s) || VM86(s))
goto illegal_op;
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
+ gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
tcg_gen_ld32u_tl(s->T0, cpu_env,
offsetof(CPUX86State, tr.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 3: /* ltr */
- if (!s->pe || s->vm86)
+ if (!PE(s) || VM86(s))
goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
+ if (check_cpl0(s)) {
+ gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
gen_helper_ltr(cpu_env, s->tmp2_i32);
@@ -7335,7 +7399,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 4: /* verr */
case 5: /* verw */
- if (!s->pe || s->vm86)
+ if (!PE(s) || VM86(s))
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_update_cc_op(s);
@@ -7355,7 +7419,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
modrm = x86_ldub_code(env, s);
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* sgdt */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
+ gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(s->T0,
cpu_env, offsetof(CPUX86State, gdt.limit));
@@ -7369,7 +7433,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xc8: /* monitor */
- if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
goto illegal_op;
}
gen_update_cc_op(s);
@@ -7381,18 +7445,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xc9: /* mwait */
- if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
goto illegal_op;
}
gen_update_cc_op(s);
gen_jmp_im(s, pc_start - s->cs_base);
gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
- gen_eob(s);
+ s->base.is_jmp = DISAS_NORETURN;
break;
case 0xca: /* clac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
- || s->cpl != 0) {
+ || CPL(s) != 0) {
goto illegal_op;
}
gen_helper_clac(cpu_env);
@@ -7402,7 +7466,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xcb: /* stac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
- || s->cpl != 0) {
+ || CPL(s) != 0) {
goto illegal_op;
}
gen_helper_stac(cpu_env);
@@ -7411,7 +7475,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
CASE_MODRM_MEM_OP(1): /* sidt */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
+ gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
gen_op_st_v(s, MO_16, s->T0, s->A0);
@@ -7440,8 +7504,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
| PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
@@ -7454,11 +7517,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xd8: /* VMRUN */
- if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ if (!SVME(s) || !PE(s)) {
goto illegal_op;
}
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
gen_update_cc_op(s);
@@ -7470,7 +7532,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xd9: /* VMMCALL */
- if (!(s->flags & HF_SVME_MASK)) {
+ if (!SVME(s)) {
goto illegal_op;
}
gen_update_cc_op(s);
@@ -7479,11 +7541,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xda: /* VMLOAD */
- if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ if (!SVME(s) || !PE(s)) {
goto illegal_op;
}
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
gen_update_cc_op(s);
@@ -7492,11 +7553,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xdb: /* VMSAVE */
- if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ if (!SVME(s) || !PE(s)) {
goto illegal_op;
}
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
gen_update_cc_op(s);
@@ -7505,13 +7565,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xdc: /* STGI */
- if ((!(s->flags & HF_SVME_MASK)
- && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
- || !s->pe) {
+ if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !PE(s)) {
goto illegal_op;
}
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
gen_update_cc_op(s);
@@ -7521,11 +7579,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xdd: /* CLGI */
- if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ if (!SVME(s) || !PE(s)) {
goto illegal_op;
}
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
gen_update_cc_op(s);
@@ -7534,35 +7591,37 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0xde: /* SKINIT */
- if ((!(s->flags & HF_SVME_MASK)
- && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
- || !s->pe) {
+ if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !PE(s)) {
goto illegal_op;
}
- gen_update_cc_op(s);
- gen_jmp_im(s, pc_start - s->cs_base);
- gen_helper_skinit(cpu_env);
- break;
+ gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
+ /* If not intercepted, not implemented -- raise #UD. */
+ goto illegal_op;
case 0xdf: /* INVLPGA */
- if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ if (!SVME(s) || !PE(s)) {
goto illegal_op;
}
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
- gen_update_cc_op(s);
- gen_jmp_im(s, pc_start - s->cs_base);
- gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
+ gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
+ if (s->aflag == MO_64) {
+ tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
+ } else {
+ tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
+ }
+ gen_helper_flush_page(cpu_env, s->A0);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
break;
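
[Reviewer note] INVLPGA now computes the linear address from rAX inline (zero-extended unless the address size is 64-bit) and reuses a generic page-flush helper instead of a dedicated helper_invlpga. A plausible sketch of the sysemu side of helper_flush_page (assumption; the real definition lives in target/i386/tcg/sysemu/misc_helper.c, outside this hunk):

    void helper_flush_page(CPUX86State *env, target_ulong addr)
    {
        /* drop any TLB entry covering the page that contains addr */
        tlb_flush_page(env_cpu(env), addr);
    }
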
CASE_MODRM_MEM_OP(2): /* lgdt */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
+ gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_16, s->T1, s->A0);
gen_add_A0_im(s, 2);
@@ -7575,11 +7634,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
CASE_MODRM_MEM_OP(3): /* lidt */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
+ gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_16, s->T1, s->A0);
gen_add_A0_im(s, 2);
@@ -7592,7 +7650,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
CASE_MODRM_OP(4): /* smsw */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
+ gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
/*
* In 32-bit mode, the higher 16 bits of the destination
@@ -7620,27 +7678,33 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
break;
+
CASE_MODRM_OP(6): /* lmsw */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
+ gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_helper_lmsw(cpu_env, s->T0);
+ /*
+ * Only the 4 lower bits of CR0 are modified.
+ * PE cannot be set to zero if already set to one.
+ */
+ tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0]));
+ tcg_gen_andi_tl(s->T0, s->T0, 0xf);
+ tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
+ tcg_gen_or_tl(s->T0, s->T0, s->T1);
+ gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0);
gen_jmp_im(s, s->pc - s->cs_base);
gen_eob(s);
break;
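
[Reviewer note] The inline masking reproduces the architectural LMSW rules without a dedicated helper. As a worked example, with cr0 the current CR0 and src the 16-bit operand, the TCG ops above compute the plain-C equivalent (illustration only):

    uint32_t lmsw_result(uint32_t cr0, uint16_t src)
    {
        /* keep everything except bits 1-3; bits 0-3 of src are OR'ed in */
        return (cr0 & ~(uint32_t)0xe) | (src & 0xf);
    }

For cr0 = 0x80000011 (PG and PE set) and src = 0x0000 the result is still 0x80000011: bit 0 (PE) of the old value is never cleared, so once PE is set LMSW cannot drop it, while MP/EM/TS (bits 1-3) simply track the operand.
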
CASE_MODRM_MEM_OP(7): /* invlpg */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ if (!check_cpl0(s)) {
break;
}
- gen_update_cc_op(s);
- gen_jmp_im(s, pc_start - s->cs_base);
+ gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
gen_lea_modrm(env, s, modrm);
- gen_helper_invlpg(cpu_env, s->A0);
+ gen_helper_flush_page(cpu_env, s->A0);
gen_jmp_im(s, s->pc - s->cs_base);
gen_eob(s);
break;
@@ -7648,9 +7712,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
if (CODE64(s)) {
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
+ if (check_cpl0(s)) {
tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
offsetof(CPUX86State, kernelgsbase));
@@ -7684,10 +7746,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x108: /* invd */
case 0x109: /* wbinvd */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
+ if (check_cpl0(s)) {
+ gen_svm_check_intercept(s, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
/* nothing to do */
}
break;
@@ -7699,7 +7759,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
d_ot = dflag;
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
@@ -7721,7 +7781,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
TCGLabel *label1;
TCGv t0, t1, t2, a0;
- if (!s->pe || s->vm86)
+ if (!PE(s) || VM86(s))
goto illegal_op;
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
@@ -7769,11 +7829,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
{
TCGLabel *label1;
TCGv t0;
- if (!s->pe || s->vm86)
+ if (!PE(s) || VM86(s))
goto illegal_op;
ot = dflag != MO_16 ? MO_32 : MO_16;
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
t0 = tcg_temp_local_new();
gen_update_cc_op(s);
@@ -7814,7 +7874,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
modrm = x86_ldub_code(env, s);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
if (prefixes & PREFIX_REPZ) {
/* bndcl */
if (reg >= 4
@@ -7904,7 +7964,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
modrm = x86_ldub_code(env, s);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
if (mod != 3 && (prefixes & PREFIX_REPZ)) {
/* bndmk */
if (reg >= 4
@@ -8006,66 +8066,59 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
modrm = x86_ldub_code(env, s);
gen_nop_modrm(env, s, modrm);
break;
+
case 0x120: /* mov reg, crN */
case 0x122: /* mov crN, reg */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- modrm = x86_ldub_code(env, s);
- /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
- * AMD documentation (24594.pdf) and testing of
- * intel 386 and 486 processors all show that the mod bits
- * are assumed to be 1's, regardless of actual values.
- */
- rm = (modrm & 7) | REX_B(s);
- reg = ((modrm >> 3) & 7) | rex_r;
- if (CODE64(s))
- ot = MO_64;
- else
- ot = MO_32;
- if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
+ if (!check_cpl0(s)) {
+ break;
+ }
+ modrm = x86_ldub_code(env, s);
+ /*
+ * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
+ * AMD documentation (24594.pdf) and testing of Intel 386 and 486
+ * processors all show that the mod bits are assumed to be 1's,
+ * regardless of actual values.
+ */
+ rm = (modrm & 7) | REX_B(s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ switch (reg) {
+ case 0:
+ if ((prefixes & PREFIX_LOCK) &&
(s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
reg = 8;
}
- switch(reg) {
- case 0:
- case 2:
- case 3:
- case 4:
- case 8:
- gen_update_cc_op(s);
- gen_jmp_im(s, pc_start - s->cs_base);
- if (b & 2) {
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
- gen_op_mov_v_reg(s, ot, s->T0, rm);
- gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
- s->T0);
- gen_jmp_im(s, s->pc - s->cs_base);
- gen_eob(s);
- } else {
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
- gen_helper_read_crN(s->T0, cpu_env, tcg_const_i32(reg));
- gen_op_mov_reg_v(s, ot, rm, s->T0);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_jmp(s, s->pc - s->cs_base);
- }
- }
- break;
- default:
- goto unknown_op;
+ break;
+ case 2:
+ case 3:
+ case 4:
+ break;
+ default:
+ goto unknown_op;
+ }
+ ot = (CODE64(s) ? MO_64 : MO_32);
+
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ if (b & 2) {
+ gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
+ gen_op_mov_v_reg(s, ot, s->T0, rm);
+ gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ } else {
+ gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
+ gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
}
}
break;
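
[Reviewer note] The intercept code is formed by adding the CR index to a base exit code. This relies on the SVM exit codes for CR reads and writes being laid out consecutively, which is how the AMD architecture defines them:

    /* Assumed layout of the SVM exit-code space:
     *   SVM_EXIT_READ_CR0  .. SVM_EXIT_READ_CR15  : 0x000 .. 0x00f
     *   SVM_EXIT_WRITE_CR0 .. SVM_EXIT_WRITE_CR15 : 0x010 .. 0x01f
     * so SVM_EXIT_READ_CR0 + reg (or the WRITE variant) selects the exit
     * code for the CR number actually decoded, including CR8 via LOCK.
     */
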
+
case 0x121: /* mov reg, drN */
case 0x123: /* mov drN, reg */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
-#ifndef CONFIG_USER_ONLY
+ if (check_cpl0(s)) {
modrm = x86_ldub_code(env, s);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
@@ -8073,7 +8126,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
* are assumed to be 1's, regardless of actual values.
*/
rm = (modrm & 7) | REX_B(s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
if (CODE64(s))
ot = MO_64;
else
@@ -8082,26 +8135,23 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
goto illegal_op;
}
if (b & 2) {
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
+ gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
gen_op_mov_v_reg(s, ot, s->T0, rm);
tcg_gen_movi_i32(s->tmp2_i32, reg);
gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
gen_jmp_im(s, s->pc - s->cs_base);
gen_eob(s);
} else {
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
+ gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
tcg_gen_movi_i32(s->tmp2_i32, reg);
gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
gen_op_mov_reg_v(s, ot, rm, s->T0);
}
-#endif /* !CONFIG_USER_ONLY */
}
break;
case 0x106: /* clts */
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
+ if (check_cpl0(s)) {
+ gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
gen_helper_clts(cpu_env);
/* abort block because static cpu state changed */
gen_jmp_im(s, s->pc - s->cs_base);
@@ -8117,7 +8167,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
@@ -8328,7 +8378,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_nop_modrm(env, s, modrm);
break;
case 0x1aa: /* rsm */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
+ gen_svm_check_intercept(s, SVM_EXIT_RSM);
if (!(s->flags & HF_SMM_MASK))
goto illegal_op;
#ifdef CONFIG_USER_ONLY
@@ -8349,7 +8399,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
goto illegal_op;
modrm = x86_ldub_code(env, s);
- reg = ((modrm >> 3) & 7) | rex_r;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
if (s->prefix & PREFIX_DATA) {
ot = MO_16;
@@ -8377,7 +8427,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x1c2:
case 0x1c4 ... 0x1c6:
case 0x1d0 ... 0x1fe:
- gen_sse(env, s, b, pc_start, rex_r);
+ gen_sse(env, s, b, pc_start);
break;
default:
goto unknown_op;
@@ -8477,20 +8527,31 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUX86State *env = cpu->env_ptr;
uint32_t flags = dc->base.tb->flags;
- target_ulong cs_base = dc->base.tb->cs_base;
-
- dc->pe = (flags >> HF_PE_SHIFT) & 1;
- dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
- dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
- dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
- dc->f_st = 0;
- dc->vm86 = (flags >> VM_SHIFT) & 1;
- dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
- dc->iopl = (flags >> IOPL_SHIFT) & 3;
- dc->tf = (flags >> TF_SHIFT) & 1;
+ int cpl = (flags >> HF_CPL_SHIFT) & 3;
+ int iopl = (flags >> IOPL_SHIFT) & 3;
+
+ dc->cs_base = dc->base.tb->cs_base;
+ dc->flags = flags;
+#ifndef CONFIG_USER_ONLY
+ dc->cpl = cpl;
+ dc->iopl = iopl;
+#endif
+
+ /* We make some simplifying assumptions; validate they're correct. */
+ g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
+ g_assert(CPL(dc) == cpl);
+ g_assert(IOPL(dc) == iopl);
+ g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
+ g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
+ g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
+ g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
+ g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
+ g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
+ g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
+ g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
+
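
[Reviewer note] The asserts only make sense if the PE()/CPL()/... accessors are cheap reads of the cached tb->flags, with the privileged fields reduced to constants in the user-only build (which is why cpl/iopl are stored under #ifndef CONFIG_USER_ONLY above). A hypothetical sketch of such accessors, for illustration only:

    #define PE(S)    (((S)->flags & HF_PE_MASK) != 0)
    #define VM86(S)  (((S)->flags & HF_VM_MASK) != 0)
    #define SVME(S)  (((S)->flags & HF_SVME_MASK) != 0)
    #ifdef CONFIG_USER_ONLY
    #define CPL(S)   3   /* user-mode emulation always executes at CPL 3 */
    #define IOPL(S)  0
    #else
    #define CPL(S)   ((S)->cpl)
    #define IOPL(S)  ((S)->iopl)
    #endif

With constant CPL/IOPL, checks such as check_cpl0() fold away at compile time for the user-mode targets.
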
dc->cc_op = CC_OP_DYNAMIC;
dc->cc_op_dirty = false;
- dc->cs_base = cs_base;
dc->popl_esp_hack = 0;
/* select memory access functions */
dc->mem_index = 0;
@@ -8503,29 +8564,14 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
-#ifdef TARGET_X86_64
- dc->lma = (flags >> HF_LMA_SHIFT) & 1;
- dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
-#endif
- dc->flags = flags;
- dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled ||
- (flags & HF_INHIBIT_IRQ_MASK));
- /* Do not optimize repz jumps at all in icount mode, because
- rep movsS instructions are execured with different paths
- in !repz_opt and repz_opt modes. The first one was used
- always except single step mode. And this setting
- disables jumps optimization and control paths become
- equivalent in run and single step modes.
- Now there will be no jump optimization for repz in
- record/replay modes and there will always be an
- additional step for ecx=0 when icount is enabled.
+ dc->jmp_opt = !(dc->base.singlestep_enabled ||
+ (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
+ /*
+ * If jmp_opt, we want to handle each string instruction individually.
+ * For icount also disable repz optimization so that each iteration
+ * is accounted separately.
*/
dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
-#if 0
- /* check addseg logic */
- if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
- printf("ERROR addseg\n");
-#endif
dc->T0 = tcg_temp_new();
dc->T1 = tcg_temp_new();
@@ -8559,8 +8605,7 @@ static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
/* If RF is set, suppress an internally generated breakpoint. */
int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY;
if (bp->flags & flags) {
- gen_debug(dc, dc->base.pc_next - dc->cs_base);
- dc->base.is_jmp = DISAS_NORETURN;
+ gen_debug(dc);
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
@@ -8590,7 +8635,7 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
pc_next = disas_insn(dc, cpu);
- if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) {
+ if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
/* if single step mode, we generate only one instruction and
generate an exception */
/* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
diff --git a/target/i386/tcg/user/meson.build b/target/i386/tcg/user/meson.build
index 9eac0e69ca..1df6bc4343 100644
--- a/target/i386/tcg/user/meson.build
+++ b/target/i386/tcg/user/meson.build
@@ -1,6 +1,4 @@
i386_user_ss.add(when: ['CONFIG_TCG', 'CONFIG_USER_ONLY'], if_true: files(
'excp_helper.c',
- 'misc_stubs.c',
- 'svm_stubs.c',
'seg_helper.c',
))
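
[Reviewer note] Dropping the user-mode stubs implies these helpers are no longer referenced at all when CONFIG_USER_ONLY is set; presumably the DEF_HELPER declarations (target/i386/helper.h is also touched by this series) and the translator call sites are now guarded for the system emulator only. A sketch of the kind of guard this would require (illustrative, not the literal helper.h hunk):

    #ifndef CONFIG_USER_ONLY
    DEF_HELPER_3(outb, void, env, i32, i32)
    DEF_HELPER_2(inb, tl, env, i32)
    /* ... remaining I/O, MSR, CR and SVM helpers ... */
    #endif
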
diff --git a/target/i386/tcg/user/misc_stubs.c b/target/i386/tcg/user/misc_stubs.c
deleted file mode 100644
index 84df4e65ff..0000000000
--- a/target/i386/tcg/user/misc_stubs.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * x86 misc helpers
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-
-void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
-{
- g_assert_not_reached();
-}
-
-target_ulong helper_inb(CPUX86State *env, uint32_t port)
-{
- g_assert_not_reached();
- return 0;
-}
-
-void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
-{
- g_assert_not_reached();
-}
-
-target_ulong helper_inw(CPUX86State *env, uint32_t port)
-{
- g_assert_not_reached();
- return 0;
-}
-
-void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
-{
- g_assert_not_reached();
-}
-
-target_ulong helper_inl(CPUX86State *env, uint32_t port)
-{
- g_assert_not_reached();
- return 0;
-}
-
-target_ulong helper_read_crN(CPUX86State *env, int reg)
-{
- g_assert_not_reached();
-}
-
-void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
-{
- g_assert_not_reached();
-}
-
-void helper_wrmsr(CPUX86State *env)
-{
- g_assert_not_reached();
-}
-
-void helper_rdmsr(CPUX86State *env)
-{
- g_assert_not_reached();
-}
diff --git a/target/i386/tcg/user/svm_stubs.c b/target/i386/tcg/user/svm_stubs.c
deleted file mode 100644
index 97528b56ad..0000000000
--- a/target/i386/tcg/user/svm_stubs.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * x86 SVM helpers (user-mode)
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-#include "tcg/helper-tcg.h"
-
-void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
-{
-}
-
-void helper_vmmcall(CPUX86State *env)
-{
-}
-
-void helper_vmload(CPUX86State *env, int aflag)
-{
-}
-
-void helper_vmsave(CPUX86State *env, int aflag)
-{
-}
-
-void helper_stgi(CPUX86State *env)
-{
-}
-
-void helper_clgi(CPUX86State *env)
-{
-}
-
-void helper_skinit(CPUX86State *env)
-{
-}
-
-void helper_invlpga(CPUX86State *env, int aflag)
-{
-}
-
-void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
- uintptr_t retaddr)
-{
- assert(0);
-}
-
-void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
- uint64_t param)
-{
-}
-
-void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
- uint64_t param, uintptr_t retaddr)
-{
-}
-
-void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
- uint32_t next_eip_addend)
-{
-}