From d90b94cd78af672cdfd52dc3789ab249534c2f40 Mon Sep 17 00:00:00 2001 From: Doug Kwan Date: Thu, 29 May 2014 09:12:19 -0500 Subject: target-ppc: Support little-endian PPC64 in user mode. Look at ELF header to determine ABI version on PPC64. This is required for executing the first instruction correctly. Also print correct machine name in uname() system call. Signed-off-by: Doug Kwan Signed-off-by: Tom Musta Signed-off-by: Alexander Graf --- linux-user/elfload.c | 17 +++++++++++++++-- linux-user/ppc/syscall.h | 4 ++++ 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'linux-user') diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 68b9793649..d08fc80051 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -784,12 +784,18 @@ static uint32_t get_elf_hwcap(void) NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ } while (0) +static inline uint32_t get_ppc64_abi(struct image_info *infop); + static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop) { _regs->gpr[1] = infop->start_stack; #if defined(TARGET_PPC64) && !defined(TARGET_ABI32) - _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_bias; - infop->entry = ldq_raw(infop->entry) + infop->load_bias; + if (get_ppc64_abi(infop) < 2) { + _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_bias; + infop->entry = ldq_raw(infop->entry) + infop->load_bias; + } else { + _regs->gpr[12] = infop->entry; /* r12 set to global entry address */ + } #endif _regs->nip = infop->entry; } @@ -1159,6 +1165,13 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i #include "elf.h" +#ifdef TARGET_PPC +static inline uint32_t get_ppc64_abi(struct image_info *infop) +{ + return infop->elf_flags & EF_PPC64_ABI; +} +#endif + struct exec { unsigned int a_info; /* Use macros N_MAGIC, etc for access */ diff --git a/linux-user/ppc/syscall.h b/linux-user/ppc/syscall.h index 6514c637a5..db92bbee17 100644 --- a/linux-user/ppc/syscall.h +++ b/linux-user/ppc/syscall.h @@ -58,8 +58,12 @@ struct target_revectored_struct { */ #if defined(TARGET_PPC64) && !defined(TARGET_ABI32) +#ifdef TARGET_WORDS_BIGENDIAN #define UNAME_MACHINE "ppc64" #else +#define UNAME_MACHINE "ppc64le" +#endif +#else #define UNAME_MACHINE "ppc" #endif #define UNAME_MINIMUM_RELEASE "2.6.32" -- cgit v1.2.3-55-g7522 From e22c357b3ec01c1141969ae81397d60d52e8c87b Mon Sep 17 00:00:00 2001 From: Doug Kwan Date: Thu, 29 May 2014 09:12:20 -0500 Subject: target-ppc: Allow little-endian user mode. This allows running PPC64 little-endian in user mode if target is configured that way. In PPC64 LE user mode we set MSR.LE during initialization. 
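Setting MSR.LE means the translator and helpers in the diff below must decide when a memory access really needs a byteswap: the answer depends both on the endianness the target binary was built for (TARGET_WORDS_BIGENDIAN) and on the runtime MSR.LE bit. A minimal standalone sketch of that decision follows; build_big_endian and the lwz/lwbrx cases are illustrative stand-ins, not the QEMU helpers themselves.

/* Hedged sketch of the byteswap decision used in this patch; "build_big_endian"
 * stands in for the compile-time TARGET_WORDS_BIGENDIAN setting and "msr_le"
 * for the runtime MSR.LE bit.  Not QEMU code. */
#include <stdbool.h>
#include <stdio.h>

static bool needs_byteswap(bool build_big_endian, bool msr_le)
{
    /* Big-endian target build: swap only when MSR.LE is set.
     * Little-endian target build (ppc64le user mode): swap when it is clear. */
    return build_big_endian ? msr_le : !msr_le;
}

int main(void)
{
    /* ppc64le user mode sets MSR.LE at reset, so plain loads need no swap... */
    printf("ppc64le, lwz  : swap=%d\n", needs_byteswap(false, true));   /* 0 */
    /* ...while the byte-reversed forms (lwbrx, stwbrx) invert the decision. */
    printf("ppc64le, lwbrx: swap=%d\n", !needs_byteswap(false, true));  /* 1 */
    return 0;
}

The byte-reversed instructions simply invert the default, which the translation code below expresses as default_tcg_memop_mask ^ MO_BSWAP.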
Signed-off-by: Doug Kwan Signed-off-by: Tom Musta Signed-off-by: Alexander Graf --- linux-user/main.c | 8 ++- target-ppc/mem_helper.c | 26 ++++++-- target-ppc/translate.c | 151 ++++++++++++++++++-------------------------- target-ppc/translate_init.c | 3 + 4 files changed, 92 insertions(+), 96 deletions(-) (limited to 'linux-user') diff --git a/linux-user/main.c b/linux-user/main.c index 3e21024056..f577e19646 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -1484,7 +1484,7 @@ static int do_store_exclusive(CPUPPCState *env) { target_ulong addr; target_ulong page_addr; - target_ulong val, val2 __attribute__((unused)); + target_ulong val, val2 __attribute__((unused)) = 0; int flags; int segv = 0; @@ -1527,6 +1527,12 @@ static int do_store_exclusive(CPUPPCState *env) case 8: segv = put_user_u64(val, addr); break; case 16: { if (val2 == env->reserve_val2) { + if (msr_le) { + val2 = val; + val = env->gpr[reg+1]; + } else { + val2 = env->gpr[reg+1]; + } segv = put_user_u64(val, addr); if (!segv) { segv = put_user_u64(val2, addr + 8); diff --git a/target-ppc/mem_helper.c b/target-ppc/mem_helper.c index 02b627e47b..50344b81cf 100644 --- a/target-ppc/mem_helper.c +++ b/target-ppc/mem_helper.c @@ -25,6 +25,15 @@ //#define DEBUG_OP +static inline bool needs_byteswap(const CPUPPCState *env) +{ +#if defined(TARGET_WORDS_BIGENDIAN) + return msr_le; +#else + return !msr_le; +#endif +} + /*****************************************************************************/ /* Memory load and stores */ @@ -44,7 +53,7 @@ static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr, void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg) { for (; reg < 32; reg++) { - if (msr_le) { + if (needs_byteswap(env)) { env->gpr[reg] = bswap32(cpu_ldl_data(env, addr)); } else { env->gpr[reg] = cpu_ldl_data(env, addr); @@ -56,7 +65,7 @@ void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg) void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg) { for (; reg < 32; reg++) { - if (msr_le) { + if (needs_byteswap(env)) { cpu_stl_data(env, addr, bswap32((uint32_t)env->gpr[reg])); } else { cpu_stl_data(env, addr, (uint32_t)env->gpr[reg]); @@ -199,6 +208,11 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg, #define LO_IDX 0 #endif +/* We use msr_le to determine index ordering in a vector. However, + byteswapping is not simply controlled by msr_le. We also need to take + into account endianness of the target. This is done for the little-endian + PPC64 user-mode target. */ + #define LVE(name, access, swap, element) \ void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ target_ulong addr) \ @@ -207,9 +221,11 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg, int adjust = HI_IDX*(n_elems - 1); \ int sh = sizeof(r->element[0]) >> 1; \ int index = (addr & 0xf) >> sh; \ - \ if (msr_le) { \ index = n_elems - index - 1; \ + } \ + \ + if (needs_byteswap(env)) { \ r->element[LO_IDX ? index : (adjust - index)] = \ swap(access(env, addr)); \ } else { \ @@ -232,9 +248,11 @@ LVE(lvewx, cpu_ldl_data, bswap32, u32) int adjust = HI_IDX * (n_elems - 1); \ int sh = sizeof(r->element[0]) >> 1; \ int index = (addr & 0xf) >> sh; \ - \ if (msr_le) { \ index = n_elems - index - 1; \ + } \ + \ + if (needs_byteswap(env)) { \ access(env, addr, swap(r->element[LO_IDX ? 
index : \ (adjust - index)])); \ } else { \ diff --git a/target-ppc/translate.c b/target-ppc/translate.c index 3086ec5940..715bc74ea8 100644 --- a/target-ppc/translate.c +++ b/target-ppc/translate.c @@ -196,6 +196,7 @@ typedef struct DisasContext { int access_type; /* Translation flags */ int le_mode; + TCGMemOp default_tcg_memop_mask; #if defined(TARGET_PPC64) int sf_mode; int has_cfar; @@ -210,6 +211,16 @@ typedef struct DisasContext { uint64_t insns_flags2; } DisasContext; +/* Return true iff byteswap is needed in a scalar memop */ +static inline bool need_byteswap(const DisasContext *ctx) +{ +#if defined(TARGET_WORDS_BIGENDIAN) + return ctx->le_mode; +#else + return !ctx->le_mode; +#endif +} + /* True when active word size < size of target_long. */ #ifdef TARGET_PPC64 # define NARROW_MODE(C) (!(C)->sf_mode) @@ -2660,29 +2671,20 @@ static inline void gen_qemu_ld8s(DisasContext *ctx, TCGv arg1, TCGv arg2) static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2) { - tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx); - if (unlikely(ctx->le_mode)) { - tcg_gen_bswap16_tl(arg1, arg1); - } + TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask; + tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op); } static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2) { - if (unlikely(ctx->le_mode)) { - tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx); - tcg_gen_bswap16_tl(arg1, arg1); - tcg_gen_ext16s_tl(arg1, arg1); - } else { - tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx); - } + TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask; + tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op); } static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2) { - tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx); - if (unlikely(ctx->le_mode)) { - tcg_gen_bswap32_tl(arg1, arg1); - } + TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask; + tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op); } static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr) @@ -2695,12 +2697,8 @@ static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr) static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2) { - if (unlikely(ctx->le_mode)) { - tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx); - tcg_gen_bswap32_tl(arg1, arg1); - tcg_gen_ext32s_tl(arg1, arg1); - } else - tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx); + TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask; + tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op); } static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr) @@ -2713,10 +2711,8 @@ static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr) static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2) { - tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx); - if (unlikely(ctx->le_mode)) { - tcg_gen_bswap64_i64(arg1, arg1); - } + TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask; + tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op); } static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2) @@ -2726,28 +2722,14 @@ static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2) static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2) { - if (unlikely(ctx->le_mode)) { - TCGv t0 = tcg_temp_new(); - tcg_gen_ext16u_tl(t0, arg1); - tcg_gen_bswap16_tl(t0, t0); - tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx); - tcg_temp_free(t0); - } else { - tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx); - } + TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask; + tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, 
op); } static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2) { - if (unlikely(ctx->le_mode)) { - TCGv t0 = tcg_temp_new(); - tcg_gen_ext32u_tl(t0, arg1); - tcg_gen_bswap32_tl(t0, t0); - tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx); - tcg_temp_free(t0); - } else { - tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx); - } + TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask; + tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op); } static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr) @@ -2760,13 +2742,8 @@ static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr) static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2) { - if (unlikely(ctx->le_mode)) { - TCGv_i64 t0 = tcg_temp_new_i64(); - tcg_gen_bswap64_i64(t0, arg1); - tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx); - tcg_temp_free_i64(t0); - } else - tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx); + TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask; + tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op); } #define GEN_LD(name, ldop, opc, type) \ @@ -2910,6 +2887,8 @@ static void gen_lq(DisasContext *ctx) EA = tcg_temp_new(); gen_addr_imm_index(ctx, EA, 0x0F); + /* We only need to swap high and low halves. gen_qemu_ld64 does necessary + 64-bit byteswap already. */ if (unlikely(ctx->le_mode)) { gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA); gen_addr_add(ctx, EA, EA, 8); @@ -3028,6 +3007,8 @@ static void gen_std(DisasContext *ctx) EA = tcg_temp_new(); gen_addr_imm_index(ctx, EA, 0x03); + /* We only need to swap high and low halves. gen_qemu_st64 does + necessary 64-bit byteswap already. */ if (unlikely(ctx->le_mode)) { gen_qemu_st64(ctx, cpu_gpr[rs+1], EA); gen_addr_add(ctx, EA, EA, 8); @@ -3057,23 +3038,20 @@ static void gen_std(DisasContext *ctx) } #endif /*** Integer load and store with byte reverse ***/ + /* lhbrx */ static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2) { - tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx); - if (likely(!ctx->le_mode)) { - tcg_gen_bswap16_tl(arg1, arg1); - } + TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP); + tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op); } GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER); /* lwbrx */ static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2) { - tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx); - if (likely(!ctx->le_mode)) { - tcg_gen_bswap32_tl(arg1, arg1); - } + TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP); + tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op); } GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER); @@ -3081,10 +3059,8 @@ GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER); /* ldbrx */ static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2) { - tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx); - if (likely(!ctx->le_mode)) { - tcg_gen_bswap64_tl(arg1, arg1); - } + TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP); + tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op); } GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX); #endif /* TARGET_PPC64 */ @@ -3092,30 +3068,16 @@ GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX); /* sthbrx */ static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2) { - if (likely(!ctx->le_mode)) { - TCGv t0 = tcg_temp_new(); - tcg_gen_ext16u_tl(t0, arg1); - tcg_gen_bswap16_tl(t0, t0); - tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx); - tcg_temp_free(t0); - } else { - tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx); - } + TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP); + 
tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op); } GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER); /* stwbrx */ static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2) { - if (likely(!ctx->le_mode)) { - TCGv t0 = tcg_temp_new(); - tcg_gen_ext32u_tl(t0, arg1); - tcg_gen_bswap32_tl(t0, t0); - tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx); - tcg_temp_free(t0); - } else { - tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx); - } + TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP); + tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op); } GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER); @@ -3123,14 +3085,8 @@ GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER); /* stdbrx */ static inline void gen_qemu_st64r(DisasContext *ctx, TCGv arg1, TCGv arg2) { - if (likely(!ctx->le_mode)) { - TCGv t0 = tcg_temp_new(); - tcg_gen_bswap64_tl(t0, arg1); - tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx); - tcg_temp_free(t0); - } else { - tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx); - } + TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP); + tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op); } GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX); #endif /* TARGET_PPC64 */ @@ -3550,7 +3506,9 @@ static void gen_lfdp(DisasContext *ctx) } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0); \ + gen_addr_imm_index(ctx, EA, 0); + /* We only need to swap high and low halves. gen_qemu_ld64 does necessary + 64-bit byteswap already. */ if (unlikely(ctx->le_mode)) { gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); tcg_gen_addi_tl(EA, EA, 8); @@ -3574,6 +3532,8 @@ static void gen_lfdpx(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); + /* We only need to swap high and low halves. gen_qemu_ld64 does necessary + 64-bit byteswap already. */ if (unlikely(ctx->le_mode)) { gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); tcg_gen_addi_tl(EA, EA, 8); @@ -3722,7 +3682,9 @@ static void gen_stfdp(DisasContext *ctx) } gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0); \ + gen_addr_imm_index(ctx, EA, 0); + /* We only need to swap high and low halves. gen_qemu_st64 does necessary + 64-bit byteswap already. */ if (unlikely(ctx->le_mode)) { gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); tcg_gen_addi_tl(EA, EA, 8); @@ -3746,6 +3708,8 @@ static void gen_stfdpx(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_FLOAT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); + /* We only need to swap high and low halves. gen_qemu_st64 does necessary + 64-bit byteswap already. */ if (unlikely(ctx->le_mode)) { gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); tcg_gen_addi_tl(EA, EA, 8); @@ -6716,6 +6680,8 @@ static void glue(gen_, name)(DisasContext *ctx) EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ tcg_gen_andi_tl(EA, EA, ~0xf); \ + /* We only need to swap high and low halves. gen_qemu_ld64 does necessary \ + 64-bit byteswap already. */ \ if (ctx->le_mode) { \ gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \ tcg_gen_addi_tl(EA, EA, 8); \ @@ -6740,6 +6706,8 @@ static void gen_st##name(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ tcg_gen_andi_tl(EA, EA, ~0xf); \ + /* We only need to swap high and low halves. gen_qemu_st64 does necessary \ + 64-bit byteswap already. 
*/ \ if (ctx->le_mode) { \ gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \ tcg_gen_addi_tl(EA, EA, 8); \ @@ -11742,6 +11710,7 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu, ctx.insns_flags2 = env->insns_flags2; ctx.access_type = -1; ctx.le_mode = env->hflags & (1 << MSR_LE) ? 1 : 0; + ctx.default_tcg_memop_mask = ctx.le_mode ? MO_LE : MO_BE; #if defined(TARGET_PPC64) ctx.sf_mode = msr_is_64bit(env, env->msr); ctx.has_cfar = !!(env->flags & POWERPC_FLAG_CFAR); @@ -11807,7 +11776,7 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu, ctx.nip, ctx.mem_idx, (int)msr_ir); if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); - if (unlikely(ctx.le_mode)) { + if (unlikely(need_byteswap(&ctx))) { ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip)); } else { ctx.opcode = cpu_ldl_code(env, ctx.nip); diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c index 9b342c0438..5f5a8ad7c6 100644 --- a/target-ppc/translate_init.c +++ b/target-ppc/translate_init.c @@ -9315,6 +9315,9 @@ static void ppc_cpu_reset(CPUState *s) msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */ msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */ msr |= (target_ulong)1 << MSR_PR; +#if !defined(TARGET_WORDS_BIGENDIAN) + msr |= (target_ulong)1 << MSR_LE; /* Little-endian user mode */ +#endif #endif #if defined(TARGET_PPC64) -- cgit v1.2.3-55-g7522 From f46e9a0b9911fcfbc13f85f3a8808067990a0f5c Mon Sep 17 00:00:00 2001 From: Tom Musta Date: Thu, 29 May 2014 09:12:23 -0500 Subject: target-ppc: Confirm That .bss Pages Are Valid The existing code does a check to ensure that a .bss region is properly mmap'd. When additional mmap is required, the (guest) pages are also validated. However, this code has a bug: when host page size is larger than target page size, it is possible for the .bss pages to already be (host) mapped but the guest .bss pages may not be valid. The check to mmap additional space is separated from the flagging of the target (guest) pages, thus ensuring that both aspects are done properly. Signed-off-by: Tom Musta Signed-off-by: Alexander Graf --- linux-user/elfload.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'linux-user') diff --git a/linux-user/elfload.c b/linux-user/elfload.c index d08fc80051..eb8d3adce3 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -1425,10 +1425,11 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot) perror("cannot mmap brk"); exit(-1); } + } - /* Since we didn't use target_mmap, make sure to record - the validity of the pages with qemu. */ - page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID); + /* Ensure that the bss page(s) are valid */ + if ((page_get_flags(last_bss-1) & prot) != prot) { + page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID); } if (host_start < host_map_start) { -- cgit v1.2.3-55-g7522 From 4b1daa72d3b68b050bb9013edd0888972a0e22dd Mon Sep 17 00:00:00 2001 From: Tom Musta Date: Thu, 29 May 2014 09:12:24 -0500 Subject: target-ppc: Store Quadword Conditional Drops Size Bit The size and register information are encoded into the reserve_info field of CPU state in the store conditional translation code. Specifically, the size is shifted left by 5 bits (see target-ppc/translate.c gen_conditional_store). The user-mode store conditional code erroneously extracts the size by ANDing with a 4 bit mask; this breaks if size >= 16. 
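A small standalone illustration of the overflow, using example values rather than QEMU code:

/* Sketch of the reserve_info encoding described above
 * (gen_conditional_store): low 5 bits = register, remaining bits = size. */
#include <stdio.h>

int main(void)
{
    unsigned reg = 6, size = 16;                 /* stqcx. is a 16-byte access */
    unsigned reserve_info = (size << 5) | reg;

    printf("buggy size: %u\n", (reserve_info >> 5) & 0xf); /* 0  - bit 4 of size lost */
    printf("fixed size: %u\n",  reserve_info >> 5);        /* 16 */
    return 0;
}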
Eliminate the mask to make the extraction of size mirror its encoding. Signed-off-by: Tom Musta Signed-off-by: Alexander Graf --- linux-user/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-user') diff --git a/linux-user/main.c b/linux-user/main.c index f577e19646..a87c6f7ed4 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -1497,7 +1497,7 @@ static int do_store_exclusive(CPUPPCState *env) segv = 1; } else { int reg = env->reserve_info & 0x1f; - int size = (env->reserve_info >> 5) & 0xf; + int size = env->reserve_info >> 5; int stored = 0; if (addr == env->reserve_addr) { -- cgit v1.2.3-55-g7522 From a70daba3771e96cc6b8fd3d11ed297ab13717018 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 5 Jun 2014 11:39:43 +0200 Subject: linux-user: Tell guest about big host page sizes We tell the guest its page size via AUX vectors. The guest process then uses this page size as information on which boundaries it can mmap() things. However, if the host has a bigger page size granularity than the guest, it can not fulfill these mmap() requests - which falls apart when MAP_FIXED is passed to mmap. So in that case, let the guest know that we're running on a bigger page size granularity than the target would require. This fixes running qemu-ppc (TARGET_PAGE_SIZE=4k) on a 64k page size ppc64 host for me. Signed-off-by: Alexander Graf Reviewed-by: Richard Henderson --- linux-user/elfload.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-user') diff --git a/linux-user/elfload.c b/linux-user/elfload.c index eb8d3adce3..c123244ecd 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -1552,7 +1552,7 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff)); NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr))); NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); - NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); + NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE, getpagesize()))); NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0)); NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); NEW_AUX_ENT(AT_ENTRY, info->entry); -- cgit v1.2.3-55-g7522
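The effect of that last change can be sketched outside QEMU; TARGET_PAGE_SIZE below is an illustrative stand-in for the guest constant, and sysconf(_SC_PAGESIZE) plays the role of getpagesize():

/* Hedged sketch of the AT_PAGESZ choice from the final patch, not QEMU code. */
#include <stdio.h>
#include <unistd.h>

#define TARGET_PAGE_SIZE 4096                    /* e.g. 4 KiB guest (qemu-ppc) */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    long host_page = sysconf(_SC_PAGESIZE);      /* e.g. 64 KiB on a ppc64 host */
    long at_pagesz = MAX((long)TARGET_PAGE_SIZE, host_page);

    /* This value goes into the AT_PAGESZ auxv entry handed to the guest. */
    printf("AT_PAGESZ = %ld\n", at_pagesz);
    return 0;
}

The guest's libc then aligns its mmap()/MAP_FIXED requests to this granularity, so a 4 KiB-page guest running on a 64 KiB-page host no longer asks for mappings the host kernel cannot place.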