/*
* Alpha emulation cpu micro-operations helpers for qemu.
*
* Copyright (c) 2007 Jocelyn Mayer
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "op_helper.h"
#define MEMSUFFIX _raw
#include "op_helper_mem.h"
#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _kernel
#include "op_helper_mem.h"
#define MEMSUFFIX _executive
#include "op_helper_mem.h"
#define MEMSUFFIX _supervisor
#include "op_helper_mem.h"
#define MEMSUFFIX _user
#include "op_helper_mem.h"
/* This is used for PAL modes */
#define MEMSUFFIX _data
#include "op_helper_mem.h"
#endif
void helper_tb_flush (void)
{
tlb_flush(env, 1);
}
void cpu_dump_EA (target_ulong EA);
void helper_print_mem_EA (target_ulong EA)
{
cpu_dump_EA(EA);
}
/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (int excp, int error)
{
env->exception_index = excp;
env->error_code = error;
cpu_loop_exit();
}
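/* AMASK clears the bits of its operand that correspond to features the
   CPU implements (env->amask).  The oldest implementations
   (IMPLVER_2106x) treat AMASK as a plain move and return the operand
   unchanged. */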
uint64_t helper_amask (uint64_t arg)
{
switch (env->implver) {
case IMPLVER_2106x:
/* EV4, EV45, LCA, LCA45 & EV5 */
break;
case IMPLVER_21164:
case IMPLVER_21264:
case IMPLVER_21364:
arg &= ~env->amask;
break;
}
return arg;
}
uint64_t helper_load_pcc (void)
{
/* XXX: TODO */
return 0;
}
uint64_t helper_load_implver (void)
{
return env->implver;
}
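/* FPCR handling: helper_load_fpcr assembles an FPCR image in T0 with the
   softfloat exception flags in bits 57:52 (also mirrored into EXC_SUM),
   the summary bit in bit 63 and the dynamic rounding mode in bits 59:58
   (0 = chopped, 1 = minus infinity, 2 = normal, 3 = plus infinity).
   helper_store_fpcr applies the reverse mapping to the softfloat state. */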
void helper_load_fpcr (void)
{
T0 = 0;
#ifdef CONFIG_SOFTFLOAT
    T0 |= (uint64_t)env->fp_status.float_exception_flags << 52;
if (env->fp_status.float_exception_flags)
T0 |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
switch (env->fp_status.float_rounding_mode) {
case float_round_nearest_even:
T0 |= 2ULL << 58;
break;
case float_round_down:
T0 |= 1ULL << 58;
break;
case float_round_up:
T0 |= 3ULL << 58;
break;
case float_round_to_zero:
break;
}
}
void helper_store_fpcr (void)
{
#ifdef CONFIG_SOFTFLOAT
set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
#endif
switch ((T0 >> 58) & 3) {
case 0:
set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
break;
case 1:
set_float_rounding_mode(float_round_down, &FP_STATUS);
break;
case 2:
set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
break;
case 3:
set_float_rounding_mode(float_round_up, &FP_STATUS);
break;
}
}
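/* RS and RC read the per-CPU interrupt flag and then set (RS) or clear
   (RC) it, returning the old value; the spin lock keeps the
   read-modify-write atomic with respect to other emulated CPUs. */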
spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;
uint64_t helper_rs(void)
{
uint64_t tmp;
spin_lock(&intr_cpu_lock);
tmp = env->intr_flag;
env->intr_flag = 1;
spin_unlock(&intr_cpu_lock);
return tmp;
}
uint64_t helper_rc(void)
{
uint64_t tmp;
spin_lock(&intr_cpu_lock);
tmp = env->intr_flag;
env->intr_flag = 0;
spin_unlock(&intr_cpu_lock);
return tmp;
}
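/* Overflow-checked arithmetic.  Signed addition overflows exactly when
   both operands have the same sign and the sum's sign differs:
   (a ^ b ^ -1) & (a ^ sum) has its top bit set precisely in that case.
   Subtraction overflows when the operands have different signs and the
   difference's sign differs from the minuend's: (a ^ b) & (a ^ diff). */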
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
uint64_t tmp = op1;
op1 += op2;
if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
}
return op1;
}
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
uint64_t tmp = op1;
op1 = (uint32_t)(op1 + op2);
if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
}
return op1;
}
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
uint64_t tmp = op1;
op1 -= op2;
    if (unlikely((tmp ^ op2) & (tmp ^ op1) & (1ULL << 63))) {
helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
}
return op1;
}
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
uint64_t tmp = op1;
op1 = (uint32_t)(op1 - op2);
    if (unlikely((tmp ^ op2) & (tmp ^ op1) & (1UL << 31))) {
helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
}
return op1;
}
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)(int32_t)op1 * (int64_t)(int32_t)op2;
if (unlikely((int32_t)res != res)) {
helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
}
return (int64_t)((int32_t)res);
}
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
uint64_t tl, th;
muls64(&tl, &th, op1, op2);
    /* The 128-bit product fits in 64 bits iff the high half is the
       sign extension of the low half */
    if (unlikely(th != (uint64_t)((int64_t)tl >> 63))) {
helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
}
return tl;
}
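/* UMULH returns the upper 64 bits of the unsigned 128-bit product. */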
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
uint64_t tl, th;
mulu64(&tl, &th, op1, op2);
return th;
}
uint64_t helper_ctpop (uint64_t arg)
{
return ctpop64(arg);
}
uint64_t helper_ctlz (uint64_t arg)
{
return clz64(arg);
}
uint64_t helper_cttz (uint64_t arg)
{
return ctz64(arg);
}
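/* byte_zap clears every byte of op whose corresponding bit is set in
   mskb; multiplying the extracted bit by 0xFF expands it into a full
   byte of the mask.  All the ZAP/MSKxx/INSxx helpers below are built on
   it, the xxH ("high") variants using the part of the byte mask that
   shifts out past bit 7 into the next quadword. */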
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
uint64_t mask;
mask = 0;
mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;
return op & ~mask;
}
uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
return byte_zap(val, 0x01 << (mask & 7));
}
uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
val <<= (mask & 7) * 8;
return byte_zap(val, ~(0x01 << (mask & 7)));
}
uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
return byte_zap(val, 0x03 << (mask & 7));
}
uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
val <<= (mask & 7) * 8;
return byte_zap(val, ~(0x03 << (mask & 7)));
}
uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
return byte_zap(val, 0x0F << (mask & 7));
}
uint64_t helper_insll(uint64_t val, uint64_t mask)
{
val <<= (mask & 7) * 8;
return byte_zap(val, ~(0x0F << (mask & 7)));
}
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
return byte_zap(val, mask);
}
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
return byte_zap(val, ~mask);
}
uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
return byte_zap(val, 0xFF << (mask & 7));
}
uint64_t helper_insql(uint64_t val, uint64_t mask)
{
val <<= (mask & 7) * 8;
return byte_zap(val, ~(0xFF << (mask & 7)));
}
uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}
uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
val >>= 64 - ((mask & 7) * 8);
return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}
uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}
uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
val >>= 64 - ((mask & 7) * 8);
return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}
uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}
uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
val >>= 64 - ((mask & 7) * 8);
return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}
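/* CMPBGE performs eight independent unsigned byte comparisons, setting
   bit i of the result when byte i of op1 is greater than or equal to
   byte i of op2. */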
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
uint8_t opa, opb, res;
int i;
res = 0;
    for (i = 0; i < 8; i++) {
opa = op1 >> (i * 8);
opb = op2 >> (i * 8);
if (opa >= opb)
res |= 1 << i;
}
return res;
}
void helper_cmov_fir (int freg)
{
if (FT0 != 0)
env->fir[freg] = FT1;
}
void helper_sqrts (void)
{
FT0 = float32_sqrt(FT0, &FP_STATUS);
}
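/* The CPYS family assembles its result from the sign bit (for CPYSE the
   whole sign and exponent field) of FT0 and the remaining bits of FT1;
   CPYSN uses the complemented sign. */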
void helper_cpys (void)
{
union {
double d;
uint64_t i;
} p, q, r;
p.d = FT0;
q.d = FT1;
r.i = p.i & 0x8000000000000000ULL;
r.i |= q.i & ~0x8000000000000000ULL;
FT0 = r.d;
}
void helper_cpysn (void)
{
union {
double d;
uint64_t i;
} p, q, r;
p.d = FT0;
q.d = FT1;
r.i = (~p.i) & 0x8000000000000000ULL;
r.i |= q.i & ~0x8000000000000000ULL;
FT0 = r.d;
}
void helper_cpyse (void)
{
union {
double d;
uint64_t i;
} p, q, r;
p.d = FT0;
q.d = FT1;
r.i = p.i & 0xFFF0000000000000ULL;
r.i |= q.i & ~0xFFF0000000000000ULL;
FT0 = r.d;
}
void helper_itofs (void)
{
union {
double d;
uint64_t i;
} p;
p.d = FT0;
FT0 = int64_to_float32(p.i, &FP_STATUS);
}
void helper_ftois (void)
{
union {
double d;
uint64_t i;
} p;
p.i = float32_to_int64(FT0, &FP_STATUS);
FT0 = p.d;
}
void helper_sqrtt (void)
{
FT0 = float64_sqrt(FT0, &FP_STATUS);
}
void helper_cmptun (void)
{
union {
double d;
uint64_t i;
} p;
p.i = 0;
if (float64_is_nan(FT0) || float64_is_nan(FT1))
p.i = 0x4000000000000000ULL;
FT0 = p.d;
}
void helper_cmpteq (void)
{
union {
double d;
uint64_t i;
} p;
p.i = 0;
if (float64_eq(FT0, FT1, &FP_STATUS))
p.i = 0x4000000000000000ULL;
FT0 = p.d;
}
void helper_cmptle (void)
{
union {
double d;
uint64_t i;
} p;
p.i = 0;
if (float64_le(FT0, FT1, &FP_STATUS))
p.i = 0x4000000000000000ULL;
FT0 = p.d;
}
void helper_cmptlt (void)
{
union {
double d;
uint64_t i;
} p;
p.i = 0;
if (float64_lt(FT0, FT1, &FP_STATUS))
p.i = 0x4000000000000000ULL;
FT0 = p.d;
}
void helper_itoft (void)
{
union {
double d;
uint64_t i;
} p;
p.d = FT0;
FT0 = int64_to_float64(p.i, &FP_STATUS);
}
void helper_ftoit (void)
{
union {
double d;
uint64_t i;
} p;
p.i = float64_to_int64(FT0, &FP_STATUS);
FT0 = p.d;
}
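/* VAX F-float helpers.  The F-float operands are expected to sit in the
   FP registers already swizzled into IEEE single layout (the memory
   access helpers are assumed to do the word swap).  Because the VAX
   exponent bias and normalization differ from IEEE by a factor of four,
   the same bit pattern read as an IEEE float is 4x the VAX value, so
   vaxf_to_ieee32 and ieee32_to_vaxf just scale by 0.25 and 4.0, mapping
   out-of-range exponents to underflow/overflow and NaN/infinity to a
   VAX reserved operand (encoded as 1).  vaxf_is_valid rejects reserved
   operands and dirty zeros (exponent 0 with the sign bit set or a
   non-zero fraction). */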
static always_inline int vaxf_is_valid (float ff)
{
union {
float f;
uint32_t i;
} p;
uint32_t exp, mant;
p.f = ff;
exp = (p.i >> 23) & 0xFF;
mant = p.i & 0x007FFFFF;
if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
/* Reserved operands / Dirty zero */
return 0;
}
return 1;
}
static always_inline float vaxf_to_ieee32 (float ff)
{
union {
float f;
uint32_t i;
} p;
uint32_t exp;
p.f = ff;
exp = (p.i >> 23) & 0xFF;
if (exp < 3) {
/* Underflow */
p.f = 0.0;
} else {
p.f *= 0.25;
}
return p.f;
}
static always_inline float ieee32_to_vaxf (float fi)
{
union {
float f;
uint32_t i;
} p;
uint32_t exp, mant;
p.f = fi;
exp = (p.i >> 23) & 0xFF;
mant = p.i & 0x007FFFFF;
if (exp == 255) {
/* NaN or infinity */
p.i = 1;
} else if (exp == 0) {
if (mant == 0) {
/* Zero */
p.i = 0;
} else {
/* Denormalized */
p.f *= 2.0;
}
} else {
if (exp >= 253) {
/* Overflow */
p.i = 1;
} else {
p.f *= 4.0;
}
}
return p.f;
}
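/* Each VAX F arithmetic helper converts its operands to IEEE single,
   performs the operation with softfloat and converts the result back;
   reserved operands are detected but not yet handled (XXX). */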
void helper_addf (void)
{
float ft0, ft1, ft2;
if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxf_to_ieee32(FT0);
ft1 = vaxf_to_ieee32(FT1);
ft2 = float32_add(ft0, ft1, &FP_STATUS);
FT0 = ieee32_to_vaxf(ft2);
}
void helper_subf (void)
{
float ft0, ft1, ft2;
if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxf_to_ieee32(FT0);
ft1 = vaxf_to_ieee32(FT1);
ft2 = float32_sub(ft0, ft1, &FP_STATUS);
FT0 = ieee32_to_vaxf(ft2);
}
void helper_mulf (void)
{
float ft0, ft1, ft2;
if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxf_to_ieee32(FT0);
ft1 = vaxf_to_ieee32(FT1);
ft2 = float32_mul(ft0, ft1, &FP_STATUS);
FT0 = ieee32_to_vaxf(ft2);
}
void helper_divf (void)
{
float ft0, ft1, ft2;
if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxf_to_ieee32(FT0);
ft1 = vaxf_to_ieee32(FT1);
ft2 = float32_div(ft0, ft1, &FP_STATUS);
FT0 = ieee32_to_vaxf(ft2);
}
void helper_sqrtf (void)
{
float ft0, ft1;
if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxf_to_ieee32(FT0);
ft1 = float32_sqrt(ft0, &FP_STATUS);
FT0 = ieee32_to_vaxf(ft1);
}
void helper_itoff (void)
{
/* XXX: TODO */
}
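/* VAX G-float uses the same field widths as IEEE double, so the G-float
   helpers below follow the same scheme as the F-float ones: scale by
   0.25 / 4.0 to convert between the two interpretations and reject
   reserved operands and dirty zeros. */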
static always_inline int vaxg_is_valid (double ff)
{
union {
double f;
uint64_t i;
} p;
uint64_t exp, mant;
p.f = ff;
exp = (p.i >> 52) & 0x7FF;
mant = p.i & 0x000FFFFFFFFFFFFFULL;
if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
/* Reserved operands / Dirty zero */
return 0;
}
return 1;
}
static always_inline double vaxg_to_ieee64 (double fg)
{
union {
double f;
uint64_t i;
} p;
uint32_t exp;
p.f = fg;
exp = (p.i >> 52) & 0x7FF;
if (exp < 3) {
/* Underflow */
p.f = 0.0;
} else {
p.f *= 0.25;
}
return p.f;
}
static always_inline double ieee64_to_vaxg (double fi)
{
union {
double f;
uint64_t i;
} p;
uint64_t mant;
uint32_t exp;
p.f = fi;
exp = (p.i >> 52) & 0x7FF;
mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 2047) {
/* NaN or infinity */
p.i = 1; /* VAX dirty zero */
} else if (exp == 0) {
if (mant == 0) {
/* Zero */
p.i = 0;
} else {
/* Denormalized */
p.f *= 2.0;
}
} else {
if (exp >= 2045) {
/* Overflow */
p.i = 1; /* VAX dirty zero */
} else {
p.f *= 4.0;
}
}
return p.f;
}
void helper_addg (void)
{
double ft0, ft1, ft2;
if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxg_to_ieee64(FT0);
ft1 = vaxg_to_ieee64(FT1);
ft2 = float64_add(ft0, ft1, &FP_STATUS);
FT0 = ieee64_to_vaxg(ft2);
}
void helper_subg (void)
{
double ft0, ft1, ft2;
if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxg_to_ieee64(FT0);
ft1 = vaxg_to_ieee64(FT1);
ft2 = float64_sub(ft0, ft1, &FP_STATUS);
FT0 = ieee64_to_vaxg(ft2);
}
void helper_mulg (void)
{
double ft0, ft1, ft2;
if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxg_to_ieee64(FT0);
ft1 = vaxg_to_ieee64(FT1);
ft2 = float64_mul(ft0, ft1, &FP_STATUS);
FT0 = ieee64_to_vaxg(ft2);
}
void helper_divg (void)
{
double ft0, ft1, ft2;
if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxg_to_ieee64(FT0);
ft1 = vaxg_to_ieee64(FT1);
ft2 = float64_div(ft0, ft1, &FP_STATUS);
FT0 = ieee64_to_vaxg(ft2);
}
void helper_sqrtg (void)
{
double ft0, ft1;
if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxg_to_ieee64(FT0);
ft1 = float64_sqrt(ft0, &FP_STATUS);
FT0 = ieee64_to_vaxg(ft1);
}
void helper_cmpgeq (void)
{
union {
double d;
uint64_t u;
} p;
double ft0, ft1;
if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxg_to_ieee64(FT0);
ft1 = vaxg_to_ieee64(FT1);
p.u = 0;
if (float64_eq(ft0, ft1, &FP_STATUS))
p.u = 0x4000000000000000ULL;
FT0 = p.d;
}
void helper_cmpglt (void)
{
union {
double d;
uint64_t u;
} p;
double ft0, ft1;
if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxg_to_ieee64(FT0);
ft1 = vaxg_to_ieee64(FT1);
p.u = 0;
if (float64_lt(ft0, ft1, &FP_STATUS))
p.u = 0x4000000000000000ULL;
FT0 = p.d;
}
void helper_cmpgle (void)
{
union {
double d;
uint64_t u;
} p;
double ft0, ft1;
if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
/* XXX: TODO */
}
ft0 = vaxg_to_ieee64(FT0);
ft1 = vaxg_to_ieee64(FT1);
p.u = 0;
if (float64_le(ft0, ft1, &FP_STATUS))
p.u = 0x4000000000000000ULL;
FT0 = p.d;
}
void helper_cvtqs (void)
{
union {
double d;
uint64_t u;
} p;
p.d = FT0;
    FT0 = (float)(int64_t)p.u;
}
void helper_cvttq (void)
{
union {
double d;
uint64_t u;
} p;
    p.u = (uint64_t)(int64_t)FT0;
FT0 = p.d;
}
void helper_cvtqt (void)
{
union {
double d;
uint64_t u;
} p;
p.d = FT0;
    FT0 = (int64_t)p.u;
}
void helper_cvtqf (void)
{
union {
double d;
uint64_t u;
} p;
p.d = FT0;
FT0 = ieee32_to_vaxf(p.u);
}
void helper_cvtgf (void)
{
double ft0;
ft0 = vaxg_to_ieee64(FT0);
FT0 = ieee32_to_vaxf(ft0);
}
void helper_cvtgd (void)
{
/* XXX: TODO */
}
void helper_cvtgq (void)
{
union {
double d;
uint64_t u;
} p;
p.u = vaxg_to_ieee64(FT0);
FT0 = p.d;
}
void helper_cvtqg (void)
{
union {
double d;
uint64_t u;
} p;
p.d = FT0;
FT0 = ieee64_to_vaxg(p.u);
}
void helper_cvtdg (void)
{
/* XXX: TODO */
}
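/* CVTLQ/CVTQL operate on a longword stored in a floating register in
   scattered form: its two high bits live in register bits <63:62> and
   its low 31 bits in bits <59:29>.  CVTLQ gathers the pieces into a
   sign-extended quadword; CVTQL scatters them back, the /V variants
   raising an arithmetic overflow when the quadword does not fit in 32
   bits (the /S software-completion handling is still TODO). */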
void helper_cvtlq (void)
{
union {
double d;
uint64_t u;
} p, q;
p.d = FT0;
q.u = (p.u >> 29) & 0x3FFFFFFF;
q.u |= (p.u >> 32);
q.u = (int64_t)((int32_t)q.u);
FT0 = q.d;
}
static always_inline void __helper_cvtql (int s, int v)
{
union {
double d;
uint64_t u;
} p, q;
p.d = FT0;
q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
FT0 = q.d;
if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
}
if (s) {
/* TODO */
}
}
void helper_cvtql (void)
{
__helper_cvtql(0, 0);
}
void helper_cvtqlv (void)
{
__helper_cvtql(0, 1);
}
void helper_cvtqlsv (void)
{
__helper_cvtql(1, 1);
}
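/* The CMPFxx helpers compare FT0 and FT1 and leave a 0/1 flag in T0;
   the GT and GE forms are simply the negations of LE and LT. */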
void helper_cmpfeq (void)
{
if (float64_eq(FT0, FT1, &FP_STATUS))
T0 = 1;
else
T0 = 0;
}
void helper_cmpfne (void)
{
if (float64_eq(FT0, FT1, &FP_STATUS))
T0 = 0;
else
T0 = 1;
}
void helper_cmpflt (void)
{
if (float64_lt(FT0, FT1, &FP_STATUS))
T0 = 1;
else
T0 = 0;
}
void helper_cmpfle (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
T0 = 1;
else
T0 = 0;
}
void helper_cmpfgt (void)
{
if (float64_le(FT0, FT1, &FP_STATUS))
T0 = 0;
else
T0 = 1;
}
void helper_cmpfge (void)
{
if (float64_lt(FT0, FT1, &FP_STATUS))
T0 = 0;
else
T0 = 1;
}
#if !defined (CONFIG_USER_ONLY)
void helper_mfpr (int iprn)
{
uint64_t val;
if (cpu_alpha_mfpr(env, iprn, &val) == 0)
T0 = val;
}
void helper_mtpr (int iprn)
{
cpu_alpha_mtpr(env, iprn, T0, NULL);
}
#endif
#if defined(HOST_SPARC) || defined(HOST_SPARC64)
void helper_reset_FT0 (void)
{
FT0 = 0;
}
void helper_reset_FT1 (void)
{
FT1 = 0;
}
void helper_reset_FT2 (void)
{
FT2 = 0;
}
#endif
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)
/* XXX: the two following helpers are pure hacks.
 * Once the PALcode is properly emulated we should never see
 * HW_LD / HW_ST instructions here.
 * Both translate the virtual address in T0 into a physical address by
 * probing the software TLB, filling it on a miss.
 */
void helper_ld_phys_to_virt (void)
{
uint64_t tlb_addr, physaddr;
int index, mmu_idx;
void *retaddr;
mmu_idx = cpu_mmu_index(env);
index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
if ((T0 & TARGET_PAGE_MASK) ==
(tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
} else {
        /* the page is not in the TLB: fill it */
retaddr = GETPC();
tlb_fill(T0, 0, mmu_idx, retaddr);
goto redo;
}
T0 = physaddr;
}
void helper_st_phys_to_virt (void)
{
uint64_t tlb_addr, physaddr;
int index, mmu_idx;
void *retaddr;
mmu_idx = cpu_mmu_index(env);
index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((T0 & TARGET_PAGE_MASK) ==
(tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
} else {
        /* the page is not in the TLB: fill it */
retaddr = GETPC();
tlb_fill(T0, 1, mmu_idx, retaddr);
goto redo;
}
T0 = physaddr;
}
#define MMUSUFFIX _mmu
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
/* Try to fill the TLB and raise an exception on failure. If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
CPUState *saved_env;
unsigned long pc;
int ret;
/* XXX: hack to restore env in all cases, even if not called from
generated code */
saved_env = env;
env = cpu_single_env;
ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
if (!likely(ret == 0)) {
if (likely(retaddr)) {
/* now we have a real cpu fault */
pc = (unsigned long)retaddr;
tb = tb_find_pc(pc);
if (likely(tb)) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, NULL);
}
}
/* Exception index and error code are already set */
cpu_loop_exit();
}
env = saved_env;
}
#endif