Diffstat (limited to 'target/riscv/pmp.c')
-rw-r--r--   target/riscv/pmp.c   218
1 file changed, 196 insertions(+), 22 deletions(-)
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index cff020122a..78203291de 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -19,10 +19,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/*
- * PMP (Physical Memory Protection) is as-of-yet unused and needs testing.
- */
-
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
@@ -59,16 +55,6 @@ static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
return 0;
}
- /* In TOR mode, need to check the lock bit of the next pmp
- * (if there is a next)
- */
- const uint8_t a_field =
- pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg);
- if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) &&
- (PMP_AMATCH_TOR == a_field)) {
- return 1;
- }
-
return 0;
}
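
After this change pmp_is_locked() considers only the entry's own configuration; the TOR neighbour dependency moves to the pmpaddr write path (see the pmpaddr_csr_write() hunk further down). Post-patch the helper reduces to roughly the following sketch (a restatement under the surrounding definitions, not the literal patched source):

/*
 * Sketch: after this patch, an entry is locked iff its own
 * PMP_LOCK bit is set.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    return (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) != 0;
}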
@@ -100,11 +86,42 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
- if (!pmp_is_locked(env, pmp_index)) {
- env->pmp_state.pmp[pmp_index].cfg_reg = val;
- pmp_update_rule(env, pmp_index);
+ bool locked = true;
+
+ if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+ /* mseccfg.RLB is set */
+ if (MSECCFG_RLB_ISSET(env)) {
+ locked = false;
+ }
+
+ /* mseccfg.MML is not set */
+ if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
+ locked = false;
+ }
+
+ /* mseccfg.MML is set */
+ if (MSECCFG_MML_ISSET(env)) {
+ /* a locked rule may be added only if it does not grant execute */
+ if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
+ locked = false;
+ }
+ /* an unlocked rule may be added unless it is the shared-region (W|X) encoding */
+ if ((val & PMP_LOCK) != PMP_LOCK &&
+ (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
+ locked = false;
+ }
+ }
} else {
+ if (!pmp_is_locked(env, pmp_index)) {
+ locked = false;
+ }
+ }
+
+ if (locked) {
qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
+ } else {
+ env->pmp_state.pmp[pmp_index].cfg_reg = val;
+ pmp_update_rule(env, pmp_index);
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
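
The rewritten pmp_write_cfg() folds every ePMP rule into a single locked flag before committing the write. Read as a standalone predicate the logic is easier to follow; the sketch below restates the branches above (the helper name epmp_cfg_write_blocked is hypothetical, not part of the patch):

/*
 * Sketch (hypothetical helper): would a write of 'val' to pmpcfg
 * entry 'index' be ignored? Mirrors the branches in pmp_write_cfg().
 */
static bool epmp_cfg_write_blocked(CPURISCVState *env, uint32_t index,
                                   uint8_t val)
{
    /* mseccfg.RLB (Rule Locking Bypass) overrides every lock */
    if (MSECCFG_RLB_ISSET(env)) {
        return false;
    }

    if (!MSECCFG_MML_ISSET(env)) {
        /* without mseccfg.MML only the entry's own lock bit matters */
        return pmp_is_locked(env, index);
    }

    /* mseccfg.MML: a locked rule may be written if it grants no execute */
    if ((val & PMP_LOCK) && !(val & PMP_EXEC)) {
        return false;
    }

    /* an unlocked rule may be written unless it uses the reserved
     * shared-region encoding (W|X without L) */
    if (!(val & PMP_LOCK) && (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
        return false;
    }

    return true;
}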
@@ -227,6 +244,32 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
{
bool ret;
+ if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+ if (MSECCFG_MMWP_ISSET(env)) {
+ /*
+ * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
+ * so we default to deny all, even for M-mode.
+ */
+ *allowed_privs = 0;
+ return false;
+ } else if (MSECCFG_MML_ISSET(env)) {
+ /*
+ * The Machine Mode Lockdown (mseccfg.MML) bit is set
+ * so we can only execute code in M-mode with an applicable
+ * rule. Other modes are disabled.
+ */
+ if (mode == PRV_M && !(privs & PMP_EXEC)) {
+ ret = true;
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ } else {
+ ret = false;
+ *allowed_privs = 0;
+ }
+
+ return ret;
+ }
+ }
+
if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
/*
* Privileged spec v1.10 states if HW doesn't implement any PMP entry
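
With no matching entry, the fall-through result now depends on mseccfg. Condensed, it looks like the sketch below (using the patch's own macros; the helper name is hypothetical and the legacy non-M-mode deny path that follows in the function is elided):

/* Hypothetical condensation of the no-match default under ePMP. */
static target_ulong epmp_default_privs(CPURISCVState *env,
                                       target_ulong privs, target_ulong mode)
{
    if (MSECCFG_MMWP_ISSET(env)) {
        return 0;                    /* whitelist policy: deny even M-mode */
    }
    if (MSECCFG_MML_ISSET(env)) {
        /* M-mode keeps R/W by default; execute always needs a rule */
        return (mode == PRV_M && !(privs & PMP_EXEC)) ?
               (PMP_READ | PMP_WRITE) : 0;
    }
    /* legacy behaviour: M-mode is allowed everything */
    return PMP_READ | PMP_WRITE | PMP_EXEC;
}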
@@ -304,13 +347,94 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
/*
- * If the PMP entry is not off and the address is in range, do the priv
- * check
+ * Convert the PMP permissions to match the truth table in the
+ * ePMP spec.
*/
+ const uint8_t epmp_operation =
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
+ (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
+
if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
- *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
- if ((mode != PRV_M) || pmp_is_locked(env, i)) {
- *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
+ /*
+ * If the PMP entry is not off and the address is in range,
+ * do the priv check
+ */
+ if (!MSECCFG_MML_ISSET(env)) {
+ /*
+ * If the mseccfg.MML bit is not set, do the regular pmp priv check;
+ * this path always applies to plain PMP without the ePMP extension.
+ */
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ if ((mode != PRV_M) || pmp_is_locked(env, i)) {
+ *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
+ }
+ } else {
+ /*
+ * If the mseccfg.MML bit is set, do the enhanced pmp priv check
+ */
+ if (mode == PRV_M) {
+ switch (epmp_operation) {
+ case 0:
+ case 1:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ *allowed_privs = 0;
+ break;
+ case 2:
+ case 3:
+ case 14:
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ break;
+ case 9:
+ case 10:
+ *allowed_privs = PMP_EXEC;
+ break;
+ case 11:
+ case 13:
+ *allowed_privs = PMP_READ | PMP_EXEC;
+ break;
+ case 12:
+ case 15:
+ *allowed_privs = PMP_READ;
+ break;
+ }
+ } else {
+ switch (epmp_operation) {
+ case 0:
+ case 8:
+ case 9:
+ case 12:
+ case 13:
+ case 14:
+ *allowed_privs = 0;
+ break;
+ case 1:
+ case 10:
+ case 11:
+ *allowed_privs = PMP_EXEC;
+ break;
+ case 2:
+ case 4:
+ case 15:
+ *allowed_privs = PMP_READ;
+ break;
+ case 3:
+ case 6:
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ break;
+ case 5:
+ *allowed_privs = PMP_READ | PMP_EXEC;
+ break;
+ case 7:
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ break;
+ }
+ }
}
ret = ((privs & *allowed_privs) == privs);
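
The epmp_operation index packs the configuration bits as L:R:W:X (bit 3 down to bit 0), so each case label is one row of the truth table in the ePMP specification. A worked example, assuming QEMU's usual bit definitions (PMP_READ 0x01, PMP_WRITE 0x02, PMP_EXEC 0x04, PMP_LOCK 0x80):

/* Worked example of the epmp_operation encoding (L:R:W:X). */
uint8_t cfg = PMP_LOCK | PMP_WRITE | PMP_EXEC;  /* L=1 R=0 W=1 X=1 */
uint8_t op = ((cfg & PMP_LOCK) >> 4) |          /* 0x80 >> 4 -> bit 3 */
             ((cfg & PMP_READ) << 2) |          /* 0x01 << 2 -> bit 2 */
             (cfg & PMP_WRITE) |                /* 0x02      -> bit 1 */
             ((cfg & PMP_EXEC) >> 2);           /* 0x04 >> 2 -> bit 0 */
/*
 * op == 11 (0b1011): per the switches above, M-mode is granted
 * PMP_READ | PMP_EXEC while S/U-mode get PMP_EXEC only, i.e. a
 * shared code region that machine mode can also read.
 */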
@@ -380,7 +504,23 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong val)
{
trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
+
if (addr_index < MAX_RISCV_PMPS) {
+ /*
+ * In TOR mode, we also need to check the lock bit of the next pmp
+ * entry (if there is one), since this pmpaddr forms the bottom of
+ * that entry's range.
+ */
+ if (addr_index + 1 < MAX_RISCV_PMPS) {
+ uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
+
+ if (pmp_cfg & PMP_LOCK &&
+ PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpaddr write - pmpcfg + 1 locked\n");
+ return;
+ }
+ }
+
if (!pmp_is_locked(env, addr_index)) {
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule(env, addr_index);
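
This is the check that was dropped from pmp_is_locked() at the top of the patch: when entry i+1 is a locked TOR rule, pmpaddr[i] is the bottom of that rule's range, so it must be write-protected even though entry i itself is unlocked. A minimal illustration (addresses are made up, and the use of the file-local pmp_write_cfg() is for the example only):

/* Entry 1 covers [pmpaddr0 << 2, pmpaddr1 << 2) in TOR mode. */
pmpaddr_csr_write(env, 0, 0x80000000u >> 2);
pmpaddr_csr_write(env, 1, 0x80100000u >> 2);
pmp_write_cfg(env, 1, PMP_LOCK | (PMP_AMATCH_TOR << 3) | PMP_READ);

/*
 * Entry 0 is not itself locked, but this write is now ignored with
 * "ignoring pmpaddr write - pmpcfg + 1 locked".
 */
pmpaddr_csr_write(env, 0, 0);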
@@ -414,6 +554,40 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
}
/*
+ * Handle a write to a mseccfg CSR
+ */
+void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
+{
+ int i;
+
+ trace_mseccfg_csr_write(env->mhartid, val);
+
+ /* RLB cannot be set while it is currently clear and any region is locked */
+ if (!MSECCFG_RLB_ISSET(env)) {
+ for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ if (pmp_is_locked(env, i)) {
+ val &= ~MSECCFG_RLB;
+ break;
+ }
+ }
+ }
+
+ /* Sticky bits */
+ val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
+
+ env->mseccfg = val;
+}
+
+/*
+ * Handle a read from a mseccfg CSR
+ */
+target_ulong mseccfg_csr_read(CPURISCVState *env)
+{
+ trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
+ return env->mseccfg;
+}
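
The write rules are easiest to see from the guest's perspective: MML and MMWP are sticky once set, and RLB is silently dropped from the written value while any region is locked. A short illustration (an illustrative sequence, not test code from the patch):

/* MML/MMWP stick: clearing them has no effect once set. */
mseccfg_csr_write(env, MSECCFG_MML | MSECCFG_MMWP);
mseccfg_csr_write(env, 0);
assert(mseccfg_csr_read(env) & MSECCFG_MML);
assert(mseccfg_csr_read(env) & MSECCFG_MMWP);

/* With RLB clear and any entry locked, RLB cannot be turned on. */
pmp_write_cfg(env, 0, PMP_LOCK | PMP_READ);   /* allowed: no PMP_EXEC */
mseccfg_csr_write(env, MSECCFG_RLB);
assert(!MSECCFG_RLB_ISSET(env));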
+
+/*
* Calculate the TLB size if the start address or the end address of a
* PMP entry is present in the TLB page.
*/