 kernel/bpf/verifier.c | 121 ++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 107 insertions(+), 14 deletions(-)
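
For orientation before the patch body: mark_chain_precision() becomes a thin wrapper around a new __mark_chain_precision() that accepts either a register number or a stack spill slot index (spi), with -1 meaning "not requested", and a second wrapper mark_chain_precision_stack() covers the spill-slot side. The following standalone sketch is illustrative only, not kernel code; all model_* names are stand-ins, and the fp-relative offset to spi conversion mirrors how the verifier indexes spill slots:

	#include <assert.h>
	#include <stdio.h>

	#define BPF_REG_SIZE 8	/* size of one stack spill slot */

	static int model_mark_chain_precision(int regno, int spi)
	{
		/* at most one bit is requested per call; -1 disables a side */
		unsigned int reg_mask = regno >= 0 ? 1u << regno : 0;
		unsigned long long stack_mask = spi >= 0 ? 1ull << spi : 0;

		printf("backtrack request: reg_mask=%#x stack_mask=%#llx\n",
		       reg_mask, stack_mask);
		return 0;
	}

	static int model_mark_reg(int regno)
	{
		return model_mark_chain_precision(regno, -1);
	}

	static int model_mark_stack(int spi)
	{
		return model_mark_chain_precision(-1, spi);
	}

	int main(void)
	{
		int stack_off = -16;	/* fp-relative offset of a spilled scalar */
		int spi = (-stack_off - 1) / BPF_REG_SIZE;

		assert(spi == 1);
		model_mark_reg(3);	/* precision for register r3 */
		model_mark_stack(spi);	/* precision for the scalar spilled at fp-16 */
		return 0;
	}

In the patch itself each wrapper passes -1 for the side it does not use, so __mark_chain_precision() starts with at most one bit set in reg_mask or stack_mask.
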
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9b6ee93d5a85..a2e763703c30 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1659,16 +1659,18 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
}
}
-static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
+static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
+ int spi)
{
struct bpf_verifier_state *st = env->cur_state;
int first_idx = st->first_insn_idx;
int last_idx = env->insn_idx;
struct bpf_func_state *func;
struct bpf_reg_state *reg;
- u32 reg_mask = 1u << regno;
- u64 stack_mask = 0;
+ u32 reg_mask = regno >= 0 ? 1u << regno : 0;
+ u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
bool skip_first = true;
+ bool new_marks = false;
int i, err;
if (!env->allow_ptr_leaks)
@@ -1676,18 +1678,43 @@ static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
return 0;
func = st->frame[st->curframe];
- reg = &func->regs[regno];
- if (reg->type != SCALAR_VALUE) {
- WARN_ONCE(1, "backtracing misuse");
- return -EFAULT;
+ if (regno >= 0) {
+ reg = &func->regs[regno];
+ if (reg->type != SCALAR_VALUE) {
+ WARN_ONCE(1, "backtracing misuse");
+ return -EFAULT;
+ }
+ if (!reg->precise)
+ new_marks = true;
+ else
+ reg_mask = 0;
+ reg->precise = true;
}
- if (reg->precise)
- return 0;
- func->regs[regno].precise = true;
+ while (spi >= 0) {
+ if (func->stack[spi].slot_type[0] != STACK_SPILL) {
+ stack_mask = 0;
+ break;
+ }
+ reg = &func->stack[spi].spilled_ptr;
+ if (reg->type != SCALAR_VALUE) {
+ stack_mask = 0;
+ break;
+ }
+ if (!reg->precise)
+ new_marks = true;
+ else
+ stack_mask = 0;
+ reg->precise = true;
+ break;
+ }
+
+ if (!new_marks)
+ return 0;
+ if (!reg_mask && !stack_mask)
+ return 0;
for (;;) {
DECLARE_BITMAP(mask, 64);
- bool new_marks = false;
u32 history = st->jmp_history_cnt;
if (env->log.level & BPF_LOG_LEVEL)
@@ -1730,12 +1757,15 @@ static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
if (!st)
break;
+ new_marks = false;
func = st->frame[st->curframe];
bitmap_from_u64(mask, reg_mask);
for_each_set_bit(i, mask, 32) {
reg = &func->regs[i];
- if (reg->type != SCALAR_VALUE)
+ if (reg->type != SCALAR_VALUE) {
+ reg_mask &= ~(1u << i);
continue;
+ }
if (!reg->precise)
new_marks = true;
reg->precise = true;
@@ -1756,11 +1786,15 @@ static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
return -EFAULT;
}
- if (func->stack[i].slot_type[0] != STACK_SPILL)
+ if (func->stack[i].slot_type[0] != STACK_SPILL) {
+ stack_mask &= ~(1ull << i);
continue;
+ }
reg = &func->stack[i].spilled_ptr;
- if (reg->type != SCALAR_VALUE)
+ if (reg->type != SCALAR_VALUE) {
+ stack_mask &= ~(1ull << i);
continue;
+ }
if (!reg->precise)
new_marks = true;
reg->precise = true;
@@ -1772,6 +1806,8 @@ static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
reg_mask, stack_mask);
}
+ if (!reg_mask && !stack_mask)
+ break;
if (!new_marks)
break;
@@ -1781,6 +1817,15 @@ static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
return 0;
}
+static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
+{
+ return __mark_chain_precision(env, regno, -1);
+}
+
+static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
+{
+ return __mark_chain_precision(env, -1, spi);
+}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
@@ -7111,6 +7156,46 @@ static int propagate_liveness(struct bpf_verifier_env *env,
return 0;
}
+/* find precise scalars in the previous equivalent state and
+ * propagate them into the current state
+ */
+static int propagate_precision(struct bpf_verifier_env *env,
+ const struct bpf_verifier_state *old)
+{
+ struct bpf_reg_state *state_reg;
+ struct bpf_func_state *state;
+ int i, err = 0;
+
+ state = old->frame[old->curframe];
+ state_reg = state->regs;
+ for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
+ if (state_reg->type != SCALAR_VALUE ||
+ !state_reg->precise)
+ continue;
+ if (env->log.level & BPF_LOG_LEVEL2)
+ verbose(env, "propagating r%d\n", i);
+ err = mark_chain_precision(env, i);
+ if (err < 0)
+ return err;
+ }
+
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+ if (state->stack[i].slot_type[0] != STACK_SPILL)
+ continue;
+ state_reg = &state->stack[i].spilled_ptr;
+ if (state_reg->type != SCALAR_VALUE ||
+ !state_reg->precise)
+ continue;
+ if (env->log.level & BPF_LOG_LEVEL2)
+ verbose(env, "propagating fp%d\n",
+ (-i - 1) * BPF_REG_SIZE);
+ err = mark_chain_precision_stack(env, i);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
static bool states_maybe_looping(struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{
@@ -7203,6 +7288,14 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* this state and will pop a new one.
*/
err = propagate_liveness(env, &sl->state, cur);
+
+ /* if previous state reached the exit with precision and
+ * current state is equivalent to it (except precision marks)
+ * the precision needs to be propagated back into
+ * the current state.
+ */
+ err = err ? : push_jmp_history(env, cur);
+ err = err ? : propagate_precision(env, &sl->state);
if (err)
return err;
return 1;
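
Inside the backtracking loop the patch also drops mask bits for registers and stack slots that turn out not to hold scalar values, and stops the walk once both reg_mask and stack_mask are empty. A standalone sketch of that bookkeeping, illustrative only and using a stand-in model_type instead of the kernel's bpf_reg_type:

	#include <stdint.h>
	#include <stdio.h>

	enum model_type { SCALAR_VALUE, PTR_VALUE };

	int main(void)
	{
		enum model_type regs[11] = { [3] = PTR_VALUE };	/* r3 is not a scalar here */
		uint32_t reg_mask = 1u << 3;	/* precision was requested for r3 */
		uint64_t stack_mask = 0;
		int i;

		for (i = 0; i < 11; i++) {
			if (!(reg_mask & (1u << i)))
				continue;
			if (regs[i] != SCALAR_VALUE)
				reg_mask &= ~(1u << i);	/* drop the bit instead of carrying it */
		}

		if (!reg_mask && !stack_mask)
			printf("nothing left to mark precise, stop backtracking\n");
		return 0;
	}

Once both masks are empty there is nothing left to make precise, so older parent states are not walked at all.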