Merge branch 'bpf-next'
Alexei Starovoitov says:

====================
bpf: reduce verifier memory consumption and add tests

Small set of cleanups:
- reduce verifier memory consumption
- add verifier test to check register state propagation and state equivalence
- add JIT test reduced from recent nmap triggered crash
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f541a22ae9
3 changed files with 145 additions and 45 deletions
kernel/bpf/verifier.c

@@ -153,22 +153,19 @@ struct reg_state {
 
 enum bpf_stack_slot_type {
 	STACK_INVALID,    /* nothing was stored in this stack slot */
-	STACK_SPILL,      /* 1st byte of register spilled into stack */
-	STACK_SPILL_PART, /* other 7 bytes of register spill */
+	STACK_SPILL,      /* register spilled into stack */
 	STACK_MISC        /* BPF program wrote some data into this slot */
 };
 
-struct bpf_stack_slot {
-	enum bpf_stack_slot_type stype;
-	struct reg_state reg_st;
-};
+#define BPF_REG_SIZE 8  /* size of eBPF register in bytes */
 
 /* state of the program:
  * type of all registers and stack info
  */
 struct verifier_state {
 	struct reg_state regs[MAX_BPF_REG];
-	struct bpf_stack_slot stack[MAX_BPF_STACK];
+	u8 stack_slot_type[MAX_BPF_STACK];
+	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
 };
 
 /* linked list of verifier states used to prune search */
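For a sense of what this restructuring saves per verifier state, here is a rough userspace sizing sketch. It is illustrative only and not part of the diff: the reg_state fields below are stand-ins for the kernel's, and MAX_BPF_REG is taken as 11 (R0-R10).

#include <stdio.h>

#define MAX_BPF_REG   11
#define MAX_BPF_STACK 512
#define BPF_REG_SIZE  8

struct reg_state { int type; int imm; void *map_ptr; };  /* stand-in fields */

/* old layout: a full slot (type tag + reg_state) per stack *byte* */
struct old_slot { int stype; struct reg_state reg_st; };
struct old_state {
	struct reg_state regs[MAX_BPF_REG];
	struct old_slot stack[MAX_BPF_STACK];
};

/* new layout: one type byte per stack byte, one reg_state per 8-byte slot */
struct new_state {
	struct reg_state regs[MAX_BPF_REG];
	unsigned char stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};

int main(void)
{
	/* exact numbers depend on the real reg_state layout; the roughly 8x
	 * drop in per-stack-byte bookkeeping is the point */
	printf("old verifier_state: %zu bytes\n", sizeof(struct old_state));
	printf("new verifier_state: %zu bytes\n", sizeof(struct new_state));
	return 0;
}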
@@ -259,10 +256,10 @@ static void print_verifier_state(struct verifier_env *env)
 				env->cur_state.regs[i].map_ptr->key_size,
 				env->cur_state.regs[i].map_ptr->value_size);
 	}
-	for (i = 0; i < MAX_BPF_STACK; i++) {
-		if (env->cur_state.stack[i].stype == STACK_SPILL)
+	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+		if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
 			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
-				reg_type_str[env->cur_state.stack[i].reg_st.type]);
+				reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
 	}
 	verbose("\n");
 }
@@ -539,8 +536,10 @@ static int bpf_size_to_bytes(int bpf_size)
 static int check_stack_write(struct verifier_state *state, int off, int size,
 			     int value_regno)
 {
-	struct bpf_stack_slot *slot;
 	int i;
+	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
+	 * so it's aligned access and [off, off + size) are within stack limits
+	 */
 
 	if (value_regno >= 0 &&
 	    (state->regs[value_regno].type == PTR_TO_MAP_VALUE ||
@@ -548,30 +547,24 @@ static int check_stack_write(struct verifier_state *state, int off, int size,
 	     state->regs[value_regno].type == PTR_TO_CTX)) {
 
 		/* register containing pointer is being spilled into stack */
-		if (size != 8) {
+		if (size != BPF_REG_SIZE) {
 			verbose("invalid size of register spill\n");
 			return -EACCES;
 		}
 
-		slot = &state->stack[MAX_BPF_STACK + off];
-		slot->stype = STACK_SPILL;
 		/* save register state */
-		slot->reg_st = state->regs[value_regno];
-		for (i = 1; i < 8; i++) {
-			slot = &state->stack[MAX_BPF_STACK + off + i];
-			slot->stype = STACK_SPILL_PART;
-			slot->reg_st.type = UNKNOWN_VALUE;
-			slot->reg_st.map_ptr = NULL;
-		}
-	} else {
+		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
+			state->regs[value_regno];
 
+		for (i = 0; i < BPF_REG_SIZE; i++)
+			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
+	} else {
 		/* regular write of data into stack */
-		for (i = 0; i < size; i++) {
-			slot = &state->stack[MAX_BPF_STACK + off + i];
-			slot->stype = STACK_MISC;
-			slot->reg_st.type = UNKNOWN_VALUE;
-			slot->reg_st.map_ptr = NULL;
-		}
+		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
+			(struct reg_state) {};
+
+		for (i = 0; i < size; i++)
+			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
 	}
 	return 0;
 }
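To make the new indexing concrete, here is a small userspace model of the spill/fill bookkeeping. It is a sketch under assumptions rather than the kernel code: the enum values and reg_state fields are stand-ins. A register spill at stack offset off (negative, 8-byte aligned) marks eight consecutive bytes in stack_slot_type[] and stores the full register state once in spilled_regs[]; a fill only restores it when all eight bytes are still marked as a spill.

#define MAX_BPF_STACK 512
#define BPF_REG_SIZE  8

enum slot_type { STACK_INVALID, STACK_SPILL, STACK_MISC }; /* mirrors the new enum */

struct reg_state { int type; int imm; };                   /* stand-in fields */

struct stack_state {
	unsigned char stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};

/* spill path, as in the new check_stack_write(): off = -8 maps to bytes
 * 504..511 of stack_slot_type[] and to spilled_regs[63] */
void spill(struct stack_state *s, int off, struct reg_state reg)
{
	int base = MAX_BPF_STACK + off;
	int i;

	s->spilled_regs[base / BPF_REG_SIZE] = reg;
	for (i = 0; i < BPF_REG_SIZE; i++)
		s->stack_slot_type[base + i] = STACK_SPILL;
}

/* fill path, as in the new check_stack_read(): only a full 8-byte read of an
 * intact spill restores the saved register state */
int fill(struct stack_state *s, int off, struct reg_state *reg)
{
	int base = MAX_BPF_STACK + off;
	int i;

	for (i = 0; i < BPF_REG_SIZE; i++)
		if (s->stack_slot_type[base + i] != STACK_SPILL)
			return -1;  /* corrupted or partial spill */
	*reg = s->spilled_regs[base / BPF_REG_SIZE];
	return 0;
}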
@@ -579,19 +572,18 @@ static int check_stack_write(struct verifier_state *state, int off, int size,
 static int check_stack_read(struct verifier_state *state, int off, int size,
 			    int value_regno)
 {
+	u8 *slot_type;
 	int i;
-	struct bpf_stack_slot *slot;
 
-	slot = &state->stack[MAX_BPF_STACK + off];
+	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
 
-	if (slot->stype == STACK_SPILL) {
-		if (size != 8) {
+	if (slot_type[0] == STACK_SPILL) {
+		if (size != BPF_REG_SIZE) {
 			verbose("invalid size of register spill\n");
 			return -EACCES;
 		}
-		for (i = 1; i < 8; i++) {
-			if (state->stack[MAX_BPF_STACK + off + i].stype !=
-			    STACK_SPILL_PART) {
+		for (i = 1; i < BPF_REG_SIZE; i++) {
+			if (slot_type[i] != STACK_SPILL) {
 				verbose("corrupted spill memory\n");
 				return -EACCES;
 			}
@@ -599,12 +591,12 @@ static int check_stack_read(struct verifier_state *state, int off, int size,
 
 		if (value_regno >= 0)
 			/* restore register state from stack */
-			state->regs[value_regno] = slot->reg_st;
+			state->regs[value_regno] =
+				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
 		return 0;
 	} else {
 		for (i = 0; i < size; i++) {
-			if (state->stack[MAX_BPF_STACK + off + i].stype !=
-			    STACK_MISC) {
+			if (slot_type[i] != STACK_MISC) {
 				verbose("invalid read from stack off %d+%d size %d\n",
 					off, i, size);
 				return -EACCES;
@@ -747,7 +739,7 @@ static int check_stack_boundary(struct verifier_env *env,
 	}
 
 	for (i = 0; i < access_size; i++) {
-		if (state->stack[MAX_BPF_STACK + off + i].stype != STACK_MISC) {
+		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
 			verbose("invalid indirect read from stack off %d+%d size %d\n",
 				off, i, access_size);
 			return -EACCES;
@@ -1417,12 +1409,33 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
 	}
 
 	for (i = 0; i < MAX_BPF_STACK; i++) {
-		if (memcmp(&old->stack[i], &cur->stack[i],
-			   sizeof(old->stack[0])) != 0) {
-			if (old->stack[i].stype == STACK_INVALID)
-				continue;
+		if (old->stack_slot_type[i] == STACK_INVALID)
+			continue;
+		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
+			/* Ex: old explored (safe) state has STACK_SPILL in
+			 * this stack slot, but current has has STACK_MISC ->
+			 * this verifier states are not equivalent,
+			 * return false to continue verification of this path
+			 */
 			return false;
-		}
+		if (i % BPF_REG_SIZE)
+			continue;
+		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
+			   &cur->spilled_regs[i / BPF_REG_SIZE],
+			   sizeof(old->spilled_regs[0])))
+			/* when explored and current stack slot types are
+			 * the same, check that stored pointers types
+			 * are the same as well.
+			 * Ex: explored safe path could have stored
+			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
+			 * but current path has stored:
+			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
+			 * such verifier states are not equivalent.
+			 * return false to continue verification of this path
+			 */
+			return false;
+		else
+			continue;
 	}
 	return true;
 }
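The pruning rule this reworked states_equal() loop encodes, restated as a standalone sketch (simplified, assumed types; STACK_INVALID taken as 0): a stack byte the explored state never used is ignored, differing slot types make the states non-equivalent, and for spill slots the saved register states must also match, compared once per 8-byte slot.

#include <stdbool.h>
#include <string.h>

#define MAX_BPF_STACK 512
#define BPF_REG_SIZE  8
#define STACK_INVALID 0

struct reg_state { int type; int imm; };   /* stand-in fields */

struct verifier_state_sketch {
	unsigned char stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};

bool stack_states_equal(const struct verifier_state_sketch *old,
			const struct verifier_state_sketch *cur)
{
	int i;

	for (i = 0; i < MAX_BPF_STACK; i++) {
		if (old->stack_slot_type[i] == STACK_INVALID)
			continue;               /* explored state never used this byte */
		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
			return false;           /* e.g. SPILL here vs MISC there */
		if (i % BPF_REG_SIZE)
			continue;               /* spilled reg compared once per 8-byte slot */
		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
			   &cur->spilled_regs[i / BPF_REG_SIZE],
			   sizeof(old->spilled_regs[0])))
			return false;           /* same slot type, different saved register */
	}
	return true;
}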
lib/test_bpf.c

@@ -1756,6 +1756,49 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 1 } }
 	},
+	{
+		"nmap reduced",
+		.u.insns_int = {
+			BPF_MOV64_REG(R6, R1),
+			BPF_LD_ABS(BPF_H, 12),
+			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28),
+			BPF_LD_ABS(BPF_H, 12),
+			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26),
+			BPF_MOV32_IMM(R0, 18),
+			BPF_STX_MEM(BPF_W, R10, R0, -64),
+			BPF_LDX_MEM(BPF_W, R7, R10, -64),
+			BPF_LD_IND(BPF_W, R7, 14),
+			BPF_STX_MEM(BPF_W, R10, R0, -60),
+			BPF_MOV32_IMM(R0, 280971478),
+			BPF_STX_MEM(BPF_W, R10, R0, -56),
+			BPF_LDX_MEM(BPF_W, R7, R10, -56),
+			BPF_LDX_MEM(BPF_W, R0, R10, -60),
+			BPF_ALU32_REG(BPF_SUB, R0, R7),
+			BPF_JMP_IMM(BPF_JNE, R0, 0, 15),
+			BPF_LD_ABS(BPF_H, 12),
+			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13),
+			BPF_MOV32_IMM(R0, 22),
+			BPF_STX_MEM(BPF_W, R10, R0, -56),
+			BPF_LDX_MEM(BPF_W, R7, R10, -56),
+			BPF_LD_IND(BPF_H, R7, 14),
+			BPF_STX_MEM(BPF_W, R10, R0, -52),
+			BPF_MOV32_IMM(R0, 17366),
+			BPF_STX_MEM(BPF_W, R10, R0, -48),
+			BPF_LDX_MEM(BPF_W, R7, R10, -48),
+			BPF_LDX_MEM(BPF_W, R0, R10, -52),
+			BPF_ALU32_REG(BPF_SUB, R0, R7),
+			BPF_JMP_IMM(BPF_JNE, R0, 0, 2),
+			BPF_MOV32_IMM(R0, 256),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
+		  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		  0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
+		{ { 38, 256 } }
+	},
 };
 
 static struct net_device dev;
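A note on how the "nmap reduced" test data lines up with the program, as an annotated rendering rather than part of the diff (it assumes lib/test_bpf.c's { data length, expected return value } convention for .test entries): the BPF_LD_ABS(BPF_H, 12) loads read the EtherType, and the BPF_LD_IND loads at R7 + 14 land on the trailing payload bytes.

/* illustrative annotation of the 38-byte test frame, not part of the diff */
const unsigned char nmap_test_frame[38] = {
	/* [0..11]  dst/src MAC, all zero, never read by the program */
	[12] = 0x08, [13] = 0x06,  /* EtherType 0x0806: BPF_LD_ABS(BPF_H, 12) == 0x806 */
	/* [14..31] zero padding */
	[32] = 0x10, [33] = 0xbf, [34] = 0x48, [35] = 0xd6,
	                           /* word at 18 + 14 = 32: 0x10bf48d6 == 280971478 */
	[36] = 0x43, [37] = 0xd6,  /* halfword at 22 + 14 = 36: 0x43d6 == 17366 */
};
/* every comparison matches, so { 38, 256 } expects return value 256 for this frame */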
samples/bpf/test_verifier.c

@@ -602,6 +602,45 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 	},
+	{
+		"jump test 5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
@@ -630,7 +669,7 @@ static int create_map(void)
 
 static int test(void)
 {
-	int prog_fd, i;
+	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct bpf_insn *prog = tests[i].insns;
@@ -657,21 +696,25 @@ static int test(void)
 			printf("FAIL\nfailed to load prog '%s'\n",
 			       strerror(errno));
 			printf("%s", bpf_log_buf);
+			err_cnt++;
 			goto fail;
 		}
 	} else {
 		if (prog_fd >= 0) {
 			printf("FAIL\nunexpected success to load\n");
 			printf("%s", bpf_log_buf);
+			err_cnt++;
 			goto fail;
 		}
 		if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
 			printf("FAIL\nunexpected error message: %s",
 			       bpf_log_buf);
+			err_cnt++;
 			goto fail;
 		}
 	}
 
+	pass_cnt++;
 	printf("OK\n");
 fail:
 	if (map_fd >= 0)
@@ -679,6 +722,7 @@ fail:
 		close(prog_fd);
 
 	}
+	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
 	return 0;
 }
 