Skip to content

Commit

Permalink
Merge branch 'bpf-next/master' into for-next
Browse files Browse the repository at this point in the history
Signed-off-by: Alexei Starovoitov <[email protected]>
  • Loading branch information
Alexei Starovoitov committed Dec 31, 2024
2 parents c2ce3bb + dfa94ce commit 69f0f83
Show file tree
Hide file tree
Showing 7 changed files with 226 additions and 69 deletions.
10 changes: 4 additions & 6 deletions kernel/bpf/arena.c
Original file line number Diff line number Diff line change
Expand Up @@ -218,7 +218,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
struct vma_list {
struct vm_area_struct *vma;
struct list_head head;
atomic_t mmap_count;
refcount_t mmap_count;
};

static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
Expand All @@ -228,7 +228,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
vml = kmalloc(sizeof(*vml), GFP_KERNEL);
if (!vml)
return -ENOMEM;
atomic_set(&vml->mmap_count, 1);
refcount_set(&vml->mmap_count, 1);
vma->vm_private_data = vml;
vml->vma = vma;
list_add(&vml->head, &arena->vma_list);
Expand All @@ -239,7 +239,7 @@ static void arena_vm_open(struct vm_area_struct *vma)
{
struct vma_list *vml = vma->vm_private_data;

atomic_inc(&vml->mmap_count);
refcount_inc(&vml->mmap_count);
}

static void arena_vm_close(struct vm_area_struct *vma)
Expand All @@ -248,7 +248,7 @@ static void arena_vm_close(struct vm_area_struct *vma)
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
struct vma_list *vml = vma->vm_private_data;

if (!atomic_dec_and_test(&vml->mmap_count))
if (!refcount_dec_and_test(&vml->mmap_count))
return;
guard(mutex)(&arena->lock);
/* update link list under lock */
Expand All @@ -257,8 +257,6 @@ static void arena_vm_close(struct vm_area_struct *vma)
kfree(vml);
}

#define MT_ENTRY ((void *)&arena_map_ops) /* unused. has to be valid pointer */

static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
{
struct bpf_map *map = vmf->vma->vm_file->private_data;
Expand Down
8 changes: 6 additions & 2 deletions kernel/bpf/bpf_local_storage.c
Original file line number Diff line number Diff line change
Expand Up @@ -841,8 +841,12 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
smap->elem_size = offsetof(struct bpf_local_storage_elem,
sdata.data[attr->value_size]);

smap->bpf_ma = bpf_ma;
if (bpf_ma) {
/* In PREEMPT_RT, kmalloc(GFP_ATOMIC) is still not safe in non
* preemptible context. Thus, enforce all storages to use
* bpf_mem_alloc when CONFIG_PREEMPT_RT is enabled.
*/
smap->bpf_ma = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : bpf_ma;
if (smap->bpf_ma) {
err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
if (err)
goto free_smap;
Expand Down
83 changes: 39 additions & 44 deletions kernel/bpf/verifier.c
Original file line number Diff line number Diff line change
Expand Up @@ -11739,6 +11739,9 @@ BTF_ID(func, bpf_rbtree_first)
#ifdef CONFIG_NET
BTF_ID(func, bpf_dynptr_from_skb)
BTF_ID(func, bpf_dynptr_from_xdp)
#else
BTF_ID_UNUSED
BTF_ID_UNUSED
#endif
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
Expand Down Expand Up @@ -14084,64 +14087,56 @@ static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
s32 smin_val = src_reg->s32_min_value;
u32 umin_val = src_reg->u32_min_value;
u32 umax_val = src_reg->u32_max_value;
s32 *dst_smin = &dst_reg->s32_min_value;
s32 *dst_smax = &dst_reg->s32_max_value;
u32 *dst_umin = &dst_reg->u32_min_value;
u32 *dst_umax = &dst_reg->u32_max_value;
s32 tmp_prod[4];

if (smin_val < 0 || dst_reg->s32_min_value < 0) {
/* Ain't nobody got time to multiply that sign */
__mark_reg32_unbounded(dst_reg);
return;
}
/* Both values are positive, so we can work with unsigned and
* copy the result to signed (unless it exceeds S32_MAX).
*/
if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
/* Potential overflow, we know nothing */
__mark_reg32_unbounded(dst_reg);
return;
if (check_mul_overflow(*dst_umax, src_reg->u32_max_value, dst_umax) ||
check_mul_overflow(*dst_umin, src_reg->u32_min_value, dst_umin)) {
/* Overflow possible, we know nothing */
*dst_umin = 0;
*dst_umax = U32_MAX;
}
dst_reg->u32_min_value *= umin_val;
dst_reg->u32_max_value *= umax_val;
if (dst_reg->u32_max_value > S32_MAX) {
if (check_mul_overflow(*dst_smin, src_reg->s32_min_value, &tmp_prod[0]) ||
check_mul_overflow(*dst_smin, src_reg->s32_max_value, &tmp_prod[1]) ||
check_mul_overflow(*dst_smax, src_reg->s32_min_value, &tmp_prod[2]) ||
check_mul_overflow(*dst_smax, src_reg->s32_max_value, &tmp_prod[3])) {
/* Overflow possible, we know nothing */
dst_reg->s32_min_value = S32_MIN;
dst_reg->s32_max_value = S32_MAX;
*dst_smin = S32_MIN;
*dst_smax = S32_MAX;
} else {
dst_reg->s32_min_value = dst_reg->u32_min_value;
dst_reg->s32_max_value = dst_reg->u32_max_value;
*dst_smin = min_array(tmp_prod, 4);
*dst_smax = max_array(tmp_prod, 4);
}
}

static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
s64 smin_val = src_reg->smin_value;
u64 umin_val = src_reg->umin_value;
u64 umax_val = src_reg->umax_value;
s64 *dst_smin = &dst_reg->smin_value;
s64 *dst_smax = &dst_reg->smax_value;
u64 *dst_umin = &dst_reg->umin_value;
u64 *dst_umax = &dst_reg->umax_value;
s64 tmp_prod[4];

if (smin_val < 0 || dst_reg->smin_value < 0) {
/* Ain't nobody got time to multiply that sign */
__mark_reg64_unbounded(dst_reg);
return;
}
/* Both values are positive, so we can work with unsigned and
* copy the result to signed (unless it exceeds S64_MAX).
*/
if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
/* Potential overflow, we know nothing */
__mark_reg64_unbounded(dst_reg);
return;
if (check_mul_overflow(*dst_umax, src_reg->umax_value, dst_umax) ||
check_mul_overflow(*dst_umin, src_reg->umin_value, dst_umin)) {
/* Overflow possible, we know nothing */
*dst_umin = 0;
*dst_umax = U64_MAX;
}
dst_reg->umin_value *= umin_val;
dst_reg->umax_value *= umax_val;
if (dst_reg->umax_value > S64_MAX) {
if (check_mul_overflow(*dst_smin, src_reg->smin_value, &tmp_prod[0]) ||
check_mul_overflow(*dst_smin, src_reg->smax_value, &tmp_prod[1]) ||
check_mul_overflow(*dst_smax, src_reg->smin_value, &tmp_prod[2]) ||
check_mul_overflow(*dst_smax, src_reg->smax_value, &tmp_prod[3])) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
*dst_smin = S64_MIN;
*dst_smax = S64_MAX;
} else {
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
*dst_smin = min_array(tmp_prod, 4);
*dst_smax = max_array(tmp_prod, 4);
}
}

Expand Down
14 changes: 13 additions & 1 deletion tools/lib/bpf/libbpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -1731,12 +1731,24 @@ static int sys_memfd_create(const char *name, unsigned flags)
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
#ifndef MFD_NOEXEC_SEAL
#define MFD_NOEXEC_SEAL 0x0008U
#endif

static int create_placeholder_fd(void)
{
unsigned int flags = MFD_CLOEXEC | MFD_NOEXEC_SEAL;
const char *name = "libbpf-placeholder-fd";
int fd;

fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
fd = ensure_good_fd(sys_memfd_create(name, flags));
if (fd >= 0)
return fd;
else if (errno != EINVAL)
return -errno;

/* Possibly running on kernel without MFD_NOEXEC_SEAL */
fd = ensure_good_fd(sys_memfd_create(name, flags & ~MFD_NOEXEC_SEAL));
if (fd < 0)
return -errno;
return fd;
Expand Down
134 changes: 134 additions & 0 deletions tools/testing/selftests/bpf/progs/verifier_bounds.c
Original file line number Diff line number Diff line change
Expand Up @@ -1200,4 +1200,138 @@ l0_%=: r0 = 0; \
: __clobber_all);
}

/* Verifier bounds test: 64-bit multiply of two negative ranges.
 * r6 is masked to [0, 15] then shifted down to [-1000000000, -999999985];
 * r7 is masked to [0, 15] then shifted down to [-2000000000, -1999999985].
 * The product of two negative ranges is positive; __msg pins the exact
 * bounds the verifier must derive for r6 after "r6 *= r7".
 * (__log_level(2) makes the verifier emit the per-insn state the __msg
 * pattern is matched against.)
 */
SEC("tc")
__description("multiply mixed sign bounds. test 1")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))")
__naked void mult_mixed0_sign(void)
{
	asm volatile (
	/* make r6 and r7 unknown scalars, then narrow their ranges */
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0xf;"
	"r6 -= 1000000000;"
	"r7 &= 0xf;"
	"r7 -= 2000000000;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}

/* Verifier bounds test: 64-bit multiply where one range crosses zero.
 * r6 is narrowed to [-10, 5] (mixed sign) and r7 to [-20, -5] (negative).
 * The candidate extreme products are {200, 50, -100, -25}; __msg checks
 * the verifier derives exactly smin=-100 (5 * -20) and smax=200
 * (-10 * -20) for r6 after the multiply.
 */
SEC("tc")
__description("multiply mixed sign bounds. test 2")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=smin32=-100,smax=smax32=200)")
__naked void mult_mixed1_sign(void)
{
	asm volatile (
	/* make r6 and r7 unknown scalars, then narrow their ranges */
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0xf;"
	"r6 -= 0xa;"
	"r7 &= 0xf;"
	"r7 -= 0x14;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}

/* Verifier bounds test: 64-bit multiply of two negative ranges built from
 * a register (r8 = 0x7fff) rather than immediates.  r6 is masked with 0xa
 * and r7 with 0xf, then both have r8 subtracted, giving small negative
 * ranges near -0x7fff.  The positive product tops out at
 * 0x7fff * 0x7fff = 0x3fff0001; __msg pins the exact signed/unsigned
 * bounds and the var_off tnum the verifier must compute.
 */
SEC("tc")
__description("multiply negative bounds")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))")
__naked void mult_sign_bounds(void)
{
	asm volatile (
	"r8 = 0x7fff;"
	/* make r6 and r7 unknown scalars, then narrow their ranges */
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0xa;"
	"r6 -= r8;"
	"r7 &= 0xf;"
	"r7 -= r8;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}

/* Verifier bounds test: multiply where the result stays within the
 * non-negative signed range.  r6 is masked to a subset of [0, 11] and
 * multiplied into the large positive constant in r8; the maximum product
 * (0xb3c3f8c99262687 * 11 = 0x7b96bb0a94a3a7cd) does not exceed S64_MAX,
 * so __msg checks the verifier keeps precise signed and unsigned bounds
 * for r8 instead of marking it unbounded.
 */
SEC("tc")
__description("multiply bounds that don't cross signed boundary")
__success __log_level(2)
__msg("r8 *= r6 {{.*}}; R6_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8_w=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))")
__naked void mult_no_sign_crossing(void)
{
	asm volatile (
	"r6 = 0xb;"
	"r8 = 0xb3c3f8c99262687 ll;"
	/* make r7 an unknown scalar and use it to narrow r6 to [0, 11] */
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= r7;"
	"r8 *= r6;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}

/* Verifier bounds test: unsigned multiplication overflow.
 * r6 is narrowed to [0, 0x7fffffff] (~2^31) and r7 to
 * [0, 0x7ffffffffff] (~2^43); the maximum product (~2^74) overflows
 * u64, so the verifier can conclude nothing about the result.
 * __msg checks r6 becomes a fully unbounded scalar().
 */
SEC("tc")
__description("multiplication overflow, result in unbounded reg. test 1")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6_w=scalar()")
__naked void mult_unsign_ovf(void)
{
	asm volatile (
	"r8 = 0x7ffffffffff ll;"
	/* make r6 and r7 unknown scalars, then narrow their ranges */
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0x7fffffff;"
	"r7 &= r8;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}

/* Verifier bounds test: signed multiplication overflow.
 * r6 is narrowed to [-0x7ffffffff, 0xa - 0x7ffffffff] (magnitude ~2^35)
 * and r7 to [0, 0x7fffffff] (~2^31); the most-negative product
 * (magnitude ~2^66) overflows s64, so no bounds can be kept.
 * __msg checks r6 becomes a fully unbounded scalar().
 */
SEC("tc")
__description("multiplication overflow, result in unbounded reg. test 2")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6_w=scalar()")
__naked void mult_sign_ovf(void)
{
	asm volatile (
	"r8 = 0x7ffffffff ll;"
	/* make r6 and r7 unknown scalars, then narrow their ranges */
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0xa;"
	"r6 -= r8;"
	"r7 &= 0x7fffffff;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}
char _license[] SEC("license") = "GPL";
Loading

0 comments on commit 69f0f83

Please sign in to comment.