Merge branch 'bpf-next/master' into for-next
Signed-off-by: Alexei Starovoitov <[email protected]>
Alexei Starovoitov committed Jan 9, 2025
2 parents 7bf1659 + e8ec1c9 commit 6e90b32
Showing 130 changed files with 3,869 additions and 1,874 deletions.
48 changes: 24 additions & 24 deletions arch/arm64/net/bpf_jit_comp.c
@@ -267,6 +267,19 @@ static bool is_addsub_imm(u32 imm)
 	return !(imm & ~0xfff) || !(imm & ~0xfff000);
 }
 
+static inline void emit_a64_add_i(const bool is64, const int dst, const int src,
+				  const int tmp, const s32 imm, struct jit_ctx *ctx)
+{
+	if (is_addsub_imm(imm)) {
+		emit(A64_ADD_I(is64, dst, src, imm), ctx);
+	} else if (is_addsub_imm(-imm)) {
+		emit(A64_SUB_I(is64, dst, src, -imm), ctx);
+	} else {
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_ADD(is64, dst, src, tmp), ctx);
+	}
+}
+
 /*
  * There are 3 types of AArch64 LDR/STR (immediate) instruction:
  * Post-index, Pre-index, Unsigned offset.
@@ -648,16 +661,13 @@ static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const s16 off = insn->off;
 	u8 reg = dst;
 
-	if (off || arena) {
-		if (off) {
-			emit_a64_mov_i(1, tmp, off, ctx);
-			emit(A64_ADD(1, tmp, tmp, dst), ctx);
-			reg = tmp;
-		}
-		if (arena) {
-			emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
-			reg = tmp;
-		}
-	}
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
+		reg = tmp;
+	}
+	if (arena) {
+		emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+		reg = tmp;
+	}
 
 	switch (insn->imm) {
@@ -723,7 +733,7 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const s32 imm = insn->imm;
 	const s16 off = insn->off;
 	const bool isdw = BPF_SIZE(code) == BPF_DW;
-	u8 reg;
+	u8 reg = dst;
 	s32 jmp_offset;
 
 	if (BPF_MODE(code) == BPF_PROBE_ATOMIC) {
@@ -732,11 +742,8 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		return -EINVAL;
 	}
 
-	if (!off) {
-		reg = dst;
-	} else {
-		emit_a64_mov_i(1, tmp, off, ctx);
-		emit(A64_ADD(1, tmp, tmp, dst), ctx);
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
 		reg = tmp;
 	}
 
@@ -1146,14 +1153,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	/* dst = dst OP imm */
 	case BPF_ALU | BPF_ADD | BPF_K:
 	case BPF_ALU64 | BPF_ADD | BPF_K:
-		if (is_addsub_imm(imm)) {
-			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
-		} else if (is_addsub_imm(-imm)) {
-			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
-		} else {
-			emit_a64_mov_i(is64, tmp, imm, ctx);
-			emit(A64_ADD(is64, dst, dst, tmp), ctx);
-		}
+		emit_a64_add_i(is64, dst, dst, tmp, imm, ctx);
 		break;
 	case BPF_ALU | BPF_SUB | BPF_K:
 	case BPF_ALU64 | BPF_SUB | BPF_K:
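The new emit_a64_add_i() helper centralizes the three-way immediate-encoding choice that the BPF_ADD | BPF_K case used to open-code: an AArch64 ADD/SUB (immediate) can only encode a 12-bit immediate, optionally shifted left by 12, which is exactly what is_addsub_imm() tests; any other value must first be materialized into a scratch register. A minimal userspace sketch of the classification (illustrative only; is_addsub_imm() is copied from the hunk above, the rest is not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from the hunk above: true if imm fits in bits [11:0]
 * or in bits [23:12] (the "LSL #12" form of ADD/SUB immediate). */
static bool is_addsub_imm(uint32_t imm)
{
	return !(imm & ~0xfffu) || !(imm & ~0xfff000u);
}

int main(void)
{
	const int32_t samples[] = { 1, 4095, 4096, 0x5a000, -16, 0x12345 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int32_t imm = samples[i];

		if (is_addsub_imm(imm))
			printf("%#x: single ADD (immediate)\n", (unsigned)imm);
		else if (is_addsub_imm(-imm))
			printf("%#x: single SUB (immediate)\n", (unsigned)imm);
		else
			printf("%#x: mov to tmp, then ADD (register)\n", (unsigned)imm);
	}
	return 0;
}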
17 changes: 17 additions & 0 deletions include/linux/bpf.h
@@ -2299,6 +2299,14 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
 struct bpf_map *bpf_map_get(u32 ufd);
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 
+/*
+ * The __bpf_map_get() and __btf_get_by_fd() functions parse a file
+ * descriptor and return a corresponding map or btf object.
+ * Their names are double underscored to emphasize the fact that they
+ * do not increase refcnt. To also increase refcnt use corresponding
+ * bpf_map_get() and btf_get_by_fd() functions.
+ */
+
 static inline struct bpf_map *__bpf_map_get(struct fd f)
 {
 	if (fd_empty(f))
@@ -2308,6 +2316,15 @@ static inline struct bpf_map *__bpf_map_get(struct fd f)
 	return fd_file(f)->private_data;
 }
 
+static inline struct btf *__btf_get_by_fd(struct fd f)
+{
+	if (fd_empty(f))
+		return ERR_PTR(-EBADF);
+	if (unlikely(fd_file(f)->f_op != &btf_fops))
+		return ERR_PTR(-EINVAL);
+	return fd_file(f)->private_data;
+}
+
 void bpf_map_inc(struct bpf_map *map);
 void bpf_map_inc_with_uref(struct bpf_map *map);
 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
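The refcount contract in the new comment is easy to get wrong, so here is a hedged sketch of a typical caller (illustrative only; do_work() is hypothetical, and CLASS(fd, ...) is the kernel's scoped fd guard):

/* Illustrative caller: __bpf_map_get() only borrows the map, so the
 * map is pinned solely by the fd reference held in f. */
static int with_map_from_fd(u32 ufd)
{
	CLASS(fd, f)(ufd);		/* fdget()/fdput() handled by scope */
	struct bpf_map *map = __bpf_map_get(f);

	if (IS_ERR(map))
		return PTR_ERR(map);

	bpf_map_inc(map);		/* now the map can outlive f */
	do_work(map);			/* hypothetical consumer */
	bpf_map_put(map);		/* drop the reference taken above */
	return 0;
}

When the reference should escape the fd scope anyway, bpf_map_get(ufd) bundles the lookup and the refcount bump into one call.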
26 changes: 16 additions & 10 deletions include/linux/bpf_verifier.h
@@ -233,6 +233,7 @@ enum bpf_stack_slot_type {
 	 */
 	STACK_DYNPTR,
 	STACK_ITER,
+	STACK_IRQ_FLAG,
 };
 
 #define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
@@ -254,8 +255,9 @@ struct bpf_reference_state {
 	 * default to pointer reference on zero initialization of a state.
 	 */
 	enum ref_state_type {
-		REF_TYPE_PTR = 0,
-		REF_TYPE_LOCK,
+		REF_TYPE_PTR = 1,
+		REF_TYPE_IRQ = 2,
+		REF_TYPE_LOCK = 3,
 	} type;
 	/* Track each reference created with a unique id, even if the same
 	 * instruction creates the reference multiple times (eg, via CALL).
@@ -315,9 +317,6 @@ struct bpf_func_state {
 	u32 callback_depth;
 
 	/* The following fields should be last. See copy_func_state() */
-	int acquired_refs;
-	int active_locks;
-	struct bpf_reference_state *refs;
 	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
 	 * (i.e. 8) bytes worth of stack memory.
 	 * stack[0] represents bytes [*(r10-8)..*(r10-1)]
@@ -370,6 +369,8 @@ struct bpf_verifier_state {
 	/* call stack tracking */
 	struct bpf_func_state *frame[MAX_CALL_FRAMES];
 	struct bpf_verifier_state *parent;
+	/* Acquired reference states */
+	struct bpf_reference_state *refs;
 	/*
 	 * 'branches' field is the number of branches left to explore:
 	 * 0 - all possible paths from this state reached bpf_exit or
@@ -419,9 +420,13 @@ struct bpf_verifier_state {
 	u32 insn_idx;
 	u32 curframe;
 
-	bool speculative;
+	u32 acquired_refs;
+	u32 active_locks;
+	u32 active_preempt_locks;
+	u32 active_irq_id;
 	bool active_rcu_lock;
-	u32 active_preempt_lock;
+
+	bool speculative;
 	/* If this state was ever pointed-to by other state's loop_entry field
 	 * this flag would be set to true. Used to avoid freeing such states
 	 * while they are still in use.
@@ -980,8 +985,9 @@ const char *dynptr_type_str(enum bpf_dynptr_type type);
 const char *iter_type_str(const struct btf *btf, u32 btf_id);
 const char *iter_state_str(enum bpf_iter_state state);
 
-void print_verifier_state(struct bpf_verifier_env *env,
-			  const struct bpf_func_state *state, bool print_all);
-void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
+void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+			  u32 frameno, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+		      u32 frameno);
 
 #endif /* _LINUX_BPF_VERIFIER_H */
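Taken together, these changes move reference bookkeeping (refs, acquired_refs, the lock counters, and the new active_irq_id) from the per-frame bpf_func_state into bpf_verifier_state itself. A hedged sketch of code written against the new layout (hypothetical helper, not part of this commit):

/* Illustrative only: with refs/acquired_refs on bpf_verifier_state,
 * a whole-state scan no longer needs to walk every call frame. */
static u32 count_refs_of_type(const struct bpf_verifier_state *st,
			      enum ref_state_type type)
{
	u32 i, n = 0;

	for (i = 0; i < st->acquired_refs; i++)
		if (st->refs[i].type == type)
			n++;
	return n;
}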
5 changes: 5 additions & 0 deletions include/linux/btf.h
@@ -353,6 +353,11 @@ static inline bool btf_type_is_scalar(const struct btf_type *t)
 	return btf_type_is_int(t) || btf_type_is_enum(t);
 }
 
+static inline bool btf_type_is_fwd(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+}
+
 static inline bool btf_type_is_typedef(const struct btf_type *t)
 {
 	return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
10 changes: 10 additions & 0 deletions include/uapi/linux/bpf.h
@@ -1573,6 +1573,16 @@ union bpf_attr {
 		 * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
 		 */
 		__s32		prog_token_fd;
+		/* The fd_array_cnt can be used to pass the length of the
+		 * fd_array array. In this case all the [map] file descriptors
+		 * passed in this array will be bound to the program, even if
+		 * the maps are not referenced directly. The functionality is
+		 * similar to the BPF_PROG_BIND_MAP syscall, but maps can be
+		 * used by the verifier during the program load. If provided,
+		 * then the fd_array[0,...,fd_array_cnt-1] is expected to be
+		 * continuous.
+		 */
+		__u32		fd_array_cnt;
 	};
 
 	struct { /* anonymous struct used by BPF_OBJ_* commands */
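A hedged userspace sketch of the new field in use at program load time (raw bpf(2) syscall; prog_type, instructions, and license handling are minimal, and map_fds is assumed to hold valid map FDs):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int prog_load_bound_maps(const struct bpf_insn *insns, __u32 insn_cnt,
				const int *map_fds, __u32 map_fd_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	/* Bind map_fds[0..map_fd_cnt-1] to the program, even maps the
	 * instructions never reference directly: */
	attr.fd_array = (__u64)(unsigned long)map_fds;
	attr.fd_array_cnt = map_fd_cnt;

	return (int)syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}

Without fd_array_cnt, fd_array keeps its pre-existing meaning and is only consulted to resolve maps that the instructions reference by index.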
16 changes: 9 additions & 7 deletions kernel/bpf/arena.c
@@ -138,7 +138,11 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
 	INIT_LIST_HEAD(&arena->vma_list);
 	bpf_map_init_from_attr(&arena->map, attr);
 	range_tree_init(&arena->rt);
-	range_tree_set(&arena->rt, 0, attr->max_entries);
+	err = range_tree_set(&arena->rt, 0, attr->max_entries);
+	if (err) {
+		bpf_map_area_free(arena);
+		goto err;
+	}
 	mutex_init(&arena->lock);
 
 	return &arena->map;
@@ -218,7 +222,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
 struct vma_list {
 	struct vm_area_struct *vma;
 	struct list_head head;
-	atomic_t mmap_count;
+	refcount_t mmap_count;
 };
 
 static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
@@ -228,7 +232,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
 	vml = kmalloc(sizeof(*vml), GFP_KERNEL);
 	if (!vml)
 		return -ENOMEM;
-	atomic_set(&vml->mmap_count, 1);
+	refcount_set(&vml->mmap_count, 1);
 	vma->vm_private_data = vml;
 	vml->vma = vma;
 	list_add(&vml->head, &arena->vma_list);
@@ -239,7 +243,7 @@ static void arena_vm_open(struct vm_area_struct *vma)
 {
 	struct vma_list *vml = vma->vm_private_data;
 
-	atomic_inc(&vml->mmap_count);
+	refcount_inc(&vml->mmap_count);
 }
 
 static void arena_vm_close(struct vm_area_struct *vma)
@@ -248,7 +252,7 @@ static void arena_vm_close(struct vm_area_struct *vma)
 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
 	struct vma_list *vml = vma->vm_private_data;
 
-	if (!atomic_dec_and_test(&vml->mmap_count))
+	if (!refcount_dec_and_test(&vml->mmap_count))
 		return;
 	guard(mutex)(&arena->lock);
 	/* update link list under lock */
@@ -257,8 +261,6 @@ static void arena_vm_close(struct vm_area_struct *vma)
 	kfree(vml);
 }
 
-#define MT_ENTRY ((void *)&arena_map_ops) /* unused. has to be valid pointer */
-
 static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
 {
 	struct bpf_map *map = vmf->vma->vm_file->private_data;
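Two separate fixes land here: arena_map_alloc() now handles range_tree_set() failure (it allocates internally and can fail under memory pressure), and mmap_count moves from atomic_t to refcount_t, which saturates and warns on over/underflow rather than silently wrapping. A hedged sketch of the refcount pattern in isolation (hypothetical object, not from this commit):

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object mirroring the vma_list lifecycle above. */
struct tracked {
	refcount_t users;
};

static struct tracked *tracked_new(void)
{
	struct tracked *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (t)
		refcount_set(&t->users, 1);	/* initial user */
	return t;
}

static void tracked_put(struct tracked *t)
{
	/* refcount_dec_and_test() WARNs on underflow instead of wrapping. */
	if (refcount_dec_and_test(&t->users))
		kfree(t);
}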
6 changes: 2 additions & 4 deletions kernel/bpf/arraymap.c
@@ -735,13 +735,13 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
 	u64 ret = 0;
 	void *val;
 
+	cant_migrate();
+
 	if (flags != 0)
 		return -EINVAL;
 
 	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 	array = container_of(map, struct bpf_array, map);
-	if (is_percpu)
-		migrate_disable();
 	for (i = 0; i < map->max_entries; i++) {
 		if (is_percpu)
 			val = this_cpu_ptr(array->pptrs[i]);
@@ -756,8 +756,6 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
 			break;
 	}
 
-	if (is_percpu)
-		migrate_enable();
 	return num_elems;
 }
 
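With the migrate_disable()/migrate_enable() pair hoisted out, bpf_for_each_array_elem() now asserts via cant_migrate() that the caller has already made the context non-migratable (per-CPU pointers would otherwise be unstable). A hedged sketch of a conforming caller (illustrative; the callback and wrapper are hypothetical):

/* Illustrative wrapper: the helper no longer disables migration
 * itself, so do it here if the calling context allows migration. */
static long walk_array_map(struct bpf_map *map, bpf_callback_t cb, void *ctx)
{
	long n;

	migrate_disable();	/* satisfies cant_migrate() in the helper */
	n = bpf_for_each_array_elem(map, cb, ctx, 0);
	migrate_enable();
	return n;
}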
15 changes: 7 additions & 8 deletions kernel/bpf/bpf_cgrp_storage.c
@@ -15,22 +15,20 @@ static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
 
 static void bpf_cgrp_storage_lock(void)
 {
-	migrate_disable();
+	cant_migrate();
 	this_cpu_inc(bpf_cgrp_storage_busy);
 }
 
 static void bpf_cgrp_storage_unlock(void)
 {
 	this_cpu_dec(bpf_cgrp_storage_busy);
-	migrate_enable();
 }
 
 static bool bpf_cgrp_storage_trylock(void)
 {
-	migrate_disable();
+	cant_migrate();
 	if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
 		this_cpu_dec(bpf_cgrp_storage_busy);
-		migrate_enable();
 		return false;
 	}
 	return true;
@@ -47,17 +45,18 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
 {
 	struct bpf_local_storage *local_storage;
 
+	migrate_disable();
 	rcu_read_lock();
 	local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
-	if (!local_storage) {
-		rcu_read_unlock();
-		return;
-	}
+	if (!local_storage)
+		goto out;
 
 	bpf_cgrp_storage_lock();
 	bpf_local_storage_destroy(local_storage);
 	bpf_cgrp_storage_unlock();
+out:
 	rcu_read_unlock();
+	migrate_enable();
 }
 
 static struct bpf_local_storage_data *
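The same convention applies here: the storage lock helpers now assert cant_migrate() instead of disabling migration themselves, and bpf_cgrp_storage_free() compensates by wrapping its RCU section in migrate_disable()/migrate_enable(). A hedged sketch of a caller of the trylock variant, which backs off when the per-CPU busy counter detects re-entry (illustrative; do_update() is hypothetical):

/* Illustrative caller: tracing-context code must give up rather than
 * spin when the per-CPU busy counter is already held. */
static int update_storage_traced(struct bpf_map *map, struct cgroup *cgrp)
{
	int err;

	migrate_disable();		/* required by cant_migrate() */
	if (!bpf_cgrp_storage_trylock()) {
		migrate_enable();
		return -EBUSY;		/* re-entered on this CPU */
	}
	err = do_update(map, cgrp);	/* hypothetical update step */
	bpf_cgrp_storage_unlock();
	migrate_enable();
	return err;
}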
9 changes: 5 additions & 4 deletions kernel/bpf/bpf_inode_storage.c
@@ -62,16 +62,17 @@ void bpf_inode_storage_free(struct inode *inode)
 	if (!bsb)
 		return;
 
+	migrate_disable();
 	rcu_read_lock();
 
 	local_storage = rcu_dereference(bsb->storage);
-	if (!local_storage) {
-		rcu_read_unlock();
-		return;
-	}
+	if (!local_storage)
+		goto out;
 
 	bpf_local_storage_destroy(local_storage);
+out:
 	rcu_read_unlock();
+	migrate_enable();
 }
 
 static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
