kernel: move current thread pointer management to core code
Define the generic _current directly and get rid of the generic
arch_current_thread().

The SMP default implementation is now known as z_smp_current_get().
It is no longer inlined, which saves significant binary size (about 10%
for some random test case I checked).

Introduce z_current_thread_set() and use it in place of
arch_current_thread_set() for updating the current thread pointer,
given that this is not necessarily an architecture-specific operation.
The architecture-specific optimization, when enabled, should only care
about its own state and not also have to update the generic
_current_cpu->current copy.

Signed-off-by: Nicolas Pitre <[email protected]>
Nicolas Pitre authored and kartben committed Jan 10, 2025
1 parent 46aa671 commit 7a3124d
Showing 9 changed files with 38 additions and 60 deletions.
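
Why un-inlining helps: with the old generic inline, every `_current` read on an SMP build expanded in place to an interrupt-lock/read/unlock sequence; after this commit each read is a single call into kernel/smp.c. A rough before/after sketch, assuming the removed generic inline had the same body as the new z_smp_current_get() shown in the last file of this diff (the two wrapper names below are illustrative only):

struct k_thread *current_read_before(void)	/* body inlined at every call site */
{
	unsigned int key = arch_irq_lock();
	struct k_thread *t = _current_cpu->current;

	arch_irq_unlock(key);
	return t;
}

struct k_thread *current_read_after(void)	/* one call instruction per use */
{
	return z_smp_current_get();
}
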
2 changes: 1 addition & 1 deletion arch/arm/core/cortex_m/thread.c
@@ -522,7 +522,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
 {
 	z_arm_prepare_switch_to_main();
 
-	arch_current_thread_set(main_thread);
+	z_current_thread_set(main_thread);
 
 #if defined(CONFIG_THREAD_LOCAL_STORAGE)
 	/* On Cortex-M, TLS uses a global variable as pointer to
4 changes: 2 additions & 2 deletions arch/posix/core/swap.c
@@ -50,7 +50,7 @@ int arch_swap(unsigned int key)
 		_current->callee_saved.thread_status;
 
 
-	arch_current_thread_set(_kernel.ready_q.cache);
+	z_current_thread_set(_kernel.ready_q.cache);
 #if CONFIG_INSTRUMENT_THREAD_SWITCHING
 	z_thread_mark_switched_in();
 #endif
@@ -94,7 +94,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
 	z_thread_mark_switched_out();
 #endif
 
-	arch_current_thread_set(_kernel.ready_q.cache);
+	z_current_thread_set(_kernel.ready_q.cache);
 
 #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
 	z_thread_mark_switched_in();
2 changes: 0 additions & 2 deletions include/zephyr/arch/arch_inlines.h
@@ -34,6 +34,4 @@
 #include <zephyr/arch/sparc/arch_inlines.h>
 #endif
 
-#include <zephyr/arch/common/arch_inlines.h>
-
 #endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */
45 changes: 0 additions & 45 deletions include/zephyr/arch/common/arch_inlines.h

This file was deleted.

7 changes: 3 additions & 4 deletions include/zephyr/arch/riscv/arch_inlines.h
@@ -28,13 +28,12 @@ static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
 }
 
 #ifdef CONFIG_RISCV_CURRENT_VIA_GP
+
 register struct k_thread *__arch_current_thread __asm__("gp");
+
 #define arch_current_thread() __arch_current_thread
-#define arch_current_thread_set(thread) \
-	do { \
-		__arch_current_thread = _current_cpu->current = (thread); \
-	} while (0)
+#define arch_current_thread_set(thread) ({ __arch_current_thread = (thread); })
 
 #endif /* CONFIG_RISCV_CURRENT_VIA_GP */
 
 static ALWAYS_INLINE unsigned int arch_num_cpus(void)
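
For context on the hunk above: CONFIG_RISCV_CURRENT_VIA_GP pins the current thread pointer in the `gp` register through a GNU global register variable, so arch_current_thread() is a plain register read; after this commit the arch setter updates only `gp`, and the generic _current_cpu->current copy is maintained by z_current_thread_set() in core code. A minimal standalone sketch of the register-variable mechanism, assuming GCC/Clang for RISC-V with `gp` reserved by the build (`cached_current` and the wrappers are illustrative names, not Zephyr symbols):

struct k_thread;

/* Bind a global register variable to gp (GNU C extension). */
register struct k_thread *cached_current __asm__("gp");

static inline struct k_thread *fast_current_get(void)
{
	return cached_current;	/* compiles to a single register move */
}

static inline void fast_current_set(struct k_thread *t)
{
	cached_current = t;	/* likewise: one move into gp */
}
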
16 changes: 14 additions & 2 deletions include/zephyr/kernel_structs.h
@@ -260,16 +260,28 @@ extern atomic_t _cpus_active;
  * another SMP CPU.
  */
 bool z_smp_cpu_mobile(void);
 
 #define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
 			arch_curr_cpu(); })
-#define _current arch_current_thread()
-
+
+struct k_thread *z_smp_current_get(void);
+#define _current z_smp_current_get()
+
 #else
 #define _current_cpu (&_kernel.cpus[0])
 #define _current _kernel.cpus[0].current
 #endif
 
+/* This is always invoked from a context where preemption is disabled */
+#define z_current_thread_set(thread) ({ _current_cpu->current = (thread); })
+
+#ifdef CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL
+#undef _current
+#define _current arch_current_thread()
+#undef z_current_thread_set
+#define z_current_thread_set(thread) \
+	arch_current_thread_set(({ _current_cpu->current = (thread); }))
+#endif
+
 /* kernel wait queue record */
 #ifdef CONFIG_WAITQ_SCALABLE
 
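
The override at the end of the hunk leans on GNU statement expressions: the inner ({ _current_cpu->current = (thread); }) both updates the generic copy and evaluates to the assigned pointer, which is then handed to arch_current_thread_set(), so the two copies stay in sync while `thread` is evaluated once. A toy demonstration of that evaluation order (all names below are illustrative, not Zephyr code):

#include <stdio.h>

struct thread { int id; };

static struct thread *generic_copy;	/* stands in for _current_cpu->current */
static struct thread *arch_copy;	/* stands in for the arch register/cache */

static void arch_set(struct thread *t) { arch_copy = t; }

/* Inner ({ ... }) yields the assigned value, forwarded to arch_set(). */
#define current_set(t) arch_set(({ generic_copy = (t); }))

int main(void)
{
	struct thread t0 = { .id = 42 };

	current_set(&t0);
	printf("%d %d\n", generic_copy->id, arch_copy->id);	/* prints: 42 42 */
	return 0;
}
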
4 changes: 2 additions & 2 deletions kernel/include/kswap.h
@@ -133,7 +133,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 #endif /* CONFIG_SMP */
 	z_thread_mark_switched_out();
 	z_sched_switch_spin(new_thread);
-	arch_current_thread_set(new_thread);
+	z_current_thread_set(new_thread);
 
 #ifdef CONFIG_TIMESLICING
 	z_reset_time_slice(new_thread);
@@ -259,6 +259,6 @@ static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
 	dummy_thread->base.slice_ticks = 0;
 #endif /* CONFIG_TIMESLICE_PER_THREAD */
 
-	arch_current_thread_set(dummy_thread);
+	z_current_thread_set(dummy_thread);
 }
 #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */
4 changes: 2 additions & 2 deletions kernel/sched.c
@@ -797,11 +797,11 @@ struct k_thread *z_swap_next_thread(void)
 }
 
 #ifdef CONFIG_USE_SWITCH
-/* Just a wrapper around arch_current_thread_set(xxx) with tracing */
+/* Just a wrapper around z_current_thread_set(xxx) with tracing */
 static inline void set_current(struct k_thread *new_thread)
 {
 	z_thread_mark_switched_out();
-	arch_current_thread_set(new_thread);
+	z_current_thread_set(new_thread);
 }
 
 /**
14 changes: 14 additions & 0 deletions kernel/smp.c
@@ -248,3 +248,17 @@ bool z_smp_cpu_mobile(void)
 	arch_irq_unlock(k);
 	return !pinned;
 }
+
+struct k_thread *z_smp_current_get(void)
+{
+	/*
+	 * _current is a field read from _current_cpu, which can race
+	 * with preemption before it is read. We must lock local
+	 * interrupts when reading it.
+	 */
+	unsigned int key = arch_irq_lock();
+	struct k_thread *t = _current_cpu->current;
+
+	arch_irq_unlock(key);
+	return t;
+}
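
The lock pairs the two steps that must not be separated by preemption: picking the per-CPU structure and reading its `current` field. Without it, a sequence like the following (illustrative only, not proposed code) could be migrated between the two loads and return another CPU's thread:

struct k_thread *racy_current_get(void)
{
	_cpu_t *cpu = arch_curr_cpu();	/* say this runs on CPU 0 */

	/* Preemption here may resume the thread on CPU 1, after which
	 * the load below yields CPU 0's current thread, not ours.
	 */
	return cpu->current;
}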
