author:    Michael J. Chudobiak <mjc@avtechpulse.com>  2016-04-25 10:00:44 -0400
committer: Michael J. Chudobiak <mjc@avtechpulse.com>  2016-04-25 10:00:44 -0400
commit:    a1df417e74aa6dae7352dc8cbb0ad471af5b7c69 (patch)
tree:      c34b2311e37ea31db153c90cb8f4570374d05e78 /linux/include/asm-generic/preempt.h
initial Olimex linux tree from Daniel, originally Feb 3, 2016
Diffstat (limited to 'linux/include/asm-generic/preempt.h')
-rw-r--r--  linux/include/asm-generic/preempt.h  90
1 file changed, 90 insertions, 0 deletions
diff --git a/linux/include/asm-generic/preempt.h b/linux/include/asm-generic/preempt.h
new file mode 100644
index 00000000..b6a53e8e
--- /dev/null
+++ b/linux/include/asm-generic/preempt.h
@@ -0,0 +1,90 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+#define PREEMPT_ENABLED	(0)
+
+static __always_inline int preempt_count(void)
+{
+	return current_thread_info()->preempt_count;
+}
+
+static __always_inline int *preempt_count_ptr(void)
+{
+	return &current_thread_info()->preempt_count;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+	*preempt_count_ptr() = pc;
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define init_task_preempt_count(p) do { \
+	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static __always_inline void set_preempt_need_resched(void)
+{
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+	return false;
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+	*preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+	*preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	/*
+	 * Because of load-store architectures cannot do per-cpu atomic
+	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
+	 * lost.
+	 */
+	return !--*preempt_count_ptr() && tif_need_resched();
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(int preempt_offset)
+{
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
+}
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern asmlinkage void preempt_schedule_context(void);
+#define __preempt_schedule_context() preempt_schedule_context()
+#endif
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
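
The file above supplies the generic, non-atomic preempt_count primitives; in the kernel proper these are reached through the preempt_disable()/preempt_enable() macros in <linux/preempt.h>, which pair an increment with a decrement-and-test so that a pending reschedule is acted on the moment the count returns to zero. As a rough illustration only, the userspace sketch below models that pairing with a hypothetical fake_thread_info struct and a plain need_resched flag standing in for current_thread_info() and TIF_NEED_RESCHED; it is not kernel code and the names are invented for this example.

/*
 * Hypothetical userspace model of the generic preempt_count helpers.
 * current_thread_info() is replaced by a static struct and
 * tif_need_resched() by a settable flag, purely to show how
 * __preempt_count_dec_and_test() combines the decrement with the
 * "need resched" check.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_thread_info {
	int preempt_count;	/* 0 means preemption is enabled */
	bool need_resched;	/* stands in for TIF_NEED_RESCHED */
};

static struct fake_thread_info ti = { .preempt_count = 0, .need_resched = false };

static void __preempt_count_add(int val) { ti.preempt_count += val; }
static void __preempt_count_sub(int val) { ti.preempt_count -= val; }

/* True when the count drops back to zero and a reschedule is pending. */
static bool __preempt_count_dec_and_test(void)
{
	return !--ti.preempt_count && ti.need_resched;
}

int main(void)
{
	__preempt_count_add(1);		/* models preempt_disable() */
	ti.need_resched = true;		/* a wakeup arrived meanwhile */

	/* models the tail of preempt_enable() */
	if (__preempt_count_dec_and_test())
		printf("count hit zero with work pending: would call __preempt_schedule()\n");
	else
		__preempt_count_sub(0);	/* nothing to do in this toy model */

	return 0;
}

Because this generic variant keeps preempt_count in thread_info rather than in a per-cpu variable, it cannot fold the "need resched" bit into the count itself, which is why __preempt_count_dec_and_test() has to consult tif_need_resched() separately, as the comment in the file notes.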