From c14a30468230c608731f36569bfd9785bb486131 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 23 Sep 2024 18:18:47 +0000 Subject: [PATCH 01/16] scripts: import more list macros Import list_is_first, list_is_last, list_replace, and list_replace_init. Signed-off-by: Sami Tolvanen Signed-off-by: Masahiro Yamada --- scripts/include/list.h | 50 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/scripts/include/list.h b/scripts/include/list.h index fea1e2b79063..8bdcaadca709 100644 --- a/scripts/include/list.h +++ b/scripts/include/list.h @@ -127,6 +127,36 @@ static inline void list_del(struct list_head *entry) entry->prev = LIST_POISON2; } +/** + * list_replace - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * If @old was empty, it will be overwritten. + */ +static inline void list_replace(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; +} + +/** + * list_replace_init - replace old entry by new one and initialize the old one + * @old : the element to be replaced + * @new : the new element to insert + * + * If @old was empty, it will be overwritten. + */ +static inline void list_replace_init(struct list_head *old, + struct list_head *new) +{ + list_replace(old, new); + INIT_LIST_HEAD(old); +} + /** * list_move - delete from one list and add as another's head * @list: the entry to move @@ -150,6 +180,26 @@ static inline void list_move_tail(struct list_head *list, list_add_tail(list, head); } +/** + * list_is_first -- tests whether @list is the first entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_first(const struct list_head *list, const struct list_head *head) +{ + return list->prev == head; } + +/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_last(const struct list_head *list, const struct list_head *head) +{ + return list->next == head; } + /** * list_is_head - tests whether @list is the list @head * @list: the entry to test -- 2.51.0 From d939881a15b13c028257471d8853d12d83686bcc Mon Sep 17 00:00:00 2001 From: Xu Yang Date: Wed, 25 Sep 2024 13:32:30 +0800 Subject: [PATCH 02/16] kbuild: fix a typo dt_binding_schema -> dt_binding_schemas If we follow "make help" and run "make dt_binding_schema", we will see the following error: $ make dt_binding_schema make[1]: *** No rule to make target 'dt_binding_schema'. Stop. make: *** [Makefile:224: __sub-make] Error 2 This is a typo, so fix it. 
Fixes: 604a57ba9781 ("dt-bindings: kbuild: Add separate target/dependency for processed-schema.json") Signed-off-by: Xu Yang Reviewed-by: Nicolas Schier Signed-off-by: Masahiro Yamada --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 187a4ce2728e..7bb521fae64a 100644 --- a/Makefile +++ b/Makefile @@ -1645,7 +1645,7 @@ help: echo '* dtbs - Build device tree blobs for enabled boards'; \ echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'; \ echo ' dt_binding_check - Validate device tree binding documents and examples'; \ - echo ' dt_binding_schema - Build processed device tree binding schemas'; \ + echo ' dt_binding_schemas - Build processed device tree binding schemas'; \ echo ' dtbs_check - Validate device tree source files';\ echo '') -- 2.51.0 From 82cb44308951ad4ce7a8500b9e025d27d7fb3526 Mon Sep 17 00:00:00 2001 From: Aaron Thompson Date: Fri, 4 Oct 2024 07:52:45 +0000 Subject: [PATCH 03/16] kbuild: deb-pkg: Remove blank first line from maint scripts The blank line causes execve() to fail: # strace ./postinst execve("./postinst", ...) = -1 ENOEXEC (Exec format error) strace: exec: Exec format error +++ exited with 1 +++ However running the scripts via shell does work (at least with bash) because the shell attempts to execute the file as a shell script when execve() fails. Fixes: b611daae5efc ("kbuild: deb-pkg: split image and debug objects staging out into functions") Signed-off-by: Aaron Thompson Reviewed-by: Nathan Chancellor Reviewed-by: Nicolas Schier Signed-off-by: Masahiro Yamada --- scripts/package/builddeb | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/package/builddeb b/scripts/package/builddeb index c1757db6aa8a..404587fc71fe 100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -74,7 +74,6 @@ install_linux_image () { mkdir -p "${pdir}/DEBIAN" cat <<-EOF > "${pdir}/DEBIAN/${script}" - #!/bin/sh set -e -- 2.51.0 From 8cf0b93919e13d1e8d4466eb4080a4c4d9d66d7b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 6 Oct 2024 15:32:27 -0700 Subject: [PATCH 04/16] Linux 6.12-rc2 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7bb521fae64a..c5493c0c0ca1 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 12 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Baby Opossum Posse # *DOCUMENTATION* -- 2.51.0 From 2382d68d7d43873ba856baf567cab0d5c523f23b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 25 Sep 2024 15:31:38 +1000 Subject: [PATCH 05/16] sched: change wake_up_bit() and related function to expect unsigned long * wake_up_bit() currently allows a "void *". While this isn't strictly a problem as the address is never dereferenced, it is inconsistent with the corresponding wait_on_bit() which requires "unsigned long *" and does dereference the pointer. Any code that needs to wait for a change in something other than an unsigned long would be better served by wake_up_var()/wait_var_event(). This patch changes all related "void *" to "unsigned long *". 
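As a minimal sketch of the effect on callers (the flags word and bit name below are hypothetical, for illustration only, not taken from this patch), a waiter must now pass a genuine unsigned long *:

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

/* Hypothetical example state, not part of this series. */
static unsigned long my_flags;
#define MY_BUSY 0	/* hypothetical bit number */

static int wait_until_idle(void)
{
	/* Accepted because &my_flags is an unsigned long *; an
	 * arbitrary void * would no longer compile. */
	return wait_on_bit(&my_flags, MY_BUSY, TASK_UNINTERRUPTIBLE);
}

static void mark_idle(void)
{
	clear_bit(MY_BUSY, &my_flags);
	/* clear_bit() is not fully ordered, so pair it with the
	 * barrier that wake_up_bit() requires. */
	smp_mb__after_atomic();
	wake_up_bit(&my_flags, MY_BUSY);
}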
Reported-by: Linus Torvalds Signed-off-by: NeilBrown Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240925053405.3960701-2-neilb@suse.de --- include/linux/wait_bit.h | 16 ++++++++-------- kernel/sched/wait_bit.c | 12 ++++++------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 7725b7579b78..48e123839892 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -8,7 +8,7 @@ #include struct wait_bit_key { - void *flags; + unsigned long *flags; int bit_nr; unsigned long timeout; }; @@ -23,14 +23,14 @@ struct wait_bit_queue_entry { typedef int wait_bit_action_f(struct wait_bit_key *key, int mode); -void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit); +void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit); int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); -void wake_up_bit(void *word, int bit); -int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode); -int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); -int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode); -struct wait_queue_head *bit_waitqueue(void *word, int bit); +void wake_up_bit(unsigned long *word, int bit); +int out_of_line_wait_on_bit(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode); +int out_of_line_wait_on_bit_timeout(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); +int out_of_line_wait_on_bit_lock(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode); +struct wait_queue_head *bit_waitqueue(unsigned long *word, int bit); extern void __init wait_bit_init(void); int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); @@ -327,7 +327,7 @@ do { \ * You can use this helper if bitflags are manipulated atomically rather than * non-atomically under a lock. */ -static inline void clear_and_wake_up_bit(int bit, void *word) +static inline void clear_and_wake_up_bit(int bit, unsigned long *word) { clear_bit_unlock(bit, word); /* See wake_up_bit() for which memory barrier you need to use. */ diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c index 134d7112ef71..058b0e18727e 100644 --- a/kernel/sched/wait_bit.c +++ b/kernel/sched/wait_bit.c @@ -9,7 +9,7 @@ static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned; -wait_queue_head_t *bit_waitqueue(void *word, int bit) +wait_queue_head_t *bit_waitqueue(unsigned long *word, int bit) { const int shift = BITS_PER_LONG == 32 ? 
5 : 6; unsigned long val = (unsigned long)word << shift | bit; @@ -55,7 +55,7 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_ } EXPORT_SYMBOL(__wait_on_bit); -int __sched out_of_line_wait_on_bit(void *word, int bit, +int __sched out_of_line_wait_on_bit(unsigned long *word, int bit, wait_bit_action_f *action, unsigned mode) { struct wait_queue_head *wq_head = bit_waitqueue(word, bit); @@ -66,7 +66,7 @@ int __sched out_of_line_wait_on_bit(void *word, int bit, EXPORT_SYMBOL(out_of_line_wait_on_bit); int __sched out_of_line_wait_on_bit_timeout( - void *word, int bit, wait_bit_action_f *action, + unsigned long *word, int bit, wait_bit_action_f *action, unsigned mode, unsigned long timeout) { struct wait_queue_head *wq_head = bit_waitqueue(word, bit); @@ -108,7 +108,7 @@ __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry } EXPORT_SYMBOL(__wait_on_bit_lock); -int __sched out_of_line_wait_on_bit_lock(void *word, int bit, +int __sched out_of_line_wait_on_bit_lock(unsigned long *word, int bit, wait_bit_action_f *action, unsigned mode) { struct wait_queue_head *wq_head = bit_waitqueue(word, bit); @@ -118,7 +118,7 @@ int __sched out_of_line_wait_on_bit_lock(void *word, int bit, EXPORT_SYMBOL(out_of_line_wait_on_bit_lock); -void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit) +void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit) { struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit); @@ -144,7 +144,7 @@ EXPORT_SYMBOL(__wake_up_bit); * may need to use a less regular barrier, such fs/inode.c's smp_mb(), * because spin_unlock() does not guarantee a memory barrier. */ -void wake_up_bit(void *word, int bit) +void wake_up_bit(unsigned long *word, int bit) { __wake_up_bit(bit_waitqueue(word, bit), word, bit); } -- 2.51.0 From 3cdee6b359f134da22f7fd4606e0338413cfd79e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 25 Sep 2024 15:31:39 +1000 Subject: [PATCH 06/16] sched: Improve documentation for wake_up_bit/wait_on_bit family of functions This patch revises the documentation for wake_up_bit(), clear_and_wake_up_bit(), and the whole wait_on_bit() family of functions. The new documentation places less emphasis on the pool of waitqueues used (an implementation detail) and focuses instead on details of how the functions behave. The barriers included in the wait functions and clear_and_wake_up_bit(), and those required for wake_up_bit(), are spelled out more clearly. The error statuses returned are given explicitly. The fact that the wait_on_bit_lock() function sets the bit is made more obvious. 
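As a brief illustration of the semantics these comments now spell out (a sketch with hypothetical names, not code added by this patch):

/* Waiter: returns 0 with ACQUIRE semantics once the bit is clear,
 * or -EINTR if a signal arrives and TASK_KILLABLE permits waking. */
err = wait_on_bit(&obj->flags, OBJ_DIRTY, TASK_KILLABLE);

/* Waker: clear_and_wake_up_bit() provides the RELEASE semantics and
 * the barrier that a bare clear_bit() + wake_up_bit() pair would
 * otherwise have to supply explicitly. */
clear_and_wake_up_bit(OBJ_DIRTY, &obj->flags);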
Signed-off-by: NeilBrown Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240925053405.3960701-3-neilb@suse.de --- include/linux/wait_bit.h | 159 +++++++++++++++++++++------------------ kernel/sched/wait_bit.c | 34 +++++---- 2 files changed, 107 insertions(+), 86 deletions(-) diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 48e123839892..723e7bf35747 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -53,19 +53,21 @@ extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode); /** * wait_on_bit - wait for a bit to be cleared - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * @mode: the task state to sleep in * - * There is a standard hashed waitqueue table for generic use. This - * is the part of the hashtable's accessor API that waits on a bit. - * For instance, if one were to have waiters on a bitflag, one would - * call wait_on_bit() in threads waiting for the bit to clear. - * One uses wait_on_bit() where one is waiting for the bit to clear, - * but has no intention of setting it. - * Returned value will be zero if the bit was cleared, or non-zero - * if the process received a signal and the mode permitted wakeup - * on that signal. + * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP()) + * to be cleared. The clearing of the bit must be signalled with + * wake_up_bit(), often as clear_and_wake_up_bit(). + * + * The process will wait on a waitqueue selected by hash from a shared + * pool. It will only be woken on a wake_up for the target bit, even + * if other processes on the same queue are waiting for other bits. + * + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or %-EINTR if the process received a + * signal and the mode permitted wake up on that signal. */ static inline int wait_on_bit(unsigned long *word, int bit, unsigned mode) @@ -80,17 +82,20 @@ wait_on_bit(unsigned long *word, int bit, unsigned mode) /** * wait_on_bit_io - wait for a bit to be cleared - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared. This is similar to wait_on_bit(), but calls - * io_schedule() instead of schedule() for the actual waiting. + * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP()) + * to be cleared. The clearing of the bit must be signalled with + * wake_up_bit(), often as clear_and_wake_up_bit(). + * + * This is similar to wait_on_bit(), but calls io_schedule() instead of + * schedule() for the actual waiting. * - * Returned value will be zero if the bit was cleared, or non-zero - * if the process received a signal and the mode permitted wakeup - * on that signal. + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or %-EINTR if the process received a + * signal and the mode permitted wake up on that signal. 
*/ static inline int wait_on_bit_io(unsigned long *word, int bit, unsigned mode) @@ -104,19 +109,24 @@ wait_on_bit_io(unsigned long *word, int bit, unsigned mode) } /** - * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_timeout - wait for a bit to be cleared or a timeout to elapse + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * @mode: the task state to sleep in * @timeout: timeout, in jiffies * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared. This is similar to wait_on_bit(), except also takes a - * timeout parameter. + * Wait for the given bit in an unsigned long or bitmap (see + * DECLARE_BITMAP()) to be cleared, or for a timeout to expire. The + * clearing of the bit must be signalled with wake_up_bit(), often as + * clear_and_wake_up_bit(). * - * Returned value will be zero if the bit was cleared before the - * @timeout elapsed, or non-zero if the @timeout elapsed or process - * received a signal and the mode permitted wakeup on that signal. + * This is similar to wait_on_bit(), except it also takes a timeout + * parameter. + * + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or %-EINTR if the process received a + * signal and the mode permitted wake up on that signal, or %-EAGAIN if the + * timeout elapsed. */ static inline int wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, @@ -132,19 +142,21 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, /** * wait_on_bit_action - wait for a bit to be cleared - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * @word: the address containing the bit waited on + * @bit: the bit at that address being waited on * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared, and allow the waiting action to be specified. - * This is like wait_on_bit() but allows fine control of how the waiting - * is done. + * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP()) + * to be cleared. The clearing of the bit must be signalled with + * wake_up_bit(), often as clear_and_wake_up_bit(). + * + * This is similar to wait_on_bit(), but calls @action() instead of + * schedule() for the actual waiting. * - * Returned value will be zero if the bit was cleared, or non-zero - * if the process received a signal and the mode permitted wakeup - * on that signal. + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or the error code returned by @action if + * that call returned non-zero. 
*/ static inline int wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action, @@ -157,23 +169,22 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action, } /** - * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_lock - wait for a bit to be cleared, then set it + * @word: the address containing the bit being waited on + * @bit: the bit of the word being waited on and set * @mode: the task state to sleep in * - * There is a standard hashed waitqueue table for generic use. This - * is the part of the hashtable's accessor API that waits on a bit - * when one intends to set it, for instance, trying to lock bitflags. - * For instance, if one were to have waiters trying to set bitflag - * and waiting for it to clear before setting it, one would call - * wait_on_bit() in threads waiting to be able to set the bit. - * One uses wait_on_bit_lock() where one is waiting for the bit to - * clear with the intention of setting it, and when done, clearing it. + * Wait for the given bit in an unsigned long or bitmap (see + * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be + * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As + * soon as it is clear, atomically set it and return. * - * Returns zero if the bit was (eventually) found to be clear and was - * set. Returns non-zero if a signal was delivered to the process and - * the @mode allows that signal to wake the process. + * This is similar to wait_on_bit(), but sets the bit before returning. + * + * Returned value will be zero if the bit was successfully set in which + * case the call has the same memory sequencing semantics as + * test_and_clear_bit(), or %-EINTR if the process received a signal and + * the mode permitted wake up on that signal. */ static inline int wait_on_bit_lock(unsigned long *word, int bit, unsigned mode) @@ -185,15 +196,18 @@ wait_on_bit_lock(unsigned long *word, int bit, unsigned mode) } /** - * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_lock_io - wait for a bit to be cleared, then set it + * @word: the address containing the bit being waited on + * @bit: the bit of the word being waited on and set * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared and then to atomically set it. This is similar - * to wait_on_bit(), but calls io_schedule() instead of schedule() - * for the actual waiting. + * Wait for the given bit in an unsigned long or bitmap (see + * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be + * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As + * soon as it is clear, atomically set it and return. + * + * This is similar to wait_on_bit_lock(), but calls io_schedule() instead + * of schedule(). * * Returns zero if the bit was (eventually) found to be clear and was * set. 
Returns non-zero if a signal was delivered to the process and @@ -209,21 +223,19 @@ wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode) } /** - * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_lock_action - wait for a bit to be cleared, then set it + * @word: the address containing the bit being waited on + * @bit: the bit of the word being waited on and set * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared and then to set it, and allow the waiting action - * to be specified. - * This is like wait_on_bit() but allows fine control of how the waiting - * is done. + * This is similar to wait_on_bit_lock(), but calls @action() instead of + * schedule() for the actual waiting. * - * Returns zero if the bit was (eventually) found to be clear and was - * set. Returns non-zero if a signal was delivered to the process and - * the @mode allows that signal to wake the process. + * Returned value will be zero if the bit was successfully set in which + * case the call has the same memory sequencing semantics as + * test_and_clear_bit(), or the error code returned by @action if that + * call returned non-zero. */ static inline int wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action, @@ -320,12 +332,13 @@ do { \ /** * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit - * * @bit: the bit of the word being waited on - * @word: the word being waited on, a kernel virtual address + * @word: the address containing the bit being waited on * - * You can use this helper if bitflags are manipulated atomically rather than - * non-atomically under a lock. + * The designated bit is cleared and any tasks waiting in wait_on_bit() + * or similar will be woken. This call has RELEASE semantics so that + * any changes to memory made before this call are guaranteed to be visible + * after the corresponding wait_on_bit() completes. */ static inline void clear_and_wake_up_bit(int bit, unsigned long *word) { diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c index 058b0e18727e..bd2fc750fb1f 100644 --- a/kernel/sched/wait_bit.c +++ b/kernel/sched/wait_bit.c @@ -128,21 +128,29 @@ void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit EXPORT_SYMBOL(__wake_up_bit); /** - * wake_up_bit - wake up a waiter on a bit - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wake_up_bit - wake up waiters on a bit + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * - * There is a standard hashed waitqueue table for generic use. This - * is the part of the hash-table's accessor API that wakes up waiters - * on a bit. For instance, if one were to have waiters on a bitflag, - * one would call wake_up_bit() after clearing the bit. + * Wake up any process waiting in wait_on_bit() or similar for the + * given bit to be cleared. * - * In order for this to function properly, as it uses waitqueue_active() - * internally, some kind of memory barrier must be done prior to calling - * this. 
Typically, this will be smp_mb__after_atomic(), but in some - * cases where bitflags are manipulated non-atomically under a lock, one - * may need to use a less regular barrier, such fs/inode.c's smp_mb(), - * because spin_unlock() does not guarantee a memory barrier. + * The wake-up is sent to tasks in a waitqueue selected by hash from a + * shared pool. Only those tasks on that queue which have requested + * wake_up on this specific address and bit will be woken, and only if the + * bit is clear. + * + * In order for this to function properly there must be a full memory + * barrier after the bit is cleared and before this function is called. + * If the bit was cleared atomically, such as by clear_bit(), then + * smp_mb__after_atomic() can be used, otherwise smp_mb() is needed. + * If the bit was cleared with a fully-ordered operation, no further + * barrier is required. + * + * Normally the bit should be cleared by an operation with RELEASE + * semantics so that any changes to memory made before the bit is + * cleared are guaranteed to be visible after the matching wait_on_bit() + * completes. */ void wake_up_bit(unsigned long *word, int bit) { -- 2.51.0 From bf39882edc798279765ca31751f6e679b50b97ef Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 25 Sep 2024 15:31:40 +1000 Subject: [PATCH 07/16] sched: Document wait_var_event() family of functions and wake_up_var() wake_up_var(), wait_var_event() and related interfaces are not documented but have important ordering requirements. This patch adds documentation and makes these requirements explicit. The return values for those wait_var_event_* functions which return a value are documented. Note that these are, perhaps surprisingly, sometimes different from comparable wait_on_bit() functions. Signed-off-by: NeilBrown Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240925053405.3960701-4-neilb@suse.de --- include/linux/wait_bit.h | 71 ++++++++++++++++++++++++++++++++++++++++ kernel/sched/wait_bit.c | 30 +++++++++++++++++ 2 files changed, 101 insertions(+) diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 723e7bf35747..06ec99b90bf3 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -282,6 +282,22 @@ __out: __ret; \ ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ schedule()) +/** + * wait_var_event - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for a @condition to be true, only re-checking when a wake up is + * received for the given @var (an arbitrary kernel address which need + * not be directly related to the given condition, but usually is). + * + * The process will wait on a waitqueue selected by hash from a shared + * pool. It will only be woken on a wake_up for the given address. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. 
+ */ #define wait_var_event(var, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __wait_var_event(var, condition); \ } while (0) @@ -294,6 +310,24 @@ do { \ ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \ schedule()) +/** + * wait_var_event_killable - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for a @condition to be true or a fatal signal to be received, + * only re-checking the condition when a wake up is received for the given + * @var (an arbitrary kernel address which need not be directly related + * to the given condition, but usually is). + * + * This is similar to wait_var_event() but returns a value which is + * 0 if the condition became true, or %-ERESTARTSYS if a fatal signal + * was received. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ #define wait_var_event_killable(var, condition) \ ({ \ int __ret = 0; \ @@ -308,6 +342,26 @@ do { \ TASK_UNINTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) +/** + * wait_var_event_timeout - wait for a variable to be updated or a timeout to expire + * @var: the address of variable being waited on + * @condition: the condition to wait for + * @timeout: maximum time to wait in jiffies + * + * Wait for a @condition to be true or a timeout to expire, only + * re-checking the condition when a wake up is received for the given + * @var (an arbitrary kernel address which need not be directly related + * to the given condition, but usually is). + * + * This is similar to wait_var_event() but returns a value which is 0 if + * the timeout expired and the condition was still false, or the + * remaining time left in the timeout (but at least 1) if the condition + * was found to be true. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ #define wait_var_event_timeout(var, condition, timeout) \ ({ \ long __ret = timeout; \ @@ -321,6 +375,23 @@ do { \ ___wait_var_event(var, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) +/** + * wait_var_event_interruptible - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for a @condition to be true or a signal to be received, only + * re-checking the condition when a wake up is received for the given + * @var (an arbitrary kernel address which need not be directly related + * to the given condition, but usually is). + * + * This is similar to wait_var_event() but returns a value which is 0 if + * the condition became true, or %-ERESTARTSYS if a signal was received. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. 
+ */ #define wait_var_event_interruptible(var, condition) \ ({ \ int __ret = 0; \ diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c index bd2fc750fb1f..22ec270f5ab5 100644 --- a/kernel/sched/wait_bit.c +++ b/kernel/sched/wait_bit.c @@ -196,6 +196,36 @@ void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int } EXPORT_SYMBOL(init_wait_var_entry); +/** + * wake_up_var - wake up waiters on a variable (kernel address) + * @var: the address of the variable being waited on + * + * Wake up any process waiting in wait_var_event() or similar for the + * given variable to change. wait_var_event() can be waiting for an + * arbitrary condition to be true and associates that condition with an + * address. Calling wake_up_var() suggests that the condition has been + * made true, but does not strictly require the condition to use the + * address given. + * + * The wake-up is sent to tasks in a waitqueue selected by hash from a + * shared pool. Only those tasks on that queue which have requested + * wake_up on this specific address will be woken. + * + * In order for this to function properly there must be a full memory + * barrier after the variable is updated (or more accurately, after the + * condition waited on has been made to be true) and before this function + * is called. If the variable was updated atomically, such as by + * atomic_dec(), then smp_mb__after_atomic() can be used. If the + * variable was updated by a fully ordered operation such as + * atomic_dec_and_test() then no extra barrier is required. Otherwise + * smp_mb() is needed. + * + * Normally the variable should be updated (the condition should be made + * to be true) by an operation with RELEASE semantics such as + * smp_store_release() so that any changes to memory made before the + * variable was updated are guaranteed to be visible after the matching + * wait_var_event() completes. + */ void wake_up_var(void *var) { __wake_up_bit(__var_waitqueue(var), var, -1); -- 2.51.0 From 52d633def56c10fe3e82a2c5d88c3ecb3f4e4852 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 25 Sep 2024 15:31:41 +1000 Subject: [PATCH 08/16] sched: Add test_and_clear_wake_up_bit() and atomic_dec_and_wake_up() There are common patterns in the kernel of using test_and_clear_bit() before wake_up_bit(), and atomic_dec_and_test() before wake_up_var(). These combinations don't need extra barriers but sometimes include them unnecessarily. To help avoid the unnecessary barriers and to help discourage the general use of wake_up_bit/var (which is a fragile interface), introduce two combined functions which implement these patterns. Also add store_release_wake_up() which supports the task of simply setting a non-atomic variable and sending a wakeup. This pattern requires barriers which are often omitted. 
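A rough before/after sketch of the patterns these helpers replace (the flag, variable, and cleanup names here are hypothetical, for illustration only):

/* Before: open-coded pairing; the barrier reasoning is easy to get wrong. */
if (test_and_clear_bit(MY_BIT, &flags))
	wake_up_bit(&flags, MY_BIT);	/* no extra barrier needed here */

/* After: one call with the full ordering of test_and_clear_bit(). */
test_and_clear_wake_up_bit(MY_BIT, &flags);

/* Likewise for a refcount-style atomic_t: */
if (atomic_dec_and_wake_up(&obj->users))
	release_obj(obj);		/* hypothetical cleanup */

/* And for a plain variable, with the often-omitted barriers included: */
store_release_wake_up(&obj->done, 1);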
Signed-off-by: NeilBrown Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240925053405.3960701-5-neilb@suse.de --- include/linux/wait_bit.h | 60 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 06ec99b90bf3..0272629b590a 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -419,4 +419,64 @@ static inline void clear_and_wake_up_bit(int bit, unsigned long *word) wake_up_bit(word, bit); } +/** + * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit + * @bit: the bit of the word being waited on + * @word: the address of memory containing that bit + * + * If the bit is set and can be atomically cleared, any tasks waiting in + * wait_on_bit() or similar will be woken. This call has the same + * complete ordering semantics as test_and_clear_bit(). Any changes to + * memory made before this call are guaranteed to be visible after the + * corresponding wait_on_bit() completes. + * + * Returns %true if the bit was successfully cleared and the wake up was sent. + */ +static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word) +{ + if (!test_and_clear_bit(bit, word)) + return false; + /* no extra barrier required */ + wake_up_bit(word, bit); + return true; +} + +/** + * atomic_dec_and_wake_up - decrement an atomic_t and, if zero, wake up waiters + * @var: the variable to dec and test + * + * Decrements the atomic variable and, if it reaches zero, sends a wake_up to any + * processes waiting on the variable. + * + * This function has the same complete ordering semantics as atomic_dec_and_test(). + * + * Returns %true if the variable reached zero and the wake up was sent. + */ + +static inline bool atomic_dec_and_wake_up(atomic_t *var) +{ + if (!atomic_dec_and_test(var)) + return false; + /* No extra barrier required */ + wake_up_var(var); + return true; +} + +/** + * store_release_wake_up - update a variable and send a wake_up + * @var: the address of the variable to be updated and woken + * @val: the value to store in the variable. + * + * Store the given value in the variable and send a wake up to any tasks + * waiting on the variable. All necessary barriers are included to ensure + * the task calling wait_var_event() sees the new value and all values + * written to memory before this call. + */ +#define store_release_wake_up(var, val) \ +do { \ + smp_store_release(var, val); \ + smp_mb(); \ + wake_up_var(var); \ +} while (0) + #endif /* _LINUX_WAIT_BIT_H */ -- 2.51.0 From cc2e1c82d7e474753681a38b07b63034e107e369 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 25 Sep 2024 15:31:42 +1000 Subject: [PATCH 09/16] sched: Add wait/wake interface for variable updated under a lock. Sometimes we need to wait for a condition to be true which must be tested while holding a lock. Correspondingly, the condition is made true while holding the lock and the wake up is sent under the lock. This patch provides wake and wait interfaces which can be used for this situation when the lock is a mutex or a spinlock, or any other lock for which there are foo_lock() and foo_unlock() functions. 
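As a sketch of the intended usage (assuming a hypothetical structure with a spinlock-protected counter, not an interface taken from a real user):

/* Waiter: called with obj->lock held; the lock is dropped around
 * each sleep and reacquired before the condition is rechecked. */
spin_lock(&obj->lock);
wait_var_event_spinlock(&obj->users, obj->users == 0, &obj->lock);
/* obj->lock is held again here and obj->users is zero. */
spin_unlock(&obj->lock);

/* Updater: make the condition true and wake under the same lock;
 * no extra barriers are needed. */
spin_lock(&obj->lock);
if (--obj->users == 0)
	wake_up_var_locked(&obj->users, &obj->lock);
spin_unlock(&obj->lock);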
Signed-off-by: NeilBrown Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240925053405.3960701-6-neilb@suse.de --- include/linux/wait_bit.h | 106 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 0272629b590a..6aea10efca3d 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -401,6 +401,112 @@ do { \ __ret; \ }) +/** + * wait_var_event_any_lock - wait for a variable to be updated under a lock + * @var: the address of the variable being waited on + * @condition: condition to wait for + * @lock: the object that is locked to protect updates to the variable + * @type: prefix on lock and unlock operations + * @state: waiting state, %TASK_UNINTERRUPTIBLE etc. + * + * Wait for a condition which can only be reliably tested while holding + * a lock. The variables assessed in the condition will normally be updated + * under the same lock, and the wake up should be signalled with + * wake_up_var_locked() under the same lock. + * + * This is similar to wait_var_event(), but assumes a lock is held + * while calling this function and while updating the variable. + * + * This must be called while the given lock is held and the lock will be + * dropped when schedule() is called to wait for a wake up, and will be + * reacquired before testing the condition again. The functions used to + * unlock and lock the object are constructed by appending _unlock and _lock + * to @type. + * + * Return %-ERESTARTSYS if a signal arrives which is allowed to interrupt + * the wait according to @state. + */ +#define wait_var_event_any_lock(var, condition, lock, type, state) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __ret = ___wait_var_event(var, condition, state, 0, 0, \ + type ## _unlock(lock); \ + schedule(); \ + type ## _lock(lock)); \ + __ret; \ +}) + +/** + * wait_var_event_spinlock - wait for a variable to be updated under a spinlock + * @var: the address of the variable being waited on + * @condition: condition to wait for + * @lock: the spinlock which protects updates to the variable + * + * Wait for a condition which can only be reliably tested while holding + * a spinlock. The variables assessed in the condition will normally be updated + * under the same spinlock, and the wake up should be signalled with + * wake_up_var_locked() under the same spinlock. + * + * This is similar to wait_var_event(), but assumes a spinlock is held + * while calling this function and while updating the variable. + * + * This must be called while the given lock is held and the lock will be + * dropped when schedule() is called to wait for a wake up, and will be + * reacquired before testing the condition again. + */ +#define wait_var_event_spinlock(var, condition, lock) \ + wait_var_event_any_lock(var, condition, lock, spin, TASK_UNINTERRUPTIBLE) + +/** + * wait_var_event_mutex - wait for a variable to be updated under a mutex + * @var: the address of the variable being waited on + * @condition: condition to wait for + * @mutex: the mutex which protects updates to the variable + * + * Wait for a condition which can only be reliably tested while holding + * a mutex. The variables assessed in the condition will normally be + * updated under the same mutex, and the wake up should be signalled + * with wake_up_var_locked() under the same mutex. + * + * This is similar to wait_var_event(), but assumes a mutex is held + * while calling this function and while updating the variable. 
+ * + * This must be called while the given mutex is held and the mutex will be + * dropped when schedule() is called to wait for a wake up, and will be + * reacquired before testing the condition again. + */ +#define wait_var_event_mutex(var, condition, lock) \ + wait_var_event_any_lock(var, condition, lock, mutex, TASK_UNINTERRUPTIBLE) + +/** + * wake_up_var_protected - wake up waiters for a variable asserting that it is safe + * @var: the address of the variable being waited on + * @cond: the condition which affirms this is safe + * + * When waking waiters which use wait_var_event_any_lock() the waker must be + * holding the relevant lock to avoid races. This version of wake_up_var() + * asserts that the relevant lock is held and so no barrier is needed. + * The @cond is only tested when CONFIG_LOCKDEP is enabled. + */ +#define wake_up_var_protected(var, cond) \ +do { \ + lockdep_assert(cond); \ + wake_up_var(var); \ +} while (0) + +/** + * wake_up_var_locked - wake up waiters for a variable while holding a spinlock or mutex + * @var: the address of the variable being waited on + * @lock: The spinlock or mutex that protects the variable + * + * Send a wake up for the given variable which should be waited for with + * wait_var_event_spinlock() or wait_var_event_mutex(). Unlike wake_up_var(), + * no extra barriers are needed as the locking provides sufficient sequencing. + */ +#define wake_up_var_locked(var, lock) \ + wake_up_var_protected(var, lockdep_is_held(lock)) + /** * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit * @bit: the bit of the word being waited on -- 2.51.0 From 80681c04c5e8e4297b9ebf201ca3ce6242aa16c3 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 25 Sep 2024 15:31:43 +1000 Subject: [PATCH 10/16] sched: add wait_var_event_io() It is not currently possible to use wait_var_event() for an io_schedule() style wait. This patch adds wait_var_event_io() for that purpose. Signed-off-by: NeilBrown Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240925053405.3960701-7-neilb@suse.de --- include/linux/wait_bit.h | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 6aea10efca3d..6346e26fbfd1 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -281,6 +281,9 @@ __out: __ret; \ #define __wait_var_event(var, condition) \ ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ schedule()) +#define __wait_var_event_io(var, condition) \ + ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ + io_schedule()) /** * wait_var_event - wait for a variable to be updated and notified @@ -306,6 +309,34 @@ do { \ __wait_var_event(var, condition); \ } while (0) +/** + * wait_var_event_io - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for an IO related @condition to be true, only re-checking when a + * wake up is received for the given @var (an arbitrary kernel address + * which need not be directly related to the given condition, but + * usually is). + * + * The process will wait on a waitqueue selected by hash from a shared + * pool. It will only be woken on a wake_up for the given address. + * + * This is similar to wait_var_event(), but calls io_schedule() instead + * of schedule(). 
+ * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ +#define wait_var_event_io(var, condition) \ +do { \ + might_sleep(); \ + if (condition) \ + break; \ + __wait_var_event_io(var, condition); \ +} while (0) + #define __wait_var_event_killable(var, condition) \ ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \ schedule()) -- 2.51.0 From 49994911b401c5f6b979060ffbc834949a024d8a Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 25 Sep 2024 15:31:44 +1000 Subject: [PATCH 11/16] softirq: use bit waits instead of var waits. The waiting in softirq.c is always for a bit to be cleared. This makes the bit wait functions more suitable. By switching over we can get rid of all explicit barriers. We also use wait_on_bit_lock() to avoid an explicit loop. Signed-off-by: NeilBrown Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240925053405.3960701-8-neilb@suse.de --- kernel/softirq.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/kernel/softirq.c b/kernel/softirq.c index d082e7840f88..b756d6b3fd09 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -748,10 +748,8 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); static bool tasklet_clear_sched(struct tasklet_struct *t) { - if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) { - wake_up_var(&t->state); + if (test_and_clear_wake_up_bit(TASKLET_STATE_SCHED, &t->state)) return true; - } WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n", t->use_callback ? "callback" : "func", @@ -871,8 +869,7 @@ void tasklet_kill(struct tasklet_struct *t) if (in_interrupt()) pr_notice("Attempt to kill tasklet from interrupt\n"); - while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) - wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state)); + wait_on_bit_lock(&t->state, TASKLET_STATE_SCHED, TASK_UNINTERRUPTIBLE); tasklet_unlock_wait(t); tasklet_clear_sched(t); @@ -882,16 +879,13 @@ EXPORT_SYMBOL(tasklet_kill); #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) void tasklet_unlock(struct tasklet_struct *t) { - smp_mb__before_atomic(); - clear_bit(TASKLET_STATE_RUN, &t->state); - smp_mb__after_atomic(); - wake_up_var(&t->state); + clear_and_wake_up_bit(TASKLET_STATE_RUN, &t->state); } EXPORT_SYMBOL_GPL(tasklet_unlock); void tasklet_unlock_wait(struct tasklet_struct *t) { - wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state)); + wait_on_bit(&t->state, TASKLET_STATE_RUN, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL_GPL(tasklet_unlock_wait); #endif -- 2.51.0 From 5e9f0c4819deb9459f32f12c4fd2b47993b8c395 Mon Sep 17 00:00:00 2001 From: David Disseldorp Date: Mon, 30 Sep 2024 05:09:46 +0000 Subject: [PATCH 12/16] sched: remove unused __HAVE_THREAD_FUNCTIONS hook support __HAVE_THREAD_FUNCTIONS could be defined by architectures wishing to provide their own task_thread_info(), task_stack_page(), setup_thread_stack() and end_of_stack() hooks. Commit cf8e8658100d ("arch: Remove Itanium (IA-64) architecture") removed the last upstream consumer of __HAVE_THREAD_FUNCTIONS, so change the remaining !CONFIG_THREAD_INFO_IN_TASK && !__HAVE_THREAD_FUNCTIONS conditionals to only check for the former case. 
Signed-off-by: David Disseldorp Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ard Biesheuvel Link: https://lkml.kernel.org/r/20240930050945.30304-2-ddiss@suse.de --- include/linux/sched.h | 2 +- include/linux/sched/task_stack.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index e6ee4258169a..abf26f1e1447 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1898,7 +1898,7 @@ extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_THREAD_INFO_IN_TASK # define task_thread_info(task) (&(task)->thread_info) -#elif !defined(__HAVE_THREAD_FUNCTIONS) +#else # define task_thread_info(task) ((struct thread_info *)(task)->stack) #endif diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index bf10bdb487dd..2e52cc421bce 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -33,7 +33,7 @@ static __always_inline unsigned long *end_of_stack(const struct task_struct *tas #endif } -#elif !defined(__HAVE_THREAD_FUNCTIONS) +#else #define task_stack_page(task) ((void *)(task)->stack) -- 2.51.0 From e31488c9df27aaea2cdffba688129fdeb3869650 Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Tue, 1 Oct 2024 14:54:51 +0800 Subject: [PATCH 13/16] sched/fair: remove the DOUBLE_TICK feature Commit 5e963f2bd46 ("sched/fair: Commit to EEVDF") removed the code following the DOUBLE_TICK check: - - if (!sched_feat(EEVDF) && cfs_rq->nr_running > 1) - check_preempt_tick(cfs_rq, curr); The DOUBLE_TICK feature is now dead code, so remove it. Signed-off-by: Huang Shijie Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: "Christoph Lameter (Ampere)" Reviewed-by: Vishal Chourasia Link: https://lore.kernel.org/r/20241001065451.10356-1-shijie@os.amperecomputing.com --- kernel/sched/fair.c | 6 ------ kernel/sched/features.h | 1 - 2 files changed, 7 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 225b31aaee55..c9e3b8d2a85f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5680,12 +5680,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) resched_curr(rq_of(cfs_rq)); return; } - /* - * don't let the period tick interfere with the hrtick preemption - */ - if (!sched_feat(DOUBLE_TICK) && - hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) - return; #endif } diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 290874079f60..7c22b33fb9d3 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -56,7 +56,6 @@ SCHED_FEAT(WAKEUP_PREEMPTION, true) SCHED_FEAT(HRTICK, false) SCHED_FEAT(HRTICK_DL, false) -SCHED_FEAT(DOUBLE_TICK, false) /* * Decrement CPU capacity based on time not spent running tasks -- 2.51.0 From 4423af84b29794a9bd2bd07188d8e71083e54c61 Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Tue, 1 Oct 2024 15:00:21 +0800 Subject: [PATCH 14/16] sched/fair: optimize the PLACE_LAG when se->vlag is zero When PLACE_LAG is enabled, from the relationship: vl_i = (W + w_i)*vl'_i / W we know that if vl'_i (se->vlag) is zero, then vl_i is zero too. So if se->vlag is zero, there is no need to waste cycles on the calculation. 
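To spell the reasoning out: substituting vl'_i = 0 into the relationship gives vl_i = (W + w_i) * 0 / W = 0, so the placed lag is exactly zero regardless of the weights W and w_i, and skipping the computation cannot change the result.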
Signed-off-by: Huang Shijie Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Christoph Lameter (Ampere) Link: https://lkml.kernel.org/r/20241001070021.10626-1-shijie@os.amperecomputing.com --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c9e3b8d2a85f..5a621210c9c1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5280,7 +5280,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) * * EEVDF: placement strategy #1 / #2 */ - if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) { + if (sched_feat(PLACE_LAG) && cfs_rq->nr_running && se->vlag) { struct sched_entity *curr = cfs_rq->curr; unsigned long load; -- 2.51.0 From b15148ce21c11373ade7389202c12cabf4eba6cf Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Tue, 1 Oct 2024 15:04:56 +0800 Subject: [PATCH 15/16] sched/fair: fix the comment for PREEMPT_SHORT We do not have RESPECT_SLICE, we only have RUN_TO_PARITY. Changing RESPECT_SLICE to RUN_TO_PARITY makes the comment clearer. Signed-off-by: Huang Shijie Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Christoph Lameter (Ampere) Link: https://lkml.kernel.org/r/20241001070456.10939-1-shijie@os.amperecomputing.com --- kernel/sched/features.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 7c22b33fb9d3..a3d331dd2d8f 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -19,7 +19,7 @@ SCHED_FEAT(PLACE_REL_DEADLINE, true) */ SCHED_FEAT(RUN_TO_PARITY, true) /* - * Allow wakeup of tasks with a shorter slice to cancel RESPECT_SLICE for + * Allow wakeup of tasks with a shorter slice to cancel RUN_TO_PARITY for * current. */ SCHED_FEAT(PREEMPT_SHORT, true) -- 2.51.0 From 0ac8f14ef22a1592b44dc90272aab35e43b0106a Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Wed, 2 Oct 2024 00:40:16 +0100 Subject: [PATCH 16/16] sched/wait: Remove unused bit_wait_io_timeout bit_wait_io_timeout has been unused since 2016's commit 62906027091f ("mm: add PageWaiters indicating tasks are waiting for a page bit"). Remove it. Signed-off-by: "Dr. 
David Alan Gilbert" Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Tim Chen Link: https://lore.kernel.org/r/20241001234016.231696-1-linux@treblig.org --- include/linux/wait_bit.h | 1 - kernel/sched/wait_bit.c | 14 -------------- 2 files changed, 15 deletions(-) diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 6346e26fbfd1..9e29d79fc790 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -49,7 +49,6 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync extern int bit_wait(struct wait_bit_key *key, int mode); extern int bit_wait_io(struct wait_bit_key *key, int mode); extern int bit_wait_timeout(struct wait_bit_key *key, int mode); -extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode); /** * wait_on_bit - wait for a bit to be cleared diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c index 22ec270f5ab5..b410b61cec95 100644 --- a/kernel/sched/wait_bit.c +++ b/kernel/sched/wait_bit.c @@ -266,20 +266,6 @@ __sched int bit_wait_timeout(struct wait_bit_key *word, int mode) } EXPORT_SYMBOL_GPL(bit_wait_timeout); -__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode) -{ - unsigned long now = READ_ONCE(jiffies); - - if (time_after_eq(now, word->timeout)) - return -EAGAIN; - io_schedule_timeout(word->timeout - now); - if (signal_pending_state(mode, current)) - return -EINTR; - - return 0; -} -EXPORT_SYMBOL_GPL(bit_wait_io_timeout); - void __init wait_bit_init(void) { int i; -- 2.51.0