LKML Archive on lore.kernel.org
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@kernel.org>,
Juri Lelli <juri.lelli@redhat.com>,
Steven Rostedt <rostedt@goodmis.org>,
Daniel Bristot de Oliveira <bristot@redhat.com>,
Will Deacon <will@kernel.org>, Waiman Long <longman@redhat.com>,
Boqun Feng <boqun.feng@gmail.com>,
Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
Davidlohr Bueso <dave@stgolabs.net>
Subject: [patch 49/63] locking/rtmutex: Extend the rtmutex core to support ww_mutex
Date: Fri, 30 Jul 2021 15:50:56 +0200
Message-ID: <20210730135207.924862152@linutronix.de>
In-Reply-To: <20210730135007.155909613@linutronix.de>
From: Peter Zijlstra <peterz@infradead.org>
Add a ww acquire context pointer to the rt_mutex waiter and to the affected
functions, and add the ww_mutex related invocations at the proper spots in
the locking code, similar to the mutex based variant.
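For reference, the acquire/backoff pattern which callers of the ww_mutex API
use, and which the rtmutex based variant therefore has to support, looks
roughly like the sketch below. my_class, struct obj and lock_two() are made
up for illustration; only the ww_acquire_*() and ww_mutex_*() calls are the
documented API, and a real user would keep backing off and retrying on a
second -EDEADLK instead of bailing out:

static DEFINE_WW_CLASS(my_class);

struct obj {
        struct ww_mutex lock;   /* set up with ww_mutex_init(&obj->lock, &my_class) */
};

static int lock_two(struct obj *a, struct obj *b)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &my_class);

        ret = ww_mutex_lock(&a->lock, &ctx);    /* nothing held yet, no backoff possible */
        if (ret)
                goto fini;

        ret = ww_mutex_lock(&b->lock, &ctx);
        if (ret == -EDEADLK) {
                /*
                 * This context is younger than the one holding b->lock:
                 * back off, sleep on the contended lock without deadlock
                 * risk and then reacquire the lock dropped above.
                 */
                ww_mutex_unlock(&a->lock);
                ww_mutex_lock_slow(&b->lock, &ctx);
                ret = ww_mutex_lock(&a->lock, &ctx);
                if (ret) {
                        /* Sketch only: real users back off and retry again */
                        ww_mutex_unlock(&b->lock);
                        goto fini;
                }
        } else if (ret) {
                ww_mutex_unlock(&a->lock);
                goto fini;
        }

        ww_acquire_done(&ctx);

        /* ... both objects are locked ... */

        ww_mutex_unlock(&b->lock);
        ww_mutex_unlock(&a->lock);
fini:
        ww_acquire_fini(&ctx);
        return ret;
}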
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/locking/rtmutex.c | 115 ++++++++++++++++++++++++++++++++++++----
kernel/locking/rtmutex_api.c | 4 -
kernel/locking/rtmutex_common.h | 2
kernel/locking/rwsem.c | 2
4 files changed, 110 insertions(+), 13 deletions(-)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -17,9 +17,44 @@
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
+#include <linux/ww_mutex.h>
#include "rtmutex_common.h"
+#ifndef WW_RT
+# define build_ww_mutex() (false)
+# define ww_container_of(rtm) NULL
+
+static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
+ struct rt_mutex *lock,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ return 0;
+}
+
+static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
+ struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ return 0;
+}
+
+#else
+# define build_ww_mutex() (true)
+# define ww_container_of(rtm) container_of(rtm, struct ww_mutex, base)
+# include "ww_mutex.h"
+#endif
+
/*
* lock->owner state tracking:
*
@@ -291,7 +326,24 @@ static __always_inline int rt_mutex_wait
static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{
- return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
+ struct rt_mutex_waiter *aw = __node_2_waiter(a);
+ struct rt_mutex_waiter *bw = __node_2_waiter(b);
+
+ if (rt_mutex_waiter_less(aw, bw))
+ return 1;
+ if (rt_mutex_waiter_less(bw, aw))
+ return 0;
+
+ /* NOTE: relies on waiter->ww_ctx being set before insertion */
+ if (build_ww_mutex() && aw->ww_ctx) {
+ if (!bw->ww_ctx)
+ return 1;
+
+ return (signed long)(aw->ww_ctx->stamp -
+ bw->ww_ctx->stamp) < 0;
+ }
+
+ return 0;
}
static __always_inline void
@@ -939,6 +991,7 @@ try_to_take_rt_mutex(struct rt_mutex_bas
static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
+ struct ww_acquire_ctx *ww_ctx,
enum rtmutex_chainwalk chwalk)
{
struct task_struct *owner = rt_mutex_owner(lock);
@@ -975,6 +1028,16 @@ static int __sched task_blocks_on_rt_mut
raw_spin_unlock(&task->pi_lock);
+ if (build_ww_mutex() && ww_ctx) {
+ struct rt_mutex *rtm;
+
+ /* Check whether the waiter should backout immediately */
+ rtm = container_of(lock, struct rt_mutex, rtmutex);
+ res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
+ if (res)
+ return res;
+ }
+
if (!owner)
return 0;
@@ -1260,6 +1323,7 @@ static void __sched remove_waiter(struct
/**
* rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
* @lock: the rt_mutex to take
+ * @ww_ctx: WW mutex context pointer
* @state: the state the task should block in (TASK_INTERRUPTIBLE
* or TASK_UNINTERRUPTIBLE)
* @timeout: the pre-initialized and started timer, or NULL for none
@@ -1268,10 +1332,12 @@ static void __sched remove_waiter(struct
* Must be called with lock->wait_lock held and interrupts disabled
*/
static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+ struct ww_acquire_ctx *ww_ctx,
unsigned int state,
struct hrtimer_sleeper *timeout,
struct rt_mutex_waiter *waiter)
{
+ struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
int ret = 0;
for (;;) {
@@ -1288,6 +1354,12 @@ static int __sched rt_mutex_slowlock_blo
break;
}
+ if (build_ww_mutex() && ww_ctx) {
+ ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
+ if (ret)
+ break;
+ }
+
raw_spin_unlock_irq(&lock->wait_lock);
schedule();
@@ -1310,6 +1382,9 @@ static void __sched rt_mutex_handle_dead
if (res != -EDEADLOCK || detect_deadlock)
return;
+ if (build_ww_mutex() && w->ww_ctx)
+ return;
+
/*
* Yell loudly and stop the task right here.
*/
@@ -1323,31 +1398,46 @@ static void __sched rt_mutex_handle_dead
/**
* __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
* @lock: The rtmutex to block lock
+ * @ww_ctx: WW mutex context pointer
* @state: The task state for sleeping
* @chwalk: Indicator whether full or partial chainwalk is requested
* @waiter: Initializer waiter for blocking
*/
static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+ struct ww_acquire_ctx *ww_ctx,
unsigned int state,
enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter)
{
+ struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
+ struct ww_mutex *ww = ww_container_of(rtm);
int ret;
lockdep_assert_held(&lock->wait_lock);
/* Try to acquire the lock again: */
- if (try_to_take_rt_mutex(lock, current, NULL))
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
+ if (build_ww_mutex() && ww_ctx) {
+ __ww_mutex_check_waiters(rtm, ww_ctx);
+ ww_mutex_lock_acquired(ww, ww_ctx);
+ }
return 0;
+ }
set_current_state(state);
- ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
-
+ ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
if (likely(!ret))
- ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
+ ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
- if (unlikely(ret)) {
+ if (likely(!ret)) {
+ /* acquired the lock */
+ if (build_ww_mutex() && ww_ctx) {
+ if (!ww_ctx->is_wait_die)
+ __ww_mutex_check_waiters(rtm, ww_ctx);
+ ww_mutex_lock_acquired(ww, ww_ctx);
+ }
+ } else {
__set_current_state(TASK_RUNNING);
remove_waiter(lock, waiter);
rt_mutex_handle_deadlock(ret, chwalk, waiter);
@@ -1362,14 +1452,17 @@ static int __sched __rt_mutex_slowlock(s
}
static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+ struct ww_acquire_ctx *ww_ctx,
unsigned int state)
{
struct rt_mutex_waiter waiter;
int ret;
rt_mutex_init_waiter(&waiter);
+ waiter.ww_ctx = ww_ctx;
- ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
+ ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
+ &waiter);
debug_rt_mutex_free_waiter(&waiter);
return ret;
@@ -1378,9 +1471,11 @@ static inline int __rt_mutex_slowlock_lo
/*
* rt_mutex_slowlock - Locking slowpath invoked when fast path fails
* @lock: The rtmutex to block lock
+ * @ww_ctx: WW mutex context pointer
* @state: The task state for sleeping
*/
static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+ struct ww_acquire_ctx *ww_ctx,
unsigned int state)
{
unsigned long flags;
@@ -1395,7 +1490,7 @@ static int __sched rt_mutex_slowlock(str
* irqsave/restore variants.
*/
raw_spin_lock_irqsave(&lock->wait_lock, flags);
- ret = __rt_mutex_slowlock_locked(lock, state);
+ ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return ret;
@@ -1407,7 +1502,7 @@ static __always_inline int __rt_mutex_lo
if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
- return rt_mutex_slowlock(lock, state);
+ return rt_mutex_slowlock(lock, NULL, state);
}
#endif /* RT_MUTEX_BUILD_MUTEX */
@@ -1434,7 +1529,7 @@ static void __sched rtlock_slowlock_lock
/* Save current state and set state to TASK_RTLOCK_WAIT */
current_save_and_set_rtlock_wait_state();
- task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK);
+ task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
for (;;) {
/* Try to acquire the lock again. */
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -267,7 +267,7 @@ int __sched __rt_mutex_start_proxy_lock(
return 1;
/* We enforce deadlock detection for futexes */
- ret = task_blocks_on_rt_mutex(lock, waiter, task,
+ ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
RT_MUTEX_FULL_CHAINWALK);
if (ret && !rt_mutex_owner(lock)) {
@@ -343,7 +343,7 @@ int __sched rt_mutex_wait_proxy_lock(str
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
- ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
+ ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -28,6 +28,7 @@
* @wake_state: Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
* @prio: Priority of the waiter
* @deadline: Deadline of the waiter if applicable
+ * @ww_ctx: WW context pointer
*/
struct rt_mutex_waiter {
struct rb_node tree_entry;
@@ -37,6 +38,7 @@ struct rt_mutex_waiter {
unsigned int wake_state;
int prio;
u64 deadline;
+ struct ww_acquire_ctx *ww_ctx;
};
/**
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1360,7 +1360,7 @@ static inline void __downgrade_write(str
__rt_mutex_lock(rtm, state)
#define rwbase_rtmutex_slowlock_locked(rtm, state) \
- __rt_mutex_slowlock_locked(rtm, state)
+ __rt_mutex_slowlock_locked(rtm, NULL, state)
#define rwbase_rtmutex_unlock(rtm) \
__rt_mutex_unlock(rtm)
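A side note on the __waiter_less() hunk above: when the rt ordering
(priority/deadline) of two waiters is equal, the ww acquire context stamp is
used as a tie-break so that older contexts queue first and plain (non-ww)
waiters queue behind ww waiters. A reduced, hypothetical restatement of that
tie-break outside of the rbtree comparator, for illustration only:

/* Returns true if waiter context 'a' should queue before 'b'. */
static bool ww_stamp_before(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
        if (!a)
                return false;   /* non-ww waiter: keep insertion order */
        if (!b)
                return true;    /* ww waiter queues ahead of a plain waiter */

        /* Older context == smaller stamp; the signed compare is wrap safe */
        return (signed long)(a->stamp - b->stamp) < 0;
}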
Thread overview: 92+ messages
2021-07-30 13:50 [patch 00/63] locking, sched: The PREEMPT-RT locking infrastructure Thomas Gleixner
2021-07-30 13:50 ` [patch 01/63] sched: Split out the wakeup state check Thomas Gleixner
2021-07-30 13:50 ` [patch 02/63] sched: Introduce TASK_RTLOCK_WAIT Thomas Gleixner
2021-07-30 13:50 ` [patch 03/63] sched: Prepare for RT sleeping spin/rwlocks Thomas Gleixner
2021-08-01 15:30 ` Mike Galbraith
2021-08-03 9:48 ` Peter Zijlstra
2021-08-03 14:04 ` Thomas Gleixner
2021-08-03 14:51 ` Peter Zijlstra
2021-08-03 20:11 ` Thomas Gleixner
2021-07-30 13:50 ` [patch 04/63] sched: Rework the __schedule() preempt argument Thomas Gleixner
2021-07-30 13:50 ` [patch 05/63] sched: Provide schedule point for RT locks Thomas Gleixner
2021-07-30 13:50 ` [patch 06/63] sched/wake_q: Provide WAKE_Q_HEAD_INITIALIZER Thomas Gleixner
2021-07-30 13:50 ` [patch 07/63] media/atomisp: Use lockdep instead of *mutex_is_locked() Thomas Gleixner
2021-07-30 13:50 ` [patch 08/63] rtmutex: Remove rt_mutex_is_locked() Thomas Gleixner
2021-07-30 13:50 ` [patch 09/63] rtmutex: Convert macros to inlines Thomas Gleixner
2021-07-30 13:50 ` [patch 10/63] rtmutex: Switch to try_cmpxchg() Thomas Gleixner
2021-07-30 13:50 ` [patch 11/63] rtmutex: Split API and implementation Thomas Gleixner
2021-07-30 13:50 ` [patch 12/63] rtmutex: Split out the inner parts of struct rtmutex Thomas Gleixner
2021-07-30 13:50 ` [patch 13/63] locking/rtmutex: Provide rt_mutex_slowlock_locked() Thomas Gleixner
2021-07-30 13:50 ` [patch 14/63] rtmutex: Provide rt_mutex_base_is_locked() Thomas Gleixner
2021-07-30 13:50 ` [patch 15/63] locking: Add base code for RT rw_semaphore and rwlock Thomas Gleixner
2021-08-04 19:37 ` Waiman Long
2021-08-05 9:04 ` Thomas Gleixner
2021-08-05 14:59 ` Waiman Long
2021-07-30 13:50 ` [patch 16/63] locking/rwsem: Add rtmutex based R/W semaphore implementation Thomas Gleixner
2021-07-30 13:50 ` [patch 17/63] locking/rtmutex: Add wake_state to rt_mutex_waiter Thomas Gleixner
2021-07-30 13:50 ` [patch 18/63] locking/rtmutex: Provide rt_wake_q and helpers Thomas Gleixner
2021-07-30 13:50 ` [patch 19/63] locking/rtmutex: Use rt_mutex_wake_q_head Thomas Gleixner
2021-07-30 13:50 ` [patch 20/63] locking/rtmutex: Prepare RT rt_mutex_wake_q for RT locks Thomas Gleixner
2021-07-30 13:50 ` [patch 21/63] locking/rtmutex: Guard regular sleeping locks specific functions Thomas Gleixner
2021-07-30 13:50 ` [patch 22/63] locking/spinlock: Split the lock types header Thomas Gleixner
2021-08-04 21:17 ` Waiman Long
2021-08-05 8:54 ` Thomas Gleixner
2021-07-30 13:50 ` [patch 23/63] locking/rtmutex: Prevent future include recursion hell Thomas Gleixner
2021-07-30 13:50 ` [patch 24/63] locking/lockdep: Reduce includes in debug_locks.h Thomas Gleixner
2021-07-30 13:50 ` [patch 25/63] rbtree: Split out the rbtree type definitions Thomas Gleixner
2021-07-30 13:50 ` [patch 26/63] locking/rtmutex: Include only rbtree types Thomas Gleixner
2021-07-30 13:50 ` [patch 27/63] locking/spinlock: Provide RT specific spinlock type Thomas Gleixner
2021-07-30 13:50 ` [patch 28/63] locking/spinlock: Provide RT variant header Thomas Gleixner
2021-07-30 13:50 ` [patch 29/63] locking/rtmutex: Provide the spin/rwlock core lock function Thomas Gleixner
2021-07-30 13:50 ` [patch 30/63] locking/spinlock: Provide RT variant Thomas Gleixner
2021-08-04 23:34 ` Waiman Long
2021-08-05 8:54 ` Thomas Gleixner
2021-07-30 13:50 ` [patch 31/63] locking/rwlock: " Thomas Gleixner
2021-07-30 13:50 ` [patch 32/63] locking/mutex: Consolidate core headers Thomas Gleixner
2021-07-30 13:50 ` [patch 33/63] locking/mutex: Move waiter to core header Thomas Gleixner
2021-07-30 13:50 ` [patch 34/63] locking/ww_mutex: Move ww_mutex declarations into ww_mutex.h Thomas Gleixner
2021-07-30 13:50 ` [patch 35/63] locking/mutex: Make mutex::wait_lock raw Thomas Gleixner
2021-07-30 13:50 ` [patch 36/63] locking/ww_mutex: Simplify lockdep annotation Thomas Gleixner
2021-07-30 13:50 ` [patch 37/63] locking/ww_mutex: Gather mutex_waiter initialization Thomas Gleixner
2021-07-30 13:50 ` [patch 38/63] locking/ww_mutex: Split up ww_mutex_unlock() Thomas Gleixner
2021-07-30 13:50 ` [patch 39/63] locking/ww_mutex: Split W/W implementation logic Thomas Gleixner
2021-07-30 13:50 ` [patch 40/63] locking/ww_mutex: Remove __sched annotation Thomas Gleixner
2021-07-30 13:50 ` [patch 41/63] locking/ww_mutex: Abstract waiter iteration Thomas Gleixner
2021-07-30 13:50 ` [patch 42/63] locking/ww_mutex: Abstract waiter enqueueing Thomas Gleixner
2021-07-30 13:50 ` [patch 43/63] locking/ww_mutex: Abstract mutex accessors Thomas Gleixner
2021-07-30 13:50 ` [patch 44/63] locking/ww_mutex: Abstract mutex types Thomas Gleixner
2021-07-30 13:50 ` [patch 45/63] locking/ww_mutex: Abstract internal lock access Thomas Gleixner
2021-07-30 13:50 ` [patch 46/63] locking/ww_mutex: Implement rt_mutex accessors Thomas Gleixner
2021-07-30 13:50 ` [patch 47/63] locking/ww_mutex: Add RT priority to W/W order Thomas Gleixner
2021-07-30 13:50 ` [patch 48/63] locking/ww_mutex: Add rt_mutex based lock type and accessors Thomas Gleixner
2021-07-30 13:50 ` Thomas Gleixner [this message]
2021-07-30 13:50 ` [patch 50/63] locking/ww_mutex: Implement rtmutex based ww_mutex API functions Thomas Gleixner
2021-07-31 13:26 ` Mike Galbraith
2021-08-01 21:18 ` Thomas Gleixner
2021-07-30 13:50 ` [patch 51/63] locking/rtmutex: Add mutex variant for RT Thomas Gleixner
2021-07-30 13:50 ` [patch 52/63] lib/test_lockup: Adapt to changed variables Thomas Gleixner
2021-07-30 13:51 ` [patch 53/63] futex: Validate waiter correctly in futex_proxy_trylock_atomic() Thomas Gleixner
2021-07-30 13:51 ` [patch 54/63] futex: Cleanup stale comments Thomas Gleixner
2021-07-30 13:51 ` [patch 55/63] futex: Correct the number of requeued waiters for PI Thomas Gleixner
2021-07-30 13:51 ` [patch 56/63] futex: Restructure futex_requeue() Thomas Gleixner
2021-07-30 13:51 ` [patch 57/63] futex: Clarify comment in futex_requeue() Thomas Gleixner
2021-07-30 13:51 ` [patch 58/63] futex: Prevent requeue_pi() lock nesting issue on RT Thomas Gleixner
2021-08-02 12:56 ` Peter Zijlstra
2021-08-02 13:10 ` Peter Zijlstra
2021-08-02 14:35 ` Thomas Gleixner
2021-08-02 14:34 ` Thomas Gleixner
2021-08-03 10:28 ` Peter Zijlstra
2021-08-03 21:10 ` Thomas Gleixner
2021-08-03 10:07 ` Peter Zijlstra
2021-08-03 21:10 ` Thomas Gleixner
2021-08-03 11:20 ` Peter Zijlstra
2021-08-03 21:22 ` Thomas Gleixner
2021-07-30 13:51 ` [patch 59/63] rtmutex: Prevent lockdep false positive with PI futexes Thomas Gleixner
2021-07-30 13:51 ` [patch 60/63] preempt: Adjust PREEMPT_LOCK_OFFSET for RT Thomas Gleixner
2021-07-30 13:51 ` [patch 61/63] locking/rtmutex: Implement equal priority lock stealing Thomas Gleixner
2021-07-30 13:51 ` [patch 62/63] locking/rtmutex: Add adaptive spinwait mechanism Thomas Gleixner
2021-08-04 12:30 ` Peter Zijlstra
2021-08-04 17:49 ` Thomas Gleixner
2021-07-30 13:51 ` [patch 63/63] locking/rtmutex: Use adaptive spinwait for all rtmutex based locks Thomas Gleixner
[not found] ` <20210803063217.2325-1-hdanton@sina.com>
2021-08-03 9:10 ` [patch 30/63] locking/spinlock: Provide RT variant Thomas Gleixner
2021-08-03 12:37 ` [patch 00/63] locking, sched: The PREEMPT-RT locking infrastructure Daniel Bristot de Oliveira