/* kernel/locking/ww_rt_mutex.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* rtmutex API
*/
#include <linux/spinlock.h>
#include <linux/export.h>
#define RT_MUTEX_BUILD_MUTEX
#define WW_RT
#include "rtmutex.c"
/*
 * Backport of upstream commit 12235da8c80a1 ("kernel/locking: Add context
 * to ww_mutex_trylock()", v5.16-rc1, Maarten Lankhorst / Peter Zijlstra):
 * adds an acquire-context argument to ww_mutex_trylock() so batches of
 * trylocks can carry proper lockdep nesting annotations instead of
 * consuming one held-lock slot per mutex (avoids "MAX_LOCK_DEPTH too low"
 * splats in i915 eviction paths). RHBZ #2072020.
 */
/*
 * ww_mutex_trylock - try to acquire @lock, optionally under acquire
 * context @ww_ctx.
 *
 * With a context, a successful trylock is annotated as nesting under
 * @ww_ctx's lockdep map (mutex_acquire_nest with trylock=1), so many
 * trylocked mutexes share one lockdep slot. Without a context this
 * degenerates to a plain rt_mutex_trylock().
 *
 * Returns 1 if the lock was acquired, 0 if it is contended.
 */
int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct rt_mutex *rtm = &lock->base;

	if (!ww_ctx)
		return rt_mutex_trylock(rtm);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__rt_mutex_trylock(&rtm->rtmutex)) {
		/* Record @ww_ctx as this lock's context before annotating. */
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
/*
 * __ww_rt_mutex_lock - common lock path for the ww_mutex_lock*() variants.
 * @lock:   the wound/wait mutex to acquire
 * @ww_ctx: acquire context, or NULL for a context-less lock
 * @state:  task state to sleep in (TASK_UNINTERRUPTIBLE / TASK_INTERRUPTIBLE)
 * @ip:     caller return address for lockdep
 *
 * Returns 0 on success, -EALREADY if @ww_ctx already owns @lock, or an
 * error propagated from rt_mutex_slowlock() on the contended path.
 */
static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
		/* Re-locking under the same context is a caller error. */
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	/*
	 * rt_mutex_try_acquire() handles the uncontended fast path without
	 * entering the slowpath (and its blk_flush_plug() side effects).
	 */
	if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	/* Undo the lockdep acquisition annotation if the slowlock failed. */
	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}
/*
 * ww_mutex_lock - acquire @lock uninterruptibly under acquire context @ctx.
 *
 * Returns 0 on success or -EALREADY if @ctx already owns @lock; other
 * errors come from the contended slowpath. _RET_IP_ must be captured
 * here so lockdep reports the real caller.
 */
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock);
/*
 * ww_mutex_lock_interruptible - like ww_mutex_lock(), but sleeps in
 * TASK_INTERRUPTIBLE so a signal can abort the wait (error reported by
 * the slowpath). Returns 0 on success or -EALREADY if @ctx already
 * owns @lock.
 */
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
/*
 * ww_mutex_unlock - release @lock.
 *
 * Order matters: clear the ww_mutex bookkeeping first, drop the lockdep
 * annotation while the mutex is still held, then release the underlying
 * rtmutex last.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_release(&lock->base.dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->base.rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);