 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
-                                                  struct ww_acquire_ctx *ww_ctx)
+static __always_inline void
+ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 {
 #ifdef CONFIG_DEBUG_MUTEXES
        /*
  * slowpath, set ctx and wake up any waiters so they can recheck.
  */
 static __always_inline void
-ww_mutex_set_context_fastpath(struct ww_mutex *lock,
-                              struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        unsigned long flags;
 
  * Callers must hold the mutex wait_lock.
  */
 static __always_inline void
-ww_mutex_set_context_slowpath(struct ww_mutex *lock,
-                             struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        ww_mutex_lock_acquired(lock, ctx);
        lock->ctx = ctx;
  * with the spinner at the head of the OSQ, if present, until the owner is
  * changed to itself.
  */
-static bool mutex_optimistic_spin(struct mutex *lock,
-                                 struct ww_acquire_ctx *ww_ctx,
-                                 const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                     const bool use_ww_ctx, const bool waiter)
 {
        if (!waiter) {
                /*
        return false;
 }
 #else
-static bool mutex_optimistic_spin(struct mutex *lock,
-                                 struct ww_acquire_ctx *ww_ctx,
-                                 const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                     const bool use_ww_ctx, const bool waiter)
 {
        return false;
 }
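
Note: both copies of mutex_optimistic_spin() are now __always_inline, so the
compile-time-constant use_ww_ctx and waiter arguments fold away per call site
and the dead branches disappear, including the whole body of the
!CONFIG_MUTEX_SPIN_ON_OWNER stub. A minimal standalone sketch of that pattern
(illustrative names, not taken from mutex.c):

    #include <stdbool.h>

    #define __always_inline inline __attribute__((__always_inline__))

    /*
     * With __always_inline and a constant flag, the compiler
     * specializes the body per call site: the untaken branch is
     * folded away, as if two separate functions had been written.
     */
    static __always_inline int do_op(int x, const bool fast)
    {
            if (fast)
                    return x + 1;   /* kept when called with true */
            return x * 2;           /* kept when called with false */
    }

    int op_fast(int x) { return do_op(x, true); }   /* reduces to x + 1 */
    int op_slow(int x) { return do_op(x, false); }  /* reduces to x * 2 */
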
        struct ww_mutex *ww;
        int ret;
 
-       ww = container_of(lock, struct ww_mutex, base);
+       might_sleep();
 
+       ww = container_of(lock, struct ww_mutex, base);
        if (use_ww_ctx && ww_ctx) {
                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                        return -EALREADY;
        return ret;
 }
 
+static int __sched
+__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+            struct lockdep_map *nest_lock, unsigned long ip)
+{
+       return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
+}
+
+static int __sched
+__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+               struct lockdep_map *nest_lock, unsigned long ip,
+               struct ww_acquire_ctx *ww_ctx)
+{
+       return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
+}
+
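
Note: these two wrappers pin the (ww_ctx, use_ww_ctx) pair down in one place.
Plain-mutex paths always pass (NULL, false) and ww paths always pass
(ww_ctx, true), so no call site can mismatch the pointer and the flag any
more; and with might_sleep() hoisted into __mutex_lock_common(), every outer
entry point below shrinks to a single call. A toy reduction of the same
pattern (all names illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    struct ctx;     /* stand-in for ww_acquire_ctx */

    /* The common worker takes the full argument list... */
    static int worker(long state, unsigned int subclass,
                      struct ctx *ctx, bool use_ctx)
    {
            /* ... shared slow path ... */
            return 0;
    }

    /* ...and the wrappers fix (ctx, use_ctx) so they always agree. */
    static int lock_plain(long state, unsigned int subclass)
    {
            return worker(state, subclass, NULL, false);
    }

    static int lock_ctx(long state, struct ctx *ctx)
    {
            return worker(state, 0, ctx, true);
    }
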
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
-       might_sleep();
-       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                           subclass, NULL, _RET_IP_, NULL, 0);
+       __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 void __sched
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
-       might_sleep();
-       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                           0, nest, _RET_IP_, NULL, 0);
+       __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
 int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
-       might_sleep();
-       return __mutex_lock_common(lock, TASK_KILLABLE,
-                                  subclass, NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
-       might_sleep();
-       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-                                  subclass, NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
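
Note: all of the _nested variants now funnel through __mutex_lock(). As a
reminder of what the subclass buys: it exists purely for lockdep, marking a
deliberate nesting of two locks from the same class so it is not reported as
recursive locking. A usage sketch (struct buf and buf_copy() are
hypothetical; mutex_lock_nested() and SINGLE_DEPTH_NESTING are the real API):

    #include <linux/mutex.h>

    struct buf {
            struct mutex lock;
            /* ... */
    };

    /*
     * Both locks belong to one lockdep class; the subclass tells
     * lockdep the nesting is intentional. Assumes callers order
     * dst/src consistently so no real ABBA deadlock can form.
     */
    static void buf_copy(struct buf *dst, struct buf *src)
    {
            mutex_lock(&src->lock);
            mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
            /* ... copy data ... */
            mutex_unlock(&dst->lock);
            mutex_unlock(&src->lock);
    }
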
 
        int ret;
 
        might_sleep();
-       ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-                                  0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-                                  ctx, 1);
+       ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
+                             0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+                             ctx);
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
        int ret;
 
        might_sleep();
-       ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-                                 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-                                 ctx, 1);
+       ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
+                             0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+                             ctx);
 
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 static noinline void __sched
 __mutex_lock_slowpath(struct mutex *lock)
 {
-       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-                           NULL, _RET_IP_, NULL, 0);
+       __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
-       return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-                                  NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
-       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, ctx, 1);
+       return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
+                              _RET_IP_, ctx);
 }
 
 static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                            struct ww_acquire_ctx *ctx)
 {
-       return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, ctx, 1);
+       return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
+                              _RET_IP_, ctx);
 }
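
Note: the trampolines above stay noinline deliberately. The uncontended
fastpath is inlined at each call site while the rarely executed slow path
lives out of line, keeping the hot code small. A simplified userspace-C11
sketch of that split (toy types, nothing from the real mutex implementation):

    #include <stdatomic.h>

    struct toy_mutex { atomic_int owner; };

    /* Rare path kept out of line so it doesn't bloat call sites. */
    __attribute__((noinline)) static void toy_lock_slow(struct toy_mutex *m)
    {
            int free = 0;

            while (!atomic_compare_exchange_weak(&m->owner, &free, 1))
                    free = 0;       /* spin here; the real code sleeps */
    }

    /* Uncontended case: a single cmpxchg and we are done. */
    static inline void toy_lock(struct toy_mutex *m)
    {
            int free = 0;

            if (atomic_compare_exchange_strong(&m->owner, &free, 1))
                    return;
            toy_lock_slow(m);
    }
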
 
 #endif