* (the type definitions are in asm/spinlock_types.h)
   */
  
 -void arch_lock_relax(int cpu);
 +void arch_spin_relax(arch_spinlock_t *lock);
++#define arch_spin_relax       arch_spin_relax
  
  void arch_spin_lock_wait(arch_spinlock_t *);
  int arch_spin_trylock_retry(arch_spinlock_t *);
                                        unsigned long flags)
  {
        if (!arch_spin_trylock_once(lp))
 -              arch_spin_lock_wait_flags(lp, flags);
 +              arch_spin_lock_wait(lp);
  }
+ #define arch_spin_lock_flags  arch_spin_lock_flags
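For context, not from the patch itself: the flags argument can be ignored here
because the new slow path never re-enables interrupts (the old
arch_spin_lock_wait_flags() did a local_irq_restore(), see the lib/spinlock.c
hunks further down), so arch_spin_lock_flags() simply falls back to the same
out-of-line arch_spin_lock_wait().  A minimal sketch of the fast path this
plugs into, assuming the usual s390 helpers; simplified, not the literal
header text:

	/* sketch only: lock word is 0 when free, (owning cpu + 1) when held */
	static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
	{
		return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
	}

	static inline void arch_spin_lock(arch_spinlock_t *lp)
	{
		if (!arch_spin_trylock_once(lp))
			arch_spin_lock_wait(lp);	/* out-of-line slow path */
	}

The (cpu + 1) encoding of SPINLOCK_LOCKVAL is also why the wait loops below
yield to "owner - 1" where the old code used "~owner".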
  
  static inline int arch_spin_trylock(arch_spinlock_t *lp)
  {
   * read-locks.
   */
  
- /**
-  * read_can_lock - would read_trylock() succeed?
-  * @lock: the rwlock in question.
-  */
- #define arch_read_can_lock(x) (((x)->cnts & 0xffff0000) == 0)
 -extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
 -extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
--
- /**
-  * write_can_lock - would write_trylock() succeed?
-  * @lock: the rwlock in question.
-  */
- #define arch_write_can_lock(x) ((x)->cnts == 0)
 -static inline int arch_read_trylock_once(arch_rwlock_t *rw)
 -{
 -      int old = READ_ONCE(rw->lock);
 -      return likely(old >= 0 &&
 -                    __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
 -}
--
- #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
- #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 -static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 -{
 -      int old = READ_ONCE(rw->lock);
 -      return likely(old == 0 &&
 -                    __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
 -}
 -
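For context, not from the patch: the two trylock helpers removed above encode
the whole classic rwlock state in a single int — bit 31 (0x80000000) marks a
writer, the low 31 bits count readers — so "old >= 0" means no writer is
present.  A small illustration of that encoding (helper names are hypothetical,
for readability only):

	/* sketch only, not kernel code */
	static inline int rw_word_has_writer(int old)
	{
		return old < 0;			/* bit 31 set: a writer holds the lock */
	}

	static inline int rw_word_readers(int old)
	{
		return old & 0x7fffffff;	/* low 31 bits: number of readers */
	}

Read-trylock then bumps the reader count with cmpxchg(old, old + 1) when
old >= 0; write-trylock swaps 0 -> 0x80000000 to take the lock exclusively.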
 -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 +#define arch_read_relax(rw) barrier()
 +#define arch_write_relax(rw) barrier()
  
 -#define __RAW_OP_OR   "lao"
 -#define __RAW_OP_AND  "lan"
 -#define __RAW_OP_ADD  "laa"
 -
 -#define __RAW_LOCK(ptr, op_val, op_string)            \
 -({                                                    \
 -      int old_val;                                    \
 -                                                      \
 -      typecheck(int *, ptr);                          \
 -      asm volatile(                                   \
 -              op_string "     %0,%2,%1\n"             \
 -              "bcr    14,0\n"                         \
 -              : "=d" (old_val), "+Q" (*ptr)           \
 -              : "d" (op_val)                          \
 -              : "cc", "memory");                      \
 -      old_val;                                        \
 -})
 -
 -#define __RAW_UNLOCK(ptr, op_val, op_string)          \
 -({                                                    \
 -      int old_val;                                    \
 -                                                      \
 -      typecheck(int *, ptr);                          \
 -      asm volatile(                                   \
 -              op_string "     %0,%2,%1\n"             \
 -              : "=d" (old_val), "+Q" (*ptr)           \
 -              : "d" (op_val)                          \
 -              : "cc", "memory");                      \
 -      old_val;                                        \
 -})
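For readers not fluent in z/Architecture assembler (context, not part of the
patch): the __RAW_LOCK/__RAW_UNLOCK macros deleted above wrap the z196
interlocked-access instructions — lao/lan/laa are load-and-{or,and,add}, which
atomically apply the operation and return the previous value — and the extra
"bcr 14,0" in __RAW_LOCK acts as a serialization point (memory barrier) on the
acquire side.  A rough analogy in terms of compiler atomics, approximation
only:

	/* sketch only: approximate equivalents of the removed macros */
	static inline int raw_lock_or(int *ptr, int val)
	{
		/* ~ __RAW_LOCK(ptr, val, __RAW_OP_OR): returns old value, fully ordered */
		return __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST);
	}

	static inline int raw_unlock_add(int *ptr, int val)
	{
		/* ~ __RAW_UNLOCK(ptr, val, __RAW_OP_ADD): no trailing serialization */
		return __atomic_fetch_add(ptr, val, __ATOMIC_RELEASE);
	}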
 -
 -extern void _raw_read_lock_wait(arch_rwlock_t *lp);
 -extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
 +void arch_read_lock_wait(arch_rwlock_t *lp);
 +void arch_write_lock_wait(arch_rwlock_t *lp);
  
  static inline void arch_read_lock(arch_rwlock_t *rw)
  {
 
                if (count-- >= 0)
                        continue;
                count = spin_retry;
 -              /*
 -               * For multiple layers of hypervisors, e.g. z/VM + LPAR
 -               * yield the CPU unconditionally. For LPAR rely on the
 -               * sense running status.
 -               */
 -              if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
 -                      smp_yield_cpu(~owner);
 +              if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
 +                      smp_yield_cpu(owner - 1);
        }
 +
 +      /* Pass lock_spin job to next CPU in the queue */
 +      if (node_id && tail_id != node_id) {
 +              /* Wait until the next CPU has set up the 'next' pointer */
 +              while ((next = READ_ONCE(node->next)) == NULL)
 +                      ;
 +              next->prev = NULL;
 +      }
 +
 + out:
 +      S390_lowcore.spinlock_index--;
  }
 -EXPORT_SYMBOL(arch_spin_lock_wait);
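Context, not from the patch: the "Pass lock_spin job" block added above is the
hand-off at the tail of an MCS-style wait queue — each waiting CPU spins on its
own per-CPU node, and the waiter at the head passes the job of spinning on the
lock word itself to its successor by clearing next->prev.  The node layout is
not visible in this hunk; assuming only the fields the hunk actually uses, it
looks roughly like:

	/* sketch only: per-CPU wait node implied by 'node', 'next' and 'prev' above */
	struct spin_wait {
		struct spin_wait *next;	/* successor queued behind this CPU */
		struct spin_wait *prev;	/* cleared by the predecessor at hand-off */
		int node_id;		/* identifies this CPU's node */
	};

"tail_id != node_id" means another CPU has already queued behind us, so a
successor exists to hand off to, and the S390_lowcore.spinlock_index-- at the
end presumably releases the per-CPU node slot claimed on entry to the function.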
  
 -void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 +static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
  {
 -      int cpu = SPINLOCK_LOCKVAL;
 -      int owner, count;
 +      int lockval, old, new, owner, count;
  
 -      local_irq_restore(flags);
 +      lockval = SPINLOCK_LOCKVAL;     /* cpu + 1 */
  
        /* Pass the virtual CPU to the lock holder if it is not running */
-       owner = arch_spin_yield_target(ACCESS_ONCE(lp->lock), NULL);
 -      owner = arch_load_niai4(&lp->lock);
 -      if (owner && arch_vcpu_is_preempted(~owner))
 -              smp_yield_cpu(~owner);
++      owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
 +      if (owner && arch_vcpu_is_preempted(owner - 1))
 +              smp_yield_cpu(owner - 1);
  
        count = spin_retry;
        while (1) {