extern struct eeh_ops *eeh_ops;
 extern int eeh_subsystem_enabled;
 extern struct mutex eeh_mutex;
+extern raw_spinlock_t confirm_error_lock;
 extern int eeh_probe_mode;
 
 #define EEH_PROBE_MODE_DEV     (1<<0)  /* From PCI device      */
        mutex_unlock(&eeh_mutex);
 }
 
+/*
+ * Take confirm_error_lock with IRQs disabled, saving the previous IRQ
+ * state into *flags. Serializes detection/reporting of EEH errors so
+ * that only one error recovery routine runs at a time (pair with
+ * eeh_serialize_unlock()).
+ */
+static inline void eeh_serialize_lock(unsigned long *flags)
+{
+       raw_spin_lock_irqsave(&confirm_error_lock, *flags);
+}
+
+/*
+ * Release confirm_error_lock and restore the IRQ state previously
+ * saved by eeh_serialize_lock().
+ */
+static inline void eeh_serialize_unlock(unsigned long flags)
+{
+       raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+}
+
 /*
  * Max number of EEH freezes allowed before we consider the device
  * to be permanently disabled.
 
 DEFINE_MUTEX(eeh_mutex);
 
 /* Lock to avoid races due to multiple reports of an error */
-static DEFINE_RAW_SPINLOCK(confirm_error_lock);
+DEFINE_RAW_SPINLOCK(confirm_error_lock);
 
 /* Buffer for reporting pci register dumps. Its here in BSS, and
  * not dynamically alloced, so that it ends up in RMO where RTAS
         * in one slot might report errors simultaneously, and we
         * only want one error recovery routine running.
         */
-       raw_spin_lock_irqsave(&confirm_error_lock, flags);
+       eeh_serialize_lock(&flags);
        rc = 1;
        if (pe->state & EEH_PE_ISOLATED) {
                pe->check_count++;
         * bridges.
         */
        eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
-       raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+       eeh_serialize_unlock(flags);
 
        eeh_send_failure_event(pe);
 
        return 1;
 
 dn_unlock:
-       raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+       eeh_serialize_unlock(flags);
        return rc;
 }
 
                return ret;
        }
 
-       raw_spin_lock_init(&confirm_error_lock);
-
        /* Initialize EEH event */
        ret = eeh_event_init();
        if (ret)