static void acpi_ec_enable_event(struct acpi_ec *ec)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        if (acpi_ec_started(ec))
                __acpi_ec_enable_event(ec);
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
 
        /* Drain additional events if hardware requires that */
        if (EC_FLAGS_CLEAR_ON_RESUME)
 
 static void acpi_ec_disable_event(struct acpi_ec *ec)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        __acpi_ec_disable_event(ec);
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
 
        /*
         * When ec_freeze_events is true, we need to flush events in
 
 static bool acpi_ec_guard_event(struct acpi_ec *ec)
 {
-       unsigned long flags;
        bool guarded;
 
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        /*
         * If firmware SCI_EVT clearing timing is "event", we actually
         * don't know when the SCI_EVT will be cleared by firmware after
        guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
                ec->event_state != EC_EVENT_READY &&
                (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
        return guarded;
 }
 
 static int ec_transaction_polled(struct acpi_ec *ec)
 {
-       unsigned long flags;
        int ret = 0;
 
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
                ret = 1;
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
        return ret;
 }
 
 static int ec_transaction_completed(struct acpi_ec *ec)
 {
-       unsigned long flags;
        int ret = 0;
 
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
                ret = 1;
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
        return ret;
 }
 
 
 static int ec_poll(struct acpi_ec *ec)
 {
-       unsigned long flags;
        int repeat = 5; /* number of command restarts */
 
        while (repeat--) {
                do {
                        if (!ec_guard(ec))
                                return 0;
-                       spin_lock_irqsave(&ec->lock, flags);
+                       spin_lock(&ec->lock);
                        advance_transaction(ec, false);
-                       spin_unlock_irqrestore(&ec->lock, flags);
+                       spin_unlock(&ec->lock);
                } while (time_before(jiffies, delay));
                pr_debug("controller reset, restart transaction\n");
-               spin_lock_irqsave(&ec->lock, flags);
+               spin_lock(&ec->lock);
                start_transaction(ec);
-               spin_unlock_irqrestore(&ec->lock, flags);
+               spin_unlock(&ec->lock);
        }
        return -ETIME;
 }
 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
                                        struct transaction *t)
 {
-       unsigned long tmp;
        int ret = 0;
 
        /* start transaction */
-       spin_lock_irqsave(&ec->lock, tmp);
+       spin_lock(&ec->lock);
        /* Enable GPE for command processing (IBF=0/OBF=1) */
        if (!acpi_ec_submit_flushable_request(ec)) {
                ret = -EINVAL;
        ec->curr = t;
        ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
        start_transaction(ec);
-       spin_unlock_irqrestore(&ec->lock, tmp);
+       spin_unlock(&ec->lock);
 
        ret = ec_poll(ec);
 
-       spin_lock_irqsave(&ec->lock, tmp);
+       spin_lock(&ec->lock);
        if (t->irq_count == ec_storm_threshold)
                acpi_ec_unmask_events(ec);
        ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
        acpi_ec_complete_request(ec);
        ec_dbg_ref(ec, "Decrease command");
 unlock:
-       spin_unlock_irqrestore(&ec->lock, tmp);
+       spin_unlock(&ec->lock);
        return ret;
 }
 
 
 static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
                ec_dbg_drv("Starting EC");
                /* Enable GPE for event processing (SCI_EVT=1) */
                }
                ec_log_drv("EC started");
        }
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
 }
 
 static bool acpi_ec_stopped(struct acpi_ec *ec)
 {
-       unsigned long flags;
        bool flushed;
 
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        flushed = acpi_ec_flushed(ec);
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
        return flushed;
 }
 
 static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        if (acpi_ec_started(ec)) {
                ec_dbg_drv("Stopping EC");
                set_bit(EC_FLAGS_STOPPED, &ec->flags);
-               spin_unlock_irqrestore(&ec->lock, flags);
+               spin_unlock(&ec->lock);
                wait_event(ec->wait, acpi_ec_stopped(ec));
-               spin_lock_irqsave(&ec->lock, flags);
+               spin_lock(&ec->lock);
                /* Disable GPE for event processing (SCI_EVT=1) */
                if (!suspending) {
                        acpi_ec_complete_request(ec);
                clear_bit(EC_FLAGS_STOPPED, &ec->flags);
                ec_log_drv("EC stopped");
        }
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
 }
 
 static void acpi_ec_enter_noirq(struct acpi_ec *ec)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        ec->busy_polling = true;
        ec->polling_guard = 0;
        ec_log_drv("interrupt blocked");
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
 }
 
 static void acpi_ec_leave_noirq(struct acpi_ec *ec)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
        ec->busy_polling = ec_busy_polling;
        ec->polling_guard = ec_polling_guard;
        ec_log_drv("interrupt unblocked");
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
 }
 
 void acpi_ec_block_transactions(void)
 
        ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
 
-       spin_lock_irq(&ec->lock);
+       spin_lock(&ec->lock);
        ec->queries_in_progress--;
-       spin_unlock_irq(&ec->lock);
+       spin_unlock(&ec->lock);
 
        acpi_ec_put_query_handler(handler);
        kfree(q);
         */
        ec_dbg_evt("Query(0x%02x) scheduled", value);
 
-       spin_lock_irq(&ec->lock);
+       spin_lock(&ec->lock);
 
        ec->queries_in_progress++;
        queue_work(ec_query_wq, &q->work);
 
-       spin_unlock_irq(&ec->lock);
+       spin_unlock(&ec->lock);
 
        return 0;
 
 
        ec_dbg_evt("Event started");
 
-       spin_lock_irq(&ec->lock);
+       spin_lock(&ec->lock);
 
        while (ec->events_to_process) {
-               spin_unlock_irq(&ec->lock);
+               spin_unlock(&ec->lock);
 
                acpi_ec_submit_query(ec);
 
-               spin_lock_irq(&ec->lock);
+               spin_lock(&ec->lock);
 
                ec->events_to_process--;
        }
 
                ec_dbg_evt("Event stopped");
 
-               spin_unlock_irq(&ec->lock);
+               spin_unlock(&ec->lock);
 
                guard_timeout = !!ec_guard(ec);
 
-               spin_lock_irq(&ec->lock);
+               spin_lock(&ec->lock);
 
                /* Take care of SCI_EVT unless someone else is doing that. */
                if (guard_timeout && !ec->curr)
 
        ec->events_in_progress--;
 
-       spin_unlock_irq(&ec->lock);
+       spin_unlock(&ec->lock);
 }
 
 static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
 
 static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&ec->lock, flags);
+       spin_lock(&ec->lock);
 
        clear_gpe_and_advance_transaction(ec, true);
 
-       spin_unlock_irqrestore(&ec->lock, flags);
+       spin_unlock(&ec->lock);
 }
 
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
         * Dispatch the EC GPE in-band, but do not report wakeup in any case
         * to allow the caller to process events properly after that.
         */
-       spin_lock_irq(&first_ec->lock);
+       spin_lock(&first_ec->lock);
 
        if (acpi_ec_gpe_status_set(first_ec)) {
                pm_pr_dbg("ACPI EC GPE status set\n");
                work_in_progress = acpi_ec_work_in_progress(first_ec);
        }
 
-       spin_unlock_irq(&first_ec->lock);
+       spin_unlock(&first_ec->lock);
 
        if (!work_in_progress)
                return false;
 
                pm_pr_dbg("ACPI EC work flushed\n");
 
-               spin_lock_irq(&first_ec->lock);
+               spin_lock(&first_ec->lock);
 
                work_in_progress = acpi_ec_work_in_progress(first_ec);
 
-               spin_unlock_irq(&first_ec->lock);
+               spin_unlock(&first_ec->lock);
        } while (work_in_progress && !pm_wakeup_pending());
 
        return false;
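
All of the hunks above apply the same substitution. As a hedged, self-contained sketch (the example_* identifiers below are hypothetical and not taken from ec.c), the difference between the primitive being removed and the one being added is:

/*
 * Illustrative sketch only, not part of the patch.  spin_lock_irqsave()
 * disables local interrupts and saves the previous interrupt state in
 * @flags, which spin_unlock_irqrestore() puts back.  Plain spin_lock()
 * leaves the interrupt state alone, so it is only safe when the lock is
 * never taken from a context that can interrupt the lock holder on the
 * same CPU (for example a hardirq handler).
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_count;

static void example_irqsave(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* IRQs off, prior state saved */
	example_count++;
	spin_unlock_irqrestore(&example_lock, flags);	/* prior IRQ state restored */
}

static void example_plain(void)
{
	spin_lock(&example_lock);			/* interrupt state untouched */
	example_count++;
	spin_unlock(&example_lock);
}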