www.infradead.org Git - users/hch/misc.git/commitdiff
ALSA: xen: Use guard() for mutex locks
authorTakashi Iwai <tiwai@suse.de>
Fri, 29 Aug 2025 15:13:22 +0000 (17:13 +0200)
committerTakashi Iwai <tiwai@suse.de>
Mon, 1 Sep 2025 11:54:27 +0000 (13:54 +0200)
Replace the manual mutex lock/unlock pairs with guard() for code
simplification.

Only code refactoring, and no behavior change.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Link: https://patch.msgid.link/20250829151335.7342-9-tiwai@suse.de
sound/xen/xen_snd_front.c
sound/xen/xen_snd_front_evtchnl.c

index b66e037710d0dfcbb608b45d8d02a9905977eb3a..c56d17479f05d1ae8a942adc277008061b515b8d 100644 (file)
@@ -62,12 +62,12 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
        struct xensnd_req *req;
        int ret;
 
-       mutex_lock(&evtchnl->u.req.req_io_lock);
+       guard(mutex)(&evtchnl->u.req.req_io_lock);
 
-       mutex_lock(&evtchnl->ring_io_lock);
-       req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
-       req->op.hw_param = *hw_param_req;
-       mutex_unlock(&evtchnl->ring_io_lock);
+       scoped_guard(mutex, &evtchnl->ring_io_lock) {
+               req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
+               req->op.hw_param = *hw_param_req;
+       }
 
        ret = be_stream_do_io(evtchnl);
 
@@ -77,7 +77,6 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
        if (ret == 0)
                *hw_param_resp = evtchnl->u.req.resp.hw_param;
 
-       mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
 }
 
@@ -90,25 +89,24 @@ int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
        struct xensnd_req *req;
        int ret;
 
-       mutex_lock(&evtchnl->u.req.req_io_lock);
-
-       mutex_lock(&evtchnl->ring_io_lock);
-       req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
-       req->op.open.pcm_format = format;
-       req->op.open.pcm_channels = channels;
-       req->op.open.pcm_rate = rate;
-       req->op.open.buffer_sz = buffer_sz;
-       req->op.open.period_sz = period_sz;
-       req->op.open.gref_directory =
-               xen_front_pgdir_shbuf_get_dir_start(shbuf);
-       mutex_unlock(&evtchnl->ring_io_lock);
+       guard(mutex)(&evtchnl->u.req.req_io_lock);
+
+       scoped_guard(mutex, &evtchnl->ring_io_lock) {
+               req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
+               req->op.open.pcm_format = format;
+               req->op.open.pcm_channels = channels;
+               req->op.open.pcm_rate = rate;
+               req->op.open.buffer_sz = buffer_sz;
+               req->op.open.period_sz = period_sz;
+               req->op.open.gref_directory =
+                       xen_front_pgdir_shbuf_get_dir_start(shbuf);
+       }
 
        ret = be_stream_do_io(evtchnl);
 
        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);
 
-       mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
 }
 
@@ -117,18 +115,17 @@ int xen_snd_front_stream_close(struct xen_snd_front_evtchnl *evtchnl)
        __always_unused struct xensnd_req *req;
        int ret;
 
-       mutex_lock(&evtchnl->u.req.req_io_lock);
+       guard(mutex)(&evtchnl->u.req.req_io_lock);
 
-       mutex_lock(&evtchnl->ring_io_lock);
-       req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
-       mutex_unlock(&evtchnl->ring_io_lock);
+       scoped_guard(mutex, &evtchnl->ring_io_lock) {
+               req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
+       }
 
        ret = be_stream_do_io(evtchnl);
 
        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);
 
-       mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
 }
 
@@ -138,20 +135,19 @@ int xen_snd_front_stream_write(struct xen_snd_front_evtchnl *evtchnl,
        struct xensnd_req *req;
        int ret;
 
-       mutex_lock(&evtchnl->u.req.req_io_lock);
+       guard(mutex)(&evtchnl->u.req.req_io_lock);
 
-       mutex_lock(&evtchnl->ring_io_lock);
-       req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
-       req->op.rw.length = count;
-       req->op.rw.offset = pos;
-       mutex_unlock(&evtchnl->ring_io_lock);
+       scoped_guard(mutex, &evtchnl->ring_io_lock) {
+               req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
+               req->op.rw.length = count;
+               req->op.rw.offset = pos;
+       }
 
        ret = be_stream_do_io(evtchnl);
 
        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);
 
-       mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
 }
 
@@ -161,20 +157,19 @@ int xen_snd_front_stream_read(struct xen_snd_front_evtchnl *evtchnl,
        struct xensnd_req *req;
        int ret;
 
-       mutex_lock(&evtchnl->u.req.req_io_lock);
+       guard(mutex)(&evtchnl->u.req.req_io_lock);
 
-       mutex_lock(&evtchnl->ring_io_lock);
-       req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
-       req->op.rw.length = count;
-       req->op.rw.offset = pos;
-       mutex_unlock(&evtchnl->ring_io_lock);
+       scoped_guard(mutex, &evtchnl->ring_io_lock) {
+               req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
+               req->op.rw.length = count;
+               req->op.rw.offset = pos;
+       }
 
        ret = be_stream_do_io(evtchnl);
 
        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);
 
-       mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
 }
 
@@ -184,19 +179,18 @@ int xen_snd_front_stream_trigger(struct xen_snd_front_evtchnl *evtchnl,
        struct xensnd_req *req;
        int ret;
 
-       mutex_lock(&evtchnl->u.req.req_io_lock);
+       guard(mutex)(&evtchnl->u.req.req_io_lock);
 
-       mutex_lock(&evtchnl->ring_io_lock);
-       req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
-       req->op.trigger.type = type;
-       mutex_unlock(&evtchnl->ring_io_lock);
+       scoped_guard(mutex, &evtchnl->ring_io_lock) {
+               req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
+               req->op.trigger.type = type;
+       }
 
        ret = be_stream_do_io(evtchnl);
 
        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);
 
-       mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
 }
 
index 26d1b3987887c698f80fb50c1cbc2c56e8b2f6da..2fbed8e4a490e1d0f8d8ca5705b0d131c97b70bc 100644 (file)
@@ -28,7 +28,7 @@ static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
        if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
                return IRQ_HANDLED;
 
-       mutex_lock(&channel->ring_io_lock);
+       guard(mutex)(&channel->ring_io_lock);
 
 again:
        rp = channel->u.req.ring.sring->rsp_prod;
@@ -80,7 +80,6 @@ again:
                channel->u.req.ring.sring->rsp_event = i + 1;
        }
 
-       mutex_unlock(&channel->ring_io_lock);
        return IRQ_HANDLED;
 }
 
@@ -93,13 +92,13 @@ static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
        if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
                return IRQ_HANDLED;
 
-       mutex_lock(&channel->ring_io_lock);
+       guard(mutex)(&channel->ring_io_lock);
 
        prod = page->in_prod;
        /* Ensure we see ring contents up to prod. */
        virt_rmb();
        if (prod == page->in_cons)
-               goto out;
+               return IRQ_HANDLED;
 
        /*
         * Assume that the backend is trusted to always write sane values
@@ -125,8 +124,6 @@ static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
        /* Ensure ring contents. */
        virt_wmb();
 
-out:
-       mutex_unlock(&channel->ring_io_lock);
        return IRQ_HANDLED;
 }
 
@@ -444,23 +441,23 @@ void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair
        else
                state = EVTCHNL_STATE_DISCONNECTED;
 
-       mutex_lock(&evt_pair->req.ring_io_lock);
-       evt_pair->req.state = state;
-       mutex_unlock(&evt_pair->req.ring_io_lock);
+       scoped_guard(mutex, &evt_pair->req.ring_io_lock) {
+               evt_pair->req.state = state;
+       }
 
-       mutex_lock(&evt_pair->evt.ring_io_lock);
-       evt_pair->evt.state = state;
-       mutex_unlock(&evt_pair->evt.ring_io_lock);
+       scoped_guard(mutex, &evt_pair->evt.ring_io_lock) {
+               evt_pair->evt.state = state;
+       }
 }
 
 void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
 {
-       mutex_lock(&evt_pair->req.ring_io_lock);
-       evt_pair->req.evt_next_id = 0;
-       mutex_unlock(&evt_pair->req.ring_io_lock);
+       scoped_guard(mutex, &evt_pair->req.ring_io_lock) {
+               evt_pair->req.evt_next_id = 0;
+       }
 
-       mutex_lock(&evt_pair->evt.ring_io_lock);
-       evt_pair->evt.evt_next_id = 0;
-       mutex_unlock(&evt_pair->evt.ring_io_lock);
+       scoped_guard(mutex, &evt_pair->evt.ring_io_lock) {
+               evt_pair->evt.evt_next_id = 0;
+       }
 }