	struct xensnd_req *req;
	int ret;
- 	mutex_lock(&evtchnl->u.req.req_io_lock);
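+ 	/* req_io_lock is now held to function exit and released on every return path. */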
+ 	guard(mutex)(&evtchnl->u.req.req_io_lock);
- 	mutex_lock(&evtchnl->ring_io_lock);
- 	req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
- 	req->op.hw_param = *hw_param_req;
- 	mutex_unlock(&evtchnl->ring_io_lock);
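+ 	/* ring_io_lock is held only for the scoped block below. */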
+ 	scoped_guard(mutex, &evtchnl->ring_io_lock) {
+ 		req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
+ 		req->op.hw_param = *hw_param_req;
+ 	}
	ret = be_stream_do_io(evtchnl);
	if (ret == 0)
		*hw_param_resp = evtchnl->u.req.resp.hw_param;
- 	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
	struct xensnd_req *req;
	int ret;
- 	mutex_lock(&evtchnl->u.req.req_io_lock);
-
- 	mutex_lock(&evtchnl->ring_io_lock);
- 	req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
- 	req->op.open.pcm_format = format;
- 	req->op.open.pcm_channels = channels;
- 	req->op.open.pcm_rate = rate;
- 	req->op.open.buffer_sz = buffer_sz;
- 	req->op.open.period_sz = period_sz;
- 	req->op.open.gref_directory =
- 		xen_front_pgdir_shbuf_get_dir_start(shbuf);
- 	mutex_unlock(&evtchnl->ring_io_lock);
+ 	guard(mutex)(&evtchnl->u.req.req_io_lock);
+
+ 	scoped_guard(mutex, &evtchnl->ring_io_lock) {
+ 		req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
+ 		req->op.open.pcm_format = format;
+ 		req->op.open.pcm_channels = channels;
+ 		req->op.open.pcm_rate = rate;
+ 		req->op.open.buffer_sz = buffer_sz;
+ 		req->op.open.period_sz = period_sz;
+ 		req->op.open.gref_directory =
+ 			xen_front_pgdir_shbuf_get_dir_start(shbuf);
+ 	}
	ret = be_stream_do_io(evtchnl);
	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);
- 	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
	__always_unused struct xensnd_req *req;
	int ret;
- 	mutex_lock(&evtchnl->u.req.req_io_lock);
+ 	guard(mutex)(&evtchnl->u.req.req_io_lock);
- 	mutex_lock(&evtchnl->ring_io_lock);
- 	req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
- 	mutex_unlock(&evtchnl->ring_io_lock);
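+ 	/* XENSND_OP_CLOSE carries no payload, hence req stays __always_unused. */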
+ 	scoped_guard(mutex, &evtchnl->ring_io_lock)
+ 		req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
	ret = be_stream_do_io(evtchnl);
	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);
- 	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
	struct xensnd_req *req;
	int ret;
- 	mutex_lock(&evtchnl->u.req.req_io_lock);
+ 	guard(mutex)(&evtchnl->u.req.req_io_lock);
- 	mutex_lock(&evtchnl->ring_io_lock);
- 	req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
- 	req->op.rw.length = count;
- 	req->op.rw.offset = pos;
- 	mutex_unlock(&evtchnl->ring_io_lock);
+ 	scoped_guard(mutex, &evtchnl->ring_io_lock) {
+ 		req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
+ 		req->op.rw.length = count;
+ 		req->op.rw.offset = pos;
+ 	}
	ret = be_stream_do_io(evtchnl);
	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);
- 	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
	struct xensnd_req *req;
	int ret;
- 	mutex_lock(&evtchnl->u.req.req_io_lock);
+ 	guard(mutex)(&evtchnl->u.req.req_io_lock);
- 	mutex_lock(&evtchnl->ring_io_lock);
- 	req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
- 	req->op.rw.length = count;
- 	req->op.rw.offset = pos;
- 	mutex_unlock(&evtchnl->ring_io_lock);
+ 	scoped_guard(mutex, &evtchnl->ring_io_lock) {
+ 		req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
+ 		req->op.rw.length = count;
+ 		req->op.rw.offset = pos;
+ 	}
	ret = be_stream_do_io(evtchnl);
	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);
- 	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
	struct xensnd_req *req;
	int ret;
- 	mutex_lock(&evtchnl->u.req.req_io_lock);
+ 	guard(mutex)(&evtchnl->u.req.req_io_lock);
- 	mutex_lock(&evtchnl->ring_io_lock);
- 	req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
- 	req->op.trigger.type = type;
- 	mutex_unlock(&evtchnl->ring_io_lock);
+ 	scoped_guard(mutex, &evtchnl->ring_io_lock) {
+ 		req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
+ 		req->op.trigger.type = type;
+ 	}
	ret = be_stream_do_io(evtchnl);
	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);
- 	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;
- 	mutex_lock(&channel->ring_io_lock);
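+ 	/* Held across the whole response scan; released when the handler returns. */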
+ 	guard(mutex)(&channel->ring_io_lock);
again:
	rp = channel->u.req.ring.sring->rsp_prod;
		channel->u.req.ring.sring->rsp_event = i + 1;
	}
- 	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}
	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;
- 	mutex_lock(&channel->ring_io_lock);
+ 	guard(mutex)(&channel->ring_io_lock);
	prod = page->in_prod;
	/* Ensure we see ring contents up to prod. */
	virt_rmb();
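+ 	/* Nothing queued: the guard releases ring_io_lock on this early return. */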
	if (prod == page->in_cons)
- 		goto out;
+ 		return IRQ_HANDLED;
	/*
	 * Assume that the backend is trusted to always write sane values
	/* Ensure ring contents. */
	virt_wmb();
-out:
- 	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}
	else
		state = EVTCHNL_STATE_DISCONNECTED;
- 	mutex_lock(&evt_pair->req.ring_io_lock);
- 	evt_pair->req.state = state;
- 	mutex_unlock(&evt_pair->req.ring_io_lock);
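+ 	/* Update each channel's state under its own ring lock. */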
+ 	scoped_guard(mutex, &evt_pair->req.ring_io_lock)
+ 		evt_pair->req.state = state;
- 	mutex_lock(&evt_pair->evt.ring_io_lock);
- 	evt_pair->evt.state = state;
- 	mutex_unlock(&evt_pair->evt.ring_io_lock);
+ 	scoped_guard(mutex, &evt_pair->evt.ring_io_lock)
+ 		evt_pair->evt.state = state;
}
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
- 	mutex_lock(&evt_pair->req.ring_io_lock);
- 	evt_pair->req.evt_next_id = 0;
- 	mutex_unlock(&evt_pair->req.ring_io_lock);
+ 	scoped_guard(mutex, &evt_pair->req.ring_io_lock)
+ 		evt_pair->req.evt_next_id = 0;
- 	mutex_lock(&evt_pair->evt.ring_io_lock);
- 	evt_pair->evt.evt_next_id = 0;
- 	mutex_unlock(&evt_pair->evt.ring_io_lock);
+ 	scoped_guard(mutex, &evt_pair->evt.ring_io_lock)
+ 		evt_pair->evt.evt_next_id = 0;
}