{
WARN_ON_ONCE(!bus->rb.area);
- spin_lock_irq(&bus->reg_lock);
+ guard(spinlock_irq)(&bus->reg_lock);
/* CORB set up */
bus->corb.addr = bus->rb.addr;
bus->corb.buf = (__le32 *)bus->rb.area;
snd_hdac_chip_writeb(bus, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
/* Accept unsolicited responses */
snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
- spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_init_cmd_io);
*/
void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus)
{
- spin_lock_irq(&bus->reg_lock);
- /* disable ringbuffer DMAs */
- snd_hdac_chip_writeb(bus, RIRBCTL, 0);
- snd_hdac_chip_writeb(bus, CORBCTL, 0);
- spin_unlock_irq(&bus->reg_lock);
+ scoped_guard(spinlock_irq, &bus->reg_lock) {
+ /* disable ringbuffer DMAs */
+ snd_hdac_chip_writeb(bus, RIRBCTL, 0);
+ snd_hdac_chip_writeb(bus, CORBCTL, 0);
+ }
hdac_wait_for_cmd_dmas(bus);
- spin_lock_irq(&bus->reg_lock);
+ guard(spinlock_irq)(&bus->reg_lock);
/* disable unsolicited responses */
snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, 0);
- spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_stop_cmd_io);
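The hunks above swap open-coded spin_lock_irq()/spin_unlock_irq() pairs for the scope-based guards from <linux/cleanup.h>: guard(spinlock_irq) keeps bus->reg_lock held until the enclosing scope (here the whole function) exits, while scoped_guard(spinlock_irq, ...) confines the critical section to the attached block, which is why hdac_wait_for_cmd_dmas() still runs unlocked. A minimal sketch of the two forms, assuming a made-up struct foo with its own lock (foo_stop(), foo_shutdown() and foo_wait_idle() are illustrative only, not part of this patch):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical device state, for illustration only. */
struct foo {
	spinlock_t lock;
	bool running;
	int pending;
};

/* May sleep, so it must be called without the spinlock held. */
static void foo_wait_idle(struct foo *f)
{
	/* e.g. poll hardware until it goes idle */
}

/* guard(): the lock is taken here and released automatically on
 * every path out of the function scope. */
static int foo_stop(struct foo *f)
{
	guard(spinlock_irq)(&f->lock);

	if (!f->running)
		return -EINVAL;		/* early return still unlocks */
	f->running = false;
	return 0;
}

/* scoped_guard(): only the braced block is covered, so the
 * sleepable wait below runs after the lock has been dropped,
 * mirroring hdac_wait_for_cmd_dmas() in the hunk above. */
static void foo_shutdown(struct foo *f)
{
	scoped_guard(spinlock_irq, &f->lock) {
		f->running = false;
		f->pending = 0;
	}
	foo_wait_idle(f);
}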
{
unsigned int addr = azx_command_addr(val);
int timeout = 50;
- int ret = -EIO;
- spin_lock_irq(&bus->reg_lock);
+ guard(spinlock_irq)(&bus->reg_lock);
while (timeout--) {
/* check ICB busy bit */
/* Set ICB bit */
snd_hdac_chip_updatew(bus, IRS, AZX_IRS_BUSY, AZX_IRS_BUSY);
- ret = snd_hdac_bus_wait_for_pio_response(bus, addr);
- goto out;
+ return snd_hdac_bus_wait_for_pio_response(bus, addr);
}
udelay(1);
}
dev_dbg_ratelimited(bus->dev, "send_cmd_pio timeout: IRS=%#x, val=%#x\n",
snd_hdac_chip_readw(bus, IRS), val);
-out:
- spin_unlock_irq(&bus->reg_lock);
-
- return ret;
+ return -EIO;
}
/**
unsigned int addr = azx_command_addr(val);
unsigned int wp, rp;
- spin_lock_irq(&bus->reg_lock);
+ guard(spinlock_irq)(&bus->reg_lock);
bus->last_cmd[azx_command_addr(val)] = val;
wp = snd_hdac_chip_readw(bus, CORBWP);
if (wp == 0xffff) {
/* something wrong, controller likely turned to D3 */
- spin_unlock_irq(&bus->reg_lock);
return -EIO;
}
wp++;
rp = snd_hdac_chip_readw(bus, CORBRP);
if (wp == rp) {
/* oops, it's full */
- spin_unlock_irq(&bus->reg_lock);
return -EAGAIN;
}
bus->corb.buf[wp] = cpu_to_le32(val);
snd_hdac_chip_writew(bus, CORBWP, wp);
- spin_unlock_irq(&bus->reg_lock);
-
return 0;
}
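The early return -EIO / return -EAGAIN statements added above are safe because the unlock belongs to the guard object rather than to any particular exit path: the guard classes declared in <linux/cleanup.h> and <linux/spinlock.h> are built on the compiler's __attribute__((cleanup)) support, so the lock is dropped on every way out of the scope, which is what allows removing the out: label and the per-branch spin_unlock_irq() calls. A hand-rolled approximation of the mechanism, not the actual macro expansion (demo_lock_irq(), demo_unlock() and demo_send() are invented for illustration):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* "Destructor": runs automatically when the annotated variable
 * goes out of scope, no matter which return path was taken. */
static void demo_unlock(spinlock_t **lp)
{
	spin_unlock_irq(*lp);
}

/* "Constructor": takes the lock and hands back the pointer that
 * the cleanup handler will later unlock. */
static spinlock_t *demo_lock_irq(spinlock_t *l)
{
	spin_lock_irq(l);
	return l;
}

static int demo_send(spinlock_t *reg_lock, bool full)
{
	spinlock_t *hold __attribute__((__cleanup__(demo_unlock))) =
		demo_lock_irq(reg_lock);

	if (full)
		return -EAGAIN;	/* demo_unlock() fires here... */
	return 0;		/* ...and on the normal path; no goto needed */
}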
timeout = jiffies + msecs_to_jiffies(1000);
for (loopcounter = 0;; loopcounter++) {
- spin_lock_irq(&bus->reg_lock);
- if (!bus->polling_mode)
- prepare_to_wait(&bus->rirb_wq, &wait,
- TASK_UNINTERRUPTIBLE);
- if (bus->polling_mode)
- snd_hdac_bus_update_rirb(bus);
- if (!bus->rirb.cmds[addr]) {
- if (res)
- *res = bus->rirb.res[addr]; /* the last value */
+ scoped_guard(spinlock_irq, &bus->reg_lock) {
if (!bus->polling_mode)
- finish_wait(&bus->rirb_wq, &wait);
- spin_unlock_irq(&bus->reg_lock);
- return 0;
+ prepare_to_wait(&bus->rirb_wq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (bus->polling_mode)
+ snd_hdac_bus_update_rirb(bus);
+ if (!bus->rirb.cmds[addr]) {
+ if (res)
+ *res = bus->rirb.res[addr]; /* the last value */
+ if (!bus->polling_mode)
+ finish_wait(&bus->rirb_wq, &wait);
+ return 0;
+ }
}
- spin_unlock_irq(&bus->reg_lock);
if (time_after(jiffies, timeout))
break;
#define LOOP_COUNT_MAX 3000
if (substream->pcm)
key |= (substream->pcm->device << 16);
- spin_lock_irq(&bus->reg_lock);
+ guard(spinlock_irq)(&bus->reg_lock);
list_for_each_entry(azx_dev, &bus->stream_list, list) {
if (azx_dev->direction != substream->stream)
continue;
res->assigned_key = key;
res->substream = substream;
}
- spin_unlock_irq(&bus->reg_lock);
return res;
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_assign);
{
struct hdac_bus *bus = azx_dev->bus;
- spin_lock_irq(&bus->reg_lock);
+ guard(spinlock_irq)(&bus->reg_lock);
snd_hdac_stream_release_locked(azx_dev);
- spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_release);
int err;
guard(snd_hdac_dsp_lock)(azx_dev);
- spin_lock_irq(&bus->reg_lock);
- if (azx_dev->running || azx_dev->locked) {
- spin_unlock_irq(&bus->reg_lock);
- return -EBUSY;
+ scoped_guard(spinlock_irq, &bus->reg_lock) {
+ if (azx_dev->running || azx_dev->locked)
+ return -EBUSY;
+ azx_dev->locked = true;
}
- azx_dev->locked = true;
- spin_unlock_irq(&bus->reg_lock);
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, bus->dev,
byte_size, bufp);
error:
snd_dma_free_pages(bufp);
err_alloc:
- spin_lock_irq(&bus->reg_lock);
- azx_dev->locked = false;
- spin_unlock_irq(&bus->reg_lock);
+ scoped_guard(spinlock_irq, &bus->reg_lock) {
+ azx_dev->locked = false;
+ }
return err;
}
EXPORT_SYMBOL_GPL(snd_hdac_dsp_prepare);
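guard(snd_hdac_dsp_lock)(azx_dev) above relies on a driver-specific guard class whose definition lies outside this hunk. Classes like that are declared with the DEFINE_GUARD()/DEFINE_LOCK_GUARD_1() helpers from <linux/cleanup.h>; a hypothetical analogue for a device that protects DSP loading with its own mutex (struct foo_dev, the foo_dsp class and foo_dsp_prepare() are invented for illustration, not the actual snd_hdac_dsp_lock definition):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct foo_dev {
	struct mutex dsp_mutex;
	bool locked;
};

/* Declares the guard class "foo_dsp": constructing it locks
 * dsp_mutex, destruction at end of scope unlocks it.  _T stands
 * for the value later passed to guard(foo_dsp)(...). */
DEFINE_GUARD(foo_dsp, struct foo_dev *,
	     mutex_lock(&_T->dsp_mutex),
	     mutex_unlock(&_T->dsp_mutex))

static int foo_dsp_prepare(struct foo_dev *dev)
{
	guard(foo_dsp)(dev);

	if (dev->locked)
		return -EBUSY;	/* dsp_mutex released automatically */
	dev->locked = true;
	return 0;
}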
snd_dma_free_pages(dmab);
dmab->area = NULL;
- spin_lock_irq(&bus->reg_lock);
+ guard(spinlock_irq)(&bus->reg_lock);
azx_dev->locked = false;
- spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_dsp_cleanup);
#endif /* CONFIG_SND_HDA_DSP_LOADER */