        INIT_WORK(&sch->todo_work, css_sch_todo);
        sch->dev.release = &css_subchannel_release;
+       sch->dev.dma_mask = &sch->dma_mask;
        device_initialize(&sch->dev);
        /*
-        * The physical addresses of some the dma structures that can
+        * The physical addresses for some of the dma structures that can
         * belong to a subchannel need to fit 31 bit width (e.g. ccw).
         */
-       sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+       ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
+       if (ret)
+               goto err;
        /*
         * But we don't have such restrictions imposed on the stuff that
         * is handled by the streaming API.
         */
-       sch->dma_mask = DMA_BIT_MASK(64);
-       sch->dev.dma_mask = &sch->dma_mask;
+       ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
+       if (ret)
+               goto err;
+
        return sch;
 
 err:
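
The hunk above replaces the direct mask assignments with the checked setters from the DMA mapping API. As a reference, here is a minimal sketch of the same split-mask pattern on a generic struct device; the function name is made up for illustration, only dma_set_coherent_mask(), dma_set_mask() and DMA_BIT_MASK() are real kernel interfaces:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative only: mirror the 31-bit coherent / 64-bit streaming split. */
static int example_set_split_dma_masks(struct device *dev)
{
        int ret;

        /* Coherent buffers (e.g. channel-program words) must stay below 2 GiB. */
        ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(31));
        if (ret)
                return ret;

        /* Streaming mappings are not restricted and may use 64-bit addresses. */
        return dma_set_mask(dev, DMA_BIT_MASK(64));
}

Note that dma_set_mask() fails with -EIO when dev->dma_mask is not set, which is why the hunk points sch->dev.dma_mask at &sch->dma_mask before device_initialize() instead of dropping that assignment altogether.
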
         * css->device as the device argument with the DMA API)
         * and are fine with 64 bit addresses.
         */
-       css->device.coherent_dma_mask = DMA_BIT_MASK(64);
-       css->device.dma_mask = &css->device.coherent_dma_mask;
+       ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
+       if (ret) {
+               kfree(css);
+               goto out_err;
+       }
 
        mutex_init(&css->mutex);
        ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
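
The replacement above uses dma_coerce_mask_and_coherent(), which bundles the pointer assignment the removed line did by hand with a checked setup of both masks. Roughly, the helper behaves like the following sketch (css_example_set_masks is a made-up name; the two statements inside are the real API):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative only: what the coerce helper does for css->device. */
static int css_example_set_masks(struct device *dev)
{
        /* Reuse the coherent mask storage for the streaming mask pointer ... */
        dev->dma_mask = &dev->coherent_dma_mask;
        /* ... then set both masks to 64 bit, with the platform check applied. */
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
}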
 
 {
        struct ccw_device *cdev;
        struct gen_pool *dma_pool;
+       int ret;
 
        cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
-       if (!cdev)
+       if (!cdev) {
+               ret = -ENOMEM;
                goto err_cdev;
+       }
        cdev->private = kzalloc(sizeof(struct ccw_device_private),
                                GFP_KERNEL | GFP_DMA);
-       if (!cdev->private)
+       if (!cdev->private) {
+               ret = -ENOMEM;
                goto err_priv;
-       cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+       }
+
        cdev->dev.dma_mask = sch->dev.dma_mask;
+       ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
+       if (ret)
+               goto err_coherent_mask;
+
        dma_pool = cio_gp_dma_create(&cdev->dev, 1);
-       if (!dma_pool)
+       if (!dma_pool) {
+               ret = -ENOMEM;
                goto err_dma_pool;
+       }
        cdev->private->dma_pool = dma_pool;
        cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
                                        sizeof(*cdev->private->dma_area));
-       if (!cdev->private->dma_area)
+       if (!cdev->private->dma_area) {
+               ret = -ENOMEM;
                goto err_dma_area;
+       }
        return cdev;
 err_dma_area:
        cio_gp_dma_destroy(dma_pool, &cdev->dev);
 err_dma_pool:
+err_coherent_mask:
        kfree(cdev->private);
 err_priv:
        kfree(cdev);
 err_cdev:
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(ret);
 }
 
 static void ccw_device_todo(struct work_struct *work);
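
For the child device above, the streaming mask is inherited by pointer while the coherent mask value is copied through the checked setter. A reduced sketch of that inheritance, with a made-up function name for illustration:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative only: hand the parent's DMA capabilities down to a child. */
static int example_inherit_dma_masks(struct device *child, struct device *parent)
{
        /* Share the parent's streaming mask storage ... */
        child->dma_mask = parent->dma_mask;
        /* ... and copy the coherent mask value with validation. */
        return dma_set_coherent_mask(child, parent->coherent_dma_mask);
}

The mask setter runs before the dma pool is created, so its failure path only needs to free cdev->private and cdev, which is why the new err_coherent_mask label simply falls through to the existing kfree(cdev->private) cleanup.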