        if (pdev->id < 0 || pdev->id > 3)
                return -ENODEV;
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info) {
                dev_err(&pdev->dev, "unable to allocate memory\n");
                ret = -ENOMEM;
                goto err_nomem;
        }
 
-       vaddr = ioremap(res1->start, resource_size(res1));
-       base = ioremap(res2->start, resource_size(res2));
+       vaddr = devm_request_and_ioremap(&pdev->dev, res1);
+       base = devm_request_and_ioremap(&pdev->dev, res2);
        if (!vaddr || !base) {
                dev_err(&pdev->dev, "ioremap failed\n");
-               ret = -EINVAL;
+               ret = -EADDRNOTAVAIL;
                goto err_ioremap;
        }
 
        }
        info->chip.ecc.mode = ecc_mode;
 
-       info->clk = clk_get(&pdev->dev, "aemif");
+       info->clk = devm_clk_get(&pdev->dev, "aemif");
        if (IS_ERR(info->clk)) {
                ret = PTR_ERR(info->clk);
                dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
        clk_disable_unprepare(info->clk);
 
 err_clk_enable:
-       clk_put(info->clk);
-
        spin_lock_irq(&davinci_nand_lock);
        if (ecc_mode == NAND_ECC_HW_SYNDROME)
                ecc4_busy = false;
 err_ecc:
 err_clk:
 err_ioremap:
-       if (base)
-               iounmap(base);
-       if (vaddr)
-               iounmap(vaddr);
-
 err_nomem:
-       kfree(info);
        return ret;
 }
 
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);
 
-       iounmap(info->base);
-       iounmap(info->vaddr);
-
        nand_release(&info->mtd);
 
        clk_disable_unprepare(info->clk);
-       clk_put(info->clk);
-
-       kfree(info);
 
        return 0;
 }
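
For reference, the end state this conversion is driving toward looks roughly like the sketch below: a hypothetical "foo" platform driver (the names, the "aemif" clock id, and the single memory resource are illustrative only, not part of the patch) in which every devm_*-managed allocation is released by the driver core on probe failure or unbind, so the error labels and most of remove() go away. The sketch uses devm_ioremap_resource(), the current replacement for the devm_request_and_ioremap() call shown in the patch.

/*
 * Minimal sketch of the devm_* pattern, with a hypothetical "foo"
 * driver; names and resource layout are illustrative only.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>

struct foo_info {
        void __iomem    *base;
        struct clk      *clk;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_info *info;
        struct resource *res;
        int ret;

        /* Freed automatically on probe failure or device unbind. */
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        /* Requests the region and maps it; both are undone automatically. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        info->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(info->base))
                return PTR_ERR(info->base);

        /* clk_put() is handled by the devm core; enable/disable is not. */
        info->clk = devm_clk_get(&pdev->dev, "aemif");
        if (IS_ERR(info->clk))
                return PTR_ERR(info->clk);

        ret = clk_prepare_enable(info->clk);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, info);
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo_info *info = platform_get_drvdata(pdev);

        /* Only the non-devm side effect needs explicit cleanup. */
        clk_disable_unprepare(info->clk);
        return 0;
}

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name = "foo",
        },
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");

Note that clk_prepare_enable() is the one resource the devm core does not track here, which is why clk_disable_unprepare() stays in both the probe error path and remove() after the conversion above.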