if (nr || !footbridge_cfn_mode())
                return 0;
 
-       res = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL);
+       res = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
        if (!res) {
                printk("out of memory for root bus resources");
                return 0;
 
        if (nr >= 1)
                return 0;
 
-       res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
+       res = kcalloc(2, sizeof(*res), GFP_KERNEL);
        if (res == NULL) {
                /* 
                 * If we're out of memory this early, something is wrong,
 
 {
        int i;
 
-       omap_mcbsp_devices = kzalloc(size * sizeof(struct platform_device *),
+       omap_mcbsp_devices = kcalloc(size, sizeof(struct platform_device *),
                                     GFP_KERNEL);
        if (!omap_mcbsp_devices) {
                printk(KERN_ERR "Could not register McBSP devices\n");
 
 {
        char *hc_name;
 
-       hc_name = kzalloc(sizeof(char) * (HSMMC_NAME_LEN + 1), GFP_KERNEL);
+       hc_name = kzalloc(HSMMC_NAME_LEN + 1, GFP_KERNEL);
        if (!hc_name) {
                kfree(hc_name);
                return -ENOMEM;
 
        if (!omap_hwmod_parse_module_range(NULL, node, &res))
                return -ENODEV;
 
-       hwmods = kzalloc(sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL);
+       hwmods = kcalloc(oh_cnt, sizeof(struct omap_hwmod *), GFP_KERNEL);
        if (!hwmods) {
                ret = -ENOMEM;
                goto odbfd_exit;
                goto error;
        }
 
-       res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
+       res = kcalloc(2, sizeof(*res), GFP_KERNEL);
        if (!res)
                return -ENOMEM;
 
 
 
        prcm_irq_setup = irq_setup;
 
-       prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
-       prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
-       prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
-               GFP_KERNEL);
+       prcm_irq_chips = kcalloc(nr_regs, sizeof(void *), GFP_KERNEL);
+       prcm_irq_setup->saved_mask = kcalloc(nr_regs, sizeof(u32),
+                                            GFP_KERNEL);
+       prcm_irq_setup->priority_mask = kcalloc(nr_regs, sizeof(u32),
+                                               GFP_KERNEL);
 
        if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
            !prcm_irq_setup->priority_mask)
 
        uint32_t data = 0, off, ret, idx;
        struct ve_spc_opp *opps;
 
-       opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
+       opps = kcalloc(MAX_OPPS, sizeof(*opps), GFP_KERNEL);
        if (!opps)
                return -ENOMEM;
 
 
                goto err;
 
        mapping->bitmap_size = bitmap_size;
-       mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
-                               GFP_KERNEL);
+       mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
+                                  GFP_KERNEL);
        if (!mapping->bitmaps)
                goto err2;
 
 
        struct insn_emulation *insn;
        struct ctl_table *insns_sysctl, *sysctl;
 
-       insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1),
-                             GFP_KERNEL);
+       insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
+                              GFP_KERNEL);
 
        raw_spin_lock_irqsave(&insn_emulation_lock, flags);
        list_for_each_entry(insn, &insn_emulation, node) {
 
         */
        WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
        atomic64_set(&asid_generation, ASID_FIRST_VERSION);
-       asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
+       asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
                           GFP_KERNEL);
        if (!asid_map)
                panic("Failed to allocate bitmap for %lu ASIDs\n",
 
        }
 #endif
 
-       sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
+       sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL);
        if (!sysfs_cpus)
                panic("kzalloc in topology_init failed - NR_CPUS too big?");
 
                return -1;
        }
 
-       this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
-                       GFP_KERNEL);
+       this_cache=kcalloc(unique_caches, sizeof(struct cache_info),
+                          GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;
 
 
        printk_once(KERN_WARNING
                "PROM version < 4.50 -- implementing old PROM flush WAR\n");
 
-       war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+       war_list = kcalloc(DEV_PER_WIDGET, sizeof(*war_list), GFP_KERNEL);
        BUG_ON(!war_list);
 
        SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
 
        /* Setup the PMU ATE map */
        soft->pbi_int_ate_resource.lowest_free_index = 0;
        soft->pbi_int_ate_resource.ate =
-           kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
+           kcalloc(soft->pbi_int_ate_size, sizeof(u64), GFP_KERNEL);
 
        if (!soft->pbi_int_ate_resource.ate) {
                kfree(soft);
 
                return -ENODEV;
        }
 
-       a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
+       a = kcalloc(6, sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;
 
 
 {
        int ret;
 
-       dbdev_tab = kzalloc(sizeof(dbdev_tab_t) * DBDEV_TAB_SIZE, GFP_KERNEL);
+       dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
        if (!dbdev_tab)
                return -ENOMEM;
 
 
        uartclk = clk_get_rate(clk);
        clk_put(clk);
 
-       ports = kzalloc(s * (c + 1), GFP_KERNEL);
+       ports = kcalloc(s, (c + 1), GFP_KERNEL);
        if (!ports) {
                printk(KERN_INFO "Alchemy: no memory for UART data\n");
                return;
 
 static int __init _new_usbres(struct resource **r, struct platform_device **d)
 {
-       *r = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL);
+       *r = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
        if (!*r)
                return -ENOMEM;
        *d = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
 
        if (stschg_irq)
                cnt++;
 
-       sr = kzalloc(sizeof(struct resource) * cnt, GFP_KERNEL);
+       sr = kcalloc(cnt, sizeof(struct resource), GFP_KERNEL);
        if (!sr)
                return -ENOMEM;
 
                return -EINVAL;
 
        ret = -ENOMEM;
-       parts = kzalloc(sizeof(struct mtd_partition) * 5, GFP_KERNEL);
+       parts = kcalloc(5, sizeof(struct mtd_partition), GFP_KERNEL);
        if (!parts)
                goto out;
 
 
                goto out_bad;
 
        /* add a dummy (zero) entry at the end as a sentinel */
-       bmips_dma_ranges = kzalloc(sizeof(struct bmips_dma_range) * (len + 1),
+       bmips_dma_ranges = kcalloc(len + 1, sizeof(struct bmips_dma_range),
                                   GFP_KERNEL);
        if (!bmips_dma_ranges)
                goto out_bad;
 
                "nand-disk",
        };
 
-       leds_data = kzalloc(sizeof(*leds_data) * RBTX4939_MAX_7SEGLEDS,
+       leds_data = kcalloc(RBTX4939_MAX_7SEGLEDS, sizeof(*leds_data),
                            GFP_KERNEL);
        if (!leds_data)
                return -ENOMEM;
 
 
 #ifdef CONFIG_VDSO32
        /* Make sure pages are in the correct state */
-       vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 2),
+       vdso32_pagelist = kcalloc(vdso32_pages + 2, sizeof(struct page *),
                                  GFP_KERNEL);
        BUG_ON(vdso32_pagelist == NULL);
        for (i = 0; i < vdso32_pages; i++) {
 #endif
 
 #ifdef CONFIG_PPC64
-       vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 2),
+       vdso64_pagelist = kcalloc(vdso64_pages + 2, sizeof(struct page *),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages; i++) {
 
        if (!weight)
                return 0;
 
-       updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
+       updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
        if (!updates)
                return 0;
 
 
        if (!bpf_jit_enable)
                return;
 
-       addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
+       addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
        if (addrs == NULL)
                return;
 
 
                goto skip_init_ctx;
        }
 
-       addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
+       addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
        if (addrs == NULL) {
                fp = org_fp;
                goto out_addrs;
 
        timer.function = profile_spus;
 
        /* Allocate arrays for collecting SPU PC samples */
-       samples = kzalloc(SPUS_PER_NODE *
-                         TRACE_ARRAY_SIZE * sizeof(u32), GFP_KERNEL);
+       samples = kcalloc(SPUS_PER_NODE * TRACE_ARRAY_SIZE, sizeof(u32),
+                         GFP_KERNEL);
 
        if (!samples)
                return -ENOMEM;
 
        count = ppc4xx_pciex_hwops->core_init(np);
        if (count > 0) {
                ppc4xx_pciex_ports =
-                      kzalloc(count * sizeof(struct ppc4xx_pciex_port),
+                      kcalloc(count, sizeof(struct ppc4xx_pciex_port),
                               GFP_KERNEL);
                if (ppc4xx_pciex_ports) {
                        ppc4xx_pciex_port_count = count;
 
                goto out_param_buf;
        }
 
-       id = kzalloc(sizeof(*id) * count, GFP_KERNEL);
+       id = kcalloc(count, sizeof(*id), GFP_KERNEL);
        if (!id) {
                pr_err("SYSPARAM: Failed to allocate memory to read parameter "
                                "id\n");
                goto out_param_buf;
        }
 
-       size = kzalloc(sizeof(*size) * count, GFP_KERNEL);
+       size = kcalloc(count, sizeof(*size), GFP_KERNEL);
        if (!size) {
                pr_err("SYSPARAM: Failed to allocate memory to read parameter "
                                "size\n");
                goto out_free_id;
        }
 
-       perm = kzalloc(sizeof(*perm) * count, GFP_KERNEL);
+       perm = kcalloc(count, sizeof(*perm), GFP_KERNEL);
        if (!perm) {
                pr_err("SYSPARAM: Failed to allocate memory to read supported "
                                "action on the parameter");
                goto out_free_size;
        }
 
-       attr = kzalloc(sizeof(*attr) * count, GFP_KERNEL);
+       attr = kcalloc(count, sizeof(*attr), GFP_KERNEL);
        if (!attr) {
                pr_err("SYSPARAM: Failed to allocate memory for parameter "
                                "attributes\n");
 
        printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
 
        /* Allocate fixups array */
-       mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL);
+       mpic->fixups = kcalloc(128, sizeof(*mpic->fixups), GFP_KERNEL);
        BUG_ON(mpic->fixups == NULL);
 
        /* Init spinlock */
        if (psrc) {
                /* Allocate a bitmap with one bit per interrupt */
                unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1);
-               mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL);
+               mpic->protected = kcalloc(mapsize, sizeof(long), GFP_KERNEL);
                BUG_ON(mpic->protected == NULL);
                for (i = 0; i < psize/sizeof(u32); i++) {
                        if (psrc[i] > intvec_top)
 
        if (rc == 0)
                return true;
 
-       xive_provision_chips = kzalloc(4 * xive_provision_chip_count,
+       xive_provision_chips = kcalloc(4, xive_provision_chip_count,
                                       GFP_KERNEL);
        if (WARN_ON(!xive_provision_chips))
                return false;
 
        if (ops->size > APPLDATA_MAX_REC_SIZE)
                return -EINVAL;
 
-       ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
+       ops->ctl_table = kcalloc(4, sizeof(struct ctl_table), GFP_KERNEL);
        if (!ops->ctl_table)
                return -ENOMEM;
 
 
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
 
        /* Make sure pages are in the correct state */
-       vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
+       vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
                                  GFP_KERNEL);
        BUG_ON(vdso32_pagelist == NULL);
        for (i = 0; i < vdso32_pages - 1; i++) {
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
 
        /* Make sure pages are in the correct state */
-       vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
+       vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages - 1; i++) {
 
        unsigned long or;
        int ret;
 
-       dmabrg_handlers = kzalloc(10 * sizeof(struct dmabrg_handler),
+       dmabrg_handlers = kcalloc(10, sizeof(struct dmabrg_handler),
                                  GFP_KERNEL);
        if (!dmabrg_handlers)
                return -ENOMEM;
 
        if (unlikely(nr_ports == 0))
                return -ENODEV;
 
-       sh7786_pcie_ports = kzalloc(nr_ports * sizeof(struct sh7786_pcie_port),
+       sh7786_pcie_ports = kcalloc(nr_ports, sizeof(struct sh7786_pcie_port),
                                    GFP_KERNEL);
        if (unlikely(!sh7786_pcie_ports))
                return -ENOMEM;
 
        }
        if (!current_thread_info()->utraps) {
                current_thread_info()->utraps =
-                       kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
+                       kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long),
+                               GFP_KERNEL);
                if (!current_thread_info()->utraps)
                        return -ENOMEM;
                current_thread_info()->utraps[0] = 1;
 
        while (amd_iommu_v2_event_descs[i].attr.attr.name)
                i++;
 
-       attrs = kzalloc(sizeof(struct attribute **) * (i + 1), GFP_KERNEL);
+       attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;
 
 
        size_t size;
        int i, j;
 
-       pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
+       pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
        if (!pmus)
                return -ENOMEM;
 
 
        int i;
        u8 num_banks = mca_cfg.banks;
 
-       mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
+       mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
        if (!mce_banks)
                return -ENOMEM;
 
 
        if (bp)
                return 0;
 
-       bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
+       bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *),
                     GFP_KERNEL);
        if (!bp)
                return -ENOMEM;
 
 
        max = num_var_ranges;
        if (fcount == NULL) {
-               fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL);
+               fcount = kcalloc(max, sizeof(*fcount), GFP_KERNEL);
                if (!fcount)
                        return -ENOMEM;
                FILE_FCOUNT(file) = fcount;
 
        if (!hpet_domain)
                return;
 
-       hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
+       hpet_devs = kcalloc(num_timers, sizeof(struct hpet_dev), GFP_KERNEL);
        if (!hpet_devs)
                return;
 
 
        if (type == PCI_CAP_ID_MSI && nvec > 1)
                return 1;
 
-       v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
+       v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
        if (!v)
                return -ENOMEM;
 
 
 {
        int cpu;
 
-       blade_info = kzalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
+       blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
        if (!blade_info)
                return -ENOMEM;
 
 
 {
        bio_slab_max = 2;
        bio_slab_nr = 0;
-       bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+       bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
+                           GFP_KERNEL);
        if (!bio_slabs)
                panic("bio: can't allocate bios\n");
 
 
                       __func__, depth);
        }
 
-       tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+       tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;
 
        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-       tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+       tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;
 
 
        if (count < 0) {
                return NULL;
        } else if (count > 0) {
-               resources = kzalloc(count * sizeof(struct resource),
+               resources = kcalloc(count, sizeof(struct resource),
                                    GFP_KERNEL);
                if (!resources) {
                        dev_err(&adev->dev, "No memory for resources\n");
 
        num_gpes = acpi_current_gpe_count;
        num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
 
-       all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
+       all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *),
                            GFP_KERNEL);
        if (all_attrs == NULL)
                return;
 
-       all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
+       all_counters = kcalloc(num_counters, sizeof(struct event_counter),
                               GFP_KERNEL);
        if (all_counters == NULL)
                goto fail;
        if (ACPI_FAILURE(status))
                goto fail;
 
-       counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
+       counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute),
                                GFP_KERNEL);
        if (counter_attrs == NULL)
                goto fail;
 
                }
        }
 #endif
-       alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
-                                  ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+       alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+                              sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
 
                if (*p == ',')
                        size++;
 
-       ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
+       ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
        if (!ata_force_tbl) {
                printk(KERN_WARNING "ata: failed to extend force table, "
                       "libata.force ignored\n");
 
        int i, err;
 
        if (!pmp_link) {
-               pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS,
+               pmp_link = kcalloc(SATA_PMP_MAX_PORTS, sizeof(pmp_link[0]),
                                   GFP_NOIO);
                if (!pmp_link)
                        return -ENOMEM;
 
            DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
 
            /* allocate the array of receive buffers */
-           buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
+           buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
+                                           GFP_KERNEL);
 
            if (buffer == NULL)
                return -ENOMEM;
 
        skb_queue_head_init(&iadev->rx_dma_q);  
        iadev->rx_free_desc_qhead = NULL;   
 
-       iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
+       iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
        if (!iadev->rx_open) {
                printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
                dev->number);  
 
 {
        unsigned int *resources_per_cpu, min_index = ~0;
 
-       resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
+       resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu),
+                                   GFP_KERNEL);
        if (resources_per_cpu) {
                struct drbd_resource *resource;
                unsigned int cpu, min = ~0;
 
        struct nullb_cmd *cmd;
        int i, tag_size;
 
-       nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
+       nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
        if (!nq->cmds)
                return -ENOMEM;
 
        tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
-       nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
+       nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
        if (!nq->tag_map) {
                kfree(nq->cmds);
                return -ENOMEM;
 
 static int setup_queues(struct nullb *nullb)
 {
-       nullb->queues = kzalloc(nullb->dev->submit_queues *
-               sizeof(struct nullb_queue), GFP_KERNEL);
+       nullb->queues = kcalloc(nullb->dev->submit_queues,
+                               sizeof(struct nullb_queue),
+                               GFP_KERNEL);
        if (!nullb->queues)
                return -ENOMEM;
 
 
 
        priv->cache.page_count = CACHE_PAGE_COUNT;
        priv->cache.page_size = CACHE_PAGE_SIZE;
-       priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) *
-                                  CACHE_PAGE_COUNT, GFP_KERNEL);
+       priv->cache.tags = kcalloc(CACHE_PAGE_COUNT,
+                                  sizeof(struct ps3vram_tag),
+                                  GFP_KERNEL);
        if (!priv->cache.tags)
                return -ENOMEM;
 
 
                dev_info(CARD_TO_DEV(card),
                        "Failed reading the number of DMA targets\n");
 
-       card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
+       card->ctrl = kcalloc(card->n_targets, sizeof(*card->ctrl),
+                            GFP_KERNEL);
        if (!card->ctrl) {
                st = -ENOMEM;
                goto failed_dma_setup;
 
        struct rsxx_dma *dma;
        struct list_head *issued_dmas;
 
-       issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
+       issued_dmas = kcalloc(card->n_targets, sizeof(*issued_dmas),
                              GFP_KERNEL);
        if (!issued_dmas)
                return -ENOMEM;
 
 {
        unsigned int r;
 
-       blkif->rings = kzalloc(blkif->nr_rings * sizeof(struct xen_blkif_ring), GFP_KERNEL);
+       blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
+                              GFP_KERNEL);
        if (!blkif->rings)
                return -ENOMEM;
 
 
        if (!info->nr_rings)
                info->nr_rings = 1;
 
-       info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
+       info->rinfo = kcalloc(info->nr_rings,
+                             sizeof(struct blkfront_ring_info),
+                             GFP_KERNEL);
        if (!info->rinfo) {
                xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
                return -ENOMEM;
        }
 
        for (i = 0; i < BLK_RING_SIZE(info); i++) {
-               rinfo->shadow[i].grants_used = kzalloc(
-                       sizeof(rinfo->shadow[i].grants_used[0]) * grants,
-                       GFP_NOIO);
-               rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * psegs, GFP_NOIO);
-               if (info->max_indirect_segments)
-                       rinfo->shadow[i].indirect_grants = kzalloc(
-                               sizeof(rinfo->shadow[i].indirect_grants[0]) *
-                               INDIRECT_GREFS(grants),
+               rinfo->shadow[i].grants_used =
+                       kcalloc(grants,
+                               sizeof(rinfo->shadow[i].grants_used[0]),
                                GFP_NOIO);
+               rinfo->shadow[i].sg = kcalloc(psegs,
+                                             sizeof(rinfo->shadow[i].sg[0]),
+                                             GFP_NOIO);
+               if (info->max_indirect_segments)
+                       rinfo->shadow[i].indirect_grants =
+                               kcalloc(INDIRECT_GREFS(grants),
+                                       sizeof(rinfo->shadow[i].indirect_grants[0]),
+                                       GFP_NOIO);
                if ((rinfo->shadow[i].grants_used == NULL) ||
                        (rinfo->shadow[i].sg == NULL) ||
                     (info->max_indirect_segments &&
 
        int retval = 0;
        int i;
 
-       tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *),GFP_KERNEL);
+       tables = kcalloc(nr_tables + 1, sizeof(struct amd_page_map *),
+                        GFP_KERNEL);
        if (tables == NULL)
                return -ENOMEM;
 
 
        int retval = 0;
        int i;
 
-       tables = kzalloc((nr_tables + 1) * sizeof(struct ati_page_map *),GFP_KERNEL);
+       tables = kcalloc(nr_tables + 1, sizeof(struct ati_page_map *),
+                        GFP_KERNEL);
        if (tables == NULL)
                return -ENOMEM;
 
 
        int retval = 0;
        int i;
 
-       tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
+       tables = kcalloc(nr_tables + 1, sizeof(struct serverworks_page_map *),
                         GFP_KERNEL);
        if (tables == NULL)
                return -ENOMEM;
 
        list_for_each_entry(info, &ssif_infos, link)
                count++;
 
-       address_list = kzalloc(sizeof(*address_list) * (count + 1), GFP_KERNEL);
+       address_list = kcalloc(count + 1, sizeof(*address_list),
+                              GFP_KERNEL);
        if (!address_list)
                return NULL;
 
 
        }
 
        cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
-       clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL);
+       clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
        if (cpg == NULL || clks == NULL) {
                /* We're leaking memory on purpose, there's no point in cleaning
                 * up as the system won't boot anyway.
 
        }
 
        cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
-       clks = kzalloc(CPG_NUM_CLOCKS * sizeof(*clks), GFP_KERNEL);
+       clks = kcalloc(CPG_NUM_CLOCKS, sizeof(*clks), GFP_KERNEL);
        if (cpg == NULL || clks == NULL) {
                /* We're leaking memory on purpose, there's no point in cleaning
                 * up as the system won't boot anyway.
 
        }
 
        cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
-       clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL);
+       clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
        if (cpg == NULL || clks == NULL) {
                /* We're leaking memory on purpose, there's no point in cleaning
                 * up as the system won't boot anyway.
 
                return;
 
        cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
-       clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL);
+       clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
        BUG_ON(!cpg || !clks);
 
        cpg->data.clks = clks;
 
                return;
 
        clk_data->clk_num = QUADFS_MAX_CHAN;
-       clk_data->clks = kzalloc(QUADFS_MAX_CHAN * sizeof(struct clk *),
+       clk_data->clks = kcalloc(QUADFS_MAX_CHAN, sizeof(struct clk *),
                                 GFP_KERNEL);
 
        if (!clk_data->clks) {
 
                return;
 
        clk_data->clk_num = num_odfs;
-       clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
+       clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
                                 GFP_KERNEL);
 
        if (!clk_data->clks)
 
        if (!clk_data)
                return;
 
-       clk_data->clks = kzalloc((qty+1) * sizeof(struct clk *), GFP_KERNEL);
+       clk_data->clks = kcalloc(qty + 1, sizeof(struct clk *), GFP_KERNEL);
        if (!clk_data->clks) {
                kfree(clk_data);
                return;
 
        if (WARN_ON(banks > ARRAY_SIZE(periph_regs)))
                return NULL;
 
-       periph_clk_enb_refcnt = kzalloc(32 * banks *
-                               sizeof(*periph_clk_enb_refcnt), GFP_KERNEL);
+       periph_clk_enb_refcnt = kcalloc(32 * banks,
+                                       sizeof(*periph_clk_enb_refcnt),
+                                       GFP_KERNEL);
        if (!periph_clk_enb_refcnt)
                return NULL;
 
        periph_banks = banks;
 
-       clks = kzalloc(num * sizeof(struct clk *), GFP_KERNEL);
+       clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
        if (!clks)
                kfree(periph_clk_enb_refcnt);
 
 
                goto cleanup;
        }
 
-       parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
+       parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
        if (!parent_names)
                goto cleanup;
 
 
 
        num_dividers = i;
 
-       tmp = kzalloc(sizeof(*tmp) * (valid_div + 1), GFP_KERNEL);
+       tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;
 
                return ERR_PTR(-EINVAL);
        }
 
-       table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
+       table = kcalloc(valid_div + 1, sizeof(*table), GFP_KERNEL);
 
        if (!table)
                return ERR_PTR(-ENOMEM);
 
                goto cleanup;
        }
 
-       parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
+       parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
        if (!parent_names)
                goto cleanup;
 
 
 
        /* Allocate and setup the channels. */
        cmt->num_channels = hweight8(cmt->hw_channels);
-       cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
+       cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
                                GFP_KERNEL);
        if (cmt->channels == NULL) {
                ret = -ENOMEM;
 
        /* Allocate and setup the channels. */
        mtu->num_channels = 3;
 
-       mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
+       mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
                                GFP_KERNEL);
        if (mtu->channels == NULL) {
                ret = -ENOMEM;
 
        }
 
        /* Allocate and setup the channels. */
-       tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
+       tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
                                GFP_KERNEL);
        if (tmu->channels == NULL) {
                ret = -ENOMEM;
 
                goto err_unreg;
        }
 
-       freq_table = kzalloc(sizeof(*freq_table) *
-                   (perf->state_count+1), GFP_KERNEL);
+       freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
+                            GFP_KERNEL);
        if (!freq_table) {
                result = -ENOMEM;
                goto err_unreg;
 
        for (i = 0; i < MAX_CLUSTERS; i++)
                count += get_table_count(freq_table[i]);
 
-       table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+       table = kcalloc(count, sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;
 
 
        if (acpi_disabled)
                return -ENODEV;
 
-       all_cpu_data = kzalloc(sizeof(void *) * num_possible_cpus(), GFP_KERNEL);
+       all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
+                              GFP_KERNEL);
        if (!all_cpu_data)
                return -ENOMEM;
 
 
        }
 
        /* alloc freq_table */
-       freq_table = kzalloc(sizeof(*freq_table) *
-                                  (data->acpi_data.state_count + 1),
+       freq_table = kcalloc(data->acpi_data.state_count + 1,
+                                  sizeof(*freq_table),
                                   GFP_KERNEL);
        if (!freq_table) {
                result = -ENOMEM;
 
                return -EINVAL;
        }
 
-       longhaul_table = kzalloc((numscales + 1) * sizeof(*longhaul_table),
-                       GFP_KERNEL);
+       longhaul_table = kcalloc(numscales + 1, sizeof(*longhaul_table),
+                                GFP_KERNEL);
        if (!longhaul_table)
                return -ENOMEM;
 
 
        struct cpufreq_frequency_table *table;
        int i;
 
-       table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL);
+       table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL);
        if (table == NULL)
                return -ENOMEM;
 
 
        size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
        size++;
 
-       ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
+       ftab = kcalloc(size, sizeof(*ftab), GFP_KERNEL);
        if (!ftab)
                return -ENOMEM;
 
 
        if (ret)
                return ret;
 
-       freq_table = kzalloc(sizeof(*freq_table) *
-                       (num_freq_table_entries + 1), GFP_KERNEL);
+       freq_table = kcalloc(num_freq_table_entries + 1, sizeof(*freq_table),
+                            GFP_KERNEL);
        if (!freq_table) {
                ret = -ENOMEM;
                goto err_free_array;
 
        cnt = prop->length / sizeof(u32);
        val = prop->value;
 
-       freq_tbl = kzalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
+       freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL);
        if (!freq_tbl) {
                ret = -ENOMEM;
                goto out_put_node;
 
 
 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
 {
-       ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
+       ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
        if (ctx->sa_in == NULL)
                return -ENOMEM;
 
-       ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);
+       ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
        if (ctx->sa_out == NULL) {
                kfree(ctx->sa_in);
                ctx->sa_in = NULL;
        if (!dev->pdr)
                return -ENOMEM;
 
-       dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
-                               GFP_KERNEL);
+       dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
+                                GFP_KERNEL);
        if (!dev->pdr_uinfo) {
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
 
        crypto_ahash_clear_flags(tfm, ~0);
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 
-       ipad = kzalloc(2 * blocksize, GFP_KERNEL);
+       ipad = kcalloc(2, blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_request;
 
 
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 
-       ipad = kzalloc(2 * blocksize, GFP_KERNEL);
+       ipad = kcalloc(2, blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_req;
 
                goto out_hvapi_release;
 
        err = -ENOMEM;
-       cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+       cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
                             GFP_KERNEL);
        if (!cpu_to_cwq)
                goto out_queue_cache_destroy;
 
-       cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+       cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
                             GFP_KERNEL);
        if (!cpu_to_mau)
                goto out_free_cwq_table;
 
        suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
 
        if (suof_handle->img_table.num_simgs != 0) {
-               suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs *
-                                      sizeof(img_header), GFP_KERNEL);
+               suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
+                                      sizeof(img_header),
+                                      GFP_KERNEL);
                if (!suof_img_hdr)
                        return -ENOMEM;
                suof_handle->img_table.simg_hdr = suof_img_hdr;
 
        unsigned long tmo;
        unsigned long flags;
 
-       src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+       src = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
-       dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+       dest = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
 
        if (!src)
                return -ENOMEM;
 
-       dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
+       dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
 
        int i;
 
        /* Allocate 1 Manager and 'chans' Channel threads */
-       pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
+       pl330->channels = kcalloc(1 + chans, sizeof(*thrd),
                                        GFP_KERNEL);
        if (!pl330->channels)
                return -ENOMEM;
 
        pl330->num_peripherals = num_chan;
 
-       pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
+       pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL);
        if (!pl330->peripherals) {
                ret = -ENOMEM;
                goto probe_err2;
 
 
 static int __init shdma_enter(void)
 {
-       shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
-                                   sizeof(long), GFP_KERNEL);
+       shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG),
+                                  sizeof(long),
+                                  GFP_KERNEL);
        if (!shdma_slave_used)
                return -ENOMEM;
        return 0;
 
        if (ret < 0)
                return ret;
 
-       chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
+       chan->sw_desc_pool = kcalloc(ZYNQMP_DMA_NUM_DESCS, sizeof(*desc),
                                     GFP_KERNEL);
        if (!chan->sw_desc_pool)
                return -ENOMEM;
 
        opstate_init();
 
        err = -ENOMEM;
-       ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
+       ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
        if (!ecc_stngs)
                goto err_free;
 
 
        if (!i7core_dev)
                return NULL;
 
-       i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
+       i7core_dev->pdev = kcalloc(table->n_devs, sizeof(*i7core_dev->pdev),
                                   GFP_KERNEL);
        if (!i7core_dev->pdev) {
                kfree(i7core_dev);
 
                char *str;
                struct extcon_cable *cable;
 
-               edev->cables = kzalloc(sizeof(struct extcon_cable) *
-                                      edev->max_supported, GFP_KERNEL);
+               edev->cables = kcalloc(edev->max_supported,
+                                      sizeof(struct extcon_cable),
+                                      GFP_KERNEL);
                if (!edev->cables) {
                        ret = -ENOMEM;
                        goto err_sysfs_alloc;
                        cable = &edev->cables[index];
 
                        snprintf(buf, 10, "cable.%d", index);
-                       str = kzalloc(sizeof(char) * (strlen(buf) + 1),
+                       str = kzalloc(strlen(buf) + 1,
                                      GFP_KERNEL);
                        if (!str) {
                                for (index--; index >= 0; index--) {
                for (index = 0; edev->mutually_exclusive[index]; index++)
                        ;
 
-               edev->attrs_muex = kzalloc(sizeof(struct attribute *) *
-                                          (index + 1), GFP_KERNEL);
+               edev->attrs_muex = kcalloc(index + 1,
+                                          sizeof(struct attribute *),
+                                          GFP_KERNEL);
                if (!edev->attrs_muex) {
                        ret = -ENOMEM;
                        goto err_muex;
                }
 
-               edev->d_attrs_muex = kzalloc(sizeof(struct device_attribute) *
-                                            index, GFP_KERNEL);
+               edev->d_attrs_muex = kcalloc(index,
+                                            sizeof(struct device_attribute),
+                                            GFP_KERNEL);
                if (!edev->d_attrs_muex) {
                        ret = -ENOMEM;
                        kfree(edev->attrs_muex);
 
                for (index = 0; edev->mutually_exclusive[index]; index++) {
                        sprintf(buf, "0x%x", edev->mutually_exclusive[index]);
-                       name = kzalloc(sizeof(char) * (strlen(buf) + 1),
+                       name = kzalloc(strlen(buf) + 1,
                                       GFP_KERNEL);
                        if (!name) {
                                for (index--; index >= 0; index--) {
 
        if (edev->max_supported) {
                edev->extcon_dev_type.groups =
-                       kzalloc(sizeof(struct attribute_group *) *
-                               (edev->max_supported + 2), GFP_KERNEL);
+                       kcalloc(edev->max_supported + 2,
+                               sizeof(struct attribute_group *),
+                               GFP_KERNEL);
                if (!edev->extcon_dev_type.groups) {
                        ret = -ENOMEM;
                        goto err_alloc_groups;
 
        packet_array_size = max(
                        (unsigned int)(allocation_floor / rbu_data.packetsize),
                        (unsigned int)1);
-       invalid_addr_packet_array = kzalloc(packet_array_size * sizeof(void*),
+       invalid_addr_packet_array = kcalloc(packet_array_size, sizeof(void *),
                                                GFP_KERNEL);
 
        if (!invalid_addr_packet_array) {
 
        count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
        sg_count = sg_pages_num(count);
 
-       sg_pages = kzalloc(sg_count * sizeof(*sg_pages), GFP_KERNEL);
+       sg_pages = kcalloc(sg_count, sizeof(*sg_pages), GFP_KERNEL);
        if (!sg_pages)
                return -ENOMEM;
 
 
        if (!efi_enabled(EFI_MEMMAP))
                return 0;
 
-       map_entries = kzalloc(efi.memmap.nr_map * sizeof(entry), GFP_KERNEL);
+       map_entries = kcalloc(efi.memmap.nr_map, sizeof(entry), GFP_KERNEL);
        if (!map_entries) {
                ret = -ENOMEM;
                goto out;
 
        arr = kzalloc(sizeof(*arr), GFP_KERNEL);
        if (!arr)
                return ERR_PTR(-ENOMEM);
-       arr->record = kzalloc(sizeof(arr->record[0]) * n, GFP_KERNEL);
-       arr->subtree = kzalloc(sizeof(arr->subtree[0]) * n, GFP_KERNEL);
+       arr->record = kcalloc(n, sizeof(arr->record[0]), GFP_KERNEL);
+       arr->subtree = kcalloc(n, sizeof(arr->subtree[0]), GFP_KERNEL);
        if (!arr->record || !arr->subtree) {
                kfree(arr->record);
                kfree(arr->subtree);
 
                goto err_iomap;
        }
 
-       chip_save = kzalloc(sizeof(*chip) * 8, GFP_KERNEL);
+       chip_save = kcalloc(8, sizeof(*chip), GFP_KERNEL);
        if (chip_save == NULL) {
                ret = -ENOMEM;
                goto err_kzalloc;
 
                pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
        }
 
-       adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
+       adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
                                                        GFP_KERNEL);
 
        if (adev->acp.acp_cell == NULL)
                return -ENOMEM;
 
-       adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);
+       adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);
 
        if (adev->acp.acp_res == NULL) {
                kfree(adev->acp.acp_cell);
                return -ENOMEM;
        }
 
-       i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
+       i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
        if (i2s_pdata == NULL) {
                kfree(adev->acp.acp_res);
                kfree(adev->acp.acp_cell);
 
                        ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
 
                        adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
-                               kzalloc(psl->ucNumEntries *
+                               kcalloc(psl->ucNumEntries,
                                        sizeof(struct amdgpu_phase_shedding_limits_entry),
                                        GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
 
                n -= adev->irq.ih.ring_size;
        n /= size;
 
-       gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+       gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
        if (!gtt_obj) {
                DRM_ERROR("Failed to allocate %d pointers\n", n);
                r = 1;
 
        ectx.abort = false;
        ectx.last_jump = 0;
        if (ws)
-               ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
+               ectx.ws = kcalloc(4, ws, GFP_KERNEL);
        else
                ectx.ws = NULL;
 
 
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-       adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
-                                 state_array->ucNumEntries, GFP_KERNEL);
+       adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct amdgpu_ps),
+                                 GFP_KERNEL);
        if (!adev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
        ci_set_private_data_variables_based_on_pptable(adev);
 
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-               kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+               kcalloc(4,
+                       sizeof(struct amdgpu_clock_voltage_dependency_entry),
+                       GFP_KERNEL);
        if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
                ci_dpm_fini(adev);
                return -ENOMEM;
 
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-       adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
-                                 state_array->ucNumEntries, GFP_KERNEL);
+       adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct amdgpu_ps),
+                                 GFP_KERNEL);
        if (!adev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
 
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-       adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
-                                 state_array->ucNumEntries, GFP_KERNEL);
+       adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct amdgpu_ps),
+                                 GFP_KERNEL);
        if (!adev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
                return ret;
 
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-               kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+               kcalloc(4,
+                       sizeof(struct amdgpu_clock_voltage_dependency_entry),
+                       GFP_KERNEL);
        if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
                amdgpu_free_extended_power_table(adev);
                return -ENOMEM;
 
                return false;
        }
 
-       msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL);
+       msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
 
        if (!msgs)
                return false;
 
        entry->type = log_type;
        entry->logger = logger;
 
-       entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char),
+       entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE,
                             GFP_KERNEL);
 
        entry->buf_offset = 0;
 
                return false;
        }
 
-       vector->container = kzalloc(struct_size * capacity, GFP_KERNEL);
+       vector->container = kcalloc(capacity, struct_size, GFP_KERNEL);
        if (vector->container == NULL)
                return false;
        vector->capacity = capacity;
                return false;
        }
 
-       vector->container = kzalloc(struct_size * count, GFP_KERNEL);
+       vector->container = kcalloc(count, struct_size, GFP_KERNEL);
 
        if (vector->container == NULL)
                return false;
 
        if (*ss_entries_num == 0)
                return;
 
-       ss_info = kzalloc(sizeof(struct spread_spectrum_info) * (*ss_entries_num),
+       ss_info = kcalloc(*ss_entries_num,
+                         sizeof(struct spread_spectrum_info),
                          GFP_KERNEL);
        ss_info_cur = ss_info;
        if (ss_info == NULL)
                return;
 
-       ss_data = kzalloc(sizeof(struct spread_spectrum_data) * (*ss_entries_num),
+       ss_data = kcalloc(*ss_entries_num,
+                         sizeof(struct spread_spectrum_data),
                          GFP_KERNEL);
        if (ss_data == NULL)
                goto out_free_info;
 
                        if (number_of_bits) {
                                uint32_t index_of_uint = 0;
 
-                               slot = kzalloc(number_of_uints * sizeof(uint32_t),
+                               slot = kcalloc(number_of_uints,
+                                              sizeof(uint32_t),
                                               GFP_KERNEL);
 
                                if (!slot) {
 
 
        output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
 
-       rgb_user = kzalloc(sizeof(*rgb_user) * (GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS),
-                       GFP_KERNEL);
+       rgb_user = kcalloc(GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS,
+                          sizeof(*rgb_user),
+                          GFP_KERNEL);
        if (!rgb_user)
                goto rgb_user_alloc_fail;
 
-       rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
-                       GFP_KERNEL);
+       rgb_regamma = kcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+                             sizeof(*rgb_regamma),
+                             GFP_KERNEL);
        if (!rgb_regamma)
                goto rgb_regamma_alloc_fail;
 
 
        if (core_freesync == NULL)
                goto fail_alloc_context;
 
-       core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
+       core_freesync->map = kcalloc(MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
+                                       sizeof(struct freesync_entity),
                                        GFP_KERNEL);
 
        if (core_freesync->map == NULL)
 
                        else
                                core_stats->entries = reg_data;
                }
-               core_stats->time = kzalloc(
-                       sizeof(struct stats_time_cache) *
-                               core_stats->entries,
+               core_stats->time = kcalloc(core_stats->entries,
+                                               sizeof(struct stats_time_cache),
                                                GFP_KERNEL);
 
                if (core_stats->time == NULL)
                        goto fail_construct_time;
 
                core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
-               core_stats->events = kzalloc(
-                       sizeof(struct stats_event_cache) *
-                               core_stats->event_entries,
-                                               GFP_KERNEL);
+               core_stats->events = kcalloc(core_stats->event_entries,
+                                            sizeof(struct stats_event_cache),
+                                            GFP_KERNEL);
 
                if (core_stats->events == NULL)
                        goto fail_construct_events;
 
                return 0;
        }
 
-       hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
+       hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
        if (hwmgr->ps == NULL)
                return -ENOMEM;
 
 
        high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
        num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
 
-       gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
+       gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
                             GFP_KERNEL);
        if (!gvt->types)
                return -ENOMEM;
 
        if (num_downstream == 0)
                return -EINVAL;
 
-       ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
+       ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
        if (!ksv_fifo)
                return -ENOMEM;
 
 
        if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
                return 0;
 
-       valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid),
+       valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
                        GFP_KERNEL);
        if (!valid)
                return -ENOMEM;
 
                goto done;
 
        device->runlists = fls64(a->v.runlists.data);
-       device->runlist = kzalloc(sizeof(*device->runlist) *
-                                 device->runlists, GFP_KERNEL);
+       device->runlist = kcalloc(device->runlists, sizeof(*device->runlist),
+                                 GFP_KERNEL);
        if (!device->runlist) {
                ret = -ENOMEM;
                goto done;
 
                        return ret;
        }
 
-       *psclass = kzalloc(sizeof(**psclass) * args->sclass.count, GFP_KERNEL);
+       *psclass = kcalloc(args->sclass.count, sizeof(**psclass), GFP_KERNEL);
        if (*psclass) {
                for (i = 0; i < args->sclass.count; i++) {
                        (*psclass)[i].oclass = args->sclass.oclass[i].oclass;
 
 nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
                struct nvkm_event *event)
 {
-       event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr,
+       event->refs = kzalloc(array3_size(index_nr, types_nr,
+                                         sizeof(*event->refs)),
                              GFP_KERNEL);
        if (!event->refs)
                return -ENOMEM;
 
        nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
 
        /* Read PBDMA->runlist(s) mapping from HW. */
-       if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
+       if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL)))
                return -ENOMEM;
 
        for (i = 0; i < fifo->pbdma_nr; i++)
 
                        }
                }
        } else {
-               addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
+               addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
                if (!addrs) {
                        ret = -ENOMEM;
                        goto free_pages;
 
        ectx.abort = false;
        ectx.last_jump = 0;
        if (ws)
-               ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
+               ectx.ws = kcalloc(4, ws, GFP_KERNEL);
        else
                ectx.ws = NULL;
 
 
                return ret;
 
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-               kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+               kcalloc(4,
+                       sizeof(struct radeon_clock_voltage_dependency_entry),
+                       GFP_KERNEL);
        if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
                r600_free_extended_power_table(rdev);
                return -ENOMEM;
 
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-       rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                                 state_array->ucNumEntries, GFP_KERNEL);
+       rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct radeon_ps),
+                                 GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
        ci_set_private_data_variables_based_on_pptable(rdev);
 
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-               kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+               kcalloc(4,
+                       sizeof(struct radeon_clock_voltage_dependency_entry),
+                       GFP_KERNEL);
        if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
                ci_dpm_fini(rdev);
                return -ENOMEM;
 
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-       rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                                 state_array->ucNumEntries, GFP_KERNEL);
+       rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct radeon_ps),
+                                 GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
 
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
-       rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                                 power_info->pplib.ucNumStates, GFP_KERNEL);
+       rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+                                 sizeof(struct radeon_ps),
+                                 GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
 
                return ret;
 
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-               kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+               kcalloc(4,
+                       sizeof(struct radeon_clock_voltage_dependency_entry),
+                       GFP_KERNEL);
        if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
                r600_free_extended_power_table(rdev);
                return -ENOMEM;
 
                        ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
 
                        rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
-                               kzalloc(psl->ucNumEntries *
+                               kcalloc(psl->ucNumEntries,
                                        sizeof(struct radeon_phase_shedding_limits_entry),
                                        GFP_KERNEL);
                        if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
 
                num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
        if (num_modes == 0)
                return state_index;
-       rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+       rdev->pm.power_state = kcalloc(num_modes,
+                                      sizeof(struct radeon_power_state),
+                                      GFP_KERNEL);
        if (!rdev->pm.power_state)
                return state_index;
        /* last mode is usually default, array is low to high */
        for (i = 0; i < num_modes; i++) {
                rdev->pm.power_state[state_index].clock_info =
-                       kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+                       kcalloc(1, sizeof(struct radeon_pm_clock_info),
+                               GFP_KERNEL);
                if (!rdev->pm.power_state[state_index].clock_info)
                        return state_index;
                rdev->pm.power_state[state_index].num_clock_modes = 1;
        radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
        if (power_info->pplib.ucNumStates == 0)
                return state_index;
-       rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
-                                      power_info->pplib.ucNumStates, GFP_KERNEL);
+       rdev->pm.power_state = kcalloc(power_info->pplib.ucNumStates,
+                                      sizeof(struct radeon_power_state),
+                                      GFP_KERNEL);
        if (!rdev->pm.power_state)
                return state_index;
        /* first mode is usually default, followed by low to high */
                         le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
                         (power_state->v1.ucNonClockStateIndex *
                          power_info->pplib.ucNonClockSize));
-               rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
-                                                            ((power_info->pplib.ucStateEntrySize - 1) ?
-                                                             (power_info->pplib.ucStateEntrySize - 1) : 1),
-                                                            GFP_KERNEL);
+               rdev->pm.power_state[i].clock_info =
+                       kcalloc((power_info->pplib.ucStateEntrySize - 1) ?
+                               (power_info->pplib.ucStateEntrySize - 1) : 1,
+                               sizeof(struct radeon_pm_clock_info),
+                               GFP_KERNEL);
                if (!rdev->pm.power_state[i].clock_info)
                        return state_index;
                if (power_info->pplib.ucStateEntrySize - 1) {
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
        if (state_array->ucNumEntries == 0)
                return state_index;
-       rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
-                                      state_array->ucNumEntries, GFP_KERNEL);
+       rdev->pm.power_state = kcalloc(state_array->ucNumEntries,
+                                      sizeof(struct radeon_power_state),
+                                      GFP_KERNEL);
        if (!rdev->pm.power_state)
                return state_index;
        power_state_offset = (u8 *)state_array->states;
                non_clock_array_index = power_state->v2.nonClockInfoIndex;
                non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
                        &non_clock_info_array->nonClockInfo[non_clock_array_index];
-               rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
-                                                            (power_state->v2.ucNumDPMLevels ?
-                                                             power_state->v2.ucNumDPMLevels : 1),
-                                                            GFP_KERNEL);
+               rdev->pm.power_state[i].clock_info =
+                       kcalloc(power_state->v2.ucNumDPMLevels ?
+                               power_state->v2.ucNumDPMLevels : 1,
+                               sizeof(struct radeon_pm_clock_info),
+                               GFP_KERNEL);
                if (!rdev->pm.power_state[i].clock_info)
                        return state_index;
                if (power_state->v2.ucNumDPMLevels) {
                rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
                if (rdev->pm.power_state) {
                        rdev->pm.power_state[0].clock_info =
-                               kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+                               kcalloc(1,
+                                       sizeof(struct radeon_pm_clock_info),
+                                       GFP_KERNEL);
                        if (rdev->pm.power_state[0].clock_info) {
                                /* add the default mode */
                                rdev->pm.power_state[state_index].type =
 
        rdev->pm.default_power_state_index = -1;
 
        /* allocate 2 power states */
-       rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+       rdev->pm.power_state = kcalloc(2, sizeof(struct radeon_power_state),
+                                      GFP_KERNEL);
        if (rdev->pm.power_state) {
                /* allocate 1 clock mode per state */
                rdev->pm.power_state[0].clock_info =
-                       kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+                       kcalloc(1, sizeof(struct radeon_pm_clock_info),
+                               GFP_KERNEL);
                rdev->pm.power_state[1].clock_info =
-                       kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+                       kcalloc(1, sizeof(struct radeon_pm_clock_info),
+                               GFP_KERNEL);
                if (!rdev->pm.power_state[0].clock_info ||
                    !rdev->pm.power_state[1].clock_info)
                        goto pm_failed;
 
        n = rdev->mc.gtt_size - rdev->gart_pin_size;
        n /= size;
 
-       gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+       gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
        if (!gtt_obj) {
                DRM_ERROR("Failed to allocate %d pointers\n", n);
                r = 1;
 
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
-       rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                                 power_info->pplib.ucNumStates, GFP_KERNEL);
+       rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+                                 sizeof(struct radeon_ps),
+                                 GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
 
 
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
-       rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                                 power_info->pplib.ucNumStates, GFP_KERNEL);
+       rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+                                 sizeof(struct radeon_ps),
+                                 GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
 
 
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
-       rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                                 power_info->pplib.ucNumStates, GFP_KERNEL);
+       rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+                                 sizeof(struct radeon_ps),
+                                 GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
 
 
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-       rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                                 state_array->ucNumEntries, GFP_KERNEL);
+       rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct radeon_ps),
+                                 GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
                return ret;
 
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-               kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+               kcalloc(4,
+                       sizeof(struct radeon_clock_voltage_dependency_entry),
+                       GFP_KERNEL);
        if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
                r600_free_extended_power_table(rdev);
                return -ENOMEM;
 
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-       rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                                 state_array->ucNumEntries, GFP_KERNEL);
+       rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct radeon_ps),
+                                 GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
 
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-       rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                                 state_array->ucNumEntries, GFP_KERNEL);
+       rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct radeon_ps),
+                                 GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
 
        if (!nodes)
                goto err;
 
-       bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+       bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
                         GFP_KERNEL);
        if (!bitmap)
                goto err_nodes;
        if (!nodes)
                goto err;
 
-       bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+       bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
                         GFP_KERNEL);
        if (!bitmap)
                goto err_nodes;
 
        char *buf = NULL;
 
        if (!f) {
-               buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
+               buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC);
                if (!buf)
                        return ERR_PTR(-ENOMEM);
        }
                goto out;
        }
 
-       if (!(list->hid_debug_buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
+       if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
                err = -ENOMEM;
                kfree(list);
                goto out;
 
 {
        int cpu;
 
-       hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
+       hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
                                         GFP_KERNEL);
        if (hv_context.hv_numa_map == NULL) {
                pr_err("Unable to allocate NUMA map\n");
 
         * First page holds struct hv_ring_buffer, do wraparound mapping for
         * the rest.
         */
-       pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
+       pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
                                   GFP_KERNEL);
        if (!pages_wraparound)
                return -ENOMEM;
 
        if (!pss->package.count)
                goto end;
 
-       resource->domain_devices = kzalloc(sizeof(struct acpi_device *) *
-                                          pss->package.count, GFP_KERNEL);
+       resource->domain_devices = kcalloc(pss->package.count,
+                                          sizeof(struct acpi_device *),
+                                          GFP_KERNEL);
        if (!resource->domain_devices) {
                res = -ENOMEM;
                goto end;
                        goto error;
                }
 
-               *str = kzalloc(sizeof(u8) * (element->string.length + 1),
+               *str = kcalloc(element->string.length + 1, sizeof(u8),
                               GFP_KERNEL);
                if (!*str) {
                        res = -ENOMEM;
 
                return -ENODEV;
 
        max_packages = topology_max_packages();
-       pkg_devices = kzalloc(max_packages * sizeof(struct platform_device *),
+       pkg_devices = kcalloc(max_packages, sizeof(struct platform_device *),
                              GFP_KERNEL);
        if (!pkg_devices)
                return -ENOMEM;
 
                num_ambs += hweight16(data->amb_present[i] & 0x7fff);
 
        /* Set up sysfs stuff */
-       data->attrs = kzalloc(sizeof(*data->attrs) * num_ambs * KNOBS_PER_AMB,
-                               GFP_KERNEL);
+       data->attrs = kzalloc(array3_size(num_ambs, KNOBS_PER_AMB,
+                                         sizeof(*data->attrs)),
+                             GFP_KERNEL);
        if (!data->attrs)
                return -ENOMEM;
        data->num_attrs = 0;
 
                return -ENOENT;
        data->num_sensors = err;
 
-       data->sensors = kzalloc(data->num_sensors * sizeof(*data->sensors),
+       data->sensors = kcalloc(data->num_sensors, sizeof(*data->sensors),
                                GFP_KERNEL);
        if (!data->sensors)
                return -ENOMEM;
 
 
        printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4882\n");
        /* Define the 5 virtual adapters and algorithms structures */
-       if (!(s4882_adapter = kzalloc(5 * sizeof(struct i2c_adapter),
+       if (!(s4882_adapter = kcalloc(5, sizeof(struct i2c_adapter),
                                      GFP_KERNEL))) {
                error = -ENOMEM;
                goto ERROR1;
        }
-       if (!(s4882_algo = kzalloc(5 * sizeof(struct i2c_algorithm),
+       if (!(s4882_algo = kcalloc(5, sizeof(struct i2c_algorithm),
                                   GFP_KERNEL))) {
                error = -ENOMEM;
                goto ERROR2;
 
 
        printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4985\n");
        /* Define the 5 virtual adapters and algorithms structures */
-       s4985_adapter = kzalloc(5 * sizeof(struct i2c_adapter), GFP_KERNEL);
+       s4985_adapter = kcalloc(5, sizeof(struct i2c_adapter), GFP_KERNEL);
        if (!s4985_adapter) {
                error = -ENOMEM;
                goto ERROR1;
        }
-       s4985_algo = kzalloc(5 * sizeof(struct i2c_algorithm), GFP_KERNEL);
+       s4985_algo = kcalloc(5, sizeof(struct i2c_algorithm), GFP_KERNEL);
        if (!s4985_algo) {
                error = -ENOMEM;
                goto ERROR2;
 
        int res1, res2;
 
        /* we support 2 SMBus adapters */
-       smbuses = kzalloc(2 * sizeof(struct nforce2_smbus), GFP_KERNEL);
+       smbuses = kcalloc(2, sizeof(struct nforce2_smbus), GFP_KERNEL);
        if (!smbuses)
                return -ENOMEM;
        pci_set_drvdata(dev, smbuses);
 
                chip->bank_mask >>= 1;
        }
 
-       chip->bank_words = kzalloc(chip->bank_mask * chip->bank_size *
-                                  sizeof(u16), GFP_KERNEL);
+       chip->bank_words = kcalloc(chip->bank_mask * chip->bank_size,
+                                  sizeof(u16),
+                                  GFP_KERNEL);
        if (!chip->bank_words)
                return -ENOMEM;
 
 
        if (info == &hpt36x || info == &hpt374)
                dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
 
-       dyn_info = kzalloc(sizeof(*dyn_info) * (dev2 ? 2 : 1), GFP_KERNEL);
+       dyn_info = kcalloc(dev2 ? 2 : 1, sizeof(*dyn_info), GFP_KERNEL);
        if (dyn_info == NULL) {
                printk(KERN_ERR "%s %s: out of memory!\n",
                        d.name, pci_name(dev));
 
        struct it821x_dev *itdevs;
        int rc;
 
-       itdevs = kzalloc(2 * sizeof(*itdevs), GFP_KERNEL);
+       itdevs = kcalloc(2, sizeof(*itdevs), GFP_KERNEL);
        if (itdevs == NULL) {
                printk(KERN_ERR DRV_NAME " %s: out of memory\n", pci_name(dev));
                return -ENOMEM;
 
        if (!adis->xfer)
                return -ENOMEM;
 
-       adis->buffer = kzalloc(indio_dev->scan_bytes * 2, GFP_KERNEL);
+       adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL);
        if (!adis->buffer)
                return -ENOMEM;
 
 
        }
 
        /* NULL terminated array to save passing size */
-       chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
+       chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
        if (chans == NULL) {
                ret = -ENOMEM;
                goto error_ret;
 
        rwlock_init(&device->cache.lock);
 
        device->cache.ports =
-               kzalloc(sizeof(*device->cache.ports) *
-                       (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
+               kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
+                       sizeof(*device->cache.ports),
+                       GFP_KERNEL);
        if (!device->cache.ports)
                return -ENOMEM;
 
 
         * Therefore port_immutable is declared as a 1 based array with
         * potential empty slots at the beginning.
         */
-       device->port_immutable = kzalloc(sizeof(*device->port_immutable)
-                                        * (end_port + 1),
+       device->port_immutable = kcalloc(end_port + 1,
+                                        sizeof(*device->port_immutable),
                                         GFP_KERNEL);
        if (!device->port_immutable)
                return -ENOMEM;
 
        int ret = 0;
        mutex_lock(&iwpm_admin_lock);
        if (atomic_read(&iwpm_admin.refcount) == 0) {
-               iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
-                                       sizeof(struct hlist_head), GFP_KERNEL);
+               iwpm_hash_bucket = kcalloc(IWPM_MAPINFO_HASH_SIZE,
+                                          sizeof(struct hlist_head),
+                                          GFP_KERNEL);
                if (!iwpm_hash_bucket) {
                        ret = -ENOMEM;
                        goto init_exit;
                }
-               iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
-                                       sizeof(struct hlist_head), GFP_KERNEL);
+               iwpm_reminfo_bucket = kcalloc(IWPM_REMINFO_HASH_SIZE,
+                                             sizeof(struct hlist_head),
+                                             GFP_KERNEL);
                if (!iwpm_reminfo_bucket) {
                        kfree(iwpm_hash_bucket);
                        ret = -ENOMEM;
 
        if (!wq->qpid)
                return -ENOMEM;
 
-       wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL);
+       wq->rq = kcalloc(depth, sizeof(struct t3_swrq), GFP_KERNEL);
        if (!wq->rq)
                goto err1;
 
        if (!wq->rq_addr)
                goto err2;
 
-       wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
+       wq->sq = kcalloc(depth, sizeof(struct t3_swsq), GFP_KERNEL);
        if (!wq->sq)
                goto err3;
 
 
        rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
 
        if (c4iw_wr_log) {
-               rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
-                                      sizeof(*rdev->wr_log), GFP_KERNEL);
+               rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
+                                      sizeof(*rdev->wr_log),
+                                      GFP_KERNEL);
                if (rdev->wr_log) {
                        rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
                        atomic_set(&rdev->wr_log_idx, 0);
        ctx->dev->db_state = RECOVERY;
        idr_for_each(&ctx->dev->qpidr, count_qps, &count);
 
-       qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
+       qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
        if (!qp_list.qps) {
                spin_unlock_irq(&ctx->dev->lock);
                return;
 
        }
 
        if (!user) {
-               wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
-                                GFP_KERNEL);
+               wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
+                                      GFP_KERNEL);
                if (!wq->sq.sw_sq) {
                        ret = -ENOMEM;
                        goto free_rq_qid;
                }
 
-               wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
-                                GFP_KERNEL);
+               wq->rq.sw_rq = kcalloc(wq->rq.size, sizeof(*wq->rq.sw_rq),
+                                      GFP_KERNEL);
                if (!wq->rq.sw_rq) {
                        ret = -ENOMEM;
                        goto free_sw_sq;
 
        struct device *dev = hr_dev->dev;
        int ret = -EINVAL;
 
-       context = kzalloc(2 * sizeof(*context), GFP_KERNEL);
+       context = kcalloc(2, sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;
 
 
 
        tun_qp = &ctx->qp[qp_type];
 
-       tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
+       tun_qp->ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
+                              sizeof(struct mlx4_ib_buf),
                               GFP_KERNEL);
        if (!tun_qp->ring)
                return -ENOMEM;
 
        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);
 
-       buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
+       buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
                              GFP_KERNEL);
        buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
                                  GFP_KERNEL);
 
        struct mthca_resource *profile;
        int i, j;
 
-       profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
+       profile = kcalloc(MTHCA_RES_NUM, sizeof(*profile), GFP_KERNEL);
        if (!profile)
                return -ENOMEM;
 
 
        int ret;
 
        /* Allocate space the all mgt QPs once */
-       mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
+       mgtvnic = kcalloc(NES_MGT_QP_COUNT, sizeof(struct nes_vnic_mgt),
+                         GFP_KERNEL);
        if (!mgtvnic)
                return -ENOMEM;
 
 
                                                                ibmr = ERR_PTR(-ENOMEM);
                                                                goto reg_user_mr_err;
                                                        }
-                                                       root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
-                                                                       GFP_KERNEL);
+                                                       root_vpbl.leaf_vpbl = kcalloc(1024,
+                                                                                     sizeof(*root_vpbl.leaf_vpbl),
+                                                                                     GFP_KERNEL);
                                                        if (!root_vpbl.leaf_vpbl) {
                                                                ib_umem_release(region);
                                                                pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
 
        if (!num_eq)
                return -EINVAL;
 
-       dev->eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
+       dev->eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL);
        if (!dev->eq_tbl)
                return -ENOMEM;
 
 
 static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
 {
        mutex_init(&dev->dev_lock);
-       dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) *
-                             OCRDMA_MAX_CQ, GFP_KERNEL);
+       dev->cq_tbl = kcalloc(OCRDMA_MAX_CQ, sizeof(struct ocrdma_cq *),
+                             GFP_KERNEL);
        if (!dev->cq_tbl)
                goto alloc_err;
 
        if (dev->attr.max_qp) {
-               dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) *
-                                     OCRDMA_MAX_QP, GFP_KERNEL);
+               dev->qp_tbl = kcalloc(OCRDMA_MAX_QP,
+                                     sizeof(struct ocrdma_qp *),
+                                     GFP_KERNEL);
                if (!dev->qp_tbl)
                        goto alloc_err;
        }
 
-       dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL);
+       dev->stag_arr = kcalloc(OCRDMA_MAX_STAG, sizeof(u64), GFP_KERNEL);
        if (dev->stag_arr == NULL)
                goto alloc_err;
 
 
        void *va;
        dma_addr_t pa;
 
-       mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
-                               mr->num_pbls, GFP_KERNEL);
+       mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
+                               GFP_KERNEL);
 
        if (!mr->pbl_table)
                return -ENOMEM;
 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
 {
        qp->wqe_wr_id_tbl =
-           kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
+           kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
                    GFP_KERNEL);
        if (qp->wqe_wr_id_tbl == NULL)
                return -ENOMEM;
        qp->rqe_wr_id_tbl =
-           kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
+           kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
        if (qp->rqe_wr_id_tbl == NULL)
                return -ENOMEM;
 
 
        if (udata == NULL) {
                status = -ENOMEM;
-               srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
-                           GFP_KERNEL);
+               srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
+                                            GFP_KERNEL);
                if (srq->rqe_wr_id_tbl == NULL)
                        goto arm_err;
 
 
        u16 n_entries;
        int i, rc;
 
-       dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
-                               QEDR_MAX_SGID, GFP_KERNEL);
+       dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
+                               GFP_KERNEL);
        if (!dev->sgid_tbl)
                return -ENOMEM;
 
 
        qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
                              dev->attr.max_sqe);
 
-       qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
+       qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
                                GFP_KERNEL);
        if (!qp->wqe_wr_id) {
                DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
        qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
 
        /* Allocate driver internal RQ array */
-       qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
+       qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
                                GFP_KERNEL);
        if (!qp->rqe_wr_id) {
                DP_ERR(dev,
 
                actual_cnt -= dd->num_pports;
 
        tabsize = actual_cnt;
-       dd->cspec->msix_entries = kzalloc(tabsize *
-                       sizeof(struct qib_msix_entry), GFP_KERNEL);
+       dd->cspec->msix_entries = kcalloc(tabsize,
+                                         sizeof(struct qib_msix_entry),
+                                         GFP_KERNEL);
        if (!dd->cspec->msix_entries)
                tabsize = 0;
 
 
        if (!qib_cpulist_count) {
                u32 count = num_online_cpus();
 
-               qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
-                                     sizeof(long), GFP_KERNEL);
+               qib_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
+                                     GFP_KERNEL);
                if (qib_cpulist)
                        qib_cpulist_count = count;
        }
 
                /* Do Nothing */
        }
 
-       res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
+       res_chunk_list = kcalloc(res_lst_sz + 1, sizeof(*res_chunk_list),
                                        GFP_ATOMIC);
        if (!res_chunk_list)
                return ERR_PTR(-ENOMEM);
 
        }
 
        chunk->cnt = chunk->free_cnt = cnt;
-       chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL);
+       chunk->res = kcalloc(cnt, sizeof(*(chunk->res)), GFP_KERNEL);
        if (!chunk->res)
                return -ENOMEM;
 
 
                return -ENOMEM;
        set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
        size = roundup_pow_of_two(arp_tbl.gc_thresh3);
-       buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
+       buckets = kcalloc(size, sizeof(*buckets), GFP_KERNEL);
        if (!buckets) {
                kfree(htbl);
                return -ENOMEM;
        ipoib_napi_add(dev);
 
        /* Allocate RX/TX "rings" to hold queued skbs */
-       priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
-                               GFP_KERNEL);
+       priv->rx_ring = kcalloc(ipoib_recvq_size, sizeof(*priv->rx_ring),
+                               GFP_KERNEL);
        if (!priv->rx_ring)
                goto out;
 
 
        u64 dma_addr;
        int i, j;
 
-       isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
-                               sizeof(struct iser_rx_desc), GFP_KERNEL);
+       isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS,
+                                      sizeof(struct iser_rx_desc),
+                                      GFP_KERNEL);
        if (!isert_conn->rx_descs)
                return -ENOMEM;
 
 
 
        keypad_data->row_shift = get_count_order(keypad_data->cols);
        max_keys = keypad_data->rows << keypad_data->row_shift;
-       keypad_data->keymap = kzalloc(max_keys * sizeof(keypad_data->keymap[0]),
+       keypad_data->keymap = kcalloc(max_keys,
+                                     sizeof(keypad_data->keymap[0]),
                                      GFP_KERNEL);
        if (!keypad_data->keymap) {
                dev_err(&pdev->dev, "Not enough memory for keymap\n");
 
 
        qi->desc = page_address(desc_page);
 
-       qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
+       qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
 
        /* This is too big for the stack - allocate it from slab */
        ctxt_table_entries = ext ? 512 : 256;
        ret = -ENOMEM;
-       ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
+       ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
        if (!ctxt_tbls)
                goto out_unmap;
 
        unsigned long flag;
 
        for_each_active_iommu(iommu, drhd) {
-               iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
+               iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
                                                 GFP_ATOMIC);
                if (!iommu->iommu_state)
                        goto nomem;
 
        if (num_iommus < 0)
                return 0;
 
-       arch_data = kzalloc((num_iommus + 1) * sizeof(*arch_data), GFP_KERNEL);
+       arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
        if (!arch_data)
                return -ENOMEM;
 
 
 {
        int res;
 
-       tpci200->slots = kzalloc(
-               TPCI200_NB_SLOT * sizeof(struct tpci200_slot), GFP_KERNEL);
+       tpci200->slots = kcalloc(TPCI200_NB_SLOT, sizeof(struct tpci200_slot),
+                                GFP_KERNEL);
        if (tpci200->slots == NULL)
                return -ENOMEM;
 
 
                goto err_priv;
        }
 
-       priv->msi_map = kzalloc(sizeof(*priv->msi_map) * BITS_TO_LONGS(priv->num_spis),
+       priv->msi_map = kcalloc(BITS_TO_LONGS(priv->num_spis),
+                               sizeof(*priv->msi_map),
                                GFP_KERNEL);
        if (!priv->msi_map) {
                ret = -ENOMEM;
 
                break;
        }
 
-       v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
+       v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
                          GFP_KERNEL);
        if (!v2m->bm) {
                ret = -ENOMEM;
 
        if (!its_dev->event_map.vm) {
                struct its_vlpi_map *maps;
 
-               maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
+               maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
                               GFP_KERNEL);
                if (!maps) {
                        ret = -ENOMEM;
 {
        lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
 
-       lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
+       lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long),
                             GFP_KERNEL);
        if (!lpi_bitmap) {
                lpi_chunks = 0;
        if (!nr_chunks)
                goto out;
 
-       bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
+       bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
+                        sizeof(long),
                         GFP_ATOMIC);
        if (!bitmap)
                goto out;
 
 static int its_alloc_collections(struct its_node *its)
 {
-       its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
+       its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
                                   GFP_KERNEL);
        if (!its->collections)
                return -ENOMEM;
        if (alloc_lpis) {
                lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
                if (lpi_map)
-                       col_map = kzalloc(sizeof(*col_map) * nr_lpis,
+                       col_map = kcalloc(nr_lpis, sizeof(*col_map),
                                          GFP_KERNEL);
        } else {
-               col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
+               col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
                nr_lpis = 0;
                lpi_base = 0;
        }
        its = list_first_entry(&its_nodes, struct its_node, entry);
 
        entries = roundup_pow_of_two(nr_cpu_ids);
-       vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries,
+       vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
                                 GFP_KERNEL);
        if (!vpe_proxy.vpes) {
                pr_err("ITS: Can't allocate GICv4 proxy device array\n");
 
        if (!nr_parts)
                goto out_put_node;
 
-       parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
+       parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
        if (WARN_ON(!parts))
                goto out_put_node;
 
        if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
                nr_redist_regions = 1;
 
-       rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
+       rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
+                            GFP_KERNEL);
        if (!rdist_regs) {
                err = -ENOMEM;
                goto out_unmap_dist;
 
                goto out;
        desc->domain = d;
 
-       desc->bitmap = kzalloc(sizeof(long) * BITS_TO_LONGS(nr_parts),
+       desc->bitmap = kcalloc(BITS_TO_LONGS(nr_parts), sizeof(long),
                               GFP_KERNEL);
        if (WARN_ON(!desc->bitmap))
                goto out;
 
                        return -ENOMEM;
 
                intc->domain = domain;
-               intc->irqs = kzalloc(sizeof(struct s3c_irq_data) * 32,
+               intc->irqs = kcalloc(32, sizeof(struct s3c_irq_data),
                                     GFP_KERNEL);
                if (!intc->irqs) {
                        kfree(intc);
 
        if (capi_ttyminors <= 0)
                capi_ttyminors = CAPINC_NR_PORTS;
 
-       capiminors = kzalloc(sizeof(struct capiminor *) * capi_ttyminors,
+       capiminors = kcalloc(capi_ttyminors, sizeof(struct capiminor *),
                             GFP_KERNEL);
        if (!capiminors)
                return -ENOMEM;
 
        cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
 
        /* build command table */
-       commands = kzalloc(AT_NUM * (sizeof *commands), GFP_KERNEL);
+       commands = kcalloc(AT_NUM, sizeof(*commands), GFP_KERNEL);
        if (!commands)
                goto oom;
 
 
                dev_kfree_skb(bcs->rx_skb);
                gigaset_new_rx_skb(bcs);
 
-               commands = kzalloc(AT_NUM * (sizeof *commands), GFP_ATOMIC);
+               commands = kcalloc(AT_NUM, sizeof(*commands), GFP_ATOMIC);
                if (!commands) {
                        gigaset_free_channel(bcs);
                        dev_err(cs->dev, "ISDN_CMD_DIAL: out of memory\n");
 
        if (!card)
                return NULL;
 
-       cinfo = kzalloc(sizeof(*cinfo) * nr_controllers, GFP_KERNEL);
+       cinfo = kcalloc(nr_controllers, sizeof(*cinfo), GFP_KERNEL);
        if (!cinfo) {
                kfree(card);
                return NULL;
 
        int i;
 
        fsm->jumpmatrix =
-               kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL);
+               kzalloc(array3_size(sizeof(FSMFNPTR), fsm->state_count,
+                                   fsm->event_count),
+                       GFP_KERNEL);
        if (!fsm->jumpmatrix)
                return -ENOMEM;
 
 
 
        if ((adding) && (d->rcverr))
                kfree(d->rcverr);
-       if (!(d->rcverr = kzalloc(sizeof(int) * m, GFP_ATOMIC))) {
+       if (!(d->rcverr = kcalloc(m, sizeof(int), GFP_ATOMIC))) {
                printk(KERN_WARNING "register_isdn: Could not alloc rcverr\n");
                return -1;
        }
 
        if ((adding) && (d->rcvcount))
                kfree(d->rcvcount);
-       if (!(d->rcvcount = kzalloc(sizeof(int) * m, GFP_ATOMIC))) {
+       if (!(d->rcvcount = kcalloc(m, sizeof(int), GFP_ATOMIC))) {
                printk(KERN_WARNING "register_isdn: Could not alloc rcvcount\n");
                if (!adding)
                        kfree(d->rcverr);
 
 {
        int i;
 
-       fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
-                                 fsm->event_count, GFP_KERNEL);
+       fsm->jumpmatrix =
+               kzalloc(array3_size(sizeof(FSMFNPTR), fsm->state_count,
+                                   fsm->event_count),
+                       GFP_KERNEL);
        if (fsm->jumpmatrix == NULL)
                return -ENOMEM;
 
 
                return -EINVAL;
        }
 
-       pblk->pad_dist = kzalloc((pblk->min_write_pgs - 1) * sizeof(atomic64_t),
+       pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
                                                                GFP_KERNEL);
        if (!pblk->pad_dist)
                return -ENOMEM;
 
                return -EINVAL;
        }
 
-       pcc_mbox_channels = kzalloc(sizeof(struct mbox_chan) * count, GFP_KERNEL);
+       pcc_mbox_channels = kcalloc(count, sizeof(struct mbox_chan),
+                                   GFP_KERNEL);
        if (!pcc_mbox_channels) {
                pr_err("Could not allocate space for PCC mbox channels\n");
                return -ENOMEM;
 
        iter_size = (sb->bucket_size / sb->block_size + 1) *
                sizeof(struct btree_iter_set);
 
-       if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
+       if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
            mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
            mempool_init_kmalloc_pool(&c->bio_meta, 2,
                                      sizeof(struct bbio) + sizeof(struct bio_vec) *
            !init_heap(&ca->heap,       free << 3, GFP_KERNEL) ||
            !(ca->buckets       = vzalloc(sizeof(struct bucket) *
                                          ca->sb.nbuckets)) ||
-           !(ca->prio_buckets  = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
-                                         2, GFP_KERNEL)) ||
+           !(ca->prio_buckets  = kzalloc(array3_size(sizeof(uint64_t),
+                                                     prio_buckets(ca), 2),
+                                         GFP_KERNEL)) ||
            !(ca->disk_buckets  = alloc_bucket_pages(GFP_KERNEL, ca)))
                return -ENOMEM;
 
 
        unsigned i;
        int err;
 
-       cc->cipher_tfm.tfms = kzalloc(cc->tfms_count *
-                                     sizeof(struct crypto_skcipher *), GFP_KERNEL);
+       cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
+                                     sizeof(struct crypto_skcipher *),
+                                     GFP_KERNEL);
        if (!cc->cipher_tfm.tfms)
                return -ENOMEM;
 
 
 
        pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
 
-       new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
+       new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
        ret = -ENOMEM;
        if (!new_bp) {
                bitmap_file_unmap(&store);
 
        char str[64];
        struct md_cluster_info *cinfo = mddev->cluster_info;
 
-       cinfo->other_bitmap_lockres = kzalloc((mddev->bitmap_info.nodes - 1) *
-                                            sizeof(struct dlm_lock_resource *),
-                                            GFP_KERNEL);
+       cinfo->other_bitmap_lockres =
+               kcalloc(mddev->bitmap_info.nodes - 1,
+                       sizeof(struct dlm_lock_resource *), GFP_KERNEL);
        if (!cinfo->other_bitmap_lockres) {
                pr_err("md: can't alloc mem for other bitmap locks\n");
                return 0;
 
        if (!conf)
                goto out;
 
-       conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
+       conf->multipaths = kcalloc(mddev->raid_disks,
+                                  sizeof(struct multipath_info),
                                   GFP_KERNEL);
        if (!conf->multipaths)
                goto out_free_conf;
 
        }
 
        err = -ENOMEM;
-       conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
-                               conf->nr_strip_zones, GFP_KERNEL);
+       conf->strip_zone = kcalloc(conf->nr_strip_zones,
+                                  sizeof(struct strip_zone),
+                                  GFP_KERNEL);
        if (!conf->strip_zone)
                goto abort;
-       conf->devlist = kzalloc(sizeof(struct md_rdev*)*
-                               conf->nr_strip_zones*mddev->raid_disks,
+       conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
+                                           conf->nr_strip_zones,
+                                           mddev->raid_disks),
                                GFP_KERNEL);
        if (!conf->devlist)
                goto abort;
 
        if (!conf->barrier)
                goto abort;
 
-       conf->mirrors = kzalloc(sizeof(struct raid1_info)
-                               * mddev->raid_disks * 2,
-                                GFP_KERNEL);
+       conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
+                                           mddev->raid_disks, 2),
+                               GFP_KERNEL);
        if (!conf->mirrors)
                goto abort;
 
                kfree(newpoolinfo);
                return ret;
        }
-       newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
+       newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
+                                        raid_disks, 2),
                             GFP_KERNEL);
        if (!newmirrors) {
                kfree(newpoolinfo);
 
                goto out;
 
        /* FIXME calc properly */
-       conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
-                                                           max(0,-mddev->delta_disks)),
+       conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
+                               sizeof(struct raid10_info),
                                GFP_KERNEL);
        if (!conf->mirrors)
                goto out;
        conf->mirrors_new = NULL;
        if (mddev->delta_disks > 0) {
                /* allocate new 'mirrors' list */
-               conf->mirrors_new = kzalloc(
-                       sizeof(struct raid10_info)
-                       *(mddev->raid_disks +
-                         mddev->delta_disks),
-                       GFP_KERNEL);
+               conf->mirrors_new =
+                       kcalloc(mddev->raid_disks + mddev->delta_disks,
+                               sizeof(struct raid10_info),
+                               GFP_KERNEL);
                if (!conf->mirrors_new)
                        return -ENOMEM;
        }
 
         * is completely stalled, so now is a good time to resize
         * conf->disks and the scribble region
         */
-       ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
+       ndisks = kcalloc(newsize, sizeof(struct disk_info), GFP_NOIO);
        if (ndisks) {
                for (i = 0; i < conf->pool_size; i++)
                        ndisks[i] = conf->disks[i];
        }
        *group_cnt = num_possible_nodes();
        size = sizeof(struct r5worker) * cnt;
-       workers = kzalloc(size * *group_cnt, GFP_NOIO);
-       *worker_groups = kzalloc(sizeof(struct r5worker_group) *
-                               *group_cnt, GFP_NOIO);
+       workers = kcalloc(size, *group_cnt, GFP_NOIO);
+       *worker_groups = kcalloc(*group_cnt, sizeof(struct r5worker_group),
+                                GFP_NOIO);
        if (!*worker_groups || !workers) {
                kfree(workers);
                kfree(*worker_groups);
                goto abort;
        INIT_LIST_HEAD(&conf->free_list);
        INIT_LIST_HEAD(&conf->pending_list);
-       conf->pending_data = kzalloc(sizeof(struct r5pending_data) *
-               PENDING_IO_MAX, GFP_KERNEL);
+       conf->pending_data = kcalloc(PENDING_IO_MAX,
+                                    sizeof(struct r5pending_data),
+                                    GFP_KERNEL);
        if (!conf->pending_data)
                goto abort;
        for (i = 0; i < PENDING_IO_MAX; i++)
                conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
        max_disks = max(conf->raid_disks, conf->previous_raid_disks);
 
-       conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
+       conf->disks = kcalloc(max_disks, sizeof(struct disk_info),
                              GFP_KERNEL);
 
        if (!conf->disks)
 
        };
        int ret = 0;
 
-       tx = kzalloc(2*sizeof(u8), GFP_KERNEL);
+       tx = kzalloc(2, GFP_KERNEL);
        if (!tx)
                return -ENOMEM;
-       rx = kzalloc(2*sizeof(u8), GFP_KERNEL);
+       rx = kzalloc(2, GFP_KERNEL);
        if (!rx) {
                ret = -ENOMEM;
                goto rx_memory_error;
 
        u8 new_addr = 0;
        struct i2c_device client = {.adap = host };
 
-       client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
+       client.i2c_write_buffer = kzalloc(4, GFP_KERNEL);
        if (!client.i2c_write_buffer) {
                dprintk("%s: not enough memory\n", __func__);
                return -ENOMEM;
        }
-       client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
+       client.i2c_read_buffer = kzalloc(4, GFP_KERNEL);
        if (!client.i2c_read_buffer) {
                dprintk("%s: not enough memory\n", __func__);
                ret = -ENOMEM;
 
        u8 new_addr = 0;
        struct i2c_device client = {.i2c_adap = i2c };
 
-       client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
+       client.i2c_write_buffer = kzalloc(4, GFP_KERNEL);
        if (!client.i2c_write_buffer) {
                dprintk("%s: not enough memory\n", __func__);
                return -ENOMEM;
        }
-       client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
+       client.i2c_read_buffer = kzalloc(4, GFP_KERNEL);
        if (!client.i2c_read_buffer) {
                dprintk("%s: not enough memory\n", __func__);
                ret = -ENOMEM;
 
        dev->isoc_ctl.isoc_copy = isoc_copy;
        dev->isoc_ctl.num_bufs = num_bufs;
 
-       dev->isoc_ctl.urb = kzalloc(sizeof(void *)*num_bufs,  GFP_KERNEL);
+       dev->isoc_ctl.urb = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
        if (!dev->isoc_ctl.urb) {
                au0828_isocdbg("cannot alloc memory for usb buffers\n");
                return -ENOMEM;
        }
 
-       dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
-                                             GFP_KERNEL);
+       dev->isoc_ctl.transfer_buffer = kcalloc(num_bufs, sizeof(void *),
+                                               GFP_KERNEL);
        if (!dev->isoc_ctl.transfer_buffer) {
                au0828_isocdbg("cannot allocate memory for usb transfer\n");
                kfree(dev->isoc_ctl.urb);
 
                dma_q->partial_buf[i] = 0;
 
        dev->video_mode.isoc_ctl.urb =
-           kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+           kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
        if (!dev->video_mode.isoc_ctl.urb) {
                dev_err(dev->dev,
                        "cannot alloc memory for usb buffers\n");
        }
 
        dev->video_mode.isoc_ctl.transfer_buffer =
-           kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+           kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
        if (!dev->video_mode.isoc_ctl.transfer_buffer) {
                dev_err(dev->dev,
                        "cannot allocate memory for usbtransfer\n");
                dma_q->partial_buf[i] = 0;
 
        dev->video_mode.bulk_ctl.urb =
-           kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+           kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
        if (!dev->video_mode.bulk_ctl.urb) {
                dev_err(dev->dev,
                        "cannot alloc memory for usb buffers\n");
        }
 
        dev->video_mode.bulk_ctl.transfer_buffer =
-           kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+           kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
        if (!dev->video_mode.bulk_ctl.transfer_buffer) {
                dev_err(dev->dev,
                        "cannot allocate memory for usbtransfer\n");
 
        for (i = 0; i < 8; i++)
                dma_q->partial_buf[i] = 0;
 
-       dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
+       dev->vbi_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *),
                                             GFP_KERNEL);
        if (!dev->vbi_mode.bulk_ctl.urb) {
                dev_err(dev->dev,
        }
 
        dev->vbi_mode.bulk_ctl.transfer_buffer =
-           kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+           kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
        if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
                dev_err(dev->dev,
                        "cannot allocate memory for usbtransfer\n");
 
                        GO7007_FW_NAME);
                return -1;
        }
-       code = kzalloc(codespace * 2, GFP_KERNEL);
+       code = kcalloc(codespace, 2, GFP_KERNEL);
        if (code == NULL)
                goto fw_failed;
 
 
 
        hdw->control_cnt = CTRLDEF_COUNT;
        hdw->control_cnt += MPEGDEF_COUNT;
-       hdw->controls = kzalloc(sizeof(struct pvr2_ctrl) * hdw->control_cnt,
+       hdw->controls = kcalloc(hdw->control_cnt, sizeof(struct pvr2_ctrl),
                                GFP_KERNEL);
        if (!hdw->controls) goto fail;
        hdw->hdw_desc = hdw_desc;
 
                   std_cnt);
        if (!std_cnt) return NULL; // paranoia
 
-       stddefs = kzalloc(sizeof(struct v4l2_standard) * std_cnt,
+       stddefs = kcalloc(std_cnt, sizeof(struct v4l2_standard),
                          GFP_KERNEL);
        if (!stddefs)
                return NULL;
 
 
        dev->isoc_ctl.buf = NULL;
        dev->isoc_ctl.max_pkt_size = dev->max_pkt_size;
-       dev->isoc_ctl.urb = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+       dev->isoc_ctl.urb = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
        if (!dev->isoc_ctl.urb) {
                stk1160_err("out of memory for urb array\n");
                return -ENOMEM;
        }
 
-       dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
-                                             GFP_KERNEL);
+       dev->isoc_ctl.transfer_buffer = kcalloc(num_bufs, sizeof(void *),
+                                               GFP_KERNEL);
        if (!dev->isoc_ctl.transfer_buffer) {
                stk1160_err("out of memory for usb transfers\n");
                kfree(dev->isoc_ctl.urb);
 
        if (dev->sio_bufs != NULL)
                pr_err("sio_bufs already allocated\n");
        else {
-               dev->sio_bufs = kzalloc(n_sbufs * sizeof(struct stk_sio_buffer),
-                               GFP_KERNEL);
+               dev->sio_bufs = kcalloc(n_sbufs,
+                                       sizeof(struct stk_sio_buffer),
+                                       GFP_KERNEL);
                if (dev->sio_bufs == NULL)
                        return -ENOMEM;
                for (i = 0; i < n_sbufs; i++) {
 
        ip->pipe = usb_rcvisocpipe(usbtv->udev, USBTV_VIDEO_ENDP);
        ip->interval = 1;
        ip->transfer_flags = URB_ISO_ASAP;
-       ip->transfer_buffer = kzalloc(size * USBTV_ISOC_PACKETS,
+       ip->transfer_buffer = kcalloc(USBTV_ISOC_PACKETS, size,
                                                GFP_KERNEL);
        if (!ip->transfer_buffer) {
                usb_free_urb(ip);
 
        resp = (struct ec_response_motion_sense *)msg->data;
        sensor_num = resp->dump.sensor_count;
        /* Allocate 1 extra cell in case a sensor FIFO device is needed */
-       sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 1),
+       sensor_cells = kcalloc(sensor_num + 1, sizeof(struct mfd_cell),
                               GFP_KERNEL);
        if (sensor_cells == NULL)
                goto error;
 
-       sensor_platforms = kzalloc(sizeof(struct cros_ec_sensor_platform) *
-                 (sensor_num + 1), GFP_KERNEL);
+       sensor_platforms = kcalloc(sensor_num + 1,
+                                  sizeof(struct cros_ec_sensor_platform),
+                                  GFP_KERNEL);
        if (sensor_platforms == NULL)
                goto error_platforms;
 
 
        if (!pdev)
                goto fail_alloc;
 
-       res = kzalloc(sizeof(*res) * cell->num_resources, GFP_KERNEL);
+       res = kcalloc(cell->num_resources, sizeof(*res), GFP_KERNEL);
        if (!res)
                goto fail_device;
 
 
                goto err_config;
        }
 
-       msix_entries = kzalloc(TIMBERDALE_NR_IRQS * sizeof(*msix_entries),
-               GFP_KERNEL);
+       msix_entries = kcalloc(TIMBERDALE_NR_IRQS, sizeof(*msix_entries),
+                              GFP_KERNEL);
        if (!msix_entries)
                goto err_config;
 
 
        if (sym_count <= 0)
                goto exit_done;
 
-       vars = kzalloc(sym_count * sizeof(long), GFP_KERNEL);
+       vars = kcalloc(sym_count, sizeof(long), GFP_KERNEL);
 
        if (vars == NULL)
                status = -ENOMEM;
 
        if (status == 0) {
-               var_size = kzalloc(sym_count * sizeof(s32), GFP_KERNEL);
+               var_size = kcalloc(sym_count, sizeof(s32), GFP_KERNEL);
 
                if (var_size == NULL)
                        status = -ENOMEM;
                                /* Allocate a writable buffer for this array */
                                count = var_size[variable_id];
                                long_tmp = vars[variable_id];
-                               longptr_tmp = kzalloc(count * sizeof(long),
+                               longptr_tmp = kcalloc(count, sizeof(long),
                                                                GFP_KERNEL);
                                vars[variable_id] = (long)longptr_tmp;
 
 
                mod = 0;
        }
 
-       vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL);
+       vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
        if (!vpd_buf)
                return -ENOMEM;
 
 
        if (nranges == 0 || (nranges * 2 * sizeof(int)) != len)
                return -EINVAL;
 
-       adapter->guest->irq_avail = kzalloc(nranges * sizeof(struct irq_avail),
+       adapter->guest->irq_avail = kcalloc(nranges, sizeof(struct irq_avail),
                                            GFP_KERNEL);
        if (adapter->guest->irq_avail == NULL)
                return -ENOMEM;
 
                        "[%s] **err: could not allocate DDCB **\n", __func__);
                return -ENOMEM;
        }
-       queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) *
-                                 queue->ddcb_max, GFP_KERNEL);
+       queue->ddcb_req = kcalloc(queue->ddcb_max, sizeof(struct ddcb_requ *),
+                                 GFP_KERNEL);
        if (!queue->ddcb_req) {
                rc = -ENOMEM;
                goto free_ddcbs;
        }
 
-       queue->ddcb_waitqs = kzalloc(sizeof(wait_queue_head_t) *
-                                    queue->ddcb_max, GFP_KERNEL);
+       queue->ddcb_waitqs = kcalloc(queue->ddcb_max,
+                                    sizeof(wait_queue_head_t),
+                                    GFP_KERNEL);
        if (!queue->ddcb_waitqs) {
                rc = -ENOMEM;
                goto free_requs;
 
         * memory.
         */
        DBUG_ON(part->channels != NULL);
-       part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
+       part->channels = kcalloc(XPC_MAX_NCHANNELS,
+                                sizeof(struct xpc_channel),
                                 GFP_KERNEL);
        if (part->channels == NULL) {
                dev_err(xpc_chan, "can't get memory for channels\n");
        short partid;
        struct xpc_partition *part;
 
-       xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
-                                xp_max_npartitions, GFP_KERNEL);
+       xpc_partitions = kcalloc(xp_max_npartitions,
+                                sizeof(struct xpc_partition),
+                                GFP_KERNEL);
        if (xpc_partitions == NULL) {
                dev_err(xpc_part, "can't get memory for partition structure\n");
                return -ENOMEM;
 
        if (remote_rp == NULL)
                return;
 
-       discovered_nasids = kzalloc(sizeof(long) * xpc_nasid_mask_nlongs,
+       discovered_nasids = kcalloc(xpc_nasid_mask_nlongs, sizeof(long),
                                    GFP_KERNEL);
        if (discovered_nasids == NULL) {
                kfree(remote_rp_base);
 
 
        dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
 
-       xpnet_broadcast_partitions = kzalloc(BITS_TO_LONGS(xp_max_npartitions) *
-                                            sizeof(long), GFP_KERNEL);
+       xpnet_broadcast_partitions = kcalloc(BITS_TO_LONGS(xp_max_npartitions),
+                                            sizeof(long),
+                                            GFP_KERNEL);
        if (xpnet_broadcast_partitions == NULL)
                return -ENOMEM;
 
 
         * after the reserved blocks from the dt are processed.
         */
        nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
-       rblocks = kzalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
+       rblocks = kcalloc(nblocks, sizeof(*rblocks), GFP_KERNEL);
        if (!rblocks)
                return -ENOMEM;
 
 
        int retries = 10;
        struct mtd_partition *ar7_parts;
 
-       ar7_parts = kzalloc(sizeof(*ar7_parts) * AR7_PARTS, GFP_KERNEL);
+       ar7_parts = kcalloc(AR7_PARTS, sizeof(*ar7_parts), GFP_KERNEL);
        if (!ar7_parts)
                return -ENOMEM;
        ar7_parts[0].name = "loader";
 
                blocksize = 0x1000;
 
        /* Alloc */
-       parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
+       parts = kcalloc(BCM47XXPART_MAX_PARTS, sizeof(struct mtd_partition),
                        GFP_KERNEL);
        if (!parts)
                return -ENOMEM;
 
        mtd->size = devsize * cfi->numchips;
 
        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
-       mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
-                       * mtd->numeraseregions, GFP_KERNEL);
+       mtd->eraseregions = kcalloc(mtd->numeraseregions,
+                                   sizeof(struct mtd_erase_region_info),
+                                   GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;
 
 
         * first check the locking status of all sectors and save
         * it for future use.
         */
-       sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
+       sect = kcalloc(MAX_SECTORS, sizeof(struct ppb_lock), GFP_KERNEL);
        if (!sect)
                return -ENOMEM;
 
 
        mtd->dev.parent = dev;
        bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
                                   8 * DOC_LAYOUT_PAGE_SIZE);
-       docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+       docg3->bbt = kcalloc(DOC_LAYOUT_PAGE_SIZE, bbt_nbpages, GFP_KERNEL);
        if (!docg3->bbt)
                goto nomem3;
 
 
        if (count < 0)
                return part_probe_types_def;
 
-       res = kzalloc((count + 1) * sizeof(*res), GFP_KERNEL);
+       res = kcalloc(count + 1, sizeof(*res), GFP_KERNEL);
        if (!res)
                return NULL;
 
 
        dev_set_drvdata(&dev->dev, info);
 
-       mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
+       mtd_list = kcalloc(count, sizeof(*mtd_list), GFP_KERNEL);
        if (!mtd_list)
                goto err_flash_remove;
 
 
                this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
                /* Maximum possible erase regions */
                mtd->numeraseregions = this->dies << 1;
-               mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
-                                       * (this->dies << 1), GFP_KERNEL);
+               mtd->eraseregions =
+                       kcalloc(this->dies << 1,
+                               sizeof(struct mtd_erase_region_info),
+                               GFP_KERNEL);
                if (!mtd->eraseregions)
                        return -ENOMEM;
        }
 
        if (nr_parts == 0)
                return 0;
 
-       parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
+       parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
        if (!parts)
                return -ENOMEM;
 
 
        nr_parts = plen / sizeof(part[0]);
 
-       parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
+       parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
        if (!parts)
                return -ENOMEM;
 
 
        uint8_t curr_part = 0, i = 0;
        int err;
 
-       parts = kzalloc(sizeof(struct mtd_partition) * TRX_PARSER_MAX_PARTS,
+       parts = kcalloc(TRX_PARSER_MAX_PARTS, sizeof(struct mtd_partition),
                        GFP_KERNEL);
        if (!parts)
                return -ENOMEM;
 
                return err;
        }
 
-       sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) *
-                                    SHARPSL_NAND_PARTS, GFP_KERNEL);
+       sharpsl_nand_parts = kcalloc(SHARPSL_NAND_PARTS,
+                                    sizeof(*sharpsl_nand_parts),
+                                    GFP_KERNEL);
        if (!sharpsl_nand_parts)
                return -ENOMEM;
 
 
 
 
        /* Create array of pointers to the attributes */
-       attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
+       attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
                                                                GFP_KERNEL);
        if (!attributes)
                goto error3;
                goto error2;
 
        /* Allocate zone array, it will be initialized on demand */
-       ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
+       ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
                                                                GFP_KERNEL);
        if (!ftl->zones)
                goto error3;
 
        unsigned char *pp1, *pp2, *pp3, *pp4;
 
        pr_info("crosstest\n");
-       pp1 = kzalloc(pgsize * 4, GFP_KERNEL);
+       pp1 = kcalloc(pgsize, 4, GFP_KERNEL);
        if (!pp1)
                return -ENOMEM;
        pp2 = pp1 + pgsize;
 
        sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
 
        err = -ENOMEM;
-       ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
+       ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
        if (!ubi->lookuptbl)
                return err;
 
 
        struct list_head  *iter;
 
        if (start_dev == end_dev) {
-               tags = kzalloc(sizeof(*tags) * (level + 1), GFP_ATOMIC);
+               tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
                if (!tags)
                        return ERR_PTR(-ENOMEM);
                tags[level].vlan_proto = VLAN_N_VID;
 
                return err;
        }
 
-       priv->echo_skb = kzalloc(dma->tx.size * sizeof(*priv->echo_skb),
+       priv->echo_skb = kcalloc(dma->tx.size, sizeof(*priv->echo_skb),
                                 GFP_KERNEL);
        if (!priv->echo_skb) {
                err = -ENOMEM;
        priv->can.echo_skb_max = dma->tx.size;
        priv->can.echo_skb = priv->echo_skb;
 
-       priv->txdlc = kzalloc(dma->tx.size * sizeof(*priv->txdlc), GFP_KERNEL);
+       priv->txdlc = kcalloc(dma->tx.size, sizeof(*priv->txdlc), GFP_KERNEL);
        if (!priv->txdlc) {
                err = -ENOMEM;
                goto exit_free_echo_skb;
 
        pr_info("slcan: serial line CAN interface driver\n");
        pr_info("slcan: %d dynamic interface channels.\n", maxdev);
 
-       slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
+       slcan_devs = kcalloc(maxdev, sizeof(struct net_device *), GFP_KERNEL);
        if (!slcan_devs)
                return -ENOMEM;
 
 
        priv->tx_desc_alloc_size = size;
        priv->tx_desc_cpu = p;
 
-       priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
+       priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->tx_skb) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
        spin_lock_init(&priv->tx_lock);
 
        /* init & fill rx ring with skbs */
-       priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
+       priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->rx_skb) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
 
        else
                set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
        if (mc_num) {
-               mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+               mc = kcalloc(mc_num, sizeof(struct bnx2x_mcast_list_elem),
                             GFP_KERNEL);
                if (!mc) {
                        BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
           num_vfs_param, iov->nr_virtfn);
 
        /* allocate the vf array */
-       bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
-                               BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
+       bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp),
+                               sizeof(struct bnx2x_virtf),
+                               GFP_KERNEL);
        if (!bp->vfdb->vfs) {
                BNX2X_ERR("failed to allocate vf array\n");
                err = -ENOMEM;
        }
 
        /* allocate the queue arrays for all VFs */
-       bp->vfdb->vfqs = kzalloc(
-               BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
-               GFP_KERNEL);
+       bp->vfdb->vfqs = kcalloc(BNX2X_MAX_NUM_VF_QUEUES,
+                                sizeof(struct bnx2x_vf_queue),
+                                GFP_KERNEL);
 
        if (!bp->vfdb->vfqs) {
                BNX2X_ERR("failed to allocate vf queue array\n");
 
        id_tbl->max = size;
        id_tbl->next = next;
        spin_lock_init(&id_tbl->lock);
-       id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+       id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
        if (!id_tbl->table)
                return -ENOMEM;
 
                        cp->fcoe_init_cid = 0x10;
        }
 
-       cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
+       cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
                                GFP_KERNEL);
        if (!cp->iscsi_tbl)
                goto error;
 
-       cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
-                               cp->max_cid_space, GFP_KERNEL);
+       cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
+                             GFP_KERNEL);
        if (!cp->ctx_tbl)
                goto error;
 
        struct cnic_local *cp = dev->cnic_priv;
        u32 port_id;
 
-       cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
+       cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
                              GFP_KERNEL);
        if (!cp->csk_tbl)
                return -ENOMEM;
 
                tnapi++;
 
        for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
-               tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
-                                           TG3_TX_RING_SIZE, GFP_KERNEL);
+               tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
+                                           sizeof(struct tg3_tx_ring_info),
+                                           GFP_KERNEL);
                if (!tnapi->tx_buffers)
                        goto err_out;
 
 
        if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
                goto mode_default;
 
-       mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
+       mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
        if (mac_list == NULL)
                goto mode_default;
 
        if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
                goto mode_allmulti;
 
-       mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
+       mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
 
        if (mac_list == NULL)
                goto mode_allmulti;
 
 
        netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
 
-       priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
+       priv->rx_skbuff = kcalloc(DMA_RX_RING_SZ, sizeof(struct sk_buff *),
                                  GFP_KERNEL);
        if (!priv->rx_skbuff)
                return -ENOMEM;
        if (!priv->dma_rx)
                goto err_dma_rx;
 
-       priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
+       priv->tx_skbuff = kcalloc(DMA_TX_RING_SZ, sizeof(struct sk_buff *),
                                  GFP_KERNEL);
        if (!priv->tx_skbuff)
                goto err_tx_skb;
 
                rbdr->is_xdp = true;
        }
        rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
-       rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
-                               rbdr->pgcnt, GFP_KERNEL);
+       rbdr->pgcache = kcalloc(rbdr->pgcnt, sizeof(*rbdr->pgcache),
+                               GFP_KERNEL);
        if (!rbdr->pgcache)
                return -ENOMEM;
        rbdr->pgidx = 0;
 
        if (!adap->uld)
                return -ENOMEM;
 
-       s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
+       s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
                                  sizeof(struct sge_uld_rxq_info *),
                                  GFP_KERNEL);
        if (!s->uld_rxq_info)
                goto err_uld;
 
-       s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
+       s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
                                  sizeof(struct sge_uld_txq_info *),
                                  GFP_KERNEL);
        if (!s->uld_txq_info)
 
        }
 
        /* Allocate a mapping to page look-up index */
-       geth->freeq_pages = kzalloc(pages * sizeof(*geth->freeq_pages),
-                                  GFP_KERNEL);
+       geth->freeq_pages = kcalloc(pages, sizeof(*geth->freeq_pages),
+                                   GFP_KERNEL);
        if (!geth->freeq_pages)
                goto err_freeq;
        geth->num_freeq_pages = pages;
 
                return -EINVAL;
        }
 
-       priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
+       priv->ring_data = kzalloc(array3_size(h->q_num,
+                                             sizeof(*priv->ring_data), 2),
                                  GFP_KERNEL);
        if (!priv->ring_data)
                return -ENOMEM;
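
Sizes built from three factors cannot be expressed as a single kcalloc(count, size) pair, so call sites like the one above keep kzalloc() and compute the byte count with array3_size() from <linux/overflow.h>, which saturates to SIZE_MAX on overflow so the oversized allocation simply fails. A sketch with hypothetical names:

#include <linux/overflow.h>
#include <linux/slab.h>

/*
 * Sketch with hypothetical names: a three-factor size keeps kzalloc(),
 * but array3_size() saturates to SIZE_MAX on overflow, so the allocation
 * fails instead of silently wrapping.
 */
static void *alloc_ring_table(size_t nqueues, size_t ndirs, size_t elem_size)
{
	return kzalloc(array3_size(nqueues, ndirs, elem_size), GFP_KERNEL);
}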
 
                return 0;
        }
 
-       mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+       mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
        if (!mta_list)
                return -ENOMEM;
 
 
        /* Assume MSI-X interrupts, will be checked during IRQ allocation */
        adapter->flags |= IGB_FLAG_HAS_MSIX;
 
-       adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
-                                    hw->mac.rar_entry_count, GFP_ATOMIC);
+       adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
+                                    sizeof(struct igb_mac_addr),
+                                    GFP_ATOMIC);
        if (!adapter->mac_table)
                return -ENOMEM;
 
                return 0;
        }
 
-       mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
+       mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
        if (!mta_list)
                return -ENOMEM;
 
 
        for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
                adapter->jump_tables[i] = NULL;
 
-       adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
-                                    hw->mac.num_rar_entries,
+       adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
+                                    sizeof(struct ixgbe_mac_addr),
                                     GFP_ATOMIC);
        if (!adapter->mac_table)
                return -ENOMEM;
 
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, jme->tx_ring_size);
 
-       txring->bufinf          = kzalloc(sizeof(struct jme_buffer_info) *
-                                       jme->tx_ring_size, GFP_ATOMIC);
+       txring->bufinf          = kcalloc(jme->tx_ring_size,
+                                               sizeof(struct jme_buffer_info),
+                                               GFP_ATOMIC);
        if (unlikely(!(txring->bufinf)))
                goto err_free_txring;
 
        rxring->next_to_use     = 0;
        atomic_set(&rxring->next_to_clean, 0);
 
-       rxring->bufinf          = kzalloc(sizeof(struct jme_buffer_info) *
-                                       jme->rx_ring_size, GFP_ATOMIC);
+       rxring->bufinf          = kcalloc(jme->rx_ring_size,
+                                               sizeof(struct jme_buffer_info),
+                                               GFP_ATOMIC);
        if (unlikely(!(rxring->bufinf)))
                goto err_free_rxring;
 
 
        bitmap->avail = num - reserved_top - reserved_bot;
        bitmap->effective_len = bitmap->avail;
        spin_lock_init(&bitmap->lock);
-       bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
-                               sizeof(long), GFP_KERNEL);
+       bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
+                               GFP_KERNEL);
        if (!bitmap->table)
                return -ENOMEM;
 
 
                struct mlx4_vf_admin_state *vf_admin;
 
                priv->mfunc.master.slave_state =
-                       kzalloc(dev->num_slaves *
-                               sizeof(struct mlx4_slave_state), GFP_KERNEL);
+                       kcalloc(dev->num_slaves,
+                               sizeof(struct mlx4_slave_state),
+                               GFP_KERNEL);
                if (!priv->mfunc.master.slave_state)
                        goto err_comm;
 
                priv->mfunc.master.vf_admin =
-                       kzalloc(dev->num_slaves *
-                               sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
+                       kcalloc(dev->num_slaves,
+                               sizeof(struct mlx4_vf_admin_state),
+                               GFP_KERNEL);
                if (!priv->mfunc.master.vf_admin)
                        goto err_comm_admin;
 
                priv->mfunc.master.vf_oper =
-                       kzalloc(dev->num_slaves *
-                               sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
+                       kcalloc(dev->num_slaves,
+                               sizeof(struct mlx4_vf_oper_state),
+                               GFP_KERNEL);
                if (!priv->mfunc.master.vf_oper)
                        goto err_comm_oper;
 
 
                if (!dst->tx_ring_num[t])
                        continue;
 
-               dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
-                                         MAX_TX_RINGS, GFP_KERNEL);
+               dst->tx_ring[t] = kcalloc(MAX_TX_RINGS,
+                                         sizeof(struct mlx4_en_tx_ring *),
+                                         GFP_KERNEL);
                if (!dst->tx_ring[t])
                        goto err_free_tx;
 
-               dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
-                                       MAX_TX_RINGS, GFP_KERNEL);
+               dst->tx_cq[t] = kcalloc(MAX_TX_RINGS,
+                                       sizeof(struct mlx4_en_cq *),
+                                       GFP_KERNEL);
                if (!dst->tx_cq[t]) {
                        kfree(dst->tx_ring[t]);
                        goto err_free_tx;
                if (!priv->tx_ring_num[t])
                        continue;
 
-               priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
-                                          MAX_TX_RINGS, GFP_KERNEL);
+               priv->tx_ring[t] = kcalloc(MAX_TX_RINGS,
+                                          sizeof(struct mlx4_en_tx_ring *),
+                                          GFP_KERNEL);
                if (!priv->tx_ring[t]) {
                        err = -ENOMEM;
                        goto out;
                }
-               priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
-                                        MAX_TX_RINGS, GFP_KERNEL);
+               priv->tx_cq[t] = kcalloc(MAX_TX_RINGS,
+                                        sizeof(struct mlx4_en_cq *),
+                                        GFP_KERNEL);
                if (!priv->tx_cq[t]) {
                        err = -ENOMEM;
                        goto out;
 
        int num_entries = dev->caps.num_ports;
        int i, j;
 
-       priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
+       priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer),
+                             GFP_KERNEL);
        if (!priv->steer)
                return -ENOMEM;
 
                }
        }
 
-       dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
+       dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
        if (NULL == dev->dev_vfs) {
                mlx4_err(dev, "Failed to allocate memory for VFs\n");
                goto disable_sriov;
 
        int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
 
        priv->mfunc.master.res_tracker.slave_list =
-               kzalloc(dev->num_slaves * sizeof(struct slave_list),
+               kcalloc(dev->num_slaves, sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;
                                                      sizeof(int),
                                                      GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
-                       res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
-                                                      (dev->persist->num_vfs
-                                                      + 1) *
-                                                      sizeof(int), GFP_KERNEL);
+                       res_alloc->allocated =
+                               kcalloc(MLX4_MAX_PORTS *
+                                               (dev->persist->num_vfs + 1),
+                                       sizeof(int), GFP_KERNEL);
                else
-                       res_alloc->allocated = kzalloc((dev->persist->
-                                                       num_vfs + 1) *
-                                                      sizeof(int), GFP_KERNEL);
+                       res_alloc->allocated =
+                               kcalloc(dev->persist->num_vfs + 1,
+                                       sizeof(int), GFP_KERNEL);
                /* Reduce the sink counter */
                if (i == RES_COUNTER)
                        res_alloc->res_free = dev->caps.max_counters - 1;
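
kcalloc() only overflow-checks its final count * size product, so when an inner multiplication stays inside the count argument, as with MLX4_MAX_PORTS * (dev->persist->num_vfs + 1) above, that inner product is still ordinary unchecked arithmetic, presumably acceptable here because the operands are small and bounded. A sketch with hypothetical names:

#include <linux/slab.h>

/*
 * Sketch with hypothetical names: only the final count * size product is
 * overflow-checked by kcalloc(); the inner max_ports * (num_vfs + 1)
 * multiplication below remains ordinary unchecked arithmetic.
 */
static int *alloc_per_port_counts(unsigned int max_ports, unsigned int num_vfs)
{
	return kcalloc(max_ports * (num_vfs + 1), sizeof(int), GFP_KERNEL);
}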
 
 
        count = mlx5_fpga_ipsec_counters_count(mdev);
 
-       data = kzalloc(sizeof(*data) * count * 2, GFP_KERNEL);
+       data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto out;
 
        int i;
 
        clock->ptp_info.pin_config =
-                       kzalloc(sizeof(*clock->ptp_info.pin_config) *
-                               clock->ptp_info.n_pins, GFP_KERNEL);
+                       kcalloc(clock->ptp_info.n_pins,
+                               sizeof(*clock->ptp_info.pin_config),
+                               GFP_KERNEL);
        if (!clock->ptp_info.pin_config)
                return -ENOMEM;
        clock->ptp_info.enable = mlx5_ptp_enable;
 
        mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
        mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
 
-       mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc) * IEEE_8021QAZ_MAX_TCS,
+       mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
+                                sizeof(*mlxsw_sp_qdisc),
                                 GFP_KERNEL);
        if (!mlxsw_sp_qdisc)
                goto err_tclass_qdiscs_init;
 
  */
 static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
 {
-       desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+       desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc),
                                  GFP_KERNEL);
        if (!desc_info->ring)
                return 1;
 
        channel->length = length;
        channel->vp_id = vp_id;
 
-       channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+       channel->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
        if (channel->work_arr == NULL)
                goto exit1;
 
-       channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+       channel->free_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
        if (channel->free_arr == NULL)
                goto exit1;
        channel->free_ptr = length;
 
-       channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+       channel->reserve_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
        if (channel->reserve_arr == NULL)
                goto exit1;
        channel->reserve_ptr = length;
        channel->reserve_top = 0;
 
-       channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+       channel->orig_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
        if (channel->orig_arr == NULL)
                goto exit1;
 
 
        vxge_initialize_ethtool_ops(ndev);
 
        /* Allocate memory for vpath */
-       vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
-                               no_of_vpath, GFP_KERNEL);
+       vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
+                              GFP_KERNEL);
        if (!vdev->vpaths) {
                vxge_debug_init(VXGE_ERR,
                        "%s: vpath memory allocation failed",
 
        spin_lock_init(&ring->lock);
 
        ring->size = RX_RING_SIZE;
-       ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
-                                 RX_RING_SIZE, GFP_KERNEL);
+       ring->ring_info = kcalloc(RX_RING_SIZE,
+                                 sizeof(struct pasemi_mac_buffer),
+                                 GFP_KERNEL);
 
        if (!ring->ring_info)
                goto out_ring_info;
        spin_lock_init(&ring->lock);
 
        ring->size = TX_RING_SIZE;
-       ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
-                                 TX_RING_SIZE, GFP_KERNEL);
+       ring->ring_info = kcalloc(TX_RING_SIZE,
+                                 sizeof(struct pasemi_mac_buffer),
+                                 GFP_KERNEL);
        if (!ring->ring_info)
                goto out_ring_info;
 
 
 
        /* Read no. of modules and allocate memory for their pointers */
        meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
-       meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
+       meta->modules = kcalloc(meta->modules_num, sizeof(char *),
+                               GFP_KERNEL);
        if (!meta->modules)
                return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
 
 
        /* Read number of formats and allocate memory for all formats */
        meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
-       meta->formats = kzalloc(meta->formats_num *
+       meta->formats = kcalloc(meta->formats_num,
                                sizeof(struct mcp_trace_format),
                                GFP_KERNEL);
        if (!meta->formats)
 
        if (rc)
                goto alloc_err;
 
-       qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
-                                       qed_init_qm_get_num_pqs(p_hwfn),
+       qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn),
+                                       sizeof(*qm_info->qm_pq_params),
                                        GFP_KERNEL);
        if (!qm_info->qm_pq_params)
                goto alloc_err;
 
-       qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
-                                          qed_init_qm_get_num_vports(p_hwfn),
+       qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
+                                          sizeof(*qm_info->qm_vport_params),
                                           GFP_KERNEL);
        if (!qm_info->qm_vport_params)
                goto alloc_err;
 
-       qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
-                                         p_hwfn->cdev->num_ports_in_engine,
+       qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine,
+                                         sizeof(*qm_info->qm_port_params),
                                          GFP_KERNEL);
        if (!qm_info->qm_port_params)
                goto alloc_err;
 
-       qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) *
-                                   qed_init_qm_get_num_vports(p_hwfn),
+       qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
+                                   sizeof(*qm_info->wfq_data),
                                    GFP_KERNEL);
        if (!qm_info->wfq_data)
                goto alloc_err;
 
        if (IS_VF(p_hwfn->cdev))
                return 0;
 
-       rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
+       rt_data->b_valid = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(bool),
                                   GFP_KERNEL);
        if (!rt_data->b_valid)
                return -ENOMEM;
 
-       rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
+       rt_data->init_val = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(u32),
                                    GFP_KERNEL);
        if (!rt_data->init_val) {
                kfree(rt_data->b_valid);
 
                p_l2_info->queues = max_t(u8, rx, tx);
        }
 
-       pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
+       pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
                          GFP_KERNEL);
        if (!pp_qids)
                return -ENOMEM;
 
 
        act_pci_func = ahw->total_nic_func;
 
-       adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
-                                act_pci_func, GFP_KERNEL);
+       adapter->npars = kcalloc(act_pci_func,
+                                sizeof(struct qlcnic_npar_info),
+                                GFP_KERNEL);
        if (!adapter->npars) {
                ret = -ENOMEM;
                goto err_pci_info;
        }
 
-       adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
-                               QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
+       adapter->eswitch = kcalloc(QLCNIC_NIU_MAX_XG_PORTS,
+                                  sizeof(struct qlcnic_eswitch),
+                                  GFP_KERNEL);
        if (!adapter->eswitch) {
                ret = -ENOMEM;
                goto err_npars;
 
        adapter->ahw->sriov = sriov;
        sriov->num_vfs = num_vfs;
        bc = &sriov->bc;
-       sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
-                                num_vfs, GFP_KERNEL);
+       sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
+                                GFP_KERNEL);
        if (!sriov->vf_info) {
                err = -ENOMEM;
                goto qlcnic_free_sriov;
                return 0;
 
        num_vlans = sriov->num_allowed_vlans;
-       sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
+       sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
        if (!sriov->allowed_vlans)
                return -ENOMEM;
 
 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
                                            u32 size)
 {
-       *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
+       *hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
        if (!*hdr)
                return -ENOMEM;
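
kcalloc() accepts the same gfp_t flags as kzalloc(), so atomic-context call sites such as the one above convert identically, gaining only the overflow check on the count/size product. A sketch with hypothetical names:

#include <linux/slab.h>

/*
 * Sketch with hypothetical names: GFP_ATOMIC call sites are converted
 * unchanged apart from splitting the count and element size.
 */
static u16 *alloc_id_array_atomic(unsigned int count)
{
	return kcalloc(count, sizeof(u16), GFP_ATOMIC);
}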
 
 
                goto err;
        }
 
-       dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL);
+       dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
        if (!dring->desc) {
                ret = -ENOMEM;
                goto err;
 
        pr_debug("%s: wl=%p port=%p\n", __func__, wl, port);
 
        /* allocate scan list */
-       wl->networks = kzalloc(sizeof(struct gelic_wl_scan_info) *
-                              GELIC_WL_BSS_MAX_ENT, GFP_KERNEL);
+       wl->networks = kcalloc(GELIC_WL_BSS_MAX_ENT,
+                              sizeof(struct gelic_wl_scan_info),
+                              GFP_KERNEL);
 
        if (!wl->networks)
                goto fail_bss;
 
        if (!clock)
                goto out;
 
-       clock->caps.pin_config = kzalloc(sizeof(struct ptp_pin_desc) *
-                                        DP83640_N_PINS, GFP_KERNEL);
+       clock->caps.pin_config = kcalloc(DP83640_N_PINS,
+                                        sizeof(struct ptp_pin_desc),
+                                        GFP_KERNEL);
        if (!clock->caps.pin_config) {
                kfree(clock);
                clock = NULL;
 
        printk(KERN_INFO "SLIP linefill/keepalive option.\n");
 #endif
 
-       slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev,
+       slip_devs = kcalloc(slip_maxdev, sizeof(struct net_device *),
                                                                GFP_KERNEL);
        if (!slip_devs)
                return -ENOMEM;
 
        struct team_option **dst_opts;
        int err;
 
-       dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
+       dst_opts = kcalloc(option_count, sizeof(struct team_option *),
                           GFP_KERNEL);
        if (!dst_opts)
                return -ENOMEM;
 
        }
 
        if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
-               u32 *filter_mask = kzalloc(sizeof(u32) * 32, GFP_KERNEL);
+               u32 *filter_mask = kcalloc(32, sizeof(u32), GFP_KERNEL);
                u32 command[2];
                u32 offset[2];
                u32 crc[4];
 
                    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
 
        /* Allocate space for find_vqs parameters */
-       vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
+       vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!vqs)
                goto err_vq;
        callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
        if (!names)
                goto err_names;
        if (!vi->big_packets || vi->mergeable_rx_bufs) {
-               ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL);
+               ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
                if (!ctx)
                        goto err_ctx;
        } else {
        vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
        if (!vi->ctrl)
                goto err_ctrl;
-       vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
+       vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
        if (!vi->sq)
                goto err_sq;
-       vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
+       vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
        if (!vi->rq)
                goto err_rq;
 
 
                goto free_tx_bd;
        }
 
-       priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
+       priv->rx_skbuff = kcalloc(priv->rx_ring_size,
+                                 sizeof(*priv->rx_skbuff),
                                  GFP_KERNEL);
        if (!priv->rx_skbuff)
                goto free_ucc_pram;
 
-       priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
+       priv->tx_skbuff = kcalloc(priv->tx_ring_size,
+                                 sizeof(*priv->tx_skbuff),
                                  GFP_KERNEL);
        if (!priv->tx_skbuff)
                goto free_rx_skbuff;
 
        }
 
        htt->rx_ring.netbufs_ring =
-               kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+               kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;
 
        const void **tb;
        int ret;
 
-       tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp);
+       tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
        if (!tb)
                return ERR_PTR(-ENOMEM);
 
 
 
                n_channels = request->n_channels;
 
-               channels = kzalloc(n_channels * sizeof(u16), GFP_KERNEL);
+               channels = kcalloc(n_channels, sizeof(u16), GFP_KERNEL);
                if (channels == NULL) {
                        ath6kl_warn("failed to set scan channels, scan all channels");
                        n_channels = 0;
 
        if (!bands)
                return -EINVAL;
 
-       ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
+       ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
        if (!ar->survey)
                return -ENOMEM;
        ar->num_channels = chans;
        if (WARN_ON(ar->mem_bitmap))
                return -EINVAL;
 
-       ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
-                                sizeof(unsigned long), GFP_KERNEL);
+       ar->mem_bitmap = kcalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG),
+                                sizeof(unsigned long),
+                                GFP_KERNEL);
 
        if (!ar->mem_bitmap)
                return -ENOMEM;
 
        u16 i;
        u32 *data;
 
-       data = kzalloc(len * sizeof(u32), GFP_KERNEL);
+       data = kcalloc(len, sizeof(u32), GFP_KERNEL);
        if (!data) {
                b43err(dev->wl, "allocation for samples loading failed\n");
                return -ENOMEM;
 
 
        if ((phy->type == B43legacy_PHYTYPE_B) ||
            (phy->type == B43legacy_PHYTYPE_G)) {
-               phy->_lo_pairs = kzalloc(sizeof(struct b43legacy_lopair)
-                                        * B43legacy_LO_COUNT,
+               phy->_lo_pairs = kcalloc(B43legacy_LO_COUNT,
+                                        sizeof(struct b43legacy_lopair),
                                         GFP_KERNEL);
                if (!phy->_lo_pairs)
                        return -ENOMEM;
 
                (struct brcmf_commonring **)if_msgbuf->commonrings;
        msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
        msgbuf->max_flowrings = if_msgbuf->max_flowrings;
-       msgbuf->flowring_dma_handle = kzalloc(msgbuf->max_flowrings *
-               sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
+       msgbuf->flowring_dma_handle =
+               kcalloc(msgbuf->max_flowrings,
+                       sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
        if (!msgbuf->flowring_dma_handle)
                goto fail;
 
 
                channel_cnt = AF_PEER_SEARCH_CNT;
        else
                channel_cnt = SOCIAL_CHAN_CNT;
-       default_chan_list = kzalloc(channel_cnt * sizeof(*default_chan_list),
+       default_chan_list = kcalloc(channel_cnt, sizeof(*default_chan_list),
                                    GFP_KERNEL);
        if (default_chan_list == NULL) {
                brcmf_err("channel list allocation failed\n");
 
        wlc->hw->wlc = wlc;
 
        wlc->hw->bandstate[0] =
-               kzalloc(sizeof(struct brcms_hw_band) * MAXBANDS, GFP_ATOMIC);
+               kcalloc(MAXBANDS, sizeof(struct brcms_hw_band), GFP_ATOMIC);
        if (wlc->hw->bandstate[0] == NULL) {
                *err = 1006;
                goto fail;
        }
 
        wlc->modulecb =
-               kzalloc(sizeof(struct modulecb) * BRCMS_MAXMODULES, GFP_ATOMIC);
+               kcalloc(BRCMS_MAXMODULES, sizeof(struct modulecb),
+                       GFP_ATOMIC);
        if (wlc->modulecb == NULL) {
                *err = 1009;
                goto fail;
        }
 
        wlc->bandstate[0] =
-               kzalloc(sizeof(struct brcms_band)*MAXBANDS, GFP_ATOMIC);
+               kcalloc(MAXBANDS, sizeof(struct brcms_band), GFP_ATOMIC);
        if (wlc->bandstate[0] == NULL) {
                *err = 1025;
                goto fail;
 
        D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
 
        il->channel_info =
-           kzalloc(sizeof(struct il_channel_info) * il->channel_count,
+           kcalloc(il->channel_count, sizeof(struct il_channel_info),
                    GFP_KERNEL);
        if (!il->channel_info) {
                IL_ERR("Could not allocate channel_info\n");
        }
 
        txq->meta =
-           kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
+           kcalloc(actual_slots, sizeof(struct il_cmd_meta), GFP_KERNEL);
        txq->cmd =
-           kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
+           kcalloc(actual_slots, sizeof(struct il_device_cmd *), GFP_KERNEL);
 
        if (!txq->meta || !txq->cmd)
                goto out_free_arrays;
        }
 
        channels =
-           kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
+           kcalloc(il->channel_count, sizeof(struct ieee80211_channel),
                    GFP_KERNEL);
        if (!channels)
                return -ENOMEM;
 {
        if (!il->txq)
                il->txq =
-                   kzalloc(sizeof(struct il_tx_queue) *
-                           il->cfg->num_of_queues, GFP_KERNEL);
+                   kcalloc(il->cfg->num_of_queues,
+                           sizeof(struct il_tx_queue),
+                           GFP_KERNEL);
        if (!il->txq) {
                IL_ERR("Not enough memory for txq\n");
                return -ENOMEM;
 
        else
                blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
 
-       blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
+       blacklist = kcalloc(blacklist_len, sizeof(*blacklist), GFP_KERNEL);
        if (!blacklist)
                return -ENOMEM;
 
 
        if (!tmp)
                goto err_out;
 
-       tmp->channels = kzalloc(sizeof(struct ieee80211_channel) *
-                               list->band_channel_num[band], GFP_KERNEL);
+       tmp->channels = kcalloc(list->band_channel_num[band],
+                               sizeof(struct ieee80211_channel),
+                               GFP_KERNEL);
        if (!tmp->channels)
                goto err_out;
 
                goto free;
        }
        priv->chan_num = max_channel_num;
-       priv->survey = kzalloc(sizeof(struct survey_info) * max_channel_num,
+       priv->survey = kcalloc(max_channel_num, sizeof(struct survey_info),
                               GFP_KERNEL);
        if (!priv->survey) {
                ret = -ENOMEM;
        }
 
        list->max_entries = max_channel_num;
-       list->channels = kzalloc(sizeof(struct p54_channel_entry) *
-                                max_channel_num, GFP_KERNEL);
+       list->channels = kcalloc(max_channel_num,
+                                sizeof(struct p54_channel_entry),
+                                GFP_KERNEL);
        if (!list->channels) {
                ret = -ENOMEM;
                goto free;
 
        /* Alloc the cache */
        for (i = 0; i < OID_NUM_LAST; i++) {
                if (isl_oid[i].flags & OID_FLAG_CACHED) {
-                       priv->mib[i] = kzalloc(isl_oid[i].size *
+                       priv->mib[i] = kcalloc(isl_oid[i].size,
                                               (isl_oid[i].range + 1),
                                               GFP_KERNEL);
                        if (!priv->mib[i])
 
 
        new_node->win_size = win_size;
 
-       new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
-                                       GFP_KERNEL);
+       new_node->rx_reorder_ptr = kcalloc(win_size, sizeof(void *),
+                                          GFP_KERNEL);
        if (!new_node->rx_reorder_ptr) {
                kfree((u8 *) new_node);
                mwifiex_dbg(priv->adapter, ERROR,
 
                return -ENOMEM;
 
        /* Allocate skb pointer buffers */
-       card->mpa_rx.skb_arr = kzalloc((sizeof(void *)) *
-                                      card->mp_agg_pkt_limit, GFP_KERNEL);
+       card->mpa_rx.skb_arr = kcalloc(card->mp_agg_pkt_limit, sizeof(void *),
+                                      GFP_KERNEL);
        if (!card->mpa_rx.skb_arr) {
                kfree(card->mp_regs);
                return -ENOMEM;
        }
 
-       card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
-                                      card->mp_agg_pkt_limit, GFP_KERNEL);
+       card->mpa_rx.len_arr = kcalloc(card->mp_agg_pkt_limit,
+                                      sizeof(*card->mpa_rx.len_arr),
+                                      GFP_KERNEL);
        if (!card->mpa_rx.len_arr) {
                kfree(card->mp_regs);
                kfree(card->mpa_rx.skb_arr);
 
                                return -EINVAL;
                        }
 
-                       limits = kzalloc(sizeof(*limits) * rec->n_limits,
+                       limits = kcalloc(rec->n_limits, sizeof(*limits),
                                         GFP_KERNEL);
                        if (!limits)
                                return -ENOMEM;
 
        if (*offset)
                return 0;
 
-       data = kzalloc((1 + CIPHER_MAX) * MAX_LINE_LENGTH, GFP_KERNEL);
+       data = kcalloc(1 + CIPHER_MAX, MAX_LINE_LENGTH, GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
 
        }
 
        /* allocate memory for efuse_tbl and efuse_word */
-       efuse_tbl = kzalloc(rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE] *
-                           sizeof(u8), GFP_ATOMIC);
+       efuse_tbl = kzalloc(rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE],
+                           GFP_ATOMIC);
        if (!efuse_tbl)
                return;
        efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC);
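
In the efuse_tbl conversion above the element size is a single byte, so the sizeof(u8) factor is simply dropped and the call stays kzalloc(); there is no multiplication left for kcalloc() to check. A sketch with hypothetical names:

#include <linux/slab.h>

/*
 * Sketch with hypothetical names: for byte buffers the element size is 1,
 * so the sizeof(u8) factor is dropped and plain kzalloc() remains.
 */
static u8 *alloc_byte_buffer(size_t len)
{
	return kzalloc(len, GFP_KERNEL);
}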
 
        }
        rtlpriv = hw->priv;
        rtlpriv->hw = hw;
-       rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
+       rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32),
                                    GFP_KERNEL);
        if (!rtlpriv->usb_data)
                return -ENOMEM;
 
        spin_lock_init(&stats->lock);
        init_waitqueue_head(&stats->wait_link_id_empty);
 
-       stats->link_map_cache = kzalloc(sizeof(int) * map_capacity,
+       stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
                                        GFP_KERNEL);
        if (!stats->link_map_cache)
                return -ENOMEM;
        spin_lock_init(&queue->lock);
        timer_setup(&queue->gc, cw1200_queue_gc, 0);
 
-       queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
-                       GFP_KERNEL);
+       queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
+                             GFP_KERNEL);
        if (!queue->pool)
                return -ENOMEM;
 
-       queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity,
-                       GFP_KERNEL);
+       queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
+                                       GFP_KERNEL);
        if (!queue->link_map_cache) {
                kfree(queue->pool);
                queue->pool = NULL;
 
                        scan.type = WSM_SCAN_TYPE_BACKGROUND;
                        scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
                }
-               scan.ch = kzalloc(
-                       sizeof(struct wsm_scan_ch) * (it - priv->scan.curr),
-                       GFP_KERNEL);
+               scan.ch = kcalloc(it - priv->scan.curr,
+                                 sizeof(struct wsm_scan_ch),
+                                 GFP_KERNEL);
                if (!scan.ch) {
                        priv->scan.status = -ENOMEM;
                        goto fail;
 
        addr_offset = offset % RK3399_NBYTES;
        addr_len = addr_end - addr_start;
 
-       buf = kzalloc(sizeof(*buf) * addr_len * RK3399_NBYTES, GFP_KERNEL);
+       buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)),
+                     GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto nomem;
        addr_offset = offset % RK3399_NBYTES;
        addr_len = addr_end - addr_start;
 
-       buf = kzalloc(sizeof(*buf) * addr_len * RK3399_NBYTES, GFP_KERNEL);
+       buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)),
+                     GFP_KERNEL);
        if (!buf) {
                clk_disable_unprepare(efuse->clk);
                return -ENOMEM;
 
        if (IS_ERR(nvmem))
                return PTR_ERR(nvmem);
 
-       randomness = kzalloc(sizeof(u8) * (size), GFP_KERNEL);
+       randomness = kzalloc(size, GFP_KERNEL);
        if (!randomness) {
                ret = -EINVAL;
                goto err_unreg_nvmem;
 
 
        /* Populate the resource table */
        if (num_irq || num_reg) {
-               res = kzalloc(sizeof(*res) * (num_irq + num_reg), GFP_KERNEL);
+               res = kcalloc(num_irq + num_reg, sizeof(*res), GFP_KERNEL);
                if (!res) {
                        platform_device_put(dev);
                        return NULL;
 
        }
 
        /* Array of 4 properties for the purpose of testing */
-       prop = kzalloc(sizeof(*prop) * 4, GFP_KERNEL);
+       prop = kcalloc(4, sizeof(*prop), GFP_KERNEL);
        if (!prop) {
                unittest(0, "kzalloc() failed\n");
                return;
 
                goto out;
        }
 
-       table = kzalloc(sizeof(*data->vdd_table) *
-                                 data->num_vdd_table, GFP_KERNEL);
+       table = kcalloc(data->num_vdd_table, sizeof(*data->vdd_table),
+                       GFP_KERNEL);
        if (!table) {
                ret = -ENOMEM;
                goto out;
 
                return 0;
 
        /* Dynamically create the MSI attributes for the PCI device */
-       msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
+       msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
        if (!msi_attrs)
                return -ENOMEM;
        for_each_pci_msi_entry(entry, pdev) {
        msi_irq_group->name = "msi_irqs";
        msi_irq_group->attrs = msi_attrs;
 
-       msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL);
+       msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
        if (!msi_irq_groups)
                goto error_irq_group;
        msi_irq_groups[0] = msi_irq_group;
 
 {
        int error;
 
-       b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
+       b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
                               GFP_ATOMIC);
        if (!b->legacy_io)
                goto kzalloc_err;
 
        char configbyte;
        struct pd6729_socket *socket;
 
-       socket = kzalloc(sizeof(struct pd6729_socket) * MAX_SOCKETS,
+       socket = kcalloc(MAX_SOCKETS, sizeof(struct pd6729_socket),
                         GFP_KERNEL);
        if (!socket) {
                dev_warn(&dev->dev, "failed to kzalloc socket.\n");
 
                maps_per_pin++;
        if (num_pulls)
                maps_per_pin++;
-       cur_map = maps = kzalloc(num_pins * maps_per_pin * sizeof(*maps),
-                               GFP_KERNEL);
+       cur_map = maps = kcalloc(num_pins * maps_per_pin, sizeof(*maps),
+                                GFP_KERNEL);
        if (!maps)
                return -ENOMEM;
 
 
        if (!purecfg && config)
                new_num = 2;
 
-       new_map = kzalloc(sizeof(*new_map) * new_num, GFP_KERNEL);
+       new_map = kcalloc(new_num, sizeof(*new_map), GFP_KERNEL);
        if (!new_map)
                return -ENOMEM;
 
 
 
        for_each_child_of_node(np_config, np)
                max_maps += ltq_pinctrl_dt_subnode_size(np);
-       *map = kzalloc(max_maps * sizeof(struct pinctrl_map) * 2, GFP_KERNEL);
+       *map = kzalloc(array3_size(max_maps, sizeof(struct pinctrl_map), 2),
+                      GFP_KERNEL);
        if (!*map)
                return -ENOMEM;
        tmp = *map;
 
                return -ENODEV;
        }
 
-       *map = kzalloc(sizeof(**map) * count, GFP_KERNEL);
+       *map = kcalloc(count, sizeof(**map), GFP_KERNEL);
        if (!*map)
                return -ENOMEM;
 
 
                return -ENODEV;
        }
 
-       *map = kzalloc(sizeof(**map) * count, GFP_KERNEL);
+       *map = kcalloc(count, sizeof(**map), GFP_KERNEL);
        if (!*map)
                return -ENOMEM;
 
 
        if (!configlen)
                return NULL;
 
-       pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
+       pinconfig = kcalloc(configlen, sizeof(*pinconfig), GFP_KERNEL);
        if (!pinconfig)
                return ERR_PTR(-ENOMEM);
 
 
        if (num_pulls)
                maps_per_pin++;
 
-       cur_map = maps = kzalloc(num_pins * maps_per_pin * sizeof(*maps),
+       cur_map = maps = kcalloc(num_pins * maps_per_pin, sizeof(*maps),
                                 GFP_KERNEL);
        if (!maps)
                return -ENOMEM;
 
         *      - zone_data num_zones is for the distinct zones
         */
        zone_dev_attrs =
-           kzalloc(sizeof(struct device_attribute) * (quirks->num_zones + 1),
+           kcalloc(quirks->num_zones + 1, sizeof(struct device_attribute),
                    GFP_KERNEL);
        if (!zone_dev_attrs)
                return -ENOMEM;
 
        zone_attrs =
-           kzalloc(sizeof(struct attribute *) * (quirks->num_zones + 2),
+           kcalloc(quirks->num_zones + 2, sizeof(struct attribute *),
                    GFP_KERNEL);
        if (!zone_attrs)
                return -ENOMEM;
 
        zone_data =
-           kzalloc(sizeof(struct platform_zone) * (quirks->num_zones),
+           kcalloc(quirks->num_zones, sizeof(struct platform_zone),
                    GFP_KERNEL);
        if (!zone_data)
                return -ENOMEM;
 
        u16 *mcp_samples, *ctv1_samples, *ctv2_samples, *mch_samples;
        u8 cur_seqno, last_seqno;
 
-       mcp_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
-       ctv1_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
-       ctv2_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
-       mch_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
-       cpu_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL);
-       mchp_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL);
+       mcp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
+       ctv1_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
+       ctv2_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
+       mch_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
+       cpu_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL);
+       mchp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL);
        if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples ||
                        !cpu_samples || !mchp_samples) {
                dev_err(ips->dev,
 
                return -ENOMEM;
        }
 
-       pcc->sinf = kzalloc(sizeof(u32) * (num_sifr + 1), GFP_KERNEL);
+       pcc->sinf = kcalloc(num_sifr + 1, sizeof(u32), GFP_KERNEL);
        if (!pcc->sinf) {
                result = -ENOMEM;
                goto out_hotkey;
 
        if (led_supported == TPACPI_LED_NONE)
                return 1;
 
-       tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
+       tpacpi_leds = kcalloc(TPACPI_LED_NUMLEDS, sizeof(*tpacpi_leds),
                              GFP_KERNEL);
        if (!tpacpi_leds) {
                pr_err("Out of memory for LED data\n");
 
        if (pdata->min_voltage >= 0)
                props++;        /* POWER_SUPPLY_PROP_VOLTAGE_MIN */
 
-       prop = kzalloc(props * sizeof(*prop), GFP_KERNEL);
+       prop = kcalloc(props, sizeof(*prop), GFP_KERNEL);
        if (!prop) {
                ret = -ENOMEM;
                goto err3;
 
        if (info->min_voltage >= 0)
                props++;        /* POWER_SUPPLY_PROP_VOLTAGE_MIN */
 
-       prop = kzalloc(props * sizeof(*prop), GFP_KERNEL);
+       prop = kcalloc(props, sizeof(*prop), GFP_KERNEL);
        if (!prop)
                return -ENOMEM;
 
 
        dev_set_name(&power_zone->dev, "%s:%x",
                                        dev_name(power_zone->dev.parent),
                                        power_zone->id);
-       power_zone->constraints = kzalloc(sizeof(*power_zone->constraints) *
-                                        nr_constraints, GFP_KERNEL);
+       power_zone->constraints = kcalloc(nr_constraints,
+                                         sizeof(*power_zone->constraints),
+                                         GFP_KERNEL);
        if (!power_zone->constraints)
                goto err_const_alloc;
 
        nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS +
                                                POWERCAP_ZONE_MAX_ATTRS + 1;
-       power_zone->zone_dev_attrs = kzalloc(sizeof(void *) *
-                                               nr_attrs, GFP_KERNEL);
+       power_zone->zone_dev_attrs = kcalloc(nr_attrs, sizeof(void *),
+                                            GFP_KERNEL);
        if (!power_zone->zone_dev_attrs)
                goto err_attr_alloc;
        create_power_zone_common_attributes(power_zone);
 
                rswitch = rdev->rswitch;
                rswitch->port_ok = 0;
                spin_lock_init(&rswitch->lock);
-               rswitch->route_table = kzalloc(sizeof(u8)*
-                                       RIO_MAX_ROUTE_ENTRIES(port->sys_size),
-                                       GFP_KERNEL);
+               rswitch->route_table =
+                       kzalloc(RIO_MAX_ROUTE_ENTRIES(port->sys_size),
+                               GFP_KERNEL);
                if (!rswitch->route_table)
                        goto cleanup;
                /* Initialize switch route table */
 
                }
        }
 
-       rdata = kzalloc(sizeof(*rdata) * rdev_num, GFP_KERNEL);
+       rdata = kcalloc(rdev_num, sizeof(*rdata), GFP_KERNEL);
        if (!rdata)
                return -ENOMEM;
 
 
        if (dev_info->num_of_segments <= 1)
                return 0;
 
-       sort_list = kzalloc(
-                       sizeof(struct segment_info) * dev_info->num_of_segments,
-                       GFP_KERNEL);
+       sort_list = kcalloc(dev_info->num_of_segments,
+                           sizeof(struct segment_info),
+                           GFP_KERNEL);
        if (sort_list == NULL)
                return -ENOMEM;
        i = 0;
 
                }
        }
        kbd->fn_handler =
-               kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
+               kcalloc(NR_FN_HANDLER, sizeof(fn_handler_fn *), GFP_KERNEL);
        if (!kbd->fn_handler)
                goto out_func;
        kbd->accent_table = kmemdup(ebc_accent_table,
 
         * That means we allocate room for CCWs to cover count/reclen
         * records plus a NOP.
         */
-       cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
+       cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
                      GFP_KERNEL | GFP_DMA);
        if (!cpa)
                return ERR_PTR(-ENOMEM);
 
        char *buf;
        int i = 0;
 
-       buf = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL);
+       buf = kcalloc(memblock.memory.cnt, CHUNK_INFO_SIZE, GFP_KERNEL);
        if (!buf) {
                return -ENOMEM;
        }
 
 
 int qdio_enable_async_operation(struct qdio_output_q *outq)
 {
-       outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
+       outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
                             GFP_ATOMIC);
        if (!outq->aobs) {
                outq->use_cq = 0;
 
 /* allocate non-shared indicators and shared indicator */
 int __init tiqdio_allocate_memory(void)
 {
-       q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
-                            GFP_KERNEL);
+       q_indicators = kcalloc(TIQDIO_NR_INDICATORS,
+                              sizeof(struct indicator_t),
+                              GFP_KERNEL);
        if (!q_indicators)
                return -ENOMEM;
        return 0;
 
         * allocate consecutive memory for request CPRB, request param
         * block, reply CPRB and reply param block
         */
-       cprbmem = kzalloc(2 * cprbplusparamblen, GFP_KERNEL);
+       cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
        if (!cprbmem)
                return -ENOMEM;
 
 
        } else
                ccw_num = 8;
 
-       ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+       ch->ccw = kcalloc(ccw_num, sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (ch->ccw == NULL)
                                        goto nomem_return;
 
 
                }
                card->qdio.no_in_queues = 2;
                card->qdio.out_bufstates =
-                       kzalloc(card->qdio.no_out_queues *
-                               QDIO_MAX_BUFFERS_PER_Q *
-                               sizeof(struct qdio_outbuf_state), GFP_KERNEL);
+                       kcalloc(card->qdio.no_out_queues *
+                                       QDIO_MAX_BUFFERS_PER_Q,
+                               sizeof(struct qdio_outbuf_state),
+                               GFP_KERNEL);
                outbuf_states = card->qdio.out_bufstates;
                if (outbuf_states == NULL) {
                        rc = -1;
 
        /* outbound */
        card->qdio.out_qs =
-               kzalloc(card->qdio.no_out_queues *
-                       sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
+               kcalloc(card->qdio.no_out_queues,
+                       sizeof(struct qeth_qdio_out_q *),
+                       GFP_KERNEL);
        if (!card->qdio.out_qs)
                goto out_freepool;
        for (i = 0; i < card->qdio.no_out_queues; ++i) {
 
        QETH_DBF_TEXT(SETUP, 2, "qdioest");
 
-       qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
-                             GFP_KERNEL);
+       qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
+                                 GFP_KERNEL);
        if (!qib_param_field) {
                rc =  -ENOMEM;
                goto out_free_nothing;
        qeth_create_qib_param_field(card, qib_param_field);
        qeth_create_qib_param_field_blkt(card, qib_param_field);
 
-       in_sbal_ptrs = kzalloc(card->qdio.no_in_queues *
-                              QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
+       in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
+                              sizeof(void *),
                               GFP_KERNEL);
        if (!in_sbal_ptrs) {
                rc = -ENOMEM;
                        virt_to_phys(card->qdio.in_q->bufs[i].buffer);
        }
 
-       queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues,
+       queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
                                   GFP_KERNEL);
        if (!queue_start_poll) {
                rc = -ENOMEM;
        qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
 
        out_sbal_ptrs =
-               kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
-                       sizeof(void *), GFP_KERNEL);
+               kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
+                       sizeof(void *),
+                       GFP_KERNEL);
        if (!out_sbal_ptrs) {
                rc = -ENOMEM;
                goto out_free_queue_start_poll;
 
        if (blogic_probe_options.noprobe)
                return -ENODEV;
        blogic_probeinfo_list =
-           kzalloc(BLOGIC_MAX_ADAPTERS * sizeof(struct blogic_probeinfo),
+           kcalloc(BLOGIC_MAX_ADAPTERS, sizeof(struct blogic_probeinfo),
                            GFP_KERNEL);
        if (blogic_probeinfo_list == NULL) {
                blogic_err("BusLogic: Unable to allocate Probe Info List\n",
 
        if (aac_reset_devices || reset_devices)
                aac->init_reset = true;
 
-       aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
+       aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
+                           sizeof(struct fib),
+                           GFP_KERNEL);
        if (!aac->fibs)
                goto out_free_host;
        spin_lock_init(&aac->fib_lock);
 
        SLIST_INIT(&scb_data->sg_maps);
 
        /* Allocate SCB resources */
-       scb_data->scbarray = kzalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
-                               GFP_ATOMIC);
+       scb_data->scbarray = kcalloc(AHC_SCB_MAX_ALLOC, sizeof(struct scb),
+                                    GFP_ATOMIC);
        if (scb_data->scbarray == NULL)
                return (ENOMEM);
 
 
 
        /* allocate the index array and bitmap */
        asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs;
-       asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits*
-                                            sizeof(void *), GFP_KERNEL);
+       asd_ha->seq.tc_index_array = kcalloc(asd_ha->seq.tc_index_bitmap_bits,
+                                            sizeof(void *),
+                                            GFP_KERNEL);
        if (!asd_ha->seq.tc_index_array)
                return -ENOMEM;
 
 
        int flash_command = FLASH_CMD_NONE;
        int err = 0;
 
-       cmd_ptr = kzalloc(count*2, GFP_KERNEL);
+       cmd_ptr = kcalloc(count, 2, GFP_KERNEL);
 
        if (!cmd_ptr) {
                err = FAIL_OUT_MEMORY;
 
 
        /* Allocate memory for wrb_context */
        phwi_ctrlr = phba->phwi_ctrlr;
-       phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
-                                         phba->params.cxns_per_ctrl,
+       phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl,
+                                         sizeof(struct hwi_wrb_context),
                                          GFP_KERNEL);
        if (!phwi_ctrlr->wrb_context) {
                kfree(phba->phwi_ctrlr);
 
        /* Allocate memory for WRBQ */
        phwi_ctxt = phwi_ctrlr->phwi_ctxt;
-       phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
-                                    phba->params.cxns_per_ctrl,
+       phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl,
+                                    sizeof(struct be_queue_info),
                                     GFP_KERNEL);
        if (!phwi_ctxt->be_wrbq) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
        for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                pwrb_context->pwrb_handle_base =
-                               kzalloc(sizeof(struct wrb_handle *) *
-                                       phba->params.wrbs_per_cxn, GFP_KERNEL);
+                               kcalloc(phba->params.wrbs_per_cxn,
+                                       sizeof(struct wrb_handle *),
+                                       GFP_KERNEL);
                if (!pwrb_context->pwrb_handle_base) {
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                                    "BM_%d : Mem Alloc Failed. Failing to load\n");
                        goto init_wrb_hndl_failed;
                }
                pwrb_context->pwrb_handle_basestd =
-                               kzalloc(sizeof(struct wrb_handle *) *
-                                       phba->params.wrbs_per_cxn, GFP_KERNEL);
+                               kcalloc(phba->params.wrbs_per_cxn,
+                                       sizeof(struct wrb_handle *),
+                                       GFP_KERNEL);
                if (!pwrb_context->pwrb_handle_basestd) {
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                                    "BM_%d : Mem Alloc Failed. Failing to load\n");
        mem_descr_sglh = phba->init_mem;
        mem_descr_sglh += HWI_MEM_SGLH;
        if (1 == mem_descr_sglh->num_elements) {
-               phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
-                                                phba->params.ios_per_ctrl,
+               phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl,
+                                                sizeof(struct sgl_handle *),
                                                 GFP_KERNEL);
                if (!phba->io_sgl_hndl_base) {
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                                    "BM_%d : Mem Alloc Failed. Failing to load\n");
                        return -ENOMEM;
                }
-               phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
-                                                (phba->params.icds_per_ctrl -
-                                                phba->params.ios_per_ctrl),
-                                                GFP_KERNEL);
+               phba->eh_sgl_hndl_base =
+                       kcalloc(phba->params.icds_per_ctrl -
+                                       phba->params.ios_per_ctrl,
+                               sizeof(struct sgl_handle *), GFP_KERNEL);
                if (!phba->eh_sgl_hndl_base) {
                        kfree(phba->io_sgl_hndl_base);
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                        phba->cid_array_info[ulp_num] = ptr_cid_info;
                }
        }
-       phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
-                                phba->params.cxns_per_ctrl, GFP_KERNEL);
+       phba->ep_array = kcalloc(phba->params.cxns_per_ctrl,
+                                sizeof(struct iscsi_endpoint *),
+                                GFP_KERNEL);
        if (!phba->ep_array) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Failed to allocate memory in "
                goto free_memory;
        }
 
-       phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
-                                  phba->params.cxns_per_ctrl, GFP_KERNEL);
+       phba->conn_table = kcalloc(phba->params.cxns_per_ctrl,
+                                  sizeof(struct beiscsi_conn *),
+                                  GFP_KERNEL);
        if (!phba->conn_table) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Failed to allocate memory in"
 
        struct bfa_rport_qualifier_s *rports = NULL;
        unsigned long   flags;
 
-       rports = kzalloc(sizeof(struct bfa_rport_qualifier_s) * nrports,
+       rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s),
                         GFP_ATOMIC);
        if (rports == NULL)
                return snprintf(buf, PAGE_SIZE, "Failed\n");
 
        struct bfa_sge_s        *sg_table;
        int sge_num = 1;
 
-       buf_base = kzalloc((sizeof(struct bfad_buf_info) +
-                          sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
+       buf_base = kcalloc(sge_num,
+                          sizeof(struct bfad_buf_info) +
+                               sizeof(struct bfa_sge_s), GFP_KERNEL);
        if (!buf_base)
                return NULL;
 
 
        hba->next_conn_id = 0;
 
        hba->tgt_ofld_list =
-               kzalloc(sizeof(struct bnx2fc_rport *) * BNX2FC_NUM_MAX_SESS,
+               kcalloc(BNX2FC_NUM_MAX_SESS, sizeof(struct bnx2fc_rport *),
                        GFP_KERNEL);
        if (!hba->tgt_ofld_list) {
                printk(KERN_ERR PFX "Unable to allocate tgt offload list\n");
 
                return NULL;
        }
 
-       cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
-                                 arr_sz, GFP_KERNEL);
+       cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
+                                 GFP_KERNEL);
        if (!cmgr->free_list) {
                printk(KERN_ERR PFX "failed to alloc free_list\n");
                goto mem_err;
        }
 
-       cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
-                                      arr_sz, GFP_KERNEL);
+       cmgr->free_list_lock = kcalloc(arr_sz, sizeof(*cmgr->free_list_lock),
+                                      GFP_KERNEL);
        if (!cmgr->free_list_lock) {
                printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
                kfree(cmgr->free_list);
 
                        q->un.iq.flq_idx = flq_idx;
 
                        flq = wrm->q_arr[q->un.iq.flq_idx];
-                       flq->un.fl.bufs = kzalloc(flq->credits *
+                       flq->un.fl.bufs = kcalloc(flq->credits,
                                                  sizeof(struct csio_dma_buf),
                                                  GFP_KERNEL);
                        if (!flq->un.fl.bufs) {
                return -EINVAL;
        }
 
-       wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL);
+       wrm->q_arr = kcalloc(wrm->num_q, sizeof(struct csio_q *), GFP_KERNEL);
        if (!wrm->q_arr)
                goto err;
 
 
 
        /* allocate requests for asynchronous events */
        a->first_ae_req =
-               kzalloc(num_ae_requests * sizeof(struct esas2r_request),
+               kcalloc(num_ae_requests, sizeof(struct esas2r_request),
                        GFP_KERNEL);
 
        if (a->first_ae_req == NULL) {
        }
 
        /* allocate the S/G list memory descriptors */
-       a->sg_list_mds = kzalloc(
-               num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+       a->sg_list_mds = kcalloc(num_sg_lists, sizeof(struct esas2r_mem_desc),
+                                GFP_KERNEL);
 
        if (a->sg_list_mds == NULL) {
                esas2r_log(ESAS2R_LOG_CRIT,
 
        /* allocate the request table */
        a->req_table =
-               kzalloc((num_requests + num_ae_requests +
-                        1) * sizeof(struct esas2r_request *), GFP_KERNEL);
+               kcalloc(num_requests + num_ae_requests + 1,
+                       sizeof(struct esas2r_request *),
+                       GFP_KERNEL);
 
        if (a->req_table == NULL) {
                esas2r_log(ESAS2R_LOG_CRIT,
 
        }
        spin_unlock_irqrestore(&h->reset_lock, flags);
 
-       added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
-       removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
+       added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
+       removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
 
        if (!added || !removed) {
                dev_warn(&h->pdev->dev, "out of memory in "
                return 0;
 
        h->ioaccel2_cmd_sg_list =
-               kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
+               kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
                                        GFP_KERNEL);
        if (!h->ioaccel2_cmd_sg_list)
                return -ENOMEM;
        if (h->chainsize <= 0)
                return 0;
 
-       h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
-                               GFP_KERNEL);
+       h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
+                                GFP_KERNEL);
        if (!h->cmd_sg_list)
                return -ENOMEM;
 
        bool physical_device;
        DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
 
-       currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
+       currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
        physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
        logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
        tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
                status = -EINVAL;
                goto cleanup1;
        }
-       buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
+       buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
        if (!buff) {
                status = -ENOMEM;
                goto cleanup1;
 
 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
 {
-       h->cmd_pool_bits = kzalloc(
-               DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
-               sizeof(unsigned long), GFP_KERNEL);
+       h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
+                                  sizeof(unsigned long),
+                                  GFP_KERNEL);
        h->cmd_pool = pci_alloc_consistent(h->pdev,
                    h->nr_cmds * sizeof(*h->cmd_pool),
                    &(h->cmd_pool_dhandle));
        if (!h)
                return NULL;
 
-       h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL);
+       h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
        if (!h->reply_map) {
                kfree(h);
                return NULL;
 
        int i, rc = -ENOMEM;
 
        ENTER;
-       ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
-                                      ioa_cfg->max_devs_supported, GFP_KERNEL);
+       ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
+                                      sizeof(struct ipr_resource_entry),
+                                      GFP_KERNEL);
 
        if (!ioa_cfg->res_entries)
                goto out;
                list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
        }
 
-       ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
-                                IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
+       ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
+                                sizeof(struct ipr_trace_entry),
+                                GFP_KERNEL);
 
        if (!ioa_cfg->trace)
                goto out_free_hostrcb_dma;
 
        struct expander_device *ex = &dev->ex_dev;
        int res = -ENOMEM;
 
-       ex->ex_phy = kzalloc(sizeof(*ex->ex_phy)*ex->num_phys, GFP_KERNEL);
+       ex->ex_phy = kcalloc(ex->num_phys, sizeof(*ex->ex_phy), GFP_KERNEL);
        if (!ex->ex_phy)
                return -ENOMEM;
 
 
        }
 
        if (!phba->sli.sli3_ring)
-               phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING *
-                       sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+               phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
+                                             sizeof(struct lpfc_sli_ring),
+                                             GFP_KERNEL);
        if (!phba->sli.sli3_ring)
                return -ENOMEM;
 
 
        /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
        longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
-       phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+       phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
                                         GFP_KERNEL);
        if (!phba->fcf.fcf_rr_bmask) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 
                                           - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
                spin_unlock_irq(&phba->hbalock);
-               new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
+               new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
                                  GFP_KERNEL);
                if (new_arr) {
                        spin_lock_irq(&phba->hbalock);
                 */
                if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
                        longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
-                       phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+                       phba->vpi_bmask = kcalloc(longs,
+                                                 sizeof(unsigned long),
                                                  GFP_KERNEL);
                        if (!phba->vpi_bmask) {
                                rc = -ENOMEM;
                                goto lpfc_sli_hba_setup_error;
                        }
 
-                       phba->vpi_ids = kzalloc(
-                                       (phba->max_vpi+1) * sizeof(uint16_t),
-                                       GFP_KERNEL);
+                       phba->vpi_ids = kcalloc(phba->max_vpi + 1,
+                                               sizeof(uint16_t),
+                                               GFP_KERNEL);
                        if (!phba->vpi_ids) {
                                kfree(phba->vpi_bmask);
                                rc = -ENOMEM;
        length = sizeof(struct lpfc_rsrc_blks);
        switch (type) {
        case LPFC_RSC_TYPE_FCOE_RPI:
-               phba->sli4_hba.rpi_bmask = kzalloc(longs *
+               phba->sli4_hba.rpi_bmask = kcalloc(longs,
                                                   sizeof(unsigned long),
                                                   GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.rpi_bmask)) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
-               phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+               phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
                                                 sizeof(uint16_t),
                                                 GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.rpi_ids)) {
                ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
                break;
        case LPFC_RSC_TYPE_FCOE_VPI:
-               phba->vpi_bmask = kzalloc(longs *
-                                         sizeof(unsigned long),
+               phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
                                          GFP_KERNEL);
                if (unlikely(!phba->vpi_bmask)) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
-               phba->vpi_ids = kzalloc(rsrc_id_cnt *
-                                        sizeof(uint16_t),
+               phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
                                         GFP_KERNEL);
                if (unlikely(!phba->vpi_ids)) {
                        kfree(phba->vpi_bmask);
                ext_blk_list = &phba->lpfc_vpi_blk_list;
                break;
        case LPFC_RSC_TYPE_FCOE_XRI:
-               phba->sli4_hba.xri_bmask = kzalloc(longs *
+               phba->sli4_hba.xri_bmask = kcalloc(longs,
                                                   sizeof(unsigned long),
                                                   GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.xri_bmask)) {
                        goto err_exit;
                }
                phba->sli4_hba.max_cfg_param.xri_used = 0;
-               phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+               phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
                                                 sizeof(uint16_t),
                                                 GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.xri_ids)) {
                ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
                break;
        case LPFC_RSC_TYPE_FCOE_VFI:
-               phba->sli4_hba.vfi_bmask = kzalloc(longs *
+               phba->sli4_hba.vfi_bmask = kcalloc(longs,
                                                   sizeof(unsigned long),
                                                   GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.vfi_bmask)) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
-               phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+               phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
                                                 sizeof(uint16_t),
                                                 GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.vfi_ids)) {
                }
                base = phba->sli4_hba.max_cfg_param.rpi_base;
                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
-               phba->sli4_hba.rpi_bmask = kzalloc(longs *
+               phba->sli4_hba.rpi_bmask = kcalloc(longs,
                                                   sizeof(unsigned long),
                                                   GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.rpi_bmask)) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
-               phba->sli4_hba.rpi_ids = kzalloc(count *
-                                                sizeof(uint16_t),
+               phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
                                                 GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.rpi_ids)) {
                        rc = -ENOMEM;
                }
                base = phba->sli4_hba.max_cfg_param.vpi_base;
                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
-               phba->vpi_bmask = kzalloc(longs *
-                                         sizeof(unsigned long),
+               phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
                                          GFP_KERNEL);
                if (unlikely(!phba->vpi_bmask)) {
                        rc = -ENOMEM;
                        goto free_rpi_ids;
                }
-               phba->vpi_ids = kzalloc(count *
-                                       sizeof(uint16_t),
+               phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
                                        GFP_KERNEL);
                if (unlikely(!phba->vpi_ids)) {
                        rc = -ENOMEM;
                }
                base = phba->sli4_hba.max_cfg_param.xri_base;
                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
-               phba->sli4_hba.xri_bmask = kzalloc(longs *
+               phba->sli4_hba.xri_bmask = kcalloc(longs,
                                                   sizeof(unsigned long),
                                                   GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.xri_bmask)) {
                        goto free_vpi_ids;
                }
                phba->sli4_hba.max_cfg_param.xri_used = 0;
-               phba->sli4_hba.xri_ids = kzalloc(count *
-                                                sizeof(uint16_t),
+               phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
                                                 GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.xri_ids)) {
                        rc = -ENOMEM;
                }
                base = phba->sli4_hba.max_cfg_param.vfi_base;
                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
-               phba->sli4_hba.vfi_bmask = kzalloc(longs *
+               phba->sli4_hba.vfi_bmask = kcalloc(longs,
                                                   sizeof(unsigned long),
                                                   GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.vfi_bmask)) {
                        rc = -ENOMEM;
                        goto free_xri_ids;
                }
-               phba->sli4_hba.vfi_ids = kzalloc(count *
-                                                sizeof(uint16_t),
+               phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
                                                 GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.vfi_ids)) {
                        rc = -ENOMEM;
 
        struct lpfc_vport *port_iterator;
        struct lpfc_vport **vports;
        int index = 0;
-       vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
+       vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *),
                         GFP_KERNEL);
        if (vports == NULL)
                return NULL;
 
        /* stream detection initialization */
        if (instance->adapter_type == VENTURA_SERIES) {
                fusion->stream_detect_by_ld =
-                       kzalloc(sizeof(struct LD_STREAM_DETECT *)
-                       * MAX_LOGICAL_DRIVES_EXT,
-                       GFP_KERNEL);
+                       kcalloc(MAX_LOGICAL_DRIVES_EXT,
+                               sizeof(struct LD_STREAM_DETECT *),
+                               GFP_KERNEL);
                if (!fusion->stream_detect_by_ld) {
                        dev_err(&instance->pdev->dev,
                                "unable to allocate stream detection for pool of LDs\n");
  */
 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
 {
-       instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids,
+       instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
                                      GFP_KERNEL);
        if (!instance->reply_map)
                return -ENOMEM;
 
         * commands.
         */
        fusion->cmd_list =
-               kzalloc(sizeof(struct megasas_cmd_fusion *) * max_mpt_cmd,
+               kcalloc(max_mpt_cmd, sizeof(struct megasas_cmd_fusion *),
                        GFP_KERNEL);
        if (!fusion->cmd_list) {
                dev_err(&instance->pdev->dev,
 
                struct scatterlist *sg, *sgl = (struct scatterlist *)buffer;
                int i;
 
-               pages = kzalloc(use_sg * sizeof(struct page *), GFP_KERNEL);
+               pages = kcalloc(use_sg, sizeof(struct page *), GFP_KERNEL);
                if (!pages)
                        goto free_req;
 
 
                return -EINPROGRESS;
        pm8001_ha->fw_status = FLASH_IN_PROGRESS;
 
-       cmd_ptr = kzalloc(count*2, GFP_KERNEL);
+       cmd_ptr = kcalloc(count, 2, GFP_KERNEL);
        if (!cmd_ptr) {
                pm8001_ha->fw_status = FAIL_OUT_MEMORY;
                return -ENOMEM;
 
        int i;
 
        pinstance->res_entries =
-                       kzalloc(sizeof(struct pmcraid_resource_entry) *
-                               PMCRAID_MAX_RESOURCES, GFP_KERNEL);
+                       kcalloc(PMCRAID_MAX_RESOURCES,
+                               sizeof(struct pmcraid_resource_entry),
+                               GFP_KERNEL);
 
        if (NULL == pinstance->res_entries) {
                pmcraid_err("failed to allocate memory for resource table\n");
 
        id_tbl->max = size;
        id_tbl->next = next;
        spin_lock_init(&id_tbl->lock);
-       id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+       id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
        if (!id_tbl->table)
                return -ENOMEM;
 
 
                        req->num_outstanding_cmds = ha->cur_fw_iocb_count;
        }
 
-       req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
-           req->num_outstanding_cmds, GFP_KERNEL);
+       req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
+                                       sizeof(srb_t *),
+                                       GFP_KERNEL);
 
        if (!req->outstanding_cmds) {
                /*
                 * initialization.
                 */
                req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
-               req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
-                   req->num_outstanding_cmds, GFP_KERNEL);
+               req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
+                                               sizeof(srb_t *),
+                                               GFP_KERNEL);
 
                if (!req->outstanding_cmds) {
                        ql_log(ql_log_fatal, NULL, 0x0126,
 
                            "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
                }
        }
-       ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
-                               ha->msix_count, GFP_KERNEL);
+       ha->msix_entries = kcalloc(ha->msix_count,
+                                  sizeof(struct qla_msix_entry),
+                                  GFP_KERNEL);
        if (!ha->msix_entries) {
                ql_log(ql_log_fatal, vha, 0x00c8,
                    "Failed to allocate memory for ha->msix_entries.\n");
 
                                struct rsp_que *rsp)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-       ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
+       ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
                                GFP_KERNEL);
        if (!ha->req_q_map) {
                ql_log(ql_log_fatal, vha, 0x003b,
                goto fail_req_map;
        }
 
-       ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
+       ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
                                GFP_KERNEL);
        if (!ha->rsp_q_map) {
                ql_log(ql_log_fatal, vha, 0x003c,
            (*rsp)->ring);
        /* Allocate memory for NVRAM data for vports */
        if (ha->nvram_npiv_size) {
-               ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
-                   ha->nvram_npiv_size, GFP_KERNEL);
+               ha->npiv_info = kcalloc(ha->nvram_npiv_size,
+                                       sizeof(struct qla_npiv_entry),
+                                       GFP_KERNEL);
                if (!ha->npiv_info) {
                        ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
                            "Failed to allocate memory for npiv_info.\n");
        INIT_LIST_HEAD(&ha->vp_list);
 
        /* Allocate memory for our loop_id bitmap */
-       ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
-           GFP_KERNEL);
+       ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE),
+                                 sizeof(long),
+                                 GFP_KERNEL);
        if (!ha->loop_id_map)
                goto fail_loop_id_map;
        else {
 
                return -ENOMEM;
        }
 
-       tgt->qphints = kzalloc((ha->max_qpairs + 1) *
-           sizeof(struct qla_qpair_hint), GFP_KERNEL);
+       tgt->qphints = kcalloc(ha->max_qpairs + 1,
+                              sizeof(struct qla_qpair_hint),
+                              GFP_KERNEL);
        if (!tgt->qphints) {
                kfree(tgt);
                ql_log(ql_log_warn, base_vha, 0x0197,
        if (!QLA_TGT_MODE_ENABLED())
                return 0;
 
-       ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
-           MAX_MULTI_ID_FABRIC, GFP_KERNEL);
+       ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
+                                    sizeof(struct qla_tgt_vp_map),
+                                    GFP_KERNEL);
        if (!ha->tgt.tgt_vp_map)
                return -ENOMEM;
 
 
                return check_condition_result;
        }
        dnum = 2 * num;
-       arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
+       arr = kcalloc(dnum, lb_size, GFP_ATOMIC);
        if (NULL == arr) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
                                INSUFF_RES_ASCQ);
 
                buf = NULL;
        }
 page2_not_supported:
-       scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
+       scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
        if (!scomp)
                goto err_free;
 
 
                else {
                        sg_req_info_t *rinfo;
 
-                       rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+                       rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO,
                                        GFP_KERNEL);
                        if (!rinfo)
                                return -ENOMEM;
 
        struct device *dev;
        struct pqi_io_request *io_request;
 
-       ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
-               sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
+       ctrl_info->io_request_pool =
+               kcalloc(ctrl_info->max_io_slots,
+                       sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
 
        if (!ctrl_info->io_request_pool) {
                dev_err(&ctrl_info->pci_dev->dev,
 
        tb->dma = need_dma;
        tb->buffer_size = 0;
 
-       tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *),
+       tb->reserved_pages = kcalloc(max_sg, sizeof(struct page *),
                                     GFP_KERNEL);
        if (!tb->reserved_pages) {
                kfree(tb);
 
        int k;
 
        freq_table_size *= (nr_divs + 1);
-       freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
+       freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
        if (!freq_table) {
                pr_err("%s: unable to alloc memory\n", __func__);
                return -ENOMEM;
 
 
        if (desc->num_resources) {
                d->nr_windows = desc->num_resources;
-               d->window = kzalloc(d->nr_windows * sizeof(*d->window),
+               d->window = kcalloc(d->nr_windows, sizeof(*d->window),
                                    GFP_NOWAIT);
                if (!d->window)
                        goto err1;
        d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
        d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;
 
-       d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
+       d->reg = kcalloc(d->nr_reg, sizeof(*d->reg), GFP_NOWAIT);
        if (!d->reg)
                goto err2;
 
 #ifdef CONFIG_SMP
-       d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
+       d->smp = kcalloc(d->nr_reg, sizeof(*d->smp), GFP_NOWAIT);
        if (!d->smp)
                goto err3;
 #endif
        }
 
        if (hw->prio_regs) {
-               d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
+               d->prio = kcalloc(hw->nr_vectors, sizeof(*d->prio),
                                  GFP_NOWAIT);
                if (!d->prio)
                        goto err4;
        }
 
        if (hw->sense_regs) {
-               d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
+               d->sense = kcalloc(hw->nr_vectors, sizeof(*d->sense),
                                   GFP_NOWAIT);
                if (!d->sense)
                        goto err5;
 
        void *sendbuf = NULL;
 
        if (length) {
-               sendbuf = kzalloc(length * 4, GFP_KERNEL);
+               sendbuf = kcalloc(length, 4, GFP_KERNEL);
                if (!sendbuf) {
                        ret = -ENOMEM;
                        goto out;
 
        ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
        ctrl->rx.n = QCOM_RX_MSGS;
        ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
-       ctrl->wr_comp = kzalloc(sizeof(struct completion *) * QCOM_TX_MSGS,
+       ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
                                GFP_KERNEL);
        if (!ctrl->wr_comp)
                return -ENOMEM;
 
        if (!max_maps)
                return max_maps;
 
-       *map = kzalloc(max_maps * sizeof(struct pinctrl_map), GFP_KERNEL);
+       *map = kcalloc(max_maps, sizeof(struct pinctrl_map), GFP_KERNEL);
        if (!*map)
                return -ENOMEM;
 
 
        }
 
        /* allocate memory for efuse_tbl and efuse_word */
-       efuse_tbl = kzalloc(rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE] *
-                           sizeof(u8), GFP_ATOMIC);
+       efuse_tbl = kzalloc(rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE],
+                           GFP_ATOMIC);
        if (!efuse_tbl)
                return;
        efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC);
 
                if (cmdrsp->scsi.no_disk_result == 0)
                        return;
 
-               buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
+               buf = kzalloc(36, GFP_KERNEL);
                if (!buf)
                        return;
 
 
 {
        int rc;
 
-       se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
+       se_sess->sess_cmd_map = kcalloc(tag_num, tag_size,
                                        GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
        if (!se_sess->sess_cmd_map) {
                se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
 
 
        info = &udev->uio_info;
 
-       udev->data_bitmap = kzalloc(BITS_TO_LONGS(udev->max_blocks) *
-                                   sizeof(unsigned long), GFP_KERNEL);
+       udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
+                                   sizeof(unsigned long),
+                                   GFP_KERNEL);
        if (!udev->data_bitmap) {
                ret = -ENOMEM;
                goto err_bitmap_alloc;
 
        }
 
        *trt_count = p->package.count;
-       trts = kzalloc(*trt_count * sizeof(struct trt), GFP_KERNEL);
+       trts = kcalloc(*trt_count, sizeof(struct trt), GFP_KERNEL);
        if (!trts) {
                result = -ENOMEM;
                goto end;
 
        /* ignore p->package.elements[0], as this is _ART Revision field */
        *art_count = p->package.count - 1;
-       arts = kzalloc(*art_count * sizeof(struct art), GFP_KERNEL);
+       arts = kcalloc(*art_count, sizeof(struct art), GFP_KERNEL);
        if (!arts) {
                result = -ENOMEM;
                goto end;
 
        if (ACPI_FAILURE(status))
                trip_cnt = 0;
        else {
-               int34x_thermal_zone->aux_trips = kzalloc(
-                               sizeof(*int34x_thermal_zone->aux_trips) *
-                               trip_cnt, GFP_KERNEL);
+               int34x_thermal_zone->aux_trips =
+                       kcalloc(trip_cnt,
+                               sizeof(*int34x_thermal_zone->aux_trips),
+                               GFP_KERNEL);
                if (!int34x_thermal_zone->aux_trips) {
                        ret = -ENOMEM;
                        goto err_trip_alloc;
 
        if (tz->ntrips == 0) /* must have at least one child */
                goto finish;
 
-       tz->trips = kzalloc(tz->ntrips * sizeof(*tz->trips), GFP_KERNEL);
+       tz->trips = kcalloc(tz->ntrips, sizeof(*tz->trips), GFP_KERNEL);
        if (!tz->trips) {
                ret = -ENOMEM;
                goto free_tz;
        if (tz->num_tbps == 0)
                goto finish;
 
-       tz->tbps = kzalloc(tz->num_tbps * sizeof(*tz->tbps), GFP_KERNEL);
+       tz->tbps = kcalloc(tz->num_tbps, sizeof(*tz->tbps), GFP_KERNEL);
        if (!tz->tbps) {
                ret = -ENOMEM;
                goto free_trips;
 
                return -ENODEV;
 
        max_packages = topology_max_packages();
-       packages = kzalloc(max_packages * sizeof(struct pkg_device *), GFP_KERNEL);
+       packages = kcalloc(max_packages, sizeof(struct pkg_device *),
+                          GFP_KERNEL);
        if (!packages)
                return -ENOMEM;
 
 
         * array, then you can use pointer math (e.g. "bc - bcs") to get its
         * tty index.
         */
-       bcs = kzalloc(count * sizeof(struct ehv_bc_data), GFP_KERNEL);
+       bcs = kcalloc(count, sizeof(struct ehv_bc_data), GFP_KERNEL);
        if (!bcs)
                return -ENOMEM;
 
 
        int ret;
        struct tty_driver *tty;
 
-       goldfish_ttys = kzalloc(sizeof(*goldfish_ttys) *
-                               goldfish_tty_line_count, GFP_KERNEL);
+       goldfish_ttys = kcalloc(goldfish_tty_line_count,
+                               sizeof(*goldfish_ttys),
+                               GFP_KERNEL);
        if (goldfish_ttys == NULL) {
                ret = -ENOMEM;
                goto err_alloc_goldfish_ttys_failed;
 
        if (size > MAX_VMID_FILTER)
                return -ENOSPC;
 
-       array = kzalloc(size * 8, GFP_KERNEL);
+       array = kcalloc(size, 8, GFP_KERNEL);
        if (!array)
                return -ENOMEM;
 
 
 
        priv->tx_dma_use = 1;
 
-       priv->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+       priv->sg_tx_p = kcalloc(num, sizeof(struct scatterlist), GFP_ATOMIC);
        if (!priv->sg_tx_p) {
                dev_err(priv->port.dev, "%s:kzalloc Failed\n", __func__);
                return 0;
 
         * Maybe we should be using a slab cache for this, especially if
         * we have a large number of ports to handle.
         */
-       drv->state = kzalloc(sizeof(struct uart_state) * drv->nr, GFP_KERNEL);
+       drv->state = kcalloc(drv->nr, sizeof(struct uart_state), GFP_KERNEL);
        if (!drv->state)
                goto out;
 
 
        }
 
        if (num_channels) {
-               sunsab_ports = kzalloc(sizeof(struct uart_sunsab_port) *
-                                      num_channels, GFP_KERNEL);
+               sunsab_ports = kcalloc(num_channels,
+                                      sizeof(struct uart_sunsab_port),
+                                      GFP_KERNEL);
                if (!sunsab_ports)
                        return -ENOMEM;
 
 
        if (!gdev)
                return -ENOMEM;
 
-       gdev->info = kzalloc(sizeof(*p) * MAX_PRUSS_EVT, GFP_KERNEL);
+       gdev->info = kcalloc(MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL);
        if (!gdev->info) {
                kfree(gdev);
                return -ENOMEM;
 
        dev_info(hub_dev, "%d port%s detected\n", maxchild,
                        (maxchild == 1) ? "" : "s");
 
-       hub->ports = kzalloc(maxchild * sizeof(struct usb_port *), GFP_KERNEL);
+       hub->ports = kcalloc(maxchild, sizeof(struct usb_port *), GFP_KERNEL);
        if (!hub->ports) {
                ret = -ENOMEM;
                goto fail;
 
        dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
 
 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
-       hsotg->frame_num_array = kzalloc(sizeof(*hsotg->frame_num_array) *
-                                        FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
+       hsotg->frame_num_array = kcalloc(FRAME_NUM_ARRAY_SIZE,
+                                        sizeof(*hsotg->frame_num_array),
+                                        GFP_KERNEL);
        if (!hsotg->frame_num_array)
                goto error1;
-       hsotg->last_frame_num_array = kzalloc(
-                       sizeof(*hsotg->last_frame_num_array) *
-                       FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
+       hsotg->last_frame_num_array =
+               kcalloc(FRAME_NUM_ARRAY_SIZE,
+                       sizeof(*hsotg->last_frame_num_array), GFP_KERNEL);
        if (!hsotg->last_frame_num_array)
                goto error1;
 #endif
 
                __func__, ep, num_tabs);
 
        /* Allocate memory for table array */
-       ep->bd_list.bd_table_array = kzalloc(
-                                       num_tabs * sizeof(struct bd_table *),
-                                       GFP_ATOMIC);
+       ep->bd_list.bd_table_array = kcalloc(num_tabs,
+                                            sizeof(struct bd_table *),
+                                            GFP_ATOMIC);
        if (!ep->bd_list.bd_table_array)
                return -ENOMEM;
 
 
        pdata = dev_get_platdata(&pdev->dev);
        udc->phy_mode = pdata->phy_mode;
 
-       udc->eps = kzalloc(sizeof(struct fsl_ep) * udc->max_ep, GFP_KERNEL);
+       udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
        if (!udc->eps)
                return -1;
 
 
        if (utt->multi) {
                tt_index = utt->hcpriv;
                if (!tt_index) {                /* Create the index array */
-                       tt_index = kzalloc(utt->hub->maxchild *
-                                       sizeof(*tt_index), GFP_ATOMIC);
+                       tt_index = kcalloc(utt->hub->maxchild,
+                                          sizeof(*tt_index),
+                                          GFP_ATOMIC);
                        if (!tt_index)
                                return ERR_PTR(-ENOMEM);
                        utt->hcpriv = tt_index;
 
        if (urb_priv == NULL)
                return -ENOMEM;
 
-       urb_priv->isoc_td = kzalloc(
-               sizeof(struct td) * urb->number_of_packets, mem_flags);
+       urb_priv->isoc_td = kcalloc(urb->number_of_packets, sizeof(struct td),
+                                   mem_flags);
        if (urb_priv->isoc_td == NULL) {
                ret = -ENOMEM;
                goto alloc_td_failed;
 
                        return -EINVAL;
 
                size = CHUNK_ALIGN(arg);
-               vec = kzalloc(sizeof(struct mon_pgmap) * (size / CHUNK_SIZE), GFP_KERNEL);
+               vec = kcalloc(size / CHUNK_SIZE, sizeof(struct mon_pgmap),
+                             GFP_KERNEL);
                if (vec == NULL) {
                        ret = -ENOMEM;
                        break;
 
        if (!gpriv)
                return -ENOMEM;
 
-       uep = kzalloc(sizeof(struct usbhsg_uep) * pipe_size, GFP_KERNEL);
+       uep = kcalloc(pipe_size, sizeof(struct usbhsg_uep), GFP_KERNEL);
        if (!uep) {
                ret = -ENOMEM;
                goto usbhs_mod_gadget_probe_err_gpriv;
 
                return -EINVAL;
        }
 
-       info->pipe = kzalloc(sizeof(struct usbhs_pipe) * pipe_size, GFP_KERNEL);
+       info->pipe = kcalloc(pipe_size, sizeof(struct usbhs_pipe),
+                            GFP_KERNEL);
        if (!info->pipe)
                return -ENOMEM;
 
 
 int wa_rpipes_create(struct wahc *wa)
 {
        wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);
-       wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),
+       wa->rpipe_bm = kcalloc(BITS_TO_LONGS(wa->rpipes),
+                              sizeof(unsigned long),
                               GFP_KERNEL);
        if (wa->rpipe_bm == NULL)
                return -ENOMEM;
 
        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
-               tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
-                                       VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
+               tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
+                                         sizeof(struct scatterlist),
+                                         GFP_KERNEL);
                if (!tv_cmd->tvc_sgl) {
                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
                        goto out;
                }
 
-               tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
-                               VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
+               tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
+                                            sizeof(struct page *),
+                                            GFP_KERNEL);
                if (!tv_cmd->tvc_upages) {
                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
                        goto out;
                }
 
-               tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
-                               VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
+               tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
+                                              sizeof(struct scatterlist),
+                                              GFP_KERNEL);
                if (!tv_cmd->tvc_prot_sgl) {
                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
                        goto out;
 
        unsigned char *n, *p, *q;
        int size = f->raw->bytes_per_char*256+sizeof(struct sti_rom_font);
        
-       n = kzalloc(4*size, STI_LOWMEM);
+       n = kcalloc(4, size, STI_LOWMEM);
        if (!n)
                return NULL;
        p = n + 3;
 
        int tail_start_addr;
        int start_sector_addr;
 
-       sector_buffer = kzalloc(sizeof(char)*sector_size, GFP_KERNEL);
+       sector_buffer = kzalloc(sector_size, GFP_KERNEL);
        if (!sector_buffer)
                return -ENOMEM;
 
 
        int num = 0, i, first = 1;
        int ver, rev;
 
-       mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
+       mode = kcalloc(50, sizeof(struct fb_videomode), GFP_KERNEL);
        if (mode == NULL)
                return NULL;
 
        if (!(num + svd_n))
                return;
 
-       m = kzalloc((specs->modedb_len + num + svd_n) *
-                      sizeof(struct fb_videomode), GFP_KERNEL);
+       m = kcalloc(specs->modedb_len + num + svd_n,
+                   sizeof(struct fb_videomode),
+                   GFP_KERNEL);
 
        if (!m)
                return;
 
                return 0;
        }
        /* put videomode list to info structure */
-       videomodes = kzalloc(sizeof(struct fb_videomode) * videomode_num,
-                       GFP_KERNEL);
+       videomodes = kcalloc(videomode_num, sizeof(struct fb_videomode),
+                            GFP_KERNEL);
        if (!videomodes) {
                dev_err(fbi->dev, "can't malloc video modes\n");
                return -ENOMEM;
 
 
        num_managers = dss_feat_get_num_mgrs();
 
-       managers = kzalloc(sizeof(struct omap_overlay_manager) * num_managers,
-                       GFP_KERNEL);
+       managers = kcalloc(num_managers, sizeof(struct omap_overlay_manager),
+                          GFP_KERNEL);
 
        BUG_ON(managers == NULL);
 
 
 
        num_overlays = dss_feat_get_num_ovls();
 
-       overlays = kzalloc(sizeof(struct omap_overlay) * num_overlays,
-                       GFP_KERNEL);
+       overlays = kcalloc(num_overlays, sizeof(struct omap_overlay),
+                          GFP_KERNEL);
 
        BUG_ON(overlays == NULL);
 
 
                mode++;
        }
 
-       par->vbe_modes = kzalloc(sizeof(struct vbe_mode_ib) *
-                               par->vbe_modes_cnt, GFP_KERNEL);
+       par->vbe_modes = kcalloc(par->vbe_modes_cnt,
+                                sizeof(struct vbe_mode_ib),
+                                GFP_KERNEL);
        if (!par->vbe_modes)
                return -ENOMEM;
 
         * Convert the modelist into a modedb so that we can use it with
         * fb_find_mode().
         */
-       mode = kzalloc(i * sizeof(*mode), GFP_KERNEL);
+       mode = kcalloc(i, sizeof(*mode), GFP_KERNEL);
        if (mode) {
                i = 0;
                list_for_each(pos, &info->modelist) {
 
                goto entryfail;
        }
 
-       disp->timings = kzalloc(sizeof(struct display_timing *) *
-                               disp->num_timings, GFP_KERNEL);
+       disp->timings = kcalloc(disp->num_timings,
+                               sizeof(struct display_timing *),
+                               GFP_KERNEL);
        if (!disp->timings) {
                pr_err("%pOF: could not allocate timings array\n", np);
                goto entryfail;
 
         * 'pages' is an array of struct page pointers that's initialized by
         * get_user_pages().
         */
-       pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
+       pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                pr_debug("fsl-hv: could not allocate page list\n");
                return -ENOMEM;
 
        if (!vp_dev->msix_names)
                goto error;
        vp_dev->msix_affinity_masks
-               = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+               = kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
                          GFP_KERNEL);
        if (!vp_dev->msix_affinity_masks)
                goto error;
 
                if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
                        continue;
 
-               gpfns = kzalloc(sizeof(xen_pfn_t) * nr, GFP_KERNEL);
-               idxs = kzalloc(sizeof(xen_ulong_t) * nr, GFP_KERNEL);
-               errs = kzalloc(sizeof(int) * nr, GFP_KERNEL);
+               gpfns = kcalloc(nr, sizeof(xen_pfn_t), GFP_KERNEL);
+               idxs = kcalloc(nr, sizeof(xen_ulong_t), GFP_KERNEL);
+               errs = kcalloc(nr, sizeof(int), GFP_KERNEL);
                if (!gpfns || !idxs || !errs) {
                        kfree(gpfns);
                        kfree(idxs);
 
 
        num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
                    PAGE_SHIFT;
-       block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
-                                         sizeof(*block_ctx->pagev)) *
+       block_ctx->mem_to_free = kcalloc(sizeof(*block_ctx->datav) +
+                                               sizeof(*block_ctx->pagev),
                                         num_pages, GFP_NOFS);
        if (!block_ctx->mem_to_free)
                return -ENOMEM;
 
 cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
 {
        struct page **pages =
-               kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+               kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
        if (pages)
                return cifs_writedata_direct_alloc(pages, complete);
 
 
 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
 {
        struct page **pages =
-               kzalloc(sizeof(struct page *) * nr_pages, GFP_KERNEL);
+               kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        struct cifs_readdata *ret = NULL;
 
        if (pages) {
 
        down_read(&ei->i_data_sem);
        depth = ext_depth(inode);
 
-       path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
+       path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
                       GFP_NOFS);
        if (path == NULL) {
                up_read(&ei->i_data_sem);
        }
        if (!path) {
                /* account possible depth increase */
-               path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
+               path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
                                GFP_NOFS);
                if (unlikely(!path))
                        return ERR_PTR(-ENOMEM);
         * We need this to handle errors and free blocks
         * upon them.
         */
-       ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
+       ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;
 
                        path[k].p_block =
                                le16_to_cpu(path[k].p_hdr->eh_entries)+1;
        } else {
-               path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
+               path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
                               GFP_NOFS);
                if (path == NULL) {
                        ext4_journal_stop(handle);
 
                fh_count = be32_to_cpup(p);
 
                fls->mirror_array[i]->fh_versions =
-                       kzalloc(fh_count * sizeof(struct nfs_fh),
+                       kcalloc(fh_count, sizeof(struct nfs_fh),
                                gfp_flags);
                if (fls->mirror_array[i]->fh_versions == NULL) {
                        rc = -ENOMEM;
 
        version_count = be32_to_cpup(p);
        dprintk("%s: version count %d\n", __func__, version_count);
 
-       ds_versions = kzalloc(version_count * sizeof(struct nfs4_ff_ds_version),
+       ds_versions = kcalloc(version_count,
+                             sizeof(struct nfs4_ff_ds_version),
                              gfp_flags);
        if (!ds_versions)
                goto out_scratch;
 
        if (fsloc->locations_count == 0)
                return 0;
 
-       fsloc->locations = kzalloc(fsloc->locations_count
-                       * sizeof(struct nfsd4_fs_location), GFP_KERNEL);
+       fsloc->locations = kcalloc(fsloc->locations_count,
+                                  sizeof(struct nfsd4_fs_location),
+                                  GFP_KERNEL);
        if (!fsloc->locations)
                return -ENOMEM;
        for (i=0; i < fsloc->locations_count; i++) {
 
                goto bail;
        }
 
-       rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS);
+       rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
        if (!rm_quota) {
                status = -ENOMEM;
                goto bail;
 
        spin_unlock(&osb->osb_lock);
 
        if (unlikely(!local_system_inodes)) {
-               local_system_inodes = kzalloc(sizeof(struct inode *) *
-                                             NUM_LOCAL_SYSTEM_INODES *
-                                             osb->max_slots,
-                                             GFP_NOFS);
+               local_system_inodes =
+                       kzalloc(array3_size(sizeof(struct inode *),
+                                           NUM_LOCAL_SYSTEM_INODES,
+                                           osb->max_slots),
+                               GFP_NOFS);
                if (!local_system_inodes) {
                        mlog_errno(-ENOMEM);
                        /*
 
 {
        char *n, *s;
 
-       n = kzalloc(fh->len * 2, GFP_KERNEL);
+       n = kcalloc(fh->len, 2, GFP_KERNEL);
        if (!n)
                return -ENOMEM;
 
 
        /* If there are mixed files and directories we need a new table */
        if (nr_dirs && nr_files) {
                struct ctl_table *new;
-               files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
+               files = kcalloc(nr_files + 1, sizeof(struct ctl_table),
                                GFP_KERNEL);
                if (!files)
                        goto out;
 
                        if (blocks_needed == 1) {
                                un = &unf_single;
                        } else {
-                               un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_NOFS);
+                               un = kcalloc(min(blocks_needed, max_to_insert),
+                                            UNFM_P_SIZE, GFP_NOFS);
                                if (!un) {
                                        un = &unf_single;
                                        blocks_needed = 1;
 
                struct udf_vds_record *new_loc;
                unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
 
-               new_loc = kzalloc(sizeof(*new_loc) * new_size, GFP_KERNEL);
+               new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
                if (!new_loc)
                        return ERR_PTR(-ENOMEM);
                memcpy(new_loc, data->part_descs_loc,
 
        memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
        data.size_part_descs = PART_DESC_ALLOC_STEP;
-       data.part_descs_loc = kzalloc(sizeof(*data.part_descs_loc) *
-                                       data.size_part_descs, GFP_KERNEL);
+       data.part_descs_loc = kcalloc(data.size_part_descs,
+                                     sizeof(*data.part_descs_loc),
+                                     GFP_KERNEL);
        if (!data.part_descs_loc)
                return -ENOMEM;
 
 
                insn->imm = 1;
        }
 
-       func = kzalloc(sizeof(prog) * env->subprog_cnt, GFP_KERNEL);
+       func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
        if (!func)
                return -ENOMEM;
 
 
        }
        if (!s->usable)
                return KDB_NOTIMP;
-       s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
+       s->command = kcalloc(s->count + 1, sizeof(*(s->command)), GFP_KDB);
        if (!s->command) {
                kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
                           cmdstr);
 
        if (unlikely(!area))
                goto out;
 
-       area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
+       area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
+                              GFP_KERNEL);
        if (!area->bitmap)
                goto free_area;
 
 
        }
 
        if (nwriters_stress) {
-               writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
+               writer_tasks = kcalloc(cxt.nrealwriters_stress,
+                                      sizeof(writer_tasks[0]),
                                       GFP_KERNEL);
                if (writer_tasks == NULL) {
                        VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
        }
 
        if (cxt.cur_ops->readlock) {
-               reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
+               reader_tasks = kcalloc(cxt.nrealreaders_stress,
+                                      sizeof(reader_tasks[0]),
                                       GFP_KERNEL);
                if (reader_tasks == NULL) {
                        VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
 
        struct cfs_rq *cfs_rq;
        int i;
 
-       tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
+       tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
        if (!tg->cfs_rq)
                goto err;
-       tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
+       tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
        if (!tg->se)
                goto err;
 
 
        struct sched_rt_entity *rt_se;
        int i;
 
-       tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
+       tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
-       tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
+       tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
        if (!tg->rt_se)
                goto err;
 
 
                if (IS_ERR(kbuf))
                        return PTR_ERR(kbuf);
 
-               tmp_bitmap = kzalloc(BITS_TO_LONGS(bitmap_len) * sizeof(unsigned long),
+               tmp_bitmap = kcalloc(BITS_TO_LONGS(bitmap_len),
+                                    sizeof(unsigned long),
                                     GFP_KERNEL);
                if (!tmp_bitmap) {
                        kfree(kbuf);
 
         */
        size = FTRACE_PROFILE_HASH_SIZE;
 
-       stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
+       stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
 
        if (!stat->hash)
                return -ENOMEM;
 
 
        if (mask == TRACE_ITER_RECORD_TGID) {
                if (!tgid_map)
-                       tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
+                       tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
+                                          sizeof(*tgid_map),
                                           GFP_KERNEL);
                if (!tgid_map) {
                        tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
 
         * available.  Build one from cpu_to_node() which should have been
         * fully initialized by now.
         */
-       tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
+       tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
        BUG_ON(!tbl);
 
        for_each_node(node)
 
        slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
        if (!slot)
                goto out_fail;
-       element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
+       element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
        if (!element)
                goto out_fail;
 
 
                kzfree(a->d);
                a->d = p;
        } else {
-               a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
+               a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
                if (!a->d)
                        return -ENOMEM;
        }
 
        if (x[0] == x[1]) {
                /* Increase the buffer size */
                mutex_unlock(&slab_mutex);
-               m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+               m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
+                                    GFP_KERNEL);
                if (!m->private) {
                        /* Too bad, we are really out */
                        m->private = x;
 
 #ifdef CONFIG_SLUB_DEBUG
        void *addr = page_address(page);
        void *p;
-       unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-                                    sizeof(long), GFP_ATOMIC);
+       unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects),
+                                    sizeof(long),
+                                    GFP_ATOMIC);
        if (!map)
                return;
        slab_err(s, page, text, s->name);
        int x;
        unsigned long *nodes;
 
-       nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+       nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
        if (!nodes)
                return -ENOMEM;
 
 
        mdb->max = max;
        mdb->old = old;
 
-       mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
+       mdb->mhash = kcalloc(max, sizeof(*mdb->mhash), GFP_ATOMIC);
        if (!mdb->mhash) {
                kfree(mdb);
                return -ENOMEM;
 
                        }
 
                        /* create and init array for received CAN frames */
-                       op->last_frames = kzalloc(msg_head->nframes * op->cfsiz,
+                       op->last_frames = kcalloc(msg_head->nframes,
+                                                 op->cfsiz,
                                                  GFP_KERNEL);
                        if (!op->last_frames) {
                                kfree(op->frames);
 
        memset(&info, 0, sizeof(info));
        info.cmd = ETHTOOL_GSSET_INFO;
 
-       info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
+       info_buf = kcalloc(n_bits, sizeof(u32), GFP_USER);
        if (!info_buf)
                return -ENOMEM;
 
        if (info.cmd == ETHTOOL_GRXCLSRLALL) {
                if (info.rule_cnt > 0) {
                        if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
-                               rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
+                               rule_buf = kcalloc(info.rule_cnt, sizeof(u32),
                                                   GFP_USER);
                        if (!rule_buf)
                                return -ENOMEM;
 
 {
        void *hdr;
        int i, pages = 0;
-       uint32_t *buf = kzalloc(32 * sizeof(uint32_t), GFP_KERNEL);
+       uint32_t *buf = kcalloc(32, sizeof(uint32_t), GFP_KERNEL);
 
        pr_debug("%s\n", __func__);
 
 
                struct nlattr *mx;
                int len = 0;
 
-               mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
+               mx = kcalloc(3, nla_total_size(4), GFP_KERNEL);
                if (!mx)
                        return -ENOMEM;
 
 
 
        hash = rcu_dereference(nh->nh_exceptions);
        if (!hash) {
-               hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
+               hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
                if (!hash)
                        goto out_unlock;
                rcu_assign_pointer(nh->nh_exceptions, hash);
 
        int err, i, j;
 
        net->ipv6.icmp_sk =
-               kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
+               kcalloc(nr_cpu_ids, sizeof(struct sock *), GFP_KERNEL);
        if (!net->ipv6.icmp_sk)
                return -ENOMEM;
 
 
        lockdep_assert_held(&local->mtx);
        lockdep_assert_held(&local->chanctx_mtx);
 
-       vif_chsw = kzalloc(sizeof(vif_chsw[0]) * n_vifs, GFP_KERNEL);
+       vif_chsw = kcalloc(n_vifs, sizeof(vif_chsw[0]), GFP_KERNEL);
        if (!vif_chsw)
                return -ENOMEM;
 
 
                        max_rates = sband->n_bitrates;
        }
 
-       mi->r = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
+       mi->r = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
        if (!mi->r)
                goto error;
 
 
        if (!msp)
                return NULL;
 
-       msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
+       msp->ratelist = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
        if (!msp->ratelist)
                goto error;
 
 
                }
        }
 
-       ie = kzalloc(num_bands * iebufsz, GFP_KERNEL);
+       ie = kcalloc(iebufsz, num_bands, GFP_KERNEL);
        if (!ie) {
                ret = -ENOMEM;
                goto out;
 
        if (WARN_ON(res))
                return res;
 
-       funcs = kzalloc((sdata->local->hw.max_nan_de_entries + 1) *
-                       sizeof(*funcs), GFP_KERNEL);
+       funcs = kcalloc(sdata->local->hw.max_nan_de_entries + 1,
+                       sizeof(*funcs),
+                       GFP_KERNEL);
        if (!funcs)
                return -ENOMEM;
 
 
        if (err < 0)
                return err;
 
-       ops = kzalloc(sizeof(struct nf_hook_ops) * n, GFP_KERNEL);
+       ops = kcalloc(n, sizeof(struct nf_hook_ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;
 
 
        if (class_max > NF_CT_MAX_EXPECT_CLASSES)
                return -EOVERFLOW;
 
-       expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
-                               class_max, GFP_KERNEL);
+       expect_policy = kcalloc(class_max,
+                               sizeof(struct nf_conntrack_expect_policy),
+                               GFP_KERNEL);
        if (expect_policy == NULL)
                return -ENOMEM;
 
 
                return -1;
        }
 
-       dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
+       dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
        if (dev_nr == NULL) {
                printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
                return -1;
 
  */
 int ovs_vport_init(void)
 {
-       dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+       dev_table = kcalloc(VPORT_HASH_BUCKETS, sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dev_table)
                return -ENOMEM;
 
        rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
        rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;
 
-       rds_ibdev->vector_load = kzalloc(sizeof(int) * device->num_comp_vectors,
+       rds_ibdev->vector_load = kcalloc(device->num_comp_vectors,
+                                        sizeof(int),
                                         GFP_KERNEL);
        if (!rds_ibdev->vector_load) {
                pr_err("RDS/IB: %s failed to allocate vector memory\n",
 
 
        rose_callsign = null_ax25_address;
 
-       dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
+       dev_rose = kcalloc(rose_ndevs, sizeof(struct net_device *),
+                          GFP_KERNEL);
        if (dev_rose == NULL) {
                printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
                rc = -ENOMEM;
 
                return 0;
 
        /* Allocated the array of pointers to transorms */
-       ep->auth_hmacs = kzalloc(sizeof(struct crypto_shash *) *
-                                SCTP_AUTH_NUM_HMACS, gfp);
+       ep->auth_hmacs = kcalloc(SCTP_AUTH_NUM_HMACS,
+                                sizeof(struct crypto_shash *),
+                                gfp);
        if (!ep->auth_hmacs)
                return -ENOMEM;
 
 
                                   GFP_KERNEL);
        if (!link->wr_rx_sges)
                goto no_mem_wr_tx_sges;
-       link->wr_tx_mask = kzalloc(
-               BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*link->wr_tx_mask),
-               GFP_KERNEL);
+       link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
+                                  sizeof(*link->wr_tx_mask),
+                                  GFP_KERNEL);
        if (!link->wr_tx_mask)
                goto no_mem_wr_rx_sges;
        link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
 
 static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
 {
        arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);
-       arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL);
+       arg->pages = kcalloc(arg->npages, sizeof(struct page *), GFP_KERNEL);
        /*
         * XXX: actual pages are allocated by xdr layer in
         * xdr_partial_copy_from_skb.
 
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);
 
-       cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head),
+       cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
                                 GFP_KERNEL);
        if (cd->hash_table == NULL) {
                kfree(cd);
 
        struct nlattr **tb;
        int err;
 
-       tb = kzalloc(NUM_NL80211_ATTR * sizeof(*tb), GFP_KERNEL);
+       tb = kcalloc(NUM_NL80211_ATTR, sizeof(*tb), GFP_KERNEL);
        if (!tb)
                return -ENOMEM;
 
 
                        func->srf_num_macs = n_entries;
                        func->srf_macs =
-                               kzalloc(sizeof(*func->srf_macs) * n_entries,
+                               kcalloc(n_entries, sizeof(*func->srf_macs),
                                        GFP_KERNEL);
                        if (!func->srf_macs) {
                                err = -ENOMEM;
 
                /* currently 4 exec bits and entries 0-3 are reserved iupcx */
                if (size > 16 - 4)
                        goto fail;
-               profile->file.trans.table = kzalloc(sizeof(char *) * size,
+               profile->file.trans.table = kcalloc(size, sizeof(char *),
                                                    GFP_KERNEL);
                if (!profile->file.trans.table)
                        goto fail;
 
        int rc = 0;
        struct policy_file file = { data, len }, *fp = &file;
 
-       oldpolicydb = kzalloc(2 * sizeof(*oldpolicydb), GFP_KERNEL);
+       oldpolicydb = kcalloc(2, sizeof(*oldpolicydb), GFP_KERNEL);
        if (!oldpolicydb) {
                rc = -ENOMEM;
                goto out;
 
        __le32 *reg;
        int i;
 
-       reg = kzalloc(sizeof(__le32) * 18, GFP_KERNEL);
+       reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
        if (reg == NULL)
                return -ENOMEM;
 
 
 
        /* Get AMIXER resource */
        n_amixer = (n_amixer < 2) ? 2 : n_amixer;
-       apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
+       apcm->amixers = kcalloc(n_amixer, sizeof(void *), GFP_KERNEL);
        if (!apcm->amixers) {
                err = -ENOMEM;
                goto error1;
        }
 
        if (n_srcc) {
-               apcm->srccs = kzalloc(sizeof(void *)*n_srcc, GFP_KERNEL);
+               apcm->srccs = kcalloc(n_srcc, sizeof(void *), GFP_KERNEL);
                if (!apcm->srccs)
                        return -ENOMEM;
        }
        if (n_amixer) {
-               apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
+               apcm->amixers = kcalloc(n_amixer, sizeof(void *), GFP_KERNEL);
                if (!apcm->amixers) {
                        err = -ENOMEM;
                        goto error1;
                }
        }
-       apcm->srcimps = kzalloc(sizeof(void *)*n_srcimp, GFP_KERNEL);
+       apcm->srcimps = kcalloc(n_srcimp, sizeof(void *), GFP_KERNEL);
        if (!apcm->srcimps) {
                err = -ENOMEM;
                goto error1;
 
        /* Get AMIXER resource */
        n_amixer = (n_amixer < 2) ? 2 : n_amixer;
-       apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
+       apcm->amixers = kcalloc(n_amixer, sizeof(void *), GFP_KERNEL);
        if (!apcm->amixers) {
                err = -ENOMEM;
                goto error1;
        num_daios = ((atc->model == CTSB1270) ? 8 : 7);
        num_srcs = ((atc->model == CTSB1270) ? 6 : 4);
 
-       atc->daios = kzalloc(sizeof(void *)*num_daios, GFP_KERNEL);
+       atc->daios = kcalloc(num_daios, sizeof(void *), GFP_KERNEL);
        if (!atc->daios)
                return -ENOMEM;
 
-       atc->srcs = kzalloc(sizeof(void *)*num_srcs, GFP_KERNEL);
+       atc->srcs = kcalloc(num_srcs, sizeof(void *), GFP_KERNEL);
        if (!atc->srcs)
                return -ENOMEM;
 
-       atc->srcimps = kzalloc(sizeof(void *)*num_srcs, GFP_KERNEL);
+       atc->srcimps = kcalloc(num_srcs, sizeof(void *), GFP_KERNEL);
        if (!atc->srcimps)
                return -ENOMEM;
 
-       atc->pcm = kzalloc(sizeof(void *)*(2*4), GFP_KERNEL);
+       atc->pcm = kcalloc(2 * 4, sizeof(void *), GFP_KERNEL);
        if (!atc->pcm)
                return -ENOMEM;
 
 
        if (err)
                return err;
 
-       dao->imappers = kzalloc(sizeof(void *)*desc->msr*2, GFP_KERNEL);
+       dao->imappers = kzalloc(array3_size(sizeof(void *), desc->msr, 2),
+                               GFP_KERNEL);
        if (!dao->imappers) {
                err = -ENOMEM;
                goto error1;
 
        if (!mixer)
                return -ENOMEM;
 
-       mixer->amixers = kzalloc(sizeof(void *)*(NUM_CT_AMIXERS*CHN_NUM),
+       mixer->amixers = kcalloc(NUM_CT_AMIXERS * CHN_NUM, sizeof(void *),
                                 GFP_KERNEL);
        if (!mixer->amixers) {
                err = -ENOMEM;
                goto error1;
        }
-       mixer->sums = kzalloc(sizeof(void *)*(NUM_CT_SUMS*CHN_NUM), GFP_KERNEL);
+       mixer->sums = kcalloc(NUM_CT_SUMS * CHN_NUM, sizeof(void *),
+                             GFP_KERNEL);
        if (!mixer->sums) {
                err = -ENOMEM;
                goto error2;
 
                return err;
 
        /* Reserve memory for imapper nodes */
-       srcimp->imappers = kzalloc(sizeof(struct imapper)*desc->msr,
+       srcimp->imappers = kcalloc(desc->msr, sizeof(struct imapper),
                                   GFP_KERNEL);
        if (!srcimp->imappers) {
                err = -ENOMEM;
 
        spec->chip_init_verbs = ca0132_init_verbs0;
        if (spec->quirk == QUIRK_SBZ)
                spec->sbz_init_verbs = sbz_init_verbs;
-       spec->spec_init_verbs = kzalloc(sizeof(struct hda_verb) * NUM_SPEC_VERBS, GFP_KERNEL);
+       spec->spec_init_verbs = kcalloc(NUM_SPEC_VERBS,
+                                       sizeof(struct hda_verb),
+                                       GFP_KERNEL);
        if (!spec->spec_init_verbs)
                return -ENOMEM;
 
 
                adsp_warn(dsp, "Algorithm list end %x 0x%x != 0xbedead\n",
                          pos + len, be32_to_cpu(val));
 
-       alg = kzalloc(len * 2, GFP_KERNEL | GFP_DMA);
+       alg = kcalloc(len, 2, GFP_KERNEL | GFP_DMA);
        if (!alg)
                return ERR_PTR(-ENOMEM);
 
 
 {
        int i;
 
-       ipc->msg = kzalloc(sizeof(struct ipc_message) *
-               IPC_EMPTY_LIST_SIZE, GFP_KERNEL);
+       ipc->msg = kcalloc(IPC_EMPTY_LIST_SIZE, sizeof(struct ipc_message),
+                          GFP_KERNEL);
        if (ipc->msg == NULL)
                return -ENOMEM;
 
 
        if (!rtd->dai_link->ops)
                rtd->dai_link->ops = &null_snd_soc_ops;
 
-       rtd->codec_dais = kzalloc(sizeof(struct snd_soc_dai *) *
-                                       dai_link->num_codecs,
+       rtd->codec_dais = kcalloc(dai_link->num_codecs,
+                                       sizeof(struct snd_soc_dai *),
                                        GFP_KERNEL);
        if (!rtd->codec_dais) {
                kfree(rtd);
 
                        continue;
 
                if (w->num_kcontrols) {
-                       w->kcontrols = kzalloc(w->num_kcontrols *
+                       w->kcontrols = kcalloc(w->num_kcontrols,
                                                sizeof(struct snd_kcontrol *),
                                                GFP_KERNEL);
                        if (!w->kcontrols) {
 
        int i, ret;
 
        se->dobj.control.dtexts =
-               kzalloc(sizeof(char *) * ec->items, GFP_KERNEL);
+               kcalloc(ec->items, sizeof(char *), GFP_KERNEL);
        if (se->dobj.control.dtexts == NULL)
                return -ENOMEM;
 
 
        int i;
 
        for (i = 0; i < PCM_N_URBS; i++) {
-               rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
-                               * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+               rt->out_urbs[i].buffer = kcalloc(PCM_MAX_PACKET_SIZE,
+                                                PCM_N_PACKETS_PER_URB,
+                                                GFP_KERNEL);
                if (!rt->out_urbs[i].buffer)
                        return -ENOMEM;
-               rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
-                               * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+               rt->in_urbs[i].buffer = kcalloc(PCM_MAX_PACKET_SIZE,
+                                               PCM_N_PACKETS_PER_URB,
+                                               GFP_KERNEL);
                if (!rt->in_urbs[i].buffer)
                        return -ENOMEM;
        }
 
        struct usb_line6 *line6 = line6pcm->line6;
        int i;
 
-       line6pcm->in.urbs = kzalloc(
-               sizeof(struct urb *) * line6->iso_buffers, GFP_KERNEL);
+       line6pcm->in.urbs = kcalloc(line6->iso_buffers, sizeof(struct urb *),
+                                   GFP_KERNEL);
        if (line6pcm->in.urbs == NULL)
                return -ENOMEM;
 
 
        struct usb_line6 *line6 = line6pcm->line6;
        int i;
 
-       line6pcm->out.urbs = kzalloc(
-               sizeof(struct urb *) * line6->iso_buffers, GFP_KERNEL);
+       line6pcm->out.urbs = kcalloc(line6->iso_buffers, sizeof(struct urb *),
+                                    GFP_KERNEL);
        if (line6pcm->out.urbs == NULL)
                return -ENOMEM;
 
 
 
        nr_vcpus = atomic_read(&kvm->online_vcpus);
 
-       dist->its_vm.vpes = kzalloc(sizeof(*dist->its_vm.vpes) * nr_vcpus,
+       dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
                                    GFP_KERNEL);
        if (!dist->its_vm.vpes)
                return -ENOMEM;