                return -ENOMEM;
        }
        for_each_possible_cpu(i) {
-               if (!alloc_cpumask_var_node(
+               if (!zalloc_cpumask_var_node(
                        &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
                        GFP_KERNEL, cpu_to_node(i))) {
 
 
                goto err0;
        }
 
-       if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+       if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
                                                                GFP_KERNEL)) {
                retval = -ENOMEM;
                goto err05;
 
        /* notify BIOS that we exist */
        acpi_processor_notify_smm(THIS_MODULE);
 
-       if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
                printk(KERN_ERR PFX
                                "unable to alloc powernow_k8_data cpumask\n");
                ret_val = -ENOMEM;
 
 
        if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
                return -ENOMEM;
-       if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
+       if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
                free_cpumask_var(saved_mask);
                return -ENOMEM;
        }
 
        if (!mce_available(&boot_cpu_data))
                return -EIO;
 
-       alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
+       zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
 
        err = mce_init_banks();
        if (err)
 
                return 0;
 
        for_each_possible_cpu(cur_cpu)
-               alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
+               zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
                                       GFP_KERNEL, cpu_to_node(cur_cpu));
 
        uv_bau_retry_limit = 1;
 
        if (!pr)
                return -ENOMEM;
 
-       if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
                kfree(pr);
                return -ENOMEM;
        }
 
                ret = -ENOMEM;
                goto nomem_out;
        }
-       if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
                free_cpumask_var(policy->cpus);
                kfree(policy);
                ret = -ENOMEM;
 
                vec->count = 0;
                if (bootmem)
                        alloc_bootmem_cpumask_var(&vec->mask);
-               else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL))
+               else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
                        goto cleanup;
        }
 
 
        unsigned int i;
 
        for_each_possible_cpu(i)
-               alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
+               zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
                                        GFP_KERNEL, cpu_to_node(i));
 }
 #endif /* CONFIG_SMP */
 
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+               if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return NOTIFY_BAD;
                break;
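
Every hunk above makes the same substitution: a cpumask_var_t whose users expect it to start out empty is switched from alloc_cpumask_var()/alloc_cpumask_var_node() to the zeroing variants zalloc_cpumask_var()/zalloc_cpumask_var_node(). With CONFIG_CPUMASK_OFFSTACK=y the mask lives in separately allocated memory, so the plain allocator can hand back stale bits; the z-variants guarantee a cleared mask. The userspace sketch below illustrates the difference only; the demo_* names, the toy mask type and NR_CPUS_DEMO are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS_DEMO 64
typedef struct {
	unsigned long bits[NR_CPUS_DEMO / (8 * sizeof(unsigned long))];
} demo_cpumask;

/* Analogue of alloc_cpumask_var() with CONFIG_CPUMASK_OFFSTACK=y:
 * heap allocation, contents left undefined. */
static bool demo_alloc_cpumask(demo_cpumask **mask)
{
	*mask = malloc(sizeof(**mask));
	return *mask != NULL;
}

/* Analogue of zalloc_cpumask_var(): same allocation, but every bit
 * of the new mask is cleared before it is handed to the caller. */
static bool demo_zalloc_cpumask(demo_cpumask **mask)
{
	if (!demo_alloc_cpumask(mask))
		return false;
	memset(*mask, 0, sizeof(**mask));
	return true;
}

int main(void)
{
	demo_cpumask *m;

	if (!demo_zalloc_cpumask(&m))
		return 1;
	/* Safe to test or OR bits into the mask right away; with the
	 * non-zeroing allocator this word could contain garbage. */
	printf("first word after zeroing alloc: %lu\n", m->bits[0]);
	free(m);
	return 0;
}

In the kernel itself the zeroing variants are thin wrappers: with offstack cpumasks they pass __GFP_ZERO to the underlying allocation, and with fixed-size cpumasks they simply cpumask_clear() the storage, so the call sites above no longer have to rely on the mask happening to be zero-initialized.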