Remove open-coded equivalents of zalloc_cpumask_var() and zalloc_cpumask_var_node(): wherever an alloc_cpumask_var()/alloc_cpumask_var_node() call was paired with a cpumask_clear() of the freshly allocated mask, call the z-variant instead and drop the explicit clear.
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
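
For reference: the z-variants return an already-zeroed mask, so each
alloc_cpumask_var*() + cpumask_clear() pair below collapses to a single
call with no change in behaviour. A minimal sketch of the helpers,
paraphrased from lib/cpumask.c and include/linux/cpumask.h (exact
definitions may differ slightly between trees):

#ifdef CONFIG_CPUMASK_OFFSTACK
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
        /* Zeroing is delegated to the underlying allocator. */
        return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
        return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
#else
/* cpumask_var_t is an on-stack array; "allocation" cannot fail. */
static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
        cpumask_clear(*mask);
        return true;
}
#endif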
 
        cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
        if (cfg) {
-               if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+               if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
                        kfree(cfg);
                        cfg = NULL;
-               } else if (!alloc_cpumask_var_node(&cfg->old_domain,
+               } else if (!zalloc_cpumask_var_node(&cfg->old_domain,
                                                          GFP_ATOMIC, node)) {
                        free_cpumask_var(cfg->domain);
                        kfree(cfg);
                        cfg = NULL;
-               } else {
-                       cpumask_clear(cfg->domain);
-                       cpumask_clear(cfg->old_domain);
                }
        }
 
 
 void __init init_c1e_mask(void)
 {
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
-       if (pm_idle == c1e_idle) {
-               alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-               cpumask_clear(c1e_mask);
-       }
+       if (pm_idle == c1e_idle)
+               zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
 
 #endif
        current_thread_info()->cpu = 0;  /* needed? */
        for_each_possible_cpu(i) {
-               alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-               alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-               alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-               cpumask_clear(per_cpu(cpu_core_map, i));
-               cpumask_clear(per_cpu(cpu_sibling_map, i));
-               cpumask_clear(cpu_data(i).llc_shared_map);
+               zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
        }
        set_cpu_sibling_map(0);
 
 
        struct acpi_processor *match_pr;
        struct acpi_psd_package *match_pdomain;
 
-       if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
                return -ENOMEM;
 
        mutex_lock(&performance_mutex);
         * Now that we have _PSD data from all CPUs, lets setup P-state 
         * domain info.
         */
-       cpumask_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr)
 
        struct acpi_tsd_package *pdomain, *match_pdomain;
        struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
-       if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
                return -ENOMEM;
 
        /*
        if (retval)
                goto err_ret;
 
-       cpumask_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr)
 
        int count;
        int cpu;
 
-       if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+       if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
                printk(KERN_WARNING
                       "sfc: RSS disabled due to allocation failure\n");
                return 1;
        }
 
-       cpumask_clear(core_mask);
        count = 0;
        for_each_online_cpu(cpu) {
                if (!cpumask_test_cpu(cpu, core_mask)) {
 
 {
        int err;
 
-       if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
-       cpumask_clear(marked_cpus);
 
        start_cpu_work();
 
 
        if (current_trace)
                *iter->trace = *current_trace;
 
-       if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
                goto fail;
 
-       cpumask_clear(iter->started);
-
        if (current_trace && current_trace->print_max)
                iter->tr = &max_tr;
        else
        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;
 
-       if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
                goto out_free_tracing_cpumask;
 
        /* To save memory, keep the ring buffer size to its minimum */
 
        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);
-       cpumask_clear(tracing_reader_cpumask);
 
        /* TODO: make the number of buffers hot pluggable with CPUS */
        global_trace.buffer = ring_buffer_alloc(ring_buf_size,
 
        bool called = true;
        struct kvm_vcpu *vcpu;
 
-       if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
-               cpumask_clear(cpus);
+       zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
        spin_lock(&kvm->requests_lock);
        me = smp_processor_id();
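
Note on the final (kvm) hunk: the zalloc_cpumask_var() return value is
intentionally ignored, as the alloc_cpumask_var() return value was before.
With CONFIG_CPUMASK_OFFSTACK=n the call cannot fail, and with
CONFIG_CPUMASK_OFFSTACK=y a failed allocation leaves cpus == NULL, which
the surrounding function tolerates by broadcasting instead. A hedged
sketch of that fallback pattern (condensed from my reading of
make_all_cpus_request(), not a verbatim quote):

        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
        /* ... mark remote CPUs, skipping the set if cpus == NULL ... */
        if (unlikely(cpus == NULL))
                /* No mask available: IPI every online CPU. */
                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
        else if (!cpumask_empty(cpus))
                smp_call_function_many(cpus, ack_flush, NULL, 1);
        free_cpumask_var(cpus);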