/* Make sure we are running on right CPU */
        saved_mask = current->cpus_allowed;
-       retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (retval)
                return -1;
 
                 cx->address);
 
 out:
-       set_cpus_allowed(current, saved_mask);
+       set_cpus_allowed_ptr(current, &saved_mask);
        return retval;
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
        cpumask_t saved_mask = current->cpus_allowed;
        cmd->val = 0;
 
-       set_cpus_allowed(current, cmd->mask);
+       set_cpus_allowed_ptr(current, &cmd->mask);
        do_drv_read(cmd);
-       set_cpus_allowed(current, saved_mask);
+       set_cpus_allowed_ptr(current, &saved_mask);
 }
 
 static void drv_write(struct drv_cmd *cmd)
        unsigned int i;
 
        for_each_cpu_mask(i, cmd->mask) {
-               set_cpus_allowed(current, cpumask_of_cpu(i));
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
                do_drv_write(cmd);
        }
 
-       set_cpus_allowed(current, saved_mask);
+       set_cpus_allowed_ptr(current, &saved_mask);
        return;
 }
 
-static u32 get_cur_val(cpumask_t mask)
+static u32 get_cur_val(const cpumask_t *mask)
 {
        struct acpi_processor_performance *perf;
        struct drv_cmd cmd;
 
-       if (unlikely(cpus_empty(mask)))
+       if (unlikely(cpus_empty(*mask)))
                return 0;
 
-       switch (per_cpu(drv_data, first_cpu(mask))->cpu_feature) {
+       switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
-               perf = per_cpu(drv_data, first_cpu(mask))->acpi_data;
+               perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data;
                cmd.addr.io.port = perf->control_register.address;
                cmd.addr.io.bit_width = perf->control_register.bit_width;
                break;
                return 0;
        }
 
-       cmd.mask = mask;
+       cmd.mask = *mask;
 
        drv_read(&cmd);
 
        unsigned int retval;
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (get_cpu() != cpu) {
                /* We were not able to run on requested processor */
                put_cpu();
        retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
 
        put_cpu();
-       set_cpus_allowed(current, saved_mask);
+       set_cpus_allowed_ptr(current, &saved_mask);
 
        dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
        return retval;
                return 0;
        }
 
-       freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
+       freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
        dprintk("cur freq = %u\n", freq);
 
        return freq;
 }
 
-static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
+static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
                                struct acpi_cpufreq_data *data)
 {
        unsigned int cur_freq;
        drv_write(&cmd);
 
        if (acpi_pstate_strict) {
-               if (!check_freqs(cmd.mask, freqs.new, data)) {
+               if (!check_freqs(&cmd.mask, freqs.new, data)) {
                        dprintk("acpi_cpufreq_target failed (%d)\n",
                                policy->cpu);
                        return -EAGAIN;
 
 
 static int check_supported_cpu(unsigned int cpu)
 {
-       cpumask_t oldmask = CPU_MASK_ALL;
+       cpumask_t oldmask;
        u32 eax, ebx, ecx, edx;
        unsigned int rc = 0;
 
        oldmask = current->cpus_allowed;
-       set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
        if (smp_processor_id() != cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
        rc = 1;
 
 out:
-       set_cpus_allowed(current, oldmask);
+       set_cpus_allowed_ptr(current, &oldmask);
        return rc;
 }
 
 /* Driver entry point to switch to the target frequency */
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
-       cpumask_t oldmask = CPU_MASK_ALL;
+       cpumask_t oldmask;
        struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
        u32 checkfid;
        u32 checkvid;
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
        ret = 0;
 
 err_out:
-       set_cpus_allowed(current, oldmask);
+       set_cpus_allowed_ptr(current, &oldmask);
        return ret;
 }
 
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
        struct powernow_k8_data *data;
-       cpumask_t oldmask = CPU_MASK_ALL;
+       cpumask_t oldmask;
        int rc;
 
        if (!cpu_online(pol->cpu))
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
                fidvid_msr_init();
 
        /* run on any CPU again */
-       set_cpus_allowed(current, oldmask);
+       set_cpus_allowed_ptr(current, &oldmask);
 
        if (cpu_family == CPU_HW_PSTATE)
                pol->cpus = cpumask_of_cpu(pol->cpu);
        return 0;
 
 err_out:
-       set_cpus_allowed(current, oldmask);
+       set_cpus_allowed_ptr(current, &oldmask);
        powernow_k8_cpu_exit_acpi(data);
 
        kfree(data);
        if (!data)
                return -EINVAL;
 
-       set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (smp_processor_id() != cpu) {
-               printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);
-               set_cpus_allowed(current, oldmask);
+               printk(KERN_ERR PFX
+                       "limiting to CPU %d failed in powernowk8_get\n", cpu);
+               set_cpus_allowed_ptr(current, &oldmask);
                return 0;
        }
 
                goto out;
 
        if (cpu_family == CPU_HW_PSTATE)
-               khz = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
+               khz = find_khz_freq_from_pstate(data->powernow_table,
+                                               data->currpstate);
        else
                khz = find_khz_freq_from_fid(data->currfid);
 
 
 out:
-       set_cpus_allowed(current, oldmask);
+       set_cpus_allowed_ptr(current, &oldmask);
        return khz;
 }
 
 
        cpumask_t saved_mask;
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (smp_processor_id() != cpu)
                return 0;
 
                clock_freq = extract_clock(l, cpu, 1);
        }
 
-       set_cpus_allowed(current, saved_mask);
+       set_cpus_allowed_ptr(current, &saved_mask);
        return clock_freq;
 }
 
                else
                        cpu_set(j, set_mask);
 
-               set_cpus_allowed(current, set_mask);
+               set_cpus_allowed_ptr(current, &set_mask);
                preempt_disable();
                if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
                        dprintk("couldn't limit to CPUs in this domain\n");
 
                if (!cpus_empty(covered_cpus)) {
                        for_each_cpu_mask(j, covered_cpus) {
-                               set_cpus_allowed(current, cpumask_of_cpu(j));
+                               set_cpus_allowed_ptr(current,
+                                                    &cpumask_of_cpu(j));
                                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
                        }
                }
                        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
                }
        }
-       set_cpus_allowed(current, saved_mask);
+       set_cpus_allowed_ptr(current, &saved_mask);
        return 0;
 
 migrate_end:
        preempt_enable();
-       set_cpus_allowed(current, saved_mask);
+       set_cpus_allowed_ptr(current, &saved_mask);
        return 0;
 }
 
 
        return 0;
 }
 
-static unsigned int _speedstep_get(cpumask_t cpus)
+static unsigned int _speedstep_get(const cpumask_t *cpus)
 {
        unsigned int speed;
        cpumask_t cpus_allowed;
 
        cpus_allowed = current->cpus_allowed;
-       set_cpus_allowed(current, cpus);
+       set_cpus_allowed_ptr(current, cpus);
        speed = speedstep_get_processor_frequency(speedstep_processor);
-       set_cpus_allowed(current, cpus_allowed);
+       set_cpus_allowed_ptr(current, &cpus_allowed);
        dprintk("detected %u kHz as current frequency\n", speed);
        return speed;
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-       return _speedstep_get(cpumask_of_cpu(cpu));
+       return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 
 /**
        if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
                return -EINVAL;
 
-       freqs.old = _speedstep_get(policy->cpus);
+       freqs.old = _speedstep_get(&policy->cpus);
        freqs.new = speedstep_freqs[newstate].frequency;
        freqs.cpu = policy->cpu;
 
        }
 
        /* switch to physical CPU where state is to be changed */
-       set_cpus_allowed(current, policy->cpus);
+       set_cpus_allowed_ptr(current, &policy->cpus);
 
        speedstep_set_state(newstate);
 
        /* allow to be run on all CPUs */
-       set_cpus_allowed(current, cpus_allowed);
+       set_cpus_allowed_ptr(current, &cpus_allowed);
 
        for_each_cpu_mask(i, policy->cpus) {
                freqs.cpu = i;
 #endif
 
        cpus_allowed = current->cpus_allowed;
-       set_cpus_allowed(current, policy->cpus);
+       set_cpus_allowed_ptr(current, &policy->cpus);
 
        /* detect low and high frequency and transition latency */
        result = speedstep_get_freqs(speedstep_processor,
                                     &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
                                     &policy->cpuinfo.transition_latency,
                                     &speedstep_set_state);
-       set_cpus_allowed(current, cpus_allowed);
+       set_cpus_allowed_ptr(current, &cpus_allowed);
        if (result)
                return result;
 
        /* get current speed setting */
-       speed = _speedstep_get(policy->cpus);
+       speed = _speedstep_get(&policy->cpus);
        if (!speed)
                return -EIO;
 
 
                return -ENOMEM;
 
        oldmask = current->cpus_allowed;
-       retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (retval)
                goto out;
 
                }
                cache_shared_cpu_map_setup(cpu, j);
        }
-       set_cpus_allowed(current, oldmask);
+       set_cpus_allowed_ptr(current, &oldmask);
 
 out:
        if (retval) {
 
 
                        if (!uci->valid)
                                continue;
-                       set_cpus_allowed(current, cpumask_of_cpu(cpu));
+                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
                        error = get_maching_microcode(new_mc, cpu);
                        if (error < 0)
                                goto out;
                vfree(new_mc);
        if (cursor < 0)
                error = cursor;
-       set_cpus_allowed(current, old);
+       set_cpus_allowed_ptr(current, &old);
        return error;
 }
 
                return 0;
 
        old = current->cpus_allowed;
-       set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
        /* Check if the microcode we have in memory matches the CPU */
        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
                        " sig=0x%x, pf=0x%x, rev=0x%x\n",
                        cpu, uci->sig, uci->pf, uci->rev);
 
-       set_cpus_allowed(current, old);
+       set_cpus_allowed_ptr(current, &old);
        return err;
 }
 
 
        old = current->cpus_allowed;
 
-       set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        mutex_lock(&microcode_mutex);
        collect_cpu_info(cpu);
        if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
                cpu_request_microcode(cpu);
        mutex_unlock(&microcode_mutex);
-       set_cpus_allowed(current, old);
+       set_cpus_allowed_ptr(current, &old);
 }
 
 static void microcode_fini_cpu(int cpu)
                old = current->cpus_allowed;
 
                get_online_cpus();
-               set_cpus_allowed(current, cpumask_of_cpu(cpu));
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
                mutex_lock(&microcode_mutex);
                if (uci->valid)
                        err = cpu_request_microcode(cpu);
                mutex_unlock(&microcode_mutex);
                put_online_cpus();
-               set_cpus_allowed(current, old);
+               set_cpus_allowed_ptr(current, &old);
        }
        if (err)
                return err;
 
                reboot_cpu_id = smp_processor_id();
 
        /* Make certain I only run on the appropriate processor */
-       set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
 
        /* O.K Now that I'm on the appropriate processor,
         * stop all of the others.