{
        struct knav_device *kdev = range->kdev;
        struct knav_acc_channel *acc;
-       unsigned long cpu_map;
+       struct cpumask *cpu_mask;
        int ret = 0, irq;
        u32 old, new;
 
        if (range->flags & RANGE_MULTI_QUEUE) {
                acc = range->acc;
                irq = range->irqs[0].irq;
-               cpu_map = range->irqs[0].cpu_map;
+               cpu_mask = range->irqs[0].cpu_mask;
        } else {
                acc = range->acc + queue;
                irq = range->irqs[queue].irq;
-               cpu_map = range->irqs[queue].cpu_map;
+               cpu_mask = range->irqs[queue].cpu_mask;
        }
 
        old = acc->open_mask;
                        acc->name, acc->name);
                ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
                                  range);
-               if (!ret && cpu_map) {
-                       ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+               if (!ret && cpu_mask) {
+                       ret = irq_set_affinity_hint(irq, cpu_mask);
                        if (ret) {
                                dev_warn(range->kdev->dev,
                                         "Failed to set IRQ affinity\n");
 
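irq_set_affinity_hint() stores the mask pointer it is given (it is later read back through /proc/irq/<nr>/affinity_hint), so the mask has to outlive the request_irq()/free_irq() window; passing to_cpumask() of a stack-local unsigned long, as the removed lines did, leaves a dangling pointer once the function returns. Below is a minimal, self-contained sketch of the pattern the new code follows; the example_* name, the handler/data parameters and the choice of CPU 1 are made up for illustration, this is not code from the driver.

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Request an IRQ and hint that it should be serviced on CPU 1.  The mask is
 * device-managed so it stays valid for as long as the hint is installed. */
static int example_request_irq_with_hint(struct device *dev, unsigned int irq,
					 irq_handler_t handler, void *data)
{
	struct cpumask *mask;
	int ret;

	mask = devm_kzalloc(dev, cpumask_size(), GFP_KERNEL);
	if (!mask)
		return -ENOMEM;
	cpumask_set_cpu(1, mask);

	ret = request_irq(irq, handler, 0, dev_name(dev), data);
	if (ret)
		return ret;

	ret = irq_set_affinity_hint(irq, mask);
	if (ret)
		dev_warn(dev, "Failed to set IRQ affinity\n");

	return 0;
}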
                          struct knav_queue_inst *inst)
 {
        unsigned queue = inst->id - range->queue_base;
-       unsigned long cpu_map;
+       struct cpumask *cpu_mask;
        int ret = 0, irq;
 
        if (range->flags & RANGE_HAS_IRQ) {
                irq = range->irqs[queue].irq;
-               cpu_map = range->irqs[queue].cpu_map;
+               cpu_mask = range->irqs[queue].cpu_mask;
                ret = request_irq(irq, knav_queue_int_handler, 0,
                                        inst->irq_name, inst);
                if (ret)
                        return ret;
                disable_irq(irq);
-               if (cpu_map) {
-                       ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+               if (cpu_mask) {
+                       ret = irq_set_affinity_hint(irq, cpu_mask);
                        if (ret) {
                                dev_warn(range->kdev->dev,
                                         "Failed to set IRQ affinity\n");
 
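The matching teardown paths are not part of this excerpt, but once a hint has been installed it should also be cleared, by passing NULL to irq_set_affinity_hint(), before the interrupt is released; otherwise the descriptor keeps pointing at memory the driver may later free (here, devm-allocated storage released at unbind). A hedged sketch of that counterpart, with illustrative names only:

/* Counterpart to the setup sketch above: drop the affinity hint before
 * freeing the interrupt so no stale mask pointer is left behind. */
static void example_free_irq_with_hint(unsigned int irq, void *data)
{
	irq_set_affinity_hint(irq, NULL);
	free_irq(irq, data);
}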
                range->num_irqs++;
 
-               if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
-                       range->irqs[i].cpu_map =
-                               (oirq.args[2] & 0x0000ff00) >> 8;
+               if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
+                       unsigned long mask;
+                       int bit;
+
+                       range->irqs[i].cpu_mask = devm_kzalloc(dev,
+                                                              cpumask_size(), GFP_KERNEL);
+                       if (!range->irqs[i].cpu_mask)
+                               return -ENOMEM;
+
+                       mask = (oirq.args[2] & 0x0000ff00) >> 8;
+                       for_each_set_bit(bit, &mask, BITS_PER_LONG)
+                               cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
+               }
        }
 
        range->num_irqs = min(range->num_irqs, range->num_queues);
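For reference, the oirq specifier consumed above comes from parsing the node's interrupts property (presumably via of_irq_parse_one(), given the of_phandle_args fields used); in this binding the third cell carries a CPU bitmap in its bits 15:8. The sketch below shows that conversion as a standalone helper; the example_irq_cpu_mask name and the error handling are illustrative, not taken from the driver.

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/of_irq.h>

/* Parse the index-th interrupt specifier of a node and copy the CPU bitmap
 * encoded in bits 15:8 of its third cell into @out, mirroring the
 * for_each_set_bit() conversion in the hunk above. */
static int example_irq_cpu_mask(struct device_node *np, int index,
				struct cpumask *out)
{
	struct of_phandle_args oirq;
	unsigned long bits;
	int bit, ret;

	ret = of_irq_parse_one(np, index, &oirq);
	if (ret)
		return ret;

	if (oirq.args_count < 3)
		return -EINVAL;	/* specifier carries no CPU map */

	bits = (oirq.args[2] & 0x0000ff00) >> 8;
	for_each_set_bit(bit, &bits, BITS_PER_LONG)
		cpumask_set_cpu(bit, out);

	return 0;
}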