{
        int cpu;
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu)
                /* Enable all cpus with a declared buffer. */
                if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
                    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
                        smp_call_function_single(cpu, iucv_allow_cpu,
                                                 NULL, 1);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 /**
        size_t alloc_size;
        int cpu, rc;
 
-       get_online_cpus();
+       cpus_read_lock();
        rc = -ENOMEM;
        alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
        iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
        if (cpumask_empty(&iucv_buffer_cpumask))
                /* No cpu could declare an iucv buffer. */
                goto out;
-       put_online_cpus();
+       cpus_read_unlock();
        return 0;
 out:
        kfree(iucv_path_table);
        iucv_path_table = NULL;
-       put_online_cpus();
+       cpus_read_unlock();
        return rc;
 }
 
  */
 static void iucv_disable(void)
 {
-       get_online_cpus();
+       /*
+        * Hold the CPU hotplug read lock so no CPU can come or go while
+        * the per-CPU IUCV buffers are retrieved and the path table freed.
+        * cpus_read_lock() is the renamed get_online_cpus(); same lock,
+        * identical semantics.
+        */
+       cpus_read_lock();
        on_each_cpu(iucv_retrieve_cpu, NULL, 1);
        kfree(iucv_path_table);
+       /* Clear the stale pointer so a later iucv_enable() starts clean. */
        iucv_path_table = NULL;
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static int iucv_cpu_dead(unsigned int cpu)
        if (cpumask_empty(&iucv_irq_cpumask))
                return NOTIFY_DONE;
 
-       get_online_cpus();
+       cpus_read_lock();
        on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
        preempt_disable();
        for (i = 0; i < iucv_max_pathid; i++) {
                        iucv_sever_pathid(i, NULL);
        }
        preempt_enable();
-       put_online_cpus();
+       cpus_read_unlock();
        iucv_disable();
        return NOTIFY_DONE;
 }