 #define CIF_IGNORE_IRQ         5       /* ignore interrupt (for udelay) */
 #define CIF_ENABLED_WAIT       6       /* in enabled wait state */
 #define CIF_MCCK_GUEST         7       /* machine check happening in guest */
+#define CIF_DEDICATED_CPU      8       /* this CPU is dedicated */
 
 #define _CIF_MCCK_PENDING      _BITUL(CIF_MCCK_PENDING)
 #define _CIF_ASCE_PRIMARY      _BITUL(CIF_ASCE_PRIMARY)
 #define _CIF_IGNORE_IRQ                _BITUL(CIF_IGNORE_IRQ)
 #define _CIF_ENABLED_WAIT      _BITUL(CIF_ENABLED_WAIT)
 #define _CIF_MCCK_GUEST                _BITUL(CIF_MCCK_GUEST)
+#define _CIF_DEDICATED_CPU     _BITUL(CIF_DEDICATED_CPU)
 
 #ifndef __ASSEMBLY__
 
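The CIF_* numbers index bits in the per-CPU flags word kept in the lowcore (S390_lowcore.cpu_flags), so the new flag can be tested cheaply on the local CPU with the existing helpers from arch/s390/include/asm/processor.h. A minimal sketch of a consumer, assuming only those helpers (this_cpu_is_dedicated() is a hypothetical wrapper, not part of the patch):

        /* Sketch: query the new bit on the local CPU via the existing
         * cpu-flag helpers; this wrapper is hypothetical.
         */
        static inline int this_cpu_is_dedicated(void)
        {
                return test_cpu_flag(CIF_DEDICATED_CPU);
        }

The next hunk is in arch/s390/kernel/smp.c, where a freshly started secondary CPU initializes the flag for itself: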
 
  */
 static void smp_start_secondary(void *cpuvoid)
 {
+       int cpu = smp_processor_id();
+
        S390_lowcore.last_update_clock = get_tod_clock();
        S390_lowcore.restart_stack = (unsigned long) restart_stack;
        S390_lowcore.restart_fn = (unsigned long) do_restart;
        init_cpu_timer();
        vtime_init();
        pfault_init();
-       notify_cpu_starting(smp_processor_id());
-       set_cpu_online(smp_processor_id(), true);
+       notify_cpu_starting(cpu);
+       if (topology_cpu_dedicated(cpu))
+               set_cpu_flag(CIF_DEDICATED_CPU);
+       else
+               clear_cpu_flag(CIF_DEDICATED_CPU);
+       set_cpu_online(cpu, true);
        inc_irq_stat(CPU_RST);
        local_irq_enable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 
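The flag is set or cleared before set_cpu_online(), so it is already valid by the time any other code can observe the CPU as online. topology_cpu_dedicated() comes from the companion change to arch/s390/include/asm/topology.h, which is not shown in this excerpt; roughly, it reads a bit in the per-CPU topology record that add_cpus_to_mask() below fills in from the SYSIB 15.1.x core entry (tl_core->d):

        /* Approximate shape of the companion header change (assumption,
         * not shown in the hunks above):
         */
        struct cpu_topology_s390 {
                /* ... existing fields omitted ... */
                unsigned short dedicated : 1;
        };

        #define topology_cpu_dedicated(cpu)     (cpu_topology[cpu].dedicated)

The remaining hunks are in arch/s390/kernel/topology.c, starting in add_cpus_to_mask():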
                        topo->socket_id = socket->id;
                        topo->core_id = rcore;
                        topo->thread_id = lcpu + i;
+                       topo->dedicated = tl_core->d;
                        cpumask_set_cpu(lcpu + i, &drawer->mask);
                        cpumask_set_cpu(lcpu + i, &book->mask);
                        cpumask_set_cpu(lcpu + i, &socket->mask);

A separate hunk adds the flag-update callback right after store_topology(), whose tail provides the context here:

        stsi(info, 15, 1, topology_mnest_limit());
 }
 
+static void __arch_update_dedicated_flag(void *arg)
+{
+       if (topology_cpu_dedicated(smp_processor_id()))
+               set_cpu_flag(CIF_DEDICATED_CPU);
+       else
+               clear_cpu_flag(CIF_DEDICATED_CPU);
+}
+
 static int __arch_update_cpu_topology(void)
 {
        struct sysinfo_15_1_x *info = tl_info;

The next hunk is in arch_update_cpu_topology(), the hook the scheduler calls to pick up topology changes:

        int cpu, rc;
 
        rc = __arch_update_cpu_topology();
+       on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
                kobject_uevent(&dev->kobj, KOBJ_CHANGE);
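on_each_cpu() runs __arch_update_dedicated_flag() on the local CPU and, via IPI, on every other online CPU, so each CPU refreshes its own flag. The callback ignores its argument, hence the NULL, and wait == 0 means the caller does not block until the remote CPUs have finished. All the callback has to do is match the generic cross-call type from include/linux/smp.h:

        /* From include/linux/smp.h; on remote CPUs the callback runs in
         * the IPI handler with interrupts disabled.
         */
        typedef void (*smp_call_func_t)(void *info);

The next hunk extends the per-CPU sysfs attributes, next to the existing topology_cpu_attr_group: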
        .attrs = topology_cpu_attrs,
 };
 
+static ssize_t cpu_dedicated_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       int cpu = dev->id;
+       ssize_t count;
+
+       mutex_lock(&smp_cpu_state_mutex);
+       count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
+       mutex_unlock(&smp_cpu_state_mutex);
+       return count;
+}
+static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);
+
+static struct attribute *topology_extra_cpu_attrs[] = {
+       &dev_attr_dedicated.attr,
+       NULL,
+};
+
+static struct attribute_group topology_extra_cpu_attr_group = {
+       .attrs = topology_extra_cpu_attrs,
+};
+
 int topology_cpu_init(struct cpu *cpu)
 {
-       return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+       int rc;
+
+       rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+       if (rc || !MACHINE_HAS_TOPOLOGY)
+               return rc;
+       rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
+       if (rc)
+               sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+       return rc;
 }
 
 static const struct cpumask *cpu_thread_mask(int cpu)
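The extra group is only registered when MACHINE_HAS_TOPOLOGY, i.e. when SYSIB 15.1.x data is available, and a registration failure rolls back the base group so the two are created or destroyed as a pair. Because the group sits on the CPU device kobject, the new file should appear as /sys/devices/system/cpu/cpuN/dedicated, alongside the existing polarization attribute. A hypothetical userspace reader (the path is an assumption based on where the group is registered, not confirmed by this excerpt):

        /* Hypothetical userspace check; path assumed, see above. */
        #include <stdio.h>

        static int cpu_is_dedicated(int cpu)
        {
                char path[64];
                FILE *f;
                int v;

                snprintf(path, sizeof(path),
                         "/sys/devices/system/cpu/cpu%d/dedicated", cpu);
                f = fopen(path, "r");
                if (!f)
                        return -1;      /* no such file: no topology data */
                if (fscanf(f, "%d", &v) != 1)
                        v = -1;
                fclose(f);
                return v;               /* 1 = dedicated, 0 = shared */
        }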

The final hunk is in the early boot path (topology_init_early()); only the boot CPU is running at this point, so the update function is called directly rather than through on_each_cpu():

        alloc_masks(info, &drawer_info, 3);
 out:
        __arch_update_cpu_topology();
+       __arch_update_dedicated_flag(NULL);
 }
 
 static inline int topology_get_mode(int enabled)