From 9dc1624013c641c2b61ae63ed9586fc1e985698e Mon Sep 17 00:00:00 2001
From: Bijan Mottahedeh
Date: Thu, 28 Jan 2016 16:48:15 -0800
Subject: [PATCH] LDoms CPU Hotplug - dynamic mondo queue allocation.

Orabug: 22620474

- Allocate mondo queues for present cpus only at boot time
- Allocate mondo queues dynamically and with proper alignment at hot-add

Signed-off-by: Bijan Mottahedeh
(cherry picked from commit 41f763e66dcbdb72632cd0675e2990085e47a527)
(cherry picked from commit fb59288f6c4e17eca98a6a38512c0c05d55ac8e9)
---
 arch/sparc/include/asm/irq_64.h |  4 ++
 arch/sparc/kernel/irq_64.c      | 90 +++++++++++++++++++++++++++------
 arch/sparc/kernel/smp_64.c      |  3 ++
 3 files changed, 81 insertions(+), 16 deletions(-)

diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3f70f900e834..1c163db85a3f 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -91,6 +91,10 @@ void arch_trigger_all_cpu_backtrace(bool);
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
+
+extern int sun4v_alloc_mondo_queues(int);
+extern void sun4v_free_mondo_queues(int);
+
 #define __ARCH_HAS_DO_SOFTIRQ
 
 #define NO_IRQ 0xffffffff

diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4033c23bdfa6..865d40312041 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1010,19 +1010,81 @@ void notrace sun4v_register_mondo_queues(int this_cpu)
  * size. The base real address must be aligned to the size of the
  * region. Thus, an 8KB queue must be 8KB aligned, for example.
  */
-static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
+static int alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 {
         unsigned long size = PAGE_ALIGN(qmask + 1);
         unsigned long order = get_order(size);
         unsigned long p;
 
-        p = __get_free_pages(GFP_KERNEL, order);
+        p = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
         if (!p) {
-                prom_printf("SUN4V: Error, cannot allocate queue.\n");
-                prom_halt();
+                pr_err("SUN4V: Error, cannot allocate queue.\n");
+                return -ENOMEM;
         }
 
         *pa_ptr = __pa(p);
+        return 0;
+}
+
+static void free_one_queue(unsigned long *pa_ptr, unsigned long qmask)
+{
+        unsigned long size = PAGE_ALIGN(qmask + 1);
+        unsigned long order = get_order(size);
+        unsigned long p = *pa_ptr;
+
+        __free_pages(pfn_to_page(p >> PAGE_SHIFT), order);
+}
+
+/* Allocate mondo and error queues for a cpu. */
+int sun4v_alloc_mondo_queues(int cpu)
+{
+        int err;
+        struct trap_per_cpu *tb = &trap_block[cpu];
+
+        err = alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+        if (err)
+                return err;
+        err = alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+        if (err)
+                goto cpu_mondo;
+        err = alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+        if (err)
+                goto dev_mondo;
+        err = alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+        if (err)
+                goto resum_mondo;
+        err = alloc_one_queue(&tb->nonresum_mondo_pa,
+                              tb->nonresum_qmask);
+        if (err)
+                goto resum_kernel_buf;
+        err = alloc_one_queue(&tb->nonresum_kernel_buf_pa,
+                              tb->nonresum_qmask);
+        if (!err)
+                return 0;
+
+        free_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+resum_kernel_buf:
+        free_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+resum_mondo:
+        free_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+dev_mondo:
+        free_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+cpu_mondo:
+        free_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+
+        return err;
+}
+
+void sun4v_free_mondo_queues(int cpu)
+{
+        struct trap_per_cpu *tb = &trap_block[cpu];
+
+        free_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+        free_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+        free_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+        free_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+        free_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+        free_one_queue(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask);
 }
 
 static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
@@ -1043,21 +1105,17 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 #endif
 }
 
-/* Allocate mondo and error queues for all possible cpus. */
+/* Allocate mondo and error queues for all present cpus. */
 static void __init sun4v_init_mondo_queues(void)
 {
-        int cpu;
+        int cpu, rv;
 
-        for_each_possible_cpu(cpu) {
-                struct trap_per_cpu *tb = &trap_block[cpu];
-
-                alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
-                alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
-                alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
-                alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
-                alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
-                alloc_one_queue(&tb->nonresum_kernel_buf_pa,
-                                tb->nonresum_qmask);
+        for_each_present_cpu(cpu) {
+                rv = sun4v_alloc_mondo_queues(cpu);
+                if (rv) {
+                        prom_printf("SUN4V: Can't allocate queues (%d).\n", rv);
+                        prom_halt();
+                }
         }
 }
 
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 0d5869c27097..39f96a89c3df 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -40,6 +40,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -322,6 +323,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
                 tte_data += 0x400000;
         }
 
+        sun4v_alloc_mondo_queues(cpu);
         trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
 
         hv_err = sun4v_cpu_start(cpu, trampoline_ra,
@@ -1385,6 +1387,7 @@ void __cpu_die(unsigned int cpu)
                         hv_err = sun4v_cpu_stop(cpu);
                         if (hv_err == HV_EOK) {
                                 set_cpu_present(cpu, false);
+                                sun4v_free_mondo_queues(cpu);
                                 break;
                         }
                 } while (--limit > 0);
-- 
2.50.1
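
The block comment above alloc_one_queue() in the irq_64.c hunk notes that each mondo/error queue's base real address must be aligned to the size of the queue (an 8KB queue must be 8KB aligned). Allocating the queue with __get_free_pages() at the order computed from the queue mask satisfies that requirement because the buddy allocator hands out order-n blocks that are naturally aligned to 2^n pages. The standalone user-space sketch below (not part of the patch) walks through the same size/order arithmetic; PAGE_SHIFT, PAGE_ALIGN() and get_order() are re-implemented locally for the demo, and the qmask values are made-up examples rather than values read from a real sun4v machine description.

/*
 * Standalone user-space sketch (not kernel code) of the size/alignment
 * math behind alloc_one_queue(): a queue of (qmask + 1) bytes is rounded
 * up to whole pages, and an order-n page allocation is aligned to
 * 2^n * PAGE_SIZE, which meets the hypervisor's requirement that the
 * queue base real address be aligned to the queue size.
 *
 * PAGE_ALIGN() and get_order() are re-implemented here for illustration;
 * the qmask values below are hypothetical examples only.
 */
#include <stdio.h>

#define PAGE_SHIFT      13UL                    /* sparc64 uses 8KB pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Smallest order such that 2^order pages cover 'size' bytes. */
static unsigned long get_order(unsigned long size)
{
        unsigned long order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        /* Hypothetical queue masks; the queue size is qmask + 1 bytes. */
        unsigned long qmasks[] = { 0x7ffUL, 0x3fffUL, 0x1ffffUL };
        unsigned long i;

        for (i = 0; i < sizeof(qmasks) / sizeof(qmasks[0]); i++) {
                unsigned long size = PAGE_ALIGN(qmasks[i] + 1);
                unsigned long order = get_order(size);

                printf("qmask 0x%lx: size %lu bytes, order %lu, "
                       "allocation aligned to %lu bytes\n",
                       qmasks[i], size, order, PAGE_SIZE << order);
        }
        return 0;
}

Built with a plain C compiler (for example, cc -o qalign qalign.c, a hypothetical file name), it prints the rounded queue size, the page order that would be passed to the allocator, and the resulting alignment guarantee for each sample mask.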