Orabug: 24449941
Number of context IDs supported by the hardware
is reported via machine descriptor for sun4v
systems. For systems > T3, 16 bits are used
to represent context ID in the HW. For these
systems the context ID wrap around happens if
there are more than 65536 processes running
simultaneously. For older systems, 13 bits are
used and the context ID wraps around if there
are more than 8192 processes running simultaneously.
Reviewed-by: Babu Moger <babu.moger@oracle.com>
Acked-by: Rob Gardner <rob.gardner@oracle.com>
Signed-off-by: Sanath Kumar <sanath.s.kumar@oracle.com>
Signed-off-by: Allen Pais <allen.pais@oracle.com>
void mdesc_fill_in_cpu_data(cpumask_t *mask);
void mdesc_populate_present_mask(cpumask_t *mask);
void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
+extern void mdesc_get_mmu_ctx_bits(cpumask_t *mask, unsigned long *ctx_bits);
void sun4v_mdesc_init(void);
#include <asm/page.h>
#include <asm/hypervisor.h>
-#define CTX_NR_BITS 13
+#define MAX_CTX_NR_BITS 16
+#define DEFAULT_CTX_NR_BITS 13
-#define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL))
+#define MAX_CTX_NR (_AC(1, UL) << MAX_CTX_NR_BITS)
+#define TAG_CONTEXT_BITS ((_AC(1, UL) << ctx_nr_bits) - _AC(1, UL))
/* UltraSPARC-III+ and later have a feature whereby you can
* select what page size the various Data-TLB instances in the
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
+extern unsigned long ctx_nr_bits;
extern unsigned long mmu_context_bmap[];
void get_new_mmu_context(struct mm_struct *mm);
#include <asm/uaccess.h>
#include <asm/oplib.h>
#include <asm/smp.h>
+#include <asm/mmu_64.h>
/* Unlike the OBP device tree, the machine description is a full-on
* DAG. An arbitrary number of ARCs are possible from one
mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
}
+/* Per-cpu MD iterator callback: read the "mmu-#context-bits" property
+ * from one cpu node and merge it into *arg (an unsigned long *).
+ *
+ * All cpus must agree on the value.  On a mismatch between cpus, or
+ * when a cpu reports more bits than the context bitmap supports
+ * (MAX_CTX_NR_BITS), warn once and fall back to the conservative
+ * DEFAULT_CTX_NR_BITS.
+ */
+static void * __init get_one_mmu_ctx_bits(struct mdesc_handle *hp, u64 mp,
+					  int cpuid, void *arg)
+{
+	const u64 *ctx_md = mdesc_get_property(hp, mp, "mmu-#context-bits",
+					       NULL);
+	unsigned long *ctx_bits = arg;
+	u64 val = DEFAULT_CTX_NR_BITS;
+
+	/* A missing or zero property means this cpu's MD does not
+	 * report the width; assume the default.
+	 */
+	if (ctx_md && *ctx_md)
+		val = *ctx_md;
+
+	if (!*ctx_bits) {
+		/* First cpu seen: record its value. */
+		*ctx_bits = val;
+	} else if (*ctx_bits != val) {
+		/* Every cpu must report the same number of context bits. */
+		printk_once(KERN_WARNING "Inconsistent context-bits reported by MD\n");
+		*ctx_bits = DEFAULT_CTX_NR_BITS;
+	}
+
+	if (*ctx_bits > MAX_CTX_NR_BITS) {
+		printk_once(KERN_WARNING "Unsupported number of context-bits reported by MD\n");
+		*ctx_bits = DEFAULT_CTX_NR_BITS;
+	}
+
+	return NULL;
+}
+
+/* Determine, via the machine descriptor, how many MMU context ID bits
+ * the cpus in @mask implement, and store the agreed value in *ctx_bits.
+ *
+ * *ctx_bits is cleared first so the per-cpu callback
+ * (get_one_mmu_ctx_bits) can tell the first cpu's report apart from
+ * later ones when checking for cross-cpu consistency.
+ */
+void __init mdesc_get_mmu_ctx_bits(cpumask_t *mask, unsigned long *ctx_bits)
+{
+	*ctx_bits = 0;
+	mdesc_iterate_over_cpus(get_one_mmu_ctx_bits, ctx_bits, mask);
+}
+
static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
const u64 *pgsz_prop = mdesc_get_property(hp, mp, "mmu-page-size-list", NULL);
#include "init_64.h"
+unsigned long ctx_nr_bits = DEFAULT_CTX_NR_BITS;
unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
-#define MAX_CTX_NR (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
spin_lock(&ctx_alloc_lock);
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
- new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
+ new_ctx = find_next_zero_bit(mmu_context_bmap, 1UL << ctx_nr_bits, ctx);
new_version = 0;
- if (new_ctx >= (1 << CTX_NR_BITS)) {
+ if (new_ctx >= (1UL << ctx_nr_bits)) {
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
if (new_ctx >= ctx) {
int i;
mdesc_fill_in_cpu_data(cpu_all_mask);
#endif
mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
+ mdesc_get_mmu_ctx_bits(cpu_all_mask, &ctx_nr_bits);
sun4v_linear_pte_xor_finalize();