#include <linux/gfp.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
+#include <linux/ratelimit.h>
#include <asm/head.h>
#include <asm/page.h>
#include "init_64.h"
unsigned long ctx_nr_bits = DEFAULT_CTX_NR_BITS;
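+/* Maximum number of MMU contexts available to user mms, derived from
+ * ctx_nr_bits once the size of the context ID space is known.
+ */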
+int max_user_nctx;
unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
spin_unlock(&ctx_alloc_lock);
- if (unlikely(new_version))
+ if (unlikely(new_version)) {
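+ /* The hardware context ID space was exhausted and the version
+ * counter wrapped; note which task triggered it before every CPU
+ * is told to pick up the new version.
+ */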
+ pr_err_ratelimited("Context ID wrapped: %s(%d) CPU%d\n",
+ current->comm, task_pid_nr(current),
+ smp_processor_id());
smp_new_mmu_context_version();
+ }
}
static int numa_enabled = 1;
sun4u_linear_pte_xor_finalize();
}
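+ /* One context ID stays reserved, leaving at most 2^ctx_nr_bits - 1
+ * for user mms; init_new_context() uses this to cap live contexts.
+ */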
+ max_user_nctx = (1UL << ctx_nr_bits) - 1;
+
/* Flush the TLBs and the 4M TSB so that the updated linear
* pte XOR settings are realized for all mappings.
*/
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>
+#include <linux/ratelimit.h>
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
unsigned long *capture_huge_pte_count) {}
#endif /* CONFIG_HUGETLB_PAGE || CONFIG_TRANSPARENT_HUGEPAGE */
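+/* Count of live MMU contexts: bumped in init_new_context(), dropped in
+ * destroy_context() and on allocation failure.
+ */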
+static atomic_t nctxs = ATOMIC_INIT(0);
+
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
unsigned long capture_huge_pte_count[MM_NUM_HUGEPAGE_SIZES];
unsigned long saved_thp_pte_count;
unsigned int i;
+ int max_nctx = max_user_nctx;
+ int ret = 0;
+ int uid = current_cred()->uid.val;
+
+ /*
+ * In the worst case, users could consume every available context and
+ * make the system unusable. Reserve 100 contexts for root so an
+ * administrator can still recover the system, e.g. by killing some
+ * user processes.
+ */
+ if (uid != 0)
+ max_nctx -= 100;
+
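+ /* Claim a slot up front; back out below if the limit has been hit. */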
+ if (unlikely(max_nctx <= atomic_inc_return(&nctxs))) {
+ pr_warn_ratelimited("Reached maximum number of MMU contexts (%d) for %s\n",
+                     max_nctx, uid ? "users" : "root");
+ ret = -EAGAIN;
+ goto error;
+ }
spin_lock_init(&mm->context.lock);
captured_hugepage_pte_count_grow_tsb(mm, &saved_thp_pte_count,
capture_huge_pte_count);
- if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
- return -ENOMEM;
+ if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) {
+ ret = -ENOMEM;
+ goto error;
+ }
- return 0;
+ return ret;
+error:
+ atomic_dec(&nctxs);
+ return ret;
}
static void tsb_destroy_one(struct tsb_config *tp)
for (i = 0; i < MM_NUM_TSBS; i++)
tsb_destroy_one(&mm->context.tsb_block[i]);
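+ /* This mm's context is gone; release its slot in the global count. */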
+ atomic_dec(&nctxs);
+
spin_lock_irqsave(&ctx_alloc_lock, flags);
if (CTX_VALID(mm->context)) {