extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long ctx_nr_bits;
-extern int max_user_nctx;
extern unsigned long mmu_context_bmap[];
void get_new_mmu_context(struct mm_struct *mm);
#include <linux/gfp.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
-#include <linux/ratelimit.h>
#include <asm/head.h>
#include <asm/page.h>
#include "init_64.h"
unsigned long ctx_nr_bits = DEFAULT_CTX_NR_BITS;
-int max_user_nctx;
unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
spin_unlock(&ctx_alloc_lock);
- if (unlikely(new_version)) {
- pr_err_ratelimited("Context ID wrapped: %s(%d) CPU%d\n",
- current->comm, task_pid_nr(current),
- smp_processor_id());
+ if (unlikely(new_version))
smp_new_mmu_context_version();
- }
}
static int numa_enabled = 1;
sun4u_linear_pte_xor_finalize();
}
- max_user_nctx = (1UL << ctx_nr_bits) - 1;
-
/* Flush the TLBs and the 4M TSB so that the updated linear
* pte XOR settings are realized for all mappings.
*/
#include <asm/tlb.h>
#include <asm/oplib.h>
#include <asm/mdesc.h>
-#include <linux/ratelimit.h>
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
}
}
-static atomic_t nctxs = ATOMIC_INIT(0);
-
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
unsigned long mm_rss = get_mm_rss(mm);
unsigned long saved_thp_pte_count;
#endif
unsigned int i;
- int max_nctx = max_user_nctx;
- int ret = 0;
- int uid = current_cred()->uid.val;
-
- /*
- * In the worst case, user(s) might use up all contexts and make the
- * system unusable. Give root extra 100 grace ctxs to recover the
- * system. E.g by killing some user processes.
- */
- if (uid != 0)
- max_nctx -= 100;
-
- if (unlikely(max_nctx <= atomic_inc_return(&nctxs))) {
- pr_warn_ratelimited("Reached max(%d) number of processes for %s\n",
- max_nctx, uid ? "users" : "root");
- ret = -EAGAIN;
- goto error;
- }
spin_lock_init(&mm->context.lock);
(saved_hugetlb_pte_count + saved_thp_pte_count) *
REAL_HPAGE_PER_HPAGE);
#endif
- if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) {
- ret = -ENOMEM;
- goto error;
- }
-
- return ret;
-error:
- atomic_dec(&nctxs);
- return ret;
+ if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
+ return -ENOMEM;
+ return 0;
}
static void tsb_destroy_one(struct tsb_config *tp)
for (i = 0; i < MM_NUM_TSBS; i++)
tsb_destroy_one(&mm->context.tsb_block[i]);
- atomic_dec(&nctxs);
-
spin_lock_irqsave(&ctx_alloc_lock, flags);
if (CTX_VALID(mm->context)) {