if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
                return -EINVAL;
 
+again:
        gts = gru_find_lock_gts(cb);
        if (!gts)
                return -EINVAL;
        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
                goto exit;
 
-       gru_check_context_placement(gts);
+       if (gru_check_context_placement(gts)) {
+               gru_unlock_gts(gts);
+               gru_unload_context(gts, 1);
+               goto again;
+       }
 
        /*
         * CCH may contain stale data if ts_force_cch_reload is set.
                } else {
                        gts->ts_user_blade_id = req.val1;
                        gts->ts_user_chiplet_id = req.val0;
-                       gru_check_context_placement(gts);
+                       if (gru_check_context_placement(gts)) {
+                               gru_unlock_gts(gts);
+                               gru_unload_context(gts, 1);
+                               return ret;
+                       }
                }
                break;
        case sco_gseg_owner:
 
  * chiplet. Misassignment can occur if the process migrates to a different
  * blade or if the user changes the selected blade/chiplet.
  */
-void gru_check_context_placement(struct gru_thread_state *gts)
+int gru_check_context_placement(struct gru_thread_state *gts)
 {
        struct gru_state *gru;
+       int ret = 0;
 
        /*
         * If the current task is the context owner, verify that the
         * references. Pthread apps use non-owner references to the CBRs.
         */
        gru = gts->ts_gru;
+       /*
+        * If gru or gts->ts_tgid_owner isn't initialized properly, return
+        * success to indicate that the caller does not need to unload the
+        * gru context. The caller is responsible for their inspection and
+        * reinitialization if needed.
+        */
        if (!gru || gts->ts_tgid_owner != current->tgid)
-               return;
+               return ret;
 
        if (!gru_check_chiplet_assignment(gru, gts)) {
                STAT(check_context_unload);
-               gru_unload_context(gts, 1);
+               ret = -EINVAL;
        } else if (gru_retarget_intr(gts)) {
                STAT(check_context_retarget_intr);
        }
+
+       return ret;
 }
 
 
        mutex_lock(&gts->ts_ctxlock);
        preempt_disable();
 
-       gru_check_context_placement(gts);
+       if (gru_check_context_placement(gts)) {
+               preempt_enable();
+               mutex_unlock(&gts->ts_ctxlock);
+               gru_unload_context(gts, 1);
+               return VM_FAULT_NOPAGE;
+       }
 
        if (!gts->ts_gru) {
                STAT(load_user_context);
 
 extern int gru_user_unload_context(unsigned long arg);
 extern int gru_get_exception_detail(unsigned long arg);
 extern int gru_set_context_option(unsigned long address);
-extern void gru_check_context_placement(struct gru_thread_state *gts);
+extern int gru_check_context_placement(struct gru_thread_state *gts);
 extern int gru_cpu_fault_map_id(void);
 extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
 extern void gru_flush_all_tlb(struct gru_state *gru);