                struct vm_area_struct *vma;
 
                vma = find_extend_vma(mm, start);
-               if (!vma && in_gate_area(tsk->mm, start)) {
+               if (!vma && in_gate_area(mm, start)) {
                        unsigned long pg = start & PAGE_MASK;
-                       struct vm_area_struct *gate_vma = get_gate_vma(tsk->mm);
+                       struct vm_area_struct *gate_vma = get_gate_vma(mm);
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
-                               if (ret & VM_FAULT_MAJOR)
-                                       tsk->maj_flt++;
-                               else
-                                       tsk->min_flt++;
+
+                               if (tsk) {
+                                       if (ret & VM_FAULT_MAJOR)
+                                               tsk->maj_flt++;
+                                       else
+                                               tsk->min_flt++;
+                               }
 
                                if (ret & VM_FAULT_RETRY) {
                                        *nonblocking = 0;
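
Taken together, these hunks let a caller with no meaningful task context (for example, one operating on a foreign mm) pass a NULL tsk and forgo the maj_flt/min_flt bookkeeping, as the updated kernel-doc below spells out. A minimal sketch of such a caller, assuming the eight-argument get_user_pages() signature of this kernel generation (tsk, mm, start, nr_pages, write, force, pages, vmas); the pin_one_page() helper is hypothetical, and mmap_sem must be held for read across the call:

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical caller: pin one user page with no fault accounting.
 * Passing a NULL task_struct makes the fault path above skip the
 * maj_flt/min_flt updates.
 */
static int pin_one_page(struct mm_struct *mm, unsigned long addr,
                        struct page **page)
{
        int ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(NULL, mm, addr, 1, 1 /* write */,
                             0 /* force */, page, NULL);
        up_read(&mm->mmap_sem);

        if (ret < 0)
                return ret;
        return ret == 1 ? 0 : -EFAULT;
}

The pinned page would later be released with put_page(); callers that do want the accounting keep passing current (or the target task) exactly as before.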
 
 /**
  * get_user_pages() - pin user pages in memory
- * @tsk:       task_struct of target task
+ * @tsk:       the task_struct to use for page fault accounting, or
+ *             NULL if faults are not to be recorded.
  * @mm:                mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin