static struct frame_tail* user_backtrace(struct frame_tail *tail)
 {
-       struct frame_tail buftail;
+       struct frame_tail buftail[2];
 
-       /* hardware pte might not be valid due to dirty/accessed bit emulation
-        * so we use copy_from_user and benefit from exception fixups */
-       if (copy_from_user(&buftail, tail, sizeof(struct frame_tail)))
+       /* Also check accessibility of one struct frame_tail beyond */
+       if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+               return NULL;
+       if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
                return NULL;
 
-       oprofile_add_trace(buftail.lr);
+       oprofile_add_trace(buftail[0].lr);
 
        /* frame pointers should strictly progress back up the stack
         * (towards higher addresses) */
-       if (tail >= buftail.fp)
+       if (tail >= buftail[0].fp)
                return NULL;
 
-       return buftail.fp-1;
-}
-
-/* Compare two addresses and see if they're on the same page */
-#define CMP_ADDR_EQUAL(x,y,offset) ((((unsigned long) x) >> PAGE_SHIFT) \
-       == ((((unsigned long) y) + offset) >> PAGE_SHIFT))
-
-/* check that the page(s) containing the frame tail are present */
-static int pages_present(struct frame_tail *tail)
-{
-       struct mm_struct * mm = current->mm;
-
-       if (!check_user_page_readable(mm, (unsigned long)tail))
-               return 0;
-
-       if (CMP_ADDR_EQUAL(tail, tail, 8))
-               return 1;
-
-       if (!check_user_page_readable(mm, ((unsigned long)tail) + 8))
-               return 0;
-
-       return 1;
+       return buftail[0].fp-1;
 }
 
 /*
 void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
        struct frame_tail *tail;
-       unsigned long last_address = 0;
 
        tail = ((struct frame_tail *) regs->ARM_fp) - 1;
 
                return;
        }
 
-       while (depth-- && tail && !((unsigned long) tail & 3)) {
-               if ((!CMP_ADDR_EQUAL(last_address, tail, 0)
-                       || !CMP_ADDR_EQUAL(last_address, tail, 8))
-                               && !pages_present(tail))
-                       return;
-               last_address = (unsigned long) tail;
+       while (depth-- && tail && !((unsigned long) tail & 3))
                tail = user_backtrace(tail);
-       }
 }
-
 
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/ptrace.h>
+#include <asm/uaccess.h>
 
 struct frame_head {
        struct frame_head * ebp;
 static struct frame_head *
 dump_backtrace(struct frame_head * head)
 {
-       oprofile_add_trace(head->ret);
+       struct frame_head bufhead[2];
 
-       /* frame pointers should strictly progress back up the stack
-        * (towards higher addresses) */
-       if (head >= head->ebp)
+       /* Also check accessibility of one struct frame_head beyond */
+       if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
+               return NULL;
+       if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
                return NULL;
 
-       return head->ebp;
-}
-
-/* check that the page(s) containing the frame head are present */
-static int pages_present(struct frame_head * head)
-{
-       struct mm_struct * mm = current->mm;
+       oprofile_add_trace(bufhead[0].ret);
 
-       /* FIXME: only necessary once per page */
-       if (!check_user_page_readable(mm, (unsigned long)head))
-               return 0;
+       /* frame pointers should strictly progress back up the stack
+        * (towards higher addresses) */
+       if (head >= bufhead[0].ebp)
+               return NULL;
 
-       return check_user_page_readable(mm, (unsigned long)(head + 1));
+       return bufhead[0].ebp;
 }
 
 /*
                return;
        }
 
-#ifdef CONFIG_SMP
-       if (!spin_trylock(&current->mm->page_table_lock))
-               return;
-#endif
-
-       while (depth-- && head && pages_present(head))
+       while (depth-- && head)
                head = dump_backtrace(head);
-
-#ifdef CONFIG_SMP
-       spin_unlock(&current->mm->page_table_lock);
-#endif
 }
 
 extern unsigned long vmalloc_to_pfn(void *addr);
 extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
                int write);
-extern int check_user_page_readable(struct mm_struct *mm, unsigned long address);
 int remap_pfn_range(struct vm_area_struct *, unsigned long,
                unsigned long, unsigned long, pgprot_t);
 
 
  * Do a quick page-table lookup for a single page.
  * mm->page_table_lock must be held.
  */
-static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
-                       int read, int write, int accessed)
+struct page *follow_page(struct mm_struct *mm, unsigned long address, int write)
 {
        pgd_t *pgd;
        pud_t *pud;
        if (pte_present(pte)) {
                if (write && !pte_write(pte))
                        goto out;
-               if (read && !pte_read(pte))
-                       goto out;
                pfn = pte_pfn(pte);
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
-                       if (accessed) {
-                               if (write && !pte_dirty(pte) &&!PageDirty(page))
-                                       set_page_dirty(page);
-                               mark_page_accessed(page);
-                       }
+                       if (write && !pte_dirty(pte) &&!PageDirty(page))
+                               set_page_dirty(page);
+                       mark_page_accessed(page);
                        return page;
                }
        }
        return NULL;
 }
 
-inline struct page *
-follow_page(struct mm_struct *mm, unsigned long address, int write)
-{
-       return __follow_page(mm, address, 0, write, 1);
-}
-
-/*
- * check_user_page_readable() can be called frm niterrupt context by oprofile,
- * so we need to avoid taking any non-irq-safe locks
- */
-int check_user_page_readable(struct mm_struct *mm, unsigned long address)
-{
-       return __follow_page(mm, address, 1, 0, 0) != NULL;
-}
-EXPORT_SYMBOL(check_user_page_readable);
-
 static inline int
 untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
                         unsigned long address)