int vmf_ret = 0;
        void *entry;
 
+       trace_dax_pte_fault(inode, vmf, vmf_ret);
        /*
         * Check whether offset isn't beyond end of file now. Caller is supposed
         * to hold locks serializing us with truncate / punch hole so this is
         * a reliable test.
         */
-       if (pos >= i_size_read(inode))
-               return VM_FAULT_SIGBUS;
+       if (pos >= i_size_read(inode)) {
+               vmf_ret = VM_FAULT_SIGBUS;
+               goto out;
+       }
 
        if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
                flags |= IOMAP_WRITE;
         * that we never have to deal with more than a single extent here.
         */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
-       if (error)
-               return dax_fault_return(error);
+       if (error) {
+               vmf_ret = dax_fault_return(error);
+               goto out;
+       }
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
                vmf_ret = dax_fault_return(-EIO);       /* fs corruption? */
                goto finish_iomap;
                 */
                ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
        }
+out:
+       trace_dax_pte_fault_done(inode, vmf, vmf_ret);
        return vmf_ret;
 }
 
 
 DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
 DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
 
+/*
+ * Event class for the DAX PTE fault entry/exit tracepoints.
+ *
+ * Captures the faulting inode (ino + device), the faulting VMA's flags,
+ * the fault address/pgoff/flags from struct vm_fault, and the VM_FAULT_*
+ * result code.  The same class backs both the "fault entered" event
+ * (result is 0 at entry) and the "fault done" event (result is the
+ * value about to be returned to the fault handler).
+ *
+ * NOTE(review): FAULT_FLAG_TRACE and VM_FAULT_RESULT_TRACE are
+ * __print_flags() symbol tables assumed to be defined elsewhere in the
+ * tree (linux/mm.h) -- confirm they are visible to this header.
+ */
+DECLARE_EVENT_CLASS(dax_pte_fault_class,
+       TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
+       TP_ARGS(inode, vmf, result),
+       TP_STRUCT__entry(
+               __field(unsigned long, ino)
+               __field(unsigned long, vm_flags)
+               __field(unsigned long, address)
+               __field(pgoff_t, pgoff)
+               __field(dev_t, dev)
+               __field(unsigned int, flags)
+               __field(int, result)
+       ),
+       TP_fast_assign(
+               __entry->dev = inode->i_sb->s_dev;
+               __entry->ino = inode->i_ino;
+               __entry->vm_flags = vmf->vma->vm_flags;
+               __entry->address = vmf->address;
+               __entry->flags = vmf->flags;
+               __entry->pgoff = vmf->pgoff;
+               __entry->result = result;
+       ),
+       /* dev printed as major:minor; vm_flags reduced to shared/private */
+       TP_printk("dev %d:%d ino %#lx %s %s address %#lx pgoff %#lx %s",
+               MAJOR(__entry->dev),
+               MINOR(__entry->dev),
+               __entry->ino,
+               __entry->vm_flags & VM_SHARED ? "shared" : "private",
+               __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
+               __entry->address,
+               __entry->pgoff,
+               __print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE)
+       )
+)
+
+/*
+ * Helper to stamp out concrete events from dax_pte_fault_class without
+ * repeating the prototype/args boilerplate for each event name.
+ */
+#define DEFINE_PTE_FAULT_EVENT(name) \
+DEFINE_EVENT(dax_pte_fault_class, name, \
+       TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), \
+       TP_ARGS(inode, vmf, result))
+
+/* Entry and exit events bracketing a DAX PTE fault handler invocation. */
+DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
+DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
+
 #endif /* _TRACE_FS_DAX_H */
 
 /* This part must be outside protection */