TP_ARGS(folio)
        );
 
+/*
+ * Event class shared by tracepoints that act on a contiguous range of
+ * page-cache pages in one address_space.  Records the host inode number,
+ * the backing device, and the inclusive page-index range
+ * [@index, @last_index].
+ */
+DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
+
+       TP_PROTO(
+               struct address_space *mapping,
+               pgoff_t index,
+               pgoff_t last_index
+       ),
+
+       TP_ARGS(mapping, index, last_index),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, i_ino)
+               __field(dev_t, s_dev)
+               __field(unsigned long, index)
+               __field(unsigned long, last_index)
+       ),
+
+       TP_fast_assign(
+               __entry->i_ino = mapping->host->i_ino;
+               /*
+                * No superblock means no s_dev to report (e.g. an inode
+                * not backed by a mounted filesystem); fall back to the
+                * raw device number of the inode itself.
+                */
+               if (mapping->host->i_sb)
+                       __entry->s_dev =
+                               mapping->host->i_sb->s_dev;
+               else
+                       __entry->s_dev = mapping->host->i_rdev;
+               __entry->index = index;
+               __entry->last_index = last_index;
+       ),
+
+       /*
+        * Print the range as byte offsets: from the first byte of page
+        * @index up to the last byte of page @last_index (hence the
+        * "+ 1 ... - 1" on the upper bound).
+        */
+       TP_printk(
+               "dev=%d:%d ino=%lx ofs=%lld-%lld",
+               MAJOR(__entry->s_dev),
+               MINOR(__entry->s_dev), __entry->i_ino,
+               ((loff_t)__entry->index) << PAGE_SHIFT,
+               ((((loff_t)__entry->last_index + 1) << PAGE_SHIFT) - 1)
+       )
+);
+
+/*
+ * mm_filemap_get_pages - traces the page range looked up from the page
+ * cache on the read path (see the trace_mm_filemap_get_pages() call site).
+ * Uses the mm_filemap_op_page_cache_range event-class format above.
+ */
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_get_pages,
+       TP_PROTO(
+               struct address_space *mapping,
+               pgoff_t index,
+               pgoff_t last_index
+       ),
+       TP_ARGS(mapping, index, last_index)
+);
+
+/*
+ * mm_filemap_map_pages - traces the page range handed to the fault-around
+ * mapping path (see the trace_mm_filemap_map_pages() call site).  Uses the
+ * mm_filemap_op_page_cache_range event-class format above.
+ */
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_map_pages,
+       TP_PROTO(
+               struct address_space *mapping,
+               pgoff_t index,
+               pgoff_t last_index
+       ),
+       TP_ARGS(mapping, index, last_index)
+);
+
+/*
+ * mm_filemap_fault - traces a single page-cache fault: records the host
+ * inode, backing device, and the faulting page index.  Single-index
+ * variant of the range event class above (same dev/ino fields, one
+ * offset instead of a range).
+ */
+TRACE_EVENT(mm_filemap_fault,
+       TP_PROTO(struct address_space *mapping, pgoff_t index),
+
+       TP_ARGS(mapping, index),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, i_ino)
+               __field(dev_t, s_dev)
+               __field(unsigned long, index)
+       ),
+
+       TP_fast_assign(
+               __entry->i_ino = mapping->host->i_ino;
+               /*
+                * No superblock means no s_dev to report; fall back to
+                * the inode's raw device number (mirrors the event class
+                * above).
+                */
+               if (mapping->host->i_sb)
+                       __entry->s_dev =
+                               mapping->host->i_sb->s_dev;
+               else
+                       __entry->s_dev = mapping->host->i_rdev;
+               __entry->index = index;
+       ),
+
+       /* Report the fault location as a byte offset into the file. */
+       TP_printk(
+               "dev=%d:%d ino=%lx ofs=%lld",
+               MAJOR(__entry->s_dev),
+               MINOR(__entry->s_dev), __entry->i_ino,
+               ((loff_t)__entry->index) << PAGE_SHIFT
+       )
+);
+
 TRACE_EVENT(filemap_set_wb_err,
                TP_PROTO(struct address_space *mapping, errseq_t eseq),
 
 
                        goto err;
        }
 
+       trace_mm_filemap_get_pages(mapping, index, last_index);
        return 0;
 err:
        if (err < 0)
        if (unlikely(index >= max_idx))
                return VM_FAULT_SIGBUS;
 
+       trace_mm_filemap_fault(mapping, index);
+
        /*
         * Do we have something in the page cache already?
         */
        } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
        add_mm_counter(vma->vm_mm, folio_type, rss);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
+       trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
 out:
        rcu_read_unlock();