#define RECLAIM_WB_MIXED       0x0010u
 #define RECLAIM_WB_SYNC                0x0004u /* Unused, all reclaim async */
 #define RECLAIM_WB_ASYNC       0x0008u
+#define RECLAIM_WB_LRU         (RECLAIM_WB_ANON|RECLAIM_WB_FILE)
 
 #define show_reclaim_flags(flags)                              \
        (flags) ? __print_flags(flags, "|",                     \
                show_reclaim_flags(__entry->reclaim_flags))
 );
 
+/* Emitted when deciding whether an LRU's inactive list is too small. */
+TRACE_EVENT(mm_vmscan_inactive_list_is_low,
+
+	TP_PROTO(int nid, int reclaim_idx,
+		unsigned long total_inactive, unsigned long inactive,
+		unsigned long total_active, unsigned long active,
+		unsigned long ratio, int file),
+
+	TP_ARGS(nid, reclaim_idx, total_inactive, inactive, total_active, active, ratio, file),
+
+	TP_STRUCT__entry(
+		__field(int, nid)
+		__field(int, reclaim_idx)
+		__field(unsigned long, total_inactive)
+		__field(unsigned long, inactive)
+		__field(unsigned long, total_active)
+		__field(unsigned long, active)
+		__field(unsigned long, ratio)
+		__field(int, reclaim_flags)
+	),
+
+	TP_fast_assign(
+		__entry->nid = nid;
+		__entry->reclaim_idx = reclaim_idx;
+		__entry->total_inactive = total_inactive;
+		__entry->inactive = inactive;
+		__entry->total_active = total_active;
+		__entry->active = active;
+		__entry->ratio = ratio;
+		/* Keep only the anon/file LRU bits for pretty-printing. */
+		__entry->reclaim_flags = trace_shrink_flags(file) & RECLAIM_WB_LRU;
+	),
+
+	/* %lu: these fields are unsigned long; %ld would misprint large values */
+	TP_printk("nid=%d reclaim_idx=%d total_inactive=%lu inactive=%lu total_active=%lu active=%lu ratio=%lu flags=%s",
+		__entry->nid,
+		__entry->reclaim_idx,
+		__entry->total_inactive, __entry->inactive,
+		__entry->total_active, __entry->active,
+		__entry->ratio,
+		show_reclaim_flags(__entry->reclaim_flags))
+);
 #endif /* _TRACE_VMSCAN_H */
 
 /* This part must be outside protection */
 
  *   10TB     320        32GB
  */
 static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
-                                               struct scan_control *sc)
+                                               struct scan_control *sc, bool trace)
 {
        unsigned long inactive_ratio;
-       unsigned long inactive;
-       unsigned long active;
+       unsigned long total_inactive, inactive;
+       unsigned long total_active, active;
        unsigned long gb;
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
        int zid;
        if (!file && !total_swap_pages)
                return false;
 
-       inactive = lruvec_lru_size(lruvec, file * LRU_FILE);
-       active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
+       total_inactive = inactive = lruvec_lru_size(lruvec, file * LRU_FILE);
+       total_active = active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
 
        /*
         * For zone-constrained allocations, it is necessary to check if
        else
                inactive_ratio = 1;
 
+       if (trace)
+               trace_mm_vmscan_inactive_list_is_low(pgdat->node_id,
+                               sc->reclaim_idx,
+                               total_inactive, inactive,
+                               total_active, active, inactive_ratio, file);
        return inactive * inactive_ratio < active;
 }
 
                                 struct lruvec *lruvec, struct scan_control *sc)
 {
        if (is_active_lru(lru)) {
-               if (inactive_list_is_low(lruvec, is_file_lru(lru), sc))
+               if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                return 0;
        }
         * lruvec even if it has plenty of old anonymous pages unless the
         * system is under heavy pressure.
         */
-       if (!inactive_list_is_low(lruvec, true, sc) &&
+       if (!inactive_list_is_low(lruvec, true, sc, false) &&
            lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
                scan_balance = SCAN_FILE;
                goto out;
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_list_is_low(lruvec, false, sc))
+       if (inactive_list_is_low(lruvec, false, sc, true))
                shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                   sc, LRU_ACTIVE_ANON);
 }
        do {
                struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 
-               if (inactive_list_is_low(lruvec, false, sc))
+               if (inactive_list_is_low(lruvec, false, sc, true))
                        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                           sc, LRU_ACTIVE_ANON);