return false;
        } else {
                set_bit(WB_has_dirty_io, &wb->state);
+               WARN_ON_ONCE(!wb->avg_write_bandwidth);
                atomic_long_add(wb->avg_write_bandwidth,
                                &wb->bdi->tot_write_bandwidth);
                return true;
        if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
            list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
                clear_bit(WB_has_dirty_io, &wb->state);
-               atomic_long_sub(wb->avg_write_bandwidth,
-                               &wb->bdi->tot_write_bandwidth);
+               WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
+                                       &wb->bdi->tot_write_bandwidth) < 0);
        }
 }
 
 
        unsigned long dirtied_stamp;
        unsigned long written_stamp;    /* pages written at bw_time_stamp */
        unsigned long write_bandwidth;  /* the estimated write bandwidth */
-       unsigned long avg_write_bandwidth; /* further smoothed write bw */
+       unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */
 
        /*
         * The base dirty throttle rate, re-calculated on every 200ms.
        unsigned int min_ratio;
        unsigned int max_ratio, max_prop_frac;
 
-       atomic_long_t tot_write_bandwidth; /* sum of active avg_write_bw */
+       /*
+        * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
+        * any dirty wbs, which is depended upon by bdi_has_dirty_io().
+        */
+       atomic_long_t tot_write_bandwidth;
 
        struct bdi_writeback wb;  /* the root writeback info for this bdi */
        struct bdi_writeback_congested wb_congested; /* its congested state */
 
                        enum wb_reason reason);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
 void wb_workfn(struct work_struct *work);
-bool bdi_has_dirty_io(struct backing_dev_info *bdi);
 void wb_wakeup_delayed(struct bdi_writeback *wb);
 
 extern spinlock_t bdi_lock;
        return test_bit(WB_has_dirty_io, &wb->state);
 }
 
+static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
+{
+       /*
+        * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
+        * any dirty wbs.  See wb_update_write_bandwidth().  The non-zero
+        * long implicitly converts to true, so no comparison is needed.
+        */
+       return atomic_long_read(&bdi->tot_write_bandwidth);
+}
+
 static inline void __add_wb_stat(struct bdi_writeback *wb,
                                 enum wb_stat_item item, s64 amount)
 {
 
 }
 subsys_initcall(default_bdi_init);
 
-bool bdi_has_dirty_io(struct backing_dev_info *bdi)
-{
-       return wb_has_dirty_io(&bdi->wb);
-}
-
 /*
  * This function is used when the first inode for this wb is marked dirty. It
  * wakes-up the corresponding bdi thread which should then take care of the
 
                avg += (old - avg) >> 3;
 
 out:
-       if (wb_has_dirty_io(wb))
-               atomic_long_add(avg - wb->avg_write_bandwidth,
-                               &wb->bdi->tot_write_bandwidth);
+       /* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
+       avg = max(avg, 1LU);
+       if (wb_has_dirty_io(wb)) {
+               long delta = avg - wb->avg_write_bandwidth;
+               WARN_ON_ONCE(atomic_long_add_return(delta,
+                                       &wb->bdi->tot_write_bandwidth) <= 0);
+       }
        wb->write_bandwidth = bw;
        wb->avg_write_bandwidth = avg;
 }