  * @fm_size: fastmap size in bytes
  * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
  * @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work is scheduled
  *
  * @used: RB-tree of used physical eraseblocks
  * @erroneous: RB-tree of erroneous used physical eraseblocks
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
  *          @move_to, @move_to_put, @erase_pending, @wl_scheduled, @works,
- *          @erroneous, and @erroneous_peb_count fields
+ *          @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: used to wait for all the scheduled works to finish and prevent
  * new works from being submitted
        void *fm_buf;
        size_t fm_size;
        struct work_struct fm_work;
+       int fm_work_scheduled;
 
        /* Wear-leveling sub-system's stuff */
        struct rb_root used;
 
 {
        struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
        ubi_update_fastmap(ubi);
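+       /* The work is done; clear the flag under wl_lock so that a new
+        * fastmap work can be scheduled once the pools run empty again. */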
+       spin_lock(&ubi->wl_lock);
+       ubi->fm_work_scheduled = 0;
+       spin_unlock(&ubi->wl_lock);
 }
 
 /**
                /* We cannot update the fastmap here because this
                 * function is called in atomic context.
                 * Let's fail here and refill/update it as soon as possible. */
-               schedule_work(&ubi->fm_work);
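+               /* Schedule the fastmap work at most once; the flag is
+                * cleared again after the work function has run. */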
+               if (!ubi->fm_work_scheduled) {
+                       ubi->fm_work_scheduled = 1;
+                       schedule_work(&ubi->fm_work);
+               }
                return NULL;
        } else {
                pnum = pool->pebs[pool->used++];