@@ ... @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
 
+               /*
+                * Synchronize only while the log writer thread is alive.
+                * Leave flushing out after the log writer thread exits to
+                * the cleanup work in nilfs_segctor_destroy().
+                */
+               if (!sci->sc_task)
+                       break;
+
                if (atomic_read(&wait_req.done)) {
                        err = wait_req.err;
                        break;
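
The early exit added above closes the window where a task queues a wait
request just as the log writer thread is exiting: with no writer left to
advance sc_seq_done, such a waiter would otherwise sleep forever. The
unlocked read of sci->sc_task follows the usual wait-loop discipline:
set_current_state(TASK_INTERRUPTIBLE) is issued before the condition is
tested, and the teardown path clears sc_task before issuing the forced
wakeup added further below, so a waiter either observes the NULL pointer
or receives the wakeup. A userspace analogue of the full handshake
follows the last hunk.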
@@ ... @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
        return err;
 }
 
-static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
+static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
 {
        struct nilfs_segctor_wait_request *wrq, *n;
        unsigned long flags;

        spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
                if (!atomic_read(&wrq->done) &&
-                   nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
+                   (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
                        wrq->err = err;
                        atomic_set(&wrq->done, 1);
                }
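
For reference, nilfs_cnt32_ge() is the wraparound-safe "a >= b" test on
the 32-bit sequence counters, in the style of the kernel's
time_after_eq(); with force set, that test is bypassed so every
still-pending request is completed. A minimal standalone sketch of the
comparison (cnt32_ge and the test values are illustrative, not part of
the patch):

        #include <assert.h>
        #include <stdint.h>

        /*
         * Wraparound-safe "a >= b" for 32-bit counters: the signed
         * difference is non-negative iff a is at or ahead of b, valid
         * while the two counters are less than 2^31 apart.
         */
        static int cnt32_ge(uint32_t a, uint32_t b)
        {
                return (int32_t)(a - b) >= 0;
        }

        int main(void)
        {
                assert(cnt32_ge(5, 3));
                assert(!cnt32_ge(3, 5));
                /* 2 is "ahead of" 0xfffffffe across the wrap point */
                assert(cnt32_ge(2, 0xfffffffeU));
                assert(!cnt32_ge(0xfffffffeU, 2));
                return 0;
        }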
@@ ... @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
        if (mode == SC_LSEG_SR) {
                sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
                sci->sc_seq_done = sci->sc_seq_accepted;
-               nilfs_segctor_wakeup(sci, err);
+               nilfs_segctor_wakeup(sci, err, false);
                sci->sc_flush_request = 0;
        } else {
                if (mode == SC_FLUSH_FILE)
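
The call site on the normal completion path keeps its behavior: passing
force == false leaves the sequence-number test in charge of deciding
which waiters are done, exactly as before the signature change.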
@@ ... @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
                || sci->sc_seq_request != sci->sc_seq_done);
        spin_unlock(&sci->sc_state_lock);
 
+       /*
+        * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
+        * be called from delayed iput() via nilfs_evict_inode() and can race
+        * with the above log writer thread termination.
+        */
+       nilfs_segctor_wakeup(sci, 0, true);
+
        if (flush_work(&sci->sc_iput_work))
                flag = true;
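
The ordering in nilfs_segctor_destroy() is what makes the scheme safe:
the log writer thread termination above leaves sci->sc_task NULL (under
sc_state_lock), and only then is the forced wakeup issued, so a task
entering nilfs_segctor_sync() afterwards bails out on its own, while
anyone already queued is released here with err == 0. A compilable
userspace analogue of the same handshake, using pthreads (all names
hypothetical; a sketch of the pattern, not the kernel mechanism):

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        struct sync_point {
                pthread_mutex_t lock;
                pthread_cond_t cond;
                bool done;      /* request completed normally */
                bool shutdown;  /* worker thread has exited */
        };

        /*
         * Waiter side, mirroring nilfs_segctor_sync(): give up once
         * the worker is gone instead of sleeping for a wakeup that can
         * no longer arrive.
         */
        static void *waiter(void *arg)
        {
                struct sync_point *sp = arg;

                pthread_mutex_lock(&sp->lock);
                while (!sp->done && !sp->shutdown)
                        pthread_cond_wait(&sp->cond, &sp->lock);
                printf("waiter released (%s)\n",
                       sp->done ? "done" : "shutdown");
                pthread_mutex_unlock(&sp->lock);
                return NULL;
        }

        /*
         * Teardown side, mirroring nilfs_segctor_destroy(): publish
         * the shutdown under the lock, then force-wake every waiter.
         */
        static void shutdown_and_wake(struct sync_point *sp)
        {
                pthread_mutex_lock(&sp->lock);
                sp->shutdown = true;
                pthread_cond_broadcast(&sp->cond);
                pthread_mutex_unlock(&sp->lock);
        }

        int main(void)
        {
                struct sync_point sp = {
                        .lock = PTHREAD_MUTEX_INITIALIZER,
                        .cond = PTHREAD_COND_INITIALIZER,
                };
                pthread_t t;

                pthread_create(&t, NULL, waiter, &sp);
                shutdown_and_wake(&sp);
                pthread_join(&t, NULL);
                return 0;
        }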