#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_trace.h"
+#include "xfs_fsops.h"
 
 kmem_zone_t    *xfs_log_ticket_zone;
 
 }
 
 /*
- * Finish the recovery of the file system.  This is separate from
- * the xfs_log_mount() call, because it depends on the code in
- * xfs_mountfs() to read in the root and real-time bitmap inodes
- * between calling xfs_log_mount() and here.
+ * Finish the recovery of the file system.  This is separate from the
+ * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
+ * in the root and real-time bitmap inodes between calling xfs_log_mount() and
+ * here.
  *
- * mp          - ubiquitous xfs mount point structure
+ * If we finish recovery successfully, start the background log work. If we
+ * are not doing recovery, then we have a read-only filesystem and we don't
+ * need to start it.
  */
 int
 xfs_log_mount_finish(xfs_mount_t *mp)
 {
-       int     error;
+       int     error = 0;
 
-       if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
+       if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
                error = xlog_recover_finish(mp->m_log);
-       else {
-               error = 0;
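+               /* recovery succeeded: start the background log work */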
+               if (!error)
+                       xfs_log_work_queue(mp);
+       } else {
                ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
        }
 
        return error;
 }
 
 void
 xfs_log_unmount(xfs_mount_t *mp)
 {
-       cancel_delayed_work_sync(&mp->m_sync_work);
+       cancel_delayed_work_sync(&mp->m_log->l_work);
        xfs_trans_ail_destroy(mp);
        xlog_dealloc_log(mp->m_log);
 }
 }      /* xlog_get_iclog_buffer_size */
 
 
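+/*
+ * Queue the next background log work iteration. The delay comes from the
+ * xfs_syncd_centisecs sysctl (fs.xfs.xfssyncd_centisecs), which is specified
+ * in centiseconds, hence the multiply by ten to convert to milliseconds.
+ */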
+void
+xfs_log_work_queue(
+       struct xfs_mount        *mp)
+{
+       queue_delayed_work(xfs_syncd_wq, &mp->m_log->l_work,
+                               msecs_to_jiffies(xfs_syncd_centisecs * 10));
+}
+
+/*
+ * Every sync period we need to unpin all items in the AIL and push them to
+ * disk. If there is nothing dirty, then we might need to cover the log to
+ * indicate that the filesystem is idle.
+ */
+void
+xfs_log_worker(
+       struct work_struct      *work)
+{
+       struct xlog             *log = container_of(to_delayed_work(work),
+                                               struct xlog, l_work);
+       struct xfs_mount        *mp = log->l_mp;
+
+       /* dgc: errors ignored - not fatal and nowhere to report them */
+       if (xfs_log_need_covered(mp))
+               xfs_fs_log_dummy(mp);
+       else
+               xfs_log_force(mp, 0);
+
+       /* start pushing all the metadata that is currently dirty */
+       xfs_ail_push_all(mp->m_ail);
+
+       /* queue us up again */
+       xfs_log_work_queue(mp);
+}
+
 /*
  * This routine initializes some of the log structure for a given mount point.
  * Its primary purpose is to fill in enough, so recovery can occur.  However,
        log->l_logBBsize   = num_bblks;
        log->l_covered_state = XLOG_STATE_COVER_IDLE;
        log->l_flags       |= XLOG_ACTIVE_RECOVERY;
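+       /*
+        * Initialise the background log work here; it is not queued until
+        * recovery completes or the filesystem becomes writable again.
+        */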
+       INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
 
        log->l_prev_block  = -1;
        /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
        } while (iclog != log->l_iclog);
        return 1;
 }
 
                                xfs_lsn_t *commit_lsn, int flags);
 bool   xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
 
+void   xfs_log_work_queue(struct xfs_mount *mp);
+void   xfs_log_worker(struct work_struct *work);
+
 #endif
 #endif /* __XFS_LOG_H__ */
 
        struct xfs_buf          *l_xbuf;        /* extra buffer for log
                                                 * wrapping */
        struct xfs_buftarg      *l_targ;        /* buftarg of log */
+       struct delayed_work     l_work;         /* background flush work */
        uint                    l_flags;
        uint                    l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
        struct list_head        *l_buf_cancel_table;
 
        struct mutex            m_icsb_mutex;   /* balancer sync lock */
 #endif
        struct xfs_mru_cache    *m_filestream;  /* per-mount filestream data */
-       struct delayed_work     m_sync_work;    /* background sync work */
        struct delayed_work     m_reclaim_work; /* background inode reclaim */
        struct work_struct      m_flush_work;   /* background inode flush */
        __int64_t               m_update_flags; /* sb flags we need to update
 
 {
        struct xfs_mount        *mp = XFS_M(sb);
 
-       cancel_delayed_work_sync(&mp->m_sync_work);
        cancel_work_sync(&mp->m_flush_work);
 
        xfs_filestream_unmount(mp);
        if (laptop_mode) {
                /*
                 * The disk must be active because we're syncing.
-                * We schedule xfssyncd now (now that the disk is
+                * We schedule log work now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
-               flush_delayed_work(&mp->m_sync_work);
+               flush_delayed_work(&mp->m_log->l_work);
        }
 
        return 0;
                 * value if it is non-zero, otherwise go with the default.
                 */
                xfs_restore_resvblks(mp);
-               xfs_syncd_queue_sync(mp);
+               xfs_log_work_queue(mp);
        }
 
        /* rw -> ro */
        struct xfs_mount        *mp = XFS_M(sb);
 
        xfs_restore_resvblks(mp);
-       xfs_syncd_queue_sync(mp);
+       xfs_log_work_queue(mp);
        return 0;
 }
 
        mutex_init(&mp->m_growlock);
        atomic_set(&mp->m_active_trans, 0);
        INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
-       INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
        INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
 
        mp->m_super = sb;
                goto out_unmount;
        }
 
-       /*
-        * The filesystem is successfully mounted, so we can start background
-        * sync work now.
-        */
-       xfs_syncd_queue_sync(mp);
-
        return 0;
 
  out_filestream_unmount:
 
 #include "xfs_fs.h"
 #include "xfs_types.h"
 #include "xfs_log.h"
+#include "xfs_log_priv.h"
 #include "xfs_inum.h"
 #include "xfs_trans.h"
 #include "xfs_trans_priv.h"
        /* flush all pending changes from the AIL */
        xfs_ail_push_all_sync(mp->m_ail);
 
-       /* stop background sync work */
-       cancel_delayed_work_sync(&mp->m_sync_work);
+       /* stop background log work */
+       cancel_delayed_work_sync(&mp->m_log->l_work);
 
        /*
         * Just warn here till VFS can correctly support
        xfs_buf_unlock(mp->m_sb_bp);
 }
 
-void
-xfs_syncd_queue_sync(
-       struct xfs_mount        *mp)
-{
-       queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
-                               msecs_to_jiffies(xfs_syncd_centisecs * 10));
-}
-
-/*
- * Every sync period we need to push dirty metadata and try to cover the log
- * to indicate the filesystem is idle and not frozen.
- */
-void
-xfs_sync_worker(
-       struct work_struct *work)
-{
-       struct xfs_mount *mp = container_of(to_delayed_work(work),
-                                       struct xfs_mount, m_sync_work);
-       int             error;
-
-       /* dgc: errors ignored here */
-       if (mp->m_super->s_writers.frozen == SB_UNFROZEN &&
-           xfs_log_need_covered(mp))
-               error = xfs_fs_log_dummy(mp);
-       else
-               xfs_log_force(mp, 0);
-
-       /* start pushing all the metadata that is currently dirty */
-       xfs_ail_push_all(mp->m_ail);
-
-       /* queue us up again */
-       xfs_syncd_queue_sync(mp);
-}
-
 /*
  * Queue a new inode reclaim pass if there are reclaimable inodes and there
  * isn't a reclaim pass already in progress. By default it runs every 5s based
 
 
 extern struct workqueue_struct *xfs_syncd_wq;  /* sync workqueue */
 
-void xfs_syncd_queue_sync(struct xfs_mount *mp);
-void xfs_sync_worker(struct work_struct *work);
 void xfs_flush_worker(struct work_struct *work);
 void xfs_reclaim_worker(struct work_struct *work);