 #include <linux/netfs.h>
 #include "internal.h"
 
+static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
+                              loff_t i_size, bool caching);
+
+#ifdef CONFIG_AFS_FSCACHE
 /*
- * mark a page as having been made dirty and thus needing writeback
+ * Mark a page as having been made dirty and thus needing writeback.  We also
+ * need to pin the cache object to write back to.
  */
 int afs_set_page_dirty(struct page *page)
 {
-       _enter("");
-       return __set_page_dirty_nobuffers(page);
+       return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host)));
+}
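+
+/*
+ * Mark a folio as being written to the cache, but only if we're actually
+ * caching.
+ */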
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
+{
+       if (caching)
+               folio_start_fscache(folio);
+}
+#else
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
+{
 }
+#endif
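+
+/*
+ * A rough sketch, assuming the usual hookup (not shown in this patch):
+ * afs_set_page_dirty() above is expected to be installed as the
+ * ->set_page_dirty() handler in the file's address_space_operations,
+ * along the lines of:
+ *
+ *	const struct address_space_operations afs_file_aops = {
+ *		...
+ *		.set_page_dirty	= afs_set_page_dirty,
+ *		...
+ *	};
+ */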
 
 /*
  * prepare to perform part of a write to a page
        unsigned long priv;
        unsigned int f, from = offset_in_folio(folio, pos);
        unsigned int t, to = from + copied;
-       loff_t i_size, maybe_i_size;
+       loff_t i_size, write_end_pos;
 
        _enter("{%llx:%llu},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
        if (copied == 0)
                goto out;
 
-       maybe_i_size = pos + copied;
+       write_end_pos = pos + copied;
 
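+       /* Extend the file if this write runs past the current EOF.  i_size
+        * is checked locklessly first and then rechecked under cb_lock in
+        * case another writer extended the file in the meantime.
+        */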
        i_size = i_size_read(&vnode->vfs_inode);
-       if (maybe_i_size > i_size) {
+       if (write_end_pos > i_size) {
                write_seqlock(&vnode->cb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
-               if (maybe_i_size > i_size)
-                       afs_set_i_size(vnode, maybe_i_size);
+               if (write_end_pos > i_size)
+                       afs_set_i_size(vnode, write_end_pos);
                write_sequnlock(&vnode->cb_lock);
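+               /* Let the cache know that the file has grown so that it can
+                * keep the size of the backing object in step.
+                */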
+               fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
        }
 
        if (folio_test_private(folio)) {
                                 loff_t start,
                                 loff_t max_len,
                                 bool new_content,
+                                bool caching,
                                 unsigned int *_len)
 {
        struct pagevec pvec;
                                folio_put(folio);
                                break;
                        }
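+                       /* A folio that's still being written to the cache
+                        * can't be included, any more than one that's under
+                        * writeback, so stop extending the write here.
+                        */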
-                       if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
+                       if (!folio_test_dirty(folio) ||
+                           folio_test_writeback(folio) ||
+                           folio_test_fscache(folio)) {
                                folio_unlock(folio);
                                folio_put(folio);
                                break;
                                BUG();
                        if (folio_start_writeback(folio))
                                BUG();
+                       afs_folio_start_fscache(caching, folio);
 
                        *_count -= folio_nr_pages(folio);
                        folio_unlock(folio);
        unsigned int offset, to, len, max_len;
        loff_t i_size = i_size_read(&vnode->vfs_inode);
        bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+       bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
        long count = wbc->nr_to_write;
        int ret;
 
 
        if (folio_start_writeback(folio))
                BUG();
+       afs_folio_start_fscache(caching, folio);
 
        count -= folio_nr_pages(folio);
 
                if (len < max_len &&
                    (to == folio_size(folio) || new_content))
                        afs_extend_writeback(mapping, vnode, &count,
-                                            start, max_len, new_content, &len);
+                                            start, max_len, new_content,
+                                            caching, &len);
                len = min_t(loff_t, len, max_len);
        }
 
        if (start < i_size) {
                _debug("write back %x @%llx [%llx]", len, start, i_size);
 
+               /* Speculatively write to the cache.  We have to fix this up
+                * later if the store fails.
+                */
+               afs_write_to_cache(vnode, start, len, i_size, caching);
+
                iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
                ret = afs_store_data(vnode, &iter, start, false);
        } else {
                _debug("write discard %x @%llx [%llx]", len, start, i_size);
 
                /* The dirty region was entirely beyond the EOF. */
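+               /* Clear any PG_fscache marks that were set when the folios
+                * were gathered, since no write to the cache will now take
+                * place.
+                */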
+               fscache_clear_page_bits(afs_vnode_cache(vnode),
+                                       mapping, start, len, caching);
                afs_pages_written_back(vnode, start, len);
                ret = 0;
        }
 
        _enter("{%lx},", folio_index(folio));
 
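+       /* Wait for any in-flight write to the cache to finish with this
+        * folio before beginning another writeback of it.
+        */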
+#ifdef CONFIG_AFS_FSCACHE
+       folio_wait_fscache(folio);
+#endif
+
        start = folio_index(folio) * PAGE_SIZE;
        ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
                                               folio, start, LLONG_MAX - start);
                        continue;
                }
 
-               if (folio_test_writeback(folio)) {
+               if (folio_test_writeback(folio) ||
+                   folio_test_fscache(folio)) {
                        folio_unlock(folio);
-                       if (wbc->sync_mode != WB_SYNC_NONE)
+                       if (wbc->sync_mode != WB_SYNC_NONE) {
                                folio_wait_writeback(folio);
+#ifdef CONFIG_AFS_FSCACHE
+                               folio_wait_fscache(folio);
+#endif
+                       }
                        folio_put(folio);
                        continue;
                }
        folio_wait_fscache(folio);
        return ret;
 }
+
+/*
+ * Deal with the completion of writing the data to the cache.
+ */
+static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
+                                   bool was_async)
+{
+       struct afs_vnode *vnode = priv;
+
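+       /* -ENOBUFS just means the cache declined the write; any other error
+        * leaves the cached data in doubt, so invalidate it.
+        */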
+       if (IS_ERR_VALUE(transferred_or_error) &&
+           transferred_or_error != -ENOBUFS)
+               afs_invalidate_cache(vnode, 0);
+}
+
+/*
+ * Save the write to the cache as well.
+ */
+static void afs_write_to_cache(struct afs_vnode *vnode,
+                              loff_t start, size_t len, loff_t i_size,
+                              bool caching)
+{
+       fscache_write_to_cache(afs_vnode_cache(vnode),
+                              vnode->vfs_inode.i_mapping, start, len, i_size,
+                              afs_write_to_cache_done, vnode, caching);
+}