{
        struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
        struct page *page = sio->bvec[0].bv_page;
+       int p;
 
-       if (ret != PAGE_SIZE) {
+       if (ret != PAGE_SIZE * sio->pages) {
                /*
                 * In the case of swap-over-nfs, this can be a
                 * temporary failure if the system has limited
                 * memory for allocating transmit buffers.
                 * Mark the page dirty and avoid
                 * folio_rotate_reclaimable but rate-limit the
                 * messages but do not flag as an error.
                 */
-               set_page_dirty(page);
-               ClearPageReclaim(page);
                pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
                                   ret, page_file_offset(page));
+               for (p = 0; p < sio->pages; p++) {
+                       page = sio->bvec[p].bv_page;
+                       set_page_dirty(page);
+                       ClearPageReclaim(page);
+               }
        } else
-               count_vm_event(PSWPOUT);
-       end_page_writeback(page);
+               count_vm_events(PSWPOUT, sio->pages);
+
+       for (p = 0; p < sio->pages; p++)
+               end_page_writeback(sio->bvec[p].bv_page);
+
        mempool_free(sio, sio_pool);
 }
 
static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
-       struct swap_iocb *sio;
+       struct swap_iocb *sio = NULL;
        struct swap_info_struct *sis = page_swap_info(page);
        struct file *swap_file = sis->swap_file;
-       struct address_space *mapping = swap_file->f_mapping;
-       struct iov_iter from;
-       int ret;
+       loff_t pos = page_file_offset(page);
 
        set_page_writeback(page);
        unlock_page(page);
+       /*
+        * If the caller supplied a plug, pick up any swap_iocb already
+        * being assembled so this page can be batched with earlier ones.
+        */
+       if (wbc->swap_plug)
+               sio = *wbc->swap_plug;
+       if (sio) {
+               /*
+                * The pending sio can only be extended if this page targets
+                * the same swap file and is contiguous with the pages
+                * already queued; otherwise submit it and start afresh.
+                */
+               if (sio->iocb.ki_filp != swap_file ||
+                   sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
+                       swap_write_unplug(sio);
+                       sio = NULL;
+               }
+       }
+       if (!sio) {
+               sio = mempool_alloc(sio_pool, GFP_NOIO);
+               init_sync_kiocb(&sio->iocb, swap_file);
+               sio->iocb.ki_complete = sio_write_complete;
+               sio->iocb.ki_pos = pos;
+               sio->pages = 0;
+       }
+       sio->bvec[sio->pages].bv_page = page;
+       sio->bvec[sio->pages].bv_len = PAGE_SIZE;
+       sio->bvec[sio->pages].bv_offset = 0;
+       sio->pages += 1;
+       /*
+        * Submit now if the bvec array is full, or if no plug was supplied
+        * (with no plug there is nothing to flush the sio later).
+        */
+       if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
+               swap_write_unplug(sio);
+               sio = NULL;
+       }
+       if (wbc->swap_plug)
+               *wbc->swap_plug = sio;
+
+       return 0;
}
 
 int __swap_writepage(struct page *page, struct writeback_control *wbc,
-               bio_end_io_t end_write_func)
+                    bio_end_io_t end_write_func)
 {
        struct bio *bio;
        int ret;
        return 0;
 }
 
+/*
+ * swap_write_unplug() - submit a batch of swap-out pages.
+ * @sio: swap_iocb holding the pages queued by swap_writepage_fs().
+ *
+ * Build a bvec iterator over the queued pages and hand it to the backing
+ * file's ->swap_rw().  If the call completes synchronously (anything
+ * other than -EIOCBQUEUED), run the write-completion handler here.
+ */
+void swap_write_unplug(struct swap_iocb *sio)
+{
+       struct iov_iter from;
+       struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
+       int ret;
+
+       iov_iter_bvec(&from, WRITE, sio->bvec, sio->pages,
+                     PAGE_SIZE * sio->pages);
+       ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
+       if (ret != -EIOCBQUEUED)
+               sio_write_complete(&sio->iocb, ret);
+}
+
 static void sio_read_complete(struct kiocb *iocb, long ret)
 {
        struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
 
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
  */
-static pageout_t pageout(struct folio *folio, struct address_space *mapping)
+static pageout_t pageout(struct folio *folio, struct address_space *mapping,
+                        struct swap_iocb **plug)
 {
        /*
         * If the folio is dirty, only perform writeback if that write
                        .range_start = 0,
                        .range_end = LLONG_MAX,
                        .for_reclaim = 1,
+                       .swap_plug = plug,
                };
 
                folio_set_reclaim(folio);
        unsigned int nr_reclaimed = 0;
        unsigned int pgactivate = 0;
        bool do_demote_pass;
+       struct swap_iocb *plug = NULL;
 
        memset(stat, 0, sizeof(*stat));
        cond_resched();
                         * starts and then write it out here.
                         */
                        try_to_unmap_flush_dirty();
-                       switch (pageout(folio, mapping)) {
+                       switch (pageout(folio, mapping, &plug)) {
                        case PAGE_KEEP:
                                goto keep_locked;
                        case PAGE_ACTIVATE:
        list_splice(&ret_pages, page_list);
        count_vm_events(PGACTIVATE, pgactivate);
 
+       if (plug)
+               swap_write_unplug(plug);
        return nr_reclaimed;
 }