return le32_to_cpu(dlen);
  }
  
 -      write_compress_length(page_address(cur_page) + offset_in_page(*cur_out),
+ /*
+  * Will do:
+  *
+  * - Write a segment header into the destination
+  * - Copy the compressed buffer into the destination
+  * - Make sure we have enough space in the last sector to fit a segment
+  *   header. If not, pad the rest of the sector (at most LZO_LEN - 1 bytes)
+  *   with zeros.
+  *
+  * Will allocate new pages when needed.
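+  *
+  * Note: a segment header is simply the compressed length of the segment,
+  * stored as a 4 byte (LZO_LEN) little-endian value.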
+  */
+ static int copy_compressed_data_to_page(char *compressed_data,
+                                       size_t compressed_size,
+                                       struct page **out_pages,
+                                       u32 *cur_out,
+                                       const u32 sectorsize)
+ {
+       u32 sector_bytes_left;
+       u32 orig_out;
+       struct page *cur_page;
++      char *kaddr;
+ 
+       /*
+        * We never allow a segment header to cross a sector boundary; the
+        * previous run should have ensured there is enough space left inside
+        * the sector.
+        */
+       ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);
+ 
+       cur_page = out_pages[*cur_out / PAGE_SIZE];
+       /* Allocate a new page */
+       if (!cur_page) {
+               cur_page = alloc_page(GFP_NOFS);
+               if (!cur_page)
+                       return -ENOMEM;
+               out_pages[*cur_out / PAGE_SIZE] = cur_page;
+       }
+ 
 -              memcpy(page_address(cur_page) + offset_in_page(*cur_out),
++      kaddr = kmap(cur_page);
++      write_compress_length(kaddr + offset_in_page(*cur_out),
+                             compressed_size);
+       *cur_out += LZO_LEN;
+ 
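+       /* Remember where the compressed payload starts in the output */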
+       orig_out = *cur_out;
+ 
+       /* Copy compressed data */
+       while (*cur_out - orig_out < compressed_size) {
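+               /*
+                * Copy at most up to the end of the current sector, so the
+                * memcpy below never crosses a sector (thus never a page)
+                * boundary.
+                */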
+               u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
+                                    orig_out + compressed_size - *cur_out);
+ 
++              kunmap(cur_page);
+               cur_page = out_pages[*cur_out / PAGE_SIZE];
+               /* Allocate a new page */
+               if (!cur_page) {
+                       cur_page = alloc_page(GFP_NOFS);
+                       if (!cur_page)
+                               return -ENOMEM;
+                       out_pages[*cur_out / PAGE_SIZE] = cur_page;
+               }
++              kaddr = kmap(cur_page);
+ 
 -              return 0;
++              memcpy(kaddr + offset_in_page(*cur_out),
+                      compressed_data + *cur_out - orig_out, copy_len);
+ 
+               *cur_out += copy_len;
+       }
+ 
+       /*
+        * Check if we can fit the next segment header into the remaining space
+        * of the sector.
+        */
+       sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
+       if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
 -      memset(page_address(cur_page) + offset_in_page(*cur_out), 0,
++              goto out;
+ 
+       /* Not enough space left for a segment header, pad with zeros */
++      memset(kaddr + offset_in_page(*cur_out), 0,
+              sector_bytes_left);
+       *cur_out += sector_bytes_left;
++
++out:
++      kunmap(cur_page);
+       return 0;
+ }
+ 
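+ /*
+  * The compressed output produced below is segment based:
+  *
+  * - The first LZO_LEN (4) bytes store the total compressed size, filled in
+  *   at the end of lzo_compress_pages().
+  * - Each compressed segment is preceded by its own 4 byte little-endian
+  *   length header.
+  * - A segment header never crosses a sector boundary.  When fewer than
+  *   LZO_LEN bytes remain in a sector they are zero padded, and the next
+  *   header starts at the following sector.
+  */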
  int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                u64 start, struct page **pages, unsigned long *out_pages,
                unsigned long *total_in, unsigned long *total_out)
  {
        struct workspace *workspace = list_entry(ws, struct workspace, list);
+       const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
+       struct page *page_in = NULL;
++      char *sizes_ptr;
        int ret = 0;
-       char *data_in;
-       char *cpage_out, *sizes_ptr;
-       int nr_pages = 0;
-       struct page *in_page = NULL;
-       struct page *out_page = NULL;
-       unsigned long bytes_left;
-       unsigned long len = *total_out;
-       unsigned long nr_dest_pages = *out_pages;
-       const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
-       size_t in_len;
-       size_t out_len;
-       char *buf;
-       unsigned long tot_in = 0;
-       unsigned long tot_out = 0;
-       unsigned long pg_bytes_left;
-       unsigned long out_offset;
-       unsigned long bytes;
+       /* File offset of the input data currently being processed */
+       u64 cur_in = start;
+       /* Byte offset of the current position in the output */
+       u32 cur_out = 0;
+       u32 len = *total_out;
  
        *out_pages = 0;
        *total_out = 0;
        *total_in = 0;
  
-       in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-       data_in = kmap(in_page);
- 
        /*
-        * store the size of all chunks of compressed data in
-        * the first 4 bytes
+        * Skip the header for now; we will come back later and write the
+        * total compressed size.
         */
-       out_page = alloc_page(GFP_NOFS);
-       if (out_page == NULL) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       cpage_out = kmap(out_page);
-       out_offset = LZO_LEN;
-       tot_out = LZO_LEN;
-       pages[0] = out_page;
-       nr_pages = 1;
-       pg_bytes_left = PAGE_SIZE - LZO_LEN;
- 
-       /* compress at most one page of data each time */
-       in_len = min(len, PAGE_SIZE);
-       while (tot_in < len) {
-               ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
-                                      &out_len, workspace->mem);
-               if (ret != LZO_E_OK) {
-                       pr_debug("BTRFS: lzo in loop returned %d\n",
-                              ret);
+       cur_out += LZO_LEN;
+       while (cur_in < start + len) {
++              char *data_in;
+               const u32 sectorsize_mask = sectorsize - 1;
+               u32 sector_off = (cur_in - start) & sectorsize_mask;
+               u32 in_len;
+               size_t out_len;
+ 
+               /* Get the input page first */
+               if (!page_in) {
+                       page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
+                       ASSERT(page_in);
+               }
+ 
+               /* Compress at most one sector of data each time */
+               in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
+               ASSERT(in_len);
 -              ret = lzo1x_1_compress(page_address(page_in) +
++              data_in = kmap(page_in);
++              ret = lzo1x_1_compress(data_in +
+                                      offset_in_page(cur_in), in_len,
+                                      workspace->cbuf, &out_len,
+                                      workspace->mem);
++              kunmap(page_in);
+               if (ret < 0) {
+                       pr_debug("BTRFS: lzo in loop returned %d\n", ret);
                        ret = -EIO;
                        goto out;
                }
+
+               ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
+                                                  pages, &cur_out, sectorsize);
+               if (ret < 0)
+                       goto out;
+
+               cur_in += in_len;
+
+               /*
+                * Check if we are making the output bigger after two sectors;
+                * if so, give up.
+                */
+               if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
+                       ret = -E2BIG;
                        goto out;
                }
  
-               /* we're all done */
-               if (tot_in >= len)
-                       break;
- 
-               if (tot_out > max_out)
-                       break;
- 
-               bytes_left = len - tot_in;
-               kunmap(in_page);
-               put_page(in_page);
- 
-               start += PAGE_SIZE;
-               in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-               data_in = kmap(in_page);
-               in_len = min(bytes_left, PAGE_SIZE);
-       }
- 
-       if (tot_out >= tot_in) {
-               ret = -E2BIG;
-               goto out;
+               /* Check if we have reached a page boundary */
+               if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
+                       put_page(page_in);
+                       page_in = NULL;
+               }
        }
  
-       /* store the size of all chunks of compressed data */
+       /* Store the size of all chunks of compressed data */
 -      write_compress_length(page_address(pages[0]), cur_out);
 +      sizes_ptr = kmap_local_page(pages[0]);
-       write_compress_length(sizes_ptr, tot_out);
++      write_compress_length(sizes_ptr, cur_out);
 +      kunmap_local(sizes_ptr);
  
        ret = 0;
-       *total_out = tot_out;
-       *total_in = tot_in;
+       *total_out = cur_out;
+       *total_in = cur_in - start;
  out:
-       *out_pages = nr_pages;
-       if (out_page)
-               kunmap(out_page);
- 
-       if (in_page) {
-               kunmap(in_page);
-               put_page(in_page);
-       }
- 
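+       /*
+        * Release the input page we may still hold and report how many
+        * output pages were used (a partially filled last page counts).
+        */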
+       if (page_in)
+               put_page(page_in);
+       *out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
        return ret;
  }