/*
* fiemap_fill_next_extent_k - a copy of fiemap_fill_next_extent
- * but it accepts kernel address for fi_extents_start
+ * but it writes extents into the kernel buffer 'fe_k' instead of fieinfo->fi_extents_start
*/
static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo,
- u64 logical, u64 phys, u64 len, u32 flags)
+ struct fiemap_extent *fe_k, u64 logical,
+ u64 phys, u64 len, u32 flags)
{
struct fiemap_extent extent;
- struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
/* only count the extents */
if (fieinfo->fi_extents_max == 0) {
extent.fe_length = len;
extent.fe_flags = flags;
- dest += fieinfo->fi_extents_mapped;
- memcpy(dest, &extent, sizeof(extent));
+ memcpy(fe_k + fieinfo->fi_extents_mapped, &extent, sizeof(extent));
fieinfo->fi_extents_mapped++;
if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
__u64 vbo, __u64 len)
{
int err = 0;
- struct fiemap_extent __user *fe_u = fieinfo->fi_extents_start;
struct fiemap_extent *fe_k = NULL;
struct ntfs_sb_info *sbi = ni->mi.sbi;
u8 cluster_bits = sbi->cluster_bits;
err = -ENOMEM;
goto out;
}
- fieinfo->fi_extents_start = fe_k;
end = vbo + len;
alloc_size = le64_to_cpu(attr->nres.alloc_size);
if (vbo + dlen >= end)
flags |= FIEMAP_EXTENT_LAST;
- err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, dlen,
- flags);
+ err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo,
+ dlen, flags);
if (err < 0)
break;
if (vbo + bytes >= end)
flags |= FIEMAP_EXTENT_LAST;
- err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, bytes,
+ err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo, bytes,
flags);
if (err < 0)
break;
/*
* Copy extents to user memory out of the lock.
*/
- if (copy_to_user(fe_u, fe_k,
+ if (copy_to_user(fieinfo->fi_extents_start, fe_k,
fieinfo->fi_extents_max *
sizeof(struct fiemap_extent))) {
err = -EFAULT;
}
out:
- /* Restore original pointer. */
- fieinfo->fi_extents_start = fe_u;
kfree(fe_k);
return err;
}
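
For readers skimming the diff: the helper keeps accumulating extents in a kernel-side buffer while the mapping lock is held, and the caller copies that buffer to user memory only after the lock is dropped; the change here is that the buffer is now passed to the helper as an explicit 'fe_k' argument instead of being temporarily stored in fieinfo->fi_extents_start and restored afterwards. Below is a minimal, self-contained userspace sketch of that two-phase pattern. It is an illustration only: struct extent_rec, fill_next(), map_lock, tmp and dst are made-up names, a pthread mutex stands in for the filesystem lock, and a plain memcpy stands in for copy_to_user().

/* Sketch only: names below are illustrative, not the ntfs3/kernel API. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct extent_rec {                     /* stand-in for struct fiemap_extent */
	unsigned long long logical;
	unsigned long long length;
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of fiemap_fill_next_extent_k(): write into the buffer it is given. */
static int fill_next(struct extent_rec *buf, unsigned int *mapped,
		     unsigned int max, unsigned long long logical,
		     unsigned long long length)
{
	if (*mapped >= max)
		return 1;               /* destination full, stop mapping */
	buf[*mapped].logical = logical;
	buf[*mapped].length = length;
	(*mapped)++;
	return 0;
}

int main(void)
{
	struct extent_rec tmp[4];       /* kernel-side buffer, like fe_k */
	struct extent_rec dst[4];       /* caller's buffer, like fi_extents_start */
	unsigned int mapped = 0;

	/* Phase 1: build the extent list while the lock is held. */
	pthread_mutex_lock(&map_lock);
	fill_next(tmp, &mapped, 4, 0, 4096);
	fill_next(tmp, &mapped, 4, 8192, 4096);
	pthread_mutex_unlock(&map_lock);

	/* Phase 2: copy out after the lock is dropped (copy_to_user in the kernel). */
	memcpy(dst, tmp, mapped * sizeof(*tmp));

	for (unsigned int i = 0; i < mapped; i++)
		printf("extent %u: logical=%llu len=%llu\n",
		       i, dst[i].logical, dst[i].length);
	return 0;
}

Passing the destination buffer explicitly also removes the save/override/restore dance on fieinfo->fi_extents_start that the deleted lines above performed, so the __user pointer in fieinfo never temporarily holds a kernel address.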