*
  * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
  *      we update the addresses of corresponding vmas in
- *     event::addr_filters_offs array and bump the event::addr_filters_gen;
+ *     event::addr_filter_ranges array and bump the event::addr_filters_gen;
  * (p2) when an event is scheduled in (pmu::add), it calls
  *      perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
  *      if the generation has changed since the previous call.
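
For context, the (p2) half is not touched by this patch: perf_event_addr_filters_sync() pushes the new addresses down to the PMU only when the generation count has moved. Paraphrased from kernel/events/core.c, not part of this diff:

	void perf_event_addr_filters_sync(struct perf_event *event)
	{
		struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);

		if (!has_addr_filter(event))
			return;

		raw_spin_lock(&ifh->lock);
		if (event->addr_filters_gen != event->hw.addr_filters_gen) {
			event->pmu->addr_filters_sync(event);
			event->hw.addr_filters_gen = event->addr_filters_gen;
		}
		raw_spin_unlock(&ifh->lock);
	}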
 
        perf_event_free_bpf_prog(event);
        perf_addr_filters_splice(event, NULL);
-       kfree(event->addr_filters_offs);
+       kfree(event->addr_filter_ranges);
 
        if (event->destroy)
                event->destroy(event);
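
In perf_event_addr_filters_exec() below, invalidating a file-backed filter across exec() now means clearing both halves of the cached range, not just a start address: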
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
                if (filter->path.dentry) {
-                       event->addr_filters_offs[count] = 0;
+                       event->addr_filter_ranges[count].start = 0;
+                       event->addr_filter_ranges[count].size = 0;
                        restart++;
                }
 
        return true;
 }
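
The per-event bookkeeping changes type: the same patch adds the element type below to include/linux/perf_event.h (reproduced for reference), and struct perf_event carries a `struct perf_addr_filter_range *addr_filter_ranges;` array in place of the old `unsigned long *addr_filters_offs;`:

	struct perf_addr_filter_range {
		unsigned long	start;
		unsigned long	size;
	};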
 
+static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
+                                       struct vm_area_struct *vma,
+                                       struct perf_addr_filter_range *fr)
+{
+       unsigned long vma_size = vma->vm_end - vma->vm_start;
+       unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+       struct file *file = vma->vm_file;
+
+       if (!perf_addr_filter_match(filter, file, off, vma_size))
+               return false;
+
+       if (filter->offset < off) {
+               fr->start = vma->vm_start;
+               fr->size = min(vma_size, filter->size - (off - filter->offset));
+       } else {
+               fr->start = vma->vm_start + filter->offset - off;
+               fr->size = min(vma->vm_end - fr->start, filter->size);
+       }
+
+       return true;
+}
+
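
The clamping in perf_addr_filter_vma_adjust() above is the heart of the fix, so a worked example helps. Here is a hypothetical, self-contained userspace rendering of the same arithmetic (all names invented for illustration; MIN() stands in for the kernel's min(), and the perf_addr_filter_match() gate is assumed to have already passed):

	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	struct range { unsigned long start, size; };

	/* filter covers file offsets [f_off, f_off + f_size);
	 * the vma maps file offset 'off' at [vm_start, vm_end) */
	static void adjust(unsigned long f_off, unsigned long f_size,
			   unsigned long vm_start, unsigned long vm_end,
			   unsigned long off, struct range *fr)
	{
		unsigned long vma_size = vm_end - vm_start;

		if (f_off < off) {
			/* filter begins below this mapping: clamp to vm_start */
			fr->start = vm_start;
			fr->size  = MIN(vma_size, f_size - (off - f_off));
		} else {
			/* filter begins inside this mapping */
			fr->start = vm_start + f_off - off;
			fr->size  = MIN(vm_end - fr->start, f_size);
		}
	}

	int main(void)
	{
		struct range fr;

		/* A: filter [0x1000, 0x4000) in the file; vma maps file
		 * offset 0 (vm_pgoff == 0) at [0x400000, 0x404000) */
		adjust(0x1000, 0x3000, 0x400000, 0x404000, 0x0, &fr);
		printf("A: start=%#lx size=%#lx\n", fr.start, fr.size);
		/* prints: A: start=0x401000 size=0x3000 */

		/* B: same filter; vma maps file offset 0x2000
		 * (vm_pgoff == 2, 4K pages) at [0x600000, 0x602000) */
		adjust(0x1000, 0x3000, 0x600000, 0x602000, 0x2000, &fr);
		printf("B: start=%#lx size=%#lx\n", fr.start, fr.size);
		/* prints: B: start=0x600000 size=0x2000 */

		return 0;
	}

Case B is what the old representation got wrong: with only vm_start on record, a driver adding filter->offset unconditionally would aim at 0x601000 instead of 0x600000 whenever the object was mapped with a non-zero vm_pgoff.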
 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
 {
        struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
        struct vm_area_struct *vma = data;
-       unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
-       struct file *file = vma->vm_file;
        struct perf_addr_filter *filter;
        unsigned int restart = 0, count = 0;
+       unsigned long flags;
 
        if (!has_addr_filter(event))
                return;
 
-       if (!file)
+       if (!vma->vm_file)
                return;
 
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
-               if (perf_addr_filter_match(filter, file, off,
-                                            vma->vm_end - vma->vm_start)) {
-                       event->addr_filters_offs[count] = vma->vm_start;
+               if (perf_addr_filter_vma_adjust(filter, vma,
+                                               &event->addr_filter_ranges[count]))
                        restart++;
-               }
 
                count++;
        }
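
Just below this hunk (unchanged context), a match still bumps restart, which increments event->addr_filters_gen and kicks perf_event_stop() — the (p1) half of the scheme in the comment at the top; (p2) follows when the event is scheduled back in. Roughly:

	if (restart)
		event->addr_filters_gen++;

	raw_spin_unlock_irqrestore(&ifh->lock, flags);

	if (restart)
		perf_event_stop(event, 1);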
  * @filter; if so, adjust filter's address range.
  * Called with mm::mmap_sem down for reading.
  */
-static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
-                                           struct mm_struct *mm)
+static void perf_addr_filter_apply(struct perf_addr_filter *filter,
+                                  struct mm_struct *mm,
+                                  struct perf_addr_filter_range *fr)
 {
        struct vm_area_struct *vma;
 
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
-               struct file *file = vma->vm_file;
-               unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
-               unsigned long vma_size = vma->vm_end - vma->vm_start;
-
-               if (!file)
+               if (!vma->vm_file)
                        continue;
 
-               if (!perf_addr_filter_match(filter, file, off, vma_size))
-                       continue;
-
-               return vma->vm_start;
+               if (perf_addr_filter_vma_adjust(filter, vma, fr))
+                       return;
        }
-
-       return 0;
 }
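
Note the contract change: perf_addr_filter_apply() no longer returns an address with 0 doubling as "no match". It writes *fr only when some vma matches, and the caller below pre-zeroes the range first, so {start = 0, size = 0} unambiguously means "object not mapped".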
 
 /*
 
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
-               event->addr_filters_offs[count] = 0;
+               event->addr_filter_ranges[count].start = 0;
+               event->addr_filter_ranges[count].size = 0;
 
                /*
                 * Adjust base offset if the filter is associated to a binary
                 * that needs to be mapped:
                 */
                if (filter->path.dentry)
-                       event->addr_filters_offs[count] =
-                               perf_addr_filter_apply(filter, mm);
+                       perf_addr_filter_apply(filter, mm,
+                                              &event->addr_filter_ranges[count]);
 
                count++;
        }
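
On the driver side, the same series converts the consumers to the new array. For instance, Intel PT's pt_event_addr_filters_sync() (arch/x86/events/intel/pt.c) now programs the precomputed virtual range rather than adding filter->offset to a stored vm_start — a simplified fragment from memory, MSR bookkeeping elided:

	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	...
	list_for_each_entry(filter, &head->list, entry) {
		if (filter->path.dentry && !fr[range].start) {
			/* file-backed, but not (yet) mapped */
			msr_a = msr_b = 0;
		} else {
			/* fr[] already holds virtual addresses */
			msr_a = fr[range].start;
			msr_b = msr_a + fr[range].size - 1;
		}
		...
		range++;
	}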
                goto err_pmu;
 
        if (has_addr_filter(event)) {
-               event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
-                                                  sizeof(unsigned long),
-                                                  GFP_KERNEL);
-               if (!event->addr_filters_offs) {
+               event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
+                                                   sizeof(struct perf_addr_filter_range),
+                                                   GFP_KERNEL);
+               if (!event->addr_filter_ranges) {
                        err = -ENOMEM;
                        goto err_per_task;
                }
                        struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
 
                        raw_spin_lock_irq(&ifh->lock);
-                       memcpy(event->addr_filters_offs,
-                              event->parent->addr_filters_offs,
-                              pmu->nr_addr_filters * sizeof(unsigned long));
+                       memcpy(event->addr_filter_ranges,
+                              event->parent->addr_filter_ranges,
+                              pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
                        raw_spin_unlock_irq(&ifh->lock);
                }
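
For inherited events, the parent's already-resolved ranges are copied verbatim under ifh->lock: they stay valid for the child until exec() even when the mm is not shared, at which point perf_event_addr_filters_exec() above zeroes them again.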
 
        return event;
 
 err_addr_filters:
-       kfree(event->addr_filters_offs);
+       kfree(event->addr_filter_ranges);
 
 err_per_task:
        exclusive_event_destroy(event);