return 0;
}
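+/*
+ * All valid accesses are satisfied by the eager remap_pfn_range() in
+ * mmap_kcore(); any fault that still reaches this handler is
+ * unexpected, so answer it with SIGBUS.
+ */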
+static vm_fault_t mmap_kcore_fault(struct vm_fault *vmf)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct kcore_mmap_ops = {
+ .fault = mmap_kcore_fault,
+};
+
+static int mmap_kcore(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+ u64 start, pfn;
+ int nphdr;
+ size_t data_offset;
+ size_t phdrs_len, notes_len;
+ struct kcore_list *m = NULL;
+ int ret = 0;
+
+ down_read(&kclist_lock);
+
+ get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
+
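+ /*
+ * Translate the mapped file offset back to a kernel virtual address:
+ * vm_pgoff counts pages from the start of the file, so strip off the
+ * page-aligned size of the ELF headers and notes (data_offset) that
+ * precede the actual memory image.
+ */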
+ start = kc_offset_to_vaddr(((u64)vma->vm_pgoff << PAGE_SHIFT) -
+ ((data_offset >> PAGE_SHIFT) << PAGE_SHIFT));
+
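+ /* Find a kclist entry that covers the whole requested range. */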
+ list_for_each_entry(m, &kclist_head, list) {
+ if (start >= m->addr && start + size <= m->addr + m->size)
+ break;
+ }
+
+ if (&m->list == &kclist_head) {
+ ret = -EINVAL;
+ goto out;
+ }
+
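+ /* /proc/kcore is read-only: refuse writable or executable mappings. */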
+ if (vma->vm_flags & (VM_WRITE | VM_EXEC)) {
+ ret = -EPERM;
+ goto out;
+ }
+
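+ /* Keep mprotect() from later adding write or exec permission. */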
+ vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_ops = &kcore_mmap_ops;
+
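+ /*
+ * Pick the page frame number for the region type: direct-mapped RAM
+ * (and remapped ranges) translate via __pa(), kernel text via
+ * __pa_symbol(); anything else cannot be sensibly mapped.
+ */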
+ if (kern_addr_valid(start)) {
+ if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
+ pfn = __pa(start) >> PAGE_SHIFT;
+ else if (m->type == KCORE_TEXT)
+ pfn = __pa_symbol(start) >> PAGE_SHIFT;
+ else {
+ ret = -EFAULT;
+ goto out;
+ }
+
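+ /* Map the whole VMA up front; residual faults hit kcore_mmap_ops. */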
+ ret = remap_pfn_range(vma, vma->vm_start, pfn, size,
+ vma->vm_page_prot);
+ } else {
+ ret = -EFAULT;
+ }
+
+out:
+ up_read(&kclist_lock);
+ return ret;
+}
+
static const struct proc_ops kcore_proc_ops = {
.proc_read = read_kcore,
.proc_open = open_kcore,
.proc_release = release_kcore,
.proc_lseek = default_llseek,
+ .proc_mmap = mmap_kcore,
};
/* just remember that we have to update kcore */