}
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-static int fanotify_get_response_from_access(struct fsnotify_group *group,
-                                            struct fanotify_event_info *event)
+static int fanotify_get_response(struct fsnotify_group *group,
+                                struct fanotify_perm_event_info *event)
 {
        int ret;
 
        return false;
 }
 
+struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
+                                                struct path *path)
+{
+       struct fanotify_event_info *event;
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+       if (mask & FAN_ALL_PERM_EVENTS) {
+               struct fanotify_perm_event_info *pevent;
+
+               pevent = kmem_cache_alloc(fanotify_perm_event_cachep,
+                                         GFP_KERNEL);
+               if (!pevent)
+                       return NULL;
+               event = &pevent->fae;
+               pevent->response = 0;
+               goto init;
+       }
+#endif
+       event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
+       if (!event)
+               return NULL;
+init: __maybe_unused
+       fsnotify_init_event(&event->fse, inode, mask);
+       event->tgid = get_pid(task_tgid(current));
+       if (path) {
+               event->path = *path;
+               path_get(&event->path);
+       } else {
+               event->path.mnt = NULL;
+               event->path.dentry = NULL;
+       }
+       return event;
+}
+
 static int fanotify_handle_event(struct fsnotify_group *group,
                                 struct inode *inode,
                                 struct fsnotify_mark *inode_mark,
        pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
                 mask);
 
-       event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
+       event = fanotify_alloc_event(inode, mask, data);
        if (unlikely(!event))
                return -ENOMEM;
 
        fsn_event = &event->fse;
-       fsnotify_init_event(fsn_event, inode, mask);
-       event->tgid = get_pid(task_tgid(current));
-       if (data_type == FSNOTIFY_EVENT_PATH) {
-               struct path *path = data;
-               event->path = *path;
-               path_get(&event->path);
-       } else {
-               event->path.mnt = NULL;
-               event->path.dentry = NULL;
-       }
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-       event->response = 0;
-#endif
-
        ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge);
        if (ret) {
                /* Permission events shouldn't be merged */
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (mask & FAN_ALL_PERM_EVENTS) {
-               ret = fanotify_get_response_from_access(group, event);
+               ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event));
                fsnotify_destroy_event(group, fsn_event);
        }
 #endif
        event = FANOTIFY_E(fsn_event);
        path_put(&event->path);
        put_pid(event->tgid);
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+       if (fsn_event->mask & FAN_ALL_PERM_EVENTS) {
+               kmem_cache_free(fanotify_perm_event_cachep,
+                               FANOTIFY_PE(fsn_event));
+               return;
+       }
+#endif
        kmem_cache_free(fanotify_event_cachep, event);
 }
 
 
 #include <linux/slab.h>
 
 extern struct kmem_cache *fanotify_event_cachep;
+extern struct kmem_cache *fanotify_perm_event_cachep;
 
 /*
- * Lifetime of the structure differs for normal and permission events. In both
- * cases the structure is allocated in fanotify_handle_event(). For normal
- * events the structure is freed immediately after reporting it to userspace.
- * For permission events we free it only after we receive response from
- * userspace.
+ * Structure for normal fanotify events. It gets allocated in
+ * fanotify_handle_event() and freed when the information is retrieved by
+ * userspace.
  */
 struct fanotify_event_info {
        struct fsnotify_event fse;
         */
        struct path path;
        struct pid *tgid;
+};
+
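To make "freed when the information is retrieved by userspace" concrete, here is a minimal listener-side read loop against the documented fanotify API (fanotify(7)). It is an illustration only, not part of this patch; the watched path and buffer size are arbitrary.

/*
 * Illustrative userspace sketch: the read() below is the retrieval step after
 * which the kernel can free a normal fanotify_event_info.  Needs CAP_SYS_ADMIN.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
        char buf[4096];
        ssize_t len;
        struct fanotify_event_metadata *md;
        int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

        if (fd < 0)
                return 1;
        /* Watch basic events on the mount containing /tmp. */
        if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
                          FAN_OPEN | FAN_CLOSE | FAN_ACCESS | FAN_MODIFY,
                          AT_FDCWD, "/tmp") < 0)
                return 1;

        while ((len = read(fd, buf, sizeof(buf))) > 0) {
                /* Real code should also check md->vers against
                 * FANOTIFY_METADATA_VERSION. */
                md = (struct fanotify_event_metadata *)buf;
                for (; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
                        printf("mask=%llx pid=%d\n",
                               (unsigned long long)md->mask, md->pid);
                        if (md->fd >= 0)
                                close(md->fd);
                }
        }
        return 0;
}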
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-       u32 response;   /* userspace answer to question */
-#endif
+/*
+ * Structure for permission fanotify events. It gets allocated and freed in
+ * fanotify_handle_event() since we wait there for the userspace response.
+ * When the event is reported to userspace, it is moved from
+ * group->notification_list to group->fanotify_data.access_list until that
+ * response arrives.
+ */
+struct fanotify_perm_event_info {
+       struct fanotify_event_info fae;
+       int response;   /* userspace answer to question */
+       int fd;         /* fd we passed to userspace for this event */
 };
 
+static inline struct fanotify_perm_event_info *
+FANOTIFY_PE(struct fsnotify_event *fse)
+{
+       return container_of(fse, struct fanotify_perm_event_info, fae.fse);
+}
+#endif
+
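A self-contained sketch of the embedding trick that FANOTIFY_PE() and FANOTIFY_E() rely on: the generic event is a member of the specialised structure, so a pointer to the embedded member can be mapped back to its container with container_of(). Every name below is a local stand-in defined for illustration, not one of the kernel's types.

/*
 * Illustrative sketch, not part of this patch: recover an outer structure
 * from a pointer to a structure embedded inside it.
 */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct base_event {                     /* stands in for fsnotify_event */
        unsigned int mask;
};

struct event_info {                     /* stands in for fanotify_event_info */
        struct base_event fse;
};

struct perm_event_info {                /* stands in for fanotify_perm_event_info */
        struct event_info fae;          /* generic part embedded, as above */
        int response;
        int fd;
};

int main(void)
{
        struct perm_event_info *pevent = calloc(1, sizeof(*pevent));
        if (!pevent)
                return 1;
        /* Generic code only ever sees a pointer to the embedded member... */
        struct base_event *fse = &pevent->fae.fse;
        /* ...and permission-aware code maps it back through the nested
         * member path, exactly what FANOTIFY_PE(fse) does. */
        struct perm_event_info *back =
                container_of(fse, struct perm_event_info, fae.fse);

        assert(back == pevent);
        free(pevent);
        return 0;
}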
 static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse)
 {
        return container_of(fse, struct fanotify_event_info, fse);
 }
+
+struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
+                                                struct path *path);
 
 extern const struct fsnotify_ops fanotify_fsnotify_ops;
 
 static struct kmem_cache *fanotify_mark_cache __read_mostly;
-static struct kmem_cache *fanotify_response_event_cache __read_mostly;
 struct kmem_cache *fanotify_event_cachep __read_mostly;
-
-struct fanotify_response_event {
-       struct list_head list;
-       __s32 fd;
-       struct fanotify_event_info *event;
-};
+struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
 
 /*
  * Get an fsnotify notification event if one exists and is small
 }
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
-                                                 __s32 fd)
+static struct fanotify_perm_event_info *dequeue_event(
+                               struct fsnotify_group *group, int fd)
 {
-       struct fanotify_response_event *re, *return_re = NULL;
+       struct fanotify_perm_event_info *event, *return_e = NULL;
 
        mutex_lock(&group->fanotify_data.access_mutex);
-       list_for_each_entry(re, &group->fanotify_data.access_list, list) {
-               if (re->fd != fd)
+       list_for_each_entry(event, &group->fanotify_data.access_list,
+                           fae.fse.list) {
+               if (event->fd != fd)
                        continue;
 
-               list_del_init(&re->list);
-               return_re = re;
+               list_del_init(&event->fae.fse.list);
+               return_e = event;
                break;
        }
        mutex_unlock(&group->fanotify_data.access_mutex);
 
-       pr_debug("%s: found return_re=%p\n", __func__, return_re);
+       pr_debug("%s: found return_re=%p\n", __func__, return_e);
 
-       return return_re;
+       return return_e;
 }
 
 static int process_access_response(struct fsnotify_group *group,
                                   struct fanotify_response *response_struct)
 {
-       struct fanotify_response_event *re;
-       __s32 fd = response_struct->fd;
-       __u32 response = response_struct->response;
+       struct fanotify_perm_event_info *event;
+       int fd = response_struct->fd;
+       int response = response_struct->response;
 
        pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
                 fd, response);
        if (fd < 0)
                return -EINVAL;
 
-       re = dequeue_re(group, fd);
-       if (!re)
+       event = dequeue_event(group, fd);
+       if (!event)
                return -ENOENT;
 
-       re->event->response = response;
-
+       event->response = response;
        wake_up(&group->fanotify_data.access_waitq);
 
-       kmem_cache_free(fanotify_response_event_cache, re);
-
-       return 0;
-}
-
-static int prepare_for_access_response(struct fsnotify_group *group,
-                                      struct fsnotify_event *event,
-                                      __s32 fd)
-{
-       struct fanotify_response_event *re;
-
-       if (!(event->mask & FAN_ALL_PERM_EVENTS))
-               return 0;
-
-       re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
-       if (!re)
-               return -ENOMEM;
-
-       re->event = FANOTIFY_E(event);
-       re->fd = fd;
-
-       mutex_lock(&group->fanotify_data.access_mutex);
-       list_add_tail(&re->list, &group->fanotify_data.access_list);
-       mutex_unlock(&group->fanotify_data.access_mutex);
-
-       return 0;
-}
-
-#else
-static int prepare_for_access_response(struct fsnotify_group *group,
-                                      struct fsnotify_event *event,
-                                      __s32 fd)
-{
        return 0;
 }
-
 #endif
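The listener-side counterpart of process_access_response() is a plain write() of a struct fanotify_response into the fanotify fd. A minimal sketch against the documented API follows; respond() is an illustrative helper name, not something introduced by this patch.

/*
 * Illustrative userspace sketch, not part of this patch: answer one
 * permission event.  The kernel side of this write() is
 * process_access_response() above.
 */
#include <unistd.h>
#include <sys/fanotify.h>

/* event_fd is the fd field of the fanotify_event_metadata that carried the
 * FAN_OPEN_PERM or FAN_ACCESS_PERM event. */
static int respond(int fanotify_fd, int event_fd, int allow)
{
        struct fanotify_response resp = {
                .fd = event_fd,
                .response = allow ? FAN_ALLOW : FAN_DENY,
        };

        if (write(fanotify_fd, &resp, sizeof(resp)) != sizeof(resp))
                return -1;
        close(event_fd);        /* drop the listener's reference */
        return 0;
}

Until that write arrives, the kernel keeps the event parked on group->fanotify_data.access_list, which is where copy_event_to_user() below places it.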
 
 static ssize_t copy_event_to_user(struct fsnotify_group *group,
                         fanotify_event_metadata.event_len))
                goto out_close_fd;
 
-       ret = prepare_for_access_response(group, event, fd);
-       if (ret)
-               goto out_close_fd;
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+       if (event->mask & FAN_ALL_PERM_EVENTS) {
+               struct fanotify_perm_event_info *pevent;
+
+               pevent = FANOTIFY_PE(event);
+               pevent->fd = fd;
+               mutex_lock(&group->fanotify_data.access_mutex);
+               list_add_tail(&pevent->fae.fse.list,
+                             &group->fanotify_data.access_list);
+               mutex_unlock(&group->fanotify_data.access_mutex);
+       }
+#endif
 
        if (fd != FAN_NOFD)
                fd_install(fd, f);
 out:
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (event->mask & FAN_ALL_PERM_EVENTS) {
-               FANOTIFY_E(event)->response = FAN_DENY;
+               FANOTIFY_PE(event)->response = FAN_DENY;
                wake_up(&group->fanotify_data.access_waitq);
        }
 #endif
                                break;
                        ret = copy_event_to_user(group, kevent, buf);
                        /*
-                        * Permission events get destroyed after we
-                        * receive response
+                        * Permission events get queued to wait for response.
+                        * Other events can be destroyed now.
                         */
                        if (!(kevent->mask & FAN_ALL_PERM_EVENTS))
                                fsnotify_destroy_event(group, kevent);
        struct fsnotify_group *group = file->private_data;
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-       struct fanotify_response_event *re, *lre;
+       struct fanotify_perm_event_info *event, *next;
 
        mutex_lock(&group->fanotify_data.access_mutex);
 
        atomic_inc(&group->fanotify_data.bypass_perm);
 
-       list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
-               pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
-                        re, re->event);
-
-               list_del_init(&re->list);
-               re->event->response = FAN_ALLOW;
+       list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
+                                fae.fse.list) {
+               pr_debug("%s: found group=%p event=%p\n", __func__, group,
+                        event);
 
-               kmem_cache_free(fanotify_response_event_cache, re);
+               list_del_init(&event->fae.fse.list);
+               event->response = FAN_ALLOW;
        }
        mutex_unlock(&group->fanotify_data.access_mutex);
 
        group->fanotify_data.user = user;
        atomic_inc(&user->fanotify_listeners);
 
-       oevent = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
+       oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
        if (unlikely(!oevent)) {
                fd = -ENOMEM;
                goto out_destroy_group;
        }
        group->overflow_event = &oevent->fse;
-       fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
-       oevent->tgid = get_pid(task_tgid(current));
-       oevent->path.mnt = NULL;
-       oevent->path.dentry = NULL;
 
        group->fanotify_data.f_flags = event_f_flags;
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-       oevent->response = 0;
        mutex_init(&group->fanotify_data.access_mutex);
        init_waitqueue_head(&group->fanotify_data.access_waitq);
        INIT_LIST_HEAD(&group->fanotify_data.access_list);
 static int __init fanotify_user_setup(void)
 {
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
-       fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
-                                                  SLAB_PANIC);
        fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+       fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
+                                               SLAB_PANIC);
+#endif
 
        return 0;
 }
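For reference, KMEM_CACHE() is shorthand for kmem_cache_create() keyed on the struct type. Assuming the <linux/slab.h> definition of this era, the new cache setup expands to roughly the fragment below; it is shown only to spell the macro out, not as a suggested change.

/* Illustrative expansion of KMEM_CACHE(fanotify_perm_event_info, SLAB_PANIC) */
fanotify_perm_event_cachep =
        kmem_cache_create("fanotify_perm_event_info",
                          sizeof(struct fanotify_perm_event_info),
                          __alignof__(struct fanotify_perm_event_info),
                          SLAB_PANIC, NULL);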