* @old_size:  size of the old function
  * @new_size:  size of the new function
  * @kobj_added: @kobj has been added and needs freeing
+ * @nop:        temporary patch to use the original code again; dynamically allocated
  * @patched:   the func has been added to the klp_ops list
  * @transition:        the func is currently being applied or reverted
  *
        struct list_head stack_node;
        unsigned long old_size, new_size;
        bool kobj_added;
+       bool nop;
        bool patched;
        bool transition;
 };
  * @mod:       kernel module associated with the patched object
  *             (NULL for vmlinux)
  * @kobj_added: @kobj has been added and needs freeing
+ * @dynamic:    temporary object for nop functions; dynamically allocated
  * @patched:   the object's funcs have been added to the klp_ops list
  */
 struct klp_object {
        struct list_head node;
        struct module *mod;
        bool kobj_added;
+       bool dynamic;
        bool patched;
 };
 
  * struct klp_patch - patch structure for live patching
  * @mod:       reference to the live patch module
  * @objs:      object entries for kernel objects to be patched
+ * @replace:   replace all actively used patches
  * @list:      list node for global list of actively used patches
  * @kobj:      kobject for sysfs resources
  * @obj_list:  dynamic list of the object entries
        /* external */
        struct module *mod;
        struct klp_object *objs;
+       bool replace;
 
        /* internal */
        struct list_head list;
 #define klp_for_each_object_static(patch, obj) \
        for (obj = patch->objs; obj->funcs || obj->name; obj++)
 
+/* Safe variant of klp_for_each_object(); entries may be deleted while iterating. */
+#define klp_for_each_object_safe(patch, obj, tmp_obj)          \
+       list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)
+
 #define klp_for_each_object(patch, obj)        \
        list_for_each_entry(obj, &patch->obj_list, node)
 
             func->old_name || func->new_func || func->old_sympos; \
             func++)
 
+/* Safe variant of klp_for_each_func(); entries may be deleted while iterating. */
+#define klp_for_each_func_safe(obj, func, tmp_func)                    \
+       list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)
+
 #define klp_for_each_func(obj, func)   \
        list_for_each_entry(func, &obj->func_list, node)
 
 
        return !!klp_root_kobj;
 }
 
+/*
+ * Find the func entry in @obj that patches the same function as @old_func,
+ * i.e. the entry with matching old_name and old_sympos.  Returns NULL when
+ * @obj has no such entry.
+ */
+static struct klp_func *klp_find_func(struct klp_object *obj,
+                                     struct klp_func *old_func)
+{
+       struct klp_func *func;
+
+       klp_for_each_func(obj, func) {
+               if ((strcmp(old_func->old_name, func->old_name) == 0) &&
+                   (old_func->old_sympos == func->old_sympos)) {
+                       return func;
+               }
+       }
+
+       return NULL;
+}
+
+/*
+ * Find the object entry in @patch that patches the same object as @old_obj:
+ * either the entry for the module with the same name, or the vmlinux entry
+ * when @old_obj refers to vmlinux.  Returns NULL when @patch has no such
+ * entry.
+ */
+static struct klp_object *klp_find_object(struct klp_patch *patch,
+                                         struct klp_object *old_obj)
+{
+       struct klp_object *obj;
+
+       klp_for_each_object(patch, obj) {
+               if (klp_is_module(old_obj)) {
+                       if (klp_is_module(obj) &&
+                           strcmp(old_obj->name, obj->name) == 0) {
+                               return obj;
+                       }
+               } else if (!klp_is_module(obj)) {
+                       return obj;
+               }
+       }
+
+       return NULL;
+}
+
 struct klp_find_arg {
        const char *objname;
        const char *name;
        NULL
 };
 
+/* Free an object created by klp_alloc_object_dynamic(), including its name. */
+static void klp_free_object_dynamic(struct klp_object *obj)
+{
+       kfree(obj->name);
+       kfree(obj);
+}
+
+/*
+ * Allocate a dynamic (nop-carrying) object entry for @name.  @name is
+ * duplicated; it may be NULL (presumably the vmlinux case -- see
+ * klp_find_object()'s klp_is_module() checks).  Returns NULL on allocation
+ * failure.  The result is freed via klp_free_object_dynamic().
+ */
+static struct klp_object *klp_alloc_object_dynamic(const char *name)
+{
+       struct klp_object *obj;
+
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (!obj)
+               return NULL;
+
+       if (name) {
+               obj->name = kstrdup(name, GFP_KERNEL);
+               if (!obj->name) {
+                       kfree(obj);
+                       return NULL;
+               }
+       }
+
+       INIT_LIST_HEAD(&obj->func_list);
+       obj->dynamic = true;
+
+       return obj;
+}
+
+/* Free a nop func created by klp_alloc_func_nop(), including its old_name. */
+static void klp_free_func_nop(struct klp_func *func)
+{
+       kfree(func->old_name);
+       kfree(func);
+}
+
+/*
+ * Allocate a 'nop' func entry mirroring @old_func (same old_name and
+ * old_sympos).  new_func and old_func stay NULL here; both addresses are
+ * resolved once the patched object is loaded, see klp_init_object_loaded().
+ * Returns NULL on allocation failure.
+ *
+ * NOTE(review): @obj is not referenced in this function -- confirm whether
+ * the parameter is actually needed.
+ */
+static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
+                                          struct klp_object *obj)
+{
+       struct klp_func *func;
+
+       func = kzalloc(sizeof(*func), GFP_KERNEL);
+       if (!func)
+               return NULL;
+
+       if (old_func->old_name) {
+               func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
+               if (!func->old_name) {
+                       kfree(func);
+                       return NULL;
+               }
+       }
+
+       /*
+        * func->new_func is same as func->old_func. These addresses are
+        * set when the object is loaded, see klp_init_object_loaded().
+        */
+       func->old_sympos = old_func->old_sympos;
+       func->nop = true;
+
+       return func;
+}
+
+/*
+ * Make sure @patch covers every function patched by @old_obj: allocate a
+ * dynamic object entry if @patch has none for this object, then add a nop
+ * func for every func of @old_obj not already present.
+ *
+ * Returns 0 or -ENOMEM.  Entries already added before a failure stay on the
+ * lists; they are released by the caller's cleanup path (see
+ * klp_free_objects()/klp_free_funcs(), which handle dynamic and nop entries).
+ */
+static int klp_add_object_nops(struct klp_patch *patch,
+                              struct klp_object *old_obj)
+{
+       struct klp_object *obj;
+       struct klp_func *func, *old_func;
+
+       obj = klp_find_object(patch, old_obj);
+
+       if (!obj) {
+               obj = klp_alloc_object_dynamic(old_obj->name);
+               if (!obj)
+                       return -ENOMEM;
+
+               list_add_tail(&obj->node, &patch->obj_list);
+       }
+
+       klp_for_each_func(old_obj, old_func) {
+               func = klp_find_func(obj, old_func);
+               if (func)
+                       continue;
+
+               func = klp_alloc_func_nop(old_func, obj);
+               if (!func)
+                       return -ENOMEM;
+
+               list_add_tail(&func->node, &obj->func_list);
+       }
+
+       return 0;
+}
+
+/*
+ * Add 'nop' functions which simply return to the caller to run
+ * the original function. The 'nop' functions are added to a
+ * patch to facilitate a 'replace' mode.
+ *
+ * Walks every patch on klp_patches and mirrors each of its objects into
+ * @patch via klp_add_object_nops().  Returns 0 or -ENOMEM.
+ */
+static int klp_add_nops(struct klp_patch *patch)
+{
+       struct klp_patch *old_patch;
+       struct klp_object *old_obj;
+
+       list_for_each_entry(old_patch, &klp_patches, list) {
+               klp_for_each_object(old_patch, old_obj) {
+                       int err;
+
+                       err = klp_add_object_nops(patch, old_obj);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
 static void klp_kobj_release_patch(struct kobject *kobj)
 {
        struct klp_patch *patch;
 
 static void klp_kobj_release_object(struct kobject *kobj)
 {
+       struct klp_object *obj;
+
+       obj = container_of(kobj, struct klp_object, kobj);
+
+       /* Only dynamically allocated objects need freeing here. */
+       if (obj->dynamic)
+               klp_free_object_dynamic(obj);
 }
 
 static struct kobj_type klp_ktype_object = {
 
 static void klp_kobj_release_func(struct kobject *kobj)
 {
+       struct klp_func *func;
+
+       func = container_of(kobj, struct klp_func, kobj);
+
+       /* Only nop funcs are dynamically allocated and need freeing here. */
+       if (func->nop)
+               klp_free_func_nop(func);
 }
 
 static struct kobj_type klp_ktype_func = {
 
+/* Release all func entries of @obj, including dynamically allocated nops. */
 static void klp_free_funcs(struct klp_object *obj)
 {
-       struct klp_func *func;
+       struct klp_func *func, *tmp_func;
 
-       klp_for_each_func(obj, func) {
+       klp_for_each_func_safe(obj, func, tmp_func) {
                /* Might be called from klp_init_patch() error path. */
-               if (func->kobj_added)
+               if (func->kobj_added) {
                        kobject_put(&func->kobj);
+               } else if (func->nop) {
+                       /* No kobject yet: free the nop func directly. */
+                       klp_free_func_nop(func);
+               }
        }
 }
 
 
        obj->mod = NULL;
 
-       klp_for_each_func(obj, func)
+       klp_for_each_func(obj, func) {
                func->old_func = NULL;
+
+               if (func->nop)
+                       func->new_func = NULL;
+       }
 }
 
+/* Release all object entries of @patch, including dynamically allocated ones. */
 static void klp_free_objects(struct klp_patch *patch)
 {
-       struct klp_object *obj;
+       struct klp_object *obj, *tmp_obj;
 
-       klp_for_each_object(patch, obj) {
+       klp_for_each_object_safe(patch, obj, tmp_obj) {
                klp_free_funcs(obj);
 
                /* Might be called from klp_init_patch() error path. */
-               if (obj->kobj_added)
+               if (obj->kobj_added) {
                        kobject_put(&obj->kobj);
+               } else if (obj->dynamic) {
+                       /* No kobject yet: free the dynamic object directly. */
+                       klp_free_object_dynamic(obj);
+               }
        }
 }
 
 {
        int ret;
 
-       if (!func->old_name || !func->new_func)
+       if (!func->old_name)
+               return -EINVAL;
+
+       /*
+        * NOPs get the address later. The patched module must be loaded,
+        * see klp_init_object_loaded().
+        */
+       if (!func->new_func && !func->nop)
                return -EINVAL;
 
        if (strlen(func->old_name) >= KSYM_NAME_LEN)
                        return -ENOENT;
                }
 
+               if (func->nop)
+                       func->new_func = func->old_func;
+
                ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
                                                  &func->new_size, NULL);
                if (!ret) {
                return ret;
        patch->kobj_added = true;
 
+       if (patch->replace) {
+               ret = klp_add_nops(patch);
+               if (ret)
+                       return ret;
+       }
+
        klp_for_each_object(patch, obj) {
                ret = klp_init_object(patch, obj);
                if (ret)
 }
 EXPORT_SYMBOL_GPL(klp_enable_patch);
 
+/*
+ * This function removes replaced patches.
+ *
+ * We could be pretty aggressive here. It is called in the situation where
+ * these structures are no longer accessible. All functions are redirected
+ * by the klp_transition_patch. They use either the new code or they are in
+ * the original code because of the special nop function patches.
+ *
+ * The only exception is when the transition was forced. In this case,
+ * klp_ftrace_handler() might still see the replaced patch on the stack.
+ * Fortunately, it is carefully designed to work with removed functions
+ * thanks to RCU. We only have to keep the patches on the system. Also
+ * this is handled transparently by patch->module_put.
+ */
+void klp_discard_replaced_patches(struct klp_patch *new_patch)
+{
+       struct klp_patch *old_patch, *tmp_patch;
+
+       list_for_each_entry_safe(old_patch, tmp_patch, &klp_patches, list) {
+               /* Stop at @new_patch itself; later entries are left alone. */
+               if (old_patch == new_patch)
+                       return;
+
+               old_patch->enabled = false;
+               klp_unpatch_objects(old_patch);
+               klp_free_patch_start(old_patch);
+               /* Final teardown runs asynchronously via free_work. */
+               schedule_work(&old_patch->free_work);
+       }
+}
+
 /*
  * Remove parts of patches that touch a given kernel module. The list of
  * patches processed might be limited. When limit is NULL, all patches