}
  EXPORT_SYMBOL_GPL(klp_register_patch);
  
 +/*
 + * Remove parts of patches that touch a given kernel module. The list of
 + * patches processed might be limited. When limit is NULL, all patches
 + * will be handled.
 + *
 + * Patches are walked in registration order; the walk stops as soon as
 + * @limit is reached, so @limit itself is not processed.
 + */
 +static void klp_cleanup_module_patches_limited(struct module *mod,
 +                                             struct klp_patch *limit)
 +{
 +      struct klp_patch *patch;
 +      struct klp_object *obj;
 +
 +      list_for_each_entry(patch, &klp_patches, list) {
 +              if (patch == limit)
 +                      break;
 +
 +              klp_for_each_object(patch, obj) {
 +                      /* Only objects bound to the unloading module matter. */
 +                      if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
 +                              continue;
 +
 +                      /*
 +                       * Only unpatch the module if the patch is enabled or
 +                       * is in transition.
 +                       */
 +                      if (patch->enabled || patch == klp_transition_patch) {
++
++                              /*
++                               * NOTE(review): the pre-unpatch callback is
++                               * skipped for the patch currently in
++                               * transition — presumably the transition code
++                               * handles (or already handled) it; confirm
++                               * against klp_transition.
++                               */
++                              if (patch != klp_transition_patch)
++                                      klp_pre_unpatch_callback(obj);
++
 +                              pr_notice("reverting patch '%s' on unloading module '%s'\n",
 +                                        patch->mod->name, obj->mod->name);
 +                              klp_unpatch_object(obj);
++
++                              klp_post_unpatch_callback(obj);
 +                      }
 +
 +                      klp_free_object_loaded(obj);
 +                      /* Stop after the first object matching this module. */
 +                      break;
 +              }
 +      }
 +}
 +
  int klp_module_coming(struct module *mod)
  {
        int ret;