* protects this. */
        struct list_head users;
 
+       /* Information to supply to users. */
+       unsigned char ipmi_version_major;
+       unsigned char ipmi_version_minor;
+
        /* Used for wake ups at startup. */
        wait_queue_head_t waitq;
 
        char *my_dev_name;
        char *sysfs_name;
 
-       /* This is the lower-layer's sender routine. */
+       /* This is the lower-layer's sender routine.  Note that you
+        * must either be holding the ipmi_interfaces_mutex or be in
+        * an unpreemptible region to use this.  You must fetch the
+        * value into a local variable and make sure it is not NULL.
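+        *
+        * A minimal sketch of the required pattern (rcu_read_lock()
+        * gives an unpreemptible region):
+        *
+        *      rcu_read_lock();
+        *      handlers = intf->handlers;
+        *      if (handlers)
+        *              handlers->sender(intf->send_info, msg, 0);
+        *      rcu_read_unlock();
+        */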
        struct ipmi_smi_handlers *handlers;
        void                     *send_info;
 
        spinlock_t       events_lock; /* For dealing with event stuff. */
        struct list_head waiting_events;
        unsigned int     waiting_events_count; /* How many events in queue? */
+       int              delivering_events;
 
        /* The event receiver for my BMC, only really used at panic
           shutdown as a place to store this. */
 /* List of watchers that want to know when smi's are added and
    deleted. */
 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
-static DECLARE_RWSEM(smi_watchers_sem);
+static DEFINE_MUTEX(smi_watchers_mutex);
 
 
 static void free_recv_msg_list(struct list_head *q)
 }
 
 struct watcher_entry {
+       int              intf_num;
+       ipmi_smi_t       intf;
        struct list_head link;
-       int intf_num;
 };
 
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
        struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
        struct watcher_entry *e, *e2;
 
+       mutex_lock(&smi_watchers_mutex);
+
        mutex_lock(&ipmi_interfaces_mutex);
 
+       /* Build a list of things to deliver. */
        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
                if (intf->intf_num == -1)
                        continue;
                e = kmalloc(sizeof(*e), GFP_KERNEL);
                if (!e)
                        goto out_err;
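+               /* Take a reference so the interface can't be freed
+                * before we deliver the new_smi() callback below. */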
+               kref_get(&intf->refcount);
+               e->intf = intf;
                e->intf_num = intf->intf_num;
                list_add_tail(&e->link, &to_deliver);
        }
 
-       down_write(&smi_watchers_sem);
-       list_add(&(watcher->link), &smi_watchers);
-       up_write(&smi_watchers_sem);
+       /* We will succeed, so add it to the list. */
+       list_add(&watcher->link, &smi_watchers);
 
        mutex_unlock(&ipmi_interfaces_mutex);
 
        list_for_each_entry_safe(e, e2, &to_deliver, link) {
                list_del(&e->link);
-               watcher->new_smi(e->intf_num, intf->si_dev);
+               watcher->new_smi(e->intf_num, e->intf->si_dev);
+               kref_put(&e->intf->refcount, intf_free);
                kfree(e);
        }
 
+       mutex_unlock(&smi_watchers_mutex);
 
        return 0;
 
  out_err:
+       mutex_unlock(&ipmi_interfaces_mutex);
+       mutex_unlock(&smi_watchers_mutex);
        list_for_each_entry_safe(e, e2, &to_deliver, link) {
                list_del(&e->link);
+               kref_put(&e->intf->refcount, intf_free);
                kfree(e);
        }
        return -ENOMEM;
 
 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 {
-       down_write(&smi_watchers_sem);
+       mutex_lock(&smi_watchers_mutex);
        list_del(&(watcher->link));
-       up_write(&smi_watchers_sem);
+       mutex_unlock(&smi_watchers_mutex);
        return 0;
 }
 
+/*
+ * Must be called with smi_watchers_mutex held.
+ */
 static void
 call_smi_watchers(int i, struct device *dev)
 {
        struct ipmi_smi_watcher *w;
 
-       down_read(&smi_watchers_sem);
        list_for_each_entry(w, &smi_watchers, link) {
                if (try_module_get(w->owner)) {
                        w->new_smi(i, dev);
                        module_put(w->owner);
                }
        }
-       up_read(&smi_watchers_sem);
 }
 
 static int
        }
 }
 
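+/*
+ * Turn msg into an error response with the given completion code and
+ * deliver it; used both for send timeouts and for flushing pending
+ * messages when an interface goes away.
+ */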
+static void
+deliver_err_response(struct ipmi_recv_msg *msg, int err)
+{
+       msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+       msg->msg_data[0] = err;
+       msg->msg.netfn |= 1; /* Convert to a response. */
+       msg->msg.data_len = 1;
+       msg->msg.data = msg->msg_data;
+       deliver_response(msg);
+}
+
 /* Find the next sequence number not being used and add the given
    message with the given timeout to the sequence table.  This must be
    called with the interface's seq_lock held. */
        }
        spin_unlock_irqrestore(&(intf->seq_lock), flags);
 
-       if (msg) {
-               msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-               msg->msg_data[0] = err;
-               msg->msg.netfn |= 1; /* Convert to a response. */
-               msg->msg.data_len = 1;
-               msg->msg.data = msg->msg_data;
-               deliver_response(msg);
-       }
+       if (msg)
+               deliver_err_response(msg, err);
 
        return rv;
 }
        if (!new_user)
                return -ENOMEM;
 
-       rcu_read_lock();
+       mutex_lock(&ipmi_interfaces_mutex);
        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
                if (intf->intf_num == if_num)
                        goto found;
        }
-       rcu_read_unlock();
+       /* Not found, return an error */
        rv = -EINVAL;
        goto out_kfree;
 
  found:
        /* Note that each existing user holds a refcount to the interface. */
        kref_get(&intf->refcount);
-       rcu_read_unlock();
 
        kref_init(&new_user->refcount);
        new_user->handler = handler;
                }
        }
 
+       /* Hold the lock so intf->handlers is guaranteed to be good
+        * until now. */
+       mutex_unlock(&ipmi_interfaces_mutex);
+
        new_user->valid = 1;
        spin_lock_irqsave(&intf->seq_lock, flags);
        list_add_rcu(&new_user->link, &intf->users);
 out_kref:
        kref_put(&intf->refcount, intf_free);
 out_kfree:
+       mutex_unlock(&ipmi_interfaces_mutex);
        kfree(new_user);
        return rv;
 }
                    && (intf->seq_table[i].recv_msg->user == user))
                {
                        intf->seq_table[i].inuse = 0;
+                       ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
                }
        }
        spin_unlock_irqrestore(&intf->seq_lock, flags);
                kfree(rcvr);
        }
 
-       module_put(intf->handlers->owner);
-       if (intf->handlers->dec_usecount)
-               intf->handlers->dec_usecount(intf->send_info);
+       mutex_lock(&ipmi_interfaces_mutex);
+       if (intf->handlers) {
+               module_put(intf->handlers->owner);
+               if (intf->handlers->dec_usecount)
+                       intf->handlers->dec_usecount(intf->send_info);
+       }
+       mutex_unlock(&ipmi_interfaces_mutex);
 
        kref_put(&intf->refcount, intf_free);
 
                      unsigned char *major,
                      unsigned char *minor)
 {
-       *major = ipmi_version_major(&user->intf->bmc->id);
-       *minor = ipmi_version_minor(&user->intf->bmc->id);
+       *major = user->intf->ipmi_version_major;
+       *minor = user->intf->ipmi_version_minor;
 }
 
 int ipmi_set_my_address(ipmi_user_t   user,
        spin_lock_irqsave(&intf->events_lock, flags);
        user->gets_events = val;
 
-       if (val) {
-               /* Deliver any queued events. */
+       if (intf->delivering_events)
+               /*
+                * Another thread is delivering events for this
+                * interface, so let it handle any new events.
+                */
+               goto out;
+
+       /* Deliver any queued events. */
+       while (user->gets_events && !list_empty(&intf->waiting_events)) {
                list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
                        list_move_tail(&msg->link, &msgs);
                intf->waiting_events_count = 0;
-       }
 
-       /* Hold the events lock while doing this to preserve order. */
-       list_for_each_entry_safe(msg, msg2, &msgs, link) {
-               msg->user = user;
-               kref_get(&user->refcount);
-               deliver_response(msg);
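+               /* Mark delivery in progress so another thread won't
+                * race us while we drop the events lock to call
+                * deliver_response(). */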
+               intf->delivering_events = 1;
+               spin_unlock_irqrestore(&intf->events_lock, flags);
+
+               list_for_each_entry_safe(msg, msg2, &msgs, link) {
+                       msg->user = user;
+                       kref_get(&user->refcount);
+                       deliver_response(msg);
+               }
+
+               spin_lock_irqsave(&intf->events_lock, flags);
+               intf->delivering_events = 0;
        }
 
+ out:
        spin_unlock_irqrestore(&intf->events_lock, flags);
 
        return 0;
 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
 {
        ipmi_smi_t intf = user->intf;
-       intf->handlers->set_run_to_completion(intf->send_info, val);
+       if (intf->handlers)
+               intf->handlers->set_run_to_completion(intf->send_info, val);
 }
 
 static unsigned char
                          int                  retries,
                          unsigned int         retry_time_ms)
 {
-       int                  rv = 0;
-       struct ipmi_smi_msg  *smi_msg;
-       struct ipmi_recv_msg *recv_msg;
-       unsigned long        flags;
+       int                      rv = 0;
+       struct ipmi_smi_msg      *smi_msg;
+       struct ipmi_recv_msg     *recv_msg;
+       unsigned long            flags;
+       struct ipmi_smi_handlers *handlers;
 
 
        if (supplied_recv) {
                }
        }
 
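+       /* Get the handlers under RCU; they may be NULLed out by a
+        * hot interface removal. */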
+       rcu_read_lock();
+       handlers = intf->handlers;
+       if (!handlers) {
+               rv = -ENODEV;
+               goto out_err;
+       }
+
        recv_msg->user = user;
        if (user)
                kref_get(&user->refcount);
                printk("\n");
        }
 #endif
-       intf->handlers->sender(intf->send_info, smi_msg, priority);
+
+       handlers->sender(intf->send_info, smi_msg, priority);
+       rcu_read_unlock();
 
        return 0;
 
  out_err:
+       rcu_read_unlock();
        ipmi_free_smi_msg(smi_msg);
        ipmi_free_recv_msg(recv_msg);
        return rv;
        int              rv;
        ipmi_smi_t       intf;
        ipmi_smi_t       tintf;
-       int              version_major;
-       int              version_minor;
        struct list_head *link;
 
-       version_major = ipmi_version_major(device_id);
-       version_minor = ipmi_version_minor(device_id);
-
        /* Make sure the driver is actually initialized, this handles
           problems with initialization order. */
        if (!initialized) {
        if (!intf)
                return -ENOMEM;
        memset(intf, 0, sizeof(*intf));
+
+       intf->ipmi_version_major = ipmi_version_major(device_id);
+       intf->ipmi_version_minor = ipmi_version_minor(device_id);
+
        intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
        if (!intf->bmc) {
                kfree(intf);
        spin_lock_init(&intf->counter_lock);
        intf->proc_dir = NULL;
 
+       mutex_lock(&smi_watchers_mutex);
        mutex_lock(&ipmi_interfaces_mutex);
        /* Look for a hole in the numbers. */
        i = 0;
 
        get_guid(intf);
 
-       if ((version_major > 1)
-           || ((version_major == 1) && (version_minor >= 5)))
+       if ((intf->ipmi_version_major > 1)
+           || ((intf->ipmi_version_major == 1)
+               && (intf->ipmi_version_minor >= 5)))
        {
                /* Start scanning the channels to see what is
                   available. */
        if (rv) {
                if (intf->proc_dir)
                        remove_proc_entries(intf);
+               intf->handlers = NULL;
                list_del_rcu(&intf->link);
                mutex_unlock(&ipmi_interfaces_mutex);
+               mutex_unlock(&smi_watchers_mutex);
                synchronize_rcu();
                kref_put(&intf->refcount, intf_free);
        } else {
                intf->intf_num = i;
                mutex_unlock(&ipmi_interfaces_mutex);
                call_smi_watchers(i, intf->si_dev);
+               mutex_unlock(&smi_watchers_mutex);
        }
 
        return rv;
 }
 
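+/*
+ * Return an error for every message still waiting in the sequence
+ * table; the interface has already been pulled from the list, so
+ * these messages can never complete normally.
+ */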
+static void cleanup_smi_msgs(ipmi_smi_t intf)
+{
+       int              i;
+       struct seq_table *ent;
+
+       /* No need for locks, the interface is down. */
+       for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+               ent = &(intf->seq_table[i]);
+               if (!ent->inuse)
+                       continue;
+               deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+       }
+}
+
 int ipmi_unregister_smi(ipmi_smi_t intf)
 {
        struct ipmi_smi_watcher *w;
+       int    intf_num = intf->intf_num;
 
        ipmi_bmc_unregister(intf);
 
+       mutex_lock(&smi_watchers_mutex);
        mutex_lock(&ipmi_interfaces_mutex);
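+       /* Mark the interface dead and unhook it, then wait for all
+        * RCU readers to finish before flushing its messages. */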
+       intf->intf_num = -1;
+       intf->handlers = NULL;
        list_del_rcu(&intf->link);
        mutex_unlock(&ipmi_interfaces_mutex);
        synchronize_rcu();
 
+       cleanup_smi_msgs(intf);
+
        remove_proc_entries(intf);
 
        /* Call all the watcher interfaces to tell them that
           an interface is gone. */
-       down_read(&smi_watchers_sem);
        list_for_each_entry(w, &smi_watchers, link)
-               w->smi_gone(intf->intf_num);
-       up_read(&smi_watchers_sem);
+               w->smi_gone(intf_num);
+       mutex_unlock(&smi_watchers_mutex);
 
        kref_put(&intf->refcount, intf_free);
        return 0;
        struct ipmi_ipmb_addr    *ipmb_addr;
        struct ipmi_recv_msg     *recv_msg;
        unsigned long            flags;
+       struct ipmi_smi_handlers *handlers;
 
        if (msg->rsp_size < 10) {
                /* Message not big enough, just ignore it. */
                printk("\n");
        }
 #endif
-               intf->handlers->sender(intf->send_info, msg, 0);
-
-               rv = -1; /* We used the message, so return the value that
-                           causes it to not be freed or queued. */
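+               /* Fetch the handlers under RCU in case the interface
+                * is being hot-removed. */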
+               rcu_read_lock();
+               handlers = intf->handlers;
+               if (handlers) {
+                       handlers->sender(intf->send_info, msg, 0);
+                       /* We used the message, so return the value
+                          that causes it to not be freed or
+                          queued. */
+                       rv = -1;
+               }
+               rcu_read_unlock();
        } else {
                /* Deliver the message to the user. */
                spin_lock_irqsave(&intf->counter_lock, flags);
        rcu_read_unlock();
 }
 
-static void
-handle_msg_timeout(struct ipmi_recv_msg *msg)
-{
-       msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-       msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
-       msg->msg.netfn |= 1; /* Convert to a response. */
-       msg->msg.data_len = 1;
-       msg->msg.data = msg->msg_data;
-       deliver_response(msg);
-}
 
 static struct ipmi_smi_msg *
 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
                              struct list_head *timeouts, long timeout_period,
                              int slot, unsigned long *flags)
 {
-       struct ipmi_recv_msg *msg;
+       struct ipmi_recv_msg     *msg;
+       struct ipmi_smi_handlers *handlers;
+
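+       /* The interface is not ready yet or is going away; don't
+        * process timeouts for it. */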
+       if (intf->intf_num == -1)
+               return;
 
        if (!ent->inuse)
                return;
                        return;
 
                spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
                /* Send the new message.  We send with a zero
                 * priority.  It timed out, I doubt time is
                 * that critical now, and high priority
                 * messages are really only for messages to the
                 * local MC, which don't get resent. */
-               intf->handlers->sender(intf->send_info,
-                                      smi_msg, 0);
+               handlers = intf->handlers;
+               if (handlers)
+                       handlers->sender(intf->send_info,
+                                        smi_msg, 0);
+               else
+                       ipmi_free_smi_msg(smi_msg);
+
                spin_lock_irqsave(&intf->seq_lock, *flags);
        }
 }
                spin_unlock_irqrestore(&intf->seq_lock, flags);
 
                list_for_each_entry_safe(msg, msg2, &timeouts, link)
-                       handle_msg_timeout(msg);
+                       deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
        }
        rcu_read_unlock();
 }
 
 static void ipmi_request_event(void)
 {
-       ipmi_smi_t intf;
+       ipmi_smi_t               intf;
+       struct ipmi_smi_handlers *handlers;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(intf, &ipmi_interfaces, link)
-               intf->handlers->request_events(intf->send_info);
+       /* Called from the timer under rcu_read_lock(); fetch
+        * handlers into a local and skip interfaces whose handlers
+        * have been cleared by a hot remove. */
+       list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+               handlers = intf->handlers;
+               if (handlers)
+                       handlers->request_events(intf->send_info);
+       }
        rcu_read_unlock();
 }
 
 
        /* For every registered interface, send the event. */
        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-               if (intf->intf_num == -1)
-                       /* Interface was not ready yet. */
+               if (!intf->handlers)
+                       /* Interface is not ready. */
                        continue;
 
                /* Send the event announcing the panic. */
 
        /* For every registered interface, set it to run to completion. */
        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-               if (intf->intf_num == -1)
-                       /* Interface was not ready yet. */
+               if (!intf->handlers)
+                       /* Interface is not ready. */
                        continue;
 
                intf->handlers->set_run_to_completion(intf->send_info, 1);
 
 static int nowayout = WATCHDOG_NOWAYOUT;
 
 static ipmi_user_t watchdog_user = NULL;
+static int watchdog_ifnum;
 
 /* Default the timeout to 10 seconds. */
 static int timeout = 10;
 static char pretimeout_since_last_heartbeat = 0;
 static char expect_close;
 
+static int ifnum_to_use = -1;
+
 static DECLARE_RWSEM(register_sem);
 
 /* Parameters to ipmi_set_timeout */
 #define IPMI_SET_TIMEOUT_FORCE_HB              2
 
 static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
 
 /* If true, the driver will start running as soon as it is configured
    and ready. */
        return strlen(buffer);
 }
 
+
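+/*
+ * Setting ifnum_to_use at runtime moves the watchdog to the new
+ * interface: unregister from the current one and register against
+ * the requested one.
+ */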
+static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+{
+       int rv = param_set_int(val, kp);
+       if (rv)
+               return rv;
+       if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+               return 0;
+
+       ipmi_unregister_watchdog(watchdog_ifnum);
+       ipmi_register_watchdog(ifnum_to_use);
+       return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int,
+                 &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+                "timer.  Setting to -1 defaults to the first registered "
+                "interface");
+
 module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
 MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
 
 MODULE_PARM_DESC(preop, "Pretimeout driver operation.  One of: "
                 "preop_none, preop_panic, preop_give_data.");
 
-module_param(start_now, int, 0);
+module_param(start_now, int, 0444);
 MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
                 "soon as the driver is loaded.");
 
 module_param(nowayout, int, 0644);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+                "(default=CONFIG_WATCHDOG_NOWAYOUT)");
 
 /* Default state of the timer. */
 static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
        if (watchdog_user)
                goto out;
 
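+       /* Only bind to the interface the user asked for (or the
+        * first one registered if ifnum_to_use is -1). */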
+       if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+               goto out;
+
+       watchdog_ifnum = ipmi_intf;
+
        rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
        if (rv < 0) {
                printk(KERN_CRIT PFX "Unable to register with ipmi\n");
        }
 }
 
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+       int rv;
+
+       down_write(&register_sem);
+
+       if (!watchdog_user)
+               goto out;
+
+       if (watchdog_ifnum != ipmi_intf)
+               goto out;
+
+       /* Make sure no one can call us any more. */
+       misc_deregister(&ipmi_wdog_miscdev);
+
+       /* Wait to make sure the message makes it out.  The lower layer has
+          pointers to our buffers, we want to make sure they are done before
+          we release our memory. */
+       while (atomic_read(&set_timeout_tofree))
+               schedule_timeout_uninterruptible(1);
+
+       /* Disconnect from IPMI. */
+       rv = ipmi_destroy_user(watchdog_user);
+       if (rv) {
+               printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
+                      rv);
+       }
+       watchdog_user = NULL;
+
+ out:
+       up_write(&register_sem);
+}
+
 #ifdef HAVE_NMI_HANDLER
 static int
 ipmi_nmi(void *dev_id, int cpu, int handled)
 
 static void ipmi_smi_gone(int if_num)
 {
-       /* This can never be called, because once the watchdog is
-          registered, the interface can't go away until the watchdog
-          is unregistered. */
+       ipmi_unregister_watchdog(if_num);
 }
 
 static struct ipmi_smi_watcher smi_watcher =
 
        check_parms();
 
+       register_reboot_notifier(&wdog_reboot_notifier);
+       atomic_notifier_chain_register(&panic_notifier_list,
+                       &wdog_panic_notifier);
+
        rv = ipmi_smi_watcher_register(&smi_watcher);
        if (rv) {
 #ifdef HAVE_NMI_HANDLER
                if (preaction_val == WDOG_PRETIMEOUT_NMI)
                        release_nmi(&ipmi_nmi_handler);
 #endif
+               atomic_notifier_chain_unregister(&panic_notifier_list,
+                                                &wdog_panic_notifier);
+               unregister_reboot_notifier(&wdog_reboot_notifier);
                printk(KERN_WARNING PFX "can't register smi watcher\n");
                return rv;
        }
 
-       register_reboot_notifier(&wdog_reboot_notifier);
-       atomic_notifier_chain_register(&panic_notifier_list,
-                       &wdog_panic_notifier);
-
        printk(KERN_INFO PFX "driver initialized\n");
 
        return 0;
 }
 
-static __exit void ipmi_unregister_watchdog(void)
+static void __exit ipmi_wdog_exit(void)
 {
-       int rv;
-
-       down_write(&register_sem);
+       ipmi_smi_watcher_unregister(&smi_watcher);
+       ipmi_unregister_watchdog(watchdog_ifnum);
 
 #ifdef HAVE_NMI_HANDLER
        if (nmi_handler_registered)
 #endif
 
        atomic_notifier_chain_unregister(&panic_notifier_list,
-                       &wdog_panic_notifier);
+                                        &wdog_panic_notifier);
        unregister_reboot_notifier(&wdog_reboot_notifier);
-
-       if (! watchdog_user)
-               goto out;
-
-       /* Make sure no one can call us any more. */
-       misc_deregister(&ipmi_wdog_miscdev);
-
-       /* Wait to make sure the message makes it out.  The lower layer has
-          pointers to our buffers, we want to make sure they are done before
-          we release our memory. */
-       while (atomic_read(&set_timeout_tofree))
-               schedule_timeout_uninterruptible(1);
-
-       /* Disconnect from IPMI. */
-       rv = ipmi_destroy_user(watchdog_user);
-       if (rv) {
-               printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
-                      rv);
-       }
-       watchdog_user = NULL;
-
- out:
-       up_write(&register_sem);
-}
-
-static void __exit ipmi_wdog_exit(void)
-{
-       ipmi_smi_watcher_unregister(&smi_watcher);
-       ipmi_unregister_watchdog();
 }
 module_exit(ipmi_wdog_exit);
 module_init(ipmi_wdog_init);