        ipc_rmid(&msg_ids(ns), &s->q_perm);
 }
 
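+/*
+ * msg_rcu_free - RCU callback for msg queues
+ *
+ * Frees the LSM security blob together with the ipc_rcu allocation,
+ * but only after the RCU grace period has elapsed, so concurrent
+ * lockless readers never see freed security data.
+ */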
+static void msg_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct msg_queue *msq = ipc_rcu_to_struct(p);
+
+       security_msg_queue_free(msq);
+       ipc_rcu_free(head);
+}
+
 /**
  * newque - Create a new msg queue
  * @ns: namespace
        msq->q_perm.security = NULL;
        retval = security_msg_queue_alloc(msq);
        if (retval) {
-               ipc_rcu_putref(msq);
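+               /* security_msg_queue_alloc() failed: no blob to release */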
+               ipc_rcu_putref(msq, ipc_rcu_free);
                return retval;
        }
 
        /* ipc_addid() locks msq upon success. */
        id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
        if (id < 0) {
-               security_msg_queue_free(msq);
-               ipc_rcu_putref(msq);
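+               /*
+                * The security blob exists by now, so let msg_rcu_free()
+                * release it after the grace period.
+                */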
+               ipc_rcu_putref(msq, msg_rcu_free);
                return id;
        }
 
                free_msg(msg);
        }
        atomic_sub(msq->q_cbytes, &ns->msg_bytes);
-       security_msg_queue_free(msq);
-       ipc_rcu_putref(msq);
+       ipc_rcu_putref(msq, msg_rcu_free);
 }
 
 /*
                rcu_read_lock();
                ipc_lock_object(&msq->q_perm);
 
-               ipc_rcu_putref(msq);
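+               /*
+                * Drop the extra reference taken via ipc_rcu_getref()
+                * before sleeping; the security blob is left alone here.
+                */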
+               ipc_rcu_putref(msq, ipc_rcu_free);
                if (msq->q_perm.deleted) {
                        err = -EIDRM;
                        goto out_unlock0;
 
        }
 }
 
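+/*
+ * sem_rcu_free - RCU callback for semaphore arrays
+ *
+ * Defers freeing the security blob until after the RCU grace period,
+ * mirroring msg_rcu_free().
+ */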
+static void sem_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct sem_array *sma = ipc_rcu_to_struct(p);
+
+       security_sem_free(sma);
+       ipc_rcu_free(head);
+}
+
 /*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
 static inline void sem_lock_and_putref(struct sem_array *sma)
 {
        sem_lock(sma, NULL, -1);
-       ipc_rcu_putref(sma);
-}
-
-static inline void sem_putref(struct sem_array *sma)
-{
-       ipc_rcu_putref(sma);
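+       /* drop the extra reference taken via ipc_rcu_getref() */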
+       ipc_rcu_putref(sma, ipc_rcu_free);
 }
 
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(sma);
        if (retval) {
-               ipc_rcu_putref(sma);
+               ipc_rcu_putref(sma, ipc_rcu_free);
                return retval;
        }
 
        id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (id < 0) {
-               security_sem_free(sma);
-               ipc_rcu_putref(sma);
+               ipc_rcu_putref(sma, sem_rcu_free);
                return id;
        }
        ns->used_sems += nsems;
 
        wake_up_sem_queue_do(&tasks);
        ns->used_sems -= sma->sem_nsems;
-       security_sem_free(sma);
-       ipc_rcu_putref(sma);
+       ipc_rcu_putref(sma, sem_rcu_free);
 }
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
                        rcu_read_unlock();
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                return -ENOMEM;
                        }
 
                if(nsems > SEMMSL_FAST) {
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                return -ENOMEM;
                        }
                }
 
                if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
-                       sem_putref(sma);
+                       ipc_rcu_putref(sma, ipc_rcu_free);
                        err = -EFAULT;
                        goto out_free;
                }
 
                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                err = -ERANGE;
                                goto out_free;
                        }
        /* step 2: allocate new undo structure */
        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
        if (!new) {
-               sem_putref(sma);
+               ipc_rcu_putref(sma, ipc_rcu_free);
                return ERR_PTR(-ENOMEM);
        }
 
 
        ipc_lock_object(&ipcp->shm_perm);
 }
 
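+/*
+ * shm_rcu_free - RCU callback for shm segments
+ *
+ * Releases the security blob only after the RCU grace period, so
+ * lockless readers cannot race with security_shm_free().
+ */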
+static void shm_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct shmid_kernel *shp = ipc_rcu_to_struct(p);
+
+       security_shm_free(shp);
+       ipc_rcu_free(head);
+}
+
 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 {
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
                user_shm_unlock(file_inode(shp->shm_file)->i_size,
                                                shp->mlock_user);
        fput (shp->shm_file);
-       security_shm_free(shp);
-       ipc_rcu_putref(shp);
+       ipc_rcu_putref(shp, shm_rcu_free);
 }
 
 /*
        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
-               ipc_rcu_putref(shp);
+               ipc_rcu_putref(shp, ipc_rcu_free);
                return error;
        }
 
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
 no_file:
-       security_shm_free(shp);
-       ipc_rcu_putref(shp);
+       ipc_rcu_putref(shp, shm_rcu_free);
        return error;
 }
 
 
                kfree(ptr);
 }
 
-struct ipc_rcu {
-       struct rcu_head rcu;
-       atomic_t refcount;
-} ____cacheline_aligned_in_smp;
-
 /**
  *     ipc_rcu_alloc   -       allocate ipc and rcu space 
  *     @size: size desired
        return atomic_inc_not_zero(&p->refcount);
 }
 
-/**
- * ipc_schedule_free - free ipc + rcu space
- * @head: RCU callback structure for queued work
- */
-static void ipc_schedule_free(struct rcu_head *head)
-{
-       vfree(container_of(head, struct ipc_rcu, rcu));
-}
-
-void ipc_rcu_putref(void *ptr)
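+/**
+ * ipc_rcu_putref - drop a reference to an ipc structure
+ * @ptr: structure allocated with ipc_rcu_alloc()
+ * @func: RCU callback that frees the structure once the grace period
+ *        ends; callers that may drop the final reference must pass a
+ *        *_rcu_free() variant that also releases the security blob
+ */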
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
 {
        struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
 
        if (!atomic_dec_and_test(&p->refcount))
                return;
 
-       if (is_vmalloc_addr(ptr)) {
-               call_rcu(&p->rcu, ipc_schedule_free);
-       } else {
-               kfree_rcu(p, rcu);
-       }
+       call_rcu(&p->rcu, func);
+}
+
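+/**
+ * ipc_rcu_free - free ipc + rcu space
+ * @head: RCU callback structure embedded in struct ipc_rcu
+ *
+ * The allocation may come from kmalloc or vmalloc, so pick the
+ * matching free routine.
+ */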
+void ipc_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+
+       if (is_vmalloc_addr(p))
+               vfree(p);
+       else
+               kfree(p);
 }
 
 /**
 
 static inline void shm_exit_ns(struct ipc_namespace *ns) { }
 #endif
 
+struct ipc_rcu {
+       struct rcu_head rcu;
+       atomic_t refcount;
+} ____cacheline_aligned_in_smp;
+
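+/* the ipc_rcu header is allocated immediately in front of the object */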
+#define ipc_rcu_to_struct(p)  ((void *)(p+1))
+
 /*
  * Structure that holds the parameters needed by the ipc operations
  * (see after)
  */
 void* ipc_rcu_alloc(int size);
 int ipc_rcu_getref(void *ptr);
-void ipc_rcu_putref(void *ptr);
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
+void ipc_rcu_free(struct rcu_head *head);
 
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);