 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-       spinlock_t lock ____cacheline_aligned_in_smp;
+       raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 {
        return &(kretprobe_table_locks[hash].lock);
 }
        hlist_del(&ri->hlist);
        INIT_HLIST_NODE(&ri->hlist);
        if (likely(rp)) {
-               spin_lock(&rp->lock);
+               raw_spin_lock(&rp->lock);
                hlist_add_head(&ri->hlist, &rp->free_instances);
-               spin_unlock(&rp->lock);
+               raw_spin_unlock(&rp->lock);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
 __acquires(hlist_lock)
 {
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-       spinlock_t *hlist_lock;
+       raw_spinlock_t *hlist_lock;
 
        *head = &kretprobe_inst_table[hash];
        hlist_lock = kretprobe_table_lock_ptr(hash);
-       spin_lock_irqsave(hlist_lock, *flags);
+       raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
        unsigned long *flags)
 __acquires(hlist_lock)
 {
-       spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-       spin_lock_irqsave(hlist_lock, *flags);
+       raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+       raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 __releases(hlist_lock)
 {
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-       spinlock_t *hlist_lock;
+       raw_spinlock_t *hlist_lock;
 
        hlist_lock = kretprobe_table_lock_ptr(hash);
-       spin_unlock_irqrestore(hlist_lock, *flags);
+       raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_unlock(unsigned long hash,
        unsigned long *flags)
 __releases(hlist_lock)
 {
-       spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-       spin_unlock_irqrestore(hlist_lock, *flags);
+       raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+       raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 /*
 
        /*TODO: consider to only swap the RA after the last pre_handler fired */
        hash = hash_ptr(current, KPROBE_HASH_BITS);
-       spin_lock_irqsave(&rp->lock, flags);
+       raw_spin_lock_irqsave(&rp->lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
                ri = hlist_entry(rp->free_instances.first,
                                struct kretprobe_instance, hlist);
                hlist_del(&ri->hlist);
-               spin_unlock_irqrestore(&rp->lock, flags);
+               raw_spin_unlock_irqrestore(&rp->lock, flags);
 
                ri->rp = rp;
                ri->task = current;
                kretprobe_table_unlock(hash, &flags);
        } else {
                rp->nmissed++;
-               spin_unlock_irqrestore(&rp->lock, flags);
+               raw_spin_unlock_irqrestore(&rp->lock, flags);
        }
        return 0;
 }
                rp->maxactive = num_possible_cpus();
 #endif
        }
-       spin_lock_init(&rp->lock);
+       raw_spin_lock_init(&rp->lock);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance) +
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-               spin_lock_init(&(kretprobe_table_locks[i].lock));
+               raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
        }
 
        /*
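
A minimal, self-contained sketch of the per-bucket raw_spinlock_t pattern this patch converts to. It assumes a kernel-module context; DEMO_TABLE_SIZE, demo_table_locks and the demo_* helpers are illustrative names only, not identifiers from kernel/kprobes.c.

#include <linux/spinlock.h>
#include <linux/cache.h>

#define DEMO_TABLE_SIZE 64

/* One cache-line-aligned raw lock per hash bucket, mirroring kretprobe_table_locks. */
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} demo_table_locks[DEMO_TABLE_SIZE];

static void demo_locks_init(void)
{
	int i;

	for (i = 0; i < DEMO_TABLE_SIZE; i++)
		raw_spin_lock_init(&demo_table_locks[i].lock);
}

/*
 * raw_spin_lock_irqsave() remains a true spinning lock even on PREEMPT_RT,
 * where plain spinlock_t becomes a sleeping lock; probe-context code that
 * must not sleep therefore uses the raw_ variants, as in the hunks above.
 */
static void demo_bucket_op(unsigned long hash, unsigned long *flags)
{
	raw_spin_lock_irqsave(&demo_table_locks[hash].lock, *flags);
	/* ... operate on the bucket's hlist here ... */
	raw_spin_unlock_irqrestore(&demo_table_locks[hash].lock, *flags);
}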