*/
 
 /* spin lock wrappers. */
-#define sctp_spin_lock(lock)    spin_lock(lock)
-#define sctp_spin_unlock(lock)  spin_unlock(lock)
 #define sctp_write_lock(lock)   write_lock(lock)
 #define sctp_write_unlock(lock) write_unlock(lock)
 #define sctp_read_lock(lock)    read_lock(lock)
        unsigned long flags;
 
        spin_lock_irqsave(&head->lock, flags);
-       sctp_spin_lock(&list->lock);
+       spin_lock(&list->lock);
 
        skb_queue_splice_tail_init(list, head);
 
-       sctp_spin_unlock(&list->lock);
+       spin_unlock(&list->lock);
        spin_unlock_irqrestore(&head->lock, flags);
 }
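
For context, the helper in the hunk above splices one skb list onto the tail of another while holding both queue locks: the destination lock is taken first with spin_lock_irqsave(), then the source lock with a plain spin_lock(), the splice leaves the source list empty, and the locks are released in reverse order. The patch only drops the sctp_spin_lock()/sctp_spin_unlock() indirection; the nesting is unchanged. Below is a minimal userspace sketch of that splice-under-nested-locks shape, assuming pthread mutexes stand in for the kernel spinlocks and a hypothetical intrusive queue type stands in for sk_buff_head; it illustrates the locking pattern only and is not kernel code.

/* Hypothetical userspace analogue of the splice helper above. */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int val; };

struct queue {
        pthread_mutex_t lock;
        struct node *first, *last;
};

/* Move every node from src to the tail of dst; src ends up empty. */
static void queue_splice_tail_init(struct queue *dst, struct queue *src)
{
        pthread_mutex_lock(&dst->lock);         /* outer: destination queue */
        pthread_mutex_lock(&src->lock);         /* inner: source list */

        if (src->first) {
                if (dst->last)
                        dst->last->next = src->first;
                else
                        dst->first = src->first;
                dst->last = src->last;
                src->first = src->last = NULL;  /* re-init the source */
        }

        pthread_mutex_unlock(&src->lock);       /* unlock in reverse order */
        pthread_mutex_unlock(&dst->lock);
}

int main(void)
{
        struct node a = { NULL, 1 }, b = { NULL, 2 };
        struct queue dst = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
        struct queue src = { PTHREAD_MUTEX_INITIALIZER, &a, &b };

        a.next = &b;
        queue_splice_tail_init(&dst, &src);

        for (struct node *n = dst.first; n; n = n->next)
                printf("%d\n", n->val);         /* prints 1 then 2 */
        return 0;
}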
 
 
                                continue;
                        index = sctp_phashfn(sock_net(sk), rover);
                        head = &sctp_port_hashtable[index];
-                       sctp_spin_lock(&head->lock);
+                       spin_lock(&head->lock);
                        sctp_for_each_hentry(pp, &head->chain)
                                if ((pp->port == rover) &&
                                    net_eq(sock_net(sk), pp->net))
                                        goto next;
                        break;
                next:
-                       sctp_spin_unlock(&head->lock);
+                       spin_unlock(&head->lock);
                } while (--remaining > 0);
 
                /* Exhausted local port range during search? */
                 * port iterator, pp being NULL.
                 */
                head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
-               sctp_spin_lock(&head->lock);
+               spin_lock(&head->lock);
                sctp_for_each_hentry(pp, &head->chain) {
                        if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
                                goto pp_found;
        ret = 0;
 
 fail_unlock:
-       sctp_spin_unlock(&head->lock);
+       spin_unlock(&head->lock);
 
 fail:
        local_bh_enable();
                                                  inet_sk(sk)->inet_num)];
        struct sctp_bind_bucket *pp;
 
-       sctp_spin_lock(&head->lock);
+       spin_lock(&head->lock);
        pp = sctp_sk(sk)->bind_hash;
        __sk_del_bind_node(sk);
        sctp_sk(sk)->bind_hash = NULL;
        inet_sk(sk)->inet_num = 0;
        sctp_bucket_destroy(pp);
-       sctp_spin_unlock(&head->lock);
+       spin_unlock(&head->lock);
 }
 
 void sctp_put_port(struct sock *sk)
        head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
                                                 inet_sk(oldsk)->inet_num)];
        local_bh_disable();
-       sctp_spin_lock(&head->lock);
+       spin_lock(&head->lock);
        pp = sctp_sk(oldsk)->bind_hash;
        sk_add_bind_node(newsk, &pp->owner);
        sctp_sk(newsk)->bind_hash = pp;
        inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
-       sctp_spin_unlock(&head->lock);
+       spin_unlock(&head->lock);
        local_bh_enable();
 
        /* Copy the bind_addr list from the original endpoint to the new