},
        {
                .hmac_id = SCTP_AUTH_HMAC_ID_SHA1,
-               .hmac_name="hmac(sha1)",
+               .hmac_name = "hmac(sha1)",
                .hmac_len = SCTP_SHA1_SIG_SIZE,
        },
        {
 #if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
        {
                .hmac_id = SCTP_AUTH_HMAC_ID_SHA256,
-               .hmac_name="hmac(sha256)",
+               .hmac_name = "hmac(sha256)",
                .hmac_len = SCTP_SHA256_SIG_SIZE,
        }
 #endif
                 * lead-zero padded.  If it is not, it
                 * is automatically larger numerically.
                 */
-               for (i = 0; i < abs(diff); i++ ) {
+               for (i = 0; i < abs(diff); i++) {
                        if (longer[i] != 0)
                                return diff;
                }
 
                SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
 
        /* Create chunks for all the full sized DATA chunks. */
-       for (i=0, len=first_len; i < whole; i++) {
+       for (i = 0, len = first_len; i < whole; i++) {
                frag = SCTP_DATA_MIDDLE_FRAG;
 
                if (0 == i)
                        goto errout;
                }
 
-               err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov);
+               err = sctp_user_addto_chunk(chunk, offset, over, msgh->msg_iov);
 
                /* Put the chunk->skb back into the form expected by send.  */
                __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
 
        struct sctp_af *af;
        struct net *net = dev_net(skb->dev);
 
-       if (skb->pkt_type!=PACKET_HOST)
+       if (skb->pkt_type != PACKET_HOST)
                goto discard_it;
 
        SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
                if (ch_end > skb_tail_pointer(skb))
                        break;
 
-               switch(ch->type) {
+               switch (ch->type) {
                    case SCTP_CID_AUTH:
                            have_auth = chunk_num;
                            break;
 
 }
 
 /* Initialize a sockaddr_storage from in incoming skb. */
-static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
+static void sctp_v6_from_skb(union sctp_addr *addr, struct sk_buff *skb,
                             int is_saddr)
 {
        __be16 *port;
 
                                       struct sctp_transport *transport,
                                       int count_of_newacks)
 {
-       if (count_of_newacks >=2 && transport != primary)
+       if (count_of_newacks >= 2 && transport != primary)
                return 1;
        return 0;
 }
        struct net *net = sock_net(q->asoc->base.sk);
        int error = 0;
 
-       switch(reason) {
+       switch (reason) {
        case SCTP_RTXR_T3_RTX:
                SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
         *
         * --xguo
         */
-       while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) {
+       while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
                struct sctp_transport *t = list_entry(ltransport,
                                                      struct sctp_transport,
                                                      send_ready);
                 * destinations for which cacc_saw_newack is set.
                 */
                if (transport->cacc.cacc_saw_newack)
-                       count_of_newacks ++;
+                       count_of_newacks++;
        }
 
        /* Move the Cumulative TSN Ack Point if appropriate.  */
 
         * added as the primary transport.  The source address seems to
         * be a a better choice than any of the embedded addresses.
         */
-       if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
+       if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
                goto nomem;
 
        if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
 
        while (asconf_ack_len > 0) {
                if (asconf_ack_param->crr_id == asconf_param->crr_id) {
-                       switch(asconf_ack_param->param_hdr.type) {
+                       switch (asconf_ack_param->param_hdr.type) {
                        case SCTP_PARAM_SUCCESS_REPORT:
                                return SCTP_ERROR_NO_ERROR;
                        case SCTP_PARAM_ERR_CAUSE:
 
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
        struct net *net = sock_net(asoc->base.sk);
-       
+
        sctp_bh_lock_sock(asoc->base.sk);
        if (sock_owned_by_user(asoc->base.sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 {
        struct sctp_ulpevent *event;
 
-       event = sctp_ulpevent_make_assoc_change(asoc,0, SCTP_CANT_STR_ASSOC,
+       event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
                                                (__u16)error, 0, 0, NULL,
                                                GFP_ATOMIC);
 
 
                return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
-       error = sctp_eat_data(asoc, chunk, commands );
+       error = sctp_eat_data(asoc, chunk, commands);
        switch (error) {
        case SCTP_IERROR_NO_ERROR:
                break;
                return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
-       error = sctp_eat_data(asoc, chunk, commands );
+       error = sctp_eat_data(asoc, chunk, commands);
        switch (error) {
        case SCTP_IERROR_NO_ERROR:
        case SCTP_IERROR_HIGH_TSN:
                 */
                sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                                SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
-               sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+               sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
                sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                /* We are going to ABORT, so we might as well stop
                 * processing the rest of the chunks in the packet.
                 */
-               sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+               sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
                sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                     void *arg,
                                     sctp_cmd_seq_t *commands)
 {
-       static const char err_str[]="The following chunk had invalid length:";
+       static const char err_str[] = "The following chunk had invalid length:";
 
        return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
                                     void *arg,
                                     sctp_cmd_seq_t *commands)
 {
-       static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
+       static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:";
 
        return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
                                     void *arg,
                                     sctp_cmd_seq_t *commands)
 {
-       static const char err_str[]="The following chunk violates protocol:";
+       static const char err_str[] = "The following chunk violates protocol:";
 
        if (!asoc)
                return sctp_sf_violation(net, ep, asoc, type, arg, commands);
                /* Special case the INIT-ACK as there is no peer's vtag
                 * yet.
                 */
-               switch(chunk->chunk_hdr->type) {
+               switch (chunk->chunk_hdr->type) {
                case SCTP_CID_INIT_ACK:
                {
                        sctp_initack_chunk_t *initack;
                /* Special case the INIT and stale COOKIE_ECHO as there is no
                 * vtag yet.
                 */
-               switch(chunk->chunk_hdr->type) {
+               switch (chunk->chunk_hdr->type) {
                case SCTP_CID_INIT:
                {
                        sctp_init_chunk_t *init;
                /* We are going to ABORT, so we might as well stop
                 * processing the rest of the chunks in the packet.
                 */
-               sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+               sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
                sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 
        struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
-       struct sctp_association *new_asoc=NULL, *asoc=NULL;
+       struct sctp_association *new_asoc = NULL, *asoc = NULL;
        struct sctp_transport *transport, *chunk_tp;
        struct sctp_chunk *chunk;
        union sctp_addr to;
        int hb_change, pmtud_change, sackdelay_change;
 
        if (optlen != sizeof(struct sctp_paddrparams))
-               return - EINVAL;
+               return -EINVAL;
 
        if (copy_from_user(&params, optval, optlen))
                return -EFAULT;
        /* If an address other than INADDR_ANY is specified, and
         * no transport is found, then the request is invalid.
         */
-       if (!sctp_is_any(sk, ( union sctp_addr *)&params.spp_address)) {
+       if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
                trans = sctp_addr_id2transport(sk, &params.spp_address,
                                               params.spp_assoc_id);
                if (!trans)
                else
                        params.sack_freq = 0;
        } else
-               return - EINVAL;
+               return -EINVAL;
 
        /* Validate value parameter. */
        if (params.sack_delay > 500)
        if (optlen < sizeof(struct sctp_hmacalgo))
                return -EINVAL;
 
-       hmacs= memdup_user(optval, optlen);
+       hmacs = memdup_user(optval, optlen);
        if (IS_ERR(hmacs))
                return PTR_ERR(hmacs);
 
        if (optlen <= sizeof(struct sctp_authkey))
                return -EINVAL;
 
-       authkey= memdup_user(optval, optlen);
+       authkey = memdup_user(optval, optlen);
        if (IS_ERR(authkey))
                return PTR_ERR(authkey);
 
        /* If an address other than INADDR_ANY is specified, and
         * no transport is found, then the request is invalid.
         */
-       if (!sctp_is_any(sk, ( union sctp_addr *)&params.spp_address)) {
+       if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
                trans = sctp_addr_id2transport(sk, &params.spp_address,
                                               params.spp_assoc_id);
                if (!trans) {
                if (copy_from_user(&params, optval, len))
                        return -EFAULT;
        } else
-               return - EINVAL;
+               return -EINVAL;
 
        /* Get association, if sack_assoc_id != 0 and the socket is a one
         * to many style socket, and an association was not found, then
        if (!asoc)
                return -EINVAL;
 
-       to = optval + offsetof(struct sctp_getaddrs,addrs);
-       space_left = len - offsetof(struct sctp_getaddrs,addrs);
+       to = optval + offsetof(struct sctp_getaddrs, addrs);
+       space_left = len - offsetof(struct sctp_getaddrs, addrs);
 
        list_for_each_entry(from, &asoc->peer.transport_addr_list,
                                transports) {
                memcpy(to, &temp, addrlen);
 
                to += addrlen;
-               cnt ++;
+               cnt++;
                space_left -= addrlen;
                *bytes_copied += addrlen;
        }
                bp = &asoc->base.bind_addr;
        }
 
-       to = optval + offsetof(struct sctp_getaddrs,addrs);
-       space_left = len - offsetof(struct sctp_getaddrs,addrs);
+       to = optval + offsetof(struct sctp_getaddrs, addrs);
+       space_left = len - offsetof(struct sctp_getaddrs, addrs);
 
        addrs = kmalloc(space_left, GFP_KERNEL);
        if (!addrs)
                memcpy(buf, &temp, addrlen);
                buf += addrlen;
                bytes_copied += addrlen;
-               cnt ++;
+               cnt++;
                space_left -= addrlen;
        }
 
                assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
 
                list_for_each(pos, &asoc->peer.transport_addr_list) {
-                       cnt ++;
+                       cnt++;
                }
 
                assocparams.sasoc_number_peer_destinations = cnt;
 
        event = sctp_ulpq_reasm(ulpq, event);
 
        /* Do ordering if needed.  */
-       if ((event) && (event->msg_flags & MSG_EOR)){
+       if ((event) && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));
 
        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
                /* Do ordering if needed.  */
-               if ((event) && (event->msg_flags & MSG_EOR)){
+               if ((event) && (event->msg_flags & MSG_EOR)) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));