        sk->sk_write_space      = unix_write_space;
        sk->sk_max_ack_backlog  = net->unx.sysctl_max_dgram_qlen;
        sk->sk_destruct         = unix_sock_destructor;
-       u         = unix_sk(sk);
+       u = unix_sk(sk);
+       u->inflight = 0;
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        spin_lock_init(&u->lock);
-       atomic_long_set(&u->inflight, 0);
        INIT_LIST_HEAD(&u->link);
        mutex_init(&u->iolock); /* single task reading lock */
        mutex_init(&u->bindlock); /* single task binding lock */
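
Not shown in this excerpt is the companion one-line hunk in
include/net/af_unix.h that drops the atomic type; assuming the mainline
struct unix_sock layout, it would read roughly:

-       atomic_long_t           inflight;
+       long                    inflight;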
 
 
 static void dec_inflight(struct unix_sock *usk)
 {
-       atomic_long_dec(&usk->inflight);
+       usk->inflight--;
 }
 
 static void inc_inflight(struct unix_sock *usk)
 {
-       atomic_long_inc(&usk->inflight);
+       usk->inflight++;
 }
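
Both helpers are reached only from unix_gc() via scan_children(), with
unix_gc_lock held across the whole collection pass, which is what makes the
unfenced ++/-- safe. A minimal outline of that call site, assuming the
surrounding mainline code in net/unix/garbage.c:

        spin_lock(&unix_gc_lock);
        ...
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);
        ...
        spin_unlock(&unix_gc_lock);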
 
 static void inc_inflight_move_tail(struct unix_sock *u)
 {
-       atomic_long_inc(&u->inflight);
+       u->inflight++;
+
        /* If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over
         */

        list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
                long total_refs;
-               long inflight_refs;
 
                total_refs = file_count(u->sk.sk_socket->file);
-               inflight_refs = atomic_long_read(&u->inflight);
 
-               BUG_ON(inflight_refs < 1);
-               BUG_ON(total_refs < inflight_refs);
-               if (total_refs == inflight_refs) {
+               BUG_ON(!u->inflight);
+               BUG_ON(total_refs < u->inflight);
+               if (total_refs == u->inflight) {
                        list_move_tail(&u->link, &gc_candidates);
                        __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                        __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);

                /* Move cursor to after the current position. */
                list_move(&cursor, &u->link);
 
-               if (atomic_long_read(&u->inflight) > 0) {
+               if (u->inflight) {
                        list_move_tail(&u->link, &not_cycle_list);
                        __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                        scan_children(&u->sk, inc_inflight_move_tail, NULL);
 
        if (s) {
                struct unix_sock *u = unix_sk(s);
 
-               if (atomic_long_inc_return(&u->inflight) == 1) {
+               if (!u->inflight) {
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
                } else {
                        BUG_ON(list_empty(&u->link));
                }
+               u->inflight++;
                /* Paired with READ_ONCE() in wait_for_unix_gc() */
                WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
        }

        if (s) {
                struct unix_sock *u = unix_sk(s);
 
-               BUG_ON(!atomic_long_read(&u->inflight));
+               BUG_ON(!u->inflight);
                BUG_ON(list_empty(&u->link));
 
-               if (atomic_long_dec_and_test(&u->inflight))
+               u->inflight--;
+               if (!u->inflight)
                        list_del_init(&u->link);
                /* Paired with READ_ONCE() in wait_for_unix_gc() */
                WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
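
The last two hunks sit inside unix_inflight() and unix_notinflight(), which
already take unix_gc_lock around the whole update; in outline, assuming the
mainline definitions:

        void unix_inflight(struct user_struct *user, struct file *fp)
        {
                struct sock *s = unix_get_socket(fp);

                spin_lock(&unix_gc_lock);
                /* hunk above: list bookkeeping plus u->inflight++ */
                spin_unlock(&unix_gc_lock);
        }

unix_notinflight() is symmetric, so u->inflight is never read or written
outside the lock and no longer needs atomic ops.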