struct netns_frags {
        int                     nqueues;
        struct list_head        lru_list;
+       spinlock_t              lru_lock;
 
        /* The percpu_counter "mem" need to be cacheline aligned.
         *  mem.count must not share cacheline with other writers
        return percpu_counter_sum_positive(&nf->mem);
 }
 
+/* Move @q to the tail of its namespace's LRU list under the per-netns
+ * lru_lock, serializing against concurrent add/del/move on the same list.
+ * NOTE(review): tail position presumably marks @q most-recently-used so the
+ * evictor (which takes list_first_entry) reclaims oldest queues first —
+ * confirm against the eviction loop.
+ */
+static inline void inet_frag_lru_move(struct inet_frag_queue *q)
+{
+       spin_lock(&q->net->lru_lock);
+       list_move_tail(&q->lru_list, &q->net->lru_list);
+       spin_unlock(&q->net->lru_lock);
+}
+
+/* Unlink @q from its namespace's LRU list under the per-netns lru_lock.
+ * Caller keeps hash-table removal separate (under f->lock); only the LRU
+ * linkage is protected here.
+ */
+static inline void inet_frag_lru_del(struct inet_frag_queue *q)
+{
+       spin_lock(&q->net->lru_lock);
+       list_del(&q->lru_list);
+       spin_unlock(&q->net->lru_lock);
+}
+
+/* Append @q to the tail of @nf's LRU list under the per-netns lru_lock.
+ * Takes @nf explicitly — NOTE(review): presumably q->net is not yet set
+ * (or not yet published) at the call site during queue creation; verify
+ * against inet_frag_intern, which calls this after the hash insert.
+ */
+static inline void inet_frag_lru_add(struct netns_frags *nf,
+                                    struct inet_frag_queue *q)
+{
+       spin_lock(&nf->lru_lock);
+       list_add_tail(&q->lru_list, &nf->lru_list);
+       spin_unlock(&nf->lru_lock);
+}
 #endif
 
        nf->nqueues = 0;
        init_frag_mem_limit(nf);
        INIT_LIST_HEAD(&nf->lru_list);
+       spin_lock_init(&nf->lru_lock);
 }
 EXPORT_SYMBOL(inet_frags_init_net);
 
 {
        write_lock(&f->lock);
        hlist_del(&fq->list);
-       list_del(&fq->lru_list);
        fq->net->nqueues--;
        write_unlock(&f->lock);
+       inet_frag_lru_del(fq);
 }
 
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
 
        work = frag_mem_limit(nf) - nf->low_thresh;
        while (work > 0) {
-               read_lock(&f->lock);
+               spin_lock(&nf->lru_lock);
+
                if (list_empty(&nf->lru_list)) {
-                       read_unlock(&f->lock);
+                       spin_unlock(&nf->lru_lock);
                        break;
                }
 
                q = list_first_entry(&nf->lru_list,
                                struct inet_frag_queue, lru_list);
                atomic_inc(&q->refcnt);
-               read_unlock(&f->lock);
+               spin_unlock(&nf->lru_lock);
 
                spin_lock(&q->lock);
                if (!(q->last_in & INET_FRAG_COMPLETE))
 
        atomic_inc(&qp->refcnt);
        hlist_add_head(&qp->list, &f->hash[hash]);
-       list_add_tail(&qp->lru_list, &nf->lru_list);
        nf->nqueues++;
        write_unlock(&f->lock);
+       inet_frag_lru_add(nf, qp);
        return qp;
 }
 
 
            qp->q.meat == qp->q.len)
                return ip_frag_reasm(qp, prev, dev);
 
-       write_lock(&ip4_frags.lock);
-       list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
-       write_unlock(&ip4_frags.lock);
+       inet_frag_lru_move(&qp->q);
        return -EINPROGRESS;
 
 err:
 
                fq->nhoffset = nhoff;
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        }
-       write_lock(&nf_frags.lock);
-       list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
-       write_unlock(&nf_frags.lock);
+
+       inet_frag_lru_move(&fq->q);
        return 0;
 
 discard_fq:
 
            fq->q.meat == fq->q.len)
                return ip6_frag_reasm(fq, prev, dev);
 
-       write_lock(&ip6_frags.lock);
-       list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
-       write_unlock(&ip6_frags.lock);
+       inet_frag_lru_move(&fq->q);
        return -1;
 
 discard_fq: