        unix_update_graph(unix_edge_successor(edge));
 }
 
-static bool gc_in_progress;
-
 static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
 {
        struct unix_vertex *vertex = edge->predecessor->vertex;
 
-       if (!gc_in_progress)
+       if (!fpl->dead)
                unix_update_graph(unix_edge_successor(edge));
 
        list_del(&edge->vertex_entry);
                unix_del_edge(fpl, edge);
        } while (i < fpl->count_unix);
 
-       if (!gc_in_progress) {
+       if (!fpl->dead) {
                receiver = fpl->edges[0].successor;
                receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
        }
        list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
 }
 
+static bool gc_in_progress;
+
 static void __unix_gc(struct work_struct *work)
 {
        struct sk_buff_head hitlist;
+       struct sk_buff *skb;
 
        spin_lock(&unix_gc_lock);
 
 
        spin_unlock(&unix_gc_lock);
 
+       skb_queue_walk(&hitlist, skb) {
+               if (UNIXCB(skb).fp)
+                       UNIXCB(skb).fp->dead = true;
+       }
+
        __skb_queue_purge(&hitlist);
 skip_gc:
        WRITE_ONCE(gc_in_progress, false);
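
For readers who want to try the idea outside the kernel, here is a minimal, self-contained userspace sketch of the pattern the hunks above implement: the GC marks every file-descriptor list on the hitlist as dead before purging it, and the edge-teardown path checks that per-list flag instead of the old global gc_in_progress. Every type and helper below (fp_list, skb, del_edges, purge_hitlist) is a simplified stand-in, not the kernel's scm_fp_list, sk_buff, or unix_del_edges.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct scm_fp_list: only the fields this sketch needs. */
struct fp_list {
        bool dead;              /* set by the GC once the list sits on the hitlist */
        int count_unix;
};

/* Simplified stand-in for an skb carrying SCM_RIGHTS file descriptors. */
struct skb {
        struct fp_list *fp;     /* plays the role of UNIXCB(skb).fp */
        struct skb *next;
};

/* Teardown path: a live list updates the graph and receiver stats,
 * a dead one (already condemned by the GC) skips that work. */
static void del_edges(struct fp_list *fpl)
{
        if (!fpl->dead)
                printf("updating graph and receiver stats for %d fds\n",
                       fpl->count_unix);
        else
                printf("list already dead, skipping graph update\n");
}

/* GC path: first mark every list on the hitlist dead, then purge it.
 * In the kernel the second loop is not written out like this; the
 * teardown runs later, from the skb destructor path triggered by
 * __skb_queue_purge(). */
static void purge_hitlist(struct skb *hitlist)
{
        struct skb *skb;

        for (skb = hitlist; skb; skb = skb->next)
                if (skb->fp)
                        skb->fp->dead = true;

        for (skb = hitlist; skb; skb = skb->next)
                if (skb->fp)
                        del_edges(skb->fp);
}

int main(void)
{
        struct fp_list live = { .dead = false, .count_unix = 2 };
        struct fp_list doomed = { .dead = false, .count_unix = 1 };
        struct skb victim = { .fp = &doomed, .next = NULL };

        del_edges(&live);       /* normal close: graph update runs    */
        purge_hitlist(&victim); /* GC victim: graph update is skipped */
        return 0;
}

The effect mirrors the patch: only SKBs actually placed on the hitlist have their graph and stat updates suppressed, rather than suppressing them globally for every list torn down while a GC cycle happens to be running.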