*/
        u16 count;
        u8 flags;
+       u8 order;       /* order of the swap entries allocated from this cluster */
        struct list_head list;
 };
 #define CLUSTER_FLAG_FREE 1 /* This cluster is free */
+#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on the nonfull list */
 
 /*
  * The first page in the swap file is the swap header, which is always marked
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct list_head free_clusters; /* free clusters list */
+       struct list_head nonfull_clusters[SWAP_NR_ORDERS];
+                                       /* list of clusters that contain at least one free slot */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
 
        memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                        SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 
-       list_add_tail(&ci->list, &si->discard_clusters);
+       VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
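+       /*
+        * The cluster may still be on the nonfull list; list_move_tail()
+        * detaches it from there before queueing it for discard, while a
+        * cluster that is on no list is simply added.
+        */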
+       if (ci->flags & CLUSTER_FLAG_NONFULL)
+               list_move_tail(&ci->list, &si->discard_clusters);
+       else
+               list_add_tail(&ci->list, &si->discard_clusters);
+       ci->flags = 0;
        schedule_work(&si->discard_work);
 }
 
 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
 {
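+       /*
+        * A cluster on the nonfull list must be moved, not added, so that
+        * it ends up on exactly one list: the free list.
+        */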
+       if (ci->flags & CLUSTER_FLAG_NONFULL)
+               list_move_tail(&ci->list, &si->free_clusters);
+       else
+               list_add_tail(&ci->list, &si->free_clusters);
        ci->flags = CLUSTER_FLAG_FREE;
-       list_add_tail(&ci->list, &si->free_clusters);
 }
 
 /*
        VM_BUG_ON(ci->count == 0);
        ci->count--;
 
-       if (!ci->count)
+       if (!ci->count) {
                free_cluster(p, ci);
+               return;
+       }
+
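+       /*
+        * The cluster now has at least one free slot; put it on the
+        * per-order nonfull list unless it is already there.
+        */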
+       if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
+               list_add_tail(&ci->list, &p->nonfull_clusters[ci->order]);
+               ci->flags |= CLUSTER_FLAG_NONFULL;
+       }
 }
 
 /*
        if (tmp == SWAP_NEXT_INVALID) {
                if (!list_empty(&si->free_clusters)) {
                        ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
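+                       /* Take a free cluster off the list and dedicate it to this order. */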
+                       list_del(&ci->list);
+                       spin_lock(&ci->lock);
+                       ci->order = order;
+                       ci->flags = 0;
+                       spin_unlock(&ci->lock);
+                       tmp = cluster_index(si, ci) * SWAPFILE_CLUSTER;
+               } else if (!list_empty(&si->nonfull_clusters[order])) {
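+                       /* No free cluster; reuse a partially filled one of the same order. */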
+                       ci = list_first_entry(&si->nonfull_clusters[order],
+                                             struct swap_cluster_info, list);
+                       list_del(&ci->list);
+                       spin_lock(&ci->lock);
+                       ci->flags = 0;
+                       spin_unlock(&ci->lock);
                        tmp = cluster_index(si, ci) * SWAPFILE_CLUSTER;
                } else if (!list_empty(&si->discard_clusters)) {
                        /*
        ci = lock_cluster(si, offset);
        memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
        ci->count = 0;
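+       /* The whole cluster is free again; clear the order it was dedicated to. */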
+       ci->order = 0;
        ci->flags = 0;
        free_cluster(si, ci);
        unlock_cluster(ci);
        INIT_LIST_HEAD(&p->free_clusters);
        INIT_LIST_HEAD(&p->discard_clusters);
 
+       for (i = 0; i < SWAP_NR_ORDERS; i++)
+               INIT_LIST_HEAD(&p->nonfull_clusters[i]);
+
        for (i = 0; i < swap_header->info.nr_badpages; i++) {
                unsigned int page_nr = swap_header->info.badpages[i];
                if (page_nr == 0 || page_nr > swap_header->info.last_page)