mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
 
-
 static void do_flush_tlb_all(void *info)
 {
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
 }
 
 void flush_tlb_all(void)
 {
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-       on_each_cpu(do_flush_tlb_all, NULL, 1);
+
+       /* First try (faster) hardware-assisted TLB invalidation. */
+       if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+               invlpgb_flush_all();
+       else
+               /* Fall back to the IPI-based invalidation. */
+               on_each_cpu(do_flush_tlb_all, NULL, 1);
+}
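
For context: invlpgb_flush_all() is introduced earlier in this series. A
minimal sketch of its expected shape, assuming a hypothetical
__invlpgb_all() wrapper around the raw INVLPGB instruction (only
__tlbsync() below is the real helper also used later in this patch):

/* Sketch only; the real helper comes from earlier in the series. */
static inline void invlpgb_flush_all_sketch(void)
{
        /*
         * TLBSYNC waits only for broadcasts issued by the current CPU,
         * so this pair must not migrate between the two instructions.
         */
        guard(preempt)();
        __invlpgb_all();        /* hypothetical: broadcast "flush everything" */
        __tlbsync();            /* wait until all remote TLBs have complied */
}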
+
+/* Flush an arbitrarily large range of memory with INVLPGB. */
+static void invlpgb_kernel_range_flush(struct flush_tlb_info *info)
+{
+       unsigned long addr, nr;
+
+       for (addr = info->start; addr < info->end; addr += nr << PAGE_SHIFT) {
+               nr = (info->end - addr) >> PAGE_SHIFT;
+
+               /*
+                * INVLPGB has a limit on the size of ranges it can
+                * flush. Break up large flushes.
+                */
+               nr = clamp_val(nr, 1, invlpgb_count_max);
+
+               invlpgb_flush_addr_nosync(addr, nr);
+       }
+       __tlbsync();
 }
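
The loop batches at most invlpgb_count_max pages per INVLPGB and uses the
_nosync variant throughout, so one trailing __tlbsync() waits for every
broadcast at once instead of synchronizing per chunk. A standalone
userspace sketch of the same chunking arithmetic (the limit of 8 and the
addresses are stand-ins; real hardware reports the limit via CPUID):

/* Userspace demo of the chunking above; all values are stand-ins. */
#include <stdio.h>

#define PAGE_SHIFT              12
#define INVLPGB_COUNT_MAX       8UL     /* assumed hardware limit */

#define CLAMP(v, lo, hi)        ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
        unsigned long start = 0xffffc90000000000UL;
        unsigned long end = start + (20UL << PAGE_SHIFT);       /* 20 pages */
        unsigned long addr, nr;

        for (addr = start; addr < end; addr += nr << PAGE_SHIFT) {
                nr = (end - addr) >> PAGE_SHIFT;
                nr = CLAMP(nr, 1UL, INVLPGB_COUNT_MAX);
                printf("flush %2lu pages at %#lx\n", nr, addr);
        }
        return 0;
}

This prints flushes of 8, 8 and 4 pages. The lower bound of 1 in the
clamp is defensive: for a residual range smaller than one page, nr would
compute to 0 and addr would never advance.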
 
 static void do_kernel_range_flush(void *info)
 {
        struct flush_tlb_info *f = info;
        unsigned long addr;
 
        /* Flush the range one page at a time with INVLPG. */
        for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
                flush_tlb_one_kernel(addr);
 }
 
+static void kernel_tlb_flush_all(struct flush_tlb_info *info)
+{
+       if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+               invlpgb_flush_all();
+       else
+               on_each_cpu(do_flush_tlb_all, NULL, 1);
+}
+
+static void kernel_tlb_flush_range(struct flush_tlb_info *info)
+{
+       if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+               invlpgb_kernel_range_flush(info);
+       else
+               on_each_cpu(do_kernel_range_flush, info, 1);
+}
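
Both helpers share the same shape: prefer the broadcast instruction,
otherwise fall back to on_each_cpu(), which interrupts every online CPU
and, with the last argument set to 1, waits until all of them have run
the callback. A toy sketch of that contract (demo code, not part of the
patch):

/* Toy demo of the on_each_cpu() contract; not part of the patch. */
static void count_me(void *counter)
{
        atomic_inc(counter);
}

static void demo_on_each_cpu(void)
{
        atomic_t hits = ATOMIC_INIT(0);

        /* Runs count_me() on every online CPU and waits for all of them. */
        on_each_cpu(count_me, &hits, 1);

        pr_info("callback ran on %d CPUs\n", atomic_read(&hits));
}

This is exactly the cost the INVLPGB path avoids: one instruction plus
one TLBSYNC instead of a system-wide burst of IPIs.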
+
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
        struct flush_tlb_info *info;
 
        guard(preempt)();
 
        info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
                                  TLB_GENERATION_INVALID);
 
        if (info->end == TLB_FLUSH_ALL)
-               on_each_cpu(do_flush_tlb_all, NULL, 1);
+               kernel_tlb_flush_all(info);
        else
-               on_each_cpu(do_kernel_range_flush, info, 1);
+               kernel_tlb_flush_range(info);
 
        put_flush_tlb_info();
 }
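
As a usage note, the vmalloc teardown path is a typical caller of
flush_tlb_kernel_range(). A hypothetical caller, purely to show the
calling convention (page-aligned start, exclusive end):

/* Hypothetical caller; not part of the patch. */
static void example_unmap_and_flush(unsigned long va, unsigned long nr_pages)
{
        /* ... kernel PTEs for [va, va + nr_pages * PAGE_SIZE) torn down ... */

        /*
         * get_flush_tlb_info() may widen a large range to TLB_FLUSH_ALL,
         * in which case kernel_tlb_flush_all() runs instead of the
         * per-range flush.
         */
        flush_tlb_kernel_range(va, va + nr_pages * PAGE_SIZE);
}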