{
        void *shadow_start, *shadow_end;
 
+       /*
+        * Perform shadow offset calculation based on untagged address, as
+        * some of the callers (e.g. kasan_poison_object_data) pass tagged
+        * addresses to this function.
+        */
+       address = reset_tag(address);
+
        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);
 
 
 void kasan_unpoison_shadow(const void *address, size_t size)
 {
-       kasan_poison_shadow(address, size, 0);
+       u8 tag = get_tag(address);
+
+       /*
+        * Perform shadow offset calculation based on untagged address, as
+        * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+        * addresses to this function.
+        */
+       address = reset_tag(address);
+
+       kasan_poison_shadow(address, size, tag);
 
        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
-               *shadow = size & KASAN_SHADOW_MASK;
+
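+               /*
+                * With tag-based KASAN the last shadow byte of a partially
+                * covered granule stores the pointer tag, so accesses through
+                * the tagged pointer succeed; generic KASAN stores the number
+                * of accessible bytes in that granule instead.
+                */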
+               if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+                       *shadow = tag;
+               else
+                       *shadow = size & KASAN_SHADOW_MASK;
        }
 }
 
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
-       if (likely(!PageHighMem(page)))
-               kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+       if (unlikely(PageHighMem(page)))
+               return;
+       kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }
 
 void kasan_free_pages(struct page *page, unsigned int order)
  */
 static inline unsigned int optimal_redzone(unsigned int object_size)
 {
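+       /*
+        * Tag-based KASAN doesn't need redzones: out-of-bounds accesses are
+        * detected via mismatching pointer and memory tags.
+        */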
+       if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+               return 0;
+
        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                        slab_flags_t *flags)
 {
        unsigned int orig_size = *size;
+       unsigned int redzone_size;
        int redzone_adjust;
 
        /* Add alloc meta. */
        *size += sizeof(struct kasan_alloc_meta);
 
        /* Add free meta. */
-       if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
-           cache->object_size < sizeof(struct kasan_free_meta)) {
+       if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+           (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
+            cache->object_size < sizeof(struct kasan_free_meta))) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
-       redzone_adjust = optimal_redzone(cache->object_size) -
-               (*size - cache->object_size);
 
+       redzone_size = optimal_redzone(cache->object_size);
+       redzone_adjust = redzone_size - (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;
 
        *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
-                       max(*size, cache->object_size +
-                                       optimal_redzone(cache->object_size)));
+                       max(*size, cache->object_size + redzone_size));
 
        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
                return;
        }
 
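+       /*
+        * Align objects to the shadow granularity so that neighboring objects
+        * never share a shadow byte (and thus can carry different tags).
+        */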
+       cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
+
        *flags |= SLAB_KASAN;
 }
 
                        KASAN_KMALLOC_REDZONE);
 }
 
+/*
+ * Since it's desirable to only call object constructors once during slab
+ * allocation, we preassign tags to all such objects. Also preassign tags for
+ * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
+ * For the SLAB allocator we can't preassign tags randomly since the freelist
+ * is stored as an array of indexes instead of a linked list. Assign tags
+ * based on object indexes instead, so that objects that are next to each
+ * other get different tags.
+ * After a tag is assigned, the object always gets allocated with the same
+ * tag. The reason is that we can't change tags for objects with constructors
+ * on reallocation (even for non-SLAB_TYPESAFE_BY_RCU caches), because the
+ * constructor code can save a pointer to the object somewhere (e.g. in the
+ * object itself). If we then retagged the object, the old saved pointer
+ * would become invalid.
+ */
+static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+{
+       if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
+               return new ? KASAN_TAG_KERNEL : random_tag();
+
+#ifdef CONFIG_SLAB
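+       /* Derive the tag from the object's index within the slab page. */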
+       return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
+#else
+       return new ? random_tag() : get_tag(object);
+#endif
+}
+
 void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 {
        struct kasan_alloc_meta *alloc_info;
        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));
 
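+       /* Preassign a tag to the object; see the comment before assign_tag. */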
+       if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+               object = set_tag(object, assign_tag(cache, object, true));
+
        return (void *)object;
 }
 
        return kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
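+/*
+ * Returns true if the shadow byte doesn't correspond to an accessible object:
+ * for generic KASAN the byte must encode the number of accessible bytes
+ * (0 <= shadow_byte < KASAN_SHADOW_SCALE_SIZE), for tag-based KASAN it must
+ * match the pointer tag.
+ */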
+static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
+{
+       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+               return shadow_byte < 0 ||
+                       shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+       else
+               return tag != (u8)shadow_byte;
+}
+
 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
 {
        s8 shadow_byte;
+       u8 tag;
+       void *tagged_object;
        unsigned long rounded_up_size;
 
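+       /* Work on the untagged pointer; report errors with the tagged one. */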
+       tag = get_tag(object);
+       tagged_object = object;
+       object = reset_tag(object);
+
        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
-               kasan_report_invalid_free(object, ip);
+               kasan_report_invalid_free(tagged_object, ip);
                return true;
        }
 
                return false;
 
        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
-       if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
-               kasan_report_invalid_free(object, ip);
+       if (shadow_invalid(tag, shadow_byte)) {
+               kasan_report_invalid_free(tagged_object, ip);
                return true;
        }
 
        rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 
-       if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
+       if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
+                       unlikely(!(cache->flags & SLAB_KASAN)))
                return false;
 
        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);
-       return true;
+
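+       /*
+        * Only generic KASAN makes use of the quarantine, so only then does
+        * returning true defer the actual freeing of the object.
+        */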
+       return IS_ENABLED(CONFIG_KASAN_GENERIC);
 }
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 {
        unsigned long redzone_start;
        unsigned long redzone_end;
+       u8 tag = 0xff;
 
        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);
 
-       kasan_unpoison_shadow(object, size);
+       if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+               tag = assign_tag(cache, object, false);
+
+       /* set_tag ignores the tag when CONFIG_KASAN_SW_TAGS is disabled. */
+       kasan_unpoison_shadow(set_tag(object, tag), size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);
 
        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);
 
-       return (void *)object;
+       return set_tag(object, tag);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
        page = virt_to_head_page(ptr);
 
        if (unlikely(!PageSlab(page))) {
-               if (ptr != page_address(page)) {
+               if (reset_tag(ptr) != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
 
 void kasan_kfree_large(void *ptr, unsigned long ip)
 {
-       if (ptr != page_address(virt_to_head_page(ptr)))
+       if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by page_alloc. */
 }
 
 void check_memory_region(unsigned long addr, size_t size, bool write,
                                unsigned long ret_ip)
 {
+       u8 tag;
+       u8 *shadow_first, *shadow_last, *shadow;
+       void *untagged_addr;
+
+       if (unlikely(size == 0))
+               return;
+
+       tag = get_tag((const void *)addr);
+
+       /*
+        * Ignore accesses for pointers tagged with 0xff (native kernel
+        * pointer tag) to suppress false positives caused by kmap.
+        *
+        * Some kernel code was written to account for archs that don't keep
+        * high memory mapped all the time, but rather map and unmap particular
+        * pages when needed. Instead of storing a pointer to the kernel
+        * memory, this code uses virt_to_page to save the address of the page
+        * structure, along with the offset within that page, for later use.
+        * Those pages are then mapped and unmapped with kmap/kunmap as
+        * necessary to recover the virtual address. For arm64 (which keeps
+        * the high memory mapped all the time), kmap is turned into a
+        * page_address call.
+        *
+        * The issue is that this virt_to_page + page_address round trip loses
+        * the top byte of the original pointer (it gets reset to
+        * KASAN_TAG_KERNEL (0xFF)).
+        */
+       if (tag == KASAN_TAG_KERNEL)
+               return;
+
+       untagged_addr = reset_tag((const void *)addr);
+       if (unlikely(untagged_addr <
+                       kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
+               kasan_report(addr, size, write, ret_ip);
+               return;
+       }
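+       /* Each shadow byte covered by the access must match the pointer tag. */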
+       shadow_first = kasan_mem_to_shadow(untagged_addr);
+       shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
+       for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
+               if (*shadow != tag) {
+                       kasan_report(addr, size, write, ret_ip);
+                       return;
+               }
+       }
 }
 
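+/*
+ * Definitions of the __hwasan_* entry points that the compiler inserts calls
+ * to when instrumenting memory accesses in software tag-based mode.
+ */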
 #define DEFINE_HWASAN_LOAD_STORE(size)                                 \
        void __hwasan_load##size##_noabort(unsigned long addr)          \
        {                                                               \
+               check_memory_region(addr, size, false, _RET_IP_);       \
        }                                                               \
        EXPORT_SYMBOL(__hwasan_load##size##_noabort);                   \
        void __hwasan_store##size##_noabort(unsigned long addr)         \
        {                                                               \
+               check_memory_region(addr, size, true, _RET_IP_);        \
        }                                                               \
        EXPORT_SYMBOL(__hwasan_store##size##_noabort)
 
 
 void __hwasan_loadN_noabort(unsigned long addr, unsigned long size)
 {
+       check_memory_region(addr, size, false, _RET_IP_);
 }
 EXPORT_SYMBOL(__hwasan_loadN_noabort);
 
 void __hwasan_storeN_noabort(unsigned long addr, unsigned long size)
 {
+       check_memory_region(addr, size, true, _RET_IP_);
 }
 EXPORT_SYMBOL(__hwasan_storeN_noabort);
 
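+/*
+ * Called by instrumented code to set the memory tag for a region,
+ * e.g. when tagging stack objects.
+ */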
 void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
 {
+       kasan_poison_shadow((void *)addr, size, tag);
 }
 EXPORT_SYMBOL(__hwasan_tag_memory);