seconds. Use this parameter to check at some
other rate. 0 disables periodic checking.
- memtest= [KNL,X86,ARM,PPC] Enable memtest
+ memory_hotplug.memmap_on_memory
+ [KNL,X86,ARM] Boolean flag to enable this feature.
+ Format: {on | off (default)}
+ When enabled, the memory needed to build the page tables
+ for the memmap array describing the hot-added range will
+ be taken from the range itself, so the memmap page tables
+ will be self-hosted.
+ Since only single memory device ranges are supported at
+ the moment, this option is disabled by default because
+ it might have an impact on workloads that need large
+ contiguous memory chunks.
+ The state of the flag can be read in
+ /sys/module/memory_hotplug/parameters/memmap_on_memory.
+ Note that even when enabled, there are a few cases where
+ the feature is not effective.
+
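For a quick userspace check, the flag can be read back through the sysfs file named above; module booleans read back as "Y" or "N". A minimal sketch in plain C (illustrative, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		char state[8];
		FILE *f = fopen("/sys/module/memory_hotplug/parameters/memmap_on_memory", "r");

		if (!f) {
			perror("memmap_on_memory");
			return 1;
		}
		if (fgets(state, sizeof(state), f))
			printf("memmap_on_memory: %s", state);	/* "Y" or "N" */
		fclose(f);
		return 0;
	}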
+ memtest= [KNL,X86,ARM,PPC,RISCV] Enable memtest
Format: <integer>
default : 0 <disable>
Specifies the number of memtest passes to be
def_bool y
depends on ARM_PMU
- config SYS_SUPPORTS_HUGETLBFS
- def_bool y
-
- config ARCH_HAS_CACHE_LINE_SIZE
- def_bool y
-
+config ARCH_HAS_FILTER_PGPROT
+ def_bool y
+
- config ARCH_ENABLE_SPLIT_PMD_PTLOCK
- def_bool y if PGTABLE_LEVELS > 2
-
# Supported by clang >= 7.0
config CC_HAVE_SHADOW_CALL_STACK
def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
curr = (u64)__tag_set(addr, tag);
end = curr + size;
- do {
- /*
- * 'asm volatile' is required to prevent the compiler to move
- * the statement outside of the loop.
- */
- asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
- :
- : "r" (curr)
- : "memory");
-
- curr += MTE_GRANULE_SIZE;
- } while (curr != end);
+ /*
+ * 'asm volatile' is required to prevent the compiler from
+ * moving the statement outside of the loop.
+ *
+ * The init check is hoisted out of the loop so neither variant
+ * pays a per-granule branch: STZG zero-initialises each 16-byte
+ * granule as it sets its tag, while STG only sets the tag.
+ */
+ if (init) {
+ do {
+ asm volatile(__MTE_PREAMBLE "stzg %0, [%0]"
+ :
+ : "r" (curr)
+ : "memory");
+ curr += MTE_GRANULE_SIZE;
+ } while (curr != end);
+ } else {
+ do {
+ asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
+ :
+ : "r" (curr)
+ : "memory");
+ curr += MTE_GRANULE_SIZE;
+ } while (curr != end);
+ }
}
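As a hedged usage sketch (not from the patch): a caller tags a granule-aligned allocation and lets STZG zero it in the same pass when initialisation is requested. mte_get_random_tag() and __tag_set() are existing helpers in the same arm64 headers; addr and size are assumed MTE_GRANULE_SIZE-aligned.

	/* Sketch: tag (and optionally zero) a granule-aligned object. */
	static void *tag_new_object(void *addr, size_t size, bool zero_init)
	{
		u8 tag = mte_get_random_tag();

		/* init=true selects the STZG loop above, zeroing as it tags. */
		mte_set_mem_tag_range(addr, size, tag, zero_init);
		return (void *)__tag_set(addr, tag);
	}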
-void mte_enable_kernel(void);
+void mte_enable_kernel_sync(void);
+void mte_enable_kernel_async(void);
void mte_init_tags(u64 max_tag);
void mte_set_report_once(bool state);
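A plausible call site for the sync/async split (a sketch, not shown in the hunk): KASAN's hardware tag-based mode would pick one of the two at initialisation, e.g. from a boot-time switch such as kasan.mode=. The helper name below is hypothetical.

	/* Sketch: select how MTE tag-check faults are reported. */
	static void enable_mte_for_kasan(bool async)
	{
		if (async)
			mte_enable_kernel_async();	/* imprecise, lower overhead */
		else
			mte_enable_kernel_sync();	/* precise, synchronous faults */
	}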
# Please keep this list sorted alphabetically.
#
select ARCH_32BIT_OFF_T if PPC32
+ select ARCH_ENABLE_MEMORY_HOTPLUG
+ select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select GENERIC_GETTIMEOFDAY
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KGDB
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+ select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
+ select HAVE_ARCH_USERFAULTFD_MINOR if X86_64 && USERFAULTFD
select HAVE_ARCH_USERFAULTFD_WP if X86_64 && USERFAULTFD
select HAVE_ARCH_VMAP_STACK if X86_64
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_ASM_MODVERSIONS
select HAVE_CMPXCHG_DOUBLE
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/memblock.h> /* for max_pfn/max_low_pfn */
+ #include <linux/pagemap.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#ifndef __ASSEMBLY__
-#include <linux/types.h>
+ #include <linux/align.h>
#include <linux/bitops.h>
- #include <linux/kernel.h>
+ #include <linux/limits.h>
#include <linux/string.h>
+#include <linux/types.h>
+
+struct device;
/*
* bitmaps provide bit arrays that consume one or more unsigned
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+ void *info, bool wait, const struct cpumask *mask);
+
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+
+ /*
+ * CPU-stopping functions used in panic paths. All have default weak
+ * definitions. Architecture-dependent code may override them.
+ */
+ void panic_smp_self_stop(void);
+ void nmi_panic_self_stop(struct pt_regs *regs);
+ void crash_smp_send_stop(void);
+
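For context, a hedged example of the conditional cross-call helper declared above; the per-CPU flag and both callbacks are hypothetical.

	static DEFINE_PER_CPU(bool, dirty_flag);

	/* Runs on the requesting CPU for each candidate CPU. */
	static bool cpu_is_dirty(int cpu, void *info)
	{
		return per_cpu(dirty_flag, cpu);
	}

	/* Runs in IPI context on each CPU that passed the check. */
	static void do_flush(void *info)
	{
		__this_cpu_write(dirty_flag, false);
	}

	static void flush_dirty_cpus(void)
	{
		on_each_cpu_cond_mask(cpu_is_dirty, do_flush, NULL, true,
				      cpu_online_mask);
	}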
/*
* Call a function on all processors
*/
* fields, it can reorder or optimize away the accesses to those fields.
* Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
* expression to prevent that.
+ *
+ * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
+ * false. This allows detecting KASAN reports that happen outside of the checks
+ * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
+ * and in kasan_test_exit.
*/
- #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
- if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
- !kasan_async_mode_enabled()) \
- migrate_disable(); \
- WRITE_ONCE(fail_data.report_expected, true); \
- WRITE_ONCE(fail_data.report_found, false); \
- kunit_add_named_resource(test, \
- NULL, \
- NULL, \
- &resource, \
- "kasan_data", &fail_data); \
- barrier(); \
- expression; \
- barrier(); \
- if (kasan_async_mode_enabled()) \
- kasan_force_async_fault(); \
- barrier(); \
- KUNIT_EXPECT_EQ(test, \
- READ_ONCE(fail_data.report_expected), \
- READ_ONCE(fail_data.report_found)); \
- if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
- !kasan_async_mode_enabled()) { \
- if (READ_ONCE(fail_data.report_found)) \
- kasan_enable_tagging_sync(); \
- migrate_enable(); \
- } \
+ #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
+ 	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
+ 	    !kasan_async_mode_enabled()) \
+ 		migrate_disable(); \
+ 	KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
+ 	WRITE_ONCE(fail_data.report_expected, true); \
+ 	barrier(); \
+ 	expression; \
+ 	barrier(); \
+ 	if (kasan_async_mode_enabled()) \
+ 		kasan_force_async_fault(); \
+ 	barrier(); \
+ 	KUNIT_EXPECT_EQ(test, \
+ 			READ_ONCE(fail_data.report_expected), \
+ 			READ_ONCE(fail_data.report_found)); \
+ 	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
+ 	    !kasan_async_mode_enabled()) { \
+ 		if (READ_ONCE(fail_data.report_found)) \
+ 			kasan_enable_tagging_sync(); \
+ 		migrate_enable(); \
+ 	} \
+ 	WRITE_ONCE(fail_data.report_found, false); \
+ 	WRITE_ONCE(fail_data.report_expected, false); \
} while (0)
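For reference, a test case wraps the faulting access in the macro roughly like the existing lib/test_kasan.c cases do (a minimal sketch):

	static void kmalloc_oob_right(struct kunit *test)
	{
		size_t size = 128;
		char *ptr = kmalloc(size, GFP_KERNEL);

		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		/* One-byte out-of-bounds write; a KASAN report is expected. */
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
		kfree(ptr);
	}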
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
#define arch_get_mem_tag(addr) (0xFF)
#endif
#ifndef arch_set_mem_tag_range
- #define arch_set_mem_tag_range(addr, size, tag) ((void *)(addr))
+ #define arch_set_mem_tag_range(addr, size, tag, init) ((void *)(addr))
#endif
-#define hw_enable_tagging() arch_enable_tagging()
+#define hw_enable_tagging_sync() arch_enable_tagging_sync()
+#define hw_enable_tagging_async() arch_enable_tagging_async()
#define hw_init_tags(max_tag) arch_init_tags(max_tag)
#define hw_set_tagging_report_once(state) arch_set_tagging_report_once(state)
+#define hw_force_async_tag_fault() arch_force_async_tag_fault()
#define hw_get_random_tag() arch_get_random_tag()
#define hw_get_mem_tag(addr) arch_get_mem_tag(addr)
- #define hw_set_mem_tag_range(addr, size, tag) arch_set_mem_tag_range((addr), (size), (tag))
+ #define hw_set_mem_tag_range(addr, size, tag, init) \
+ arch_set_mem_tag_range((addr), (size), (tag), (init))
#else /* CONFIG_KASAN_HW_TAGS */
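The new init argument is threaded through to the architecture backend; a sketch of how the HW_TAGS poisoning path might forward it (kasan_reset_tag() is the existing helper that strips the pointer tag):

	/* Sketch: HW_TAGS poisoning forwards the init request. */
	static inline void kasan_poison(const void *addr, size_t size,
					u8 value, bool init)
	{
		hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
	}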
kmem_dump_obj(object);
return;
}
+
if (vmalloc_dump_obj(object))
return;
- if (!virt_addr_valid(object)) {
- if (object == NULL)
- pr_cont(" NULL pointer.\n");
- else if (object == ZERO_SIZE_PTR)
- pr_cont(" zero-size pointer.\n");
- else
- pr_cont(" non-paged memory.\n");
- return;
- }
- pr_cont(" non-slab/vmalloc memory.\n");
+
+ if (virt_addr_valid(object))
+ type = "non-slab/vmalloc memory";
+ else if (object == NULL)
+ type = "NULL pointer";
+ else if (object == ZERO_SIZE_PTR)
+ type = "zero-size pointer";
+ else
+ type = "non-paged memory";
+
+ pr_cont(" %s\n", type);
}
+EXPORT_SYMBOL_GPL(mem_dump_obj);
+#endif
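With the symbol exported, modules can use the helper in their own diagnostics; a hedged example of a debug-only caller:

	/* Sketch: describe a suspicious pointer in a module error path. */
	static void report_bad_object(void *obj)
	{
		pr_err("unexpected object %px, provenance:", obj);
		mem_dump_obj(obj);	/* completes the line via pr_cont() */
	}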