While discussing early DMA pool pre-allocation failures with Christoph [1]
I realized that the allocation failure warning is rather noisy for
constrained allocations like GFP_DMA and GFP_DMA32. The corresponding
zones are often not populated on all nodes because their memory ranges
are constrained. This is an attempt to reduce the ballast that doesn't
provide any relevant information when investigating those allocation
failures. Please note that I have only compile tested it (in my default
config setup) and I am posting it mostly to see what people think about it.
[1] http://lkml.kernel.org/r/20220817060647.1032426-1-hch@lst.de
Link: https://lkml.kernel.org/r/YwScVmVofIZkopkF@dhcp22.suse.cz
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
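
The filtering below keys off gfp_zone(), which maps an allocation's GFP
flags to the highest zone index that allocation may use; zones above that
index carry no relevant information and can be skipped in the report. A
minimal userspace sketch of the idea follows (the TOY_GFP_* bits and the
if-chain in toy_gfp_zone() are simplified stand-ins invented for this
sketch; the real gfp_zone() decodes the GFP_ZONE_TABLE bitfield):

#include <stdio.h>

/* Simplified mirror of enum zone_type; ordering matters, as in the kernel. */
enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };

/* Toy flag bits -- stand-ins, not the kernel's actual __GFP_* values. */
#define TOY_GFP_DMA     0x1u
#define TOY_GFP_DMA32   0x2u
#define TOY_GFP_MOVABLE 0x4u

/* Toy gfp_zone(): highest zone usable by an allocation with these flags. */
static enum zone_type toy_gfp_zone(unsigned int gfp_mask)
{
	if (gfp_mask & TOY_GFP_DMA)
		return ZONE_DMA;
	if (gfp_mask & TOY_GFP_DMA32)
		return ZONE_DMA32;
	if (gfp_mask & TOY_GFP_MOVABLE)
		return ZONE_MOVABLE;
	return ZONE_NORMAL;
}

int main(void)
{
	static const char *names[MAX_NR_ZONES] = {
		"DMA", "DMA32", "Normal", "Movable"
	};
	unsigned int gfp_mask = TOY_GFP_DMA32;	/* e.g. a GFP_DMA32 request */
	enum zone_type max_zone_idx = toy_gfp_zone(gfp_mask);

	/* Mirrors the show_free_areas() loops: report only usable zones. */
	for (enum zone_type z = 0; z < MAX_NR_ZONES; z++) {
		if (z > max_zone_idx)
			continue;	/* skipped -- no relevant information */
		printf("would report zone %s\n", names[z]);
	}
	return 0;
}

With GFP_DMA32 only the DMA and DMA32 lines survive, which is exactly the
noise reduction the patch is after.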
static void sysrq_handle_showmem(int key)
{
- show_mem(0, NULL);
+ show_mem(0, NULL, GFP_HIGHUSER_MOVABLE);
}
static const struct sysrq_key_op sysrq_showmem_op = {
.handler = sysrq_handle_showmem,
static void fn_show_mem(struct vc_data *vc)
{
- show_mem(0, NULL);
+ show_mem(0, NULL, GFP_HIGHUSER_MOVABLE);
}
static void fn_show_state(struct vc_data *vc)
*/
#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
-extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
+extern void show_free_areas(unsigned int flags, nodemask_t *nodemask, gfp_t gfp_mask);
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
-extern void show_mem(unsigned int flags, nodemask_t *nodemask);
+extern void show_mem(unsigned int flags, nodemask_t *nodemask, gfp_t gfp_mask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
{
va_list args;
- show_mem(0, NULL);
+ show_mem(0, NULL, GFP_HIGHUSER_MOVABLE);
va_start(args, fmt);
panic(fmt, args);
va_end(args);
show_state();
if (panic_print & PANIC_PRINT_MEM_INFO)
- show_mem(0, NULL);
+ show_mem(0, NULL, GFP_HIGHUSER_MOVABLE);
if (panic_print & PANIC_PRINT_TIMER_INFO)
sysrq_timer_list_show();
#include <linux/mm.h>
#include <linux/cma.h>
-void show_mem(unsigned int filter, nodemask_t *nodemask)
+void show_mem(unsigned int filter, nodemask_t *nodemask, gfp_t gfp_mask)
{
pg_data_t *pgdat;
unsigned long total = 0, reserved = 0, highmem = 0;
printk("Mem-Info:\n");
- show_free_areas(filter, nodemask);
+ show_free_areas(filter, nodemask, gfp_mask);
for_each_online_pgdat(pgdat) {
int zoneid;
enomem:
pr_err("Allocation of length %lu from process %d (%s) failed\n",
len, current->pid, current->comm);
- show_free_areas(0, NULL);
+ show_free_areas(0, NULL, GFP_KERNEL);
return -ENOMEM;
}
kmem_cache_free(vm_region_jar, region);
pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
len, current->pid);
- show_free_areas(0, NULL);
+ show_free_areas(0, NULL, GFP_KERNEL);
return -ENOMEM;
error_getting_region:
pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
len, current->pid);
- show_free_areas(0, NULL);
+ show_free_areas(0, NULL, GFP_KERNEL);
return -ENOMEM;
error_maple_preallocate:
if (is_memcg_oom(oc))
mem_cgroup_print_oom_meminfo(oc->memcg);
else {
- show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
+ show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, oc->gfp_mask);
if (should_dump_unreclaim_slab())
dump_unreclaimable_slab();
}
if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
filter &= ~SHOW_MEM_FILTER_NODES;
- show_mem(filter, nodemask);
+ show_mem(filter, nodemask, gfp_mask);
}
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
printk(KERN_CONT "(%s) ", tmp);
}
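+/*
+ * True if the node has at least one zone with managed pages at or below
+ * max_zone_idx, i.e. something worth reporting for this allocation request.
+ */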
+static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
+{
+ int zone_idx;
+ for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
+ if (zone_managed_pages(pgdat->node_zones + zone_idx))
+ return true;
+ return false;
+}
+
/*
* Show free area list (used inside shift_scroll-lock stuff)
* We also calculate the percentage fragmentation. We do this by counting the
* SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
* cpuset.
*/
-void show_free_areas(unsigned int filter, nodemask_t *nodemask)
+void show_free_areas(unsigned int filter, nodemask_t *nodemask, gfp_t gfp_mask)
{
unsigned long free_pcp = 0;
+ int max_zone_idx = gfp_zone(gfp_mask);
int cpu, nid;
struct zone *zone;
pg_data_t *pgdat;
for_each_populated_zone(zone) {
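+ /* zones above gfp_zone(gfp_mask) cannot satisfy this allocation */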
+ if (zone_idx(zone) > max_zone_idx)
+ continue;
if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
continue;
for_each_online_pgdat(pgdat) {
if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
continue;
+ if (!node_has_managed_zones(pgdat, max_zone_idx))
+ continue;
printk("Node %d"
" active_anon:%lukB"
for_each_populated_zone(zone) {
int i;
+ if (zone_idx(zone) > max_zone_idx)
+ continue;
if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
continue;
unsigned long nr[MAX_ORDER], flags, total = 0;
unsigned char types[MAX_ORDER];
+ if (zone_idx(zone) > max_zone_idx)
+ continue;
if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
continue;
show_node(zone);