static int hardwall_proc_show(struct seq_file *sf, void *v)
 {
        struct hardwall_info *info = sf->private;
-       char buf[256];
 
-       int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
-       buf[rc++] = '\n';
-       seq_write(sf, buf, rc);
+       seq_printf(sf, "%*pbl\n", cpumask_pr_args(&info->cpumask));
        return 0;
 }
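
For context (not part of the patch): cpumask_pr_args(maskp) expands to nr_cpu_ids plus the mask's bitmap pointer, which is exactly what the %*pb/%*pbl vsprintf extensions consume as field width and argument, so the on-stack buffer and the explicit scnprintf step are no longer needed. A minimal sketch of the same pattern in a seq_file show routine (the function and parameter names here are illustrative, not from the tree):

#include <linux/cpumask.h>
#include <linux/seq_file.h>

/* Illustrative only: print a cpumask as a range list, e.g. "0-3,8". */
static int example_mask_show(struct seq_file *sf, const struct cpumask *mask)
{
	seq_printf(sf, "%*pbl\n", cpumask_pr_args(mask));
	return 0;
}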
 
 
        int n = ptr_to_cpu(v);
 
        if (n == 0) {
-               char buf[NR_CPUS*5];
-               cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
                seq_printf(m, "cpu count\t: %d\n", num_online_cpus());
-               seq_printf(m, "cpu list\t: %s\n", buf);
+               seq_printf(m, "cpu list\t: %*pbl\n",
+                          cpumask_pr_args(cpu_online_mask));
                seq_printf(m, "model name\t: %s\n", chip_model);
                seq_printf(m, "flags\t\t:\n");  /* nothing for now */
                seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n",
 
 
 static int __init setup_isolnodes(char *str)
 {
-       char buf[MAX_NUMNODES * 5];
        if (str == NULL || nodelist_parse(str, isolnodes) != 0)
                return -EINVAL;
 
-       nodelist_scnprintf(buf, sizeof(buf), isolnodes);
-       pr_info("Set isolnodes value to '%s'\n", buf);
+       pr_info("Set isolnodes value to '%*pbl'\n",
+               nodemask_pr_args(&isolnodes));
        return 0;
 }
 early_param("isolnodes", setup_isolnodes);
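
The nodemask case is analogous (again, a sketch rather than patch content): nodemask_pr_args() supplies the node count and the node bitmap, so %*pbl prints a node list directly and the MAX_NUMNODES * 5 buffer goes away.

#include <linux/nodemask.h>
#include <linux/printk.h>

/* Illustrative only: report a nodemask as a list, e.g. "0-1". */
static void example_report_nodes(const nodemask_t *nodes)
{
	pr_info("nodes: %*pbl\n", nodemask_pr_args(nodes));
}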
 
 void __init print_disabled_cpus(void)
 {
-       if (!cpumask_empty(&disabled_map)) {
-               char buf[100];
-               cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
-               pr_info("CPUs not available for Linux: %s\n", buf);
-       }
+       if (!cpumask_empty(&disabled_map))
+               pr_info("CPUs not available for Linux: %*pbl\n",
+                       cpumask_pr_args(&disabled_map));
 }
 
 static void __init setup_cpu_maps(void)
 
        struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
        struct cpumask *cache_cpumask, *tlb_cpumask;
        HV_PhysAddr cache_pa;
-       char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];
 
        mb();   /* provided just to simplify "magic hypervisor" mode */
 
                             asids, asidcount);
        if (rc == 0)
                return;
-       cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
-       cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
 
-       pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
-              cache_pa, cache_control, cache_cpumask, cache_buf,
-              (unsigned long)tlb_va, tlb_length, tlb_pgsize,
-              tlb_cpumask, tlb_buf, asids, asidcount, rc);
+       pr_err("hv_flush_remote(%#llx, %#lx, %p [%*pb], %#lx, %#lx, %#lx, %p [%*pb], %p, %d) = %d\n",
+              cache_pa, cache_control, cache_cpumask,
+              cpumask_pr_args(&cache_cpumask_copy),
+              (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
+              cpumask_pr_args(&tlb_cpumask_copy), asids, asidcount, rc);
        panic("Unsafe to continue.");
 }
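
Note the format difference in this hunk: the error path keeps the hex bitmap form, %*pb, which corresponds to the removed cpumask_scnprintf(), while the informational messages elsewhere in the patch use the list form, %*pbl, which corresponds to cpulist_scnprintf(). A small illustrative contrast (assumed helper, not patch code):

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Illustrative only: show both output styles for one cpumask. */
static void example_pb_vs_pbl(const struct cpumask *mask)
{
	/* hex bitmap form, e.g. "f" for CPUs 0-3 on a 4-CPU system */
	pr_info("bitmap: %*pb\n", cpumask_pr_args(mask));
	/* range-list form, e.g. "0-3" for the same mask */
	pr_info("list:   %*pbl\n", cpumask_pr_args(mask));
}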
 
 
 
        /* Neighborhood ktext pages on specified mask */
        else if (cpulist_parse(str, &ktext_mask) == 0) {
-               char buf[NR_CPUS * 5];
-               cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
                if (cpumask_weight(&ktext_mask) > 1) {
                        ktext_small = 1;
-                       pr_info("ktext: using caching neighborhood %s with small pages\n",
-                               buf);
+                       pr_info("ktext: using caching neighborhood %*pbl with small pages\n",
+                               cpumask_pr_args(&ktext_mask));
                } else {
-                       pr_info("ktext: caching on cpu %s with one huge page\n",
-                               buf);
+                       pr_info("ktext: caching on cpu %*pbl with one huge page\n",
+                               cpumask_pr_args(&ktext_mask));
                }
        }
 
                struct cpumask bad;
                cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
                cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
-               if (!cpumask_empty(&bad)) {
-                       char buf[NR_CPUS * 5];
-                       cpulist_scnprintf(buf, sizeof(buf), &bad);
-                       pr_info("ktext: not using unavailable cpus %s\n", buf);
-               }
+               if (!cpumask_empty(&bad))
+                       pr_info("ktext: not using unavailable cpus %*pbl\n",
+                               cpumask_pr_args(&bad));
                if (cpumask_empty(&ktext_mask)) {
                        pr_warn("ktext: no valid cpus; caching on %d\n",
                                smp_processor_id());
 
  */
 static bool network_cpus_init(void)
 {
-       char buf[1024];
        int rc;
 
        if (network_cpus_string == NULL)
                return false;
        }
 
-       cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
-       pr_info("Linux network CPUs: %s\n", buf);
+       pr_info("Linux network CPUs: %*pbl\n",
+               cpumask_pr_args(&network_cpus_map));
        return true;
 }
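
Only the printing side changes in these hunks; parsing still fills a cpumask from the boot-time string. A minimal round-trip sketch using the generic cpulist_parse() (hypothetical helper name, not part of the patch, which uses tile's own parse variant):

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical helper: parse a CPU list string and echo it back. */
static int example_parse_and_report(const char *str)
{
	struct cpumask mask;
	int rc = cpulist_parse(str, &mask);

	if (rc != 0)
		return rc;
	pr_info("parsed CPUs: %*pbl\n", cpumask_pr_args(&mask));
	return 0;
}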
 
 
                if (cpumask_empty(&network_cpus_map)) {
                        pr_warn("Ignoring network_cpus='%s'\n", str);
                } else {
-                       char buf[1024];
-                       cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
-                       pr_info("Linux network CPUs: %s\n", buf);
+                       pr_info("Linux network CPUs: %*pbl\n",
+                               cpumask_pr_args(&network_cpus_map));
                        network_cpus_used = true;
                }
        }