 atomic_long_t _totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(_totalhigh_pages);
 
-unsigned int __nr_free_highpages (void)
+unsigned int __nr_free_highpages(void)
 {
        struct zone *zone;
        unsigned int pages = 0;
 static int pkmap_count[LAST_PKMAP];
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
 
-pte_t * pkmap_page_table;
+pte_t *pkmap_page_table;
 
 /*
  * Most architectures have no use for kmap_high_get(), so let's abstract
 
        if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
                int i = PKMAP_NR(addr);
+
                return pte_page(pkmap_page_table[i]);
        }
 
        pkmap_count[PKMAP_NR(vaddr)]++;
        BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
        unlock_kmap();
-       return (void*) vaddr;
+       return (void *) vaddr;
 }
-
 EXPORT_SYMBOL(kmap_high);
 
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
                pkmap_count[PKMAP_NR(vaddr)]++;
        }
        unlock_kmap_any(flags);
-       return (void*) vaddr;
+       return (void *) vaddr;
 }
 #endif
 
        spin_unlock_irqrestore(&pas->lock, flags);
        return ret;
 }
-
 EXPORT_SYMBOL(page_address);
 
 /**