void flush_dcache_folio(struct folio *folio)
{
- clear_bit(PG_dc_clean, &folio->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
return;
}
EXPORT_SYMBOL(flush_dcache_folio);
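/*
 * Context for the hunks below: they assume the generic mm code now wraps
 * the bare "unsigned long flags" of struct page / struct folio in a
 * one-member struct (spelled memdesc_flags_t in current kernels; treat
 * the exact name as an assumption here), roughly:
 *
 *	typedef struct {
 *		unsigned long f;
 *	} memdesc_flags_t;
 *
 * The bitops (set_bit()/clear_bit()/test_bit()/test_and_set_bit()) take a
 * volatile unsigned long *, so every caller that passed &folio->flags or
 * &page->flags must now pass &folio->flags.f / &page->flags.f.  No
 * behaviour changes: .f is the first and only member, so the address and
 * the bit layout are identical.
 */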
copy_page(kto, kfrom);
- clear_bit(PG_dc_clean, &dst->flags);
- clear_bit(PG_dc_clean, &src->flags);
+ clear_bit(PG_dc_clean, &dst->flags.f);
+ clear_bit(PG_dc_clean, &src->flags.f);
kunmap_atomic(kto);
kunmap_atomic(kfrom);
{
struct folio *folio = page_folio(page);
clear_page(to);
- clear_bit(PG_dc_clean, &folio->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
}
EXPORT_SYMBOL(clear_user_page);
*/
if (vma->vm_flags & VM_EXEC) {
struct folio *folio = page_folio(page);
- int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
if (dirty) {
unsigned long offset = offset_in_folio(folio, paddr);
nr = folio_nr_pages(folio);
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
- if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
__flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
- if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
__flush_dcache_folio(folio_flush_mapping(src), src);
/* FIXME: not highmem safe */
struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
- if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
__flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);
if (size < sz)
break;
if (!offset)
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
offset = 0;
size -= sz;
if (!size)
folio = page_folio(pfn_to_page(pfn));
mapping = folio_flush_mapping(folio);
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
__flush_dcache_folio(mapping, folio);
if (mapping) {
if (cache_is_vivt())
else
mapping = NULL;
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
__flush_dcache_folio(mapping, folio);
if (pte_exec(pteval))
return;
if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
return;
}
if (!cache_ops_need_broadcast() &&
mapping && !folio_mapped(folio))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
else {
__flush_dcache_folio(mapping, folio);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, folio);
else if (mapping)
__flush_icache_all();
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
#ifdef CONFIG_ARM64_MTE
if (system_supports_mte()) {
- clear_bit(PG_mte_tagged, &folio->flags);
- clear_bit(PG_mte_lock, &folio->flags);
+ clear_bit(PG_mte_tagged, &folio->flags.f);
+ clear_bit(PG_mte_lock, &folio->flags.f);
}
#endif
}
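/*
 * On arm64 the MTE software bits (PG_mte_tagged, PG_mte_lock) live in the
 * same flags word as PG_dcache_clean, so the MTE helpers below need the
 * identical ->flags.f treatment as the cache-maintenance bits.
 */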
* before the page flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &page->flags);
+ set_bit(PG_mte_tagged, &page->flags.f);
}
static inline bool page_mte_tagged(struct page *page)
{
- bool ret = test_bit(PG_mte_tagged, &page->flags);
+ bool ret = test_bit(PG_mte_tagged, &page->flags.f);
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
{
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
- if (!test_and_set_bit(PG_mte_lock, &page->flags))
+ if (!test_and_set_bit(PG_mte_lock, &page->flags.f))
return true;
/*
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&page->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
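/*
 * Note the smp_cond_load_acquire() line above: unlike the bitops, which
 * merely take a pointer to the word, this macro dereferences its argument
 * as a scalar and would not compile against a struct-typed flags field.
 * Spelling out the .f member there is required, not cosmetic.
 */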
* before the folio flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &folio->flags);
+ set_bit(PG_mte_tagged, &folio->flags.f);
}
static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
- bool ret = test_bit(PG_mte_tagged, &folio->flags);
+ bool ret = test_bit(PG_mte_tagged, &folio->flags.f);
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
{
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
- if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+ if (!test_and_set_bit(PG_mte_lock, &folio->flags.f))
return true;
/*
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&folio->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
{
struct folio *folio = page_folio(pte_page(pte));
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
sync_icache_aliases((unsigned long)folio_address(folio),
(unsigned long)folio_address(folio) +
folio_size(folio));
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
*/
void flush_dcache_folio(struct folio *folio)
{
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
EXPORT_SYMBOL(flush_dcache_folio);
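/*
 * The test_bit()-then-clear_bit() shape above (repeated in the powerpc
 * and riscv variants further down, where the "avoid an atomic op if
 * possible" comment makes the intent explicit) uses a plain read to skip
 * the atomic RMW when the bit is already clear; both halves now target
 * the same .f word.
 */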
mapping = folio_flush_mapping(folio);
if (mapping && !folio_mapped(folio))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
else {
dcache_wbinv_all();
if (mapping)
icache_inv_all();
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
return;
folio = page_folio(pfn_to_page(pfn));
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
dcache_wbinv_all();
if (folio_flush_mapping(folio)) {
/* Flush this page if there are aliases. */
if (mapping && !mapping_mapped(mapping)) {
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
} else {
__flush_dcache_folio(folio);
if (mapping) {
flush_aliases(mapping, folio);
flush_icache_range(start, start + folio_size(folio));
}
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
return;
folio = page_folio(pfn_to_page(pfn));
- if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
__flush_dcache_folio(folio);
mapping = folio_flush_mapping(folio);
static inline void flush_dcache_folio(struct folio *folio)
{
- clear_bit(PG_dc_clean, &folio->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio
{
unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
struct folio *folio = page_folio(pfn_to_page(pfn));
- int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
/*
* Since icaches do not snoop for updated data on OpenRISC, we
pfn = folio_pfn(folio);
nr = folio_nr_pages(folio);
if (folio_flush_mapping(folio) &&
- test_bit(PG_dcache_dirty, &folio->flags)) {
+ test_bit(PG_dcache_dirty, &folio->flags.f)) {
while (nr--)
flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
- clear_bit(PG_dcache_dirty, &folio->flags);
+ clear_bit(PG_dcache_dirty, &folio->flags.f);
} else if (parisc_requires_coherency())
while (nr--)
flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
- set_bit(PG_dcache_dirty, &folio->flags);
+ set_bit(PG_dcache_dirty, &folio->flags.f);
return;
}
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/* avoid an atomic op if possible */
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio
/* Clear i-cache for new pages */
folio = page_folio(pfn_to_page(pfn));
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
folio = page_folio(pte_page(pte));
/* page is dirty */
- if (!test_bit(PG_dcache_clean, &folio->flags) &&
+ if (!test_bit(PG_dcache_clean, &folio->flags.f) &&
!folio_test_reserved(folio)) {
if (trap == INTERRUPT_INST_STORAGE) {
flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
} else
pp |= HPTE_R_N;
}
struct folio *folio = maybe_pte_to_folio(pte);
if (!folio)
return pte;
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
return pte;
return pte;
/* If the page is clean, we move on */
- if (test_bit(PG_dcache_clean, &folio->flags))
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
return pte;
/* If it's an exec fault, we flush the cache and make it clean */
if (is_exec_fault()) {
flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
return pte;
}
goto bail;
/* If the page is already clean, we move on */
- if (test_bit(PG_dcache_clean, &folio->flags))
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
goto bail;
/* Clean the page and set PG_dcache_clean */
flush_dcache_icache_folio(folio);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
bail:
return pte_mkexec(pte);
static inline void flush_dcache_folio(struct folio *folio)
{
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
{
struct folio *folio = page_folio(pte_page(pte));
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
flush_icache_mm(mm, false);
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
#endif /* CONFIG_MMU */
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
folio_get(folio);
rc = uv_destroy(folio_to_phys(folio));
if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
folio_get(folio);
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
expected = expected_folio_refs(folio) + 1;
if (!folio_ref_freeze(folio, expected))
return -EBUSY;
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
/*
* If the UVC does not succeed or fail immediately, we don't want to
* loop for long, or we might get stall notifications.
* convert_to_secure.
 * As secure pages are never large folios, both variants can co-exist.
*/
- if (!test_bit(PG_arch_1, &folio->flags))
+ if (!test_bit(PG_arch_1, &folio->flags.f))
return 0;
rc = uv_pin_shared(folio_to_phys(folio));
if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}
start = pmd_val(*pmd) & HPAGE_MASK;
end = start + HPAGE_SIZE;
__storage_key_init_range(start, end);
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
cond_resched();
return 0;
}
paddr = rste & PMD_MASK;
}
- if (!test_and_set_bit(PG_arch_1, &folio->flags))
+ if (!test_and_set_bit(PG_arch_1, &folio->flags.f))
__storage_key_init_range(paddr, paddr + size);
}
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
struct address_space *mapping = folio_flush_mapping(folio);
if (mapping && !mapping_mapped(mapping))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
else
#endif
{
struct address_space *mapping = folio_flush_mapping(folio);
if (mapping && !mapping_mapped(mapping))
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
else {
unsigned long pfn = folio_pfn(folio);
unsigned int i, nr = folio_nr_pages(folio);
struct folio *folio = page_folio(page);
if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
- test_bit(PG_dcache_clean, &folio->flags)) {
+ test_bit(PG_dcache_clean, &folio->flags.f)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
kunmap_coherent(vto);
} else {
memcpy(dst, src, len);
if (boot_cpu_data.dcache.n_aliases)
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
if (vma->vm_flags & VM_EXEC)
struct folio *folio = page_folio(page);
if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
- test_bit(PG_dcache_clean, &folio->flags)) {
+ test_bit(PG_dcache_clean, &folio->flags.f)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent(vfrom);
} else {
memcpy(dst, src, len);
if (boot_cpu_data.dcache.n_aliases)
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
}
vto = kmap_atomic(to);
if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
- test_bit(PG_dcache_clean, &src->flags)) {
+ test_bit(PG_dcache_clean, &src->flags.f)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent(vfrom);
if (pfn_valid(pfn)) {
struct folio *folio = page_folio(pfn_to_page(pfn));
- int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
+ int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags.f);
if (dirty)
__flush_purge_region(folio_address(folio),
folio_size(folio));
if (pages_do_alias(addr, vmaddr)) {
if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
- test_bit(PG_dcache_clean, &folio->flags)) {
+ test_bit(PG_dcache_clean, &folio->flags.f)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);
enum fixed_addresses idx;
unsigned long vaddr;
- BUG_ON(!test_bit(PG_dcache_clean, &folio->flags));
+ BUG_ON(!test_bit(PG_dcache_clean, &folio->flags.f));
preempt_disable();
pagefault_disable();
((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
#define dcache_dirty_cpu(folio) \
- (((folio)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
+ (((folio)->flags.f >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
{
"bne,pn %%xcc, 1b\n\t"
" nop"
: /* no outputs */
- : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags)
+ : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags.f)
: "g1", "g7");
}
" nop\n"
"2:"
: /* no outputs */
- : "r" (cpu), "r" (mask), "r" (&folio->flags),
+ : "r" (cpu), "r" (mask), "r" (&folio->flags.f),
"i" (PG_dcache_cpu_mask),
"i" (PG_dcache_cpu_shift)
: "g1", "g7");
struct folio *folio = page_folio(page);
unsigned long pg_flags;
- pg_flags = folio->flags;
+ pg_flags = folio->flags.f;
if (pg_flags & (1UL << PG_dcache_dirty)) {
int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
PG_dcache_cpu_mask);
mapping = folio_flush_mapping(folio);
if (mapping && !mapping_mapped(mapping)) {
- bool dirty = test_bit(PG_dcache_dirty, &folio->flags);
+ bool dirty = test_bit(PG_dcache_dirty, &folio->flags.f);
if (dirty) {
int dirty_cpu = dcache_dirty_cpu(folio);
*/
if (mapping && !mapping_mapped(mapping)) {
- if (!test_bit(PG_arch_1, &folio->flags))
- set_bit(PG_arch_1, &folio->flags);
+ if (!test_bit(PG_arch_1, &folio->flags.f))
+ set_bit(PG_arch_1, &folio->flags.f);
return;
} else {
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
- if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
+ if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags.f)) {
unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
unsigned long tmp;
}
preempt_enable();
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
}
#else
- if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
+ if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags.f)
&& (vma->vm_flags & VM_EXEC) != 0) {
for (i = 0; i < nr; i++) {
void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
__invalidate_icache_page((unsigned long)paddr);
kunmap_local(paddr);
}
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
}
#endif
}