int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
+ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
bool kernel_page_present(struct page *page);
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
#endif /* _ASM_ARM64_SET_MEMORY_H */
PAGE_SIZE, change_page_range, &data);
}
+static int __set_memory_enc_dec(unsigned long addr,
+ int numpages,
+ bool encrypt)
+{
+ unsigned long set_prot = 0, clear_prot = 0;
+ phys_addr_t start, end;
+ int ret;
+
+ if (!is_realm_world())
+ return 0;
+
+ if (!__is_lm_address(addr))
+ return -EINVAL;
+
+ start = __virt_to_phys(addr);
+ end = start + numpages * PAGE_SIZE;
+
+ if (encrypt)
+ clear_prot = PROT_NS_SHARED;
+ else
+ set_prot = PROT_NS_SHARED;
+
+ /*
+ * Break the mapping before we make any changes to avoid stale TLB
+ * entries or Synchronous External Aborts caused by RIPAS_EMPTY
+ */
+ ret = __change_memory_common(addr, PAGE_SIZE * numpages,
+ __pgprot(set_prot),
+ __pgprot(clear_prot | PTE_VALID));
+
+ if (ret)
+ return ret;
+
+ if (encrypt)
+ ret = rsi_set_memory_range_protected(start, end);
+ else
+ ret = rsi_set_memory_range_shared(start, end);
+
+ if (ret)
+ return ret;
+
+ return __change_memory_common(addr, PAGE_SIZE * numpages,
+ __pgprot(PTE_VALID),
+ __pgprot(0));
+}
+
+static int realm_set_memory_encrypted(unsigned long addr, int numpages)
+{
+ int ret = __set_memory_enc_dec(addr, numpages, true);
+
+ /*
+ * If the request to change state fails, then the only sensible
+ * course of action is for the caller to leak the memory.
+ */
+ WARN(ret, "Failed to encrypt memory, %d pages will be leaked",
+ numpages);
+
+ return ret;
+}
+
+static int realm_set_memory_decrypted(unsigned long addr, int numpages)
+{
+ int ret = __set_memory_enc_dec(addr, numpages, false);
+
+ WARN(ret, "Failed to decrypt memory, %d pages will be leaked",
+ numpages);
+
+ return ret;
+}
+
+static const struct arm64_mem_crypt_ops realm_crypt_ops = {
+ .encrypt = realm_set_memory_encrypted,
+ .decrypt = realm_set_memory_decrypted,
+};
+
+int realm_register_memory_enc_ops(void)
+{
+ return arm64_mem_crypt_ops_register(&realm_crypt_ops);
+}
+
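For context, a minimal caller-side sketch of the API these ops back. This is a hedged illustration only: share_with_host() and unshare_from_host() are hypothetical names, but set_memory_decrypted()/set_memory_encrypted() are the entry points declared in the header above, and the leak-on-failure behaviour mirrors the WARN()s in the realm callbacks.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>

/* Hypothetical helper: make 2^order pages shared with the host. */
static void *share_with_host(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!page)
		return NULL;

	if (set_memory_decrypted((unsigned long)page_address(page),
				 1 << order)) {
		/* The pages are now in an unknown state: leak them. */
		return NULL;
	}

	return page_address(page);
}

/* Hypothetical helper: reclaim the buffer as realm-private again. */
static void unshare_from_host(void *vaddr, unsigned int order)
{
	/*
	 * On failure the WARN() in realm_set_memory_encrypted() fires
	 * and the pages are leaked rather than freed.
	 */
	if (set_memory_encrypted((unsigned long)vaddr, 1 << order))
		return;

	free_pages((unsigned long)vaddr, order);
}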
+ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+ {
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (!can_set_direct_map())
+ return 0;
+
+ return set_memory_valid(addr, nr, valid);
+ }
+
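The _noflush suffix means TLB maintenance is left to the caller. A hedged sketch of the expected calling pattern (direct_map_remove() is a made-up wrapper, not part of the patch):

#include <linux/mm.h>
#include <linux/set_memory.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical wrapper: drop nr pages from the direct map, then do
 * the TLB maintenance that set_direct_map_valid_noflush() leaves to
 * its caller.
 */
static int direct_map_remove(struct page *page, unsigned int nr)
{
	unsigned long addr = (unsigned long)page_address(page);
	int ret;

	ret = set_direct_map_valid_noflush(page, nr, false);
	if (ret)
		return ret;

	flush_tlb_kernel_range(addr, addr + nr * PAGE_SIZE);
	return 0;
}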
#ifdef CONFIG_DEBUG_PAGEALLOC
++/*
++ * Apart from the return value, this is doing the same thing as the
++ * new set_direct_map_valid_noflush() function.
++ *
++ * Unify? Explain the conceptual differences?
++ */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!can_set_direct_map())
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
+ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+bool kernel_page_present(struct page *page);
#endif
return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
}
+ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+ {
+ unsigned long flags;
+
+ if (valid)
+ flags = SET_MEMORY_DEF;
+ else
+ flags = SET_MEMORY_INV;
+
+ return __set_memory((unsigned long)page_to_virt(page), nr, flags);
+ }
++
+bool kernel_page_present(struct page *page)
+{
+ unsigned long addr;
+ unsigned int cc;
+
+ addr = (unsigned long)page_address(page);
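+	/*
+	 * LRA (Load Real Address) sets a nonzero condition code when no
+	 * valid translation exists, so CC == 0 means the page is mapped.
+	 */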
+ asm volatile(
+ " lra %[addr],0(%[addr])\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [addr] "+a" (addr)
+ :
+ : CC_CLOBBER);
+ return CC_TRANSFORM(cc) == 0;
+}
+
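A short usage sketch (mapped_page_address() is hypothetical): with DEBUG_PAGEALLOC, pages may be absent from the kernel mapping, so callers such as hibernation check kernel_page_present() before touching a page through the direct map:

/*
 * Hypothetical helper: return a kernel pointer to the page only if it
 * is currently mapped, so a subsequent access cannot fault.
 */
static void *mapped_page_address(struct page *page)
{
	if (!kernel_page_present(page))
		return NULL;

	return page_address(page);
}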
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
static void ipte_range(pte_t *pte, unsigned long address, int nr)
}
#endif
- #ifdef CONFIG_MEM_ALLOC_PROFILING
- static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
- {
- int i;
- struct alloc_tag *tag;
- unsigned int nr_pages = 1 << new_order;
-
- if (!mem_alloc_profiling_enabled())
- return;
-
- tag = pgalloc_tag_get(&folio->page);
- if (!tag)
- return;
-
- for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
- union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i));
-
- if (ref) {
- /* Set new reference to point to the original tag */
- alloc_tag_ref_set(ref, tag);
- put_page_tag_ref(ref);
- }
- }
- }
-
- static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
- {
- struct alloc_tag *tag;
- union codetag_ref *ref;
-
- tag = pgalloc_tag_get(&old->page);
- if (!tag)
- return;
-
- ref = get_page_tag_ref(&new->page);
- if (!ref)
- return;
-
- /* Clear the old ref to the original allocation tag. */
- clear_page_tag_ref(&old->page);
- /* Decrement the counters of the tag on get_new_folio. */
- alloc_tag_sub(ref, folio_nr_pages(new));
-
- __alloc_tag_ref_set(ref, tag);
-
- put_page_tag_ref(ref);
- }
- #else /* !CONFIG_MEM_ALLOC_PROFILING */
- static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
- {
- }
-
- static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
- {
- }
- #endif /* CONFIG_MEM_ALLOC_PROFILING */
-
+int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
+int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
+int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
+
#endif /* _LINUX_MM_H */
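For illustration, a hedged sketch of how a generic prctl()-style dispatcher would reach these hooks; shadow_stack_prctl() is a made-up name, and the PR_*_SHADOW_STACK_STATUS constants are assumed to come from elsewhere in the same series:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <uapi/linux/prctl.h>

/* Hypothetical dispatcher from prctl() options to the new arch hooks. */
static int shadow_stack_prctl(struct task_struct *t, int option,
			      unsigned long arg)
{
	switch (option) {
	case PR_GET_SHADOW_STACK_STATUS:
		return arch_get_shadow_stack_status(t,
					(unsigned long __user *)arg);
	case PR_SET_SHADOW_STACK_STATUS:
		return arch_set_shadow_stack_status(t, arg);
	case PR_LOCK_SHADOW_STACK_STATUS:
		return arch_lock_shadow_stack_status(t, arg);
	default:
		return -EINVAL;
	}
}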
return;
}
- /* We first start with searching the page inside the stable tree */
- kpage = stable_tree_search(page);
- if (kpage == page && rmap_item->head == stable_node) {
- put_page(kpage);
+ /* Start by searching for the folio in the stable tree */
+ kfolio = stable_tree_search(page);
- if (!IS_ERR_OR_NULL(kfolio) && &kfolio->page == page &&
- rmap_item->head == stable_node) {
++ if (&kfolio->page == page && rmap_item->head == stable_node) {
+ folio_put(kfolio);
return;
}
stat->attributes_mask |= (STATX_ATTR_APPEND |
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- inode_lock_shared(inode);
generic_fillattr(idmap, request_mask, inode, stat);
- inode_unlock_shared(inode);
- if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
+ if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
stat->blksize = HPAGE_PMD_SIZE;
if (request_mask & STATX_BTIME) {