For platforms that support ADI two things must be done during boot.
First, etrap is patched to ensure that execution in the kernel runs with
PSTATE.mcde=1. This ensures that version checking is enabled if the
kernel accesses ADI-enabled user memory through copy_{to/from}_user,
etc.
Second, the initialization of the common flags used in TTEs must be
adjusted to not set TTE.cv (cacheable-in-virtually-indexed-cache) which
has been replaced by TTE.mcd which enables MCD (aka ADI).
These steps are currently done for M7. This patch adds support for M8
and does some minor cleanup to avoid M7-specific naming and reduce the
number of places chip type needs to be checked (and adjusted in the
future).
Orabug: 26096575
Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
Reviewed-by: Steve Sistare <steven.sistare@oracle.com>
" sllx %1, 32, %1\n"
" or %0, %1, %0\n"
" .previous\n"
- " .section .sun_m7_2insn_patch, \"ax\"\n"
+ " .section .sun4v_adi_2insn_patch, \"ax\"\n"
" .word 661b\n"
" sethi %%uhi(%4), %1\n"
" sethi %%hi(%4), %0\n"
" andn %0, %4, %0\n"
" or %0, %5, %0\n"
" .previous\n"
- " .section .sun_m7_2insn_patch, \"ax\"\n"
+ " .section .sun4v_adi_2insn_patch, \"ax\"\n"
" .word 661b\n"
" andn %0, %6, %0\n"
" or %0, %5, %0\n"
};
extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
__sun4v_1insn_patch_end;
-extern struct sun4v_1insn_patch_entry __sun_m7_1insn_patch,
- __sun_m7_1insn_patch_end;
+extern struct sun4v_1insn_patch_entry __sun4v_adi_1insn_patch,
+ __sun4v_adi_1insn_patch_end;
struct sun4v_2insn_patch_entry {
unsigned int addr;
};
extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
__sun4v_2insn_patch_end;
-extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch,
- __sun_m7_2insn_patch_end;
+extern struct sun4v_2insn_patch_entry __sun4v_adi_2insn_patch,
+ __sun4v_adi_2insn_patch_end;
#endif /* !(__ASSEMBLY__) */
struct sun4v_1insn_patch_entry *);
void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
struct sun4v_2insn_patch_entry *);
-void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *,
- struct sun4v_2insn_patch_entry *);
extern unsigned int dcache_parity_tl1_occurred;
extern unsigned int icache_parity_tl1_occurred;
stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
or %l7, %l0, %l7
661: sethi %hi(TSTATE_TSO | TSTATE_PEF), %l0
- .section .sun_m7_1insn_patch, "ax"
+ .section .sun4v_adi_1insn_patch, "ax"
.word 661b
sethi %hi(TSTATE_TSO | TSTATE_PEF | TSTATE_MCDE), %l0
.previous
}
}
-void sun_m7_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
- struct sun4v_1insn_patch_entry *end)
-{
- sun4v_patch_1insn_range(start, end);
-}
-
void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
struct sun4v_2insn_patch_entry *end)
{
}
}
-void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
- struct sun4v_2insn_patch_entry *end)
-{
- while (start < end) {
- unsigned long addr = start->addr;
-
- *(unsigned int *) (addr + 0) = start->insns[0];
- wmb();
- __asm__ __volatile__("flush %0" : : "r" (addr + 0));
-
- *(unsigned int *) (addr + 4) = start->insns[1];
- wmb();
- __asm__ __volatile__("flush %0" : : "r" (addr + 4));
-
- start++;
- }
-}
-
static void __init sun4v_patch(void)
{
extern void sun4v_hvapi_init(void);
sun4v_patch_2insn_range(&__sun4v_2insn_patch,
&__sun4v_2insn_patch_end);
- if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
- sun4v_chip_type == SUN4V_CHIP_SPARC_S7) {
- sun_m7_patch_1insn_range(&__sun_m7_1insn_patch,
- &__sun_m7_1insn_patch_end);
- sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
- &__sun_m7_2insn_patch_end);
+
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_S7:
+ case SUN4V_CHIP_SPARC_M8:
+ case SUN4V_CHIP_SPARC_S8:
+ sun4v_patch_1insn_range(&__sun4v_adi_1insn_patch,
+ &__sun4v_adi_1insn_patch_end);
+ sun4v_patch_2insn_range(&__sun4v_adi_2insn_patch,
+ &__sun4v_adi_2insn_patch_end);
+ break;
+ default:
+ break;
}
sun4v_hvapi_init();
*(.pause_3insn_patch)
__pause_3insn_patch_end = .;
}
- .sun_m7_1insn_patch : {
- __sun_m7_1insn_patch = .;
- *(.sun_m7_1insn_patch)
- __sun_m7_1insn_patch_end = .;
+ .sun4v_adi_1insn_patch : {
+ __sun4v_adi_1insn_patch = .;
+ *(.sun4v_adi_1insn_patch)
+ __sun4v_adi_1insn_patch_end = .;
}
- .sun_m7_2insn_patch : {
- __sun_m7_2insn_patch = .;
- *(.sun_m7_2insn_patch)
- __sun_m7_2insn_patch_end = .;
+ .sun4v_adi_2insn_patch : {
+ __sun4v_adi_2insn_patch = .;
+ *(.sun4v_adi_2insn_patch)
+ __sun4v_adi_2insn_patch_end = .;
}
PERCPU_SECTION(SMP_CACHE_BYTES)
static void __init sun4v_linear_pte_xor_finalize(void)
{
- unsigned long pagecv_flag;
-
- /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
- * enables MCD error. Do not set bit 9 on M7 processor.
- */
- switch (sun4v_chip_type) {
- case SUN4V_CHIP_SPARC_M7:
- case SUN4V_CHIP_SPARC_S7:
- pagecv_flag = 0x00;
- break;
- default:
- pagecv_flag = _PAGE_CV_4V;
- break;
- }
#ifndef CONFIG_DEBUG_PAGEALLOC
- if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
- kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
- PAGE_OFFSET;
- kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
- _PAGE_P_4V | _PAGE_W_4V);
- } else {
+ unsigned long pte = (_PAGE_VALID ^ PAGE_OFFSET) | page_cache4v_flag |
+ _PAGE_P_4V | _PAGE_W_4V;
+
+ if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB)
+ kern_linear_pte_xor[1] = _PAGE_SZ256MB_4V | pte;
+ else
kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
- }
- if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
- kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
- PAGE_OFFSET;
- kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
- _PAGE_P_4V | _PAGE_W_4V);
- } else {
+ if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB)
+ kern_linear_pte_xor[2] = _PAGE_SZ2GB_4V | pte;
+ else
kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
- }
- if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
- kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
- PAGE_OFFSET;
- kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
- _PAGE_P_4V | _PAGE_W_4V);
- } else {
+ if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB)
+ kern_linear_pte_xor[3] = _PAGE_SZ16GB_4V | pte;
+ else
kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
- }
#endif
}
memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif
- /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
- * bit on M7 processor. This is a conflicting usage of the same
- * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
- * Detection error on all pages and this will lead to problems
- * later. Kernel does not run with MCD enabled and hence rest
- * of the required steps to fully configure memory corruption
- * detection are not taken. We need to ensure TTE.mcde is not
- * set on M7 processor. Compute the value of cacheability
- * flag for use later taking this into consideration.
+ /* Compute a cacheability flag based on chip type to use when
+	 * initializing common flags for TTEs. On processors that support
+ * Memory Corruption Detection, TTE bit 9 is no longer the TTE.cv
+ * bit used to control cacheability in a virtually indexed cache
+ * and is instead the TTE.mcd bit which controls whether MCD is
+ * enabled for a mapping.
*/
switch (sun4v_chip_type) {
case SUN4V_CHIP_SPARC_M7:
case SUN4V_CHIP_SPARC_S7:
+ case SUN4V_CHIP_SPARC_M8:
+ case SUN4V_CHIP_SPARC_S8:
page_cache4v_flag = _PAGE_CP_4V;
break;
default: