#define copy_mc_fragile_enabled (0)
#endif
-unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);
-
/**
* copy_mc_to_kernel - memory copy that handles source exceptions
*
* @dst: destination address
* @src: source address
* @len: number of bytes to copy
*
- * Call into the 'fragile' version on systems that benefit from avoiding
- * corner case poison consumption scenarios, For example, accessing
- * poison across 2 cachelines with a single instruction. Almost all
- * other uses case can use copy_mc_enhanced_fast_string() for a fast
- * recoverable copy, or fallback to plain memcpy.
+ * Call into the 'fragile' version on systems that have trouble
+ * actually doing machine check recovery. Everyone else can just
+ * use memcpy().
*
* Return 0 for success, or number of bytes not copied if there was an
* exception.
*/
unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
{
if (copy_mc_fragile_enabled)
return copy_mc_fragile(dst, src, len);
- if (static_cpu_has(X86_FEATURE_ERMS))
- return copy_mc_enhanced_fast_string(dst, src, len);
memcpy(dst, src, len);
return 0;
}
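For context (illustrative, not part of this patch): callers are expected to treat a non-zero return as "bytes not copied" and typically fail the I/O. The helper name and the -EIO policy below are assumptions for the sketch, not code from this series; copy_mc_to_kernel() itself is declared in <linux/uaccess.h>.

	/* Illustrative caller, not part of this patch. */
	static int read_possibly_poisoned(void *dst, const void *src, unsigned len)
	{
		unsigned long rem = copy_mc_to_kernel(dst, src, len);

		/* rem == 0: whole buffer copied; rem > 0: 'rem' trailing bytes missing */
		return rem ? -EIO : 0;
	}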
unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
{
unsigned long ret;
- if (copy_mc_fragile_enabled) {
- __uaccess_begin();
- ret = copy_mc_fragile(dst, src, len);
- __uaccess_end();
- return ret;
- }
-
- if (static_cpu_has(X86_FEATURE_ERMS)) {
- __uaccess_begin();
- ret = copy_mc_enhanced_fast_string(dst, src, len);
- __uaccess_end();
- return ret;
- }
+ if (!copy_mc_fragile_enabled)
+ return copy_user_generic(dst, src, len);
- return copy_user_generic(dst, src, len);
+ __uaccess_begin();
+ ret = copy_mc_fragile(dst, src, len);
+ __uaccess_end();
+ return ret;
}
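Similarly (also illustrative, not from the patch), copy_mc_to_user() keeps the usual user-copy convention of returning the number of bytes it could not copy, so callers map a short copy to -EFAULT. The helper below is made up for the sketch and the __user handling is simplified.

	/* Illustrative caller, not part of this patch. */
	static int report_to_user(void __user *ubuf, const void *kbuf, unsigned len)
	{
		/* non-zero return: that many bytes were not written (fault or poisoned source) */
		unsigned long rem = copy_mc_to_user(ubuf, kbuf, len);

		return rem ? -EFAULT : 0;
	}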
_ASM_EXTABLE(.L_write_words, .E_write_words)
_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
#endif /* CONFIG_X86_MCE */
-
-/*
- * copy_mc_enhanced_fast_string - memory copy with exception handling
- *
- * Fast string copy + fault / exception handling. If the CPU does
- * support machine check exception recovery, but does not support
- * recovering from fast-string exceptions then this CPU needs to be
- * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any
- * machine check recovery support this version should be no slower than
- * standard memcpy.
- */
-SYM_FUNC_START(copy_mc_enhanced_fast_string)
- movq %rdi, %rax
- movq %rdx, %rcx
-.L_copy:
- rep movsb
- /* Copy successful. Return zero */
- xorl %eax, %eax
- ret
-SYM_FUNC_END(copy_mc_enhanced_fast_string)
-
- .section .fixup, "ax"
-.E_copy:
- /*
- * On fault %rcx is updated such that the copy instruction could
- * optionally be restarted at the fault position, i.e. it
- * contains 'bytes remaining'. A non-zero return indicates error
- * to copy_mc_generic() users, or indicate short transfers to
- * user-copy routines.
- */
- movq %rcx, %rax
- ret
-
- .previous
-
- _ASM_EXTABLE_FAULT(.L_copy, .E_copy)
#endif /* !CONFIG_UML */