* 'UL' and other type specifiers unilaterally.  We
  * use the following macros to deal with this.
  */
-
-#ifdef __ASSEMBLY__
-#define _UML_AC(X, Y)  (Y)
-#else
-#define __UML_AC(X, Y) (X(Y))
-#define _UML_AC(X, Y)  __UML_AC(X, Y)
-#endif
-
-#define STUB_START _UML_AC(, 0x100000)
-#define STUB_CODE _UML_AC((unsigned long), STUB_START)
-#define STUB_DATA _UML_AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE)
-#define STUB_END _UML_AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE)
+#define STUB_START 0x100000UL
+#define STUB_CODE STUB_START
+#define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE)
+#define STUB_END (STUB_DATA + UM_KERN_PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
 
 
 /* SPDX-License-Identifier: GPL-2.0 */
 /* for use by sys-$SUBARCH/kernel-offsets.c */
+#include <stub-data.h>
 
 DEFINE(KERNEL_MADV_REMOVE, MADV_REMOVE);
 
 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
 DEFINE(UML_CONFIG_UML_TIME_TRAVEL_SUPPORT, CONFIG_UML_TIME_TRAVEL_SUPPORT);
 #endif
+
+/* for stub */
+DEFINE(UML_STUB_FIELD_OFFSET, offsetof(struct stub_data, offset));
+DEFINE(UML_STUB_FIELD_CHILD_ERR, offsetof(struct stub_data, child_err));
+DEFINE(UML_STUB_FIELD_FD, offsetof(struct stub_data, fd));
 
                goto done;
        }
 
-       remap_stack(data->fd, data->offset);
-       goto done;
+       remap_stack_and_trap();
 
  done:
        trap_myself();
 
        syscall_regs[REGS_IP_INDEX] = STUB_CODE +
                ((unsigned long) batch_syscall_stub -
                 (unsigned long) __syscall_stub_start);
+       syscall_regs[REGS_SP_INDEX] = STUB_DATA;
+
        return 0;
 }
 
 
 #define __SYSDEP_STUB_H
 
 #include <asm/ptrace.h>
+#include <generated/asm-offsets.h>
 
-#define STUB_SYSCALL_RET EAX
 #define STUB_MMAP_NR __NR_mmap2
 #define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
 
        __asm("int3");
 }
 
-static inline void remap_stack(int fd, unsigned long offset)
+static inline void remap_stack_and_trap(void)
 {
-       __asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
-                         "movl %7, %%ebx ; movl %%eax, (%%ebx)"
-                         : : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
-                           "c" (UM_KERN_PAGE_SIZE),
-                           "d" (PROT_READ | PROT_WRITE),
-                           "S" (MAP_FIXED | MAP_SHARED), "D" (fd),
-                           "a" (offset),
-                           "i" (&((struct stub_data *) STUB_DATA)->child_err)
-                         : "memory");
+       __asm__ volatile (
+               "movl %%esp,%%ebx ;"
+               "andl %0,%%ebx ;"
+               "movl %1,%%eax ;"
+               "movl %%ebx,%%edi ; addl %2,%%edi ; movl (%%edi),%%edi ;"
+               "movl %%ebx,%%ebp ; addl %3,%%ebp ; movl (%%ebp),%%ebp ;"
+               "int $0x80 ;"
+               "addl %4,%%ebx ; movl %%eax, (%%ebx) ;"
+               "int $3"
+               : :
+               "g" (~(UM_KERN_PAGE_SIZE - 1)),
+               "g" (STUB_MMAP_NR),
+               "g" (UML_STUB_FIELD_FD),
+               "g" (UML_STUB_FIELD_OFFSET),
+               "g" (UML_STUB_FIELD_CHILD_ERR),
+               "c" (UM_KERN_PAGE_SIZE),
+               "d" (PROT_READ | PROT_WRITE),
+               "S" (MAP_FIXED | MAP_SHARED)
+               :
+               "memory");
 }
 
 #endif
 
 #define __SYSDEP_STUB_H
 
 #include <sysdep/ptrace_user.h>
+#include <generated/asm-offsets.h>
 
-#define STUB_SYSCALL_RET PT_INDEX(RAX)
 #define STUB_MMAP_NR __NR_mmap
 #define MMAP_OFFSET(o) (o)
 
        __asm("int3");
 }
 
-static inline void remap_stack(long fd, unsigned long offset)
+static inline void remap_stack_and_trap(void)
 {
-       __asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
-                         "movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
-                         "movq %%rax, (%%rbx)":
-                         : "a" (STUB_MMAP_NR), "D" (STUB_DATA),
-                           "S" (UM_KERN_PAGE_SIZE),
-                           "d" (PROT_READ | PROT_WRITE),
-                            "g" (MAP_FIXED | MAP_SHARED), "g" (fd),
-                           "g" (offset),
-                           "i" (&((struct stub_data *) STUB_DATA)->child_err)
-                         : __syscall_clobber, "r10", "r8", "r9" );
+       __asm__ volatile (
+               "movq %0,%%rax ;"
+               "movq %%rsp,%%rdi ;"
+               "andq %1,%%rdi ;"
+               "movq %2,%%r10 ;"
+               "movq %%rdi,%%r8 ; addq %3,%%r8 ; movq (%%r8),%%r8 ;"
+               "movq %%rdi,%%r9 ; addq %4,%%r9 ; movq (%%r9),%%r9 ;"
+               __syscall ";"
+               "movq %%rsp,%%rdi ; andq %1,%%rdi ;"
+               "addq %5,%%rdi ; movq %%rax, (%%rdi) ;"
+               "int3"
+               : :
+               "g" (STUB_MMAP_NR),
+               "g" (~(UM_KERN_PAGE_SIZE - 1)),
+               "g" (MAP_FIXED | MAP_SHARED),
+               "g" (UML_STUB_FIELD_FD),
+               "g" (UML_STUB_FIELD_OFFSET),
+               "g" (UML_STUB_FIELD_CHILD_ERR),
+               "S" (UM_KERN_PAGE_SIZE),
+               "d" (PROT_READ | PROT_WRITE)
+               :
+               __syscall_clobber, "r10", "r8", "r9");
 }
 
 #endif
 
 
        .globl batch_syscall_stub
 batch_syscall_stub:
-       /* load pointer to first operation */
-       mov     $(STUB_DATA+8), %esp
-
+       /* %esp comes in as "top of page" */
+       mov %esp, %ecx
+       /* %esp has pointer to first operation */
+       add $8, %esp
 again:
        /* load length of additional data */
        mov     0x0(%esp), %eax
 
        /* if(length == 0) : end of list */
        /* write possible 0 to header */
-       mov     %eax, STUB_DATA+4
+       mov     %eax, 0x4(%ecx)
        cmpl    $0, %eax
        jz      done
 
        /* save current pointer */
-       mov     %esp, STUB_DATA+4
+       mov     %esp, 0x4(%ecx)
 
        /* skip additional data */
        add     %eax, %esp
        /* execute syscall */
        int     $0x80
 
+       /* restore top of page pointer in %ecx */
+       mov     %esp, %ecx
+       andl    $(~UM_KERN_PAGE_SIZE) + 1, %ecx
+
        /* check return value */
        pop     %ebx
        cmp     %ebx, %eax
 
 done:
        /* save return value */
-       mov     %eax, STUB_DATA
+       mov     %eax, (%ecx)
 
        /* stop */
        int3
 
 .section .__syscall_stub, "ax"
        .globl batch_syscall_stub
 batch_syscall_stub:
-       mov     $(STUB_DATA), %rbx
-       /* load pointer to first operation */
-       mov     %rbx, %rsp
+       /* %rsp has the pointer to first operation */
+       mov     %rsp, %rbx
        add     $0x10, %rsp
 again:
        /* load length of additional data */
 
 void __attribute__ ((__section__ (".__syscall_stub")))
 stub_segv_handler(int sig, siginfo_t *info, void *p)
 {
+       int stack;
        ucontext_t *uc = p;
+       struct faultinfo *f = (void *)(((unsigned long)&stack) & ~(UM_KERN_PAGE_SIZE - 1));
 
-       GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
-                             &uc->uc_mcontext);
+       GET_FAULTINFO_FROM_MC(*f, &uc->uc_mcontext);
        trap_myself();
 }