ENTRY(vfp_get_float)
        tbl_branch r0, r3, #3
+       .fpu    vfpv2                   @ permit UAL vmov forms for s0-s31
        .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-1:     mrc     p10, 0, r0, c\dr, c0, 0 @ fmrs  r0, s0
+1:     vmov    r0, s\dr                @ r0 = s\dr; same encoding as old mrc
        ret     lr
        .org    1b + 8
-1:     mrc     p10, 0, r0, c\dr, c0, 4 @ fmrs  r0, s1
+       .endr
+       @ Second half of the table: s16-s31.  Overall entry order remains
+       @ s0..s31, identical to the old interleaved even/odd mrc encodings,
+       @ and each slot stays 8 bytes to match tbl_branch's #3 shift.
+       .irp    dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+1:     vmov    r0, s\dr
        ret     lr
        .org    1b + 8
        .endr
 
 ENTRY(vfp_put_float)
        tbl_branch r1, r3, #3
+       .fpu    vfpv2                   @ permit UAL vmov forms for s0-s31
        .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-1:     mcr     p10, 0, r0, c\dr, c0, 0 @ fmsr  r0, s0
+1:     vmov    s\dr, r0                @ s\dr = r0; same encoding as old mcr
        ret     lr
        .org    1b + 8
-1:     mcr     p10, 0, r0, c\dr, c0, 4 @ fmsr  r0, s1
+       .endr
+       @ Second half of the table: s16-s31.  Entry order remains s0..s31
+       @ (as produced by the old interleaved opc2=0/4 encodings), 8 bytes
+       @ per slot to match tbl_branch's #3 shift.
+       .irp    dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+1:     vmov    s\dr, r0
        ret     lr
        .org    1b + 8
        .endr
 
ENTRY(vfp_get_double)
        tbl_branch r0, r3, #3
+       .fpu    vfpv2                   @ permit UAL vmov forms for d0-d15
        .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-1:     fmrrd   r0, r1, d\dr
+1:     vmov    r0, r1, d\dr            @ r0:r1 = d\dr (UAL form of fmrrd)
        ret     lr
        .org    1b + 8
        .endr
#ifdef CONFIG_VFPv3
        @ d16 - d31 registers
-       .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-1:     mrrc    p11, 3, r0, r1, c\dr    @ fmrrd r0, r1, d\dr
+       .fpu    vfpv3                   @ d16-d31 exist only from VFPv3 on
+       @ Iterating 16..31 directly lets vmov name the register; the old
+       @ mrrc form encoded the upper bank via opc1=3 with cr indices 0-15.
+       .irp    dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+1:     vmov    r0, r1, d\dr
        ret     lr
        .org    1b + 8
        .endr
 
ENTRY(vfp_put_double)
        tbl_branch r2, r3, #3
+       .fpu    vfpv2                   @ permit UAL vmov forms for d0-d15
        .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-1:     fmdrr   d\dr, r0, r1
+1:     vmov    d\dr, r0, r1            @ d\dr = r0:r1 (UAL form of fmdrr)
        ret     lr
        .org    1b + 8
        .endr
#ifdef CONFIG_VFPv3
+       .fpu    vfpv3                   @ d16-d31 exist only from VFPv3 on
        @ d16 - d31 registers
-       .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-1:     mcrr    p11, 3, r0, r1, c\dr    @ fmdrr r0, r1, d\dr
+       @ Iterating 16..31 directly lets vmov name the register; the old
+       @ mcrr form encoded the upper bank via opc1=3 with cr indices 0-15.
+       .irp    dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+1:     vmov    d\dr, r0, r1
        ret     lr
        .org    1b + 8
        .endr