bcm64: use MOVP instruction for saving and restoring registers

This commit is contained in:
cinap_lenrek 2019-05-09 11:11:45 +02:00
parent 1c0427e6dd
commit 9d790238f2
2 changed files with 85 additions and 182 deletions

View file

@ -17,7 +17,6 @@ extern void noteret(void);
extern void returnto(void*); extern void returnto(void*);
extern void fpsaveregs(void*); extern void fpsaveregs(void*);
extern void fploadregs(void*); extern void fploadregs(void*);
extern void magic(void);
extern void setttbr(uintptr pa); extern void setttbr(uintptr pa);
extern uintptr getfar(void); extern uintptr getfar(void);

View file

@ -334,17 +334,6 @@ TEXT setttbr(SB), 1, $-4
B cacheiinv(SB) B cacheiinv(SB)
TEXT magic(SB), 1, $-4
DSB $SY
ISB $SY
DSB $SY
ISB $SY
DSB $SY
ISB $SY
DSB $SY
ISB $SY
RETURN
/* /*
* TLB maintenance operations. * TLB maintenance operations.
* these broadcast to all cpu's in the cluster * these broadcast to all cpu's in the cluster
@ -421,10 +410,8 @@ TEXT vsys0(SB), 1, $-4
CMPW $0x15, R17 // SVC trap? CMPW $0x15, R17 // SVC trap?
BNE _itsatrap // nope. BNE _itsatrap // nope.
MOV R26, 224(RSP) // special MOVP R26, R27, 224(RSP)
MOV R27, 232(RSP) // special MOVP R28, R29, 240(RSP)
MOV R28, 240(RSP) // sb
MOV R29, 248(RSP) // special
MRS SP_EL0, R1 MRS SP_EL0, R1
MRS ELR_EL1, R2 MRS ELR_EL1, R2
@ -456,10 +443,8 @@ TEXT forkret(SB), 1, $-4
MSR R2, ELR_EL1 MSR R2, ELR_EL1
MSR R3, SPSR_EL1 MSR R3, SPSR_EL1
MOV 224(RSP), R26 // special MOVP 224(RSP), R26, R27
MOV 232(RSP), R27 // special MOVP 240(RSP), R28, R29
MOV 240(RSP), R28 // sb
MOV 248(RSP), R29 // special
MOV 256(RSP), R30 // link MOV 256(RSP), R30 // link
@ -468,38 +453,24 @@ TEXT forkret(SB), 1, $-4
TEXT itsatrap<>(SB), 1, $-4 TEXT itsatrap<>(SB), 1, $-4
_itsatrap: _itsatrap:
MOV R1, 24(RSP) MOVP R1, R2, 24(RSP)
MOV R2, 32(RSP) MOVP R3, R4, 40(RSP)
MOV R3, 40(RSP) MOVP R5, R6, 56(RSP)
MOV R4, 48(RSP) MOVP R7, R8, 72(RSP)
MOV R5, 56(RSP) MOVP R9, R10, 88(RSP)
MOV R6, 64(RSP) MOVP R11, R12, 104(RSP)
MOV R7, 72(RSP) MOVP R13, R14, 120(RSP)
MOV R8, 80(RSP) MOVP R15, R16, 136(RSP)
MOV R9, 88(RSP)
MOV R10, 96(RSP)
MOV R11, 104(RSP)
MOV R12, 112(RSP)
MOV R13, 120(RSP)
MOV R14, 128(RSP)
MOV R15, 136(RSP)
MOV R16, 144(RSP)
MOV R18, 160(RSP) MOVP R18, R19, 160(RSP)
MOV R19, 168(RSP) MOVP R20, R21, 176(RSP)
MOV R20, 176(RSP) MOVP R22, R23, 192(RSP)
MOV R21, 184(RSP) MOVP R24, R25, 208(RSP)
MOV R22, 192(RSP)
MOV R23, 200(RSP)
MOV R24, 208(RSP)
MOV R25, 216(RSP)
// trap/irq/fiq/serr from EL0 // trap/irq/fiq/serr from EL0
TEXT vtrap0(SB), 1, $-4 TEXT vtrap0(SB), 1, $-4
MOV R26, 224(RSP) // special MOVP R26, R27, 224(RSP)
MOV R27, 232(RSP) // special MOVP R28, R29, 240(RSP)
MOV R28, 240(RSP) // sb
MOV R29, 248(RSP) // special
MRS SP_EL0, R1 MRS SP_EL0, R1
MRS ELR_EL1, R2 MRS ELR_EL1, R2
@ -530,38 +501,23 @@ TEXT noteret(SB), 1, $-4
MSR R2, ELR_EL1 MSR R2, ELR_EL1
MSR R3, SPSR_EL1 MSR R3, SPSR_EL1
MOV 224(RSP), R26 // special MOVP 224(RSP), R26, R27
MOV 232(RSP), R27 // special MOVP 240(RSP), R28, R29
MOV 240(RSP), R28 // sb
MOV 248(RSP), R29 // special
_intrreturn: _intrreturn:
MOV 16(RSP), R0 MOVP 16(RSP), R0, R1
MOV 24(RSP), R1 MOVP 32(RSP), R2, R3
MOV 32(RSP), R2 MOVP 48(RSP), R4, R5
MOV 40(RSP), R3 MOVP 64(RSP), R6, R7
MOV 48(RSP), R4 MOVP 80(RSP), R8, R9
MOV 56(RSP), R5 MOVP 96(RSP), R10, R11
MOV 64(RSP), R6 MOVP 112(RSP), R12, R13
MOV 72(RSP), R7 MOVP 128(RSP), R14, R15
MOV 80(RSP), R8 MOVP 144(RSP), R16, R17
MOV 88(RSP), R9 MOVP 160(RSP), R18, R19
MOV 96(RSP), R10 MOVP 176(RSP), R20, R21
MOV 104(RSP), R11 MOVP 192(RSP), R22, R23
MOV 112(RSP), R12 MOVP 208(RSP), R24, R25
MOV 120(RSP), R13
MOV 128(RSP), R14
MOV 136(RSP), R15
MOV 144(RSP), R16
MOV 152(RSP), R17
MOV 160(RSP), R18
MOV 168(RSP), R19
MOV 176(RSP), R20
MOV 184(RSP), R21
MOV 192(RSP), R22
MOV 200(RSP), R23
MOV 208(RSP), R24
MOV 216(RSP), R25
MOV 256(RSP), R30 // link MOV 256(RSP), R30 // link
@ -612,32 +568,19 @@ _vsyspatch:
TEXT vtrap(SB), 1, $-4 TEXT vtrap(SB), 1, $-4
SUB $TRAPFRAMESIZE, RSP SUB $TRAPFRAMESIZE, RSP
MOV R0, 16(RSP) MOVP R0, R1, 16(RSP)
MOV R1, 24(RSP) MOVP R2, R3, 32(RSP)
MOV R2, 32(RSP) MOVP R4, R5, 48(RSP)
MOV R3, 40(RSP) MOVP R6, R7, 64(RSP)
MOV R4, 48(RSP) MOVP R8, R9, 80(RSP)
MOV R5, 56(RSP) MOVP R10, R11, 96(RSP)
MOV R6, 64(RSP) MOVP R12, R13, 112(RSP)
MOV R7, 72(RSP) MOVP R14, R15, 128(RSP)
MOV R8, 80(RSP) MOVP R16, R17, 144(RSP)
MOV R9, 88(RSP) MOVP R18, R19, 160(RSP)
MOV R10, 96(RSP) MOVP R20, R21, 176(RSP)
MOV R11, 104(RSP) MOVP R22, R23, 192(RSP)
MOV R12, 112(RSP) MOVP R24, R25, 208(RSP)
MOV R13, 120(RSP)
MOV R14, 128(RSP)
MOV R15, 136(RSP)
MOV R16, 144(RSP)
MOV R17, 152(RSP)
MOV R18, 160(RSP)
MOV R19, 168(RSP)
MOV R20, 176(RSP)
MOV R21, 184(RSP)
MOV R22, 192(RSP)
MOV R23, 200(RSP)
MOV R24, 208(RSP)
MOV R25, 216(RSP)
MOV R30, 256(RSP) // link MOV R30, 256(RSP) // link
@ -649,32 +592,19 @@ _vtrappatch:
TEXT virq(SB), 1, $-4 TEXT virq(SB), 1, $-4
SUB $TRAPFRAMESIZE, RSP SUB $TRAPFRAMESIZE, RSP
MOV R0, 16(RSP) MOVP R0, R1, 16(RSP)
MOV R1, 24(RSP) MOVP R2, R3, 32(RSP)
MOV R2, 32(RSP) MOVP R4, R5, 48(RSP)
MOV R3, 40(RSP) MOVP R6, R7, 64(RSP)
MOV R4, 48(RSP) MOVP R8, R9, 80(RSP)
MOV R5, 56(RSP) MOVP R10, R11, 96(RSP)
MOV R6, 64(RSP) MOVP R12, R13, 112(RSP)
MOV R7, 72(RSP) MOVP R14, R15, 128(RSP)
MOV R8, 80(RSP) MOVP R16, R17, 144(RSP)
MOV R9, 88(RSP) MOVP R18, R19, 160(RSP)
MOV R10, 96(RSP) MOVP R20, R21, 176(RSP)
MOV R11, 104(RSP) MOVP R22, R23, 192(RSP)
MOV R12, 112(RSP) MOVP R24, R25, 208(RSP)
MOV R13, 120(RSP)
MOV R14, 128(RSP)
MOV R15, 136(RSP)
MOV R16, 144(RSP)
MOV R17, 152(RSP)
MOV R18, 160(RSP)
MOV R19, 168(RSP)
MOV R20, 176(RSP)
MOV R21, 184(RSP)
MOV R22, 192(RSP)
MOV R23, 200(RSP)
MOV R24, 208(RSP)
MOV R25, 216(RSP)
MOV R30, 256(RSP) // link MOV R30, 256(RSP) // link
@ -686,32 +616,19 @@ _virqpatch:
TEXT vfiq(SB), 1, $-4 TEXT vfiq(SB), 1, $-4
SUB $TRAPFRAMESIZE, RSP SUB $TRAPFRAMESIZE, RSP
MOV R0, 16(RSP) MOVP R0, R1, 16(RSP)
MOV R1, 24(RSP) MOVP R2, R3, 32(RSP)
MOV R2, 32(RSP) MOVP R4, R5, 48(RSP)
MOV R3, 40(RSP) MOVP R6, R7, 64(RSP)
MOV R4, 48(RSP) MOVP R8, R9, 80(RSP)
MOV R5, 56(RSP) MOVP R10, R11, 96(RSP)
MOV R6, 64(RSP) MOVP R12, R13, 112(RSP)
MOV R7, 72(RSP) MOVP R14, R15, 128(RSP)
MOV R8, 80(RSP) MOVP R16, R17, 144(RSP)
MOV R9, 88(RSP) MOVP R18, R19, 160(RSP)
MOV R10, 96(RSP) MOVP R20, R21, 176(RSP)
MOV R11, 104(RSP) MOVP R22, R23, 192(RSP)
MOV R12, 112(RSP) MOVP R24, R25, 208(RSP)
MOV R13, 120(RSP)
MOV R14, 128(RSP)
MOV R15, 136(RSP)
MOV R16, 144(RSP)
MOV R17, 152(RSP)
MOV R18, 160(RSP)
MOV R19, 168(RSP)
MOV R20, 176(RSP)
MOV R21, 184(RSP)
MOV R22, 192(RSP)
MOV R23, 200(RSP)
MOV R24, 208(RSP)
MOV R25, 216(RSP)
MOV R30, 256(RSP) // link MOV R30, 256(RSP) // link
MOV $(2<<32), R0 // type fiq MOV $(2<<32), R0 // type fiq
@ -722,32 +639,19 @@ _vfiqpatch:
TEXT vserr(SB), 1, $-4 TEXT vserr(SB), 1, $-4
SUB $TRAPFRAMESIZE, RSP SUB $TRAPFRAMESIZE, RSP
MOV R0, 16(RSP) MOVP R0, R1, 16(RSP)
MOV R1, 24(RSP) MOVP R2, R3, 32(RSP)
MOV R2, 32(RSP) MOVP R4, R5, 48(RSP)
MOV R3, 40(RSP) MOVP R6, R7, 64(RSP)
MOV R4, 48(RSP) MOVP R8, R9, 80(RSP)
MOV R5, 56(RSP) MOVP R10, R11, 96(RSP)
MOV R6, 64(RSP) MOVP R12, R13, 112(RSP)
MOV R7, 72(RSP) MOVP R14, R15, 128(RSP)
MOV R8, 80(RSP) MOVP R16, R17, 144(RSP)
MOV R9, 88(RSP) MOVP R18, R19, 160(RSP)
MOV R10, 96(RSP) MOVP R20, R21, 176(RSP)
MOV R11, 104(RSP) MOVP R22, R23, 192(RSP)
MOV R12, 112(RSP) MOVP R24, R25, 208(RSP)
MOV R13, 120(RSP)
MOV R14, 128(RSP)
MOV R15, 136(RSP)
MOV R16, 144(RSP)
MOV R17, 152(RSP)
MOV R18, 160(RSP)
MOV R19, 168(RSP)
MOV R20, 176(RSP)
MOV R21, 184(RSP)
MOV R22, 192(RSP)
MOV R23, 200(RSP)
MOV R24, 208(RSP)
MOV R25, 216(RSP)
MOV R30, 256(RSP) // link MOV R30, 256(RSP) // link