retire the dec alpha port

cinap_lenrek 2016-05-04 16:11:48 +02:00
parent f7703d6971
commit 986886f2b8
147 changed files with 12 additions and 28898 deletions


@@ -1,14 +0,0 @@
#include <u.h>
#include <libc.h>

extern int _seek(vlong*, int, vlong, int);

vlong
seek(int fd, vlong o, int p)
{
	vlong l;

	if(_seek(&l, fd, o, p) < 0)
		l = -1LL;
	return l;
}


@@ -1,4 +0,0 @@
GLOBL argv0(SB), $4
GLOBL _tos(SB), $4
GLOBL _privates(SB), $4
GLOBL _nprivates(SB), $4


@@ -1,67 +0,0 @@
TEXT ainc(SB),$-8 /* long ainc(long *); */
MOVQ R0, R1 /* p */
inc1:
MOVLL (R1), R2 /* *p */
ADDL $1, R2
MOVQ R2, R0 /* copy to return */
MOVLC R2, (R1) /* (*p)++ */
BEQ R2, inc1 /* write failed, retry */
RET
TEXT adec(SB),$-8 /* long adec(long *); */
MOVQ R0, R1 /* p */
dec1:
MOVLL (R1), R2 /* *p */
SUBL $1, R2
MOVQ R2, R0 /* copy to return */
MOVLC R2, (R1) /* (*p)-- */
BEQ R2, dec1 /* write failed, retry */
RET
TEXT _xinc(SB), $-8
MOVQ R0, R1 /* p */
xinc1:
MOVLL (R1), R0 /* *p */
ADDL $1, R0
MOVLC R0, (R1) /* (*p)++ */
BEQ R0, xinc1 /* write failed, retry */
RET
TEXT _xdec(SB), $-8
MOVQ R0, R1 /* p */
xdec1:
MOVLL (R1), R0 /* *p */
SUBL $1, R0
MOVQ R0, R2
MOVLC R2, (R1) /* --(*p) */
BEQ R2, xdec1 /* write failed, retry */
RET
TEXT cas(SB), $-8
TEXT casp(SB), $-8
MOVQ R0, R1 /* p */
MOVL old+4(FP), R2
MOVL new+8(FP), R3
MOVLL (R1), R0
CMPEQ R0, R2, R4
BEQ R4, fail /* CMPEQ result is 0, ie R0 != R2: goto fail */
MOVQ R3, R0
MOVLC R0, (R1)
RET
fail:
MOVL $0, R0
RET
TEXT loadlink(SB), $-8
MOVLL (R0), R0
RET
TEXT storecond(SB), $-8
MOVW val+4(FP), R1
MOVLC R1, (R0)
BEQ R1, storecondfail /* write failed */
MOVW $1, R0
RET
storecondfail:
MOVW $0, R0
RET
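/*
 * For reference, the semantics of the load-locked/store-conditional
 * (MOVLL/MOVLC) loops above, written as plain C with simplified
 * prototypes.  This is an illustrative sketch only -- the point of the
 * assembly is that it performs these steps atomically, retrying when
 * the conditional store fails.
 */
long
ainc(long *p)		/* returns the incremented value */
{
	return ++(*p);
}

int
cas(long *p, long old, long new)	/* nonzero iff the swap happened */
{
	if(*p != old)
		return 0;
	*p = new;
	return 1;
}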


@@ -1,7 +0,0 @@
#include <u.h>
#include <libc.h>

void cycles(uvlong *u)
{
	*u = 0LL;
}


@@ -1,189 +0,0 @@
/*
* ulong
* _udiv(ulong num, ulong den)
* {
* int i;
* ulong quo;
*
* if(den == 0)
* *(ulong*)-1 = 0;
* quo = num;
* if(quo > 1<<(32-1))
* quo = 1<<(32-1);
* for(i=0; den<quo; i++)
* den <<= 1;
* quo = 0;
* for(; i>=0; i--) {
* quo <<= 1;
* if(num >= den) {
* num -= den;
* quo |= 1;
* }
* den >>= 1;
* }
* return quo::num;
* }
*/
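/*
 * The pseudocode above is not compilable as written: the store through
 * (ulong*)-1 is a deliberate divide-by-zero fault and "quo::num" stands
 * for returning both quotient and remainder.  A minimal C sketch of the
 * same shift-and-subtract loop (hypothetical name udivmod32, results
 * returned through pointers, den assumed nonzero) could read:
 */
void
udivmod32(ulong num, ulong den, ulong *quop, ulong *remp)
{
	int i;
	ulong quo;

	quo = num;
	if(quo > (1UL<<31))
		quo = 1UL<<31;
	for(i = 0; den < quo; i++)
		den <<= 1;
	quo = 0;
	for(; i >= 0; i--){
		quo <<= 1;
		if(num >= den){
			num -= den;
			quo |= 1;
		}
		den >>= 1;
	}
	*quop = quo;	/* quotient */
	*remp = num;	/* remainder */
}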
#define NOPROF 1
/*
* calling sequence:
* num: 8(R30)
* den: 12(R30)
* returns
* quo: 8(R30)
* rem: 12(R30)
*/
TEXT _udivmodl(SB), NOPROF, $-8
MOVQ $-1, R11
SLLQ $31, R11 /* (1<<31) in canonical form */
MOVL 8(R30), R23 /* numerator */
MOVL 12(R30), R10 /* denominator */
BNE R10, udm20
MOVQ R31, -1(R31) /* fault -- divide by zero; todo: use gentrap? */
udm20:
MOVQ R23, R12
BGE R12, udm34
MOVQ R11, R12
udm34:
MOVQ R31, R11
udm38:
CMPUGE R10, R12, R24
BNE R24, udm54
SLLL $1, R10
ADDQ $1, R11
JMP udm38
udm54:
MOVQ R31, R12
udm58:
BLT R11, udm8c
SLLL $1, R12
CMPUGE R23, R10, R24
BEQ R24, udm7c
SUBL R10, R23
OR $1, R12
udm7c:
SRLL $1, R10
SUBQ $1, R11
JMP udm58
udm8c:
MOVL R12, 8(R30) /* quotient */
MOVL R23, 12(R30) /* remainder */
RET
/*
* save working registers
* and bring in num/den parameters
*/
TEXT _unsargl(SB), NOPROF, $-8
MOVQ R10, 24(R30)
MOVQ R11, 32(R30)
MOVQ R12, 40(R30)
MOVQ R23, 48(R30)
MOVQ R24, 56(R30)
MOVL R27, 8(R30)
MOVL 72(R30), R27
MOVL R27, 12(R30)
RET
/*
* save working registers
* and bring in absolute value
* of num/den parameters
*/
TEXT _absargl(SB), NOPROF, $-8
MOVQ R10, 24(R30)
MOVQ R11, 32(R30)
MOVQ R12, 40(R30)
MOVQ R23, 48(R30)
MOVQ R24, 56(R30)
MOVL R27, 64(R30)
BGE R27, ab1
SUBL R27, R31, R27
ab1:
MOVL R27, 8(R30) /* numerator */
MOVL 72(R30), R27
BGE R27, ab2
SUBL R27, R31, R27
ab2:
MOVL R27, 12(R30) /* denominator */
RET
/*
* restore registers and
* return to original caller
* answer is in R27
*/
TEXT _retargl(SB), NOPROF, $-8
MOVQ 24(R30), R10
MOVQ 32(R30), R11
MOVQ 40(R30), R12
MOVQ 48(R30), R23
MOVQ 56(R30), R24
MOVL 0(R30), R26
ADDQ $64, R30
RET /* back to main sequence */
TEXT _divl(SB), NOPROF, $-8
SUBQ $64, R30 /* 5 reg save, 2 parameters, link */
MOVL R26, 0(R30)
JSR _absargl(SB)
JSR _udivmodl(SB)
MOVL 8(R30), R27
MOVL 64(R30), R10 /* clean up the sign */
MOVL 72(R30), R11
XOR R11, R10
BGE R10, div1
SUBL R27, R31, R27
div1:
JSR _retargl(SB)
RET /* not executed */
TEXT _divlu(SB), NOPROF, $-8
SUBQ $64, R30 /* 5 reg save, 2 parameters, link */
MOVL R26, 0(R30)
JSR _unsargl(SB)
JSR _udivmodl(SB)
MOVL 8(R30), R27
JSR _retargl(SB)
RET /* not executed */
TEXT _modl(SB), NOPROF, $-8
SUBQ $64, R30 /* 5 reg save, 2 parameters, link */
MOVL R26, 0(R30)
JSR _absargl(SB)
JSR _udivmodl(SB)
MOVL 12(R30), R27
MOVL 64(R30), R10 /* clean up the sign */
BGE R10, div2
SUBL R27, R31, R27
div2:
JSR _retargl(SB)
RET /* not executed */
TEXT _modlu(SB), NOPROF, $-8
SUBQ $64, R30 /* 5 reg save, 2 parameters, link */
MOVL R26, 0(R30)
JSR _unsargl(SB)
JSR _udivmodl(SB)
MOVL 12(R30), R27
JSR _retargl(SB)
RET /* not executed */


@@ -1,191 +0,0 @@
/*
* uvlong
* _udiv(uvlong num, uvlong den)
* {
* int i;
* uvlong quo;
*
* if(den == 0)
* *(ulong*)-1 = 0;
* quo = num;
* if(quo > 1<<(64-1))
* quo = 1<<(64-1);
* for(i=0; den<quo; i++)
* den <<= 1;
* quo = 0;
* for(; i>=0; i--) {
* quo <<= 1;
* if(num >= den) {
* num -= den;
* quo |= 1;
* }
* den >>= 1;
* }
* return quo::num;
* }
*/
#define NOPROF 1
/*
* calling sequence:
* num: 8(R30)
* den: 16(R30)
* returns
* quo: 8(R30)
* rem: 16(R30)
*/
TEXT _udivmodq(SB), NOPROF, $-8
MOVQ $1, R11
SLLQ $63, R11
MOVQ 8(R30), R23 /* numerator */
MOVQ 16(R30), R10 /* denominator */
BNE R10, udm20
MOVQ R31, -1(R31) /* fault -- divide by zero; todo: use gentrap? */
udm20:
MOVQ R23, R12
BGE R12, udm34
MOVQ R11, R12
udm34:
MOVQ R31, R11
udm38:
CMPUGE R10, R12, R24
BNE R24, udm54
SLLQ $1, R10
ADDQ $1, R11
JMP udm38
udm54:
MOVQ R31, R12
udm58:
BLT R11, udm8c
SLLQ $1, R12
CMPUGE R23, R10, R24
BEQ R24, udm7c
SUBQ R10, R23
OR $1, R12
udm7c:
SRLQ $1, R10
SUBQ $1, R11
JMP udm58
udm8c:
MOVQ R12, 8(R30) /* quotient */
MOVQ R23, 16(R30) /* remainder */
RET
/*
* save working registers
* and bring in num/den parameters
*/
TEXT _unsargq(SB), NOPROF, $-8
MOVQ R10, 24(R30)
MOVQ R11, 32(R30)
MOVQ R12, 40(R30)
MOVQ R23, 48(R30)
MOVQ R24, 56(R30)
MOVQ R27, 8(R30)
MOVQ 72(R30), R27
MOVQ R27, 16(R30)
MOVQ (R30), R10 /* debug */
RET
/*
* save working registers
* and bring in absolute value
* of num/den parameters
*/
TEXT _absargq(SB), NOPROF, $-8
MOVQ R10, 24(R30)
MOVQ R11, 32(R30)
MOVQ R12, 40(R30)
MOVQ R23, 48(R30)
MOVQ R24, 56(R30)
MOVQ R27, 64(R30)
BGE R27, ab1
SUBQ R27, R31, R27
ab1:
MOVQ R27, 8(R30) /* numerator */
MOVQ 72(R30), R27
BGE R27, ab2
SUBQ R27, R31, R27
ab2:
MOVQ R27, 16(R30) /* denominator */
MOVQ (R30), R10 /* debug */
RET
/*
* restore registers and
* return to original caller
* answer is in R27
*/
TEXT _retargq(SB), NOPROF, $-8
MOVQ 24(R30), R10
MOVQ 32(R30), R11
MOVQ 40(R30), R12
MOVQ 48(R30), R23
MOVQ 56(R30), R24
MOVL 0(R30), R26
ADDQ $64, R30
RET /* back to main sequence */
TEXT _divq(SB), NOPROF, $-8
SUBQ $64, R30 /* 5 reg save, 2 parameters, link */
MOVQ R26, 0(R30)
JSR _absargq(SB)
JSR _udivmodq(SB)
MOVQ 8(R30), R27
MOVQ 64(R30), R10 /* clean up the sign */
MOVQ 72(R30), R11
XOR R11, R10
BGE R10, div1
SUBQ R27, R31, R27
div1:
JSR _retargq(SB)
RET /* not executed */
TEXT _divqu(SB), NOPROF, $-8
SUBQ $64, R30 /* 5 reg save, 2 parameters, link */
MOVQ R26, 0(R30)
JSR _unsargq(SB)
JSR _udivmodq(SB)
MOVQ 8(R30), R27
JSR _retargq(SB)
RET /* not executed */
TEXT _modq(SB), NOPROF, $-8
SUBQ $64, R30 /* 5 reg save, 2 parameters, link */
MOVQ R26, 0(R30)
JSR _absargq(SB)
JSR _udivmodq(SB)
MOVQ 16(R30), R27
MOVQ 64(R30), R10 /* clean up the sign */
BGE R10, div2
SUBQ R27, R31, R27
div2:
JSR _retargq(SB)
RET /* not executed */
TEXT _modqu(SB), NOPROF, $-8
SUBQ $64, R30 /* 5 reg save, 2 parameters, link */
MOVQ R26, 0(R30)
JSR _unsargq(SB)
JSR _udivmodq(SB)
MOVQ 16(R30), R27
JSR _retargq(SB)
RET /* not executed */
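/*
 * Taken together, _divq/_divqu/_modq/_modqu appear to be the compiler's
 * support routines for 64-bit division -- signed and unsigned quotient
 * and remainder -- which is why main9.s forces them to be loaded.  In
 * plain C the operations they stand in for are simply (illustrative
 * sketch with hypothetical names; the real routines use the stack-based
 * calling convention described above):
 */
vlong
quo64(vlong n, vlong d)		/* what _divq computes */
{
	return n / d;
}

vlong
rem64(vlong n, vlong d)		/* what _modq computes */
{
	return n % d;
}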


@@ -1,4 +0,0 @@
TEXT getcallerpc(SB), $-8
MOVL 0(SP), R0
RET


@@ -1,57 +0,0 @@
#define EXCB WORD $0x60000400 /* until 7a/7l catch up */
TEXT getfsr(SB), $8
EXCB
MOVT FPCR, F0
EXCB
MOVT F0, tmp-8(SP)
MOVL tmp-4(SP), R1
MOVQ $0x01e00000, R2
AND R2, R1, R0
RET
TEXT setfsr(SB), $8
MOVQ $0x01e00000, R2
EXCB
MOVT FPCR, F0
EXCB
MOVT F0, tmp-8(SP)
MOVL tmp-4(SP), R1
ANDNOT R2, R1, R3
AND R2, R0, R4
OR R3, R4, R5
MOVL R5, tmp-4(SP)
MOVT tmp-8(SP), F0
EXCB
MOVT F0, FPCR
EXCB
RET
TEXT getfcr(SB), $8
EXCB
MOVT FPCR, F0
EXCB
MOVT F0, tmp-8(SP)
MOVL tmp-4(SP), R1
MOVQ $0x700c0000, R2
AND R2, R1, R0
XOR R2, R0
RET
TEXT setfcr(SB), $8
MOVQ $0x700c0000, R2
XOR R2, R0
EXCB
MOVT FPCR, F0
EXCB
MOVT F0, tmp-8(SP)
MOVL tmp-4(SP), R1
ANDNOT R2, R1, R3
AND R2, R0, R4
OR R3, R4, R5
MOVL R5, tmp-4(SP)
MOVT tmp-8(SP), F0
EXCB
MOVT F0, FPCR
EXCB
RET
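/*
 * All four routines use the same idiom: FPCR is reached only through a
 * floating-point register, so it is bounced through F0 and a stack
 * temporary, then masked.  The read-modify-write that setfsr/setfcr
 * perform is, in C terms (sketch only; mask stands for the 0x01e00000
 * or 0x700c0000 constants above, and the fcr routines additionally XOR
 * with the mask, flipping the sense of those bits):
 */
ulong
insertbits(ulong fpcr, ulong bits, ulong mask)	/* hypothetical helper */
{
	return (fpcr & ~mask) | (bits & mask);
}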


@@ -1,27 +0,0 @@
#define NPRIVATES 16
TEXT _main(SB), 1, $(16 + NPRIVATES*4)
MOVQ $setSB(SB), R29
MOVL R0, _tos(SB)
MOVQ $p-64(SP),R1
MOVL R1,_privates+0(SB)
MOVQ $16,R1
MOVL R1,_nprivates+0(SB)
MOVL inargc-8(FP), R0
MOVL $inargv-4(FP), R1
MOVL R0, 8(R30)
MOVL R1, 12(R30)
JSR main(SB)
loop:
MOVL $_exitstr<>(SB), R0
MOVL R0, 8(R30)
JSR exits(SB)
MOVQ $_divq(SB), R31 /* force loading of divq */
MOVQ $_divl(SB), R31 /* force loading of divl */
JMP loop
DATA _exitstr<>+0(SB)/4, $"main"
GLOBL _exitstr<>+0(SB), $5
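/*
 * In outline the stub above sets up the static base register, _tos and
 * the _privates area, pulls argc/argv off the stack, and then behaves
 * like this C sketch (hypothetical wrapper, illustration only):
 */
extern void main(int, char*[]);

void
startmain(int argc, char *argv[])
{
	main(argc, argv);
	for(;;)
		exits("main");	/* reached only if main returns without exiting */
}
/*
 * The dummy loads of _divq and _divl exist only to force the linker to
 * include the software divide routines that compiled code may call.
 */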


@@ -1,38 +0,0 @@
#define NPRIVATES 16
TEXT _mainp(SB), 1, $16
MOVQ $setSB(SB), R29
MOVL R0, _tos(SB)
MOVQ $p-64(SP),R1
MOVL R1,_privates+0(SB)
MOVQ $16,R1
MOVL R1,_nprivates+0(SB)
JSR _profmain(SB)
MOVL __prof+4(SB), R0
MOVL R0, __prof+0(SB)
MOVL inargc-4(FP), R0
MOVL $inargv+0(FP), R1
MOVL R0, 8(R30)
MOVL R1, 12(R30)
JSR main(SB)
loop:
MOVQ $exits<>(SB), R0
MOVL R0, 8(R30)
JSR exits(SB)
MOVQ $_divq(SB), R31 /* force loading of divq */
MOVQ $_divl(SB), R31 /* force loading of divl */
MOVQ $_profin(SB), R31 /* force loading of profile */
JMP loop
TEXT _saveret(SB), 1, $0
TEXT _savearg(SB), 1, $0
RET
TEXT _callpc(SB), 1, $0
MOVL argp-8(FP), R0
RET
DATA exits<>+0(SB)/4, $"main"
GLOBL exits<>+0(SB), $5


@@ -1,201 +0,0 @@
#define QUAD 8
#define ALIGN 64
#define BLOCK 64
TEXT memmove(SB), $0
_memmove:
MOVL from+4(FP), R7
MOVL n+8(FP), R10
MOVQ R0, R6
CMPUGE R7, R0, R5
BNE R5, _forward
MOVQ R6, R8 /* end to address */
ADDL R10, R6, R6 /* to+n */
ADDL R10, R7, R7 /* from+n */
CMPUGE $ALIGN, R10, R1 /* need at least ALIGN bytes */
BNE R1, _b1tail
_balign:
AND $(ALIGN-1), R6, R1
BEQ R1, _baligned
MOVBU -1(R7), R2
ADDL $-1, R6, R6
MOVB R2, (R6)
ADDL $-1, R7, R7
JMP _balign
_baligned:
AND $(QUAD-1), R7, R1 /* is the source quad-aligned */
BNE R1, _bunaligned
ADDL $(BLOCK-1), R8, R9
_bblock:
CMPUGE R9, R6, R1
BNE R1, _b8tail
MOVQ -64(R7), R22
MOVQ -56(R7), R23
MOVQ -48(R7), R24
MOVQ -40(R7), R25
MOVQ -32(R7), R2
MOVQ -24(R7), R3
MOVQ -16(R7), R4
MOVQ -8(R7), R5
SUBL $64, R6, R6
SUBL $64, R7, R7
MOVQ R22, (R6)
MOVQ R23, 8(R6)
MOVQ R24, 16(R6)
MOVQ R25, 24(R6)
MOVQ R2, 32(R6)
MOVQ R3, 40(R6)
MOVQ R4, 48(R6)
MOVQ R5, 56(R6)
JMP _bblock
_b8tail:
ADDL $(QUAD-1), R8, R9
_b8block:
CMPUGE R9, R6, R1
BNE R1, _b1tail
MOVQ -8(R7), R2
SUBL $8, R6
MOVQ R2, (R6)
SUBL $8, R7
JMP _b8block
_b1tail:
CMPUGE R8, R6, R1
BNE R1, _ret
MOVBU -1(R7), R2
SUBL $1, R6, R6
MOVB R2, (R6)
SUBL $1, R7, R7
JMP _b1tail
_ret:
RET
_bunaligned:
ADDL $(16-1), R8, R9
_bu8block:
CMPUGE R9, R6, R1
BNE R1, _b1tail
MOVQU -16(R7), R4
MOVQU -8(R7), R3
MOVQU (R7), R2
SUBL $16, R6
EXTQH R7, R2, R2
EXTQL R7, R3, R5
OR R5, R2, R11
EXTQH R7, R3, R3
EXTQL R7, R4, R4
OR R3, R4, R13
MOVQ R11, 8(R6)
MOVQ R13, (R6)
SUBL $16, R7
JMP _bu8block
_forward:
ADDL R10, R6, R8 /* end to address */
CMPUGE $ALIGN, R10, R1 /* need at least ALIGN bytes */
BNE R1, _f1tail
_falign:
AND $(ALIGN-1), R6, R1
BEQ R1, _faligned
MOVBU (R7), R2
ADDL $1, R6, R6
ADDL $1, R7, R7
MOVB R2, -1(R6)
JMP _falign
_faligned:
AND $(QUAD-1), R7, R1 /* is the source quad-aligned */
BNE R1, _funaligned
SUBL $(BLOCK-1), R8, R9
_fblock:
CMPUGT R9, R6, R1
BEQ R1, _f8tail
MOVQ (R7), R2
MOVQ 8(R7), R3
MOVQ 16(R7), R4
MOVQ 24(R7), R5
MOVQ 32(R7), R22
MOVQ 40(R7), R23
MOVQ 48(R7), R24
MOVQ 56(R7), R25
ADDL $64, R6, R6
ADDL $64, R7, R7
MOVQ R2, -64(R6)
MOVQ R3, -56(R6)
MOVQ R4, -48(R6)
MOVQ R5, -40(R6)
MOVQ R22, -32(R6)
MOVQ R23, -24(R6)
MOVQ R24, -16(R6)
MOVQ R25, -8(R6)
JMP _fblock
_f8tail:
SUBL $(QUAD-1), R8, R9
_f8block:
CMPUGT R9, R6, R1
BEQ R1, _f1tail
MOVQ (R7), R2
ADDL $8, R6
ADDL $8, R7
MOVQ R2, -8(R6)
JMP _f8block
_f1tail:
CMPUGT R8, R6, R1
BEQ R1, _fret
MOVBU (R7), R2
ADDL $1, R6, R6
ADDL $1, R7, R7
MOVB R2, -1(R6)
JMP _f1tail
_fret:
RET
_funaligned:
SUBL $(16-1), R8, R9
_fu8block:
CMPUGT R9, R6, R1
BEQ R1, _f1tail
MOVQU (R7), R2
MOVQU 8(R7), R3
MOVQU 16(R7), R4
EXTQL R7, R2, R2
EXTQH R7, R3, R5
OR R5, R2, R11
EXTQL R7, R3, R3
MOVQ R11, (R6)
EXTQH R7, R4, R4
OR R3, R4, R11
MOVQ R11, 8(R6)
ADDL $16, R6
ADDL $16, R7
JMP _fu8block
TEXT memcpy(SB), $0
JMP _memmove
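/*
 * Stripped of the alignment and 64-byte blocking above, the overlap
 * handling is the classic one: copy forward when the source does not
 * lie below the destination, otherwise copy backward from the end.
 * A byte-at-a-time C sketch (hypothetical name, illustration only):
 */
void*
simplememmove(void *to, void *from, ulong n)
{
	uchar *t, *f;

	t = to;
	f = from;
	if(f >= t){
		while(n-- > 0)
			*t++ = *f++;
	}else{
		t += n;
		f += n;
		while(n-- > 0)
			*--t = *--f;
	}
	return to;
}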


@@ -1,61 +0,0 @@
TEXT memset(SB), $0
MOVL R0, R6
MOVBU data+4(FP), R2
MOVL n+8(FP), R10
ADDL R10, R0, R8
CMPUGE $8, R10, R1 /* need at least 8 bytes */
BNE R1, _1loop
SLLQ $8, R2, R1 /* replicate the byte */
OR R1, R2
SLLQ $16, R2, R1
OR R1, R2
SLLQ $32, R2, R1
OR R1, R2
_align:
AND $(8-1), R6, R1
BEQ R1, _aligned
MOVB R2, (R6)
ADDL $1, R6, R6
JMP _align
_aligned:
SUBL $(64-1), R8, R9 /* end pointer minus slop */
_64loop:
CMPUGT R9, R6, R1
BEQ R1, _8tail
MOVQ R2, (R6)
MOVQ R2, 8(R6)
MOVQ R2, 16(R6)
MOVQ R2, 24(R6)
MOVQ R2, 32(R6)
MOVQ R2, 40(R6)
MOVQ R2, 48(R6)
MOVQ R2, 56(R6)
ADDL $64, R6, R6
JMP _64loop
_8tail:
SUBL $(8-1), R8, R9
_8loop:
CMPUGT R9, R6, R1
BEQ R1, _1loop
MOVQ R2, (R6)
ADDL $8, R6
JMP _8loop
_1loop:
CMPUGT R8, R6, R1
BEQ R1, _ret
MOVB R2, (R6)
ADDL $1, R6
JMP _1loop
_ret:
RET
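/*
 * The interesting step above is the byte replication: the fill byte is
 * doubled up with shifts and ORs until it fills a 64-bit word, which
 * the aligned loops then store 8 or 64 bytes at a time.  In C terms
 * (hypothetical helper, sketch only):
 */
uvlong
replbyte(int c)
{
	uvlong v;

	v = c & 0xFF;
	v |= v << 8;	/* 2 copies */
	v |= v << 16;	/* 4 copies */
	v |= v << 32;	/* 8 copies */
	return v;
}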


@@ -1,33 +0,0 @@
objtype=alpha
</$objtype/mkfile

LIB=/$objtype/lib/libc.a

SFILES=\
	argv0.s\
	atom.s\
	divl.s\
	divq.s\
	getcallerpc.s\
	getfcr.s\
	main9.s\
	main9p.s\
	memmove.s\
	memset.s\
	setjmp.s\
	tas.s

CFILES=\
	_seek.c\
	cycles.c\
	notejmp.c\

HFILES=/sys/include/libc.h

OFILES=${CFILES:%.c=%.$O} ${SFILES:%.s=%.$O}

UPDATE=mkfile\
	$HFILES\
	$CFILES\
	$SFILES\

</sys/src/cmd/mksyslib


@@ -1,16 +0,0 @@
#include <u.h>
#include <libc.h>
#include <ureg.h>

void
notejmp(void *vr, jmp_buf j, int ret)
{
	struct Ureg *r = vr;

	r->r0 = ret;
	if(ret == 0)
		r->r0 = 1;
	r->pc = j[JMPBUFPC];
	r->sp = j[JMPBUFSP];
	noted(NCONT);
}
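/*
 * Typical use: a note handler does not return normally but jumps back
 * to a setjmp point in ordinary code, which then resumes with the
 * nonzero return value installed above.  A minimal sketch, assuming the
 * usual notify()-based handler setup (hypothetical names):
 */
#include <u.h>
#include <libc.h>
#include <ureg.h>

static jmp_buf noteenv;

static void
handler(void *ureg, char *msg)
{
	USED(msg);
	notejmp(ureg, noteenv, 1);	/* does not return */
}

/* in main(): notify(handler); if(setjmp(noteenv)) recover from the note */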


@@ -1,14 +0,0 @@
TEXT setjmp(SB), 1, $-8
MOVL R30, (R0)
MOVL R26, 4(R0)
MOVQ $0, R0
RET
TEXT longjmp(SB), 1, $-8
MOVL r+4(FP), R3
BNE R3, ok /* ansi: "longjmp(0) => longjmp(1)" */
MOVQ $1, R3 /* bless their pointed heads */
ok: MOVL (R0), R30
MOVL 4(R0), R26
MOVL R3, R0
RET


@@ -1,10 +0,0 @@
TEXT _tas(SB), $-8
MOVQ R0, R1 /* l */
tas1:
MOVLL (R1), R0 /* l->key */
BNE R0, tas2
MOVQ $1, R2
MOVLC R2, (R1) /* l->key = 1 */
BEQ R2, tas1 /* write failed, try again? */
tas2:
RET
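/*
 * _tas is the test-and-set primitive behind the spin locks in lock(2):
 * it returns the previous value of the key and sets the key to 1 if it
 * was free.  Non-atomic C illustration of the same semantics
 * (hypothetical name; the real routine retries the conditional store):
 */
int
tas_sketch(int *key)
{
	int old;

	old = *key;
	if(old == 0)
		*key = 1;
	return old;
}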