libc and ape support for amd64

This commit is contained in:
cinap_lenrek 2014-02-01 10:31:41 +01:00
parent d4fb753c9c
commit ed9e9f98e9
51 changed files with 2016 additions and 0 deletions

View file

@ -0,0 +1,14 @@
#include <u.h>
#include <libc.h>

extern int _seek(vlong*, int, vlong, int);

/*
 * seek: reposition the offset of fd by o according to p
 * (forwarded to the _seek system-call stub, which stores the
 * resulting offset through its first argument).  Returns the
 * new offset, or -1 if _seek reports an error.
 */
vlong
seek(int fd, vlong o, int p)
{
	vlong noff;

	if(_seek(&noff, fd, o, p) >= 0)
		return noff;
	return -1LL;
}

View file

@ -0,0 +1,4 @@
/*
 * Runtime globals referenced by startup code (see _main in main9.s):
 * argv0 and _tos are 8-byte pointers; _privates points at the
 * private-data slots carved out of _main's frame and _nprivates is
 * their 4-byte count.  (argv0 is presumably set by ARGBEGIN —
 * NOTE(review): its writer is not visible in this file.)
 */
GLOBL argv0(SB), $8
GLOBL _tos(SB), $8
GLOBL _privates(SB), $8
GLOBL _nprivates(SB), $4

69
sys/src/libc/amd64/atom.s Normal file
View file

@ -0,0 +1,69 @@
/*
 * int ainc(int *p): atomically increment *p; return the new value.
 * Optimistic loop: read the expected value, compute new = exp+1,
 * and CMPXCHG retries from the top if another cpu wrote *p first
 * (CMPXCHGL clears ZF on failure; MOVL does not disturb flags).
 */
TEXT ainc(SB), 1, $0 /* int ainc(int *); */
ainclp:
MOVL (RARG), AX /* exp */
MOVL AX, BX
INCL BX /* new */
LOCK; CMPXCHGL BX, (RARG)
JNZ ainclp
MOVL BX, AX /* return the incremented value */
RET
/*
 * int adec(int *p): atomically decrement *p; return the new value.
 * Same CMPXCHG retry loop as ainc, with DECL instead of INCL.
 */
TEXT adec(SB), 1, $0 /* int adec(int*); */
adeclp:
MOVL (RARG), AX /* exp */
MOVL AX, BX
DECL BX /* new */
LOCK; CMPXCHGL BX, (RARG)
JNZ adeclp
MOVL BX, AX /* return the decremented value */
RET
/*
 * int cas32(u32int *p, u32int ov, u32int nv);
 * int cas(uint *p, int ov, int nv);
 * int casl(ulong *p, ulong ov, ulong nv);
 */
/*
 * 32-bit compare-and-swap: if *p == ov then *p = nv and return 1,
 * else return 0.  All entry points share one body, so uint and
 * ulong are evidently 32 bits wide on this port.
 * MOVL $1 preloads the success return without touching flags, so
 * JNZ still tests CMPXCHG's ZF; DECL turns 1 into 0 on failure.
 */
TEXT cas32(SB), 1, $0
TEXT cas(SB), 1, $0
TEXT casul(SB), 1, $0
TEXT casl(SB), 1, $0 /* back compat */
MOVL exp+8(FP), AX
MOVL new+16(FP), BX
LOCK; CMPXCHGL BX, (RARG)
MOVL $1, AX /* use CMOVLEQ etc. here? */
JNZ _cas32r0
_cas32r1:
RET
_cas32r0:
DECL AX
RET
/*
 * int cas64(u64int *p, u64int ov, u64int nv);
 * int casp(void **p, void *ov, void *nv);
 */
/*
 * 64-bit (and pointer-sized) compare-and-swap; identical scheme to
 * cas32 above but with CMPXCHGQ.  Returns 1 on success, 0 on failure.
 */
TEXT cas64(SB), 1, $0
TEXT casp(SB), 1, $0
MOVQ exp+8(FP), AX
MOVQ new+16(FP), BX
LOCK; CMPXCHGQ BX, (RARG)
MOVL $1, AX /* use CMOVLEQ etc. here? */
JNZ _cas64r0
_cas64r1:
RET
_cas64r0:
DECL AX
RET
/*
 * Fetch-and-set (atomic exchange): store the new value into *p and
 * return the previous contents.  XCHG with a memory operand is
 * implicitly locked on x86; the explicit LOCK prefix is redundant
 * but harmless.
 */
TEXT fas64(SB), 1, $-4
TEXT fasp(SB), 1, $-4
MOVQ p+8(FP), AX
LOCK; XCHGQ AX, (RARG) /* AX <-> *p */
RET
TEXT fas32(SB), 1, $-4
MOVL p+8(FP), AX
LOCK; XCHGL AX, (RARG) /* AX <-> *p */
RET

View file

@ -0,0 +1,5 @@
/*
 * cycles(p): store the 64-bit time stamp counter through p.
 * RDTSC leaves the low 32 bits in AX and the high 32 in DX;
 * the two halves are stored separately to form the 64-bit value.
 */
TEXT cycles(SB),1,$0 /* time stamp counter; cycles since power up */
RDTSC
MOVL AX, 0(RARG) /* lo */
MOVL DX, 4(RARG) /* hi */
RET

View file

@ -0,0 +1,3 @@
/*
 * getcallerpc(&first_arg): return the caller's saved return pc,
 * which sits 8 bytes below the address of its first stack argument.
 */
TEXT getcallerpc(SB), 1, $0
MOVQ -8(RARG), AX
RET

View file

@ -0,0 +1,38 @@
/*
 * setfcr(fcr): install floating-point control bits into MXCSR.
 * The portable fcr exception-enable sense is the inverse of the
 * MXCSR mask bits (0x3F<<7 = 0x1F80), hence the XOR; only the
 * control field (0xFFC0) is taken from the argument, and the low
 * six MXCSR status (sticky exception) bits are preserved.
 */
TEXT setfcr(SB), $4
XORL $(0x3F<<7),RARG /* bits are cleared in csr to enable them */
ANDL $0xFFC0, RARG /* just the fcr bits */
WAIT /* is this needed? */
STMXCSR 0(SP)
MOVL 0(SP), AX
ANDL $~0x3F, AX /* keep everything but the status bits */
ORL RARG, AX
MOVL AX, 0(SP)
LDMXCSR 0(SP)
RET
/*
 * getfcr(): return the control half of MXCSR (bits 0xFFC0), with
 * the exception-mask bits flipped back to the portable enable
 * sense (inverse of the transform in setfcr above).
 */
TEXT getfcr(SB), $4
WAIT
STMXCSR 0(SP)
MOVWLZX 0(SP), AX
ANDL $0xFFC0, AX
XORL $(0x3F<<7),AX
RET
/*
 * getfsr(): return the floating-point status — the low six sticky
 * exception-flag bits of MXCSR.
 */
TEXT getfsr(SB), $4
WAIT
STMXCSR 0(SP)
MOVL 0(SP), AX
ANDL $0x3F, AX
RET
/*
 * setfsr(fsr): replace the six MXCSR sticky exception-flag bits
 * with the argument's low bits, leaving the control bits alone
 * (mirror image of setfcr above).
 */
TEXT setfsr(SB), $4
ANDL $0x3F, RARG /* just the status bits */
WAIT
STMXCSR 0(SP)
MOVL 0(SP), AX
ANDL $~0x3F, AX /* clear old status, keep control */
ORL RARG, AX
MOVL AX, 0(SP)
LDMXCSR 0(SP)
RET

View file

@ -0,0 +1,19 @@
#define NPRIVATES 16
/*
 * C runtime entry point.  On entry AX carries the Tos pointer
 * (saved into _tos) and argc/argv are on the stack.  The frame
 * reserves 16 bytes for the outgoing call arguments plus
 * NPRIVATES 8-byte private-data slots, whose base is published in
 * _privates (count in _nprivates).  main's argc goes in RARG and
 * &argv in the second argument slot.  exits() should not return;
 * it is called in a loop with status "main" just in case.
 */
TEXT _main(SB), 1, $(2*8+NPRIVATES*8)
MOVQ AX, _tos(SB)
LEAQ 16(SP), AX /* private-data slots live above the 16-byte arg area */
MOVQ AX, _privates(SB)
MOVL $NPRIVATES, _nprivates(SB)
MOVL inargc-8(FP), RARG
LEAQ inargv+0(FP), AX
MOVQ AX, 8(SP)
CALL main(SB)
loop:
MOVQ $_exits<>(SB), RARG
CALL exits(SB)
JMP loop
DATA _exits<>+0(SB)/4, $"main"
GLOBL _exits<>+0(SB), $5 /* 4 chars + NUL terminator */

View file

@ -0,0 +1,41 @@
#define NPRIVATES 16
/*
 * Profiling variant of the C runtime entry (linked when profiling
 * is enabled).  Same Tos/privates setup as _main, but it calls
 * _profmain() first and then primes _tos->prof.pp from
 * _tos->prof.next before running main().
 */
TEXT _mainp(SB), 1, $(2*8+NPRIVATES*8)
MOVQ AX, _tos(SB) /* _tos = arg */
LEAQ 16(SP), AX
MOVQ AX, _privates(SB)
MOVL $NPRIVATES, _nprivates(SB)
CALL _profmain(SB) /* _profmain(); */
MOVQ _tos+0(SB), DX /* _tos->prof.pp = _tos->prof.next; */
MOVQ 8(DX), CX
MOVQ CX, (DX)
MOVL inargc-8(FP), RARG /* main(argc, argv); */
LEAQ inargv+0(FP), AX
MOVQ AX, 8(SP)
CALL main(SB)
loop:
MOVQ $_exits<>(SB), RARG
CALL exits(SB)
MOVQ $_profin(SB), AX /* force loading of profile */
JMP loop
/*
 * Helpers for the profiler's function prologue/epilogue hooks:
 * save/restore the argument register around instrumentation.
 * On amd64 the argument is already in a register, so save copies
 * RARG to AX and restore/saveret are no-ops.
 */
TEXT _savearg(SB), 1, $0
MOVQ RARG, AX
RET
TEXT _saveret(SB), 1, $0
RET
TEXT _restorearg(SB), 1, $0
RET /* we want RARG in RARG */
/*
 * _callpc(&first_arg): return pc of our caller's caller
 * (one frame beyond what getcallerpc would give).
 * NOTE(review): offset 8 presumes the profiler's fixed frame
 * layout — confirm against _profin.
 */
TEXT _callpc(SB), 1, $0
MOVQ 8(RARG), AX
RET
DATA _exits<>+0(SB)/4, $"main"
GLOBL _exits<>+0(SB), $5 /* 4 chars + NUL terminator */

View file

@ -0,0 +1,58 @@
/*
 * memccpy(p1, p2, c, n): copy bytes from p2 to p1 up to and
 * including the first occurrence of c, or n bytes if c does not
 * occur.  Returns a pointer just past the copied c in p1, or 0
 * if c was not found.  The copy is forward-only (no overlap
 * handling) and uses 4-byte moves when both pointers are 4-aligned.
 */
TEXT memccpy(SB),$0
MOVL n+24(FP), CX
CMPL CX, $0
JEQ none
MOVQ p2+8(FP), DI
MOVBLZX c+16(FP), AX
CLD
/*
 * find the character in the second string
 */
REPN; SCASB
JEQ found
/*
 * if not found, set count to 'n'
 */
none:
MOVL $0, AX /* return 0: c not found (or n == 0) */
MOVL n+24(FP), BX
JMP memcpy
/*
 * if found, set count to bytes thru character
 */
found:
MOVQ DI, AX
SUBQ p2+8(FP), AX /* BX = count including c */
MOVQ AX, BX
ADDQ RARG, AX /* return p1 + count: just past c */
/*
 * copy the memory
 */
memcpy:
MOVQ RARG, DI
MOVQ p2+8(FP), SI
/*
 * copy whole longs, if aligned
 */
MOVQ DI, DX
ORQ SI, DX
ANDL $3, DX
JNE c3
MOVL BX, CX
SHRQ $2, CX
REP; MOVSL
/*
 * copy the rest, by bytes
 */
ANDL $3, BX
c3:
MOVL BX, CX
REP; MOVSB
RET

View file

@ -0,0 +1,23 @@
/*
 * memchr(p, c, n): return a pointer to the first occurrence of
 * byte c in the n bytes at p, or 0 if absent.  REPN SCASB stops
 * one past the match, hence the SUBQ $1 adjustment.
 */
TEXT memchr(SB),$0
MOVL n+16(FP), CX
CMPL CX, $0
JEQ none
MOVQ RARG, DI
MOVBLZX c+8(FP), AX
CLD
/*
 * SCASB is memchr instruction
 */
REPN; SCASB
JEQ found
none:
MOVL $0, AX
RET
found:
MOVQ DI, AX
SUBQ $1, AX
RET

View file

@ -0,0 +1,52 @@
/*
 * memcmp(p1, p2, n): compare n bytes; return 0 if equal, otherwise
 * +1/-1 according to the first differing byte.  Compares 4 bytes at
 * a time when both pointers are 4-aligned; on a word mismatch it
 * backs up 4 bytes and recompares bytewise to locate the exact
 * byte and its ordering.
 */
TEXT memcmp(SB),$0
MOVL n+16(FP), BX
CMPL BX, $0
JEQ none
MOVQ RARG, DI
MOVQ p2+8(FP), SI
CLD
MOVQ DI, CX
ORQ SI, CX
ANDL $3, CX
JNE c3
/*
 * first by longs
 */
MOVL BX, CX
SHRQ $2, CX
REP; CMPSL
JNE found
/*
 * then by bytes
 */
ANDL $3, BX
c3:
MOVL BX, CX
REP; CMPSB
JNE found1
none:
MOVQ $0, AX
RET
/*
 * if long found,
 * back up and look by bytes
 */
found:
MOVL $4, CX
SUBQ CX, DI
SUBQ CX, SI
REP; CMPSB
found1:
JLS lt /* unsigned byte comparison decides the sign */
MOVQ $-1, AX
RET
lt:
MOVQ $1, AX
RET

View file

@ -0,0 +1,81 @@
/*
 * memcpy(p1, p2, n): copy n bytes from p2 to p1; return p1.
 * Although memcpy need not support overlap, this body (identical
 * to memmove) copies backwards when p2 < p1 < p2+n so overlapping
 * moves still come out right.  A negative n is converted into a
 * deliberate fault by zeroing the source pointer.  Whole 4-byte
 * moves are used when both pointers are 4-aligned.
 */
TEXT memcpy(SB), $0
MOVQ RARG, DI
MOVQ DI, AX /* return value */
MOVQ p2+8(FP), SI
MOVL n+16(FP), BX
CMPL BX, $0
JGT _ok
JEQ _return /* nothing to do if n == 0 */
MOVL $0, SI /* fault if n < 0 */
/*
 * check and set for backwards:
 * (p2 < p1) && ((p2+n) > p1)
 */
_ok:
CMPQ SI, DI
JGT _forward
JEQ _return /* nothing to do if p2 == p1 */
MOVQ SI, DX
ADDQ BX, DX
CMPQ DX, DI
JGT _back
/*
 * copy whole longs if aligned
 */
_forward:
CLD
MOVQ SI, DX
ORQ DI, DX
ANDL $3, DX
JNE c3f
MOVQ BX, CX
SHRQ $2, CX
ANDL $3, BX /* leftover byte count; also sets ZF tested below */
REP; MOVSL
/*
 * copy the rest, by bytes
 */
JEQ _return /* flags set by above ANDL */
c3f:
MOVL BX, CX
REP; MOVSB
RET
/*
 * whole thing backwards has
 * adjusted addresses
 */
_back:
ADDQ BX, DI
ADDQ BX, SI
STD /* string ops now run downwards */
SUBQ $4, DI
SUBQ $4, SI
/*
 * copy whole longs, if aligned
 */
MOVQ DI, DX
ORQ SI, DX
ANDL $3, DX
JNE c3b
MOVL BX, CX
SHRQ $2, CX
ANDL $3, BX
REP; MOVSL
/*
 * copy the rest, by bytes
 */
JEQ _return /* flags set by above ANDL */
c3b:
ADDQ $3, DI /* re-point at the last byte for byte moves */
ADDQ $3, SI
MOVL BX, CX
REP; MOVSB
_return:
RET

View file

@ -0,0 +1,81 @@
/*
 * memmove(p1, p2, n): copy n bytes from p2 to p1 with full overlap
 * support; return p1.  Copies backwards when p2 < p1 < p2+n,
 * forwards otherwise.  A negative n is converted into a deliberate
 * fault by zeroing the source pointer.  Whole 4-byte moves are
 * used when both pointers are 4-aligned.  (Body identical to the
 * memcpy in this directory.)
 */
TEXT memmove(SB), $0
MOVQ RARG, DI
MOVQ DI, AX /* return value */
MOVQ p2+8(FP), SI
MOVL n+16(FP), BX
CMPL BX, $0
JGT _ok
JEQ _return /* nothing to do if n == 0 */
MOVL $0, SI /* fault if n < 0 */
/*
 * check and set for backwards:
 * (p2 < p1) && ((p2+n) > p1)
 */
_ok:
CMPQ SI, DI
JGT _forward
JEQ _return /* nothing to do if p2 == p1 */
MOVQ SI, DX
ADDQ BX, DX
CMPQ DX, DI
JGT _back
/*
 * copy whole longs if aligned
 */
_forward:
CLD
MOVQ SI, DX
ORQ DI, DX
ANDL $3, DX
JNE c3f
MOVQ BX, CX
SHRQ $2, CX
ANDL $3, BX /* leftover byte count; also sets ZF tested below */
REP; MOVSL
/*
 * copy the rest, by bytes
 */
JEQ _return /* flags set by above ANDL */
c3f:
MOVL BX, CX
REP; MOVSB
RET
/*
 * whole thing backwards has
 * adjusted addresses
 */
_back:
ADDQ BX, DI
ADDQ BX, SI
STD /* string ops now run downwards */
SUBQ $4, DI
SUBQ $4, SI
/*
 * copy whole longs, if aligned
 */
MOVQ DI, DX
ORQ SI, DX
ANDL $3, DX
JNE c3b
MOVL BX, CX
SHRQ $2, CX
ANDL $3, BX
REP; MOVSL
/*
 * copy the rest, by bytes
 */
JEQ _return /* flags set by above ANDL */
c3b:
ADDQ $3, DI /* re-point at the last byte for byte moves */
ADDQ $3, SI
MOVL BX, CX
REP; MOVSB
_return:
RET

View file

@ -0,0 +1,41 @@
/*
 * memset(p, c, n): set n bytes at p to c; return p.
 * For n > 9 with a 4-aligned p, the byte is replicated into a
 * 32-bit pattern and stored 4 bytes at a time; the short or
 * unaligned cases just store bytes.
 */
TEXT memset(SB),$0
CLD
MOVQ RARG, DI
MOVBLZX c+8(FP), AX
MOVL n+16(FP), BX
/*
 * if not enough bytes, just set bytes
 */
CMPL BX, $9
JLS c3
/*
 * if not aligned, just set bytes
 */
MOVQ RARG, CX
ANDL $3,CX
JNE c3
/*
 * build word in AX
 */
MOVB AL, AH /* c in both low bytes */
MOVL AX, CX
SHLL $16, CX
ORL CX, AX /* c replicated into all 4 bytes */
/*
 * set whole longs
 */
c1:
MOVQ BX, CX
SHRQ $2, CX
ANDL $3, BX /* leftover byte count */
REP; STOSL
/*
 * set the rest, by bytes
 */
c3:
MOVL BX, CX
REP; STOSB
ret:
MOVQ RARG,AX
RET

41
sys/src/libc/amd64/mkfile Normal file
View file

@ -0,0 +1,41 @@
# mkfile for the machine-dependent (amd64) portion of libc:
# assembles/compiles the files below into /amd64/lib/libc.a
# using the shared mksyslib rules.
objtype=amd64
</$objtype/mkfile
LIB=/$objtype/lib/libc.a
SFILES=\
argv0.s\
atom.s\
cycles.s\
getfcr.s\
main9.s\
main9p.s\
memccpy.s\
memchr.s\
memcmp.s\
memcpy.s\
memmove.s\
memset.s\
muldiv.s\
setjmp.s\
sqrt.s\
strcat.s\
strchr.s\
strcpy.s\
strlen.s\
tas.s\
CFILES=\
_seek.c\
getcallerpc.c\
notejmp.c\
HFILES=/sys/include/libc.h
OFILES=${CFILES:%.c=%.$O} ${SFILES:%.s=%.$O}
UPDATE=mkfile\
$HFILES\
$CFILES\
$SFILES\
</sys/src/cmd/mksyslib

View file

@ -0,0 +1,12 @@
/*
 * umuldiv(a, b, c) / muldiv(a, b, c): compute (a*b)/c keeping the
 * full double-width intermediate product.  MULL/IMULL leave the
 * 64-bit product in DX:AX, which DIVL/IDIVL then divide by c —
 * so DX needs no separate zero/sign extension and a*b may exceed
 * 32 bits without overflow.
 */
TEXT umuldiv(SB), $0
MOVL RARG, AX
MULL b+8(FP)
DIVL c+16(FP)
RET
TEXT muldiv(SB), $0
MOVL RARG, AX
IMULL b+8(FP)
IDIVL c+16(FP)
RET
END

View file

@ -0,0 +1,16 @@
#include <u.h>
#include <libc.h>
#include <ureg.h>

/*
 * notejmp: transfer control from a note handler back to the
 * setjmp point recorded in j.  The handler's saved Ureg is
 * rewritten so that noted(NCONT) resumes at j's pc with j's sp
 * (adjusted past the 8-byte return-pc slot), delivering ret in
 * the return register ax; a zero ret is promoted to 1, matching
 * longjmp's convention.
 */
void
notejmp(void *vr, jmp_buf j, int ret)
{
	struct Ureg *u;

	u = vr;
	u->ax = ret == 0 ? 1 : ret;
	u->sp = j[JMPBUFSP] + 8;
	u->pc = j[JMPBUFPC];
	noted(NCONT);
}

View file

@ -0,0 +1,17 @@
/*
 * longjmp(j, r): resume execution at the setjmp that filled j.
 * jmp_buf holds {sp, pc}; the saved pc is written into the return
 * slot of the restored stack so that RET transfers there with r
 * in AX.  A zero r is promoted to 1, per ANSI.
 */
TEXT longjmp(SB), $0
MOVL r+8(FP), AX
CMPL AX, $0
JNE ok /* ansi: "longjmp(0) => longjmp(1)" */
MOVL $1, AX /* bless their pointed heads */
ok:
MOVQ 0(RARG), SP /* restore sp */
MOVQ 8(RARG), BX /* put return pc on the stack */
MOVQ BX, 0(SP)
RET
/*
 * setjmp(j): record the current sp and return pc in j and return 0.
 * A later longjmp on j returns here again with a nonzero value.
 */
TEXT setjmp(SB), $0
MOVQ SP, 0(RARG) /* store sp */
MOVQ 0(SP), BX /* store return pc */
MOVQ BX, 8(RARG)
MOVL $0, AX /* return 0 */
RET

View file

@ -0,0 +1,4 @@
/*
 * double sqrt(double a): hardware square root via SSE2 SQRTSD.
 * The argument is read from the stack at 0(FP); the result is
 * left in X0 (presumably the FP return register on this port —
 * NOTE(review): confirm against the compiler's calling convention).
 */
TEXT sqrt(SB), $0
MOVSD a+0(FP), X0
SQRTSD X0, X0
RET

View file

@ -0,0 +1,48 @@
/*
 * strcat(p1, p2): append p2 (including its terminating NUL) to the
 * end of p1; return p1.  REPN SCASB with AX=0, CX=-1 scans for the
 * NULs; the copy count BX = strlen(p2)+1.  The copy runs 4 bytes
 * at a time when both pointers are 4-aligned.
 */
TEXT strcat(SB),$0
MOVL $0, AX /* scan for NUL */
MOVQ $-1, CX /* unbounded scan */
CLD
/*
 * find length of second string
 */
MOVQ p2+8(FP), DI
REPN; SCASB
MOVQ DI, BX
SUBQ p2+8(FP), BX /* BX = strlen(p2) + 1 */
/*
 * find end of first string
 */
MOVQ RARG, DI
REPN; SCASB
/*
 * copy the memory
 */
SUBQ $1, DI /* back onto p1's NUL */
MOVQ p2+8(FP), SI
/*
 * copy whole longs, if aligned
 */
MOVQ DI, CX
ORQ SI, CX
ANDL $3, CX
JNE c3
MOVQ BX, CX
SHRQ $2, CX
REP; MOVSL
/*
 * copy the rest, by bytes
 */
ANDL $3, BX
c3:
MOVQ BX, CX
REP; MOVSB
MOVQ RARG, AX
RET

View file

@ -0,0 +1,38 @@
/*
 * strchr(s, c): return a pointer to the first occurrence of c in
 * s, or 0 if absent.  When c == 0 the answer is the terminating
 * NUL itself, found with REPN SCASB; otherwise a simple byte loop
 * watches for both c and the NUL.
 */
TEXT strchr(SB), $0
MOVQ RARG, DI
MOVB c+8(FP), AX
CMPB AX, $0
JEQ l2 /**/
/*
 * char is not null
 */
l1:
MOVB (DI), BX
CMPB BX, $0
JEQ ret0 /* hit end of string: not found */
ADDQ $1, DI
CMPB AX, BX
JNE l1
MOVQ DI, AX
SUBQ $1, AX /* DI stopped one past the match */
RET
/*
 * char is null
 */
l2:
MOVQ $-1, CX
CLD
REPN; SCASB
MOVQ DI, AX
SUBQ $1, AX /* point at the NUL itself */
RET
ret0:
MOVQ $0, AX
RET

View file

@ -0,0 +1,40 @@
/*
 * strcpy(p1, p2): copy p2 (including its terminating NUL) to p1;
 * return p1.  REPN SCASB with AX=0, CX=-1 measures p2 first
 * (BX = strlen(p2)+1), then the copy runs 4 bytes at a time when
 * both pointers are 4-aligned.  No overlap handling.
 */
TEXT strcpy(SB),$0
MOVL $0, AX /* scan for NUL */
MOVQ $-1, CX /* unbounded scan */
CLD
/*
 * find end of second string
 */
MOVQ p2+8(FP), DI
REPN; SCASB
MOVQ DI, BX
SUBQ p2+8(FP), BX /* BX = strlen(p2) + 1 */
/*
 * copy the memory
 */
MOVQ RARG, DI
MOVQ p2+8(FP), SI
/*
 * copy whole longs, if aligned
 */
MOVQ DI, CX
ORQ SI, CX
ANDL $3, CX
JNE c3
MOVQ BX, CX
SHRQ $2, CX
REP; MOVSL
/*
 * copy the rest, by bytes
 */
ANDL $3, BX
c3:
MOVL BX, CX
REP; MOVSB
MOVQ RARG, AX
RET

View file

@ -0,0 +1,16 @@
/*
 * strlen(s): length of s, not counting the NUL.  REPN SCASB with
 * AX=0, CX=-1 scans to the terminator; DI stops one byte past it,
 * hence length = DI - s - 1.
 */
TEXT strlen(SB),$0
MOVL $0, AX /* scan for NUL */
MOVQ $-1, CX /* unbounded scan */
CLD
/*
 * look for end of string
 */
MOVQ RARG, DI
REPN; SCASB
MOVQ DI, AX
SUBQ RARG, AX
SUBQ $1, AX
RET

5
sys/src/libc/amd64/tas.s Normal file
View file

@ -0,0 +1,5 @@
/*
 * _tas(p): test-and-set primitive for spin locks.  Unconditionally
 * store the distinctive lock value 0xdeaddead into *p and return
 * the previous contents (0 means the lock was free).  XCHG with a
 * memory operand is implicitly locked, so no LOCK prefix is needed.
 */
TEXT _tas(SB), 1, $0
MOVL $0xdeaddead,AX
XCHGL AX,(RARG)
RET