- Make all x86 assembly ML-compatible (see the macro sketch below)
- Remove memcpy: it was duplicated from memmove, so copy the label instead and let both symbols share one body (visible in the diff below, where _memcpy: is stacked on _memmove:)
- Guard some code against compilation on MSVC, as these functions are intrinsics on MSVC and cannot be implemented there (see the guard sketch below)
- Fix some x64 assembly (don't modify the stack below rsp; see the red-zone sketch below)

svn path=/branches/cmake-bringup/; revision=49421
Timo Kreuzer 2010-11-02 00:06:33 +00:00
parent 0d7a1c46a0
commit a3623f23de
49 changed files with 912 additions and 918 deletions
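
The ML compatibility in the first bullet comes from writing the sources in Intel operand order and hiding the remaining assembler differences behind macros. A minimal sketch of the GAS side of that idea, assuming a header along the lines of reactos/asm.h and covering only two of the directives visible in the diff below (the real header handles far more):

/* Let GAS parse Intel-style operands without % register prefixes */
.intel_syntax noprefix

/* Emulate the MASM PUBLIC directive */
.macro PUBLIC symbol
    .global \symbol
.endm

/* MASM sources must finish with END; expand it to nothing under GAS */
.macro END
.endm

With macros like these in place, the same Intel-syntax source can assemble under both GAS and ML.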
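
The MSVC guard in the third bullet follows the usual preprocessor pattern: only emit a definition when the compiler already provides the function as an intrinsic elsewhere. A hedged sketch of that pattern; where exactly the guards sit in the commit, and which symbols they cover, may differ:

#include <reactos/asm.h>

/* On MSVC these functions are intrinsics and cannot be implemented,
   so only define them for the other toolchains */
#ifndef _MSC_VER
PUBLIC _memcpy
.code
_memcpy:
    /* ... implementation as in the diff below ... */
    ret
#endif
END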
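
The x64 fix in the last bullet concerns the missing red zone: unlike the System V ABI, the Windows x64 ABI makes no guarantee about memory below rsp, so an interrupt can clobber it at any time. A minimal before/after sketch with an illustrative 8-byte scratch slot:

/* broken: the slot below rsp may be overwritten by an interrupt */
mov [rsp - 8], rax

/* fixed: allocate the slot first, then store at or above rsp */
sub rsp, 8
mov [rsp], rax
/* ... use the saved value ... */
add rsp, 8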

@@ -1,114 +1,120 @@
 /*
  * void *memmove (void *to, const void *from, size_t count)
  * void *memcpy (void *to, const void *from, size_t count)
  *
- * NOTE: This code is duplicated in memcpy_asm.s
+ * NOTE: This code is a duplicate of memmove function from memmove_asm.s
  */
-.globl _memmove
+#include <reactos/asm.h>
+PUBLIC _memcpy
+PUBLIC _memmove
+.code
+_memcpy:
 _memmove:
-    push %ebp
-    mov %esp,%ebp
+    push ebp
+    mov ebp, esp
-    push %esi
-    push %edi
+    push esi
+    push edi
-    mov 8(%ebp),%edi
-    mov 12(%ebp),%esi
-    mov 16(%ebp),%ecx
+    mov edi, [ebp + 8]
+    mov esi, [ebp + 12]
+    mov ecx, [ebp + 16]
-    cmp %esi,%edi
+    cmp edi, esi
     jbe .CopyUp
-    mov %ecx,%eax
-    add %esi,%eax
-    cmp %eax,%edi
-    jb .CopyDown
+    mov eax, ecx
+    add eax, esi
+    cmp edi, eax
+    jb .CopyDown
 .CopyUp:
     cld
-    cmp $16,%ecx
-    jb .L1
-    mov %ecx,%edx
-    test $3,%edi
-    je .L2
+    cmp ecx, 16
+    jb .L1
+    mov edx, ecx
+    test edi, 3
+    je .L2
 /*
  * Make the destination dword aligned
  */
-    mov %edi,%ecx
-    and $3,%ecx
-    sub $5,%ecx
-    not %ecx
-    sub %ecx,%edx
-    rep movsb
-    mov %edx,%ecx
+    mov ecx, edi
+    and ecx, 3
+    sub ecx, 5
+    not ecx
+    sub edx, ecx
+    rep movsb
+    mov ecx, edx
 .L2:
-    shr $2,%ecx
-    rep movsl
-    mov %edx,%ecx
-    and $3,%ecx
+    shr ecx, 2
+    rep movsd
+    mov ecx, edx
+    and ecx, 3
 .L1:
-    test %ecx,%ecx
-    je .L3
-    rep movsb
+    test ecx, ecx
+    je .L3
+    rep movsb
 .L3:
-    mov 8(%ebp),%eax
-    pop %edi
-    pop %esi
+    mov eax, [ebp + 8]
+    pop edi
+    pop esi
     leave
     ret
 .CopyDown:
-    std
+    std
-    add %ecx,%edi
-    add %ecx,%esi
+    add edi, ecx
+    add esi, ecx
-    cmp $16,%ecx
-    jb .L4
-    mov %ecx,%edx
-    test $3,%edi
-    je .L5
+    cmp ecx, 16
+    jb .L4
+    mov edx, ecx
+    test edi, 3
+    je .L5
 /*
  * Make the destination dword aligned
  */
-    mov %edi,%ecx
-    and $3,%ecx
-    sub %ecx,%edx
-    dec %esi
-    dec %edi
-    rep movsb
-    mov %edx,%ecx
+    mov ecx, edi
+    and ecx, 3
+    sub edx, ecx
+    dec esi
+    dec edi
+    rep movsb
+    mov ecx, edx
-    sub $3,%esi
-    sub $3,%edi
+    sub esi, 3
+    sub edi, 3
 .L6:
-    shr $2,%ecx
-    rep movsl
-    mov %edx,%ecx
-    and $3,%ecx
-    je .L7
-    add $3,%esi
-    add $3,%edi
+    shr ecx, 2
+    rep movsd
+    mov ecx, edx
+    and ecx, 3
+    je .L7
+    add esi, 3
+    add edi, 3
 .L8:
-    rep movsb
+    rep movsb
 .L7:
     cld
-    mov 8(%ebp),%eax
-    pop %edi
-    pop %esi
+    mov eax, [ebp + 8]
+    pop edi
+    pop esi
     leave
     ret
 .L5:
-    sub $4,%edi
-    sub $4,%esi
-    jmp .L6
-.L4:
-    test %ecx,%ecx
-    je .L7
-    dec %esi
-    dec %edi
-    jmp .L8
+    sub edi, 4
+    sub esi, 4
+    jmp .L6
+.L4:
+    test ecx, ecx
+    je .L7
+    dec esi
+    dec edi
+    jmp .L8
+END