- corrected 64-bit assembler code (rbx, a callee-saved register, was not preserved in rcl2_x64/rcr2_x64)

- minor optimization: return the carry flag via a conditional branch (jc) instead of setc/movzx in the x64 add/sub routines

git-svn-id: svn://ttmath.org/publicrep/ttmath/branches/chk@147 e52654a7-88a9-db11-a3e9-0013d4bc506e
This commit is contained in:
Christian Kaiser 2009-05-15 14:42:43 +00:00
parent a8c3a506ea
commit 9b576ddbe2
3 changed files with 66 additions and 46 deletions

View File

@ -3443,13 +3443,13 @@ private:
*/
int FromString_ReadScientificIfExists(const tchar_t * & source)
{
int c = 0;
uint c = 0;
bool scientific_read = false;
const tchar_t * before_scientific = source;
if( FromString_TestScientific(source) )
c += (size_t)FromString_ReadPartScientific( source, scientific_read );
c += FromString_ReadPartScientific( source, scientific_read );
if( !scientific_read )
source = before_scientific;

View File

@ -104,7 +104,7 @@ namespace ttmath
inc edx
dec ecx
jnz p
jnz p
setc al
movzx eax, al
@ -192,12 +192,12 @@ namespace ttmath
p:
add [ebx+edx*4], eax
jnc end
jnc end
mov eax, 1
inc edx
dec ecx
jnz p
jnz p
end:
setc al
@ -302,12 +302,12 @@ namespace ttmath
p:
adc [ebx+edx*4], eax
jnc end
jnc end
mov eax, 0
inc edx
dec ecx
jnz p
jnz p
end:
setc al
@ -398,7 +398,7 @@ namespace ttmath
inc edx
dec ecx
jnz p
jnz p
setc al
movzx eax, al
@ -482,12 +482,12 @@ namespace ttmath
p:
sub [ebx+edx*4], eax
jnc end
jnc end
mov eax, 1
inc edx
dec ecx
jnz p
jnz p
end:
setc al
@ -569,7 +569,7 @@ namespace ttmath
inc edx
dec ecx
jnz p
jnz p
setc dl
movzx eax, dl
@ -644,7 +644,7 @@ namespace ttmath
rcr dword ptr [ebx+ecx*4-4], 1
dec ecx
jnz p
jnz p
setc cl
movzx eax, cl
@ -724,7 +724,7 @@ namespace ttmath
or eax, eax
cmovnz esi, [mask] // if c then old value = mask
p:
p:
rol dword ptr [ebx+edx*4], cl
mov eax, [ebx+edx*4]
@ -735,7 +735,7 @@ namespace ttmath
inc edx
dec edi
jnz p
jnz p
and eax, 1
}
@ -850,7 +850,7 @@ namespace ttmath
dec edx
dec edi
jnz p
jnz p
rol eax, 1 // bit 31 will be bit 0
and eax, 1
@ -974,14 +974,14 @@ namespace ttmath
#ifndef __GNUC__
__asm
{
mov eax, [v]
mov ebx, [bit]
bts eax, ebx
mov [v], eax
mov eax, [v]
mov ebx, [bit]
bts eax, ebx
mov [v], eax
setc bl
movzx ebx, bl
mov eax, ebx
setc bl
movzx ebx, bl
mov eax, ebx
}
#endif
@ -1034,11 +1034,11 @@ namespace ttmath
__asm
{
mov eax, [a]
mul dword ptr [b]
mov eax, [a]
mul dword ptr [b]
mov [result2_], edx
mov [result1_], eax
mov [result2_], edx
mov [result1_], eax
}
#endif

View File

@ -42,11 +42,14 @@ adc_x64 PROC
jnz loop1
setc al
movzx rax, al
jc return_1 ; most of the times, there will be NO carry (I hope)
xor rax, rax
ret
return_1:
mov rax, 1
ret
ret
adc_x64 ENDP
;----------------------------------------
@ -73,10 +76,13 @@ loop1:
jnz loop1
done:
setc al
movzx rax, al
ret
jc return_1 ; most of the times, there will be NO carry (I hope)
xor rax, rax
ret
return_1:
mov rax, 1
ret
addindexed_x64 ENDP
@ -110,10 +116,13 @@ loop1:
jnz loop1
done:
setc al
movzx rax, al
ret
jc return_1 ; most of the times, there will be NO carry (I hope)
xor rax, rax
ret
return_1:
mov rax, 1
ret
addindexed2_x64 ENDP
@ -144,10 +153,13 @@ sbb_x64 PROC
jnz loop1
setc al
movzx rax, al
ret
jc return_1 ; most of the times, there will be NO carry (I hope)
xor rax, rax
ret
return_1:
mov rax, 1
ret
sbb_x64 ENDP
@ -174,10 +186,13 @@ loop1:
jnz loop1
done:
setc al
movzx rax, al
ret
jc return_1 ; most of the times, there will be NO carry (I hope)
xor rax, rax
ret
return_1:
mov rax, 1
ret
subindexed_x64 ENDP
@ -270,6 +285,8 @@ rcl2_x64 PROC
; r8 = bits
; r9 = c
push rbx
mov r10, rcx ; r10 = p1
xor rax, rax
@ -301,6 +318,7 @@ loop1:
jnz loop1
and rax, 1
pop rbx
ret
rcl2_x64 ENDP
@ -317,6 +335,7 @@ rcr2_x64 PROC
; r8 = bits
; r9 = c
push rbx
mov r10, rcx ; r10 = p1
xor rax, rax
@ -350,6 +369,7 @@ loop1:
rol rax, 1
and rax, 1
pop rbx
ret