From 6580dfee39358646b4d64fe5c007e6d64461d73e Mon Sep 17 00:00:00 2001 From: nickysn Date: Tue, 4 Apr 2017 16:28:54 +0000 Subject: [PATCH] * generate better i386 code for 64-bit shl/shr, by masking the shift count by 63, instead of comparing it to 64 and branching. Note that, although this changes the behaviour of 64-bit shifts by values larger than 64 (when stored in a variable), it actually makes them consistent both with the code generated on x86_64 and with 64-bit shifts by constant on i386 itself. git-svn-id: trunk@35727 - --- compiler/i386/n386mat.pas | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/compiler/i386/n386mat.pas b/compiler/i386/n386mat.pas index 9b9c366eb9..ab7b019bc7 100644 --- a/compiler/i386/n386mat.pas +++ b/compiler/i386/n386mat.pas @@ -119,7 +119,7 @@ implementation var hreg64hi,hreg64lo:Tregister; v : TConstExprInt; - l1,l2,l3:Tasmlabel; + l2,l3:Tasmlabel; begin location_reset(location,LOC_REGISTER,def_cgsize(resultdef)); @@ -177,15 +177,9 @@ implementation { the damned shift instructions work only til a count of 32 } { so we've to do some tricks here } - current_asmdata.getjumplabel(l1); current_asmdata.getjumplabel(l2); current_asmdata.getjumplabel(l3); - emit_const_reg(A_CMP,S_L,64,NR_ECX); - cg.a_jmp_flags(current_asmdata.CurrAsmList,F_L,l1); - emit_reg_reg(A_XOR,S_L,hreg64lo,hreg64lo); - emit_reg_reg(A_XOR,S_L,hreg64hi,hreg64hi); - cg.a_jmp_always(current_asmdata.CurrAsmList,l3); - cg.a_label(current_asmdata.CurrAsmList,l1); + emit_const_reg(A_AND,S_L,63,NR_ECX); emit_const_reg(A_CMP,S_L,32,NR_ECX); cg.a_jmp_flags(current_asmdata.CurrAsmList,F_L,l2); emit_const_reg(A_SUB,S_L,32,NR_ECX);