Don't do ARM FoldShiftLdrStr peephole optimization if there's an offset in the reference.

Use UXTH+UXTB instructions instead of two shifts on processors that support them.
Eliminate internalerror when constant pointers are typecast as arrays.

git-svn-id: trunk@26647 -
Jeppe Johansen 2014-02-01 13:29:35 +00:00
parent 2a7dcf6c7b
commit 07b2982e77
3 changed files with 15 additions and 2 deletions


@@ -1376,7 +1376,8 @@ Implementation
         )
       )
     ) and
-    { Only fold if there isn't another shifterop already. }
+    { Only fold if there isn't another shifterop already, and offset is zero. }
+    (taicpu(hp1).oper[1]^.ref^.offset = 0) and
     (taicpu(hp1).oper[1]^.ref^.shiftmode = SM_None) and
     not(RegModifiedBetween(taicpu(p).oper[1]^.reg,p,hp1)) and
     RegEndOfLife(taicpu(p).oper[0]^.reg, taicpu(hp1)) then
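To illustrate the new guard (registers are illustrative): an ARM load/store address can encode either an immediate offset or a shifted index register, but not both at once, so the FoldShiftLdrStr transformation is only valid when the reference's offset is zero:

    mov   r1, r0, lsl #2
    ldr   r2, [r3, r1]          @ offset = 0: can become ldr r2, [r3, r0, lsl #2]

With a nonzero offset in the reference there is no [Rn, Rm, LSL #imm, #ofs] encoding to fold into, so such sequences are now left alone.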


@@ -1022,7 +1022,15 @@ unit cgcpu;
           ((lsb = 0) or ((lsb + width) = 32)) then
           begin
             shifterop_reset(so);
-            if lsb = 0 then
+            if (width = 16) and
+               (lsb = 0) and
+               (current_settings.cputype >= cpu_armv6) then
+              list.concat(taicpu.op_reg_reg(A_UXTH,dst,src))
+            else if (width = 8) and
+               (lsb = 0) and
+               (current_settings.cputype >= cpu_armv6) then
+              list.concat(taicpu.op_reg_reg(A_UXTB,dst,src))
+            else if lsb = 0 then
               begin
                 so.shiftmode:=SM_LSL;
                 so.shiftimm:=32-width;
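The intended effect on the emitted code, sketched with illustrative registers: a zero-extension of the low 16 (or 8) bits previously went through the generic lsb = 0 path as a shift pair, and on ARMv6 and later is now a single instruction:

    @ generic path (two shifts):
    mov   r0, r1, lsl #16
    mov   r0, r0, lsr #16
    @ ARMv6+:
    uxth  r0, r1                @ uxtb r0, r1 for the 8-bit case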


@@ -378,6 +378,10 @@ interface
               if left.location.loc in [LOC_REFERENCE,LOC_CREFERENCE] then
                 location_freetemp(current_asmdata.CurrAsmList,left.location);
             end;
+          LOC_CONSTANT:
+            begin
+              location.reference.offset:=left.location.value;
+            end
           else
             internalerror(2002032216);
         end;
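A minimal sketch of the kind of source the commit message describes (the address and variable name are arbitrary, and whether a given expression reaches this branch depends on how the front end resolves the typecast): indexing a constant-valued typed pointer makes the compiler typecast the pointer to an array internally, and the pointer's LOC_CONSTANT location previously fell through to internalerror(2002032216). It now becomes a reference whose offset is the constant, i.e. an absolute address.

    var
      b: byte;
    begin
      { pbyte($40000000) is a constant pointer; indexing it typecasts it to an array }
      b := pbyte($40000000)[5];
    end.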