Allow FoldShiftLdrStr for all sizes of LDR/STR, and disable it for references that post/pre increment the base register on Thumb-2 targets.
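For context, FoldShiftLdrStr is the peephole optimization that folds a preceding shift (a MOV with a shifter operand) into the scaled-register addressing mode of a following LDR/STR. A minimal sketch of the transformation in ARM assembly (register choices are illustrative, and the fold assumes the shifted register is not used afterwards):

    ; before: separate shift, then load
    mov  r1, r2, lsl #2          ; r1 := r2 shl 2
    ldr  r0, [r3, r1]            ; load from r3 + r1

    ; after: the shift is folded into the addressing mode
    ldr  r0, [r3, r2, lsl #2]    ; load from r3 + (r2 shl 2)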

git-svn-id: trunk@26671 -
Jeppe Johansen 2014-02-04 17:29:13 +00:00
parent a72a3f4e01
commit 6861cbcf16

@@ -1358,8 +1358,10 @@ Implementation
       (taicpu(p).oppostfix = PF_NONE) and
       GetNextInstructionUsingReg(p, hp1, taicpu(p).oper[0]^.reg) and
       {Only LDR, LDRB, STR, STRB can handle scaled register indexing}
-      MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition],
-                       [PF_None, PF_B]) and
+      (MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B]) or
+       (GenerateThumb2Code and
+        MatchInstruction(hp1, [A_LDR, A_STR], [taicpu(p).condition], [PF_None, PF_B, PF_SB, PF_H, PF_SH]))
+      ) and
       (
         {If this is address by offset, one of the two registers can be used}
         ((taicpu(hp1).oper[1]^.ref^.addressmode=AM_OFFSET) and
@@ -1373,7 +1375,8 @@ Implementation
         (
           (taicpu(hp1).oper[1]^.ref^.index = taicpu(p).oper[0]^.reg) and
           (taicpu(hp1).oper[1]^.ref^.base <> taicpu(p).oper[0]^.reg)
-        )
+        ) and
+        (not GenerateThumb2Code)
       )
     ) and
     { Only fold if there isn't another shifterop already, and offset is zero. }
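Both guards mirror what the instruction encodings allow. In ARM state only LDR, LDRB, STR and STRB accept a shifted register offset, while Thumb-2 also encodes it for the halfword and signed variants, which is why PF_SB, PF_H and PF_SH are accepted only when GenerateThumb2Code is set. Conversely, Thumb-2 pre/post-indexed loads and stores take only an immediate offset, so the fold is skipped for those references via the (not GenerateThumb2Code) guard. A hedged sketch of the boundary cases (register choices are illustrative):

    ldrh r0, [r3, r2, lsl #2]    ; shifted register offset: encodable in Thumb-2, not in ARM state
    ldr  r0, [r3], r2, lsl #2    ; post-indexed shifted register: ARM state only, no Thumb-2 encoding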