Note that this doesn't mean you can build ClamAV with external LLVM 2.7,
since ClamAV needs some patches from LLVM trunk too! (namely some leak fixes)
commit 7bdecb32f3a8567b8ed48a60f278a6905061813d
Author: Tanya Lattner <tonic@nondot.org>
Date: Tue Apr 27 06:53:59 2010 +0000
Commit 2.7 release notes.
Update getting started guide for 2.7
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_27@102412 91177308-0d34-0410-b5e6-96231b3b80d8
commit 7c95569bb1e47ff3f8af4032d379d10f52e8cdfc
Author: Tanya Lattner <tonic@nondot.org>
Date: Sun Apr 25 19:18:33 2010 +0000
Merge 102239 from mainline to fix PR6835
--This line, and those below, will be ignored--
M Makefile.rules
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_27@102310 91177308-0d34-0410-b5e6-96231b3b80d8
commit efb1d9ee60a09cb15f775eec0ff04a6a789fa878
Author: Tanya Lattner <tonic@nondot.org>
Date: Tue Apr 13 16:34:45 2010 +0000
Merge r100936 from mainline to fix PR6760.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_27@101137 91177308-0d34-0410-b5e6-96231b3b80d8
commit 97e923e813ee633a368d5e930843f5c46a0f3fbf
Author: Tanya Lattner <tonic@nondot.org>
Date: Fri Apr 9 05:05:41 2010 +0000
Merge r100559 from mainline to fix PR6696.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_27@100851 91177308-0d34-0410-b5e6-96231b3b80d8
commit e7004817b6aa6e52b7c2e21a5d7fd96ac957fd5b
Author: Tanya Lattner <tonic@nondot.org>
Date: Mon Apr 5 18:44:01 2010 +0000
Merge 100438 from mainline.
Push const through the regex engine. Fixes some of the warnings in PR6616.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_27@100450 91177308-0d34-0410-b5e6-96231b3b80d8
... | ... |
@@ -1,3 +1,7 @@ |
1 |
+Thu May 13 23:40:11 EEST 2010 (edwin) |
|
2 |
+------------------------------------- |
|
3 |
+ * libclamav/c++/llvm: Update to LLVM 2.7 release. |
|
4 |
+ |
|
1 | 5 |
Thu May 13 23:35:55 EEST 2010 (edwin) |
2 | 6 |
------------------------------------- |
3 | 7 |
* libclamav/bytecode*, unit_tests: add new unit tests for bytecode API and fix bugs. |
... | ... |
@@ -72,11 +72,11 @@ struct match { |
72 | 72 |
struct re_guts *g; |
73 | 73 |
int eflags; |
74 | 74 |
llvm_regmatch_t *pmatch; /* [nsub+1] (0 element unused) */ |
75 |
- char *offp; /* offsets work from here */ |
|
76 |
- char *beginp; /* start of string -- virtual NUL precedes */ |
|
77 |
- char *endp; /* end of string -- virtual NUL here */ |
|
78 |
- char *coldp; /* can be no match starting before here */ |
|
79 |
- char **lastpos; /* [nplus+1] */ |
|
75 |
+ const char *offp; /* offsets work from here */ |
|
76 |
+ const char *beginp; /* start of string -- virtual NUL precedes */ |
|
77 |
+ const char *endp; /* end of string -- virtual NUL here */ |
|
78 |
+ const char *coldp; /* can be no match starting before here */ |
|
79 |
+ const char **lastpos; /* [nplus+1] */ |
|
80 | 80 |
STATEVARS; |
81 | 81 |
states st; /* current states */ |
82 | 82 |
states fresh; /* states for a fresh start */ |
... | ... |
@@ -84,11 +84,14 @@ struct match { |
84 | 84 |
states empty; /* empty set of states */ |
85 | 85 |
}; |
86 | 86 |
|
87 |
-static int matcher(struct re_guts *, char *, size_t, llvm_regmatch_t[], int); |
|
88 |
-static char *dissect(struct match *, char *, char *, sopno, sopno); |
|
89 |
-static char *backref(struct match *, char *, char *, sopno, sopno, sopno, int); |
|
90 |
-static char *fast(struct match *, char *, char *, sopno, sopno); |
|
91 |
-static char *slow(struct match *, char *, char *, sopno, sopno); |
|
87 |
+static int matcher(struct re_guts *, const char *, size_t, |
|
88 |
+ llvm_regmatch_t[], int); |
|
89 |
+static const char *dissect(struct match *, const char *, const char *, sopno, |
|
90 |
+ sopno); |
|
91 |
+static const char *backref(struct match *, const char *, const char *, sopno, |
|
92 |
+ sopno, sopno, int); |
|
93 |
+static const char *fast(struct match *, const char *, const char *, sopno, sopno); |
|
94 |
+static const char *slow(struct match *, const char *, const char *, sopno, sopno); |
|
92 | 95 |
static states step(struct re_guts *, sopno, sopno, states, int, states); |
93 | 96 |
#define MAX_RECURSION 100 |
94 | 97 |
#define BOL (OUT+1) |
... | ... |
@@ -125,18 +128,19 @@ static int nope = 0; |
125 | 125 |
- matcher - the actual matching engine |
126 | 126 |
*/ |
127 | 127 |
static int /* 0 success, REG_NOMATCH failure */ |
128 |
-matcher(struct re_guts *g, char *string, size_t nmatch, llvm_regmatch_t pmatch[], |
|
128 |
+matcher(struct re_guts *g, const char *string, size_t nmatch, |
|
129 |
+ llvm_regmatch_t pmatch[], |
|
129 | 130 |
int eflags) |
130 | 131 |
{ |
131 |
- char *endp; |
|
132 |
+ const char *endp; |
|
132 | 133 |
size_t i; |
133 | 134 |
struct match mv; |
134 | 135 |
struct match *m = &mv; |
135 |
- char *dp; |
|
136 |
+ const char *dp; |
|
136 | 137 |
const sopno gf = g->firststate+1; /* +1 for OEND */ |
137 | 138 |
const sopno gl = g->laststate; |
138 |
- char *start; |
|
139 |
- char *stop; |
|
139 |
+ const char *start; |
|
140 |
+ const char *stop; |
|
140 | 141 |
|
141 | 142 |
/* simplify the situation where possible */ |
142 | 143 |
if (g->cflags&REG_NOSUB) |
... | ... |
@@ -216,7 +220,7 @@ matcher(struct re_guts *g, char *string, size_t nmatch, llvm_regmatch_t pmatch[] |
216 | 216 |
dp = dissect(m, m->coldp, endp, gf, gl); |
217 | 217 |
} else { |
218 | 218 |
if (g->nplus > 0 && m->lastpos == NULL) |
219 |
- m->lastpos = (char **)malloc((g->nplus+1) * |
|
219 |
+ m->lastpos = (const char **)malloc((g->nplus+1) * |
|
220 | 220 |
sizeof(char *)); |
221 | 221 |
if (g->nplus > 0 && m->lastpos == NULL) { |
222 | 222 |
free(m->pmatch); |
... | ... |
@@ -287,21 +291,22 @@ matcher(struct re_guts *g, char *string, size_t nmatch, llvm_regmatch_t pmatch[] |
287 | 287 |
/* |
288 | 288 |
- dissect - figure out what matched what, no back references |
289 | 289 |
*/ |
290 |
-static char * /* == stop (success) always */ |
|
291 |
-dissect(struct match *m, char *start, char *stop, sopno startst, sopno stopst) |
|
290 |
+static const char * /* == stop (success) always */ |
|
291 |
+dissect(struct match *m, const char *start, const char *stop, sopno startst, |
|
292 |
+ sopno stopst) |
|
292 | 293 |
{ |
293 | 294 |
int i; |
294 | 295 |
sopno ss; /* start sop of current subRE */ |
295 | 296 |
sopno es; /* end sop of current subRE */ |
296 |
- char *sp; /* start of string matched by it */ |
|
297 |
- char *stp; /* string matched by it cannot pass here */ |
|
298 |
- char *rest; /* start of rest of string */ |
|
299 |
- char *tail; /* string unmatched by rest of RE */ |
|
297 |
+ const char *sp; /* start of string matched by it */ |
|
298 |
+ const char *stp; /* string matched by it cannot pass here */ |
|
299 |
+ const char *rest; /* start of rest of string */ |
|
300 |
+ const char *tail; /* string unmatched by rest of RE */ |
|
300 | 301 |
sopno ssub; /* start sop of subsubRE */ |
301 | 302 |
sopno esub; /* end sop of subsubRE */ |
302 |
- char *ssp; /* start of string matched by subsubRE */ |
|
303 |
- char *sep; /* end of string matched by subsubRE */ |
|
304 |
- char *oldssp; /* previous ssp */ |
|
303 |
+ const char *ssp; /* start of string matched by subsubRE */ |
|
304 |
+ const char *sep; /* end of string matched by subsubRE */ |
|
305 |
+ const char *oldssp; /* previous ssp */ |
|
305 | 306 |
|
306 | 307 |
AT("diss", start, stop, startst, stopst); |
307 | 308 |
sp = start; |
... | ... |
@@ -360,7 +365,7 @@ dissect(struct match *m, char *start, char *stop, sopno startst, sopno stopst) |
360 | 360 |
esub = es - 1; |
361 | 361 |
/* did innards match? */ |
362 | 362 |
if (slow(m, sp, rest, ssub, esub) != NULL) { |
363 |
- char *dp = dissect(m, sp, rest, ssub, esub); |
|
363 |
+ const char *dp = dissect(m, sp, rest, ssub, esub); |
|
364 | 364 |
(void)dp; /* avoid warning if assertions off */ |
365 | 365 |
assert(dp == rest); |
366 | 366 |
} else /* no */ |
... | ... |
@@ -400,7 +405,7 @@ dissect(struct match *m, char *start, char *stop, sopno startst, sopno stopst) |
400 | 400 |
assert(sep == rest); /* must exhaust substring */ |
401 | 401 |
assert(slow(m, ssp, sep, ssub, esub) == rest); |
402 | 402 |
{ |
403 |
- char *dp = dissect(m, ssp, sep, ssub, esub); |
|
403 |
+ const char *dp = dissect(m, ssp, sep, ssub, esub); |
|
404 | 404 |
(void)dp; /* avoid warning if assertions off */ |
405 | 405 |
assert(dp == sep); |
406 | 406 |
} |
... | ... |
@@ -438,7 +443,7 @@ dissect(struct match *m, char *start, char *stop, sopno startst, sopno stopst) |
438 | 438 |
assert(OP(m->g->strip[esub]) == O_CH); |
439 | 439 |
} |
440 | 440 |
{ |
441 |
- char *dp = dissect(m, sp, rest, ssub, esub); |
|
441 |
+ const char *dp = dissect(m, sp, rest, ssub, esub); |
|
442 | 442 |
(void)dp; /* avoid warning if assertions off */ |
443 | 443 |
assert(dp == rest); |
444 | 444 |
} |
... | ... |
@@ -474,17 +479,17 @@ dissect(struct match *m, char *start, char *stop, sopno startst, sopno stopst) |
474 | 474 |
/* |
475 | 475 |
- backref - figure out what matched what, figuring in back references |
476 | 476 |
*/ |
477 |
-static char * /* == stop (success) or NULL (failure) */ |
|
478 |
-backref(struct match *m, char *start, char *stop, sopno startst, sopno stopst, |
|
479 |
- sopno lev, int rec) /* PLUS nesting level */ |
|
477 |
+static const char * /* == stop (success) or NULL (failure) */ |
|
478 |
+backref(struct match *m, const char *start, const char *stop, sopno startst, |
|
479 |
+ sopno stopst, sopno lev, int rec) /* PLUS nesting level */ |
|
480 | 480 |
{ |
481 | 481 |
int i; |
482 | 482 |
sopno ss; /* start sop of current subRE */ |
483 |
- char *sp; /* start of string matched by it */ |
|
483 |
+ const char *sp; /* start of string matched by it */ |
|
484 | 484 |
sopno ssub; /* start sop of subsubRE */ |
485 | 485 |
sopno esub; /* end sop of subsubRE */ |
486 |
- char *ssp; /* start of string matched by subsubRE */ |
|
487 |
- char *dp; |
|
486 |
+ const char *ssp; /* start of string matched by subsubRE */ |
|
487 |
+ const char *dp; |
|
488 | 488 |
size_t len; |
489 | 489 |
int hard; |
490 | 490 |
sop s; |
... | ... |
@@ -674,18 +679,19 @@ backref(struct match *m, char *start, char *stop, sopno startst, sopno stopst, |
674 | 674 |
/* |
675 | 675 |
- fast - step through the string at top speed |
676 | 676 |
*/ |
677 |
-static char * /* where tentative match ended, or NULL */ |
|
678 |
-fast(struct match *m, char *start, char *stop, sopno startst, sopno stopst) |
|
677 |
+static const char * /* where tentative match ended, or NULL */ |
|
678 |
+fast(struct match *m, const char *start, const char *stop, sopno startst, |
|
679 |
+ sopno stopst) |
|
679 | 680 |
{ |
680 | 681 |
states st = m->st; |
681 | 682 |
states fresh = m->fresh; |
682 | 683 |
states tmp = m->tmp; |
683 |
- char *p = start; |
|
684 |
+ const char *p = start; |
|
684 | 685 |
int c = (start == m->beginp) ? OUT : *(start-1); |
685 | 686 |
int lastc; /* previous c */ |
686 | 687 |
int flagch; |
687 | 688 |
int i; |
688 |
- char *coldp; /* last p after which no match was underway */ |
|
689 |
+ const char *coldp; /* last p after which no match was underway */ |
|
689 | 690 |
|
690 | 691 |
CLEAR(st); |
691 | 692 |
SET1(st, startst); |
... | ... |
@@ -758,18 +764,19 @@ fast(struct match *m, char *start, char *stop, sopno startst, sopno stopst) |
758 | 758 |
/* |
759 | 759 |
- slow - step through the string more deliberately |
760 | 760 |
*/ |
761 |
-static char * /* where it ended */ |
|
762 |
-slow(struct match *m, char *start, char *stop, sopno startst, sopno stopst) |
|
761 |
+static const char * /* where it ended */ |
|
762 |
+slow(struct match *m, const char *start, const char *stop, sopno startst, |
|
763 |
+ sopno stopst) |
|
763 | 764 |
{ |
764 | 765 |
states st = m->st; |
765 | 766 |
states empty = m->empty; |
766 | 767 |
states tmp = m->tmp; |
767 |
- char *p = start; |
|
768 |
+ const char *p = start; |
|
768 | 769 |
int c = (start == m->beginp) ? OUT : *(start-1); |
769 | 770 |
int lastc; /* previous c */ |
770 | 771 |
int flagch; |
771 | 772 |
int i; |
772 |
- char *matchp; /* last p at which a match ended */ |
|
773 |
+ const char *matchp; /* last p at which a match ended */ |
|
773 | 774 |
|
774 | 775 |
AT("slow", start, stop, startst, stopst); |
775 | 776 |
CLEAR(st); |
... | ... |
@@ -155,7 +155,7 @@ llvm_regexec(const llvm_regex_t *preg, const char *string, size_t nmatch, |
155 | 155 |
eflags = GOODFLAGS(eflags); |
156 | 156 |
|
157 | 157 |
if (g->nstates <= (long)(CHAR_BIT*sizeof(states1)) && !(eflags&REG_LARGE)) |
158 |
- return(smatcher(g, (char *)string, nmatch, pmatch, eflags)); |
|
158 |
+ return(smatcher(g, string, nmatch, pmatch, eflags)); |
|
159 | 159 |
else |
160 |
- return(lmatcher(g, (char *)string, nmatch, pmatch, eflags)); |
|
160 |
+ return(lmatcher(g, string, nmatch, pmatch, eflags)); |
|
161 | 161 |
} |
... | ... |
@@ -67,6 +67,12 @@ TargetAsmBackend *createX86_64AsmBackend(const Target &, MCAssembler &); |
67 | 67 |
/// |
68 | 68 |
FunctionPass *createEmitX86CodeToMemory(); |
69 | 69 |
|
70 |
+/// createX86MaxStackAlignmentHeuristicPass - This function returns a pass |
|
71 |
+/// which determines whether the frame pointer register should be |
|
72 |
+/// reserved in case dynamic stack alignment is later required. |
|
73 |
+/// |
|
74 |
+FunctionPass *createX86MaxStackAlignmentHeuristicPass(); |
|
75 |
+ |
|
70 | 76 |
extern Target TheX86_32Target, TheX86_64Target; |
71 | 77 |
|
72 | 78 |
} // End llvm namespace |
... | ... |
@@ -52,6 +52,10 @@ class X86MachineFunctionInfo : public MachineFunctionInfo { |
52 | 52 |
/// relocation models. |
53 | 53 |
unsigned GlobalBaseReg; |
54 | 54 |
|
55 |
+ /// ReserveFP - whether the function should reserve the frame pointer |
|
56 |
+ /// when allocating, even if there may not actually be a frame pointer used. |
|
57 |
+ bool ReserveFP; |
|
58 |
+ |
|
55 | 59 |
public: |
56 | 60 |
X86MachineFunctionInfo() : ForceFramePointer(false), |
57 | 61 |
CalleeSavedFrameSize(0), |
... | ... |
@@ -68,7 +72,8 @@ public: |
68 | 68 |
ReturnAddrIndex(0), |
69 | 69 |
TailCallReturnAddrDelta(0), |
70 | 70 |
SRetReturnReg(0), |
71 |
- GlobalBaseReg(0) {} |
|
71 |
+ GlobalBaseReg(0), |
|
72 |
+ ReserveFP(false) {} |
|
72 | 73 |
|
73 | 74 |
bool getForceFramePointer() const { return ForceFramePointer;} |
74 | 75 |
void setForceFramePointer(bool forceFP) { ForceFramePointer = forceFP; } |
... | ... |
@@ -90,6 +95,9 @@ public: |
90 | 90 |
|
91 | 91 |
unsigned getGlobalBaseReg() const { return GlobalBaseReg; } |
92 | 92 |
void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; } |
93 |
+ |
|
94 |
+ bool getReserveFP() const { return ReserveFP; } |
|
95 |
+ void setReserveFP(bool reserveFP) { ReserveFP = reserveFP; } |
|
93 | 96 |
}; |
94 | 97 |
|
95 | 98 |
} // End llvm namespace |
... | ... |
@@ -1489,3 +1489,46 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) { |
1489 | 1489 |
} |
1490 | 1490 |
|
1491 | 1491 |
#include "X86GenRegisterInfo.inc" |
1492 |
+ |
|
1493 |
+namespace { |
|
1494 |
+ struct MSAH : public MachineFunctionPass { |
|
1495 |
+ static char ID; |
|
1496 |
+ MSAH() : MachineFunctionPass(&ID) {} |
|
1497 |
+ |
|
1498 |
+ virtual bool runOnMachineFunction(MachineFunction &MF) { |
|
1499 |
+ const X86TargetMachine *TM = |
|
1500 |
+ static_cast<const X86TargetMachine *>(&MF.getTarget()); |
|
1501 |
+ const X86RegisterInfo *X86RI = TM->getRegisterInfo(); |
|
1502 |
+ MachineRegisterInfo &RI = MF.getRegInfo(); |
|
1503 |
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); |
|
1504 |
+ unsigned StackAlignment = X86RI->getStackAlignment(); |
|
1505 |
+ |
|
1506 |
+ // Be over-conservative: scan over all vreg defs and find whether vector |
|
1507 |
+ // registers are used. If yes, there is a possibility that vector register |
|
1508 |
+ // will be spilled and thus require dynamic stack realignment. |
|
1509 |
+ for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister; |
|
1510 |
+ RegNum < RI.getLastVirtReg(); ++RegNum) |
|
1511 |
+ if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) { |
|
1512 |
+ FuncInfo->setReserveFP(true); |
|
1513 |
+ return true; |
|
1514 |
+ } |
|
1515 |
+ |
|
1516 |
+ // Nothing to do |
|
1517 |
+ return false; |
|
1518 |
+ } |
|
1519 |
+ |
|
1520 |
+ virtual const char *getPassName() const { |
|
1521 |
+ return "X86 Maximal Stack Alignment Check"; |
|
1522 |
+ } |
|
1523 |
+ |
|
1524 |
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const { |
|
1525 |
+ AU.setPreservesCFG(); |
|
1526 |
+ MachineFunctionPass::getAnalysisUsage(AU); |
|
1527 |
+ } |
|
1528 |
+ }; |
|
1529 |
+ |
|
1530 |
+ char MSAH::ID = 0; |
|
1531 |
+} |
|
1532 |
+ |
|
1533 |
+FunctionPass* |
|
1534 |
+llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); } |
... | ... |
@@ -352,11 +352,12 @@ def GR8 : RegisterClass<"X86", [i8], 8, |
352 | 352 |
const TargetMachine &TM = MF.getTarget(); |
353 | 353 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
354 | 354 |
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>(); |
355 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
355 | 356 |
// Does the function dedicate RBP / EBP to being a frame ptr? |
356 | 357 |
if (!Subtarget.is64Bit()) |
357 | 358 |
// In 32-mode, none of the 8-bit registers aliases EBP or ESP. |
358 | 359 |
return begin() + 8; |
359 |
- else if (RI->hasFP(MF)) |
|
360 |
+ else if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
360 | 361 |
// If so, don't allocate SPL or BPL. |
361 | 362 |
return array_endof(X86_GR8_AO_64) - 1; |
362 | 363 |
else |
... | ... |
@@ -396,9 +397,10 @@ def GR16 : RegisterClass<"X86", [i16], 16, |
396 | 396 |
const TargetMachine &TM = MF.getTarget(); |
397 | 397 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
398 | 398 |
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>(); |
399 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
399 | 400 |
if (Subtarget.is64Bit()) { |
400 | 401 |
// Does the function dedicate RBP to being a frame ptr? |
401 |
- if (RI->hasFP(MF)) |
|
402 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
402 | 403 |
// If so, don't allocate SP or BP. |
403 | 404 |
return array_endof(X86_GR16_AO_64) - 1; |
404 | 405 |
else |
... | ... |
@@ -406,7 +408,7 @@ def GR16 : RegisterClass<"X86", [i16], 16, |
406 | 406 |
return array_endof(X86_GR16_AO_64); |
407 | 407 |
} else { |
408 | 408 |
// Does the function dedicate EBP to being a frame ptr? |
409 |
- if (RI->hasFP(MF)) |
|
409 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
410 | 410 |
// If so, don't allocate SP or BP. |
411 | 411 |
return begin() + 6; |
412 | 412 |
else |
... | ... |
@@ -447,9 +449,10 @@ def GR32 : RegisterClass<"X86", [i32], 32, |
447 | 447 |
const TargetMachine &TM = MF.getTarget(); |
448 | 448 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
449 | 449 |
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>(); |
450 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
450 | 451 |
if (Subtarget.is64Bit()) { |
451 | 452 |
// Does the function dedicate RBP to being a frame ptr? |
452 |
- if (RI->hasFP(MF)) |
|
453 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
453 | 454 |
// If so, don't allocate ESP or EBP. |
454 | 455 |
return array_endof(X86_GR32_AO_64) - 1; |
455 | 456 |
else |
... | ... |
@@ -457,7 +460,7 @@ def GR32 : RegisterClass<"X86", [i32], 32, |
457 | 457 |
return array_endof(X86_GR32_AO_64); |
458 | 458 |
} else { |
459 | 459 |
// Does the function dedicate EBP to being a frame ptr? |
460 |
- if (RI->hasFP(MF)) |
|
460 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
461 | 461 |
// If so, don't allocate ESP or EBP. |
462 | 462 |
return begin() + 6; |
463 | 463 |
else |
... | ... |
@@ -484,9 +487,11 @@ def GR64 : RegisterClass<"X86", [i64], 64, |
484 | 484 |
const TargetMachine &TM = MF.getTarget(); |
485 | 485 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
486 | 486 |
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>(); |
487 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
487 | 488 |
if (!Subtarget.is64Bit()) |
488 | 489 |
return begin(); // None of these are allocatable in 32-bit. |
489 |
- if (RI->hasFP(MF)) // Does the function dedicate RBP to being a frame ptr? |
|
490 |
+ // Does the function dedicate RBP to being a frame ptr? |
|
491 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
490 | 492 |
return end()-3; // If so, don't allocate RIP, RSP or RBP |
491 | 493 |
else |
492 | 494 |
return end()-2; // If not, just don't allocate RIP or RSP |
... | ... |
@@ -582,8 +587,9 @@ def GR16_NOREX : RegisterClass<"X86", [i16], 16, |
582 | 582 |
GR16_NOREXClass::allocation_order_end(const MachineFunction &MF) const { |
583 | 583 |
const TargetMachine &TM = MF.getTarget(); |
584 | 584 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
585 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
585 | 586 |
// Does the function dedicate RBP / EBP to being a frame ptr? |
586 |
- if (RI->hasFP(MF)) |
|
587 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
587 | 588 |
// If so, don't allocate SP or BP. |
588 | 589 |
return end() - 2; |
589 | 590 |
else |
... | ... |
@@ -604,8 +610,9 @@ def GR32_NOREX : RegisterClass<"X86", [i32], 32, |
604 | 604 |
GR32_NOREXClass::allocation_order_end(const MachineFunction &MF) const { |
605 | 605 |
const TargetMachine &TM = MF.getTarget(); |
606 | 606 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
607 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
607 | 608 |
// Does the function dedicate RBP / EBP to being a frame ptr? |
608 |
- if (RI->hasFP(MF)) |
|
609 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
609 | 610 |
// If so, don't allocate ESP or EBP. |
610 | 611 |
return end() - 2; |
611 | 612 |
else |
... | ... |
@@ -626,8 +633,9 @@ def GR64_NOREX : RegisterClass<"X86", [i64], 64, |
626 | 626 |
GR64_NOREXClass::allocation_order_end(const MachineFunction &MF) const { |
627 | 627 |
const TargetMachine &TM = MF.getTarget(); |
628 | 628 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
629 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
629 | 630 |
// Does the function dedicate RBP to being a frame ptr? |
630 |
- if (RI->hasFP(MF)) |
|
631 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
631 | 632 |
// If so, don't allocate RIP, RSP or RBP. |
632 | 633 |
return end() - 3; |
633 | 634 |
else |
... | ... |
@@ -668,9 +676,10 @@ def GR32_NOSP : RegisterClass<"X86", [i32], 32, |
668 | 668 |
const TargetMachine &TM = MF.getTarget(); |
669 | 669 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
670 | 670 |
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>(); |
671 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
671 | 672 |
if (Subtarget.is64Bit()) { |
672 | 673 |
// Does the function dedicate RBP to being a frame ptr? |
673 |
- if (RI->hasFP(MF)) |
|
674 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
674 | 675 |
// If so, don't allocate EBP. |
675 | 676 |
return array_endof(X86_GR32_NOSP_AO_64) - 1; |
676 | 677 |
else |
... | ... |
@@ -678,7 +687,7 @@ def GR32_NOSP : RegisterClass<"X86", [i32], 32, |
678 | 678 |
return array_endof(X86_GR32_NOSP_AO_64); |
679 | 679 |
} else { |
680 | 680 |
// Does the function dedicate EBP to being a frame ptr? |
681 |
- if (RI->hasFP(MF)) |
|
681 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
682 | 682 |
// If so, don't allocate EBP. |
683 | 683 |
return begin() + 6; |
684 | 684 |
else |
... | ... |
@@ -703,9 +712,11 @@ def GR64_NOSP : RegisterClass<"X86", [i64], 64, |
703 | 703 |
const TargetMachine &TM = MF.getTarget(); |
704 | 704 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
705 | 705 |
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>(); |
706 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
706 | 707 |
if (!Subtarget.is64Bit()) |
707 | 708 |
return begin(); // None of these are allocatable in 32-bit. |
708 |
- if (RI->hasFP(MF)) // Does the function dedicate RBP to being a frame ptr? |
|
709 |
+ // Does the function dedicate RBP to being a frame ptr? |
|
710 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
709 | 711 |
return end()-1; // If so, don't allocate RBP |
710 | 712 |
else |
711 | 713 |
return end(); // If not, any reg in this class is ok. |
... | ... |
@@ -726,8 +737,9 @@ def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64, |
726 | 726 |
{ |
727 | 727 |
const TargetMachine &TM = MF.getTarget(); |
728 | 728 |
const TargetRegisterInfo *RI = TM.getRegisterInfo(); |
729 |
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
|
729 | 730 |
// Does the function dedicate RBP to being a frame ptr? |
730 |
- if (RI->hasFP(MF)) |
|
731 |
+ if (RI->hasFP(MF) || MFI->getReserveFP()) |
|
731 | 732 |
// If so, don't allocate RBP. |
732 | 733 |
return end() - 1; |
733 | 734 |
else |
... | ... |
@@ -160,6 +160,7 @@ bool X86TargetMachine::addInstSelector(PassManagerBase &PM, |
160 | 160 |
|
161 | 161 |
bool X86TargetMachine::addPreRegAlloc(PassManagerBase &PM, |
162 | 162 |
CodeGenOpt::Level OptLevel) { |
163 |
+ PM.add(createX86MaxStackAlignmentHeuristicPass()); |
|
163 | 164 |
return false; // -print-machineinstr shouldn't print after this. |
164 | 165 |
} |
165 | 166 |
|
... | ... |
@@ -955,7 +955,8 @@ static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Instruction *V, |
955 | 955 |
continue; // Otherwise, storing through it, or storing into GV... fine. |
956 | 956 |
} |
957 | 957 |
|
958 |
- if (isa<GetElementPtrInst>(Inst)) { |
|
958 |
+ // Must index into the array and into the struct. |
|
959 |
+ if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) { |
|
959 | 960 |
if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs)) |
960 | 961 |
return false; |
961 | 962 |
continue; |