This patch is a combination of the following 8 commits:
commit e944354ec05891474b0d204c6c239c04ee7b527b
Author: Robin Dapp <rdapp@linux.ibm.com>
Date: Mon Aug 26 10:18:24 2019 +0000
[PATCH 1/2] Allow folding all statements.
commit df7d46d925c7baca7bf9961aee900876d8aef225
Author: Robin Dapp <rdapp@linux.ibm.com>
Date: Mon Aug 26 10:24:44 2019 +0000
[PATCH 2/2] Add simplify rule for wrapped addition.
commit 6c14d008122fcee4157be79a60f8d6685869ad19
Author: Robin Dapp <rdapp@linux.ibm.com>
Date: Tue Aug 27 12:08:58 2019 +0000
re PR testsuite/91549 (gcc.dg/wrapped-binop-simplify.c fails starting with r274925)
commit 129bd066049f065e522990e63bb10ff92b3c018d
Author: Jakub Jelinek <jakub@redhat.com>
Date: Tue Dec 3 10:20:43 2019 +0100
re PR tree-optimization/92734 (Missing match.pd simplification done by fold_binary_loc on generic)
commit 526b4c716a340ee9464965e63eee2b9954fe21f1
Author: Jakub Jelinek <jakub@redhat.com>
Date: Wed Dec 4 10:38:48 2019 +0100
re PR tree-optimization/92734 (Missing match.pd simplification done by fold_binary_loc on generic)
commit 28fabd43d9d249134244eb9d7815917c7ae44b64
Author: Richard Biener <rguenther@suse.de>
Date: Fri Dec 6 10:25:08 2019 +0000
genmatch.c (enum tree_code): Remove CONVERT{0,1,2} and VIEW_CONVERT{0,1,2}.
commit e150da383346adc762bc904342f9877f2f071265
Author: Richard Biener <rguenther@suse.de>
Date: Fri Dec 6 11:44:27 2019 +0000
match.pd (nop_convert): Remove empty match.
commit 496f4f884716ae061f771a62e44868a32dbd502f
Author: Jakub Jelinek <jakub@redhat.com>
Date: Mon May 4 11:01:08 2020 +0200
match.pd: Decrease number of nop conversions around bitwise ops [PR94718]
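Taken together, these commits teach GIMPLE passes (notably VRP's substitute-and-fold engine and forwprop) to cancel additions and subtractions that are carried out in a wrapping type and converted back, and to drop redundant nop conversions around such operations. A minimal C sketch of the kind of code that now folds, mirroring the new pr92734.c test added below (the function name is illustrative only):

int
wrapped_sub_folds (int t)
{
  int a = 7U - t;   /* subtraction performed in the wrapping unsigned type */
  return 7 - a;     /* with this patch the function folds to just `return t;` */
}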
diff -Nurp a/gcc/genmatch.c b/gcc/genmatch.c
--- a/gcc/genmatch.c 2020-03-12 19:07:21.000000000 +0800
+++ b/gcc/genmatch.c 2020-11-24 14:49:12.792000000 +0800
@@ -224,12 +224,6 @@ output_line_directive (FILE *f, location
#define DEFTREECODE(SYM, STRING, TYPE, NARGS) SYM,
enum tree_code {
#include "tree.def"
-CONVERT0,
-CONVERT1,
-CONVERT2,
-VIEW_CONVERT0,
-VIEW_CONVERT1,
-VIEW_CONVERT2,
MAX_TREE_CODES
};
#undef DEFTREECODE
@@ -695,11 +689,12 @@ struct expr : public operand
expr (id_base *operation_, location_t loc, bool is_commutative_ = false)
: operand (OP_EXPR, loc), operation (operation_),
ops (vNULL), expr_type (NULL), is_commutative (is_commutative_),
- is_generic (false), force_single_use (false) {}
+ is_generic (false), force_single_use (false), opt_grp (0) {}
expr (expr *e)
: operand (OP_EXPR, e->location), operation (e->operation),
ops (vNULL), expr_type (e->expr_type), is_commutative (e->is_commutative),
- is_generic (e->is_generic), force_single_use (e->force_single_use) {}
+ is_generic (e->is_generic), force_single_use (e->force_single_use),
+ opt_grp (e->opt_grp) {}
void append_op (operand *op) { ops.safe_push (op); }
/* The operator and its operands. */
id_base *operation;
@@ -714,6 +709,8 @@ struct expr : public operand
/* Whether pushing any stmt to the sequence should be conditional
on this expression having a single-use. */
bool force_single_use;
+ /* If non-zero, the group for optional handling. */
+ unsigned char opt_grp;
virtual void gen_transform (FILE *f, int, const char *, bool, int,
const char *, capture_info *,
dt_operand ** = 0, int = 0);
@@ -1079,18 +1076,17 @@ lower_commutative (simplify *s, vec<simp
}
}
-/* Strip conditional conversios using operator OPER from O and its
- children if STRIP, else replace them with an unconditional convert. */
+/* Strip conditional operations using group GRP from O and its
+ children if STRIP, else replace them with an unconditional operation. */
operand *
-lower_opt_convert (operand *o, enum tree_code oper,
- enum tree_code to_oper, bool strip)
+lower_opt (operand *o, unsigned char grp, bool strip)
{
if (capture *c = dyn_cast<capture *> (o))
{
if (c->what)
return new capture (c->location, c->where,
- lower_opt_convert (c->what, oper, to_oper, strip),
+ lower_opt (c->what, grp, strip),
c->value_match);
else
return c;
@@ -1100,36 +1096,34 @@ lower_opt_convert (operand *o, enum tree
if (!e)
return o;
- if (*e->operation == oper)
+ if (e->opt_grp == grp)
{
if (strip)
- return lower_opt_convert (e->ops[0], oper, to_oper, strip);
+ return lower_opt (e->ops[0], grp, strip);
expr *ne = new expr (e);
- ne->operation = (to_oper == CONVERT_EXPR
- ? get_operator ("CONVERT_EXPR")
- : get_operator ("VIEW_CONVERT_EXPR"));
- ne->append_op (lower_opt_convert (e->ops[0], oper, to_oper, strip));
+ ne->opt_grp = 0;
+ ne->append_op (lower_opt (e->ops[0], grp, strip));
return ne;
}
expr *ne = new expr (e);
for (unsigned i = 0; i < e->ops.length (); ++i)
- ne->append_op (lower_opt_convert (e->ops[i], oper, to_oper, strip));
+ ne->append_op (lower_opt (e->ops[i], grp, strip));
return ne;
}
-/* Determine whether O or its children uses the conditional conversion
- operator OPER. */
+/* Determine whether O or its children uses the conditional operation
+ group GRP. */
static bool
-has_opt_convert (operand *o, enum tree_code oper)
+has_opt (operand *o, unsigned char grp)
{
if (capture *c = dyn_cast<capture *> (o))
{
if (c->what)
- return has_opt_convert (c->what, oper);
+ return has_opt (c->what, grp);
else
return false;
}
@@ -1138,11 +1132,11 @@ has_opt_convert (operand *o, enum tree_c
if (!e)
return false;
- if (*e->operation == oper)
+ if (e->opt_grp == grp)
return true;
for (unsigned i = 0; i < e->ops.length (); ++i)
- if (has_opt_convert (e->ops[i], oper))
+ if (has_opt (e->ops[i], grp))
return true;
return false;
@@ -1152,34 +1146,24 @@ has_opt_convert (operand *o, enum tree_c
if required. */
static vec<operand *>
-lower_opt_convert (operand *o)
+lower_opt (operand *o)
{
vec<operand *> v1 = vNULL, v2;
v1.safe_push (o);
- enum tree_code opers[]
- = { CONVERT0, CONVERT_EXPR,
- CONVERT1, CONVERT_EXPR,
- CONVERT2, CONVERT_EXPR,
- VIEW_CONVERT0, VIEW_CONVERT_EXPR,
- VIEW_CONVERT1, VIEW_CONVERT_EXPR,
- VIEW_CONVERT2, VIEW_CONVERT_EXPR };
-
- /* Conditional converts are lowered to a pattern with the
- conversion and one without. The three different conditional
- convert codes are lowered separately. */
+ /* Conditional operations are lowered to a pattern with the
+ operation and one without. All different conditional operation
+ groups are lowered separately. */
- for (unsigned i = 0; i < sizeof (opers) / sizeof (enum tree_code); i += 2)
+ for (unsigned i = 1; i <= 10; ++i)
{
v2 = vNULL;
for (unsigned j = 0; j < v1.length (); ++j)
- if (has_opt_convert (v1[j], opers[i]))
+ if (has_opt (v1[j], i))
{
- v2.safe_push (lower_opt_convert (v1[j],
- opers[i], opers[i+1], false));
- v2.safe_push (lower_opt_convert (v1[j],
- opers[i], opers[i+1], true));
+ v2.safe_push (lower_opt (v1[j], i, false));
+ v2.safe_push (lower_opt (v1[j], i, true));
}
if (v2 != vNULL)
@@ -1197,9 +1181,9 @@ lower_opt_convert (operand *o)
the resulting multiple patterns to SIMPLIFIERS. */
static void
-lower_opt_convert (simplify *s, vec<simplify *>& simplifiers)
+lower_opt (simplify *s, vec<simplify *>& simplifiers)
{
- vec<operand *> matchers = lower_opt_convert (s->match);
+ vec<operand *> matchers = lower_opt (s->match);
for (unsigned i = 0; i < matchers.length (); ++i)
{
simplify *ns = new simplify (s->kind, s->id, matchers[i], s->result,
@@ -1543,7 +1527,7 @@ lower (vec<simplify *>& simplifiers, boo
{
auto_vec<simplify *> out_simplifiers;
for (unsigned i = 0; i < simplifiers.length (); ++i)
- lower_opt_convert (simplifiers[i], out_simplifiers);
+ lower_opt (simplifiers[i], out_simplifiers);
simplifiers.truncate (0);
for (unsigned i = 0; i < out_simplifiers.length (); ++i)
@@ -3927,7 +3911,7 @@ private:
unsigned get_internal_capture_id ();
- id_base *parse_operation ();
+ id_base *parse_operation (unsigned char &);
operand *parse_capture (operand *, bool);
operand *parse_expr ();
c_expr *parse_c_expr (cpp_ttype);
@@ -4118,47 +4102,36 @@ parser::record_operlist (location_t loc,
convert2? */
id_base *
-parser::parse_operation ()
+parser::parse_operation (unsigned char &opt_grp)
{
const cpp_token *id_tok = peek ();
+ char *alt_id = NULL;
const char *id = get_ident ();
const cpp_token *token = peek ();
- if (strcmp (id, "convert0") == 0)
- fatal_at (id_tok, "use 'convert?' here");
- else if (strcmp (id, "view_convert0") == 0)
- fatal_at (id_tok, "use 'view_convert?' here");
+ opt_grp = 0;
if (token->type == CPP_QUERY
&& !(token->flags & PREV_WHITE))
{
- if (strcmp (id, "convert") == 0)
- id = "convert0";
- else if (strcmp (id, "convert1") == 0)
- ;
- else if (strcmp (id, "convert2") == 0)
- ;
- else if (strcmp (id, "view_convert") == 0)
- id = "view_convert0";
- else if (strcmp (id, "view_convert1") == 0)
- ;
- else if (strcmp (id, "view_convert2") == 0)
- ;
- else
- fatal_at (id_tok, "non-convert operator conditionalized");
-
if (!parsing_match_operand)
fatal_at (id_tok, "conditional convert can only be used in "
"match expression");
+ if (ISDIGIT (id[strlen (id) - 1]))
+ {
+ opt_grp = id[strlen (id) - 1] - '0' + 1;
+ alt_id = xstrdup (id);
+ alt_id[strlen (id) - 1] = '\0';
+ if (opt_grp == 1)
+ fatal_at (id_tok, "use '%s?' here", alt_id);
+ }
+ else
+ opt_grp = 1;
eat_token (CPP_QUERY);
}
- else if (strcmp (id, "convert1") == 0
- || strcmp (id, "convert2") == 0
- || strcmp (id, "view_convert1") == 0
- || strcmp (id, "view_convert2") == 0)
- fatal_at (id_tok, "expected '?' after conditional operator");
- id_base *op = get_operator (id);
+ id_base *op = get_operator (alt_id ? alt_id : id);
if (!op)
- fatal_at (id_tok, "unknown operator %s", id);
-
+ fatal_at (id_tok, "unknown operator %s", alt_id ? alt_id : id);
+ if (alt_id)
+ free (alt_id);
user_id *p = dyn_cast<user_id *> (op);
if (p && p->is_oper_list)
{
@@ -4214,7 +4187,8 @@ struct operand *
parser::parse_expr ()
{
const cpp_token *token = peek ();
- expr *e = new expr (parse_operation (), token->src_loc);
+ unsigned char opt_grp;
+ expr *e = new expr (parse_operation (opt_grp), token->src_loc);
token = peek ();
operand *op;
bool is_commutative = false;
@@ -4310,6 +4284,12 @@ parser::parse_expr ()
"commutative");
}
e->expr_type = expr_type;
+ if (opt_grp != 0)
+ {
+ if (e->ops.length () != 1)
+ fatal_at (token, "only unary operations can be conditional");
+ e->opt_grp = opt_grp;
+ }
return op;
}
else if (!(token->flags & PREV_WHITE))
@@ -4692,10 +4672,6 @@ parser::parse_for (location_t)
id_base *idb = get_operator (oper, true);
if (idb == NULL)
fatal_at (token, "no such operator '%s'", oper);
- if (*idb == CONVERT0 || *idb == CONVERT1 || *idb == CONVERT2
- || *idb == VIEW_CONVERT0 || *idb == VIEW_CONVERT1
- || *idb == VIEW_CONVERT2)
- fatal_at (token, "conditional operators cannot be used inside for");
if (arity == -1)
arity = idb->nargs;
@@ -5102,12 +5078,6 @@ main (int argc, char **argv)
add_operator (SYM, # SYM, # TYPE, NARGS);
#define END_OF_BASE_TREE_CODES
#include "tree.def"
-add_operator (CONVERT0, "convert0", "tcc_unary", 1);
-add_operator (CONVERT1, "convert1", "tcc_unary", 1);
-add_operator (CONVERT2, "convert2", "tcc_unary", 1);
-add_operator (VIEW_CONVERT0, "view_convert0", "tcc_unary", 1);
-add_operator (VIEW_CONVERT1, "view_convert1", "tcc_unary", 1);
-add_operator (VIEW_CONVERT2, "view_convert2", "tcc_unary", 1);
#undef END_OF_BASE_TREE_CODES
#undef DEFTREECODE
diff -Nurp a/gcc/gimple-loop-versioning.cc b/gcc/gimple-loop-versioning.cc
--- a/gcc/gimple-loop-versioning.cc 2020-03-12 19:07:21.000000000 +0800
+++ b/gcc/gimple-loop-versioning.cc 2020-11-24 14:49:12.792000000 +0800
@@ -1264,6 +1264,12 @@ loop_versioning::record_address_fragment
continue;
}
}
+ if (CONVERT_EXPR_CODE_P (code))
+ {
+ tree op1 = gimple_assign_rhs1 (assign);
+ address->terms[i].expr = strip_casts (op1);
+ continue;
+ }
}
i += 1;
}
diff -Nurp a/gcc/match.pd b/gcc/match.pd
--- a/gcc/match.pd 2020-11-24 14:54:43.576000000 +0800
+++ b/gcc/match.pd 2020-11-24 14:49:12.792000000 +0800
@@ -97,8 +97,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(define_operator_list COND_TERNARY
IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
-/* As opposed to convert?, this still creates a single pattern, so
- it is not a suitable replacement for convert? in all cases. */
+/* With nop_convert? combine convert? and view_convert? in one pattern
+ plus conditionalize on tree_nop_conversion_p conversions. */
(match (nop_convert @0)
(convert @0)
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
@@ -108,9 +108,6 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& known_eq (TYPE_VECTOR_SUBPARTS (type),
TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
&& tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
-/* This one has to be last, or it shadows the others. */
-(match (nop_convert @0)
- @0)
/* Transform likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>
ABSU_EXPR returns unsigned absolute value of the operand and the operand
@@ -1260,7 +1257,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
We combine the above two cases by using a conditional convert. */
(for bitop (bit_and bit_ior bit_xor)
(simplify
- (bitop (convert @0) (convert? @1))
+ (bitop (convert@2 @0) (convert?@3 @1))
(if (((TREE_CODE (@1) == INTEGER_CST
&& INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& int_fits_type_p (@1, TREE_TYPE (@0)))
@@ -1279,8 +1276,24 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
|| GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
/* Or if the precision of TO is not the same as the precision
of its mode. */
- || !type_has_mode_precision_p (type)))
- (convert (bitop @0 (convert @1))))))
+ || !type_has_mode_precision_p (type)
+ /* In GIMPLE, getting rid of 2 conversions for one new results
+ in smaller IL. */
+ || (GIMPLE
+ && TREE_CODE (@1) != INTEGER_CST
+ && tree_nop_conversion_p (type, TREE_TYPE (@0))
+ && single_use (@2)
+ && single_use (@3))))
+ (convert (bitop @0 (convert @1)))))
+ /* In GIMPLE, getting rid of 2 conversions for one new results
+ in smaller IL. */
+ (simplify
+ (convert (bitop:cs@2 (nop_convert:s @0) @1))
+ (if (GIMPLE
+ && TREE_CODE (@1) != INTEGER_CST
+ && tree_nop_conversion_p (type, TREE_TYPE (@2))
+ && types_match (type, @0))
+ (bitop @0 (convert @1)))))
(for bitop (bit_and bit_ior)
rbitop (bit_ior bit_and)
@@ -1374,7 +1387,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* Convert - (~A) to A + 1. */
(simplify
- (negate (nop_convert (bit_not @0)))
+ (negate (nop_convert? (bit_not @0)))
(plus (view_convert @0) { build_each_one_cst (type); }))
/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
@@ -1401,7 +1414,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
(simplify
- (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
+ (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1)
(if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
(bit_not (bit_xor (view_convert @0) @1))))
@@ -1614,7 +1627,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* For equality, this is also true with wrapping overflow. */
(for op (eq ne)
(simplify
- (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
+ (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
(if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
&& (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
@@ -1623,7 +1636,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
(op @0 { build_zero_cst (TREE_TYPE (@0)); })))
(simplify
- (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
+ (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
(if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
&& tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
&& (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
@@ -1866,7 +1879,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
|| !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
(convert (negate @1))))
(simplify
- (negate (nop_convert (negate @1)))
+ (negate (nop_convert? (negate @1)))
(if (!TYPE_OVERFLOW_SANITIZED (type)
&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
(view_convert @1)))
@@ -1883,20 +1896,26 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* A - (A +- B) -> -+ B */
/* A +- (B -+ A) -> +- B */
(simplify
- (minus (plus:c @0 @1) @0)
- @1)
- (simplify
- (minus (minus @0 @1) @0)
- (negate @1))
+ (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0)
+ (view_convert @1))
(simplify
- (plus:c (minus @0 @1) @1)
- @0)
+ (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0)
+ (if (!ANY_INTEGRAL_TYPE_P (type)
+ || TYPE_OVERFLOW_WRAPS (type))
+ (negate (view_convert @1))
+ (view_convert (negate @1))))
+ (simplify
+ (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1)
+ (view_convert @0))
+ (simplify
+ (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1)))
+ (if (!ANY_INTEGRAL_TYPE_P (type)
+ || TYPE_OVERFLOW_WRAPS (type))
+ (negate (view_convert @1))
+ (view_convert (negate @1))))
(simplify
- (minus @0 (plus:c @0 @1))
- (negate @1))
- (simplify
- (minus @0 (minus @0 @1))
- @1)
+ (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1)))
+ (view_convert @1))
/* (A +- B) + (C - A) -> C +- B */
/* (A + B) - (A - C) -> B + C */
/* More cases are handled with comparisons. */
@@ -1922,7 +1941,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(for inner_op (plus minus)
neg_inner_op (minus plus)
(simplify
- (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
+ (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1))
CONSTANT_CLASS_P@2)
/* If one of the types wraps, use that one. */
(if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
@@ -1961,17 +1980,70 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* (CST1 - A) +- CST2 -> CST3 - A */
(for outer_op (plus minus)
(simplify
- (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
- (with { tree cst = const_binop (outer_op, type, @1, @2); }
- (if (cst && !TREE_OVERFLOW (cst))
- (minus { cst; } @0)))))
-
- /* CST1 - (CST2 - A) -> CST3 + A */
- (simplify
- (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
- (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
- (if (cst && !TREE_OVERFLOW (cst))
- (plus { cst; } @0))))
+ (outer_op (nop_convert? (minus CONSTANT_CLASS_P@1 @0)) CONSTANT_CLASS_P@2)
+ /* If one of the types wraps, use that one. */
+ (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
+ /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
+ forever if something doesn't simplify into a constant. */
+ (if (!CONSTANT_CLASS_P (@0))
+ (minus (outer_op (view_convert @1) @2) (view_convert @0)))
+ (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
+ (view_convert (minus (outer_op @1 (view_convert @2)) @0))
+ (if (types_match (type, @0))
+ (with { tree cst = const_binop (outer_op, type, @1, @2); }
+ (if (cst && !TREE_OVERFLOW (cst))
+ (minus { cst; } @0))))))))
+
+ /* CST1 - (CST2 - A) -> CST3 + A
+ Use view_convert because it is safe for vectors and equivalent for
+ scalars. */
+ (simplify
+ (minus CONSTANT_CLASS_P@1 (nop_convert? (minus CONSTANT_CLASS_P@2 @0)))
+ /* If one of the types wraps, use that one. */
+ (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
+ /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
+ forever if something doesn't simplify into a constant. */
+ (if (!CONSTANT_CLASS_P (@0))
+ (plus (view_convert @0) (minus @1 (view_convert @2))))
+ (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
+ (view_convert (plus @0 (minus (view_convert @1) @2)))
+ (if (types_match (type, @0))
+ (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
+ (if (cst && !TREE_OVERFLOW (cst))
+ (plus { cst; } @0)))))))
+
+/* ((T)(A)) + CST -> (T)(A + CST) */
+#if GIMPLE
+ (simplify
+ (plus (convert SSA_NAME@0) INTEGER_CST@1)
+ (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
+ && TREE_CODE (type) == INTEGER_TYPE
+ && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
+ && int_fits_type_p (@1, TREE_TYPE (@0)))
+ /* Perform binary operation inside the cast if the constant fits
+ and (A + CST)'s range does not overflow. */
+ (with
+ {
+ wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
+ max_ovf = wi::OVF_OVERFLOW;
+ tree inner_type = TREE_TYPE (@0);
+
+ wide_int w1 = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
+ TYPE_SIGN (inner_type));
+
+ wide_int wmin0, wmax0;
+ if (get_range_info (@0, &wmin0, &wmax0) == VR_RANGE)
+ {
+ wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
+ wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
+ }
+ }
+ (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
+ (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
+ )))
+#endif
/* ~A + A -> -1 */
(simplify
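The match.pd changes above also decrease the number of nop conversions kept around bitwise operations in the GIMPLE IL (PR94718). A small C illustration of the shape that now simplifies, in the spirit of the pr94718-3.c test added below (the function name is illustrative only):

int
bitor_without_casts (int x, int y)
{
  /* The casts to unsigned and back are nop conversions; with the new
     patterns the IL keeps at most one conversion instead of converting
     both operands and the result.  */
  return (int) ((unsigned) x | (unsigned) y);
}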
diff -Nurp a/gcc/testsuite/gcc.dg/tree-ssa/copy-headers-5.c b/gcc/testsuite/gcc.dg/tree-ssa/copy-headers-5.c
--- a/gcc/testsuite/gcc.dg/tree-ssa/copy-headers-5.c 2020-03-12 19:07:22.000000000 +0800
+++ b/gcc/testsuite/gcc.dg/tree-ssa/copy-headers-5.c 2020-11-24 14:49:14.568000000 +0800
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-tree-ch2-details" } */
+/* { dg-options "-O2 -fno-tree-vrp -fdump-tree-ch2-details" } */
int is_sorted(int *a, int n)
{
diff -Nurp a/gcc/testsuite/gcc.dg/tree-ssa/copy-headers-7.c b/gcc/testsuite/gcc.dg/tree-ssa/copy-headers-7.c
--- a/gcc/testsuite/gcc.dg/tree-ssa/copy-headers-7.c 2020-03-12 19:07:22.000000000 +0800
+++ b/gcc/testsuite/gcc.dg/tree-ssa/copy-headers-7.c 2020-11-24 14:49:14.568000000 +0800
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-tree-ch2-details --param logical-op-non-short-circuit=0" } */
+/* { dg-options "-O2 -fno-tree-vrp -fdump-tree-ch2-details --param logical-op-non-short-circuit=0" } */
int is_sorted(int *a, int n, int m, int k)
{
diff -Nurp a/gcc/testsuite/gcc.dg/tree-ssa/loop-15.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-15.c
--- a/gcc/testsuite/gcc.dg/tree-ssa/loop-15.c 2020-03-12 19:07:22.000000000 +0800
+++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-15.c 2020-11-24 14:49:14.568000000 +0800
@@ -19,7 +19,7 @@ int bla(void)
}
/* Since the loop is removed, there should be no addition. */
-/* { dg-final { scan-tree-dump-times " \\+ " 0 "optimized" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times " \\+ " 0 "optimized" } } */
/* { dg-final { scan-tree-dump-times " \\* " 1 "optimized" } } */
/* The if from the loop header copying remains in the code. */
diff -Nurp a/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c b/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c
--- a/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c 2020-03-12 19:07:22.000000000 +0800
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c 2020-11-24 14:49:14.568000000 +0800
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fno-tree-ccp -fdisable-tree-evrp -fdump-tree-vrp1" } */
+/* { dg-options "-O2 -fno-tree-ccp -fdisable-tree-evrp -fdump-tree-vrp1-details" } */
void h (void);
@@ -17,4 +17,4 @@ int g (int i, int j)
return 1;
}
-/* { dg-final { scan-tree-dump-times "Folding predicate.*to 1" 1 "vrp1" } } */
+/* { dg-final { scan-tree-dump-times "gimple_simplified" 1 "vrp1" } } */
diff -Nurp a/gcc/testsuite/gcc.dg/tree-ssa/pr92734-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr92734-2.c
--- a/gcc/testsuite/gcc.dg/tree-ssa/pr92734-2.c 1970-01-01 08:00:00.000000000 +0800
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr92734-2.c 2020-11-24 14:49:14.568000000 +0800
@@ -0,0 +1,76 @@
+/* PR tree-optimization/92734 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+/* Verify there are no binary additions or subtractions left. There can
+ be just casts and negations. */
+/* { dg-final { scan-tree-dump-not " \[+-] " "optimized" } } */
+
+int
+f1 (int x, unsigned y)
+{
+ int a = x + y;
+ return a - x;
+}
+
+unsigned
+f2 (unsigned x, int y)
+{
+ unsigned a = (int) x + y;
+ return a - x;
+}
+
+int
+f3 (int x, unsigned y)
+{
+ int a = x - y;
+ return a - x;
+}
+
+unsigned
+f4 (unsigned x, int y)
+{
+ unsigned a = (int) x - y;
+ return a - x;
+}
+
+int
+f5 (unsigned x, int y)
+{
+ int a = x - y;
+ return a + y;
+}
+
+unsigned
+f6 (int x, unsigned y)
+{
+ unsigned a = x - (int) y;
+ return a + y;
+}
+
+int
+f7 (int x, unsigned y)
+{
+ int a = x + y;
+ return x - a;
+}
+
+unsigned
+f8 (unsigned x, int y)
+{
+ unsigned a = (int) x + y;
+ return x - a;
+}
+
+int
+f9 (int x, unsigned y)
+{
+ int a = x - y;
+ return x - a;
+}
+
+unsigned
+f10 (unsigned x, int y)
+{
+ unsigned a = (int) x - y;
+ return x - a;
+}
diff -Nurp a/gcc/testsuite/gcc.dg/tree-ssa/pr92734.c b/gcc/testsuite/gcc.dg/tree-ssa/pr92734.c
--- a/gcc/testsuite/gcc.dg/tree-ssa/pr92734.c 1970-01-01 08:00:00.000000000 +0800
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr92734.c 2020-11-24 14:49:14.568000000 +0800
@@ -0,0 +1,31 @@
+/* PR tree-optimization/92734 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-forwprop1" } */
+/* { dg-final { scan-tree-dump-times "return t_\[0-9]*\\\(D\\\);" 4 "forwprop1" } } */
+
+int
+f1 (int t)
+{
+ return 1 - (int) (1U - t);
+}
+
+int
+f2 (int t)
+{
+ int a = 7U - t;
+ return 7 - a;
+}
+
+int
+f3 (int t)
+{
+ int a = 32U - t;
+ return 32 - a;
+}
+
+int
+f4 (int t)
+{
+ int a = 32 - t;
+ return (int) (32 - (unsigned) a);
+}
diff -Nurp a/gcc/testsuite/gcc.dg/tree-ssa/pr94718-3.c b/gcc/testsuite/gcc.dg/tree-ssa/pr94718-3.c
--- a/gcc/testsuite/gcc.dg/tree-ssa/pr94718-3.c 1970-01-01 08:00:00.000000000 +0800
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr94718-3.c 2020-11-24 14:49:14.568000000 +0800
@@ -0,0 +1,45 @@
+/* PR tree-optimization/94718 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-ipa-icf -fdump-tree-optimized" } */
+/* { dg-final { scan-tree-dump-times " \\\(int\\\) " 2 "optimized" } } */
+/* { dg-final { scan-tree-dump-times " \\\(unsigned int\\\) " 2 "optimized" } } */
+
+int
+f1 (int x, int y)
+{
+ return (int) ((unsigned) x | (unsigned) y);
+}
+
+int
+f2 (int x, int y)
+{
+ unsigned a = x;
+ unsigned b = y;
+ return a | b;
+}
+
+int
+f3 (int x, unsigned y)
+{
+ return (int) ((unsigned) x | y);
+}
+
+int
+f4 (int x, unsigned y)
+{
+ unsigned a = x;
+ return a | y;
+}
+
+unsigned
+f5 (int x, unsigned y)
+{
+ return (unsigned) (x | (int) y);
+}
+
+unsigned
+f6 (int x, unsigned y)
+{
+ int a = y;
+ return x | a;
+}
diff -Nurp a/gcc/testsuite/gcc.dg/wrapped-binop-simplify.c b/gcc/testsuite/gcc.dg/wrapped-binop-simplify.c
--- a/gcc/testsuite/gcc.dg/wrapped-binop-simplify.c 1970-01-01 08:00:00.000000000 +0800
+++ b/gcc/testsuite/gcc.dg/wrapped-binop-simplify.c 2020-11-24 14:49:14.484000000 +0800
@@ -0,0 +1,43 @@
+/* { dg-do compile { target { { i?86-*-* x86_64-*-* s390*-*-* } && lp64 } } } */
+/* { dg-options "-O2 -fdump-tree-vrp2-details" } */
+/* { dg-final { scan-tree-dump-times "gimple_simplified to" 4 "vrp2" } } */
+
+void v1 (unsigned long *in, unsigned long *out, unsigned int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ {
+ out[i] = in[i];
+ }
+}
+
+void v2 (unsigned long *in, unsigned long *out, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ {
+ out[i] = in[i];
+ }
+}
+
+void v3 (unsigned long *in, unsigned long *out, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ {
+ out[i] = in[i];
+ }
+}
+
+void v4 (unsigned long *in, unsigned long *out, int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ {
+ out[i] = in[i];
+ }
+}
diff -Nurp a/gcc/tree-ssa-propagate.c b/gcc/tree-ssa-propagate.c
--- a/gcc/tree-ssa-propagate.c 2020-11-24 14:54:42.556000000 +0800
+++ b/gcc/tree-ssa-propagate.c 2020-11-24 14:49:12.792000000 +0800
@@ -814,7 +814,6 @@ ssa_propagation_engine::ssa_propagate (v
ssa_prop_fini ();
}
-
/* Return true if STMT is of the form 'mem_ref = RHS', where 'mem_ref'
is a non-volatile pointer dereference, a structure reference or a
reference to a single _DECL. Ignore volatile memory references
@@ -1071,6 +1070,14 @@ substitute_and_fold_dom_walker::before_d
stmt = gsi_stmt (i);
gimple_set_modified (stmt, true);
}
+ /* Also fold if we want to fold all statements. */
+ else if (substitute_and_fold_engine->fold_all_stmts
+ && fold_stmt (&i, follow_single_use_edges))
+ {
+ did_replace = true;
+ stmt = gsi_stmt (i);
+ gimple_set_modified (stmt, true);
+ }
/* Some statements may be simplified using propagator
specific information. Do this before propagating
diff -Nurp a/gcc/tree-ssa-propagate.h b/gcc/tree-ssa-propagate.h
--- a/gcc/tree-ssa-propagate.h 2020-03-12 19:07:23.000000000 +0800
+++ b/gcc/tree-ssa-propagate.h 2020-11-24 14:49:12.792000000 +0800
@@ -100,6 +100,8 @@ class ssa_propagation_engine
class substitute_and_fold_engine
{
public:
+ substitute_and_fold_engine (bool fold_all_stmts = false)
+ : fold_all_stmts (fold_all_stmts) { }
virtual ~substitute_and_fold_engine (void) { }
virtual bool fold_stmt (gimple_stmt_iterator *) { return false; }
virtual tree get_value (tree) { return NULL_TREE; }
@@ -107,6 +109,10 @@ class substitute_and_fold_engine
bool substitute_and_fold (basic_block = NULL);
bool replace_uses_in (gimple *);
bool replace_phi_args_in (gphi *);
+
+ /* Users like VRP can set this when they want to perform
+ folding for every propagation. */
+ bool fold_all_stmts;
};
#endif /* _TREE_SSA_PROPAGATE_H */
diff -Nurp a/gcc/tree-vrp.c b/gcc/tree-vrp.c
--- a/gcc/tree-vrp.c 2020-11-24 14:54:43.564000000 +0800
+++ b/gcc/tree-vrp.c 2020-11-24 14:49:12.792000000 +0800
@@ -6384,6 +6384,7 @@ vrp_prop::visit_phi (gphi *phi)
class vrp_folder : public substitute_and_fold_engine
{
public:
+ vrp_folder () : substitute_and_fold_engine (/* Fold all stmts. */ true) { }
tree get_value (tree) FINAL OVERRIDE;
bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
bool fold_predicate_in (gimple_stmt_iterator *);