From 4d76b521d9bb539556011304b8a76dea1e2657a1 Mon Sep 17 00:00:00 2001
From: bule <bule1@huawei.com>
Date: Fri, 6 Aug 2021 10:20:54 +0800
Subject: [PATCH 17/22] [mcmodel] Enable mcmodel=medium on kunpeng
Enable mcmodel=medium on kunpeng
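
As an illustrative sketch only (the file name, array size and command line
below are assumptions, not part of this patch): the new model targets
translation units whose static data is larger than -mlarge-data-threshold
(default 65536 bytes). Built with something like
'gcc -fpic -mcmodel=medium big_data.c', the address of such an object is
expected to be classified as SYMBOL_MEDIUM_ABSOLUTE and materialized with
the new 64-bit PC-relative movz/movk sequence instead of adrp/add:

    /* big_data.c -- hypothetical example.  The 1 MiB buffer exceeds the
       default -mlarge-data-threshold of 65536 bytes.  */
    static char big_buffer[1 << 20];

    int
    main (void)
    {
      return (int) big_buffer[0];
    }
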
diff --git a/gcc/combine.c b/gcc/combine.c
index 35505cc5311..497e53289ca 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -1923,6 +1923,12 @@ can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
break;
case SET:
+ /* If the set is a symbol loaded by the medium code model UNSPEC,
+ do not try to combine it. */
+ if (GET_CODE (SET_SRC (elt)) == UNSPEC
+ && XVECLEN (SET_SRC (elt), 0) != 0
+ && targetm.medium_symbol_p (SET_SRC (elt)))
+ return 0;
/* Ignore SETs whose result isn't used but not those that
have side-effects. */
if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
diff --git a/gcc/config/aarch64/aarch64-opts.h b/gcc/config/aarch64/aarch64-opts.h
index ee7bed34924..21828803480 100644
--- a/gcc/config/aarch64/aarch64-opts.h
+++ b/gcc/config/aarch64/aarch64-opts.h
@@ -66,6 +66,10 @@ enum aarch64_code_model {
/* -fpic for small memory model.
GOT size to 28KiB (4K*8-4K) or 3580 entries. */
AARCH64_CMODEL_SMALL_SPIC,
+ /* Use a movk insn sequence to do 64-bit PC-relative relocation. */
+ AARCH64_CMODEL_MEDIUM,
+ /* Use a movk insn sequence to do 64-bit PC-relative GOT relocation. */
+ AARCH64_CMODEL_MEDIUM_PIC,
/* No assumptions about addresses of code and data.
The PIC variant is not yet implemented. */
AARCH64_CMODEL_LARGE
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index bebd1b36228..226f3a8ff01 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -95,9 +95,11 @@
*/
enum aarch64_symbol_type
{
+ SYMBOL_MEDIUM_ABSOLUTE,
SYMBOL_SMALL_ABSOLUTE,
SYMBOL_SMALL_GOT_28K,
SYMBOL_SMALL_GOT_4G,
+ SYMBOL_MEDIUM_GOT_4G,
SYMBOL_SMALL_TLSGD,
SYMBOL_SMALL_TLSDESC,
SYMBOL_SMALL_TLSIE,
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 79dc8f186f4..f78942b04c6 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -3127,6 +3127,29 @@ aarch64_load_symref_appropriately (rtx dest, rtx imm,
emit_insn (gen_add_losym (dest, tmp_reg, imm));
return;
}
+ case SYMBOL_MEDIUM_ABSOLUTE:
+ {
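+ /* Build the symbol's address with the 64-bit PC-relative
+ movz/movk sequence emitted by load_symbol_medium_<mode>. */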
+ rtx tmp_reg = dest;
+ machine_mode mode = GET_MODE (dest);
+
+ gcc_assert (mode == Pmode || mode == ptr_mode);
+ if (can_create_pseudo_p ())
+ tmp_reg = gen_reg_rtx (mode);
+
+ if (mode == DImode)
+ {
+ emit_insn (gen_load_symbol_medium_di (dest, tmp_reg, imm));
+ }
+ else
+ {
+ emit_insn (gen_load_symbol_medium_si (dest, tmp_reg, imm));
+ }
+ if (REG_P (dest))
+ {
+ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (imm));
+ }
+ return;
+ }
case SYMBOL_TINY_ABSOLUTE:
emit_insn (gen_rtx_SET (dest, imm));
@@ -3249,6 +3272,60 @@ aarch64_load_symref_appropriately (rtx dest, rtx imm,
return;
}
+ case SYMBOL_MEDIUM_GOT_4G:
+ {
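+ /* Load the address of _GLOBAL_OFFSET_TABLE_ PC-relatively into
+ tmp_reg, build the symbol's GOT offset with the :gotoff_g1/g0
+ movz/movk pair, then load the GOT entry itself. */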
+ rtx tmp_reg = dest;
+ machine_mode mode = GET_MODE (dest);
+ if (can_create_pseudo_p ())
+ {
+ tmp_reg = gen_reg_rtx (mode);
+ }
+ rtx insn;
+ rtx mem;
+ rtx s = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+
+ if (mode == DImode)
+ {
+ emit_insn (gen_load_symbol_medium_di (tmp_reg, dest, s));
+ }
+ else
+ {
+ emit_insn (gen_load_symbol_medium_si (tmp_reg, dest, s));
+ }
+ if (REG_P (dest))
+ {
+ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (s));
+ }
+
+ if (mode == ptr_mode)
+ {
+ if (mode == DImode)
+ {
+ emit_insn (gen_get_gotoff_di (dest, imm));
+ insn = gen_ldr_got_medium_di (dest, tmp_reg, dest);
+ }
+ else
+ {
+ emit_insn (gen_get_gotoff_si (dest, imm));
+ insn = gen_ldr_got_medium_si (dest, tmp_reg, dest);
+ }
+ mem = XVECEXP (SET_SRC (insn), 0, 0);
+ }
+ else
+ {
+ gcc_assert (mode == Pmode);
+ emit_insn (gen_get_gotoff_di (dest, imm));
+ insn = gen_ldr_got_medium_sidi (dest, tmp_reg, dest);
+ mem = XVECEXP (XEXP (SET_SRC (insn), 0), 0, 0);
+ }
+
+ gcc_assert (GET_CODE (mem) == MEM);
+ MEM_READONLY_P (mem) = 1;
+ MEM_NOTRAP_P (mem) = 1;
+ emit_insn (insn);
+ return;
+ }
+
case SYMBOL_SMALL_TLSGD:
{
rtx_insn *insns;
@@ -5256,11 +5333,12 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
return;
- case SYMBOL_SMALL_TLSGD:
- case SYMBOL_SMALL_TLSDESC:
+ case SYMBOL_SMALL_TLSGD:
+ case SYMBOL_SMALL_TLSDESC:
case SYMBOL_SMALL_TLSIE:
case SYMBOL_SMALL_GOT_28K:
case SYMBOL_SMALL_GOT_4G:
+ case SYMBOL_MEDIUM_GOT_4G:
case SYMBOL_TINY_GOT:
case SYMBOL_TINY_TLSIE:
if (const_offset != 0)
@@ -5279,6 +5357,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
case SYMBOL_TLSLE24:
case SYMBOL_TLSLE32:
case SYMBOL_TLSLE48:
+ case SYMBOL_MEDIUM_ABSOLUTE:
aarch64_load_symref_appropriately (dest, imm, sty);
return;
@@ -9389,7 +9468,14 @@ aarch64_classify_address (struct aarch64_address_info *info,
if (GET_CODE (sym) == SYMBOL_REF
&& offset.is_constant (&const_offset)
&& (aarch64_classify_symbol (sym, const_offset)
- == SYMBOL_SMALL_ABSOLUTE))
+ == SYMBOL_SMALL_ABSOLUTE
+ /* Fix the failure on dbl_mov_immediate_1.c. If we end up here with
+ MEDIUM_ABSOLUTE, the symbol is a constant that was forced to
+ memory in the reload pass, so it is fine to keep the original
+ design and substitute the mov with 'adrp and ldr :losum'. */
+ || aarch64_classify_symbol (sym, const_offset)
+ == SYMBOL_MEDIUM_ABSOLUTE))
{
/* The symbol and offset must be aligned to the access size. */
unsigned int align;
@@ -11346,7 +11432,13 @@ static inline bool
aarch64_can_use_per_function_literal_pools_p (void)
{
return (aarch64_pcrelative_literal_loads
- || aarch64_cmodel == AARCH64_CMODEL_LARGE);
+ || aarch64_cmodel == AARCH64_CMODEL_LARGE
+ /* Fix const9.C so that constants go into the per-function literal
+ pools. In the original design of aarch64 mcmodel=medium we do not
+ care where such a symbol is placed; for the benefit of code size
+ and behaviour consistent with the other code models, put it into
+ the per-function literal pools. */
+ || aarch64_cmodel == AARCH64_CMODEL_MEDIUM);
}
static bool
@@ -13003,6 +13095,13 @@ cost_plus:
if (speed)
*cost += extra_cost->alu.arith;
}
+ else if (aarch64_cmodel == AARCH64_CMODEL_MEDIUM
+ || aarch64_cmodel == AARCH64_CMODEL_MEDIUM_PIC)
+ {
+ /* 4 movs + adr + sub + add; the PIC variant additionally needs 2 movs and an ldr (costed below). */
+ if (speed)
+ *cost += 7*extra_cost->alu.arith;
+ }
if (flag_pic)
{
@@ -13010,6 +13109,8 @@ cost_plus:
*cost += COSTS_N_INSNS (1);
if (speed)
*cost += extra_cost->ldst.load;
+ if (aarch64_cmodel == AARCH64_CMODEL_MEDIUM_PIC)
+ *cost += 2*extra_cost->alu.arith;
}
return true;
@@ -14373,6 +14474,7 @@ initialize_aarch64_tls_size (struct gcc_options *opts)
if (aarch64_tls_size > 32)
aarch64_tls_size = 32;
break;
+ case AARCH64_CMODEL_MEDIUM:
case AARCH64_CMODEL_LARGE:
/* The maximum TLS size allowed under large is 16E.
FIXME: 16E should be 64bit, we only support 48bit offset now. */
@@ -15266,6 +15368,12 @@ initialize_aarch64_code_model (struct gcc_options *opts)
#endif
}
break;
+ case AARCH64_CMODEL_MEDIUM:
+ if (opts->x_flag_pic)
+ {
+ aarch64_cmodel = AARCH64_CMODEL_MEDIUM_PIC;
+ }
+ break;
case AARCH64_CMODEL_LARGE:
if (opts->x_flag_pic)
sorry ("code model %qs with %<-f%s%>", "large",
@@ -15276,6 +15384,7 @@ initialize_aarch64_code_model (struct gcc_options *opts)
case AARCH64_CMODEL_TINY_PIC:
case AARCH64_CMODEL_SMALL_PIC:
case AARCH64_CMODEL_SMALL_SPIC:
+ case AARCH64_CMODEL_MEDIUM_PIC:
gcc_unreachable ();
}
}
@@ -15286,6 +15395,7 @@ static void
aarch64_option_save (struct cl_target_option *ptr, struct gcc_options *opts)
{
ptr->x_aarch64_override_tune_string = opts->x_aarch64_override_tune_string;
+ ptr->x_aarch64_data_threshold = opts->x_aarch64_data_threshold;
ptr->x_aarch64_branch_protection_string
= opts->x_aarch64_branch_protection_string;
}
@@ -15301,6 +15411,7 @@ aarch64_option_restore (struct gcc_options *opts, struct cl_target_option *ptr)
opts->x_explicit_arch = ptr->x_explicit_arch;
selected_arch = aarch64_get_arch (ptr->x_explicit_arch);
opts->x_aarch64_override_tune_string = ptr->x_aarch64_override_tune_string;
+ opts->x_aarch64_data_threshold = ptr->x_aarch64_data_threshold;
opts->x_aarch64_branch_protection_string
= ptr->x_aarch64_branch_protection_string;
if (opts->x_aarch64_branch_protection_string)
@@ -16169,6 +16280,8 @@ aarch64_classify_symbol (rtx x, HOST_WIDE_INT offset)
case AARCH64_CMODEL_SMALL_SPIC:
case AARCH64_CMODEL_SMALL_PIC:
+ case AARCH64_CMODEL_MEDIUM_PIC:
+ case AARCH64_CMODEL_MEDIUM:
case AARCH64_CMODEL_SMALL:
return SYMBOL_SMALL_ABSOLUTE;
@@ -16205,6 +16318,7 @@ aarch64_classify_symbol (rtx x, HOST_WIDE_INT offset)
return SYMBOL_TINY_ABSOLUTE;
case AARCH64_CMODEL_SMALL:
+ AARCH64_SMALL_ROUTINE:
/* Same reasoning as the tiny code model, but the offset cap here is
1MB, allowing +/-3.9GB for the offset to the symbol. */
@@ -16228,7 +16342,50 @@ aarch64_classify_symbol (rtx x, HOST_WIDE_INT offset)
? SYMBOL_SMALL_GOT_28K : SYMBOL_SMALL_GOT_4G);
return SYMBOL_SMALL_ABSOLUTE;
+ case AARCH64_CMODEL_MEDIUM:
+ {
+ tree decl_local = SYMBOL_REF_DECL (x);
+ if (decl_local != NULL
+ && tree_fits_uhwi_p (DECL_SIZE_UNIT (decl_local)))
+ {
+ HOST_WIDE_INT size = tree_to_uhwi (DECL_SIZE_UNIT (decl_local));
+ /* If the data is smaller than the threshold, use the small
+ code model; otherwise use the large code model. */
+ if (size >= HOST_WIDE_INT (aarch64_data_threshold))
+ goto AARCH64_LARGE_ROUTINE;
+ }
+ goto AARCH64_SMALL_ROUTINE;
+ }
+
+ case AARCH64_CMODEL_MEDIUM_PIC:
+ {
+ tree decl_local = SYMBOL_REF_DECL (x);
+ if (decl_local != NULL
+ && tree_fits_uhwi_p (DECL_SIZE_UNIT (decl_local)))
+ {
+ HOST_WIDE_INT size = tree_to_uhwi (DECL_SIZE_UNIT (decl_local));
+ if (size < HOST_WIDE_INT (aarch64_data_threshold))
+ {
+ if (!aarch64_symbol_binds_local_p (x))
+ {
+ /* flag_pic is 2 only when -fPIC is on, in which case we
+ should use the 4G GOT. */
+ return flag_pic == 2 ? SYMBOL_SMALL_GOT_4G
+ : SYMBOL_SMALL_GOT_28K;
+ }
+ return SYMBOL_SMALL_ABSOLUTE;
+ }
+ }
+ if (!aarch64_symbol_binds_local_p (x))
+ {
+ return SYMBOL_MEDIUM_GOT_4G;
+ }
+ return SYMBOL_MEDIUM_ABSOLUTE;
+ }
+
case AARCH64_CMODEL_LARGE:
+ AARCH64_LARGE_ROUTINE:
/* This is alright even in PIC code as the constant
pool reference is always PC relative and within
the same translation unit. */
@@ -19352,6 +19509,8 @@ aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global)
case AARCH64_CMODEL_SMALL:
case AARCH64_CMODEL_SMALL_PIC:
case AARCH64_CMODEL_SMALL_SPIC:
+ case AARCH64_CMODEL_MEDIUM:
+ case AARCH64_CMODEL_MEDIUM_PIC:
/* text+got+data < 4Gb. 4-byte signed relocs are sufficient
for everything. */
type = DW_EH_PE_sdata4;
@@ -22605,7 +22764,14 @@ aarch64_empty_mask_is_expensive (unsigned)
bool
aarch64_use_pseudo_pic_reg (void)
{
- return aarch64_cmodel == AARCH64_CMODEL_SMALL_SPIC;
+ /* flag_pic is 2 when -fPIC is on, in which case we do not need the
+ pseudo PIC reg. In the medium code model combined with -fpie/-fpic,
+ a symbol smaller than -mlarge-data-threshold may still use the
+ SMALL_SPIC relocation, which does need the pseudo PIC reg.
+ Fixes the spill_1.c failure. */
+ return aarch64_cmodel == AARCH64_CMODEL_SMALL_SPIC
+ || (aarch64_cmodel == AARCH64_CMODEL_MEDIUM_PIC
+ && flag_pic != 2);
}
/* Implement TARGET_UNSPEC_MAY_TRAP_P. */
@@ -22615,6 +22781,7 @@ aarch64_unspec_may_trap_p (const_rtx x, unsigned flags)
{
switch (XINT (x, 1))
{
+ case UNSPEC_GOTMEDIUMPIC4G:
case UNSPEC_GOTSMALLPIC:
case UNSPEC_GOTSMALLPIC28K:
case UNSPEC_GOTTINYPIC:
@@ -22976,6 +23143,18 @@ aarch64_estimated_poly_value (poly_int64 val)
return val.coeffs[0] + val.coeffs[1] * over_128 / 128;
}
+/* Implement TARGET_MEDIUM_SYMBOL_P.
+ Return true if x is a symbol loaded by UNSPEC_LOAD_SYMBOL_MEDIUM. */
+bool
+aarch64_medium_symbol_p (rtx x)
+{
+ if (GET_CODE (x) != UNSPEC)
+ {
+ return false;
+ }
+ return XINT (x, 1) == UNSPEC_LOAD_SYMBOL_MEDIUM;
+}
+
/* Return true for types that could be supported as SIMD return or
argument types. */
@@ -24015,6 +24194,9 @@ aarch64_libgcc_floating_mode_supported_p
#undef TARGET_ESTIMATED_POLY_VALUE
#define TARGET_ESTIMATED_POLY_VALUE aarch64_estimated_poly_value
+#undef TARGET_MEDIUM_SYMBOL_P
+#define TARGET_MEDIUM_SYMBOL_P aarch64_medium_symbol_p
+
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE aarch64_attribute_table
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 51148846345..8fc92d13dcb 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -33,6 +33,10 @@
#define REGISTER_TARGET_PRAGMAS() aarch64_register_pragmas ()
+/* Default size threshold at or above which data is accessed through
+ 64-bit relocations under the AArch64 medium code model. */
+#define AARCH64_DEFAULT_LARGE_DATA_THRESHOLD 65536
+
/* Target machine storage layout. */
#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 58445dea941..ee80261f1ac 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -224,6 +224,9 @@
UNSPEC_RSQRTS
UNSPEC_NZCV
UNSPEC_XPACLRI
+ UNSPEC_GOTMEDIUMPIC4G
+ UNSPEC_GET_GOTOFF
+ UNSPEC_LOAD_SYMBOL_MEDIUM
UNSPEC_LD1_SVE
UNSPEC_ST1_SVE
UNSPEC_LDNT1_SVE
@@ -6792,6 +6795,39 @@
[(set_attr "type" "load_4")]
)
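+;; Compute the offset of a symbol's GOT entry from _GLOBAL_OFFSET_TABLE_
+;; with a movz/movk pair using the :gotoff_g1/:gotoff_g0_nc relocations
+;; (medium PIC code model).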
+(define_insn "get_gotoff_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (unspec:GPI [(match_operand 1 "aarch64_valid_symref" "S")]
+ UNSPEC_GET_GOTOFF))]
+ ""
+ "movz\\t%x0, :gotoff_g1:%A1\;movk\\t%x0, :gotoff_g0_nc:%A1"
+ [(set_attr "type" "multiple")
+ (set_attr "length" "8")]
+)
+
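+;; Load the GOT entry for the medium code model: operand 1 holds the GOT
+;; base and operand 2 the gotoff offset computed above.
+;; ldr_got_medium_sidi below is the zero-extending ILP32 variant.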
+(define_insn "ldr_got_medium_<mode>"
+ [(set (match_operand:PTR 0 "register_operand" "=r")
+ (unspec:PTR [(mem:PTR (lo_sum:PTR
+ (match_operand:PTR 1 "register_operand" "r")
+ (match_operand:PTR 2 "register_operand" "r")))]
+ UNSPEC_GOTMEDIUMPIC4G))]
+ ""
+ "ldr\\t%0, [%1, %2]"
+ [(set_attr "type" "load_4")]
+)
+
+(define_insn "ldr_got_medium_sidi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (unspec:SI [(mem:SI (lo_sum:DI
+ (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r")))]
+ UNSPEC_GOTMEDIUMPIC4G)))]
+ "TARGET_ILP32"
+ "ldr\\t%0, [%1, %2]"
+ [(set_attr "type" "load_4")]
+)
+
(define_insn "ldr_got_small_28k_<mode>"
[(set (match_operand:PTR 0 "register_operand" "=r")
(unspec:PTR [(mem:PTR (lo_sum:PTR
@@ -6955,6 +6991,23 @@
(set_attr "length" "12")]
)
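+;; Materialize a 64-bit PC-relative address for the medium code model:
+;; the movz/movk chain with the :prel_g3..:prel_g0_nc relocations builds
+;; the offset of the symbol from the PC, adr/sub recover that PC into the
+;; clobbered scratch (operand 1), and the final add yields the absolute
+;; address (7 insns, 28 bytes, matching the length attribute).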
+(define_insn "load_symbol_medium_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (unspec:GPI [(match_operand 2 "aarch64_valid_symref" "S")]
+ UNSPEC_LOAD_SYMBOL_MEDIUM))
+ (clobber (match_operand:GPI 1 "register_operand" "=r"))]
+ ""
+ "movz\\t%x0, :prel_g3:%A2\;\\
+movk\\t%x0, :prel_g2_nc:%A2\;\\
+movk\\t%x0, :prel_g1_nc:%A2\;\\
+movk\\t%x0, :prel_g0_nc:%A2\;\\
+adr\\t%x1, .\;\\
+sub\\t%x1, %x1, 0x4\;\\
+add\\t%x0, %x0, %x1"
+ [(set_attr "type" "multiple")
+ (set_attr "length" "28")]
+)
+
(define_expand "tlsdesc_small_<mode>"
[(unspec:PTR [(match_operand 0 "aarch64_valid_symref")] UNSPEC_TLSDESC)]
"TARGET_TLS_DESC"
diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
index 4539156d6f4..bb888461ab0 100644
--- a/gcc/config/aarch64/aarch64.opt
+++ b/gcc/config/aarch64/aarch64.opt
@@ -27,6 +27,10 @@ enum aarch64_processor explicit_tune_core = aarch64_none
TargetVariable
enum aarch64_arch explicit_arch = aarch64_no_arch
+;; -mlarge-data-threshold=
+TargetSave
+int x_aarch64_data_threshold
+
TargetSave
const char *x_aarch64_override_tune_string
@@ -60,9 +64,16 @@ Enum(cmodel) String(tiny) Value(AARCH64_CMODEL_TINY)
EnumValue
Enum(cmodel) String(small) Value(AARCH64_CMODEL_SMALL)
+EnumValue
+Enum(cmodel) String(medium) Value(AARCH64_CMODEL_MEDIUM)
+
EnumValue
Enum(cmodel) String(large) Value(AARCH64_CMODEL_LARGE)
+mlarge-data-threshold=
+Target RejectNegative Joined UInteger Var(aarch64_data_threshold) Init(AARCH64_DEFAULT_LARGE_DATA_THRESHOLD)
+-mlarge-data-threshold=<number> Assume data larger than the given threshold needs 64-bit relocations.
+
mbig-endian
Target Report RejectNegative Mask(BIG_END)
Assume target CPU is configured as big endian.
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index fcb7245e95c..0508fce57a7 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -6983,6 +6983,11 @@ things like cost calculations or profiling frequencies. The default
implementation returns the lowest possible value of @var{val}.
@end deftypefn
+@deftypefn {Target Hook} bool TARGET_MEDIUM_SYMBOL_P (rtx @var{x})
+Return true if the input rtx is a symbol loaded by the kunpeng medium code
+model.
+@end deftypefn
+
@node Scheduling
@section Adjusting the Instruction Scheduler
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index c17209daa51..3b70ea4841a 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -4701,6 +4701,8 @@ Define this macro if a non-short-circuit operation produced by
@hook TARGET_ESTIMATED_POLY_VALUE
+@hook TARGET_MEDIUM_SYMBOL_P
+
@node Scheduling
@section Adjusting the Instruction Scheduler
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index d61cadb5208..bad8208cd22 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -14501,14 +14501,17 @@ const_ok_for_output_1 (rtx rtl)
/* If delegitimize_address couldn't do anything with the UNSPEC, and
the target hook doesn't explicitly allow it in debug info, assume
we can't express it in the debug info. */
- /* Don't complain about TLS UNSPECs, those are just too hard to
- delegitimize. Note this could be a non-decl SYMBOL_REF such as
- one in a constant pool entry, so testing SYMBOL_REF_TLS_MODEL
- rather than DECL_THREAD_LOCAL_P is not just an optimization. */
+ /* Don't complain about TLS UNSPECs and aarch64 medium code model
+ related UNSPECs, those are just too hard to delegitimize. Note
+ this could be a non-decl SYMBOL_REF such as one in a constant
+ pool entry, so testing SYMBOL_REF_TLS_MODEL rather than
+ DECL_THREAD_LOCAL_P is not just an optimization. */
if (flag_checking
&& (XVECLEN (rtl, 0) == 0
|| GET_CODE (XVECEXP (rtl, 0, 0)) != SYMBOL_REF
- || SYMBOL_REF_TLS_MODEL (XVECEXP (rtl, 0, 0)) == TLS_MODEL_NONE))
+ || (!targetm.medium_symbol_p (rtl)
+ && SYMBOL_REF_TLS_MODEL (XVECEXP (rtl, 0, 0))
+ == TLS_MODEL_NONE)))
inform (current_function_decl
? DECL_SOURCE_LOCATION (current_function_decl)
: UNKNOWN_LOCATION,
diff --git a/gcc/target.def b/gcc/target.def
index f5a6d507e91..2020564118b 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -3869,6 +3869,13 @@ implementation returns the lowest possible value of @var{val}.",
HOST_WIDE_INT, (poly_int64 val),
default_estimated_poly_value)
+DEFHOOK
+(medium_symbol_p,
+ "Return true if the input rtx is a symbol loaded by kunpeng medium code\n\
+model.",
+ bool, (rtx x),
+ default_medium_symbol_p)
+
/* Permit speculative instructions in delay slots during delayed-branch
scheduling. */
DEFHOOK
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index 7cb04f30bdb..43a9f0cdf5b 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -1708,6 +1708,13 @@ default_estimated_poly_value (poly_int64 x)
return x.coeffs[0];
}
+/* The default implementation of TARGET_MEDIUM_SYMBOL_P. */
+bool
+default_medium_symbol_p (rtx x ATTRIBUTE_UNUSED)
+{
+ return false;
+}
+
/* For hooks which use the MOVE_RATIO macro, this gives the legacy default
behavior. SPEED_P is true if we are compiling for speed. */
--
2.21.0.windows.1