gcc: add an updated version of 4.4.1+cs - successfully builds a toolchain now, please...
[openwrt.git] / toolchain / gcc / patches / 4.4.1+cs / 000-codesourcery_2010_71.patch
1 --- a/gcc/addresses.h
2 +++ b/gcc/addresses.h
3 @@ -78,3 +78,42 @@
4
5 return ok_for_base_p_1 (regno, mode, outer_code, index_code);
6 }
7 +
8 +/* Wrapper function to unify target macros MODE_INDEX_REG_CLASS and
9 + INDEX_REG_CLASS. Arguments as for the MODE_INDEX_REG_CLASS macro. */
10 +
11 +static inline enum reg_class
12 +index_reg_class (enum machine_mode mode ATTRIBUTE_UNUSED)
13 +{
14 +#ifdef MODE_INDEX_REG_CLASS
15 + return MODE_INDEX_REG_CLASS (mode);
16 +#else
17 + return INDEX_REG_CLASS;
18 +#endif
19 +}
20 +
21 +/* Wrapper function to unify target macros REGNO_MODE_OK_FOR_INDEX_P
22 + and REGNO_OK_FOR_INDEX_P. Arguments as for the
23 + REGNO_MODE_OK_FOR_INDEX_P macro. */
24 +
25 +static inline bool
26 +ok_for_index_p_1 (unsigned regno, enum machine_mode mode ATTRIBUTE_UNUSED)
27 +{
28 +#ifdef REGNO_MODE_OK_FOR_INDEX_P
29 + return REGNO_MODE_OK_FOR_INDEX_P (regno, mode);
30 +#else
31 + return REGNO_OK_FOR_INDEX_P (regno);
32 +#endif
33 +}
34 +
35 +/* Wrapper around ok_for_index_p_1, for use after register allocation is
36 + complete. Arguments as for the called function. */
37 +
38 +static inline bool
39 +regno_ok_for_index_p (unsigned regno, enum machine_mode mode)
40 +{
41 + if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0)
42 + regno = reg_renumber[regno];
43 +
44 + return ok_for_index_p_1 (regno, mode);
45 +}
46 --- a/gcc/calls.c
47 +++ b/gcc/calls.c
48 @@ -3803,7 +3803,7 @@
49 cse'ing of library calls could delete a call and leave the pop. */
50 NO_DEFER_POP;
51 valreg = (mem_value == 0 && outmode != VOIDmode
52 - ? hard_libcall_value (outmode) : NULL_RTX);
53 + ? hard_libcall_value (outmode, orgfun) : NULL_RTX);
54
55 /* Stack must be properly aligned now. */
56 gcc_assert (!(stack_pointer_delta
57 @@ -4048,8 +4048,17 @@
58 /* We need to make a save area. */
59 unsigned int size = arg->locate.size.constant * BITS_PER_UNIT;
60 enum machine_mode save_mode = mode_for_size (size, MODE_INT, 1);
61 - rtx adr = memory_address (save_mode, XEXP (arg->stack_slot, 0));
62 - rtx stack_area = gen_rtx_MEM (save_mode, adr);
63 + rtx adr;
64 + rtx stack_area;
65 +
66 + /* We can only use save_mode if the arg is sufficiently
67 + aligned. */
68 + if (STRICT_ALIGNMENT
69 + && GET_MODE_ALIGNMENT (save_mode) > arg->locate.boundary)
70 + save_mode = BLKmode;
71 +
72 + adr = memory_address (save_mode, XEXP (arg->stack_slot, 0));
73 + stack_area = gen_rtx_MEM (save_mode, adr);
74
75 if (save_mode == BLKmode)
76 {
77 --- a/gcc/c-common.c
78 +++ b/gcc/c-common.c
79 @@ -33,7 +33,6 @@
80 #include "varray.h"
81 #include "expr.h"
82 #include "c-common.h"
83 -#include "diagnostic.h"
84 #include "tm_p.h"
85 #include "obstack.h"
86 #include "cpplib.h"
87 @@ -42,6 +41,7 @@
88 #include "tree-inline.h"
89 #include "c-tree.h"
90 #include "toplev.h"
91 +#include "diagnostic.h"
92 #include "tree-iterator.h"
93 #include "hashtab.h"
94 #include "tree-mudflap.h"
95 @@ -497,6 +497,10 @@
96 This is a count, since unevaluated expressions can nest. */
97 int skip_evaluation;
98
99 +/* Whether lexing has been completed, so subsequent preprocessor
100 + errors should use the compiler's input_location. */
101 +bool done_lexing = false;
102 +
103 /* Information about how a function name is generated. */
104 struct fname_var_t
105 {
106 @@ -7522,6 +7526,68 @@
107 #undef catenate_messages
108 }
109
110 +/* Callback from cpp_error for PFILE to print diagnostics from the
111 + preprocessor. The diagnostic is of type LEVEL, at location
112 + LOCATION unless this is after lexing and the compiler's location
113 + should be used instead, with column number possibly overridden by
114 + COLUMN_OVERRIDE if not zero; MSG is the translated message and AP
115 + the arguments. Returns true if a diagnostic was emitted, false
116 + otherwise. */
117 +
118 +bool
119 +c_cpp_error (cpp_reader *pfile ATTRIBUTE_UNUSED, int level,
120 + location_t location, unsigned int column_override,
121 + const char *msg, va_list *ap)
122 +{
123 + diagnostic_info diagnostic;
124 + diagnostic_t dlevel;
125 + int save_warn_system_headers = warn_system_headers;
126 + bool ret;
127 +
128 + switch (level)
129 + {
130 + case CPP_DL_WARNING_SYSHDR:
131 + if (flag_no_output)
132 + return false;
133 + warn_system_headers = 1;
134 + /* Fall through. */
135 + case CPP_DL_WARNING:
136 + if (flag_no_output)
137 + return false;
138 + dlevel = DK_WARNING;
139 + break;
140 + case CPP_DL_PEDWARN:
141 + if (flag_no_output && !flag_pedantic_errors)
142 + return false;
143 + dlevel = DK_PEDWARN;
144 + break;
145 + case CPP_DL_ERROR:
146 + dlevel = DK_ERROR;
147 + break;
148 + case CPP_DL_ICE:
149 + dlevel = DK_ICE;
150 + break;
151 + case CPP_DL_NOTE:
152 + dlevel = DK_NOTE;
153 + break;
154 + case CPP_DL_FATAL:
155 + dlevel = DK_FATAL;
156 + break;
157 + default:
158 + gcc_unreachable ();
159 + }
160 + if (done_lexing)
161 + location = input_location;
162 + diagnostic_set_info_translated (&diagnostic, msg, ap,
163 + location, dlevel);
164 + if (column_override)
165 + diagnostic_override_column (&diagnostic, column_override);
166 + ret = report_diagnostic (&diagnostic);
167 + if (level == CPP_DL_WARNING_SYSHDR)
168 + warn_system_headers = save_warn_system_headers;
169 + return ret;
170 +}
171 +
172 /* Walk a gimplified function and warn for functions whose return value is
173 ignored and attribute((warn_unused_result)) is set. This is done before
174 inlining, so we don't have to worry about that. */
175 --- a/gcc/c-common.h
176 +++ b/gcc/c-common.h
177 @@ -658,6 +658,11 @@
178
179 extern int skip_evaluation;
180
181 +/* Whether lexing has been completed, so subsequent preprocessor
182 + errors should use the compiler's input_location. */
183 +
184 +extern bool done_lexing;
185 +
186 /* C types are partitioned into three subsets: object, function, and
187 incomplete types. */
188 #define C_TYPE_OBJECT_P(type) \
189 --- a/gcc/c-convert.c
190 +++ b/gcc/c-convert.c
191 @@ -70,6 +70,7 @@
192 tree e = expr;
193 enum tree_code code = TREE_CODE (type);
194 const char *invalid_conv_diag;
195 + tree ret;
196
197 if (type == error_mark_node
198 || expr == error_mark_node
199 @@ -85,6 +86,9 @@
200
201 if (type == TREE_TYPE (expr))
202 return expr;
203 + ret = targetm.convert_to_type (type, expr);
204 + if (ret)
205 + return ret;
206
207 if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr)))
208 return fold_convert (type, expr);
209 --- a/gcc/c-decl.c
210 +++ b/gcc/c-decl.c
211 @@ -3994,6 +3994,7 @@
212 bool bitfield = width != NULL;
213 tree element_type;
214 struct c_arg_info *arg_info = 0;
215 + const char *errmsg;
216
217 if (decl_context == FUNCDEF)
218 funcdef_flag = true, decl_context = NORMAL;
219 @@ -4531,6 +4532,12 @@
220 error ("%qs declared as function returning an array", name);
221 type = integer_type_node;
222 }
223 + errmsg = targetm.invalid_return_type (type);
224 + if (errmsg)
225 + {
226 + error (errmsg);
227 + type = integer_type_node;
228 + }
229
230 /* Construct the function type and go to the next
231 inner layer of declarator. */
232 @@ -5044,6 +5051,7 @@
233 {
234 tree parm, type, typelt;
235 unsigned int parmno;
236 + const char *errmsg;
237
238 /* If there is a parameter of incomplete type in a definition,
239 this is an error. In a declaration this is valid, and a
240 @@ -5087,6 +5095,14 @@
241 }
242 }
243
244 + errmsg = targetm.invalid_parameter_type (type);
245 + if (errmsg)
246 + {
247 + error (errmsg);
248 + TREE_VALUE (typelt) = error_mark_node;
249 + TREE_TYPE (parm) = error_mark_node;
250 + }
251 +
252 if (DECL_NAME (parm) && TREE_USED (parm))
253 warn_if_shadowing (parm);
254 }
255 @@ -8071,7 +8087,7 @@
256
257 /* Don't waste time on further processing if -fsyntax-only or we've
258 encountered errors. */
259 - if (flag_syntax_only || errorcount || sorrycount || cpp_errors (parse_in))
260 + if (flag_syntax_only || errorcount || sorrycount)
261 return;
262
263 /* Close the external scope. */
264 --- a/gcc/cfgexpand.c
265 +++ b/gcc/cfgexpand.c
266 @@ -488,7 +488,8 @@
267 {
268 unsigned int align;
269
270 - align = LOCAL_DECL_ALIGNMENT (decl);
271 + align = alignment_for_aligned_arrays (TREE_TYPE (decl),
272 + LOCAL_DECL_ALIGNMENT (decl));
273
274 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
275 align = MAX_SUPPORTED_STACK_ALIGNMENT;
276 --- a/gcc/cgraph.c
277 +++ b/gcc/cgraph.c
278 @@ -475,9 +475,11 @@
279 if (DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL)
280 {
281 node->origin = cgraph_node (DECL_CONTEXT (decl));
282 + node->origin->ever_was_nested = 1;
283 node->next_nested = node->origin->nested;
284 node->origin->nested = node;
285 node->master_clone = node;
286 + node->ever_was_nested = 1;
287 }
288 if (assembler_name_hash)
289 {
290 --- a/gcc/cgraph.h
291 +++ b/gcc/cgraph.h
292 @@ -185,6 +185,8 @@
293 unsigned output : 1;
294 /* Set for aliases once they got through assemble_alias. */
295 unsigned alias : 1;
296 + /* Set if the function is a nested function or has nested functions. */
297 + unsigned ever_was_nested : 1;
298
299 /* In non-unit-at-a-time mode the function body of inline candidates is saved
300 into clone before compiling so the function in original form can be
301 --- a/gcc/common.opt
302 +++ b/gcc/common.opt
303 @@ -153,6 +153,10 @@
304 Common Var(warn_padded) Warning
305 Warn when padding is required to align structure members
306
307 +Wpoison-system-directories
308 +Common Var(flag_poison_system_directories) Init(1)
309 +Warn for -I and -L options using system directories if cross compiling
310 +
311 Wshadow
312 Common Var(warn_shadow) Warning
313 Warn when one local variable shadows another
314 @@ -270,6 +274,12 @@
315 fabi-version=
316 Common Joined UInteger Var(flag_abi_version) Init(2)
317
318 +falign-arrays
319 +Target Report Var(flag_align_arrays)
320 +Set the minimum alignment for array variables to be the largest power
321 +of two less than or equal to their total storage size, or the biggest
322 +alignment used on the machine, whichever is smaller.
323 +
324 falign-functions
325 Common Report Var(align_functions,0) Optimization UInteger
326 Align the start of functions
327 @@ -467,6 +477,10 @@
328 Common Report Var(flag_early_inlining) Init(1) Optimization
329 Perform early inlining
330
331 +feglibc=
332 +Common Report Joined Undocumented
333 +EGLIBC configuration specifier, serves multilib purposes.
334 +
335 feliminate-dwarf2-dups
336 Common Report Var(flag_eliminate_dwarf2_dups)
337 Perform DWARF2 duplicate elimination
338 @@ -895,6 +909,10 @@
339 Common Report Var(flag_profile_values)
340 Insert code to profile values of expressions
341
342 +fpromote-loop-indices
343 +Common Report Var(flag_promote_loop_indices) Optimization
344 +Promote loop indices to word-sized indices when safe
345 +
346 frandom-seed
347 Common
348
349 @@ -1227,6 +1245,15 @@
350 Common Report Var(flag_tree_pre) Optimization
351 Enable SSA-PRE optimization on trees
352
353 +ftree-pre-partial-partial
354 +Common Report Var(flag_tree_pre_partial_partial) Optimization
355 +In SSA-PRE optimization on trees, enable partial-partial redundancy elimination.
356 +
357 +ftree-pre-partial-partial-obliviously
358 +Common Report Var(flag_tree_pre_partial_partial_obliviously) Optimization
359 +In SSA-PRE optimization on trees, enable partial-partial redundancy
360 +elimination without regard for the cost of the inserted phi nodes.
361 +
362 ftree-reassoc
363 Common Report Var(flag_tree_reassoc) Init(1) Optimization
364 Enable reassociation on tree level
365 --- a/gcc/config/arm/arm.c
366 +++ b/gcc/config/arm/arm.c
367 @@ -43,6 +43,7 @@
368 #include "optabs.h"
369 #include "toplev.h"
370 #include "recog.h"
371 +#include "cgraph.h"
372 #include "ggc.h"
373 #include "except.h"
374 #include "c-pragma.h"
375 @@ -53,6 +54,8 @@
376 #include "debug.h"
377 #include "langhooks.h"
378 #include "df.h"
379 +#include "intl.h"
380 +#include "params.h"
381
382 /* Forward definitions of types. */
383 typedef struct minipool_node Mnode;
384 @@ -110,6 +113,7 @@
385 static unsigned long arm_isr_value (tree);
386 static unsigned long arm_compute_func_type (void);
387 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
388 +static tree arm_handle_pcs_attribute (tree *, tree, tree, int, bool *);
389 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
390 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
391 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
392 @@ -123,6 +127,10 @@
393 static int count_insns_for_constant (HOST_WIDE_INT, int);
394 static int arm_get_strip_length (int);
395 static bool arm_function_ok_for_sibcall (tree, tree);
396 +static bool arm_return_in_memory (const_tree, const_tree);
397 +static rtx arm_function_value (const_tree, const_tree, bool);
398 +static rtx arm_libcall_value (enum machine_mode, rtx);
399 +
400 static void arm_internal_label (FILE *, const char *, unsigned long);
401 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
402 tree);
403 @@ -148,6 +156,9 @@
404 static rtx emit_set_insn (rtx, rtx);
405 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
406 tree, bool);
407 +static rtx aapcs_allocate_return_reg (enum machine_mode, const_tree,
408 + const_tree);
409 +static int aapcs_select_return_coproc (const_tree, const_tree);
410
411 #ifdef OBJECT_FORMAT_ELF
412 static void arm_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
413 @@ -175,6 +186,7 @@
414 static bool arm_output_ttype (rtx);
415 #endif
416 static void arm_dwarf_handle_frame_unspec (const char *, rtx, int);
417 +static rtx arm_dwarf_register_span(rtx);
418
419 static tree arm_cxx_guard_type (void);
420 static bool arm_cxx_guard_mask_bit (void);
421 @@ -197,6 +209,15 @@
422 static int arm_issue_rate (void);
423 static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
424 static bool arm_allocate_stack_slots_for_args (void);
425 +static bool arm_warn_func_result (void);
426 +static int arm_multipass_dfa_lookahead (void);
427 +static const char *arm_invalid_parameter_type (const_tree t);
428 +static const char *arm_invalid_return_type (const_tree t);
429 +static tree arm_promoted_type (const_tree t);
430 +static tree arm_convert_to_type (tree type, tree expr);
431 +static bool arm_scalar_mode_supported_p (enum machine_mode);
432 +static int arm_vector_min_alignment (const_tree type);
433 +static bool arm_vector_always_misalign(const_tree);
434
435 \f
436 /* Initialize the GCC target structure. */
437 @@ -256,6 +277,12 @@
438 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
439 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
440
441 +#undef TARGET_FUNCTION_VALUE
442 +#define TARGET_FUNCTION_VALUE arm_function_value
443 +
444 +#undef TARGET_LIBCALL_VALUE
445 +#define TARGET_LIBCALL_VALUE arm_libcall_value
446 +
447 #undef TARGET_ASM_OUTPUT_MI_THUNK
448 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
449 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
450 @@ -299,6 +326,9 @@
451 #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
452 #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS arm_allocate_stack_slots_for_args
453
454 +#undef TARGET_WARN_FUNC_RESULT
455 +#define TARGET_WARN_FUNC_RESULT arm_warn_func_result
456 +
457 #undef TARGET_DEFAULT_SHORT_ENUMS
458 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
459
460 @@ -353,6 +383,9 @@
461 #undef TARGET_ASM_TTYPE
462 #define TARGET_ASM_TTYPE arm_output_ttype
463
464 +#undef TARGET_CXX_TTYPE_REF_ENCODE
465 +#define TARGET_CXX_TTYPE_REF_ENCODE hook_cxx_ttype_ref_in_bit0
466 +
467 #undef TARGET_ARM_EABI_UNWINDER
468 #define TARGET_ARM_EABI_UNWINDER true
469 #endif /* TARGET_UNWIND_INFO */
470 @@ -360,6 +393,9 @@
471 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
472 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC arm_dwarf_handle_frame_unspec
473
474 +#undef TARGET_DWARF_REGISTER_SPAN
475 +#define TARGET_DWARF_REGISTER_SPAN arm_dwarf_register_span
476 +
477 #undef TARGET_CANNOT_COPY_INSN_P
478 #define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p
479
480 @@ -398,6 +434,30 @@
481 #define TARGET_ASM_OUTPUT_DWARF_DTPREL arm_output_dwarf_dtprel
482 #endif
483
484 +#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
485 +#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD arm_multipass_dfa_lookahead
486 +
487 +#undef TARGET_INVALID_PARAMETER_TYPE
488 +#define TARGET_INVALID_PARAMETER_TYPE arm_invalid_parameter_type
489 +
490 +#undef TARGET_INVALID_RETURN_TYPE
491 +#define TARGET_INVALID_RETURN_TYPE arm_invalid_return_type
492 +
493 +#undef TARGET_PROMOTED_TYPE
494 +#define TARGET_PROMOTED_TYPE arm_promoted_type
495 +
496 +#undef TARGET_CONVERT_TO_TYPE
497 +#define TARGET_CONVERT_TO_TYPE arm_convert_to_type
498 +
499 +#undef TARGET_SCALAR_MODE_SUPPORTED_P
500 +#define TARGET_SCALAR_MODE_SUPPORTED_P arm_scalar_mode_supported_p
501 +
502 +#undef TARGET_VECTOR_MIN_ALIGNMENT
503 +#define TARGET_VECTOR_MIN_ALIGNMENT arm_vector_min_alignment
504 +
505 +#undef TARGET_VECTOR_ALWAYS_MISALIGN
506 +#define TARGET_VECTOR_ALWAYS_MISALIGN arm_vector_always_misalign
507 +
508 struct gcc_target targetm = TARGET_INITIALIZER;
509 \f
510 /* Obstack for minipool constant handling. */
511 @@ -423,18 +483,18 @@
512 /* The default processor used if not overridden by commandline. */
513 static enum processor_type arm_default_cpu = arm_none;
514
515 -/* Which floating point model to use. */
516 -enum arm_fp_model arm_fp_model;
517 -
518 -/* Which floating point hardware is available. */
519 -enum fputype arm_fpu_arch;
520 -
521 /* Which floating point hardware to schedule for. */
522 -enum fputype arm_fpu_tune;
523 +int arm_fpu_attr;
524 +
525 +/* Which floating point hardware to use. */
526 +const struct arm_fpu_desc *arm_fpu_desc;
527
528 /* Whether to use floating point hardware. */
529 enum float_abi_type arm_float_abi;
530
531 +/* Which __fp16 format to use. */
532 +enum arm_fp16_format_type arm_fp16_format;
533 +
534 /* Which ABI to use. */
535 enum arm_abi_type arm_abi;
536
537 @@ -473,9 +533,19 @@
538 #define FL_DIV (1 << 18) /* Hardware divide. */
539 #define FL_VFPV3 (1 << 19) /* Vector Floating Point V3. */
540 #define FL_NEON (1 << 20) /* Neon instructions. */
541 +#define FL_MARVELL_F (1 << 21) /* Marvell Feroceon. */
542 +#define FL_ARCH7EM (1 << 22) /* Instructions present in ARMv7E-M. */
543
544 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
545
546 +/* Some flags are ignored when comparing -mcpu and -march:
547 + FL_MARVELL_F so that -mcpu=marvell-f -march=v5te works.
548 + FL_LDSCHED and FL_WBUF only affect tuning,
549 + FL_CO_PROC, FL_VFPV2, FL_VFPV3 and FL_NEON because FP
550 + coprocessors are handled separately. */
551 +#define FL_COMPAT (FL_MARVELL_F | FL_LDSCHED | FL_WBUF | FL_CO_PROC | \
552 + FL_VFPV2 | FL_VFPV3 | FL_NEON)
553 +
554 #define FL_FOR_ARCH2 FL_NOTM
555 #define FL_FOR_ARCH3 (FL_FOR_ARCH2 | FL_MODE32)
556 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
557 @@ -497,6 +567,7 @@
558 #define FL_FOR_ARCH7A (FL_FOR_ARCH7 | FL_NOTM)
559 #define FL_FOR_ARCH7R (FL_FOR_ARCH7A | FL_DIV)
560 #define FL_FOR_ARCH7M (FL_FOR_ARCH7 | FL_DIV)
561 +#define FL_FOR_ARCH7EM (FL_FOR_ARCH7M | FL_ARCH7EM)
562
563 /* The bits in this mask specify which
564 instructions we are allowed to generate. */
565 @@ -533,6 +604,9 @@
566 /* Nonzero if instructions not present in the 'M' profile can be used. */
567 int arm_arch_notm = 0;
568
569 +/* Nonzero if instructions present in ARMv7E-M can be used. */
570 +int arm_arch7em = 0;
571 +
572 /* Nonzero if this chip can benefit from load scheduling. */
573 int arm_ld_sched = 0;
574
575 @@ -551,6 +625,9 @@
576 /* Nonzero if tuning for XScale */
577 int arm_tune_xscale = 0;
578
579 +/* Nonzero if tuning for Marvell Feroceon. */
580 +int arm_tune_marvell_f = 0;
581 +
582 /* Nonzero if we want to tune for stores that access the write-buffer.
583 This typically means an ARM6 or ARM7 with MMU or MPU. */
584 int arm_tune_wbuf = 0;
585 @@ -561,6 +638,9 @@
586 /* Nonzero if generating Thumb instructions. */
587 int thumb_code = 0;
588
589 +/* Nonzero if generating code for Janus2. */
590 +int janus2_code = 0;
591 +
592 /* Nonzero if we should define __THUMB_INTERWORK__ in the
593 preprocessor.
594 XXX This is a bit of a hack, it's intended to help work around
595 @@ -593,6 +673,8 @@
596 /* The maximum number of insns to be used when loading a constant. */
597 static int arm_constant_limit = 3;
598
599 +static enum arm_pcs arm_pcs_default;
600 +
601 /* For an explanation of these variables, see final_prescan_insn below. */
602 int arm_ccfsm_state;
603 /* arm_current_cc is also used for Thumb-2 cond_exec blocks. */
604 @@ -673,9 +755,11 @@
605 {"armv7-a", cortexa8, "7A", FL_CO_PROC | FL_FOR_ARCH7A, NULL},
606 {"armv7-r", cortexr4, "7R", FL_CO_PROC | FL_FOR_ARCH7R, NULL},
607 {"armv7-m", cortexm3, "7M", FL_CO_PROC | FL_FOR_ARCH7M, NULL},
608 + {"armv7e-m", cortexm3, "7EM", FL_CO_PROC | FL_FOR_ARCH7EM, NULL},
609 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
610 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
611 {"iwmmxt2", iwmmxt2, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
612 + {"marvell-f", marvell_f, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE | FL_MARVELL_F, NULL},
613 {NULL, arm_none, NULL, 0 , NULL}
614 };
615
616 @@ -705,49 +789,34 @@
617
618 /* The name of the preprocessor macro to define for this architecture. */
619
620 -char arm_arch_name[] = "__ARM_ARCH_0UNK__";
621 -
622 -struct fpu_desc
623 -{
624 - const char * name;
625 - enum fputype fpu;
626 -};
627 -
628 +#define ARM_ARCH_NAME_SIZE 25
629 +char arm_arch_name[ARM_ARCH_NAME_SIZE] = "__ARM_ARCH_0UNK__";
630
631 /* Available values for -mfpu=. */
632
633 -static const struct fpu_desc all_fpus[] =
634 +static const struct arm_fpu_desc all_fpus[] =
635 {
636 - {"fpa", FPUTYPE_FPA},
637 - {"fpe2", FPUTYPE_FPA_EMU2},
638 - {"fpe3", FPUTYPE_FPA_EMU2},
639 - {"maverick", FPUTYPE_MAVERICK},
640 - {"vfp", FPUTYPE_VFP},
641 - {"vfp3", FPUTYPE_VFP3},
642 - {"vfpv3", FPUTYPE_VFP3},
643 - {"vfpv3-d16", FPUTYPE_VFP3D16},
644 - {"neon", FPUTYPE_NEON}
645 + {"fpa", ARM_FP_MODEL_FPA, 0, 0, false, false},
646 + {"fpe2", ARM_FP_MODEL_FPA, 2, 0, false, false},
647 + {"fpe3", ARM_FP_MODEL_FPA, 3, 0, false, false},
648 + {"maverick", ARM_FP_MODEL_MAVERICK, 0, 0, false, false},
649 + {"vfp", ARM_FP_MODEL_VFP, 2, VFP_REG_D16, false, false},
650 + {"vfpv3", ARM_FP_MODEL_VFP, 3, VFP_REG_D32, false, false},
651 + {"vfpv3-fp16", ARM_FP_MODEL_VFP, 3, VFP_REG_D32, false, true },
652 + {"vfpv3-d16", ARM_FP_MODEL_VFP, 3, VFP_REG_D16, false, false},
653 + {"vfpv3xd", ARM_FP_MODEL_VFP, 3, VFP_REG_SINGLE, false, false},
654 + {"vfpv3xd-fp16", ARM_FP_MODEL_VFP, 3, VFP_REG_SINGLE, false, true },
655 + {"vfpv3-d16-fp16", ARM_FP_MODEL_VFP, 3, VFP_REG_D16, false, true },
656 + {"neon", ARM_FP_MODEL_VFP, 3, VFP_REG_D32, true , false},
657 + {"neon-fp16", ARM_FP_MODEL_VFP, 3, VFP_REG_D32, true , true },
658 + {"vfpv4", ARM_FP_MODEL_VFP, 4, VFP_REG_D32, false, true },
659 + {"vfpv4-d16", ARM_FP_MODEL_VFP, 4, VFP_REG_D16, false, true },
660 + {"fpv4-sp-d16", ARM_FP_MODEL_VFP, 4, VFP_REG_SINGLE, false, true },
661 + {"neon-vfpv4", ARM_FP_MODEL_VFP, 4, VFP_REG_D32, true , true },
662 + /* Compatibility aliases. */
663 + {"vfp3", ARM_FP_MODEL_VFP, 3, VFP_REG_D32, false, false},
664 };
665
666 -
667 -/* Floating point models used by the different hardware.
668 - See fputype in arm.h. */
669 -
670 -static const enum fputype fp_model_for_fpu[] =
671 -{
672 - /* No FP hardware. */
673 - ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
674 - ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
675 - ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
676 - ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
677 - ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
678 - ARM_FP_MODEL_VFP, /* FPUTYPE_VFP */
679 - ARM_FP_MODEL_VFP, /* FPUTYPE_VFP3D16 */
680 - ARM_FP_MODEL_VFP, /* FPUTYPE_VFP3 */
681 - ARM_FP_MODEL_VFP /* FPUTYPE_NEON */
682 -};
683 -
684 -
685 struct float_abi
686 {
687 const char * name;
688 @@ -765,6 +834,23 @@
689 };
690
691
692 +struct fp16_format
693 +{
694 + const char *name;
695 + enum arm_fp16_format_type fp16_format_type;
696 +};
697 +
698 +
699 +/* Available values for -mfp16-format=. */
700 +
701 +static const struct fp16_format all_fp16_formats[] =
702 +{
703 + {"none", ARM_FP16_FORMAT_NONE},
704 + {"ieee", ARM_FP16_FORMAT_IEEE},
705 + {"alternative", ARM_FP16_FORMAT_ALTERNATIVE}
706 +};
707 +
708 +
709 struct abi_name
710 {
711 const char *name;
712 @@ -922,6 +1008,44 @@
713 set_optab_libfunc (umod_optab, DImode, NULL);
714 set_optab_libfunc (smod_optab, SImode, NULL);
715 set_optab_libfunc (umod_optab, SImode, NULL);
716 +
717 + /* Half-precision float operations. The compiler handles all operations
718 + with NULL libfuncs by converting the SFmode. */
719 + switch (arm_fp16_format)
720 + {
721 + case ARM_FP16_FORMAT_IEEE:
722 + case ARM_FP16_FORMAT_ALTERNATIVE:
723 +
724 + /* Conversions. */
725 + set_conv_libfunc (trunc_optab, HFmode, SFmode,
726 + (arm_fp16_format == ARM_FP16_FORMAT_IEEE
727 + ? "__gnu_f2h_ieee"
728 + : "__gnu_f2h_alternative"));
729 + set_conv_libfunc (sext_optab, SFmode, HFmode,
730 + (arm_fp16_format == ARM_FP16_FORMAT_IEEE
731 + ? "__gnu_h2f_ieee"
732 + : "__gnu_h2f_alternative"));
733 +
734 + /* Arithmetic. */
735 + set_optab_libfunc (add_optab, HFmode, NULL);
736 + set_optab_libfunc (sdiv_optab, HFmode, NULL);
737 + set_optab_libfunc (smul_optab, HFmode, NULL);
738 + set_optab_libfunc (neg_optab, HFmode, NULL);
739 + set_optab_libfunc (sub_optab, HFmode, NULL);
740 +
741 + /* Comparisons. */
742 + set_optab_libfunc (eq_optab, HFmode, NULL);
743 + set_optab_libfunc (ne_optab, HFmode, NULL);
744 + set_optab_libfunc (lt_optab, HFmode, NULL);
745 + set_optab_libfunc (le_optab, HFmode, NULL);
746 + set_optab_libfunc (ge_optab, HFmode, NULL);
747 + set_optab_libfunc (gt_optab, HFmode, NULL);
748 + set_optab_libfunc (unord_optab, HFmode, NULL);
749 + break;
750 +
751 + default:
752 + break;
753 + }
754 }
755
756 /* On AAPCS systems, this is the "struct __va_list". */
757 @@ -1135,6 +1259,7 @@
758 arm_override_options (void)
759 {
760 unsigned i;
761 + int len;
762 enum processor_type target_arch_cpu = arm_none;
763 enum processor_type selected_cpu = arm_none;
764
765 @@ -1152,7 +1277,11 @@
766 {
767 /* Set the architecture define. */
768 if (i != ARM_OPT_SET_TUNE)
769 - sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
770 + {
771 + len = snprintf (arm_arch_name, ARM_ARCH_NAME_SIZE,
772 + "__ARM_ARCH_%s__", sel->arch);
773 + gcc_assert (len < ARM_ARCH_NAME_SIZE);
774 + }
775
776 /* Determine the processor core for which we should
777 tune code-generation. */
778 @@ -1178,8 +1307,8 @@
779 make sure that they are compatible. We only generate
780 a warning though, and we prefer the CPU over the
781 architecture. */
782 - if (insn_flags != 0 && (insn_flags ^ sel->flags))
783 - warning (0, "switch -mcpu=%s conflicts with -march= switch",
784 + if (insn_flags != 0 && ((insn_flags ^ sel->flags) & ~FL_COMPAT))
785 + warning (0, "switch -mcpu=%s conflicts with -march= switch, assuming CPU feature set",
786 ptr->string);
787
788 insn_flags = sel->flags;
789 @@ -1279,7 +1408,11 @@
790
791 insn_flags = sel->flags;
792 }
793 - sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
794 +
795 + len = snprintf (arm_arch_name, ARM_ARCH_NAME_SIZE,
796 + "__ARM_ARCH_%s__", sel->arch);
797 + gcc_assert (len < ARM_ARCH_NAME_SIZE);
798 +
799 arm_default_cpu = (enum processor_type) (sel - all_cores);
800 if (arm_tune == arm_none)
801 arm_tune = arm_default_cpu;
802 @@ -1289,8 +1422,35 @@
803 chosen. */
804 gcc_assert (arm_tune != arm_none);
805
806 + if (arm_tune == cortexa8 && optimize >= 3)
807 + {
808 + /* These alignments were experimentally determined to improve SPECint
809 + performance on SPECCPU 2000. */
810 + if (align_functions <= 0)
811 + align_functions = 16;
812 + if (align_jumps <= 0)
813 + align_jumps = 16;
814 + }
815 +
816 tune_flags = all_cores[(int)arm_tune].flags;
817
818 + if (target_fp16_format_name)
819 + {
820 + for (i = 0; i < ARRAY_SIZE (all_fp16_formats); i++)
821 + {
822 + if (streq (all_fp16_formats[i].name, target_fp16_format_name))
823 + {
824 + arm_fp16_format = all_fp16_formats[i].fp16_format_type;
825 + break;
826 + }
827 + }
828 + if (i == ARRAY_SIZE (all_fp16_formats))
829 + error ("invalid __fp16 format option: -mfp16-format=%s",
830 + target_fp16_format_name);
831 + }
832 + else
833 + arm_fp16_format = ARM_FP16_FORMAT_NONE;
834 +
835 if (target_abi_name)
836 {
837 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
838 @@ -1383,6 +1543,7 @@
839 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
840 arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
841 arm_arch_notm = (insn_flags & FL_NOTM) != 0;
842 + arm_arch7em = (insn_flags & FL_ARCH7EM) != 0;
843 arm_arch_thumb2 = (insn_flags & FL_THUMB2) != 0;
844 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
845 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
846 @@ -1390,12 +1551,25 @@
847 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
848 arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
849 thumb_code = (TARGET_ARM == 0);
850 + janus2_code = (TARGET_FIX_JANUS != 0);
851 + if (janus2_code && TARGET_THUMB2)
852 + error ("janus2 fix is not applicable when targeting a thumb2 core");
853 arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
854 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
855 + arm_tune_marvell_f = (tune_flags & FL_MARVELL_F) != 0;
856 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
857 - arm_arch_hwdiv = (insn_flags & FL_DIV) != 0;
858 arm_tune_cortex_a9 = (arm_tune == cortexa9) != 0;
859
860 + /* Hardware integer division is supported by some variants of the ARM
861 + architecture in Thumb-2 mode. In addition some (but not all) Marvell
862 + CPUs support their own hardware integer division instructions.
863 + The assembler will pick the correct encoding. */
864 + if (TARGET_MARVELL_DIV && (insn_flags & FL_MARVELL_F) == 0)
865 + error ("-mmarvell-div is only supported when targeting a Marvell core");
866 +
867 + arm_arch_hwdiv = (TARGET_ARM && TARGET_MARVELL_DIV)
868 + || (TARGET_THUMB2 && (insn_flags & FL_DIV) != 0);
869 +
870 /* If we are not using the default (ARM mode) section anchor offset
871 ranges, then set the correct ranges now. */
872 if (TARGET_THUMB1)
873 @@ -1434,7 +1608,6 @@
874 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
875 error ("iwmmxt abi requires an iwmmxt capable cpu");
876
877 - arm_fp_model = ARM_FP_MODEL_UNKNOWN;
878 if (target_fpu_name == NULL && target_fpe_name != NULL)
879 {
880 if (streq (target_fpe_name, "2"))
881 @@ -1445,46 +1618,52 @@
882 error ("invalid floating point emulation option: -mfpe=%s",
883 target_fpe_name);
884 }
885 - if (target_fpu_name != NULL)
886 - {
887 - /* The user specified a FPU. */
888 - for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
889 - {
890 - if (streq (all_fpus[i].name, target_fpu_name))
891 - {
892 - arm_fpu_arch = all_fpus[i].fpu;
893 - arm_fpu_tune = arm_fpu_arch;
894 - arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
895 - break;
896 - }
897 - }
898 - if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
899 - error ("invalid floating point option: -mfpu=%s", target_fpu_name);
900 - }
901 - else
902 +
903 + if (target_fpu_name == NULL)
904 {
905 #ifdef FPUTYPE_DEFAULT
906 - /* Use the default if it is specified for this platform. */
907 - arm_fpu_arch = FPUTYPE_DEFAULT;
908 - arm_fpu_tune = FPUTYPE_DEFAULT;
909 + target_fpu_name = FPUTYPE_DEFAULT;
910 #else
911 - /* Pick one based on CPU type. */
912 - /* ??? Some targets assume FPA is the default.
913 - if ((insn_flags & FL_VFP) != 0)
914 - arm_fpu_arch = FPUTYPE_VFP;
915 - else
916 - */
917 if (arm_arch_cirrus)
918 - arm_fpu_arch = FPUTYPE_MAVERICK;
919 + target_fpu_name = "maverick";
920 else
921 - arm_fpu_arch = FPUTYPE_FPA_EMU2;
922 + target_fpu_name = "fpe2";
923 #endif
924 - if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
925 - arm_fpu_tune = FPUTYPE_FPA;
926 + }
927 +
928 + arm_fpu_desc = NULL;
929 + for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
930 + {
931 + if (streq (all_fpus[i].name, target_fpu_name))
932 + {
933 + arm_fpu_desc = &all_fpus[i];
934 + break;
935 + }
936 + }
937 + if (!arm_fpu_desc)
938 + error ("invalid floating point option: -mfpu=%s", target_fpu_name);
939 +
940 + switch (arm_fpu_desc->model)
941 + {
942 + case ARM_FP_MODEL_FPA:
943 + if (arm_fpu_desc->rev == 2)
944 + arm_fpu_attr = FPU_FPE2;
945 + else if (arm_fpu_desc->rev == 3)
946 + arm_fpu_attr = FPU_FPE3;
947 else
948 - arm_fpu_tune = arm_fpu_arch;
949 - arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
950 - gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
951 + arm_fpu_attr = FPU_FPA;
952 + break;
953 +
954 + case ARM_FP_MODEL_MAVERICK:
955 + arm_fpu_attr = FPU_MAVERICK;
956 + break;
957 +
958 + case ARM_FP_MODEL_VFP:
959 + arm_fpu_attr = FPU_VFP;
960 + break;
961 +
962 + default:
963 + gcc_unreachable();
964 }
965
966 if (target_float_abi_name != NULL)
967 @@ -1505,9 +1684,6 @@
968 else
969 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
970
971 - if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
972 - sorry ("-mfloat-abi=hard and VFP");
973 -
974 /* FPA and iWMMXt are incompatible because the insn encodings overlap.
975 VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
976 will ever exist. GCC makes no attempt to support this combination. */
977 @@ -1518,15 +1694,40 @@
978 if (TARGET_THUMB2 && TARGET_IWMMXT)
979 sorry ("Thumb-2 iWMMXt");
980
981 + /* __fp16 support currently assumes the core has ldrh. */
982 + if (!arm_arch4 && arm_fp16_format != ARM_FP16_FORMAT_NONE)
983 + sorry ("__fp16 and no ldrh");
984 +
985 /* If soft-float is specified then don't use FPU. */
986 if (TARGET_SOFT_FLOAT)
987 - arm_fpu_arch = FPUTYPE_NONE;
988 + arm_fpu_attr = FPU_NONE;
989 +
990 + if (TARGET_AAPCS_BASED)
991 + {
992 + if (arm_abi == ARM_ABI_IWMMXT)
993 + arm_pcs_default = ARM_PCS_AAPCS_IWMMXT;
994 + else if (arm_float_abi == ARM_FLOAT_ABI_HARD
995 + && TARGET_HARD_FLOAT
996 + && TARGET_VFP)
997 + arm_pcs_default = ARM_PCS_AAPCS_VFP;
998 + else
999 + arm_pcs_default = ARM_PCS_AAPCS;
1000 + }
1001 + else
1002 + {
1003 + if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1004 + sorry ("-mfloat-abi=hard and VFP");
1005 +
1006 + if (arm_abi == ARM_ABI_APCS)
1007 + arm_pcs_default = ARM_PCS_APCS;
1008 + else
1009 + arm_pcs_default = ARM_PCS_ATPCS;
1010 + }
1011
1012 /* For arm2/3 there is no need to do any scheduling if there is only
1013 a floating point emulator, or we are doing software floating-point. */
1014 if ((TARGET_SOFT_FLOAT
1015 - || arm_fpu_tune == FPUTYPE_FPA_EMU2
1016 - || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1017 + || (TARGET_FPA && arm_fpu_desc->rev))
1018 && (tune_flags & FL_MODE32) == 0)
1019 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1020
1021 @@ -1616,8 +1817,7 @@
1022 fix_cm3_ldrd = 0;
1023 }
1024
1025 - /* ??? We might want scheduling for thumb2. */
1026 - if (TARGET_THUMB && flag_schedule_insns)
1027 + if (TARGET_THUMB1 && flag_schedule_insns)
1028 {
1029 /* Don't warn since it's on by default in -O2. */
1030 flag_schedule_insns = 0;
1031 @@ -1653,6 +1853,36 @@
1032
1033 /* Register global variables with the garbage collector. */
1034 arm_add_gc_roots ();
1035 +
1036 + if (low_irq_latency && TARGET_THUMB)
1037 + {
1038 + warning (0,
1039 + "-low-irq-latency has no effect when compiling for the Thumb");
1040 + low_irq_latency = 0;
1041 + }
1042 +
1043 + /* CSL LOCAL */
1044 + /* Loop unrolling can be a substantial win. At -O2, limit to 2x
1045 + unrolling by default to prevent excessive code growth; at -O3,
1046 + limit to 4x unrolling by default. We know we are not optimizing
1047 + for size if this is set (see arm_optimization_options). */
1048 + if (flag_unroll_loops == 2)
1049 + {
1050 + if (optimize == 2)
1051 + {
1052 + flag_unroll_loops = 1;
1053 + if (!PARAM_SET_P (PARAM_MAX_UNROLL_TIMES))
1054 + set_param_value ("max-unroll-times", 2);
1055 + }
1056 + else if (optimize > 2)
1057 + {
1058 + flag_unroll_loops = 1;
1059 + if (!PARAM_SET_P (PARAM_MAX_UNROLL_TIMES))
1060 + set_param_value ("max-unroll-times", 4);
1061 + }
1062 + else
1063 + flag_unroll_loops = 0;
1064 + }
1065 }
1066
1067 static void
1068 @@ -1782,6 +2012,14 @@
1069 return !IS_NAKED (arm_current_func_type ());
1070 }
1071
1072 +static bool
1073 +arm_warn_func_result (void)
1074 +{
1075 + /* Naked functions are implemented entirely in assembly, including the
1076 + return sequence, so suppress warnings about this. */
1077 + return !IS_NAKED (arm_current_func_type ());
1078 +}
1079 +
1080 \f
1081 /* Return 1 if it is possible to return using a single instruction.
1082 If SIBLING is non-null, this is a test for a return before a sibling
1083 @@ -2873,14 +3111,19 @@
1084
1085 /* Define how to find the value returned by a function. */
1086
1087 -rtx
1088 -arm_function_value(const_tree type, const_tree func ATTRIBUTE_UNUSED)
1089 +static rtx
1090 +arm_function_value(const_tree type, const_tree func,
1091 + bool outgoing ATTRIBUTE_UNUSED)
1092 {
1093 enum machine_mode mode;
1094 int unsignedp ATTRIBUTE_UNUSED;
1095 rtx r ATTRIBUTE_UNUSED;
1096
1097 mode = TYPE_MODE (type);
1098 +
1099 + if (TARGET_AAPCS_BASED)
1100 + return aapcs_allocate_return_reg (mode, type, func);
1101 +
1102 /* Promote integer types. */
1103 if (INTEGRAL_TYPE_P (type))
1104 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
1105 @@ -2897,7 +3140,36 @@
1106 }
1107 }
1108
1109 - return LIBCALL_VALUE(mode);
1110 + return LIBCALL_VALUE (mode);
1111 +}
1112 +
1113 +rtx
1114 +arm_libcall_value (enum machine_mode mode, rtx libcall)
1115 +{
1116 + if (TARGET_AAPCS_BASED && arm_pcs_default != ARM_PCS_AAPCS
1117 + && GET_MODE_CLASS (mode) == MODE_FLOAT)
1118 + {
1119 + /* The following libcalls return their result in integer registers,
1120 + even though they return a floating point value. */
1121 + if (rtx_equal_p (libcall,
1122 + convert_optab_libfunc (sfloat_optab, mode, SImode))
1123 + || rtx_equal_p (libcall,
1124 + convert_optab_libfunc (ufloat_optab, mode, SImode))
1125 + || rtx_equal_p (libcall,
1126 + convert_optab_libfunc (sfloat_optab, mode, DImode))
1127 + || rtx_equal_p (libcall,
1128 + convert_optab_libfunc (ufloat_optab, mode, DImode))
1129 + || rtx_equal_p (libcall,
1130 + convert_optab_libfunc (trunc_optab, HFmode, SFmode))
1131 + || rtx_equal_p (libcall,
1132 + convert_optab_libfunc (sext_optab, SFmode, HFmode)))
1133 + return gen_rtx_REG (mode, ARG_REGISTER(1));
1134 +
1135 + /* XXX There are other libcalls that return in integer registers,
1136 + but I think they are all handled by hard insns. */
1137 + }
1138 +
1139 + return LIBCALL_VALUE (mode);
1140 }
1141
1142 /* Determine the amount of memory needed to store the possible return
1143 @@ -2907,10 +3179,12 @@
1144 {
1145 int size = 16;
1146
1147 - if (TARGET_ARM)
1148 + if (TARGET_32BIT)
1149 {
1150 if (TARGET_HARD_FLOAT_ABI)
1151 {
1152 + if (TARGET_VFP)
1153 + size += 32;
1154 if (TARGET_FPA)
1155 size += 12;
1156 if (TARGET_MAVERICK)
1157 @@ -2923,27 +3197,56 @@
1158 return size;
1159 }
1160
1161 -/* Decide whether a type should be returned in memory (true)
1162 - or in a register (false). This is called as the target hook
1163 - TARGET_RETURN_IN_MEMORY. */
1164 +/* Decide whether TYPE should be returned in memory (true)
1165 + or in a register (false). FNTYPE is the type of the function making
1166 + the call. */
1167 static bool
1168 -arm_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
1169 +arm_return_in_memory (const_tree type, const_tree fntype)
1170 {
1171 HOST_WIDE_INT size;
1172
1173 - size = int_size_in_bytes (type);
1174 + size = int_size_in_bytes (type); /* Negative if not fixed size. */
1175 +
1176 + if (TARGET_AAPCS_BASED)
1177 + {
1178 + /* Simple, non-aggregate types (ie not including vectors and
1179 + complex) are always returned in a register (or registers).
1180 + We don't care about which register here, so we can short-cut
1181 + some of the detail. */
1182 + if (!AGGREGATE_TYPE_P (type)
1183 + && TREE_CODE (type) != VECTOR_TYPE
1184 + && TREE_CODE (type) != COMPLEX_TYPE)
1185 + return false;
1186 +
1187 + /* Any return value that is no larger than one word can be
1188 + returned in r0. */
1189 + if (((unsigned HOST_WIDE_INT) size) <= UNITS_PER_WORD)
1190 + return false;
1191 +
1192 + /* Check any available co-processors to see if they accept the
1193 + type as a register candidate (VFP, for example, can return
1194 + some aggregates in consecutive registers). These aren't
1195 + available if the call is variadic. */
1196 + if (aapcs_select_return_coproc (type, fntype) >= 0)
1197 + return false;
1198 +
1199 + /* Vector values should be returned using ARM registers, not
1200 + memory (unless they're over 16 bytes, which will break since
1201 + we only have four call-clobbered registers to play with). */
1202 + if (TREE_CODE (type) == VECTOR_TYPE)
1203 + return (size < 0 || size > (4 * UNITS_PER_WORD));
1204 +
1205 + /* The rest go in memory. */
1206 + return true;
1207 + }
1208
1209 - /* Vector values should be returned using ARM registers, not memory (unless
1210 - they're over 16 bytes, which will break since we only have four
1211 - call-clobbered registers to play with). */
1212 if (TREE_CODE (type) == VECTOR_TYPE)
1213 return (size < 0 || size > (4 * UNITS_PER_WORD));
1214
1215 if (!AGGREGATE_TYPE_P (type) &&
1216 - !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
1217 - /* All simple types are returned in registers.
1218 - For AAPCS, complex types are treated the same as aggregates. */
1219 - return 0;
1220 + (TREE_CODE (type) != VECTOR_TYPE))
1221 + /* All simple types are returned in registers. */
1222 + return false;
1223
1224 if (arm_abi != ARM_ABI_APCS)
1225 {
1226 @@ -2960,7 +3263,7 @@
1227 the aggregate is either huge or of variable size, and in either case
1228 we will want to return it via memory and not in a register. */
1229 if (size < 0 || size > UNITS_PER_WORD)
1230 - return 1;
1231 + return true;
1232
1233 if (TREE_CODE (type) == RECORD_TYPE)
1234 {
1235 @@ -2980,18 +3283,18 @@
1236 continue;
1237
1238 if (field == NULL)
1239 - return 0; /* An empty structure. Allowed by an extension to ANSI C. */
1240 + return false; /* An empty structure. Allowed by an extension to ANSI C. */
1241
1242 /* Check that the first field is valid for returning in a register. */
1243
1244 /* ... Floats are not allowed */
1245 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1246 - return 1;
1247 + return true;
1248
1249 /* ... Aggregates that are not themselves valid for returning in
1250 a register are not allowed. */
1251 if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE))
1252 - return 1;
1253 + return true;
1254
1255 /* Now check the remaining fields, if any. Only bitfields are allowed,
1256 since they are not addressable. */
1257 @@ -3003,10 +3306,10 @@
1258 continue;
1259
1260 if (!DECL_BIT_FIELD_TYPE (field))
1261 - return 1;
1262 + return true;
1263 }
1264
1265 - return 0;
1266 + return false;
1267 }
1268
1269 if (TREE_CODE (type) == UNION_TYPE)
1270 @@ -3023,18 +3326,18 @@
1271 continue;
1272
1273 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1274 - return 1;
1275 + return true;
1276
1277 if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE))
1278 - return 1;
1279 + return true;
1280 }
1281
1282 - return 0;
1283 + return false;
1284 }
1285 #endif /* not ARM_WINCE */
1286
1287 /* Return all other types in memory. */
1288 - return 1;
1289 + return true;
1290 }
1291
1292 /* Indicate whether or not words of a double are in big-endian order. */
1293 @@ -3059,14 +3362,780 @@
1294 return 1;
1295 }
1296
1297 +const struct pcs_attribute_arg
1298 +{
1299 + const char *arg;
1300 + enum arm_pcs value;
1301 +} pcs_attribute_args[] =
1302 + {
1303 + {"aapcs", ARM_PCS_AAPCS},
1304 + {"aapcs-vfp", ARM_PCS_AAPCS_VFP},
1305 + {"aapcs-iwmmxt", ARM_PCS_AAPCS_IWMMXT},
1306 + {"atpcs", ARM_PCS_ATPCS},
1307 + {"apcs", ARM_PCS_APCS},
1308 + {NULL, ARM_PCS_UNKNOWN}
1309 + };
1310 +
1311 +static enum arm_pcs
1312 +arm_pcs_from_attribute (tree attr)
1313 +{
1314 + const struct pcs_attribute_arg *ptr;
1315 + const char *arg;
1316 +
1317 + /* Get the value of the argument. */
1318 + if (TREE_VALUE (attr) == NULL_TREE
1319 + || TREE_CODE (TREE_VALUE (attr)) != STRING_CST)
1320 + return ARM_PCS_UNKNOWN;
1321 +
1322 + arg = TREE_STRING_POINTER (TREE_VALUE (attr));
1323 +
1324 + /* Check it against the list of known arguments. */
1325 + for (ptr = pcs_attribute_args; ptr->arg != NULL; ptr++)
1326 + if (streq (arg, ptr->arg))
1327 + return ptr->value;
1328 +
1329 + /* An unrecognized PCS name. */
1330 + return ARM_PCS_UNKNOWN;
1331 +}
1332 +
1333 +/* Get the PCS variant to use for this call. TYPE is the function's type
1334 + specification, DECL is the specific declaration. DECL may be null if
1335 + the call could be indirect or if this is a library call. */
1336 +static enum arm_pcs
1337 +arm_get_pcs_model (const_tree type, const_tree decl)
1338 +{
1339 + bool user_convention = false;
1340 + enum arm_pcs user_pcs = arm_pcs_default;
1341 + tree attr;
1342 +
1343 + gcc_assert (type);
1344 +
1345 + attr = lookup_attribute ("pcs", TYPE_ATTRIBUTES (type));
1346 + if (attr)
1347 + {
1348 + user_pcs = arm_pcs_from_attribute (TREE_VALUE (attr));
1349 + user_convention = true;
1350 + }
1351 +
1352 + if (TARGET_AAPCS_BASED)
1353 + {
1354 + /* Detect varargs functions. These always use the base rules
1355 + (no argument is ever a candidate for a co-processor
1356 + register). */
1357 + bool base_rules = (TYPE_ARG_TYPES (type) != 0
1358 + && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (type)))
1359 + != void_type_node));
1360 +
1361 + if (user_convention)
1362 + {
1363 + if (user_pcs > ARM_PCS_AAPCS_LOCAL)
1364 + sorry ("Non-AAPCS derived PCS variant");
1365 + else if (base_rules && user_pcs != ARM_PCS_AAPCS)
1366 + error ("Variadic functions must use the base AAPCS variant");
1367 + }
1368 +
1369 + if (base_rules)
1370 + return ARM_PCS_AAPCS;
1371 + else if (user_convention)
1372 + return user_pcs;
1373 + else if (decl && flag_unit_at_a_time)
1374 + {
1375 + /* Local functions never leak outside this compilation unit,
1376 + so we are free to use whatever conventions are
1377 + appropriate. */
1378 + /* FIXME: remove CONST_CAST_TREE when cgraph is constified. */
1379 + struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
1380 + if (i && i->local)
1381 + return ARM_PCS_AAPCS_LOCAL;
1382 + }
1383 + }
1384 + else if (user_convention && user_pcs != arm_pcs_default)
1385 + sorry ("PCS variant");
1386 +
1387 + /* For everything else we use the target's default. */
1388 + return arm_pcs_default;
1389 +}
1390 +
1391 +
1392 +static void
1393 +aapcs_vfp_cum_init (CUMULATIVE_ARGS *pcum ATTRIBUTE_UNUSED,
1394 + const_tree fntype ATTRIBUTE_UNUSED,
1395 + rtx libcall ATTRIBUTE_UNUSED,
1396 + const_tree fndecl ATTRIBUTE_UNUSED)
1397 +{
1398 + /* Record the unallocated VFP registers. */
1399 + pcum->aapcs_vfp_regs_free = (1 << NUM_VFP_ARG_REGS) - 1;
1400 + pcum->aapcs_vfp_reg_alloc = 0;
1401 +}
1402 +
1403 +/* Walk down the type tree of TYPE counting consecutive base elements.
1404 + If *MODEP is VOIDmode, then set it to the first valid floating point
1405 + type. If a non-floating point type is found, or if a floating point
1406 + type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
1407 + otherwise return the count in the sub-tree. */
1408 +static int
1409 +aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
1410 +{
1411 + enum machine_mode mode;
1412 + HOST_WIDE_INT size;
1413 +
1414 + switch (TREE_CODE (type))
1415 + {
1416 + case REAL_TYPE:
1417 + mode = TYPE_MODE (type);
1418 + if (mode != DFmode && mode != SFmode)
1419 + return -1;
1420 +
1421 + if (*modep == VOIDmode)
1422 + *modep = mode;
1423 +
1424 + if (*modep == mode)
1425 + return 1;
1426 +
1427 + break;
1428 +
1429 + case COMPLEX_TYPE:
1430 + mode = TYPE_MODE (TREE_TYPE (type));
1431 + if (mode != DFmode && mode != SFmode)
1432 + return -1;
1433 +
1434 + if (*modep == VOIDmode)
1435 + *modep = mode;
1436 +
1437 + if (*modep == mode)
1438 + return 2;
1439 +
1440 + break;
1441 +
1442 + case VECTOR_TYPE:
1443 + /* Use V2SImode and V4SImode as representatives of all 64-bit
1444 + and 128-bit vector types, whether or not those modes are
1445 + supported with the present options. */
1446 + size = int_size_in_bytes (type);
1447 + switch (size)
1448 + {
1449 + case 8:
1450 + mode = V2SImode;
1451 + break;
1452 + case 16:
1453 + mode = V4SImode;
1454 + break;
1455 + default:
1456 + return -1;
1457 + }
1458 +
1459 + if (*modep == VOIDmode)
1460 + *modep = mode;
1461 +
1462 + /* Vector modes are considered to be opaque: two vectors are
1463 + equivalent for the purposes of being homogeneous aggregates
1464 + if they are the same size. */
1465 + if (*modep == mode)
1466 + return 1;
1467 +
1468 + break;
1469 +
1470 + case ARRAY_TYPE:
1471 + {
1472 + int count;
1473 + tree index = TYPE_DOMAIN (type);
1474 +
1475 + /* Can't handle incomplete types. */
1476 + if (!COMPLETE_TYPE_P(type))
1477 + return -1;
1478 +
1479 + count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
1480 + if (count == -1
1481 + || !index
1482 + || !TYPE_MAX_VALUE (index)
1483 + || !host_integerp (TYPE_MAX_VALUE (index), 1)
1484 + || !TYPE_MIN_VALUE (index)
1485 + || !host_integerp (TYPE_MIN_VALUE (index), 1)
1486 + || count < 0)
1487 + return -1;
1488 +
1489 + count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1)
1490 + - tree_low_cst (TYPE_MIN_VALUE (index), 1));
1491 +
1492 + /* There must be no padding. */
1493 + if (!host_integerp (TYPE_SIZE (type), 1)
1494 + || (tree_low_cst (TYPE_SIZE (type), 1)
1495 + != count * GET_MODE_BITSIZE (*modep)))
1496 + return -1;
1497 +
1498 + return count;
1499 + }
1500 +
1501 + case RECORD_TYPE:
1502 + {
1503 + int count = 0;
1504 + int sub_count;
1505 + tree field;
1506 +
1507 + /* Can't handle incomplete types. */
1508 + if (!COMPLETE_TYPE_P(type))
1509 + return -1;
1510 +
1511 + for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
1512 + {
1513 + if (TREE_CODE (field) != FIELD_DECL)
1514 + continue;
1515 +
1516 + sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
1517 + if (sub_count < 0)
1518 + return -1;
1519 + count += sub_count;
1520 + }
1521 +
1522 + /* There must be no padding. */
1523 + if (!host_integerp (TYPE_SIZE (type), 1)
1524 + || (tree_low_cst (TYPE_SIZE (type), 1)
1525 + != count * GET_MODE_BITSIZE (*modep)))
1526 + return -1;
1527 +
1528 + return count;
1529 + }
1530 +
1531 + case UNION_TYPE:
1532 + case QUAL_UNION_TYPE:
1533 + {
1534 + /* These aren't very interesting except in a degenerate case. */
1535 + int count = 0;
1536 + int sub_count;
1537 + tree field;
1538 +
1539 + /* Can't handle incomplete types. */
1540 + if (!COMPLETE_TYPE_P(type))
1541 + return -1;
1542 +
1543 + for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
1544 + {
1545 + if (TREE_CODE (field) != FIELD_DECL)
1546 + continue;
1547 +
1548 + sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
1549 + if (sub_count < 0)
1550 + return -1;
1551 + count = count > sub_count ? count : sub_count;
1552 + }
1553 +
1554 + /* There must be no padding. */
1555 + if (!host_integerp (TYPE_SIZE (type), 1)
1556 + || (tree_low_cst (TYPE_SIZE (type), 1)
1557 + != count * GET_MODE_BITSIZE (*modep)))
1558 + return -1;
1559 +
1560 + return count;
1561 + }
1562 +
1563 + default:
1564 + break;
1565 + }
1566 +
1567 + return -1;
1568 +}
1569 +
1570 +/* Return true if PCS_VARIANT should use VFP registers. */
1571 +static bool
1572 +use_vfp_abi (enum arm_pcs pcs_variant, bool is_double)
1573 +{
1574 + if (pcs_variant == ARM_PCS_AAPCS_VFP)
1575 + return true;
1576 +
1577 + if (pcs_variant != ARM_PCS_AAPCS_LOCAL)
1578 + return false;
1579 +
1580 + return (TARGET_32BIT && TARGET_VFP && TARGET_HARD_FLOAT &&
1581 + (TARGET_VFP_DOUBLE || !is_double));
1582 +}
1583 +
1584 +static bool
1585 +aapcs_vfp_is_call_or_return_candidate (enum arm_pcs pcs_variant,
1586 + enum machine_mode mode, const_tree type,
1587 + int *base_mode, int *count)
1588 +{
1589 + enum machine_mode new_mode = VOIDmode;
1590 +
1591 + if (GET_MODE_CLASS (mode) == MODE_FLOAT
1592 + || GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1593 + || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
1594 + {
1595 + *count = 1;
1596 + new_mode = mode;
1597 + }
1598 + else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
1599 + {
1600 + *count = 2;
1601 + new_mode = (mode == DCmode ? DFmode : SFmode);
1602 + }
1603 + else if (type && (mode == BLKmode || TREE_CODE (type) == VECTOR_TYPE))
1604 + {
1605 + int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);
1606 +
1607 + if (ag_count > 0 && ag_count <= 4)
1608 + *count = ag_count;
1609 + else
1610 + return false;
1611 + }
1612 + else
1613 + return false;
1614 +
1615 +
1616 + if (!use_vfp_abi (pcs_variant, ARM_NUM_REGS (new_mode) > 1))
1617 + return false;
1618 +
1619 + *base_mode = new_mode;
1620 + return true;
1621 +}
1622 +
1623 +static bool
1624 +aapcs_vfp_is_return_candidate (enum arm_pcs pcs_variant,
1625 + enum machine_mode mode, const_tree type)
1626 +{
1627 + int count ATTRIBUTE_UNUSED;
1628 + int ag_mode ATTRIBUTE_UNUSED;
1629 +
1630 + if (!use_vfp_abi (pcs_variant, false))
1631 + return false;
1632 + return aapcs_vfp_is_call_or_return_candidate (pcs_variant, mode, type,
1633 + &ag_mode, &count);
1634 +}
1635 +
1636 +static bool
1637 +aapcs_vfp_is_call_candidate (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
1638 + const_tree type)
1639 +{
1640 + if (!use_vfp_abi (pcum->pcs_variant, false))
1641 + return false;
1642 +
1643 + return aapcs_vfp_is_call_or_return_candidate (pcum->pcs_variant, mode, type,
1644 + &pcum->aapcs_vfp_rmode,
1645 + &pcum->aapcs_vfp_rcount);
1646 +}
1647 +
1648 +static bool
1649 +aapcs_vfp_allocate (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
1650 + const_tree type ATTRIBUTE_UNUSED)
1651 +{
1652 + int shift = GET_MODE_SIZE (pcum->aapcs_vfp_rmode) / GET_MODE_SIZE (SFmode);
1653 + unsigned mask = (1 << (shift * pcum->aapcs_vfp_rcount)) - 1;
1654 + int regno;
1655 +
1656 + for (regno = 0; regno < NUM_VFP_ARG_REGS; regno += shift)
1657 + if (((pcum->aapcs_vfp_regs_free >> regno) & mask) == mask)
1658 + {
1659 + pcum->aapcs_vfp_reg_alloc = mask << regno;
1660 + if (mode == BLKmode || (mode == TImode && !TARGET_NEON))
1661 + {
1662 + int i;
1663 + int rcount = pcum->aapcs_vfp_rcount;
1664 + int rshift = shift;
1665 + enum machine_mode rmode = pcum->aapcs_vfp_rmode;
1666 + rtx par;
1667 + if (!TARGET_NEON)
1668 + {
1669 + /* Avoid using unsupported vector modes. */
1670 + if (rmode == V2SImode)
1671 + rmode = DImode;
1672 + else if (rmode == V4SImode)
1673 + {
1674 + rmode = DImode;
1675 + rcount *= 2;
1676 + rshift /= 2;
1677 + }
1678 + }
1679 + par = gen_rtx_PARALLEL (mode, rtvec_alloc (rcount));
1680 + for (i = 0; i < rcount; i++)
1681 + {
1682 + rtx tmp = gen_rtx_REG (rmode,
1683 + FIRST_VFP_REGNUM + regno + i * rshift);
1684 + tmp = gen_rtx_EXPR_LIST
1685 + (VOIDmode, tmp,
1686 + GEN_INT (i * GET_MODE_SIZE (rmode)));
1687 + XVECEXP (par, 0, i) = tmp;
1688 + }
1689 +
1690 + pcum->aapcs_reg = par;
1691 + }
1692 + else
1693 + pcum->aapcs_reg = gen_rtx_REG (mode, FIRST_VFP_REGNUM + regno);
1694 + return true;
1695 + }
1696 + return false;
1697 +}
1698 +
1699 +static rtx
1700 +aapcs_vfp_allocate_return_reg (enum arm_pcs pcs_variant ATTRIBUTE_UNUSED,
1701 + enum machine_mode mode,
1702 + const_tree type ATTRIBUTE_UNUSED)
1703 +{
1704 + if (!use_vfp_abi (pcs_variant, false))
1705 + return false;
1706 +
1707 + if (mode == BLKmode || (mode == TImode && !TARGET_NEON))
1708 + {
1709 + int count;
1710 + int ag_mode;
1711 + int i;
1712 + rtx par;
1713 + int shift;
1714 +
1715 + aapcs_vfp_is_call_or_return_candidate (pcs_variant, mode, type,
1716 + &ag_mode, &count);
1717 +
1718 + if (!TARGET_NEON)
1719 + {
1720 + if (ag_mode == V2SImode)
1721 + ag_mode = DImode;
1722 + else if (ag_mode == V4SImode)
1723 + {
1724 + ag_mode = DImode;
1725 + count *= 2;
1726 + }
1727 + }
1728 + shift = GET_MODE_SIZE(ag_mode) / GET_MODE_SIZE(SFmode);
1729 + par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
1730 + for (i = 0; i < count; i++)
1731 + {
1732 + rtx tmp = gen_rtx_REG (ag_mode, FIRST_VFP_REGNUM + i * shift);
1733 + tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
1734 + GEN_INT (i * GET_MODE_SIZE (ag_mode)));
1735 + XVECEXP (par, 0, i) = tmp;
1736 + }
1737 +
1738 + return par;
1739 + }
1740 +
1741 + return gen_rtx_REG (mode, FIRST_VFP_REGNUM);
1742 +}
1743 +
1744 +static void
1745 +aapcs_vfp_advance (CUMULATIVE_ARGS *pcum ATTRIBUTE_UNUSED,
1746 + enum machine_mode mode ATTRIBUTE_UNUSED,
1747 + const_tree type ATTRIBUTE_UNUSED)
1748 +{
1749 + pcum->aapcs_vfp_regs_free &= ~pcum->aapcs_vfp_reg_alloc;
1750 + pcum->aapcs_vfp_reg_alloc = 0;
1751 + return;
1752 +}
1753 +
1754 +#define AAPCS_CP(X) \
1755 + { \
1756 + aapcs_ ## X ## _cum_init, \
1757 + aapcs_ ## X ## _is_call_candidate, \
1758 + aapcs_ ## X ## _allocate, \
1759 + aapcs_ ## X ## _is_return_candidate, \
1760 + aapcs_ ## X ## _allocate_return_reg, \
1761 + aapcs_ ## X ## _advance \
1762 + }
1763 +
1764 +/* Table of co-processors that can be used to pass arguments in
1765 + registers. Ideally no argument should be a candidate for more than
1766 + one co-processor table entry, but the table is processed in order
1767 + and stops after the first match. If that entry then fails to put
1768 + the argument into a co-processor register, the argument will go on
1769 + the stack. */
1770 +static struct
1771 +{
1772 + /* Initialize co-processor related state in CUMULATIVE_ARGS structure. */
1773 + void (*cum_init) (CUMULATIVE_ARGS *, const_tree, rtx, const_tree);
1774 +
1775 + /* Return true if an argument of mode MODE (or type TYPE if MODE is
1776 + BLKmode) is a candidate for this co-processor's registers; this
1777 + function should ignore any position-dependent state in
1778 + CUMULATIVE_ARGS and only use call-type dependent information. */
1779 + bool (*is_call_candidate) (CUMULATIVE_ARGS *, enum machine_mode, const_tree);
1780 +
1781 + /* Return true if the argument does get a co-processor register; it
1782 + should set aapcs_reg to an RTX of the register allocated as is
1783 + required for a return from FUNCTION_ARG. */
1784 + bool (*allocate) (CUMULATIVE_ARGS *, enum machine_mode, const_tree);
1785 +
1786 + /* Return true if a result of mode MODE (or type TYPE if MODE is
1787 + BLKmode) can be returned in this co-processor's registers. */
1788 + bool (*is_return_candidate) (enum arm_pcs, enum machine_mode, const_tree);
1789 +
1790 + /* Allocate and return an RTX element to hold the return type of a
1791 + call, this routine must not fail and will only be called if
1792 + is_return_candidate returned true with the same parameters. */
1793 + rtx (*allocate_return_reg) (enum arm_pcs, enum machine_mode, const_tree);
1794 +
1795 + /* Finish processing this argument and prepare to start processing
1796 + the next one. */
1797 + void (*advance) (CUMULATIVE_ARGS *, enum machine_mode, const_tree);
1798 +} aapcs_cp_arg_layout[ARM_NUM_COPROC_SLOTS] =
1799 + {
1800 + AAPCS_CP(vfp)
1801 + };
1802 +
1803 +#undef AAPCS_CP
1804 +
1805 +static int
1806 +aapcs_select_call_coproc (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
1807 + tree type)
1808 +{
1809 + int i;
1810 +
1811 + for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
1812 + if (aapcs_cp_arg_layout[i].is_call_candidate (pcum, mode, type))
1813 + return i;
1814 +
1815 + return -1;
1816 +}
1817 +
1818 +static int
1819 +aapcs_select_return_coproc (const_tree type, const_tree fntype)
1820 +{
1821 + /* We aren't passed a decl, so we can't check that a call is local.
1822 + However, it isn't clear that that would be a win anyway, since it
1823 + might limit some tail-calling opportunities. */
1824 + enum arm_pcs pcs_variant;
1825 +
1826 + if (fntype)
1827 + {
1828 + const_tree fndecl = NULL_TREE;
1829 +
1830 + if (TREE_CODE (fntype) == FUNCTION_DECL)
1831 + {
1832 + fndecl = fntype;
1833 + fntype = TREE_TYPE (fntype);
1834 + }
1835 +
1836 + pcs_variant = arm_get_pcs_model (fntype, fndecl);
1837 + }
1838 + else
1839 + pcs_variant = arm_pcs_default;
1840 +
1841 + if (pcs_variant != ARM_PCS_AAPCS)
1842 + {
1843 + int i;
1844 +
1845 + for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
1846 + if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant,
1847 + TYPE_MODE (type),
1848 + type))
1849 + return i;
1850 + }
1851 + return -1;
1852 +}
1853 +
1854 +static rtx
1855 +aapcs_allocate_return_reg (enum machine_mode mode, const_tree type,
1856 + const_tree fntype)
1857 +{
1858 + /* We aren't passed a decl, so we can't check that a call is local.
1859 + However, it isn't clear that that would be a win anyway, since it
1860 + might limit some tail-calling opportunities. */
1861 + enum arm_pcs pcs_variant;
1862 +
1863 + if (fntype)
1864 + {
1865 + const_tree fndecl = NULL_TREE;
1866 +
1867 + if (TREE_CODE (fntype) == FUNCTION_DECL)
1868 + {
1869 + fndecl = fntype;
1870 + fntype = TREE_TYPE (fntype);
1871 + }
1872 +
1873 + pcs_variant = arm_get_pcs_model (fntype, fndecl);
1874 + }
1875 + else
1876 + pcs_variant = arm_pcs_default;
1877 +
1878 + /* Promote integer types. */
1879 + if (type && INTEGRAL_TYPE_P (type))
1880 + PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
1881 +
1882 + if (pcs_variant != ARM_PCS_AAPCS)
1883 + {
1884 + int i;
1885 +
1886 + for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
1887 + if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant, mode,
1888 + type))
1889 + return aapcs_cp_arg_layout[i].allocate_return_reg (pcs_variant,
1890 + mode, type);
1891 + }
1892 +
1893 + /* Promotes small structs returned in a register to full-word size
1894 + for big-endian AAPCS. */
1895 + if (type && arm_return_in_msb (type))
1896 + {
1897 + HOST_WIDE_INT size = int_size_in_bytes (type);
1898 + if (size % UNITS_PER_WORD != 0)
1899 + {
1900 + size += UNITS_PER_WORD - size % UNITS_PER_WORD;
1901 + mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
1902 + }
1903 + }
1904 +
1905 + return gen_rtx_REG (mode, R0_REGNUM);
1906 +}
1907 +
1908 +rtx
1909 +aapcs_libcall_value (enum machine_mode mode)
1910 +{
1911 + return aapcs_allocate_return_reg (mode, NULL_TREE, NULL_TREE);
1912 +}
1913 +
1914 +/* Lay out a function argument using the AAPCS rules. The rule
1915 + numbers referred to here are those in the AAPCS. */
1916 +static void
1917 +aapcs_layout_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
1918 + tree type, int named)
1919 +{
1920 + int nregs, nregs2;
1921 + int ncrn;
1922 +
1923 + /* We only need to do this once per argument. */
1924 + if (pcum->aapcs_arg_processed)
1925 + return;
1926 +
1927 + pcum->aapcs_arg_processed = true;
1928 +
1929 + /* Special case: if named is false then we are handling an incoming
1930 + anonymous argument which is on the stack. */
1931 + if (!named)
1932 + return;
1933 +
1934 + /* Is this a potential co-processor register candidate? */
1935 + if (pcum->pcs_variant != ARM_PCS_AAPCS)
1936 + {
1937 + int slot = aapcs_select_call_coproc (pcum, mode, type);
1938 + pcum->aapcs_cprc_slot = slot;
1939 +
1940 + /* We don't have to apply any of the rules from part B of the
1941 + preparation phase, these are handled elsewhere in the
1942 + compiler. */
1943 +
1944 + if (slot >= 0)
1945 + {
1946 + /* A Co-processor register candidate goes either in its own
1947 + class of registers or on the stack. */
1948 + if (!pcum->aapcs_cprc_failed[slot])
1949 + {
1950 + /* C1.cp - Try to allocate the argument to co-processor
1951 + registers. */
1952 + if (aapcs_cp_arg_layout[slot].allocate (pcum, mode, type))
1953 + return;
1954 +
1955 + /* C2.cp - Put the argument on the stack and note that we
1956 + can't assign any more candidates in this slot. We also
1957 + need to note that we have allocated stack space, so that
1958 + we won't later try to split a non-cprc candidate between
1959 + core registers and the stack. */
1960 + pcum->aapcs_cprc_failed[slot] = true;
1961 + pcum->can_split = false;
1962 + }
1963 +
1964 + /* We didn't get a register, so this argument goes on the
1965 + stack. */
1966 + gcc_assert (pcum->can_split == false);
1967 + return;
1968 + }
1969 + }
1970 +
1971 + /* C3 - For double-word aligned arguments, round the NCRN up to the
1972 + next even number. */
1973 + ncrn = pcum->aapcs_ncrn;
1974 + if ((ncrn & 1) && arm_needs_doubleword_align (mode, type))
1975 + ncrn++;
1976 +
1977 + nregs = ARM_NUM_REGS2(mode, type);
1978 +
1979 + /* Sigh, this test should really assert that nregs > 0, but a GCC
1980 + extension allows empty structs and then gives them empty size; it
1981 + then allows such a structure to be passed by value. For some of
1982 + the code below we have to pretend that such an argument has
1983 + non-zero size so that we 'locate' it correctly either in
1984 + registers or on the stack. */
1985 + gcc_assert (nregs >= 0);
1986 +
1987 + nregs2 = nregs ? nregs : 1;
1988 +
1989 + /* C4 - Argument fits entirely in core registers. */
1990 + if (ncrn + nregs2 <= NUM_ARG_REGS)
1991 + {
1992 + pcum->aapcs_reg = gen_rtx_REG (mode, ncrn);
1993 + pcum->aapcs_next_ncrn = ncrn + nregs;
1994 + return;
1995 + }
1996 +
1997 + /* C5 - Some core registers left and there are no arguments already
1998 + on the stack: split this argument between the remaining core
1999 + registers and the stack. */
2000 + if (ncrn < NUM_ARG_REGS && pcum->can_split)
2001 + {
2002 + pcum->aapcs_reg = gen_rtx_REG (mode, ncrn);
2003 + pcum->aapcs_next_ncrn = NUM_ARG_REGS;
2004 + pcum->aapcs_partial = (NUM_ARG_REGS - ncrn) * UNITS_PER_WORD;
2005 + return;
2006 + }
2007 +
2008 + /* C6 - NCRN is set to 4. */
2009 + pcum->aapcs_next_ncrn = NUM_ARG_REGS;
2010 +
2011 + /* C7,C8 - argument goes on the stack. We have nothing to do here. */
2012 + return;
2013 +}
2014 +
2015 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2016 for a call to a function whose data type is FNTYPE.
2017 For a library call, FNTYPE is NULL. */
2018 void
2019 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2020 - rtx libname ATTRIBUTE_UNUSED,
2021 + rtx libname,
2022 tree fndecl ATTRIBUTE_UNUSED)
2023 {
2024 + /* Long call handling. */
2025 + if (fntype)
2026 + pcum->pcs_variant = arm_get_pcs_model (fntype, fndecl);
2027 + else
2028 + pcum->pcs_variant = arm_pcs_default;
2029 +
2030 + if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
2031 + {
2032 + /* XXX We should also detect some library calls here and handle
2033 + them using the base rules too; for example the floating point
2034 + support functions always work this way. */
2035 +
2036 + if (rtx_equal_p (libname,
2037 + convert_optab_libfunc (sfix_optab, DImode, DFmode))
2038 + || rtx_equal_p (libname,
2039 + convert_optab_libfunc (ufix_optab, DImode, DFmode))
2040 + || rtx_equal_p (libname,
2041 + convert_optab_libfunc (sfix_optab, DImode, SFmode))
2042 + || rtx_equal_p (libname,
2043 + convert_optab_libfunc (ufix_optab, DImode, SFmode))
2044 + || rtx_equal_p (libname,
2045 + convert_optab_libfunc (trunc_optab, HFmode, SFmode))
2046 + || rtx_equal_p (libname,
2047 + convert_optab_libfunc (sext_optab, SFmode, HFmode)))
2048 + pcum->pcs_variant = ARM_PCS_AAPCS;
2049 +
2050 + pcum->aapcs_ncrn = pcum->aapcs_next_ncrn = 0;
2051 + pcum->aapcs_reg = NULL_RTX;
2052 + pcum->aapcs_partial = 0;
2053 + pcum->aapcs_arg_processed = false;
2054 + pcum->aapcs_cprc_slot = -1;
2055 + pcum->can_split = true;
2056 +
2057 + if (pcum->pcs_variant != ARM_PCS_AAPCS)
2058 + {
2059 + int i;
2060 +
2061 + for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
2062 + {
2063 + pcum->aapcs_cprc_failed[i] = false;
2064 + aapcs_cp_arg_layout[i].cum_init (pcum, fntype, libname, fndecl);
2065 + }
2066 + }
2067 + return;
2068 + }
2069 +
2070 + /* Legacy ABIs */
2071 +
2072 /* On the ARM, the offset starts at 0. */
2073 pcum->nregs = 0;
2074 pcum->iwmmxt_nregs = 0;
2075 @@ -3120,6 +4189,17 @@
2076 {
2077 int nregs;
2078
2079 + /* Handle the special case quickly. Pick an arbitrary value for op2 of
2080 + a call insn (op3 of a call_value insn). */
2081 + if (mode == VOIDmode)
2082 + return const0_rtx;
2083 +
2084 + if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
2085 + {
2086 + aapcs_layout_arg (pcum, mode, type, named);
2087 + return pcum->aapcs_reg;
2088 + }
2089 +
2090 /* Varargs vectors are treated the same as long long.
2091 named_count avoids having to change the way arm handles 'named' */
2092 if (TARGET_IWMMXT_ABI
2093 @@ -3161,10 +4241,16 @@
2094
2095 static int
2096 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2097 - tree type, bool named ATTRIBUTE_UNUSED)
2098 + tree type, bool named)
2099 {
2100 int nregs = pcum->nregs;
2101
2102 + if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
2103 + {
2104 + aapcs_layout_arg (pcum, mode, type, named);
2105 + return pcum->aapcs_partial;
2106 + }
2107 +
2108 if (TARGET_IWMMXT_ABI && arm_vector_mode_supported_p (mode))
2109 return 0;
2110
2111 @@ -3176,6 +4262,39 @@
2112 return 0;
2113 }
2114
2115 +void
2116 +arm_function_arg_advance (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2117 + tree type, bool named)
2118 +{
2119 + if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
2120 + {
2121 + aapcs_layout_arg (pcum, mode, type, named);
2122 +
2123 + if (pcum->aapcs_cprc_slot >= 0)
2124 + {
2125 + aapcs_cp_arg_layout[pcum->aapcs_cprc_slot].advance (pcum, mode,
2126 + type);
2127 + pcum->aapcs_cprc_slot = -1;
2128 + }
2129 +
2130 + /* Generic stuff. */
2131 + pcum->aapcs_arg_processed = false;
2132 + pcum->aapcs_ncrn = pcum->aapcs_next_ncrn;
2133 + pcum->aapcs_reg = NULL_RTX;
2134 + pcum->aapcs_partial = 0;
2135 + }
2136 + else
2137 + {
2138 + pcum->nargs += 1;
2139 + if (arm_vector_mode_supported_p (mode)
2140 + && pcum->named_count > pcum->nargs
2141 + && TARGET_IWMMXT_ABI)
2142 + pcum->iwmmxt_nregs += 1;
2143 + else
2144 + pcum->nregs += ARM_NUM_REGS2 (mode, type);
2145 + }
2146 +}
2147 +
2148 /* Variable sized types are passed by reference. This is a GCC
2149 extension to the ARM ABI. */
2150
2151 @@ -3226,6 +4345,8 @@
2152 /* Whereas these functions are always known to reside within the 26 bit
2153 addressing range. */
2154 { "short_call", 0, 0, false, true, true, NULL },
2155 + /* Specify the procedure call conventions for a function. */
2156 + { "pcs", 1, 1, false, true, true, arm_handle_pcs_attribute },
2157 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2158 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2159 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2160 @@ -3328,6 +4449,21 @@
2161 return NULL_TREE;
2162 }
2163
2164 +/* Handle a "pcs" attribute; arguments as in struct
2165 + attribute_spec.handler. */
2166 +static tree
2167 +arm_handle_pcs_attribute (tree *node ATTRIBUTE_UNUSED, tree name, tree args,
2168 + int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2169 +{
2170 + if (arm_pcs_from_attribute (args) == ARM_PCS_UNKNOWN)
2171 + {
2172 + warning (OPT_Wattributes, "%qs attribute ignored",
2173 + IDENTIFIER_POINTER (name));
2174 + *no_add_attrs = true;
2175 + }
2176 + return NULL_TREE;
2177 +}
2178 +
2179 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2180 /* Handle the "notshared" attribute. This attribute is another way of
2181 requesting hidden visibility. ARM's compiler supports
2182 @@ -3489,7 +4625,7 @@
2183
2184 /* Return nonzero if it is ok to make a tail-call to DECL. */
2185 static bool
2186 -arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2187 +arm_function_ok_for_sibcall (tree decl, tree exp)
2188 {
2189 unsigned long func_type;
2190
2191 @@ -3522,6 +4658,21 @@
2192 if (IS_INTERRUPT (func_type))
2193 return false;
2194
2195 + if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2196 + {
2197 + /* Check that the return value locations are the same. For
2198 + example that we aren't returning a value from the sibling in
2199 + a VFP register but then need to transfer it to a core
2200 + register. */
2201 + rtx a, b;
2202 +
2203 + a = arm_function_value (TREE_TYPE (exp), decl, false);
2204 + b = arm_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2205 + cfun->decl, false);
2206 + if (!rtx_equal_p (a, b))
2207 + return false;
2208 + }
2209 +
2210 /* Never tailcall if function may be called with a misaligned SP. */
2211 if (IS_STACKALIGN (func_type))
2212 return false;
2213 @@ -4120,6 +5271,7 @@
2214 if (GET_MODE_SIZE (mode) <= 4
2215 && ! (arm_arch4
2216 && (mode == HImode
2217 + || mode == HFmode
2218 || (mode == QImode && outer == SIGN_EXTEND))))
2219 {
2220 if (code == MULT)
2221 @@ -4148,13 +5300,15 @@
2222 load. */
2223 if (arm_arch4)
2224 {
2225 - if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
2226 + if (mode == HImode
2227 + || mode == HFmode
2228 + || (outer == SIGN_EXTEND && mode == QImode))
2229 range = 256;
2230 else
2231 range = 4096;
2232 }
2233 else
2234 - range = (mode == HImode) ? 4095 : 4096;
2235 + range = (mode == HImode || mode == HFmode) ? 4095 : 4096;
2236
2237 return (code == CONST_INT
2238 && INTVAL (index) < range
2239 @@ -4325,7 +5479,8 @@
2240 return 1;
2241
2242 /* This is PC relative data after arm_reorg runs. */
2243 - else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
2244 + else if ((GET_MODE_SIZE (mode) >= 4 || mode == HFmode)
2245 + && reload_completed
2246 && (GET_CODE (x) == LABEL_REF
2247 || (GET_CODE (x) == CONST
2248 && GET_CODE (XEXP (x, 0)) == PLUS
2249 @@ -5024,7 +6179,7 @@
2250 case UMOD:
2251 if (TARGET_HARD_FLOAT && mode == SFmode)
2252 *total = COSTS_N_INSNS (2);
2253 - else if (TARGET_HARD_FLOAT && mode == DFmode)
2254 + else if (TARGET_HARD_FLOAT && mode == DFmode && !TARGET_VFP_SINGLE)
2255 *total = COSTS_N_INSNS (4);
2256 else
2257 *total = COSTS_N_INSNS (20);
2258 @@ -5063,23 +6218,6 @@
2259 return true;
2260
2261 case MINUS:
2262 - if (TARGET_THUMB2)
2263 - {
2264 - if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2265 - {
2266 - if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
2267 - *total = COSTS_N_INSNS (1);
2268 - else
2269 - *total = COSTS_N_INSNS (20);
2270 - }
2271 - else
2272 - *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
2273 - /* Thumb2 does not have RSB, so all arguments must be
2274 - registers (subtracting a constant is canonicalized as
2275 - addition of the negated constant). */
2276 - return false;
2277 - }
2278 -
2279 if (mode == DImode)
2280 {
2281 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
2282 @@ -5102,7 +6240,9 @@
2283
2284 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2285 {
2286 - if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
2287 + if (TARGET_HARD_FLOAT
2288 + && (mode == SFmode
2289 + || (mode == DFmode && !TARGET_VFP_SINGLE)))
2290 {
2291 *total = COSTS_N_INSNS (1);
2292 if (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
2293 @@ -5143,6 +6283,17 @@
2294 return true;
2295 }
2296
2297 + /* A shift as a part of RSB costs no more than RSB itself. */
2298 + if (GET_CODE (XEXP (x, 0)) == MULT
2299 + && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2300 + && ((INTVAL (XEXP (XEXP (x, 0), 1))
2301 + & (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0))
2302 + {
2303 + *total += rtx_cost (XEXP (XEXP (x, 0), 0), code, speed);
2304 + *total += rtx_cost (XEXP (x, 1), code, speed);
2305 + return true;
2306 + }
2307 +
2308 if (subcode == MULT
2309 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
2310 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
2311 @@ -5164,6 +6315,19 @@
2312 return true;
2313 }
2314
2315 + /* MLS is just as expensive as its underlying multiplication.
2316 + Exclude a shift by a constant, which is expressed as a
2317 + multiplication. */
2318 + if (TARGET_32BIT && arm_arch_thumb2
2319 + && GET_CODE (XEXP (x, 1)) == MULT
2320 + && ! (GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
2321 + && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
2322 + (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
2323 + {
2324 + /* The cost comes from the cost of the multiply. */
2325 + return false;
2326 + }
2327 +
2328 /* Fall through */
2329
2330 case PLUS:
2331 @@ -5192,7 +6356,9 @@
2332
2333 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2334 {
2335 - if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
2336 + if (TARGET_HARD_FLOAT
2337 + && (mode == SFmode
2338 + || (mode == DFmode && !TARGET_VFP_SINGLE)))
2339 {
2340 *total = COSTS_N_INSNS (1);
2341 if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
2342 @@ -5307,7 +6473,9 @@
2343 case NEG:
2344 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2345 {
2346 - if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
2347 + if (TARGET_HARD_FLOAT
2348 + && (mode == SFmode
2349 + || (mode == DFmode && !TARGET_VFP_SINGLE)))
2350 {
2351 *total = COSTS_N_INSNS (1);
2352 return false;
2353 @@ -5460,7 +6628,9 @@
2354 case ABS:
2355 if (GET_MODE_CLASS (mode == MODE_FLOAT))
2356 {
2357 - if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
2358 + if (TARGET_HARD_FLOAT
2359 + && (mode == SFmode
2360 + || (mode == DFmode && !TARGET_VFP_SINGLE)))
2361 {
2362 *total = COSTS_N_INSNS (1);
2363 return false;
2364 @@ -5563,7 +6733,8 @@
2365 return true;
2366
2367 case CONST_DOUBLE:
2368 - if (TARGET_HARD_FLOAT && vfp3_const_double_rtx (x))
2369 + if (TARGET_HARD_FLOAT && vfp3_const_double_rtx (x)
2370 + && (mode == SFmode || !TARGET_VFP_SINGLE))
2371 *total = COSTS_N_INSNS (1);
2372 else
2373 *total = COSTS_N_INSNS (4);
2374 @@ -5638,7 +6809,8 @@
2375 return false;
2376
2377 case MINUS:
2378 - if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
2379 + if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
2380 + && (mode == SFmode || !TARGET_VFP_SINGLE))
2381 {
2382 *total = COSTS_N_INSNS (1);
2383 return false;
2384 @@ -5668,7 +6840,8 @@
2385 return false;
2386
2387 case PLUS:
2388 - if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
2389 + if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
2390 + && (mode == SFmode || !TARGET_VFP_SINGLE))
2391 {
2392 *total = COSTS_N_INSNS (1);
2393 return false;
2394 @@ -5698,7 +6871,8 @@
2395 return false;
2396
2397 case NEG:
2398 - if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
2399 + if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
2400 + && (mode == SFmode || !TARGET_VFP_SINGLE))
2401 {
2402 *total = COSTS_N_INSNS (1);
2403 return false;
2404 @@ -5722,7 +6896,8 @@
2405 return false;
2406
2407 case ABS:
2408 - if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
2409 + if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
2410 + && (mode == SFmode || !TARGET_VFP_SINGLE))
2411 *total = COSTS_N_INSNS (1);
2412 else
2413 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
2414 @@ -5939,7 +7114,9 @@
2415
2416 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2417 {
2418 - if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
2419 + if (TARGET_HARD_FLOAT
2420 + && (mode == SFmode
2421 + || (mode == DFmode && !TARGET_VFP_SINGLE)))
2422 {
2423 *total = COSTS_N_INSNS (1);
2424 return false;
2425 @@ -6096,7 +7273,9 @@
2426
2427 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2428 {
2429 - if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
2430 + if (TARGET_HARD_FLOAT
2431 + && (mode == SFmode
2432 + || (mode == DFmode && !TARGET_VFP_SINGLE)))
2433 {
2434 *total = COSTS_N_INSNS (1);
2435 return false;
2436 @@ -6919,10 +8098,13 @@
2437 }
2438
2439 /* Return TRUE if OP is a memory operand which we can load or store a vector
2440 - to/from. If CORE is true, we're moving from ARM registers not Neon
2441 - registers. */
2442 + to/from. TYPE is one of the following values:
2443 + 0 - Vector load/store (vldr)
2444 + 1 - Core registers (ldm)
2445 + 2 - Element/structure loads (vld1)
2446 + */
2447 int
2448 -neon_vector_mem_operand (rtx op, bool core)
2449 +neon_vector_mem_operand (rtx op, int type)
2450 {
2451 rtx ind;
2452
2453 @@ -6955,23 +8137,16 @@
2454 return arm_address_register_rtx_p (ind, 0);
2455
2456 /* Allow post-increment with Neon registers. */
2457 - if (!core && GET_CODE (ind) == POST_INC)
2458 + if ((type != 1 && GET_CODE (ind) == POST_INC)
2459 + || (type == 0 && GET_CODE (ind) == PRE_DEC))
2460 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
2461
2462 -#if 0
2463 - /* FIXME: We can support this too if we use VLD1/VST1. */
2464 - if (!core
2465 - && GET_CODE (ind) == POST_MODIFY
2466 - && arm_address_register_rtx_p (XEXP (ind, 0), 0)
2467 - && GET_CODE (XEXP (ind, 1)) == PLUS
2468 - && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
2469 - ind = XEXP (ind, 1);
2470 -#endif
2471 + /* FIXME: vld1 allows register post-modify. */
2472
2473 /* Match:
2474 (plus (reg)
2475 (const)). */
2476 - if (!core
2477 + if (type == 0
2478 && GET_CODE (ind) == PLUS
2479 && GET_CODE (XEXP (ind, 0)) == REG
2480 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
2481 @@ -7038,10 +8213,19 @@
2482 enum reg_class
2483 coproc_secondary_reload_class (enum machine_mode mode, rtx x, bool wb)
2484 {
2485 + if (mode == HFmode)
2486 + {
2487 + if (!TARGET_NEON_FP16)
2488 + return GENERAL_REGS;
2489 + if (s_register_operand (x, mode) || neon_vector_mem_operand (x, 2))
2490 + return NO_REGS;
2491 + return GENERAL_REGS;
2492 + }
2493 +
2494 if (TARGET_NEON
2495 && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
2496 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
2497 - && neon_vector_mem_operand (x, FALSE))
2498 + && neon_vector_mem_operand (x, 0))
2499 return NO_REGS;
2500
2501 if (arm_coproc_mem_operand (x, wb) || s_register_operand (x, mode))
2502 @@ -7438,6 +8622,9 @@
2503 int base_reg = -1;
2504 int i;
2505
2506 + if (low_irq_latency)
2507 + return 0;
2508 +
2509 /* Can only handle 2, 3, or 4 insns at present,
2510 though could be easily extended if required. */
2511 gcc_assert (nops >= 2 && nops <= 4);
2512 @@ -7667,6 +8854,9 @@
2513 int base_reg = -1;
2514 int i;
2515
2516 + if (low_irq_latency)
2517 + return 0;
2518 +
2519 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2520 extended if required. */
2521 gcc_assert (nops >= 2 && nops <= 4);
2522 @@ -7874,7 +9064,7 @@
2523
2524 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
2525 for counts of 3 or 4 regs. */
2526 - if (arm_tune_xscale && count <= 2 && ! optimize_size)
2527 + if (low_irq_latency || (arm_tune_xscale && count <= 2 && ! optimize_size))
2528 {
2529 rtx seq;
2530
2531 @@ -7937,7 +9127,7 @@
2532
2533 /* See arm_gen_load_multiple for discussion of
2534 the pros/cons of ldm/stm usage for XScale. */
2535 - if (arm_tune_xscale && count <= 2 && ! optimize_size)
2536 + if (low_irq_latency || (arm_tune_xscale && count <= 2 && ! optimize_size))
2537 {
2538 rtx seq;
2539
2540 @@ -9555,7 +10745,10 @@
2541 gcc_assert (GET_CODE (from) != BARRIER);
2542
2543 /* Count the length of this insn. */
2544 - count += get_attr_length (from);
2545 + if (LABEL_P (from) && (align_jumps > 0 || align_loops > 0))
2546 + count += MAX (align_jumps, align_loops);
2547 + else
2548 + count += get_attr_length (from);
2549
2550 /* If there is a jump table, add its length. */
2551 tmp = is_jump_table (from);
2552 @@ -9867,6 +11060,8 @@
2553 insn = table;
2554 }
2555 }
2556 + else if (LABEL_P (insn) && (align_jumps > 0 || align_loops > 0))
2557 + address += MAX (align_jumps, align_loops);
2558 }
2559
2560 fix = minipool_fix_head;
2561 @@ -10072,6 +11267,21 @@
2562 vfp_output_fldmd (FILE * stream, unsigned int base, int reg, int count)
2563 {
2564 int i;
2565 + int offset;
2566 +
2567 + if (low_irq_latency)
2568 + {
2569 + /* Output a sequence of FLDD instructions. */
2570 + offset = 0;
2571 + for (i = reg; i < reg + count; ++i, offset += 8)
2572 + {
2573 + fputc ('\t', stream);
2574 + asm_fprintf (stream, "fldd\td%d, [%r,#%d]\n", i, base, offset);
2575 + }
2576 + asm_fprintf (stream, "\tadd\tsp, sp, #%d\n", count * 8);
2577 + return;
2578 + }
2579 +
2580
2581 /* Workaround ARM10 VFPr1 bug. */
2582 if (count == 2 && !arm_arch6)
2583 @@ -10142,6 +11352,56 @@
2584 rtx tmp, reg;
2585 int i;
2586
2587 + if (low_irq_latency)
2588 + {
2589 + int saved_size;
2590 + rtx sp_insn;
2591 +
2592 + if (!count)
2593 + return 0;
2594 +
2595 + saved_size = count * GET_MODE_SIZE (DFmode);
2596 +
2597 + /* Since fstd does not have postdecrement addressing mode,
2598 + we first decrement stack pointer and then use base+offset
2599 + stores for VFP registers. The ARM EABI unwind information
2600 + can't easily describe base+offset loads, so we attach
2601 + a note for the effects of the whole block in the first insn,
2602 + and avoid marking the subsequent instructions
2603 + with RTX_FRAME_RELATED_P. */
2604 + sp_insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
2605 + GEN_INT (-saved_size));
2606 + sp_insn = emit_insn (sp_insn);
2607 + RTX_FRAME_RELATED_P (sp_insn) = 1;
2608 +
2609 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
2610 + XVECEXP (dwarf, 0, 0) =
2611 + gen_rtx_SET (VOIDmode, stack_pointer_rtx,
2612 + plus_constant (stack_pointer_rtx, -saved_size));
2613 +
2614 + /* push double VFP registers to stack */
2615 + for (i = 0; i < count; ++i )
2616 + {
2617 + rtx reg;
2618 + rtx mem;
2619 + rtx addr;
2620 + rtx insn;
2621 + reg = gen_rtx_REG (DFmode, base_reg + 2*i);
2622 + addr = (i == 0) ? stack_pointer_rtx
2623 + : gen_rtx_PLUS (SImode, stack_pointer_rtx,
2624 + GEN_INT (i * GET_MODE_SIZE (DFmode)));
2625 + mem = gen_frame_mem (DFmode, addr);
2626 + insn = emit_move_insn (mem, reg);
2627 + XVECEXP (dwarf, 0, i+1) =
2628 + gen_rtx_SET (VOIDmode, mem, reg);
2629 + }
2630 +
2631 + REG_NOTES (sp_insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2632 + REG_NOTES (sp_insn));
2633 +
2634 + return saved_size;
2635 + }
2636 +
2637 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
2638 register pairs are stored by a store multiple insn. We avoid this
2639 by pushing an extra pair. */
2640 @@ -10758,7 +12018,7 @@
2641 }
2642
2643 /* Output a move, load or store for quad-word vectors in ARM registers. Only
2644 - handles MEMs accepted by neon_vector_mem_operand with CORE=true. */
2645 + handles MEMs accepted by neon_vector_mem_operand with TYPE=1. */
2646
2647 const char *
2648 output_move_quad (rtx *operands)
2649 @@ -10954,6 +12214,12 @@
2650 ops[1] = reg;
2651 break;
2652
2653 + case PRE_DEC:
2654 + templ = "v%smdb%%?\t%%0!, %%h1";
2655 + ops[0] = XEXP (addr, 0);
2656 + ops[1] = reg;
2657 + break;
2658 +
2659 case POST_MODIFY:
2660 /* FIXME: Not currently enabled in neon_vector_mem_operand. */
2661 gcc_unreachable ();
2662 @@ -10968,7 +12234,7 @@
2663 {
2664 /* We're only using DImode here because it's a convenient size. */
2665 ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * i);
2666 - ops[1] = adjust_address (mem, SImode, 8 * i);
2667 + ops[1] = adjust_address (mem, DImode, 8 * i);
2668 if (reg_overlap_mentioned_p (ops[0], mem))
2669 {
2670 gcc_assert (overlap == -1);
2671 @@ -11557,7 +12823,7 @@
2672 if (count > 0)
2673 {
2674 /* Workaround ARM10 VFPr1 bug. */
2675 - if (count == 2 && !arm_arch6)
2676 + if (count == 2 && !arm_arch6 && !low_irq_latency)
2677 count++;
2678 saved += count * 8;
2679 }
2680 @@ -11886,6 +13152,41 @@
2681 return_used_this_function = 0;
2682 }
2683
2684 +/* Generate to STREAM a code sequence that pops registers identified
2685 + in REGS_MASK from SP. SP is incremented as the result.
2686 +*/
2687 +static void
2688 +print_pop_reg_by_ldr (FILE *stream, int regs_mask, int rfe)
2689 +{
2690 + int reg;
2691 +
2692 + gcc_assert (! (regs_mask & (1 << SP_REGNUM)));
2693 +
2694 + for (reg = 0; reg < PC_REGNUM; ++reg)
2695 + if (regs_mask & (1 << reg))
2696 + asm_fprintf (stream, "\tldr\t%r, [%r], #4\n",
2697 + reg, SP_REGNUM);
2698 +
2699 + if (regs_mask & (1 << PC_REGNUM))
2700 + {
2701 + if (rfe)
2702 + /* When returning from exception, we need to
2703 + copy SPSR to CPSR. There are two ways to do
2704 + that: the ldm instruction with "^" suffix,
2705 + and movs instruction. The latter would
2706 + require that we load from stack to some
2707 + scratch register, and then move to PC.
2708 + Therefore, we'd need extra instruction and
2709 + have to make sure we actually have a spare
2710 + register. Using ldm with a single register
2711 + is simpler. */
2712 + asm_fprintf (stream, "\tldm\tsp!, {pc}^\n");
2713 + else
2714 + asm_fprintf (stream, "\tldr\t%r, [%r], #4\n",
2715 + PC_REGNUM, SP_REGNUM);
2716 + }
2717 +}
2718 +
2719 const char *
2720 arm_output_epilogue (rtx sibling)
2721 {
2722 @@ -11946,7 +13247,7 @@
2723 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
2724 int vfp_offset = offsets->frame;
2725
2726 - if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
2727 + if (TARGET_FPA_EMU2)
2728 {
2729 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
2730 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
2731 @@ -12169,7 +13470,7 @@
2732 SP_REGNUM, HARD_FRAME_POINTER_REGNUM);
2733 }
2734
2735 - if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
2736 + if (TARGET_FPA_EMU2)
2737 {
2738 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
2739 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
2740 @@ -12253,22 +13554,19 @@
2741 to load use the LDR instruction - it is faster. For Thumb-2
2742 always use pop and the assembler will pick the best instruction.*/
2743 if (TARGET_ARM && saved_regs_mask == (1 << LR_REGNUM)
2744 - && !IS_INTERRUPT(func_type))
2745 + && !IS_INTERRUPT (func_type))
2746 {
2747 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
2748 }
2749 else if (saved_regs_mask)
2750 {
2751 - if (saved_regs_mask & (1 << SP_REGNUM))
2752 - /* Note - write back to the stack register is not enabled
2753 - (i.e. "ldmfd sp!..."). We know that the stack pointer is
2754 - in the list of registers and if we add writeback the
2755 - instruction becomes UNPREDICTABLE. */
2756 - print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask,
2757 - rfe);
2758 - else if (TARGET_ARM)
2759 - print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, saved_regs_mask,
2760 - rfe);
2761 + gcc_assert ( ! (saved_regs_mask & (1 << SP_REGNUM)));
2762 + if (TARGET_ARM)
2763 + if (low_irq_latency)
2764 + print_pop_reg_by_ldr (f, saved_regs_mask, rfe);
2765 + else
2766 + print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, saved_regs_mask,
2767 + rfe);
2768 else
2769 print_multi_reg (f, "pop\t", SP_REGNUM, saved_regs_mask, 0);
2770 }
2771 @@ -12389,6 +13687,32 @@
2772
2773 gcc_assert (num_regs && num_regs <= 16);
2774
2775 + if (low_irq_latency)
2776 + {
2777 + rtx insn = 0;
2778 +
2779 + /* Emit a series of ldr instructions rather than a single ldm. */
2780 + /* TODO: Use ldrd where possible. */
2781 + gcc_assert (! (mask & (1 << SP_REGNUM)));
2782 +
2783 + for (i = LAST_ARM_REGNUM; i >= 0; --i)
2784 + {
2785 + if (mask & (1 << i))
2786 +
2787 + {
2788 + rtx reg, where, mem;
2789 +
2790 + reg = gen_rtx_REG (SImode, i);
2791 + where = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
2792 + mem = gen_rtx_MEM (SImode, where);
2793 + insn = emit_move_insn (mem, reg);
2794 + RTX_FRAME_RELATED_P (insn) = 1;
2795 + }
2796 + }
2797 +
2798 + return insn;
2799 + }
2800 +
2801 /* We don't record the PC in the dwarf frame information. */
2802 num_dwarf_regs = num_regs;
2803 if (mask & (1 << PC_REGNUM))
2804 @@ -12737,22 +14061,23 @@
2805 {
2806 int reg = -1;
2807
2808 - for (i = 4; i <= (TARGET_THUMB1 ? LAST_LO_REGNUM : 11); i++)
2809 - {
2810 - if ((offsets->saved_regs_mask & (1 << i)) == 0)
2811 - {
2812 - reg = i;
2813 - break;
2814 - }
2815 - }
2816 -
2817 - if (reg == -1 && arm_size_return_regs () <= 12
2818 - && !crtl->tail_call_emit)
2819 + /* If it is safe to use r3, then do so. This sometimes
2820 + generates better code on Thumb-2 by avoiding the need to
2821 + use 32-bit push/pop instructions. */
2822 + if (!crtl->tail_call_emit
2823 + && arm_size_return_regs () <= 12)
2824 {
2825 - /* Push/pop an argument register (r3) if all callee saved
2826 - registers are already being pushed. */
2827 reg = 3;
2828 }
2829 + else
2830 + for (i = 4; i <= (TARGET_THUMB1 ? LAST_LO_REGNUM : 11); i++)
2831 + {
2832 + if ((offsets->saved_regs_mask & (1 << i)) == 0)
2833 + {
2834 + reg = i;
2835 + break;
2836 + }
2837 + }
2838
2839 if (reg != -1)
2840 {
2841 @@ -12876,7 +14201,7 @@
2842
2843 /* Save any floating point call-saved registers used by this
2844 function. */
2845 - if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
2846 + if (TARGET_FPA_EMU2)
2847 {
2848 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
2849 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
2850 @@ -13483,7 +14808,11 @@
2851 {
2852 fprintf (stream, ", %s ", shift);
2853 if (val == -1)
2854 - arm_print_operand (stream, XEXP (x, 1), 0);
2855 + {
2856 + arm_print_operand (stream, XEXP (x, 1), 0);
2857 + if (janus2_code)
2858 + fprintf(stream, "\n\tnop");
2859 + }
2860 else
2861 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
2862 }
2863 @@ -13704,6 +15033,30 @@
2864 }
2865 return;
2866
2867 + /* Print the high single-precision register of a VFP double-precision
2868 + register. */
2869 + case 'p':
2870 + {
2871 + int mode = GET_MODE (x);
2872 + int regno;
2873 +
2874 + if (GET_MODE_SIZE (mode) != 8 || GET_CODE (x) != REG)
2875 + {
2876 + output_operand_lossage ("invalid operand for code '%c'", code);
2877 + return;
2878 + }
2879 +
2880 + regno = REGNO (x);
2881 + if (!VFP_REGNO_OK_FOR_DOUBLE (regno))
2882 + {
2883 + output_operand_lossage ("invalid operand for code '%c'", code);
2884 + return;
2885 + }
2886 +
2887 + fprintf (stream, "s%d", regno - FIRST_VFP_REGNUM + 1);
2888 + }
2889 + return;
2890 +
2891 /* Print a VFP/Neon double precision or quad precision register name. */
2892 case 'P':
2893 case 'q':
2894 @@ -13821,6 +15174,57 @@
2895 }
2896 return;
2897
2898 + /* Memory operand for vld1/vst1 instruction. */
2899 + case 'A':
2900 + {
2901 + rtx addr;
2902 + bool postinc = FALSE;
2903 + unsigned align;
2904 +
2905 + gcc_assert (GET_CODE (x) == MEM);
2906 + addr = XEXP (x, 0);
2907 + if (GET_CODE (addr) == POST_INC)
2908 + {
2909 + postinc = 1;
2910 + addr = XEXP (addr, 0);
2911 + }
2912 + align = MEM_ALIGN (x) >> 3;
2913 + asm_fprintf (stream, "[%r", REGNO (addr));
2914 + if (align > GET_MODE_SIZE (GET_MODE (x)))
2915 + align = GET_MODE_SIZE (GET_MODE (x));
2916 + if (align >= 8)
2917 + asm_fprintf (stream, ", :%d", align << 3);
2918 + asm_fprintf (stream, "]");
2919 + if (postinc)
2920 + fputs("!", stream);
2921 + }
2922 + return;
2923 +
2924 + /* Register specifier for vld1.16/vst1.16. Translate the S register
2925 + number into a D register number and element index. */
2926 + case 'z':
2927 + {
2928 + int mode = GET_MODE (x);
2929 + int regno;
2930 +
2931 + if (GET_MODE_SIZE (mode) != 2 || GET_CODE (x) != REG)
2932 + {
2933 + output_operand_lossage ("invalid operand for code '%c'", code);
2934 + return;
2935 + }
2936 +
2937 + regno = REGNO (x);
2938 + if (!VFP_REGNO_OK_FOR_SINGLE (regno))
2939 + {
2940 + output_operand_lossage ("invalid operand for code '%c'", code);
2941 + return;
2942 + }
2943 +
2944 + regno = regno - FIRST_VFP_REGNUM;
2945 + fprintf (stream, "d%d[%d]", regno/2, ((regno % 2) ? 2 : 0));
2946 + }
2947 + return;
2948 +
2949 default:
2950 if (x == 0)
2951 {
2952 @@ -13854,6 +15258,12 @@
2953 default:
2954 gcc_assert (GET_CODE (x) != NEG);
2955 fputc ('#', stream);
2956 + if (GET_CODE (x) == HIGH)
2957 + {
2958 + fputs (":lower16:", stream);
2959 + x = XEXP (x, 0);
2960 + }
2961 +
2962 output_addr_const (stream, x);
2963 break;
2964 }
2965 @@ -14245,6 +15655,10 @@
2966 first insn after the following code_label if REVERSE is true. */
2967 rtx start_insn = insn;
2968
2969 + /* Don't do this if we're not considering conditional execution. */
2970 + if (TARGET_NO_SINGLE_COND_EXEC)
2971 + return;
2972 +
2973 /* If in state 4, check if the target branch is reached, in order to
2974 change back to state 0. */
2975 if (arm_ccfsm_state == 4)
2976 @@ -14618,6 +16032,11 @@
2977 if (mode == DFmode)
2978 return VFP_REGNO_OK_FOR_DOUBLE (regno);
2979
2980 + /* VFP registers can hold HFmode values, but there is no point in
2981 + putting them there unless we have hardware conversion insns. */
2982 + if (mode == HFmode)
2983 + return TARGET_FP16 && VFP_REGNO_OK_FOR_SINGLE (regno);
2984 +
2985 if (TARGET_NEON)
2986 return (VALID_NEON_DREG_MODE (mode) && VFP_REGNO_OK_FOR_DOUBLE (regno))
2987 || (VALID_NEON_QREG_MODE (mode)
2988 @@ -14637,16 +16056,16 @@
2989 return mode == SImode;
2990
2991 if (IS_IWMMXT_REGNUM (regno))
2992 - return VALID_IWMMXT_REG_MODE (mode);
2993 + return VALID_IWMMXT_REG_MODE (mode) && mode != SImode;
2994 }
2995
2996 - /* We allow any value to be stored in the general registers.
2997 + /* We allow almost any value to be stored in the general registers.
2998 Restrict doubleword quantities to even register pairs so that we can
2999 - use ldrd. Do not allow Neon structure opaque modes in general registers;
3000 - they would use too many. */
3001 + use ldrd. Do not allow very large Neon structure opaque modes in
3002 + general registers; they would use too many. */
3003 if (regno <= LAST_ARM_REGNUM)
3004 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0)
3005 - && !VALID_NEON_STRUCT_MODE (mode);
3006 + && ARM_NUM_REGS (mode) <= 4;
3007
3008 if (regno == FRAME_POINTER_REGNUM
3009 || regno == ARG_POINTER_REGNUM)
3010 @@ -16103,6 +17522,15 @@
3011 }
3012
3013 static void
3014 +arm_init_fp16_builtins (void)
3015 +{
3016 + tree fp16_type = make_node (REAL_TYPE);
3017 + TYPE_PRECISION (fp16_type) = 16;
3018 + layout_type (fp16_type);
3019 + (*lang_hooks.types.register_builtin_type) (fp16_type, "__fp16");
3020 +}
3021 +
3022 +static void
3023 arm_init_builtins (void)
3024 {
3025 arm_init_tls_builtins ();
3026 @@ -16112,6 +17540,71 @@
3027
3028 if (TARGET_NEON)
3029 arm_init_neon_builtins ();
3030 +
3031 + if (arm_fp16_format)
3032 + arm_init_fp16_builtins ();
3033 +}
3034 +
3035 +/* Implement TARGET_INVALID_PARAMETER_TYPE. */
3036 +
3037 +static const char *
3038 +arm_invalid_parameter_type (const_tree t)
3039 +{
3040 + if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
3041 + return N_("function parameters cannot have __fp16 type");
3042 + return NULL;
3043 +}
3044 +
3045 +/* Implement TARGET_INVALID_PARAMETER_TYPE. */
3046 +
3047 +static const char *
3048 +arm_invalid_return_type (const_tree t)
3049 +{
3050 + if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
3051 + return N_("functions cannot return __fp16 type");
3052 + return NULL;
3053 +}
3054 +
3055 +/* Implement TARGET_PROMOTED_TYPE. */
3056 +
3057 +static tree
3058 +arm_promoted_type (const_tree t)
3059 +{
3060 + if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
3061 + return float_type_node;
3062 + return NULL_TREE;
3063 +}
3064 +
3065 +/* Implement TARGET_CONVERT_TO_TYPE.
3066 + Specifically, this hook implements the peculiarity of the ARM
3067 + half-precision floating-point C semantics that requires conversions between
3068 + __fp16 to or from double to do an intermediate conversion to float. */
3069 +
3070 +static tree
3071 +arm_convert_to_type (tree type, tree expr)
3072 +{
3073 + tree fromtype = TREE_TYPE (expr);
3074 + if (!SCALAR_FLOAT_TYPE_P (fromtype) || !SCALAR_FLOAT_TYPE_P (type))
3075 + return NULL_TREE;
3076 + if ((TYPE_PRECISION (fromtype) == 16 && TYPE_PRECISION (type) > 32)
3077 + || (TYPE_PRECISION (type) == 16 && TYPE_PRECISION (fromtype) > 32))
3078 + return convert (type, convert (float_type_node, expr));
3079 + return NULL_TREE;
3080 +}
3081 +
3082 +/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.
3083 + This simply adds HFmode as a supported mode; even though we don't
3084 + implement arithmetic on this type directly, it's supported by
3085 + optabs conversions, much the way the double-word arithmetic is
3086 + special-cased in the default hook. */
3087 +
3088 +static bool
3089 +arm_scalar_mode_supported_p (enum machine_mode mode)
3090 +{
3091 + if (mode == HFmode)
3092 + return (arm_fp16_format != ARM_FP16_FORMAT_NONE);
3093 + else
3094 + return default_scalar_mode_supported_p (mode);
3095 }
3096
3097 /* Errors in the source file can cause expand_expr to return const0_rtx
3098 @@ -17191,6 +18684,7 @@
3099 unsigned HOST_WIDE_INT mask = 0xff;
3100 int i;
3101
3102 + val = val & (unsigned HOST_WIDE_INT)0xffffffffu;
3103 if (val == 0) /* XXX */
3104 return 0;
3105
3106 @@ -18279,40 +19773,8 @@
3107 else
3108 {
3109 int set_float_abi_attributes = 0;
3110 - switch (arm_fpu_arch)
3111 - {
3112 - case FPUTYPE_FPA:
3113 - fpu_name = "fpa";
3114 - break;
3115 - case FPUTYPE_FPA_EMU2:
3116 - fpu_name = "fpe2";
3117 - break;
3118 - case FPUTYPE_FPA_EMU3:
3119 - fpu_name = "fpe3";
3120 - break;
3121 - case FPUTYPE_MAVERICK:
3122 - fpu_name = "maverick";
3123 - break;
3124 - case FPUTYPE_VFP:
3125 - fpu_name = "vfp";
3126 - set_float_abi_attributes = 1;
3127 - break;
3128 - case FPUTYPE_VFP3D16:
3129 - fpu_name = "vfpv3-d16";
3130 - set_float_abi_attributes = 1;
3131 - break;
3132 - case FPUTYPE_VFP3:
3133 - fpu_name = "vfpv3";
3134 - set_float_abi_attributes = 1;
3135 - break;
3136 - case FPUTYPE_NEON:
3137 - fpu_name = "neon";
3138 - set_float_abi_attributes = 1;
3139 - break;
3140 - default:
3141 - abort();
3142 - }
3143 - if (set_float_abi_attributes)
3144 + fpu_name = arm_fpu_desc->name;
3145 + if (arm_fp_model == ARM_FP_MODEL_VFP)
3146 {
3147 if (TARGET_HARD_FLOAT)
3148 asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
3149 @@ -18362,6 +19824,11 @@
3150 val = 6;
3151 asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
3152
3153 + /* Tag_ABI_FP_16bit_format. */
3154 + if (arm_fp16_format)
3155 + asm_fprintf (asm_out_file, "\t.eabi_attribute 38, %d\n",
3156 + (int)arm_fp16_format);
3157 +
3158 if (arm_lang_output_object_attributes_hook)
3159 arm_lang_output_object_attributes_hook();
3160 }
3161 @@ -18591,6 +20058,23 @@
3162 return 1;
3163 }
3164
3165 +/* Emit a fp16 constant appropriately padded to occupy a 4-byte word.
3166 + HFmode constant pool entries are actually loaded with ldr. */
3167 +void
3168 +arm_emit_fp16_const (rtx c)
3169 +{
3170 + REAL_VALUE_TYPE r;
3171 + long bits;
3172 +
3173 + REAL_VALUE_FROM_CONST_DOUBLE (r, c);
3174 + bits = real_to_target (NULL, &r, HFmode);
3175 + if (WORDS_BIG_ENDIAN)
3176 + assemble_zeros (2);
3177 + assemble_integer (GEN_INT (bits), 2, BITS_PER_WORD, 1);
3178 + if (!WORDS_BIG_ENDIAN)
3179 + assemble_zeros (2);
3180 +}
3181 +
3182 const char *
3183 arm_output_load_gr (rtx *operands)
3184 {
3185 @@ -18628,19 +20112,24 @@
3186 that way. */
3187
3188 static void
3189 -arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
3190 +arm_setup_incoming_varargs (CUMULATIVE_ARGS *pcum,
3191 enum machine_mode mode,
3192 tree type,
3193 int *pretend_size,
3194 int second_time ATTRIBUTE_UNUSED)
3195 {
3196 - int nregs = cum->nregs;
3197 - if (nregs & 1
3198 - && ARM_DOUBLEWORD_ALIGN
3199 - && arm_needs_doubleword_align (mode, type))
3200 - nregs++;
3201 -
3202 + int nregs;
3203 +
3204 cfun->machine->uses_anonymous_args = 1;
3205 + if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
3206 + {
3207 + nregs = pcum->aapcs_ncrn;
3208 + if ((nregs & 1) && arm_needs_doubleword_align (mode, type))
3209 + nregs++;
3210 + }
3211 + else
3212 + nregs = pcum->nregs;
3213 +
3214 if (nregs < NUM_ARG_REGS)
3215 *pretend_size = (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
3216 }
3217 @@ -19024,9 +20513,10 @@
3218 || mode == V16QImode || mode == V4SFmode || mode == V2DImode))
3219 return true;
3220
3221 - if ((mode == V2SImode)
3222 - || (mode == V4HImode)
3223 - || (mode == V8QImode))
3224 + if ((TARGET_NEON || TARGET_IWMMXT)
3225 + && ((mode == V2SImode)
3226 + || (mode == V4HImode)
3227 + || (mode == V8QImode)))
3228 return true;
3229
3230 return false;
3231 @@ -19057,9 +20547,14 @@
3232 if (IS_FPA_REGNUM (regno))
3233 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
3234
3235 - /* FIXME: VFPv3 register numbering. */
3236 if (IS_VFP_REGNUM (regno))
3237 - return 64 + regno - FIRST_VFP_REGNUM;
3238 + {
3239 + /* See comment in arm_dwarf_register_span. */
3240 + if (VFP_REGNO_OK_FOR_SINGLE (regno))
3241 + return 64 + regno - FIRST_VFP_REGNUM;
3242 + else
3243 + return 256 + (regno - FIRST_VFP_REGNUM) / 2;
3244 + }
3245
3246 if (IS_IWMMXT_GR_REGNUM (regno))
3247 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
3248 @@ -19070,6 +20565,39 @@
3249 gcc_unreachable ();
3250 }
3251
3252 +/* Dwarf models VFPv3 registers as 32 64-bit registers.
3253 + GCC models tham as 64 32-bit registers, so we need to describe this to
3254 + the DWARF generation code. Other registers can use the default. */
3255 +static rtx
3256 +arm_dwarf_register_span(rtx rtl)
3257 +{
3258 + unsigned regno;
3259 + int nregs;
3260 + int i;
3261 + rtx p;
3262 +
3263 + regno = REGNO (rtl);
3264 + if (!IS_VFP_REGNUM (regno))
3265 + return NULL_RTX;
3266 +
3267 + /* The EABI defines two VFP register ranges:
3268 + 64-95: Legacy VFPv2 numbering for S0-S31 (obsolescent)
3269 + 256-287: D0-D31
3270 + The recommended encodings for s0-s31 is a DW_OP_bit_piece of the
3271 + corresponding D register. However gdb6.6 does not support this, so
3272 + we use the legacy encodings. We also use these encodings for D0-D15
3273 + for compatibility with older debuggers. */
3274 + if (VFP_REGNO_OK_FOR_SINGLE (regno))
3275 + return NULL_RTX;
3276 +
3277 + nregs = GET_MODE_SIZE (GET_MODE (rtl)) / 8;
3278 + p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc(nregs));
3279 + regno = (regno - FIRST_VFP_REGNUM) / 2;
3280 + for (i = 0; i < nregs; i++)
3281 + XVECEXP (p, 0, i) = gen_rtx_REG (DImode, 256 + regno + i);
3282 +
3283 + return p;
3284 +}
3285
3286 #ifdef TARGET_UNWIND_INFO
3287 /* Emit unwind directives for a store-multiple instruction or stack pointer
3288 @@ -19556,6 +21084,7 @@
3289 case cortexr4f:
3290 case cortexa8:
3291 case cortexa9:
3292 + case marvell_f:
3293 return 2;
3294
3295 default:
3296 @@ -19620,6 +21149,10 @@
3297 return "St9__va_list";
3298 }
3299
3300 + /* Half-precision float. */
3301 + if (TREE_CODE (type) == REAL_TYPE && TYPE_PRECISION (type) == 16)
3302 + return "Dh";
3303 +
3304 if (TREE_CODE (type) != VECTOR_TYPE)
3305 return NULL;
3306
3307 @@ -19676,6 +21209,87 @@
3308 given on the command line. */
3309 if (level > 0)
3310 flag_section_anchors = 2;
3311 +
3312 + if (size)
3313 + {
3314 + /* Select optimizations that are a win for code size.
3315 +
3316 + The inlining options set below have two important
3317 + consequences for functions not explicitly marked
3318 + inline:
3319 + - Static functions used once are inlined if
3320 + sufficiently small. Static functions used twice
3321 + are not inlined.
3322 + - Non-static functions are never inlined.
3323 + So in effect, inlining will never cause two copies
3324 + of function bodies to be created. */
3325 + /* Empirical results show that these options benefit code
3326 + size on arm. */
3327 + /* FIXME: -fsee seems to be broken for Thumb-2. */
3328 + /* flag_see = 1; */
3329 + flag_move_loop_invariants = 0;
3330 + /* In Thumb mode the function call code size overhead is typically very
3331 + small, and narrow branch instructions have very limited range.
3332 + Inlining even medium sized functions tends to bloat the caller and
3333 + require the use of long branch instructions. On average the long
3334 + branches cost more than eliminating the function call overhead saves,
3335 + so we use extremely restrictive automatic inlining heuristics. In ARM
3336 + mode the results are fairly neutral, probably due to better constant
3337 + pool placement. */
3338 + set_param_value ("max-inline-insns-single", 1);
3339 + set_param_value ("max-inline-insns-auto", 1);
3340 + }
3341 + else
3342 + {
3343 + /* CSL LOCAL */
3344 + /* Set flag_unroll_loops to a default value, so that we can tell
3345 + if it was specified on the command line; see
3346 + arm_override_options. */
3347 + flag_unroll_loops = 2;
3348 + /* Promote loop indices to int where possible. Consider moving this
3349 + to -Os, also. */
3350 + flag_promote_loop_indices = 1;
3351 + }
3352 +}
3353 +
3354 +/* Return how many instructions to look ahead for better insn
3355 + scheduling. */
3356 +static int
3357 +arm_multipass_dfa_lookahead (void)
3358 +{
3359 + return (arm_tune == marvell_f) ? 4 : 0;
3360 +}
3361 +
3362 +/* Return the minimum alignment required to load or store a
3363 + vector of the given type, which may be less than the
3364 + natural alignment of the type. */
3365 +
3366 +static int
3367 +arm_vector_min_alignment (const_tree type)
3368 +{
3369 + if (TARGET_NEON)
3370 + {
3371 + /* The NEON element load and store instructions only require the
3372 + alignment of the element type. They can benefit from higher
3373 + statically reported alignment, but we do not take advantage
3374 + of that yet. */
3375 + gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
3376 + return TYPE_ALIGN_UNIT (TREE_TYPE (type));
3377 + }
3378 +
3379 + return default_vector_min_alignment (type);
3380 +}
3381 +
3382 +static bool
3383 +arm_vector_always_misalign(const_tree type ATTRIBUTE_UNUSED)
3384 +{
3385 + /* On big-endian targets array loads (vld1) and vector loads (vldm)
3386 + use a different format. Always use the "misaligned" array variant.
3387 + FIXME: this still doesn't work for big-endian because of constant
3388 + loads and other operations using vldm ordering. See
3389 + issue 6722. */
3390 + return TARGET_NEON && !BYTES_BIG_ENDIAN;
3391 }
3392
3393 #include "gt-arm.h"
3394 +
3395 --- a/gcc/config/arm/arm-cores.def
3396 +++ b/gcc/config/arm/arm-cores.def
3397 @@ -104,6 +104,7 @@
3398 ARM_CORE("xscale", xscale, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE, xscale)
3399 ARM_CORE("iwmmxt", iwmmxt, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT, xscale)
3400 ARM_CORE("iwmmxt2", iwmmxt2, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT, xscale)
3401 +ARM_CORE("marvell-f", marvell_f, 5TE, FL_LDSCHED | FL_VFPV2 | FL_MARVELL_F, 9e)
3402
3403 /* V5TEJ Architecture Processors */
3404 ARM_CORE("arm926ej-s", arm926ejs, 5TEJ, FL_LDSCHED, 9e)
3405 @@ -117,9 +118,13 @@
3406 ARM_CORE("mpcorenovfp", mpcorenovfp, 6K, FL_LDSCHED, 9e)
3407 ARM_CORE("mpcore", mpcore, 6K, FL_LDSCHED | FL_VFPV2, 9e)
3408 ARM_CORE("arm1156t2-s", arm1156t2s, 6T2, FL_LDSCHED, 9e)
3409 +
3410 +/* V7 Architecture Processors */
3411 +ARM_CORE("cortex-a5", cortexa5, 7A, FL_LDSCHED, 9e)
3412 ARM_CORE("cortex-a8", cortexa8, 7A, FL_LDSCHED, 9e)
3413 ARM_CORE("cortex-a9", cortexa9, 7A, FL_LDSCHED, 9e)
3414 ARM_CORE("cortex-r4", cortexr4, 7R, FL_LDSCHED, 9e)
3415 ARM_CORE("cortex-r4f", cortexr4f, 7R, FL_LDSCHED, 9e)
3416 ARM_CORE("cortex-m3", cortexm3, 7M, FL_LDSCHED, 9e)
3417 ARM_CORE("cortex-m1", cortexm1, 6M, FL_LDSCHED, 9e)
3418 +ARM_CORE("cortex-m0", cortexm0, 6M, FL_LDSCHED, 9e)
3419 --- a/gcc/config/arm/arm.h
3420 +++ b/gcc/config/arm/arm.h
3421 @@ -85,6 +85,10 @@
3422 builtin_define ("__IWMMXT__"); \
3423 if (TARGET_AAPCS_BASED) \
3424 builtin_define ("__ARM_EABI__"); \
3425 + if (arm_tune_marvell_f) \
3426 + builtin_define ("__ARM_TUNE_MARVELL_F__"); \
3427 + if (low_irq_latency) \
3428 + builtin_define ("__low_irq_latency__"); \
3429 } while (0)
3430
3431 /* The various ARM cores. */
3432 @@ -199,6 +203,13 @@
3433 #define TARGET_AAPCS_BASED \
3434 (arm_abi != ARM_ABI_APCS && arm_abi != ARM_ABI_ATPCS)
3435
3436 +/* True if we should avoid generating conditional execution instructions. */
3437 +#define TARGET_NO_COND_EXEC (arm_tune_marvell_f && !optimize_size)
3438 +/* Avoid most conditional instructions, but allow pairs with opposite
3439 + conditions and the same destination. */
3440 +#define TARGET_NO_SINGLE_COND_EXEC \
3441 + ((arm_tune_cortex_a9 || arm_tune_marvell_f) && !optimize_size)
3442 +
3443 #define TARGET_HARD_TP (target_thread_pointer == TP_CP15)
3444 #define TARGET_SOFT_TP (target_thread_pointer == TP_SOFT)
3445
3446 @@ -211,35 +222,43 @@
3447 /* Thumb-1 only. */
3448 #define TARGET_THUMB1_ONLY (TARGET_THUMB1 && !arm_arch_notm)
3449
3450 +#define TARGET_FPA_EMU2 (TARGET_FPA && arm_fpu_desc->rev == 2)
3451 /* The following two macros concern the ability to execute coprocessor
3452 instructions for VFPv3 or NEON. TARGET_VFP3/TARGET_VFPD32 are currently
3453 only ever tested when we know we are generating for VFP hardware; we need
3454 to be more careful with TARGET_NEON as noted below. */
3455
3456 /* FPU is has the full VFPv3/NEON register file of 32 D registers. */
3457 -#define TARGET_VFPD32 (arm_fp_model == ARM_FP_MODEL_VFP \
3458 - && (arm_fpu_arch == FPUTYPE_VFP3 \
3459 - || arm_fpu_arch == FPUTYPE_NEON))
3460 +#define TARGET_VFPD32 (TARGET_VFP && arm_arch_vfp_regs == VFP_REG_D32)
3461
3462 /* FPU supports VFPv3 instructions. */
3463 -#define TARGET_VFP3 (arm_fp_model == ARM_FP_MODEL_VFP \
3464 - && (arm_fpu_arch == FPUTYPE_VFP3D16 \
3465 - || TARGET_VFPD32))
3466 +#define TARGET_VFP3 (TARGET_VFP && arm_arch_vfp_rev >= 3)
3467 +
3468 +/* FPU only supports VFP single-precision instructions. */
3469 +#define TARGET_VFP_SINGLE (TARGET_VFP && arm_arch_vfp_regs == VFP_REG_SINGLE)
3470 +
3471 +/* FPU supports VFP double-precision instructions. */
3472 +#define TARGET_VFP_DOUBLE (TARGET_VFP && arm_arch_vfp_regs != VFP_REG_SINGLE)
3473 +
3474 +/* FPU supports half-precision floating-point with NEON element load/store. */
3475 +#define TARGET_NEON_FP16 (TARGET_VFP && arm_arch_vfp_neon && arm_arch_vfp_fp16)
3476 +
3477 +/* FPU supports VFP half-precision floating-point. */
3478 +#define TARGET_FP16 (TARGET_VFP && arm_arch_vfp_fp16)
3479
3480 /* FPU supports Neon instructions. The setting of this macro gets
3481 revealed via __ARM_NEON__ so we add extra guards upon TARGET_32BIT
3482 and TARGET_HARD_FLOAT to ensure that NEON instructions are
3483 available. */
3484 #define TARGET_NEON (TARGET_32BIT && TARGET_HARD_FLOAT \
3485 - && arm_fp_model == ARM_FP_MODEL_VFP \
3486 - && arm_fpu_arch == FPUTYPE_NEON)
3487 + && TARGET_VFP && arm_arch_vfp_neon)
3488
3489 /* "DSP" multiply instructions, eg. SMULxy. */
3490 #define TARGET_DSP_MULTIPLY \
3491 - (TARGET_32BIT && arm_arch5e && arm_arch_notm)
3492 + (TARGET_32BIT && arm_arch5e && (arm_arch_notm || arm_arch7em))
3493 /* Integer SIMD instructions, and extend-accumulate instructions. */
3494 #define TARGET_INT_SIMD \
3495 - (TARGET_32BIT && arm_arch6 && arm_arch_notm)
3496 + (TARGET_32BIT && arm_arch6 && (arm_arch_notm || arm_arch7em))
3497
3498 /* Should MOVW/MOVT be used in preference to a constant pool. */
3499 #define TARGET_USE_MOVT (arm_arch_thumb2 && !optimize_size)
3500 @@ -289,40 +308,30 @@
3501 ARM_FP_MODEL_VFP
3502 };
3503
3504 -extern enum arm_fp_model arm_fp_model;
3505 -
3506 -/* Which floating point hardware is available. Also update
3507 - fp_model_for_fpu in arm.c when adding entries to this list. */
3508 -enum fputype
3509 -{
3510 - /* No FP hardware. */
3511 - FPUTYPE_NONE,
3512 - /* Full FPA support. */
3513 - FPUTYPE_FPA,
3514 - /* Emulated FPA hardware, Issue 2 emulator (no LFM/SFM). */
3515 - FPUTYPE_FPA_EMU2,
3516 - /* Emulated FPA hardware, Issue 3 emulator. */
3517 - FPUTYPE_FPA_EMU3,
3518 - /* Cirrus Maverick floating point co-processor. */
3519 - FPUTYPE_MAVERICK,
3520 - /* VFP. */
3521 - FPUTYPE_VFP,
3522 - /* VFPv3-D16. */
3523 - FPUTYPE_VFP3D16,
3524 - /* VFPv3. */
3525 - FPUTYPE_VFP3,
3526 - /* Neon. */
3527 - FPUTYPE_NEON
3528 +enum vfp_reg_type {
3529 + VFP_REG_D16,
3530 + VFP_REG_D32,
3531 + VFP_REG_SINGLE
3532 };
3533
3534 -/* Recast the floating point class to be the floating point attribute. */
3535 -#define arm_fpu_attr ((enum attr_fpu) arm_fpu_tune)
3536 -
3537 -/* What type of floating point to tune for */
3538 -extern enum fputype arm_fpu_tune;
3539 +extern const struct arm_fpu_desc
3540 +{
3541 + const char *name;
3542 + enum arm_fp_model model;
3543 + int rev;
3544 + enum vfp_reg_type myregs;
3545 + int neon;
3546 + int fp16;
3547 +} *arm_fpu_desc;
3548 +
3549 +#define arm_fp_model arm_fpu_desc->model
3550 +#define arm_arch_vfp_rev arm_fpu_desc->rev
3551 +#define arm_arch_vfp_regs arm_fpu_desc->myregs
3552 +#define arm_arch_vfp_neon arm_fpu_desc->neon
3553 +#define arm_arch_vfp_fp16 arm_fpu_desc->fp16
3554
3555 -/* What type of floating point instructions are available */
3556 -extern enum fputype arm_fpu_arch;
3557 +/* Which floating point hardware to schedule for. */
3558 +extern int arm_fpu_attr;
3559
3560 enum float_abi_type
3561 {
3562 @@ -337,6 +346,21 @@
3563 #define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_SOFT
3564 #endif
3565
3566 +/* Which __fp16 format to use.
3567 + The enumeration values correspond to the numbering for the
3568 + Tag_ABI_FP_16bit_format attribute.
3569 + */
3570 +enum arm_fp16_format_type
3571 +{
3572 + ARM_FP16_FORMAT_NONE = 0,
3573 + ARM_FP16_FORMAT_IEEE = 1,
3574 + ARM_FP16_FORMAT_ALTERNATIVE = 2
3575 +};
3576 +
3577 +extern enum arm_fp16_format_type arm_fp16_format;
3578 +#define LARGEST_EXPONENT_IS_NORMAL(bits) \
3579 + ((bits) == 16 && arm_fp16_format == ARM_FP16_FORMAT_ALTERNATIVE)
3580 +
3581 /* Which ABI to use. */
3582 enum arm_abi_type
3583 {
3584 @@ -383,12 +407,18 @@
3585 /* Nonzero if instructions not present in the 'M' profile can be used. */
3586 extern int arm_arch_notm;
3587
3588 +/* Nonzero if instructions present in ARMv7E-M can be used. */
3589 +extern int arm_arch7em;
3590 +
3591 /* Nonzero if this chip can benefit from load scheduling. */
3592 extern int arm_ld_sched;
3593
3594 /* Nonzero if generating thumb code. */
3595 extern int thumb_code;
3596
3597 +/* Nonzero if generating Janus2 code. */
3598 +extern int janus2_code;
3599 +
3600 /* Nonzero if this chip is a StrongARM. */
3601 extern int arm_tune_strongarm;
3602
3603 @@ -404,6 +434,9 @@
3604 /* Nonzero if tuning for XScale. */
3605 extern int arm_tune_xscale;
3606
3607 +/* Nonzero if tuning for Marvell Feroceon. */
3608 +extern int arm_tune_marvell_f;
3609 +
3610 /* Nonzero if tuning for stores via the write buffer. */
3611 extern int arm_tune_wbuf;
3612
3613 @@ -423,6 +456,10 @@
3614 /* Nonzero if chip supports integer division instruction. */
3615 extern int arm_arch_hwdiv;
3616
3617 +/* Nonzero if we should minimize interrupt latency of the
3618 + generated code. */
3619 +extern int low_irq_latency;
3620 +
3621 #ifndef TARGET_DEFAULT
3622 #define TARGET_DEFAULT (MASK_APCS_FRAME)
3623 #endif
3624 @@ -757,12 +794,11 @@
3625 fixed_regs[regno] = call_used_regs[regno] = 1; \
3626 } \
3627 \
3628 - if (TARGET_THUMB && optimize_size) \
3629 - { \
3630 - /* When optimizing for size, it's better not to use \
3631 - the HI regs, because of the overhead of stacking \
3632 - them. */ \
3633 - /* ??? Is this still true for thumb2? */ \
3634 + if (TARGET_THUMB1 && optimize_size) \
3635 + { \
3636 + /* When optimizing for size on Thumb-1, it's better not \
3637 + to use the HI regs, because of the overhead of \
3638 + stacking them. */ \
3639 for (regno = FIRST_HI_REGNUM; \
3640 regno <= LAST_HI_REGNUM; ++regno) \
3641 fixed_regs[regno] = call_used_regs[regno] = 1; \
3642 @@ -881,6 +917,9 @@
3643 /* The number of (integer) argument register available. */
3644 #define NUM_ARG_REGS 4
3645
3646 +/* And similarly for the VFP. */
3647 +#define NUM_VFP_ARG_REGS 16
3648 +
3649 /* Return the register number of the N'th (integer) argument. */
3650 #define ARG_REGISTER(N) (N - 1)
3651
3652 @@ -1059,7 +1098,7 @@
3653 (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
3654
3655 #define VALID_IWMMXT_REG_MODE(MODE) \
3656 - (arm_vector_mode_supported_p (MODE) || (MODE) == DImode)
3657 + (arm_vector_mode_supported_p (MODE) || (MODE) == DImode || (MODE) == SImode)
3658
3659 /* Modes valid for Neon D registers. */
3660 #define VALID_NEON_DREG_MODE(MODE) \
3661 @@ -1230,11 +1269,14 @@
3662 || reg_classes_intersect_p (VFP_REGS, (CLASS)) \
3663 : 0)
3664
3665 -/* We need to define this for LO_REGS on thumb. Otherwise we can end up
3666 - using r0-r4 for function arguments, r7 for the stack frame and don't
3667 - have enough left over to do doubleword arithmetic. */
3668 +/* We need to define this for LO_REGS on Thumb-1. Otherwise we can end up
3669 + using r0-r4 for function arguments, r7 for the stack frame and don't have
3670 + enough left over to do doubleword arithmetic. For Thumb-2 all the
3671 + potentially problematic instructions accept high registers so this is not
3672 + necessary. Care needs to be taken to avoid adding new Thumb-2 patterns
3673 + that require many low registers. */
3674 #define CLASS_LIKELY_SPILLED_P(CLASS) \
3675 - ((TARGET_THUMB && (CLASS) == LO_REGS) \
3676 + ((TARGET_THUMB1 && (CLASS) == LO_REGS) \
3677 || (CLASS) == CC_REG)
3678
3679 /* The class value for index registers, and the one for base regs. */
3680 @@ -1245,7 +1287,7 @@
3681 when addressing quantities in QI or HI mode; if we don't know the
3682 mode, then we must be conservative. */
3683 #define MODE_BASE_REG_CLASS(MODE) \
3684 - (TARGET_32BIT ? CORE_REGS : \
3685 + (TARGET_32BIT ? (TARGET_THUMB2 ? LO_REGS : CORE_REGS) : \
3686 (((MODE) == SImode) ? BASE_REGS : LO_REGS))
3687
3688 /* For Thumb we can not support SP+reg addressing, so we return LO_REGS
3689 @@ -1346,6 +1388,9 @@
3690 else if (TARGET_MAVERICK && TARGET_HARD_FLOAT) \
3691 /* Need to be careful, -256 is not a valid offset. */ \
3692 low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
3693 + else if (TARGET_REALLY_IWMMXT && MODE == SImode) \
3694 + /* Need to be careful, -1024 is not a valid offset. */ \
3695 + low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
3696 else if (MODE == SImode \
3697 || (MODE == SFmode && TARGET_SOFT_FLOAT) \
3698 || ((MODE == HImode || MODE == QImode) && ! arm_arch4)) \
3699 @@ -1416,13 +1461,17 @@
3700 /* If defined, gives a class of registers that cannot be used as the
3701 operand of a SUBREG that changes the mode of the object illegally. */
3702
3703 -/* Moves between FPA_REGS and GENERAL_REGS are two memory insns. */
3704 +/* Moves between FPA_REGS and GENERAL_REGS are two memory insns.
3705 + Moves between VFP_REGS and GENERAL_REGS are a single insn, but
3706 + it is typically more expensive than a single memory access. We set
3707 + the cost to less than two memory accesses so that floating
3708 + point to integer conversion does not go through memory. */
3709 #define REGISTER_MOVE_COST(MODE, FROM, TO) \
3710 (TARGET_32BIT ? \
3711 ((FROM) == FPA_REGS && (TO) != FPA_REGS ? 20 : \
3712 (FROM) != FPA_REGS && (TO) == FPA_REGS ? 20 : \
3713 - IS_VFP_CLASS (FROM) && !IS_VFP_CLASS (TO) ? 10 : \
3714 - !IS_VFP_CLASS (FROM) && IS_VFP_CLASS (TO) ? 10 : \
3715 + IS_VFP_CLASS (FROM) && !IS_VFP_CLASS (TO) ? 15 : \
3716 + !IS_VFP_CLASS (FROM) && IS_VFP_CLASS (TO) ? 15 : \
3717 (FROM) == IWMMXT_REGS && (TO) != IWMMXT_REGS ? 4 : \
3718 (FROM) != IWMMXT_REGS && (TO) == IWMMXT_REGS ? 4 : \
3719 (FROM) == IWMMXT_GR_REGS || (TO) == IWMMXT_GR_REGS ? 20 : \
3720 @@ -1491,9 +1540,10 @@
3721
3722 /* Define how to find the value returned by a library function
3723 assuming the value has mode MODE. */
3724 -#define LIBCALL_VALUE(MODE) \
3725 - (TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_FPA \
3726 - && GET_MODE_CLASS (MODE) == MODE_FLOAT \
3727 +#define LIBCALL_VALUE(MODE) \
3728 + (TARGET_AAPCS_BASED ? aapcs_libcall_value (MODE) \
3729 + : (TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_FPA \
3730 + && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
3731 ? gen_rtx_REG (MODE, FIRST_FPA_REGNUM) \
3732 : TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK \
3733 && GET_MODE_CLASS (MODE) == MODE_FLOAT \
3734 @@ -1502,22 +1552,16 @@
3735 ? gen_rtx_REG (MODE, FIRST_IWMMXT_REGNUM) \
3736 : gen_rtx_REG (MODE, ARG_REGISTER (1)))
3737
3738 -/* Define how to find the value returned by a function.
3739 - VALTYPE is the data type of the value (as a tree).
3740 - If the precise function being called is known, FUNC is its FUNCTION_DECL;
3741 - otherwise, FUNC is 0. */
3742 -#define FUNCTION_VALUE(VALTYPE, FUNC) \
3743 - arm_function_value (VALTYPE, FUNC);
3744 -
3745 -/* 1 if N is a possible register number for a function value.
3746 - On the ARM, only r0 and f0 can return results. */
3747 -/* On a Cirrus chip, mvf0 can return results. */
3748 -#define FUNCTION_VALUE_REGNO_P(REGNO) \
3749 - ((REGNO) == ARG_REGISTER (1) \
3750 - || (TARGET_32BIT && ((REGNO) == FIRST_CIRRUS_FP_REGNUM) \
3751 - && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK) \
3752 - || ((REGNO) == FIRST_IWMMXT_REGNUM && TARGET_IWMMXT_ABI) \
3753 - || (TARGET_32BIT && ((REGNO) == FIRST_FPA_REGNUM) \
3754 +/* 1 if REGNO is a possible register number for a function value. */
3755 +#define FUNCTION_VALUE_REGNO_P(REGNO) \
3756 + ((REGNO) == ARG_REGISTER (1) \
3757 + || (TARGET_AAPCS_BASED && TARGET_32BIT \
3758 + && TARGET_VFP && TARGET_HARD_FLOAT \
3759 + && (REGNO) == FIRST_VFP_REGNUM) \
3760 + || (TARGET_32BIT && ((REGNO) == FIRST_CIRRUS_FP_REGNUM) \
3761 + && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK) \
3762 + || ((REGNO) == FIRST_IWMMXT_REGNUM && TARGET_IWMMXT_ABI) \
3763 + || (TARGET_32BIT && ((REGNO) == FIRST_FPA_REGNUM) \
3764 && TARGET_HARD_FLOAT_ABI && TARGET_FPA))
3765
3766 /* Amount of memory needed for an untyped call to save all possible return
3767 @@ -1617,9 +1661,27 @@
3768 that is in text_section. */
3769 extern GTY(()) rtx thumb_call_via_label[14];
3770
3771 +/* The number of potential ways of assigning to a co-processor. */
3772 +#define ARM_NUM_COPROC_SLOTS 1
3773 +
3774 +/* Enumeration of procedure calling standard variants. We don't really
3775 + support all of these yet. */
3776 +enum arm_pcs
3777 +{
3778 + ARM_PCS_AAPCS, /* Base standard AAPCS. */
3779 + ARM_PCS_AAPCS_VFP, /* Use VFP registers for floating point values. */
3780 + ARM_PCS_AAPCS_IWMMXT, /* Use iWMMXT registers for vectors. */
3781 + /* This must be the last AAPCS variant. */
3782 + ARM_PCS_AAPCS_LOCAL, /* Private call within this compilation unit. */
3783 + ARM_PCS_ATPCS, /* ATPCS. */
3784 + ARM_PCS_APCS, /* APCS (legacy Linux etc). */
3785 + ARM_PCS_UNKNOWN
3786 +};
3787 +
3788 +/* We can't define this inside a generator file because it needs enum
3789 + machine_mode. */
3790 /* A C type for declaring a variable that is used as the first argument of
3791 - `FUNCTION_ARG' and other related values. For some target machines, the
3792 - type `int' suffices and can hold the number of bytes of argument so far. */
3793 + `FUNCTION_ARG' and other related values. */
3794 typedef struct
3795 {
3796 /* This is the number of registers of arguments scanned so far. */
3797 @@ -1628,9 +1690,33 @@
3798 int iwmmxt_nregs;
3799 int named_count;
3800 int nargs;
3801 - int can_split;
3802 + /* Which procedure call variant to use for this call. */
3803 + enum arm_pcs pcs_variant;
3804 +
3805 + /* AAPCS related state tracking. */
3806 + int aapcs_arg_processed; /* No need to lay out this argument again. */
3807 + int aapcs_cprc_slot; /* Index of co-processor rules to handle
3808 + this argument, or -1 if using core
3809 + registers. */
3810 + int aapcs_ncrn;
3811 + int aapcs_next_ncrn;
3812 + rtx aapcs_reg; /* Register assigned to this argument. */
3813 + int aapcs_partial; /* How many bytes are passed in regs (if
3814 + split between core regs and stack.
3815 + Zero otherwise. */
3816 + int aapcs_cprc_failed[ARM_NUM_COPROC_SLOTS];
3817 + int can_split; /* Argument can be split between core regs
3818 + and the stack. */
3819 + /* Private data for tracking VFP register allocation */
3820 + unsigned aapcs_vfp_regs_free;
3821 + unsigned aapcs_vfp_reg_alloc;
3822 + int aapcs_vfp_rcount;
3823 + /* Can't include insn-modes.h because this header is needed before we
3824 + generate it. */
3825 + int /* enum machine_mode */ aapcs_vfp_rmode;
3826 } CUMULATIVE_ARGS;
3827
3828 +
3829 /* Define where to put the arguments to a function.
3830 Value is zero to push the argument on the stack,
3831 or a hard register in which to store the argument.
3832 @@ -1674,13 +1760,7 @@
3833 of mode MODE and data type TYPE.
3834 (TYPE is null for libcalls where that information may not be available.) */
3835 #define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
3836 - (CUM).nargs += 1; \
3837 - if (arm_vector_mode_supported_p (MODE) \
3838 - && (CUM).named_count > (CUM).nargs \
3839 - && TARGET_IWMMXT_ABI) \
3840 - (CUM).iwmmxt_nregs += 1; \
3841 - else \
3842 - (CUM).nregs += ARM_NUM_REGS2 (MODE, TYPE)
3843 + arm_function_arg_advance (&(CUM), (MODE), (TYPE), (NAMED))
3844
3845 /* If defined, a C expression that gives the alignment boundary, in bits, of an
3846 argument with the specified mode and type. If it is not defined,
3847 @@ -1692,9 +1772,11 @@
3848
3849 /* 1 if N is a possible register number for function argument passing.
3850 On the ARM, r0-r3 are used to pass args. */
3851 -#define FUNCTION_ARG_REGNO_P(REGNO) \
3852 - (IN_RANGE ((REGNO), 0, 3) \
3853 - || (TARGET_IWMMXT_ABI \
3854 +#define FUNCTION_ARG_REGNO_P(REGNO) \
3855 + (IN_RANGE ((REGNO), 0, 3) \
3856 + || (TARGET_AAPCS_BASED && TARGET_VFP && TARGET_HARD_FLOAT \
3857 + && IN_RANGE ((REGNO), FIRST_VFP_REGNUM, FIRST_VFP_REGNUM + 15)) \
3858 + || (TARGET_IWMMXT_ABI \
3859 && IN_RANGE ((REGNO), FIRST_IWMMXT_REGNUM, FIRST_IWMMXT_REGNUM + 9)))
3860
3861 \f
3862 @@ -2324,7 +2406,8 @@
3863 /* Try to generate sequences that don't involve branches, we can then use
3864 conditional instructions */
3865 #define BRANCH_COST(speed_p, predictable_p) \
3866 - (TARGET_32BIT ? 4 : (optimize > 0 ? 2 : 0))
3867 + (TARGET_32BIT ? (TARGET_THUMB2 && optimize_size ? 1 : 4) \
3868 + : (optimize > 0 ? 2 : 0))
3869 \f
3870 /* Position Independent Code. */
3871 /* We decide which register to use based on the compilation options and
3872 @@ -2392,6 +2475,7 @@
3873
3874 /* The arm5 clz instruction returns 32. */
3875 #define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1)
3876 +#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1)
3877 \f
3878 #undef ASM_APP_OFF
3879 #define ASM_APP_OFF (TARGET_THUMB1 ? "\t.code\t16\n" : \
3880 @@ -2404,6 +2488,19 @@
3881 if (TARGET_ARM) \
3882 asm_fprintf (STREAM,"\tstmfd\t%r!,{%r}\n", \
3883 STACK_POINTER_REGNUM, REGNO); \
3884 + else if (TARGET_THUMB1 \
3885 + && (REGNO) == STATIC_CHAIN_REGNUM) \
3886 + { \
3887 + /* We can't push STATIC_CHAIN_REGNUM (r12) directly with Thumb-1.
3888 + We know that ASM_OUTPUT_REG_PUSH will be matched with
3889 + ASM_OUTPUT_REG_POP, and that r7 isn't used by the function
3890 + profiler, so we can use it as a scratch reg. WARNING: This isn't
3891 + safe in the general case! It may be sensitive to future changes
3892 + in final.c:profile_function. */ \
3893 + asm_fprintf (STREAM, "\tpush\t{r7}\n"); \
3894 + asm_fprintf (STREAM, "\tmov\tr7, %r\n", REGNO);\
3895 + asm_fprintf (STREAM, "\tpush\t{r7}\n"); \
3896 + } \
3897 else \
3898 asm_fprintf (STREAM, "\tpush {%r}\n", REGNO); \
3899 } while (0)
3900 @@ -2415,6 +2512,14 @@
3901 if (TARGET_ARM) \
3902 asm_fprintf (STREAM, "\tldmfd\t%r!,{%r}\n", \
3903 STACK_POINTER_REGNUM, REGNO); \
3904 + else if (TARGET_THUMB1 \
3905 + && (REGNO) == STATIC_CHAIN_REGNUM) \
3906 + { \
3907 + /* See comment in ASM_OUTPUT_REG_PUSH. */ \
3908 + asm_fprintf (STREAM, "\tpop\t{r7}\n"); \
3909 + asm_fprintf (STREAM, "\tmov\t%r, r7\n", REGNO);\
3910 + asm_fprintf (STREAM, "\tpop\t{r7}\n"); \
3911 + } \
3912 else \
3913 asm_fprintf (STREAM, "\tpop {%r}\n", REGNO); \
3914 } while (0)
3915 --- a/gcc/config/arm/arm.md
3916 +++ b/gcc/config/arm/arm.md
3917 @@ -99,6 +99,7 @@
3918 ; correctly for PIC usage.
3919 (UNSPEC_GOTSYM_OFF 24) ; The offset of the start of the the GOT from a
3920 ; a given symbolic address.
3921 + (UNSPEC_RBIT 25) ; rbit operation.
3922 ]
3923 )
3924
3925 @@ -131,6 +132,8 @@
3926 (VUNSPEC_WCMP_EQ 12) ; Used by the iWMMXt WCMPEQ instructions
3927 (VUNSPEC_WCMP_GTU 13) ; Used by the iWMMXt WCMPGTU instructions
3928 (VUNSPEC_WCMP_GT 14) ; Used by the iwMMXT WCMPGT instructions
3929 + (VUNSPEC_ALIGN16 15) ; Used to force 16-byte alignment.
3930 + (VUNSPEC_ALIGN32 16) ; Used to force 32-byte alignment.
3931 (VUNSPEC_EH_RETURN 20); Use to override the return address for exception
3932 ; handling.
3933 ]
3934 @@ -144,6 +147,10 @@
3935 ; patterns that share the same RTL in both ARM and Thumb code.
3936 (define_attr "is_thumb" "no,yes" (const (symbol_ref "thumb_code")))
3937
3938 +; FIX_JANUS is set to 'yes' when compiling for Janus2; it causes a nop to
3939 +; be added after shifts, in order to work around a Janus2 bug
3940 +(define_attr "fix_janus" "no,yes" (const (symbol_ref "janus2_code")))
3941 +
3942 ; IS_STRONGARM is set to 'yes' when compiling for StrongARM, it affects
3943 ; scheduling decisions for the load unit and the multiplier.
3944 (define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_tune_strongarm")))
3945 @@ -158,7 +165,7 @@
3946 ; Floating Point Unit. If we only have floating point emulation, then there
3947 ; is no point in scheduling the floating point insns. (Well, for best
3948 ; performance we should try and group them together).
3949 -(define_attr "fpu" "none,fpa,fpe2,fpe3,maverick,vfp,vfpv3d16,vfpv3,neon"
3950 +(define_attr "fpu" "none,fpa,fpe2,fpe3,maverick,vfp"
3951 (const (symbol_ref "arm_fpu_attr")))
3952
3953 ; LENGTH of an instruction (in bytes)
3954 @@ -185,7 +192,7 @@
3955 ;; scheduling information.
3956
3957 (define_attr "insn"
3958 - "mov,mvn,smulxy,smlaxy,smlalxy,smulwy,smlawx,mul,muls,mla,mlas,umull,umulls,umlal,umlals,smull,smulls,smlal,smlals,smlawy,smuad,smuadx,smlad,smladx,smusd,smusdx,smlsd,smlsdx,smmul,smmulr,smmla,umaal,smlald,smlsld,clz,mrs,msr,xtab,sdiv,udiv,other"
3959 + "mov,mvn,and,orr,eor,smulxy,smlaxy,smlalxy,smulwy,smlawx,mul,muls,mla,mlas,umull,umulls,umlal,umlals,smull,smulls,smlal,smlals,smlawy,smuad,smuadx,smlad,smladx,smusd,smusdx,smlsd,smlsdx,smmul,smmulr,smmla,umaal,smlald,smlsld,clz,mrs,msr,xtab,sdiv,udiv,other"
3960 (const_string "other"))
3961
3962 ; TYPE attribute is used to detect floating point instructions which, if
3963 @@ -251,8 +258,6 @@
3964 (define_attr "ldsched" "no,yes" (const (symbol_ref "arm_ld_sched")))
3965
3966 ;; Classification of NEON instructions for scheduling purposes.
3967 -;; Do not set this attribute and the "type" attribute together in
3968 -;; any one instruction pattern.
3969 (define_attr "neon_type"
3970 "neon_int_1,\
3971 neon_int_2,\
3972 @@ -415,7 +420,7 @@
3973
3974 (define_attr "generic_sched" "yes,no"
3975 (const (if_then_else
3976 - (ior (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa8,cortexa9")
3977 + (ior (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa8,cortexa9,marvell_f")
3978 (eq_attr "tune_cortexr4" "yes"))
3979 (const_string "no")
3980 (const_string "yes"))))
3981 @@ -423,7 +428,7 @@
3982 (define_attr "generic_vfp" "yes,no"
3983 (const (if_then_else
3984 (and (eq_attr "fpu" "vfp")
3985 - (eq_attr "tune" "!arm1020e,arm1022e,cortexa8,cortexa9")
3986 + (eq_attr "tune" "!arm1020e,arm1022e,cortexa8,cortexa9,marvell_f")
3987 (eq_attr "tune_cortexr4" "no"))
3988 (const_string "yes")
3989 (const_string "no"))))
3990 @@ -437,6 +442,8 @@
3991 (include "cortex-a9.md")
3992 (include "cortex-r4.md")
3993 (include "cortex-r4f.md")
3994 +(include "marvell-f.md")
3995 +(include "marvell-f-vfp.md")
3996 (include "vfp11.md")
3997
3998 \f
3999 @@ -472,9 +479,9 @@
4000 if (TARGET_THUMB1)
4001 {
4002 if (GET_CODE (operands[1]) != REG)
4003 - operands[1] = force_reg (SImode, operands[1]);
4004 + operands[1] = force_reg (DImode, operands[1]);
4005 if (GET_CODE (operands[2]) != REG)
4006 - operands[2] = force_reg (SImode, operands[2]);
4007 + operands[2] = force_reg (DImode, operands[2]);
4008 }
4009 "
4010 )
4011 @@ -620,10 +627,11 @@
4012 sub%?\\t%0, %1, #%n2
4013 sub%?\\t%0, %1, #%n2
4014 #"
4015 - "TARGET_32BIT &&
4016 - GET_CODE (operands[2]) == CONST_INT
4017 + "TARGET_32BIT
4018 + && GET_CODE (operands[2]) == CONST_INT
4019 && !(const_ok_for_arm (INTVAL (operands[2]))
4020 - || const_ok_for_arm (-INTVAL (operands[2])))"
4021 + || const_ok_for_arm (-INTVAL (operands[2])))
4022 + && (reload_completed || !arm_eliminable_register (operands[1]))"
4023 [(clobber (const_int 0))]
4024 "
4025 arm_split_constant (PLUS, SImode, curr_insn,
4026 @@ -639,10 +647,10 @@
4027 ;; register. Trying to reload it will always fail catastrophically,
4028 ;; so never allow those alternatives to match if reloading is needed.
4029
4030 -(define_insn "*thumb1_addsi3"
4031 - [(set (match_operand:SI 0 "register_operand" "=l,l,l,*rk,*hk,l,!k")
4032 - (plus:SI (match_operand:SI 1 "register_operand" "%0,0,l,*0,*0,!k,!k")
4033 - (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*hk,*rk,!M,!O")))]
4034 +(define_insn_and_split "*thumb1_addsi3"
4035 + [(set (match_operand:SI 0 "register_operand" "=l,l,l,*rk,*hk,l,!k,l,l")
4036 + (plus:SI (match_operand:SI 1 "register_operand" "%0,0,l,*0,*0,!k,!k,0,l")
4037 + (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*hk,*rk,!M,!O,Pa,Pb")))]
4038 "TARGET_THUMB1"
4039 "*
4040 static const char * const asms[] =
4041 @@ -653,7 +661,9 @@
4042 \"add\\t%0, %0, %2\",
4043 \"add\\t%0, %0, %2\",
4044 \"add\\t%0, %1, %2\",
4045 - \"add\\t%0, %1, %2\"
4046 + \"add\\t%0, %1, %2\",
4047 + \"#\",
4048 + \"#\"
4049 };
4050 if ((which_alternative == 2 || which_alternative == 6)
4051 && GET_CODE (operands[2]) == CONST_INT
4052 @@ -661,7 +671,22 @@
4053 return \"sub\\t%0, %1, #%n2\";
4054 return asms[which_alternative];
4055 "
4056 - [(set_attr "length" "2")]
4057 + "&& reload_completed && CONST_INT_P (operands[2])
4058 + && operands[1] != stack_pointer_rtx
4059 + && (INTVAL (operands[2]) > 255 || INTVAL (operands[2]) < -255)"
4060 + [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))
4061 + (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
4062 + {
4063 + HOST_WIDE_INT offset = INTVAL (operands[2]);
4064 + if (offset > 255)
4065 + offset = 255;
4066 + else if (offset < -255)
4067 + offset = -255;
4068 +
4069 + operands[3] = GEN_INT (offset);
4070 + operands[2] = GEN_INT (INTVAL (operands[2]) - offset);
4071 + }
4072 + [(set_attr "length" "2,2,2,2,2,2,2,4,4")]
4073 )
4074
4075 ;; Reloading and elimination of the frame pointer can
4076 @@ -854,7 +879,11 @@
4077 [(set_attr "conds" "use")
4078 (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "")
4079 (const_string "alu_shift")
4080 - (const_string "alu_shift_reg")))]
4081 + (const_string "alu_shift_reg")))
4082 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
4083 + (eq_attr "fix_janus" "yes"))
4084 + (const_int 8)
4085 + (const_int 4)))]
4086 )
4087
4088 (define_insn "*addsi3_carryin_alt1"
4089 @@ -938,7 +967,7 @@
4090 [(set (match_operand:DF 0 "s_register_operand" "")
4091 (plus:DF (match_operand:DF 1 "s_register_operand" "")
4092 (match_operand:DF 2 "arm_float_add_operand" "")))]
4093 - "TARGET_32BIT && TARGET_HARD_FLOAT"
4094 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE"
4095 "
4096 if (TARGET_MAVERICK
4097 && !cirrus_fp_register (operands[2], DFmode))
4098 @@ -1176,7 +1205,7 @@
4099 [(set (match_operand:DF 0 "s_register_operand" "")
4100 (minus:DF (match_operand:DF 1 "arm_float_rhs_operand" "")
4101 (match_operand:DF 2 "arm_float_rhs_operand" "")))]
4102 - "TARGET_32BIT && TARGET_HARD_FLOAT"
4103 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE"
4104 "
4105 if (TARGET_MAVERICK)
4106 {
4107 @@ -1332,6 +1361,49 @@
4108 (set_attr "predicable" "yes")]
4109 )
4110
4111 +; The combiner cannot combine the first and last insns in the
4112 +; following sequence because of the intervening insn, so help the
4113 +; combiner with this splitter. The combiner does attempt to split
4114 +; this particular combination but does not know this exact split.
4115 +; Note that the combiner puts the constant at the outermost operation
4116 +; as a part of canonicalization.
4117 +;
4118 +; mul r3, r2, r1
4119 +; <add/sub> r3, r3, <constant>
4120 +; add r3, r3, r4
4121 +
4122 +(define_split
4123 + [(set (match_operand:SI 0 "s_register_operand" "")
4124 + (match_operator:SI 1 "plusminus_operator"
4125 + [(plus:SI (mult:SI (match_operand:SI 2 "s_register_operand" "")
4126 + (match_operand:SI 3 "s_register_operand" ""))
4127 + (match_operand:SI 4 "s_register_operand" ""))
4128 + (match_operand:SI 5 "arm_immediate_operand" "")]))]
4129 + "TARGET_32BIT"
4130 + [(set (match_dup 0)
4131 + (plus:SI (mult:SI (match_dup 2) (match_dup 3))
4132 + (match_dup 4)))
4133 + (set (match_dup 0)
4134 + (match_op_dup:SI 1 [(match_dup 0) (match_dup 5)]))]
4135 + "")
4136 +
4137 +; Likewise for MLS. MLS is available only on select architectures.
4138 +
4139 +(define_split
4140 + [(set (match_operand:SI 0 "s_register_operand" "")
4141 + (match_operator:SI 1 "plusminus_operator"
4142 + [(minus:SI (match_operand:SI 2 "s_register_operand" "")
4143 + (mult:SI (match_operand:SI 3 "s_register_operand" "")
4144 + (match_operand:SI 4 "s_register_operand" "")))
4145 + (match_operand:SI 5 "arm_immediate_operand" "")]))]
4146 + "TARGET_32BIT && arm_arch_thumb2"
4147 + [(set (match_dup 0)
4148 + (minus:SI (match_dup 2)
4149 + (mult:SI (match_dup 3) (match_dup 4))))
4150 + (set (match_dup 0)
4151 + (match_op_dup:SI 1 [(match_dup 0) (match_dup 5)]))]
4152 + "")
4153 +
4154 (define_insn "*mulsi3addsi_compare0"
4155 [(set (reg:CC_NOOV CC_REGNUM)
4156 (compare:CC_NOOV
4157 @@ -1713,7 +1785,7 @@
4158 [(set (match_operand:DF 0 "s_register_operand" "")
4159 (mult:DF (match_operand:DF 1 "s_register_operand" "")
4160 (match_operand:DF 2 "arm_float_rhs_operand" "")))]
4161 - "TARGET_32BIT && TARGET_HARD_FLOAT"
4162 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE"
4163 "
4164 if (TARGET_MAVERICK
4165 && !cirrus_fp_register (operands[2], DFmode))
4166 @@ -1733,7 +1805,7 @@
4167 [(set (match_operand:DF 0 "s_register_operand" "")
4168 (div:DF (match_operand:DF 1 "arm_float_rhs_operand" "")
4169 (match_operand:DF 2 "arm_float_rhs_operand" "")))]
4170 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
4171 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP_DOUBLE)"
4172 "")
4173 \f
4174 ;; Modulo insns
4175 @@ -1960,6 +2032,7 @@
4176 DONE;
4177 "
4178 [(set_attr "length" "4,4,16")
4179 + (set_attr "insn" "and")
4180 (set_attr "predicable" "yes")]
4181 )
4182
4183 @@ -1969,7 +2042,8 @@
4184 (match_operand:SI 2 "register_operand" "l")))]
4185 "TARGET_THUMB1"
4186 "and\\t%0, %0, %2"
4187 - [(set_attr "length" "2")]
4188 + [(set_attr "length" "2")
4189 + (set_attr "insn" "and")]
4190 )
4191
4192 (define_insn "*andsi3_compare0"
4193 @@ -1984,7 +2058,8 @@
4194 "@
4195 and%.\\t%0, %1, %2
4196 bic%.\\t%0, %1, #%B2"
4197 - [(set_attr "conds" "set")]
4198 + [(set_attr "conds" "set")
4199 + (set_attr "insn" "and,*")]
4200 )
4201
4202 (define_insn "*andsi3_compare0_scratch"
4203 @@ -2280,7 +2355,7 @@
4204 }
4205 }
4206
4207 - target = operands[0];
4208 + target = copy_rtx (operands[0]);
4209 /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
4210 subreg as the final target. */
4211 if (GET_CODE (target) == SUBREG)
4212 @@ -2528,7 +2603,11 @@
4213 (set_attr "shift" "2")
4214 (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "")
4215 (const_string "alu_shift")
4216 - (const_string "alu_shift_reg")))]
4217 + (const_string "alu_shift_reg")))
4218 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
4219 + (eq_attr "fix_janus" "yes"))
4220 + (const_int 8)
4221 + (const_int 4)))]
4222 )
4223
4224 (define_insn "*andsi_notsi_si_compare0"
4225 @@ -2576,6 +2655,7 @@
4226 orr%?\\t%Q0, %Q1, %2
4227 #"
4228 [(set_attr "length" "4,8")
4229 + (set_attr "insn" "orr")
4230 (set_attr "predicable" "yes")]
4231 )
4232
4233 @@ -2638,7 +2718,8 @@
4234 (match_operand:SI 2 "register_operand" "l")))]
4235 "TARGET_THUMB1"
4236 "orr\\t%0, %0, %2"
4237 - [(set_attr "length" "2")]
4238 + [(set_attr "length" "2")
4239 + (set_attr "insn" "orr")]
4240 )
4241
4242 (define_peephole2
4243 @@ -2663,7 +2744,8 @@
4244 (ior:SI (match_dup 1) (match_dup 2)))]
4245 "TARGET_32BIT"
4246 "orr%.\\t%0, %1, %2"
4247 - [(set_attr "conds" "set")]
4248 + [(set_attr "conds" "set")
4249 + (set_attr "insn" "orr")]
4250 )
4251
4252 (define_insn "*iorsi3_compare0_scratch"
4253 @@ -2674,7 +2756,8 @@
4254 (clobber (match_scratch:SI 0 "=r"))]
4255 "TARGET_32BIT"
4256 "orr%.\\t%0, %1, %2"
4257 - [(set_attr "conds" "set")]
4258 + [(set_attr "conds" "set")
4259 + (set_attr "insn" "orr")]
4260 )
4261
4262 (define_insn "xordi3"
4263 @@ -2697,7 +2780,8 @@
4264 eor%?\\t%Q0, %Q1, %2
4265 #"
4266 [(set_attr "length" "4,8")
4267 - (set_attr "predicable" "yes")]
4268 + (set_attr "predicable" "yes")
4269 + (set_attr "insn" "eor")]
4270 )
4271
4272 (define_insn "*xordi_sesidi_di"
4273 @@ -2728,7 +2812,8 @@
4274 (match_operand:SI 2 "arm_rhs_operand" "rI")))]
4275 "TARGET_32BIT"
4276 "eor%?\\t%0, %1, %2"
4277 - [(set_attr "predicable" "yes")]
4278 + [(set_attr "predicable" "yes")
4279 + (set_attr "insn" "eor")]
4280 )
4281
4282 (define_insn "*thumb1_xorsi3"
4283 @@ -2737,7 +2822,8 @@
4284 (match_operand:SI 2 "register_operand" "l")))]
4285 "TARGET_THUMB1"
4286 "eor\\t%0, %0, %2"
4287 - [(set_attr "length" "2")]
4288 + [(set_attr "length" "2")
4289 + (set_attr "insn" "eor")]
4290 )
4291
4292 (define_insn "*xorsi3_compare0"
4293 @@ -2749,7 +2835,8 @@
4294 (xor:SI (match_dup 1) (match_dup 2)))]
4295 "TARGET_32BIT"
4296 "eor%.\\t%0, %1, %2"
4297 - [(set_attr "conds" "set")]
4298 + [(set_attr "conds" "set")
4299 + (set_attr "insn" "eor")]
4300 )
4301
4302 (define_insn "*xorsi3_compare0_scratch"
4303 @@ -2906,7 +2993,7 @@
4304 (smax:SI (match_operand:SI 1 "s_register_operand" "")
4305 (match_operand:SI 2 "arm_rhs_operand" "")))
4306 (clobber (reg:CC CC_REGNUM))])]
4307 - "TARGET_32BIT"
4308 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
4309 "
4310 if (operands[2] == const0_rtx || operands[2] == constm1_rtx)
4311 {
4312 @@ -2933,7 +3020,8 @@
4313 (const_int -1)))]
4314 "TARGET_32BIT"
4315 "orr%?\\t%0, %1, %1, asr #31"
4316 - [(set_attr "predicable" "yes")]
4317 + [(set_attr "predicable" "yes")
4318 + (set_attr "insn" "orr")]
4319 )
4320
4321 (define_insn "*arm_smax_insn"
4322 @@ -2941,7 +3029,7 @@
4323 (smax:SI (match_operand:SI 1 "s_register_operand" "%0,?r")
4324 (match_operand:SI 2 "arm_rhs_operand" "rI,rI")))
4325 (clobber (reg:CC CC_REGNUM))]
4326 - "TARGET_ARM"
4327 + "TARGET_ARM && !TARGET_NO_COND_EXEC"
4328 "@
4329 cmp\\t%1, %2\;movlt\\t%0, %2
4330 cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2"
4331 @@ -2955,7 +3043,7 @@
4332 (smin:SI (match_operand:SI 1 "s_register_operand" "")
4333 (match_operand:SI 2 "arm_rhs_operand" "")))
4334 (clobber (reg:CC CC_REGNUM))])]
4335 - "TARGET_32BIT"
4336 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
4337 "
4338 if (operands[2] == const0_rtx)
4339 {
4340 @@ -2973,7 +3061,8 @@
4341 (const_int 0)))]
4342 "TARGET_32BIT"
4343 "and%?\\t%0, %1, %1, asr #31"
4344 - [(set_attr "predicable" "yes")]
4345 + [(set_attr "predicable" "yes")
4346 + (set_attr "insn" "and")]
4347 )
4348
4349 (define_insn "*arm_smin_insn"
4350 @@ -2981,7 +3070,7 @@
4351 (smin:SI (match_operand:SI 1 "s_register_operand" "%0,?r")
4352 (match_operand:SI 2 "arm_rhs_operand" "rI,rI")))
4353 (clobber (reg:CC CC_REGNUM))]
4354 - "TARGET_ARM"
4355 + "TARGET_ARM && !TARGET_NO_COND_EXEC"
4356 "@
4357 cmp\\t%1, %2\;movge\\t%0, %2
4358 cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2"
4359 @@ -2995,7 +3084,7 @@
4360 (umax:SI (match_operand:SI 1 "s_register_operand" "")
4361 (match_operand:SI 2 "arm_rhs_operand" "")))
4362 (clobber (reg:CC CC_REGNUM))])]
4363 - "TARGET_32BIT"
4364 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
4365 ""
4366 )
4367
4368 @@ -3004,7 +3093,7 @@
4369 (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
4370 (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
4371 (clobber (reg:CC CC_REGNUM))]
4372 - "TARGET_ARM"
4373 + "TARGET_ARM && !TARGET_NO_COND_EXEC"
4374 "@
4375 cmp\\t%1, %2\;movcc\\t%0, %2
4376 cmp\\t%1, %2\;movcs\\t%0, %1
4377 @@ -3019,7 +3108,7 @@
4378 (umin:SI (match_operand:SI 1 "s_register_operand" "")
4379 (match_operand:SI 2 "arm_rhs_operand" "")))
4380 (clobber (reg:CC CC_REGNUM))])]
4381 - "TARGET_32BIT"
4382 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
4383 ""
4384 )
4385
4386 @@ -3028,7 +3117,7 @@
4387 (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
4388 (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
4389 (clobber (reg:CC CC_REGNUM))]
4390 - "TARGET_ARM"
4391 + "TARGET_ARM && !TARGET_NO_COND_EXEC"
4392 "@
4393 cmp\\t%1, %2\;movcs\\t%0, %2
4394 cmp\\t%1, %2\;movcc\\t%0, %1
4395 @@ -3043,7 +3132,7 @@
4396 [(match_operand:SI 1 "s_register_operand" "r")
4397 (match_operand:SI 2 "s_register_operand" "r")]))
4398 (clobber (reg:CC CC_REGNUM))]
4399 - "TARGET_32BIT"
4400 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
4401 "*
4402 operands[3] = gen_rtx_fmt_ee (minmax_code (operands[3]), SImode,
4403 operands[1], operands[2]);
4404 @@ -3163,11 +3252,23 @@
4405 [(set (match_operand:SI 0 "register_operand" "=l,l")
4406 (ashift:SI (match_operand:SI 1 "register_operand" "l,0")
4407 (match_operand:SI 2 "nonmemory_operand" "N,l")))]
4408 - "TARGET_THUMB1"
4409 + "TARGET_THUMB1 && !janus2_code"
4410 "lsl\\t%0, %1, %2"
4411 [(set_attr "length" "2")]
4412 )
4413
4414 +(define_insn "*thumb1_ashlsi3_janus2"
4415 + [(set (match_operand:SI 0 "register_operand" "=l,l")
4416 + (ashift:SI (match_operand:SI 1 "register_operand" "l,0")
4417 + (match_operand:SI 2 "nonmemory_operand" "N,l")))]
4418 + "TARGET_THUMB1 && janus2_code"
4419 + "@
4420 + lsl\\t%0, %1, %2
4421 + lsl\\t%0, %1, %2\;nop"
4422 + [(set_attr "length" "2,4")]
4423 +)
4424 +
4425 +
4426 (define_expand "ashrdi3"
4427 [(set (match_operand:DI 0 "s_register_operand" "")
4428 (ashiftrt:DI (match_operand:DI 1 "s_register_operand" "")
4429 @@ -3200,6 +3301,7 @@
4430 "TARGET_32BIT"
4431 "movs\\t%R0, %R1, asr #1\;mov\\t%Q0, %Q1, rrx"
4432 [(set_attr "conds" "clob")
4433 + (set_attr "insn" "mov")
4434 (set_attr "length" "8")]
4435 )
4436
4437 @@ -3219,11 +3321,22 @@
4438 [(set (match_operand:SI 0 "register_operand" "=l,l")
4439 (ashiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
4440 (match_operand:SI 2 "nonmemory_operand" "N,l")))]
4441 - "TARGET_THUMB1"
4442 + "TARGET_THUMB1 && !janus2_code"
4443 "asr\\t%0, %1, %2"
4444 [(set_attr "length" "2")]
4445 )
4446
4447 +(define_insn "*thumb1_ashrsi3_janus2"
4448 + [(set (match_operand:SI 0 "register_operand" "=l,l")
4449 + (ashiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
4450 + (match_operand:SI 2 "nonmemory_operand" "N,l")))]
4451 + "TARGET_THUMB1 && janus2_code"
4452 + "@
4453 + asr\\t%0, %1, %2
4454 + asr\\t%0, %1, %2\;nop"
4455 + [(set_attr "length" "2,4")]
4456 +)
4457 +
4458 (define_expand "lshrdi3"
4459 [(set (match_operand:DI 0 "s_register_operand" "")
4460 (lshiftrt:DI (match_operand:DI 1 "s_register_operand" "")
4461 @@ -3256,6 +3369,7 @@
4462 "TARGET_32BIT"
4463 "movs\\t%R0, %R1, lsr #1\;mov\\t%Q0, %Q1, rrx"
4464 [(set_attr "conds" "clob")
4465 + (set_attr "insn" "mov")
4466 (set_attr "length" "8")]
4467 )
4468
4469 @@ -3278,11 +3392,22 @@
4470 [(set (match_operand:SI 0 "register_operand" "=l,l")
4471 (lshiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
4472 (match_operand:SI 2 "nonmemory_operand" "N,l")))]
4473 - "TARGET_THUMB1"
4474 + "TARGET_THUMB1 && !janus2_code"
4475 "lsr\\t%0, %1, %2"
4476 [(set_attr "length" "2")]
4477 )
4478
4479 +(define_insn "*thumb1_lshrsi3_janus2"
4480 + [(set (match_operand:SI 0 "register_operand" "=l,l")
4481 + (lshiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
4482 + (match_operand:SI 2 "nonmemory_operand" "N,l")))]
4483 + "TARGET_THUMB1 && janus2_code"
4484 + "@
4485 + lsr\\t%0, %1, %2
4486 + lsr\\t%0, %1, %2; nop"
4487 + [(set_attr "length" "2,4")]
4488 +)
4489 +
4490 (define_expand "rotlsi3"
4491 [(set (match_operand:SI 0 "s_register_operand" "")
4492 (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
4493 @@ -3324,11 +3449,20 @@
4494 [(set (match_operand:SI 0 "register_operand" "=l")
4495 (rotatert:SI (match_operand:SI 1 "register_operand" "0")
4496 (match_operand:SI 2 "register_operand" "l")))]
4497 - "TARGET_THUMB1"
4498 + "TARGET_THUMB1 && !janus2_code"
4499 "ror\\t%0, %0, %2"
4500 [(set_attr "length" "2")]
4501 )
4502
4503 +(define_insn "*thumb1_rotrsi3_janus2"
4504 + [(set (match_operand:SI 0 "register_operand" "=l")
4505 + (rotatert:SI (match_operand:SI 1 "register_operand" "0")
4506 + (match_operand:SI 2 "register_operand" "l")))]
4507 + "TARGET_THUMB1 && janus2_code"
4508 + "ror\\t%0, %0, %2; nop"
4509 + [(set_attr "length" "4")]
4510 +)
4511 +
4512 (define_insn "*arm_shiftsi3"
4513 [(set (match_operand:SI 0 "s_register_operand" "=r")
4514 (match_operator:SI 3 "shift_operator"
4515 @@ -3340,7 +3474,11 @@
4516 (set_attr "shift" "1")
4517 (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
4518 (const_string "alu_shift")
4519 - (const_string "alu_shift_reg")))]
4520 + (const_string "alu_shift_reg")))
4521 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
4522 + (eq_attr "fix_janus" "yes"))
4523 + (const_int 8)
4524 + (const_int 4)))]
4525 )
4526
4527 (define_insn "*shiftsi3_compare0"
4528 @@ -3357,7 +3495,11 @@
4529 (set_attr "shift" "1")
4530 (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
4531 (const_string "alu_shift")
4532 - (const_string "alu_shift_reg")))]
4533 + (const_string "alu_shift_reg")))
4534 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
4535 + (eq_attr "fix_janus" "yes"))
4536 + (const_int 8)
4537 + (const_int 4)))]
4538 )
4539
4540 (define_insn "*shiftsi3_compare0_scratch"
4541 @@ -3370,7 +3512,11 @@
4542 "TARGET_32BIT"
4543 "* return arm_output_shift(operands, 1);"
4544 [(set_attr "conds" "set")
4545 - (set_attr "shift" "1")]
4546 + (set_attr "shift" "1")
4547 + (set (attr "length") (if_then_else (and (match_operand 2 "s_register_operand" "")
4548 + (eq_attr "fix_janus" "yes"))
4549 + (const_int 8)
4550 + (const_int 4)))]
4551 )
4552
4553 (define_insn "*arm_notsi_shiftsi"
4554 @@ -3382,9 +3528,14 @@
4555 "mvn%?\\t%0, %1%S3"
4556 [(set_attr "predicable" "yes")
4557 (set_attr "shift" "1")
4558 + (set_attr "insn" "mvn")
4559 (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
4560 (const_string "alu_shift")
4561 - (const_string "alu_shift_reg")))]
4562 + (const_string "alu_shift_reg")))
4563 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
4564 + (eq_attr "fix_janus" "yes"))
4565 + (const_int 8)
4566 + (const_int 4)))]
4567 )
4568
4569 (define_insn "*arm_notsi_shiftsi_compare0"
4570 @@ -3399,9 +3550,14 @@
4571 "mvn%.\\t%0, %1%S3"
4572 [(set_attr "conds" "set")
4573 (set_attr "shift" "1")
4574 + (set_attr "insn" "mvn")
4575 (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
4576 (const_string "alu_shift")
4577 - (const_string "alu_shift_reg")))]
4578 + (const_string "alu_shift_reg")))
4579 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
4580 + (eq_attr "fix_janus" "yes"))
4581 + (const_int 8)
4582 + (const_int 4)))]
4583 )
4584
4585 (define_insn "*arm_not_shiftsi_compare0_scratch"
4586 @@ -3415,9 +3571,14 @@
4587 "mvn%.\\t%0, %1%S3"
4588 [(set_attr "conds" "set")
4589 (set_attr "shift" "1")
4590 + (set_attr "insn" "mvn")
4591 (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
4592 (const_string "alu_shift")
4593 - (const_string "alu_shift_reg")))]
4594 + (const_string "alu_shift_reg")))
4595 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
4596 + (eq_attr "fix_janus" "yes"))
4597 + (const_int 8)
4598 + (const_int 4)))]
4599 )
4600
4601 ;; We don't really have extzv, but defining this using shifts helps
4602 @@ -3550,12 +3711,12 @@
4603 (define_expand "negdf2"
4604 [(set (match_operand:DF 0 "s_register_operand" "")
4605 (neg:DF (match_operand:DF 1 "s_register_operand" "")))]
4606 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
4607 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP_DOUBLE)"
4608 "")
4609
4610 ;; abssi2 doesn't really clobber the condition codes if a different register
4611 ;; is being set. To keep things simple, assume during rtl manipulations that
4612 -;; it does, but tell the final scan operator the truth. Similarly for
4613 +;; it does, and the splitter will eliminate it. Similarly for
4614 ;; (neg (abs...))
4615
4616 (define_expand "abssi2"
4617 @@ -3567,22 +3728,28 @@
4618 "
4619 if (TARGET_THUMB1)
4620 operands[2] = gen_rtx_SCRATCH (SImode);
4621 + else if (TARGET_NO_SINGLE_COND_EXEC)
4622 + {
4623 + emit_insn(gen_rtx_SET(VOIDmode, operands[0],
4624 + gen_rtx_ABS(SImode, operands[1])));
4625 + DONE;
4626 + }
4627 else
4628 operands[2] = gen_rtx_REG (CCmode, CC_REGNUM);
4629 ")
4630
4631 (define_insn "*arm_abssi2"
4632 - [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
4633 - (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
4634 + [(set (match_operand:SI 0 "s_register_operand" "=r")
4635 + (abs:SI (match_operand:SI 1 "s_register_operand" "r")))
4636 (clobber (reg:CC CC_REGNUM))]
4637 - "TARGET_ARM"
4638 - "@
4639 - cmp\\t%0, #0\;rsblt\\t%0, %0, #0
4640 - eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
4641 - [(set_attr "conds" "clob,*")
4642 - (set_attr "shift" "1")
4643 + "TARGET_32BIT && !TARGET_NO_SINGLE_COND_EXEC"
4644 + "#"
4645 + [(set_attr "shift" "1")
4646 ;; predicable can't be set based on the variant, so left as no
4647 - (set_attr "length" "8")]
4648 + (set (attr "length")
4649 + (if_then_else (eq_attr "is_thumb" "yes")
4650 + (const_int 10)
4651 + (const_int 8)))]
4652 )
4653
4654 (define_insn_and_split "*thumb1_abssi2"
4655 @@ -3600,17 +3767,17 @@
4656 )
4657
4658 (define_insn "*arm_neg_abssi2"
4659 - [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
4660 - (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
4661 + [(set (match_operand:SI 0 "s_register_operand" "=r")
4662 + (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "r"))))
4663 (clobber (reg:CC CC_REGNUM))]
4664 - "TARGET_ARM"
4665 - "@
4666 - cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
4667 - eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
4668 - [(set_attr "conds" "clob,*")
4669 - (set_attr "shift" "1")
4670 + "TARGET_32BIT && !TARGET_NO_SINGLE_COND_EXEC"
4671 + "#"
4672 + [(set_attr "shift" "1")
4673 ;; predicable can't be set based on the variant, so left as no
4674 - (set_attr "length" "8")]
4675 + (set (attr "length")
4676 + (if_then_else (eq_attr "is_thumb" "yes")
4677 + (const_int 10)
4678 + (const_int 8)))]
4679 )
4680
4681 (define_insn_and_split "*thumb1_neg_abssi2"
4682 @@ -3627,6 +3794,93 @@
4683 [(set_attr "length" "6")]
4684 )
4685
4686 +;; Simplified version for when avoiding conditional execution
4687 +(define_insn "*arm_nocond_abssi2"
4688 + [(set (match_operand:SI 0 "s_register_operand" "=&r")
4689 + (abs:SI (match_operand:SI 1 "s_register_operand" "r")))]
4690 + "TARGET_32BIT && TARGET_NO_SINGLE_COND_EXEC"
4691 + "#"
4692 + [(set_attr "shift" "1")
4693 + (set_attr "length" "8")
4694 + (set_attr "predicable" "yes")]
4695 +)
4696 +
4697 +(define_insn "*arm_nocond_neg_abssi2"
4698 + [(set (match_operand:SI 0 "s_register_operand" "=&r")
4699 + (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "r"))))]
4700 + "TARGET_32BIT && TARGET_NO_SINGLE_COND_EXEC"
4701 + "#"
4702 + [(set_attr "shift" "1")
4703 + (set_attr "length" "8")
4704 + (set_attr "predicable" "yes")]
4705 +)
4706 +
4707 +;; Splitters for ABS patterns.
4708 +
4709 +(define_split
4710 + [(set (match_operand:SI 0 "s_register_operand" "")
4711 + (abs:SI (match_operand:SI 1 "s_register_operand" "")))
4712 + (clobber (reg:CC CC_REGNUM))]
4713 + "TARGET_32BIT && reload_completed && rtx_equal_p(operands[0], operands[1])"
4714 + [(set (reg:CC CC_REGNUM) (compare:CC (match_dup 1) (const_int 0)))
4715 + (cond_exec (lt (reg:CC CC_REGNUM) (const_int 0))
4716 + (set (match_dup 0) (neg:SI (match_dup 1))))]
4717 +)
4718 +
4719 +(define_split
4720 + [(set (match_operand:SI 0 "s_register_operand" "")
4721 + (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" ""))))
4722 + (clobber (reg:CC CC_REGNUM))]
4723 + "TARGET_32BIT && reload_completed && rtx_equal_p(operands[0], operands[1])"
4724 + [(set (reg:CC CC_REGNUM) (compare:CC (match_dup 1) (const_int 0)))
4725 + (cond_exec (gt (reg:CC CC_REGNUM) (const_int 0))
4726 + (set (match_dup 0) (neg:SI (match_dup 1))))]
4727 +)
4728 +
4729 +;; GCC does not add/remove clobbers when matching splitters, so we need
4730 +;; variants with and without the CC clobber.
4731 +(define_split
4732 + [(set (match_operand:SI 0 "s_register_operand" "")
4733 + (abs:SI (match_operand:SI 1 "s_register_operand" "")))]
4734 + "TARGET_32BIT && reload_completed && !rtx_equal_p(operands[0], operands[1])"
4735 + [(set (match_dup 0) (xor:SI (ashiftrt:SI (match_dup 1) (const_int 31))
4736 + (match_dup 1)))
4737 + (set (match_dup 0) (minus:SI (match_dup 0)
4738 + (ashiftrt:SI (match_dup 1) (const_int 31))))]
4739 +)
4740 +
4741 +(define_split
4742 + [(set (match_operand:SI 0 "s_register_operand" "")
4743 + (abs:SI (match_operand:SI 1 "s_register_operand" "")))
4744 + (clobber (reg:CC CC_REGNUM))]
4745 + "TARGET_32BIT && reload_completed && !rtx_equal_p(operands[0], operands[1])"
4746 + [(set (match_dup 0) (xor:SI (ashiftrt:SI (match_dup 1) (const_int 31))
4747 + (match_dup 1)))
4748 + (set (match_dup 0) (minus:SI (match_dup 0)
4749 + (ashiftrt:SI (match_dup 1) (const_int 31))))]
4750 +)
4751 +
4752 +(define_split
4753 + [(set (match_operand:SI 0 "s_register_operand" "")
4754 + (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" ""))))]
4755 + "TARGET_32BIT && reload_completed && !rtx_equal_p(operands[0], operands[1])"
4756 + [(set (match_dup 0) (xor:SI (ashiftrt:SI (match_dup 1) (const_int 31))
4757 + (match_dup 1)))
4758 + (set (match_dup 0) (minus:SI (ashiftrt:SI (match_dup 1) (const_int 31))
4759 + (match_dup 0)))]
4760 +)
4761 +
4762 +(define_split
4763 + [(set (match_operand:SI 0 "s_register_operand" "")
4764 + (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" ""))))
4765 + (clobber (reg:CC CC_REGNUM))]
4766 + "TARGET_32BIT && reload_completed && !rtx_equal_p(operands[0], operands[1])"
4767 + [(set (match_dup 0) (xor:SI (ashiftrt:SI (match_dup 1) (const_int 31))
4768 + (match_dup 1)))
4769 + (set (match_dup 0) (minus:SI (ashiftrt:SI (match_dup 1) (const_int 31))
4770 + (match_dup 0)))]
4771 +)
4772 +
4773 (define_expand "abssf2"
4774 [(set (match_operand:SF 0 "s_register_operand" "")
4775 (abs:SF (match_operand:SF 1 "s_register_operand" "")))]
4776 @@ -3636,7 +3890,7 @@
4777 (define_expand "absdf2"
4778 [(set (match_operand:DF 0 "s_register_operand" "")
4779 (abs:DF (match_operand:DF 1 "s_register_operand" "")))]
4780 - "TARGET_32BIT && TARGET_HARD_FLOAT"
4781 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE"
4782 "")
4783
4784 (define_expand "sqrtsf2"
4785 @@ -3648,7 +3902,7 @@
4786 (define_expand "sqrtdf2"
4787 [(set (match_operand:DF 0 "s_register_operand" "")
4788 (sqrt:DF (match_operand:DF 1 "s_register_operand" "")))]
4789 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
4790 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP_DOUBLE)"
4791 "")
4792
4793 (define_insn_and_split "one_cmpldi2"
4794 @@ -3682,7 +3936,8 @@
4795 (not:SI (match_operand:SI 1 "s_register_operand" "r")))]
4796 "TARGET_32BIT"
4797 "mvn%?\\t%0, %1"
4798 - [(set_attr "predicable" "yes")]
4799 + [(set_attr "predicable" "yes")
4800 + (set_attr "insn" "mvn")]
4801 )
4802
4803 (define_insn "*thumb1_one_cmplsi2"
4804 @@ -3690,7 +3945,8 @@
4805 (not:SI (match_operand:SI 1 "register_operand" "l")))]
4806 "TARGET_THUMB1"
4807 "mvn\\t%0, %1"
4808 - [(set_attr "length" "2")]
4809 + [(set_attr "length" "2")
4810 + (set_attr "insn" "mvn")]
4811 )
4812
4813 (define_insn "*notsi_compare0"
4814 @@ -3701,7 +3957,8 @@
4815 (not:SI (match_dup 1)))]
4816 "TARGET_32BIT"
4817 "mvn%.\\t%0, %1"
4818 - [(set_attr "conds" "set")]
4819 + [(set_attr "conds" "set")
4820 + (set_attr "insn" "mvn")]
4821 )
4822
4823 (define_insn "*notsi_compare0_scratch"
4824 @@ -3711,11 +3968,40 @@
4825 (clobber (match_scratch:SI 0 "=r"))]
4826 "TARGET_32BIT"
4827 "mvn%.\\t%0, %1"
4828 - [(set_attr "conds" "set")]
4829 + [(set_attr "conds" "set")
4830 + (set_attr "insn" "mvn")]
4831 )
4832 \f
4833 ;; Fixed <--> Floating conversion insns
4834
4835 +(define_expand "floatsihf2"
4836 + [(set (match_operand:HF 0 "general_operand" "")
4837 + (float:HF (match_operand:SI 1 "general_operand" "")))]
4838 + "TARGET_EITHER"
4839 + "
4840 + {
4841 + rtx op1 = gen_reg_rtx (SFmode);
4842 + expand_float (op1, operands[1], 0);
4843 + op1 = convert_to_mode (HFmode, op1, 0);
4844 + emit_move_insn (operands[0], op1);
4845 + DONE;
4846 + }"
4847 +)
4848 +
4849 +(define_expand "floatdihf2"
4850 + [(set (match_operand:HF 0 "general_operand" "")
4851 + (float:HF (match_operand:DI 1 "general_operand" "")))]
4852 + "TARGET_EITHER"
4853 + "
4854 + {
4855 + rtx op1 = gen_reg_rtx (SFmode);
4856 + expand_float (op1, operands[1], 0);
4857 + op1 = convert_to_mode (HFmode, op1, 0);
4858 + emit_move_insn (operands[0], op1);
4859 + DONE;
4860 + }"
4861 +)
4862 +
4863 (define_expand "floatsisf2"
4864 [(set (match_operand:SF 0 "s_register_operand" "")
4865 (float:SF (match_operand:SI 1 "s_register_operand" "")))]
4866 @@ -3731,7 +4017,7 @@
4867 (define_expand "floatsidf2"
4868 [(set (match_operand:DF 0 "s_register_operand" "")
4869 (float:DF (match_operand:SI 1 "s_register_operand" "")))]
4870 - "TARGET_32BIT && TARGET_HARD_FLOAT"
4871 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE"
4872 "
4873 if (TARGET_MAVERICK)
4874 {
4875 @@ -3740,6 +4026,30 @@
4876 }
4877 ")
4878
4879 +(define_expand "fix_trunchfsi2"
4880 + [(set (match_operand:SI 0 "general_operand" "")
4881 + (fix:SI (fix:HF (match_operand:HF 1 "general_operand" ""))))]
4882 + "TARGET_EITHER"
4883 + "
4884 + {
4885 + rtx op1 = convert_to_mode (SFmode, operands[1], 0);
4886 + expand_fix (operands[0], op1, 0);
4887 + DONE;
4888 + }"
4889 +)
4890 +
4891 +(define_expand "fix_trunchfdi2"
4892 + [(set (match_operand:DI 0 "general_operand" "")
4893 + (fix:DI (fix:HF (match_operand:HF 1 "general_operand" ""))))]
4894 + "TARGET_EITHER"
4895 + "
4896 + {
4897 + rtx op1 = convert_to_mode (SFmode, operands[1], 0);
4898 + expand_fix (operands[0], op1, 0);
4899 + DONE;
4900 + }"
4901 +)
4902 +
4903 (define_expand "fix_truncsfsi2"
4904 [(set (match_operand:SI 0 "s_register_operand" "")
4905 (fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" ""))))]
4906 @@ -3759,7 +4069,7 @@
4907 (define_expand "fix_truncdfsi2"
4908 [(set (match_operand:SI 0 "s_register_operand" "")
4909 (fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" ""))))]
4910 - "TARGET_32BIT && TARGET_HARD_FLOAT"
4911 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE"
4912 "
4913 if (TARGET_MAVERICK)
4914 {
4915 @@ -3776,9 +4086,25 @@
4916 [(set (match_operand:SF 0 "s_register_operand" "")
4917 (float_truncate:SF
4918 (match_operand:DF 1 "s_register_operand" "")))]
4919 - "TARGET_32BIT && TARGET_HARD_FLOAT"
4920 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE"
4921 ""
4922 )
4923 +
4924 +/* DFmode -> HFmode conversions have to go through SFmode. */
4925 +(define_expand "truncdfhf2"
4926 + [(set (match_operand:HF 0 "general_operand" "")
4927 + (float_truncate:HF
4928 + (match_operand:DF 1 "general_operand" "")))]
4929 + "TARGET_EITHER"
4930 + "
4931 + {
4932 + rtx op1;
4933 + op1 = convert_to_mode (SFmode, operands[1], 0);
4934 + op1 = convert_to_mode (HFmode, op1, 0);
4935 + emit_move_insn (operands[0], op1);
4936 + DONE;
4937 + }"
4938 +)
4939 \f
4940 ;; Zero and sign extension instructions.
4941
4942 @@ -3800,6 +4126,7 @@
4943 return \"mov%?\\t%R0, #0\";
4944 "
4945 [(set_attr "length" "8")
4946 + (set_attr "insn" "mov")
4947 (set_attr "predicable" "yes")]
4948 )
4949
4950 @@ -3843,6 +4170,7 @@
4951 "
4952 [(set_attr "length" "8")
4953 (set_attr "shift" "1")
4954 + (set_attr "insn" "mov")
4955 (set_attr "predicable" "yes")]
4956 )
4957
4958 @@ -4123,6 +4451,28 @@
4959 ""
4960 )
4961
4962 +(define_code_iterator ior_xor [ior xor])
4963 +
4964 +(define_split
4965 + [(set (match_operand:SI 0 "s_register_operand" "")
4966 + (ior_xor:SI (and:SI (ashift:SI
4967 + (match_operand:SI 1 "s_register_operand" "")
4968 + (match_operand:SI 2 "const_int_operand" ""))
4969 + (match_operand:SI 3 "const_int_operand" ""))
4970 + (zero_extend:SI
4971 + (match_operator 5 "subreg_lowpart_operator"
4972 + [(match_operand:SI 4 "s_register_operand" "")]))))]
4973 + "TARGET_32BIT
4974 + && (INTVAL (operands[3])
4975 + == (GET_MODE_MASK (GET_MODE (operands[5]))
4976 + & (GET_MODE_MASK (GET_MODE (operands[5]))
4977 + << (INTVAL (operands[2])))))"
4978 + [(set (match_dup 0) (ior_xor:SI (ashift:SI (match_dup 1) (match_dup 2))
4979 + (match_dup 4)))
4980 + (set (match_dup 0) (zero_extend:SI (match_dup 5)))]
4981 + "operands[5] = gen_lowpart (GET_MODE (operands[5]), operands[0]);"
4982 +)
4983 +
4984 (define_insn "*compareqi_eq0"
4985 [(set (reg:CC_Z CC_REGNUM)
4986 (compare:CC_Z (match_operand:QI 0 "s_register_operand" "r")
4987 @@ -4639,9 +4989,24 @@
4988 (define_expand "extendsfdf2"
4989 [(set (match_operand:DF 0 "s_register_operand" "")
4990 (float_extend:DF (match_operand:SF 1 "s_register_operand" "")))]
4991 - "TARGET_32BIT && TARGET_HARD_FLOAT"
4992 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE"
4993 ""
4994 )
4995 +
4996 +/* HFmode -> DFmode conversions have to go through SFmode. */
4997 +(define_expand "extendhfdf2"
4998 + [(set (match_operand:DF 0 "general_operand" "")
4999 + (float_extend:DF (match_operand:HF 1 "general_operand" "")))]
5000 + "TARGET_EITHER"
5001 + "
5002 + {
5003 + rtx op1;
5004 + op1 = convert_to_mode (SFmode, operands[1], 0);
5005 + op1 = convert_to_mode (DFmode, op1, 0);
5006 + emit_insn (gen_movdf (operands[0], op1));
5007 + DONE;
5008 + }"
5009 +)
5010 \f
5011 ;; Move insns (including loads and stores)
5012
5013 @@ -4877,6 +5242,7 @@
5014 }"
5015 [(set_attr "length" "4,4,6,2,2,6,4,4")
5016 (set_attr "type" "*,*,*,load2,store2,load2,store2,*")
5017 + (set_attr "insn" "*,mov,*,*,*,*,*,mov")
5018 (set_attr "pool_range" "*,*,*,*,*,1020,*,*")]
5019 )
5020
5021 @@ -4903,14 +5269,6 @@
5022 optimize && can_create_pseudo_p ());
5023 DONE;
5024 }
5025 -
5026 - if (TARGET_USE_MOVT && !target_word_relocations
5027 - && GET_CODE (operands[1]) == SYMBOL_REF
5028 - && !flag_pic && !arm_tls_referenced_p (operands[1]))
5029 - {
5030 - arm_emit_movpair (operands[0], operands[1]);
5031 - DONE;
5032 - }
5033 }
5034 else /* TARGET_THUMB1... */
5035 {
5036 @@ -4984,18 +5342,9 @@
5037 (set_attr "length" "4")]
5038 )
5039
5040 -(define_insn "*arm_movw"
5041 - [(set (match_operand:SI 0 "nonimmediate_operand" "=r")
5042 - (high:SI (match_operand:SI 1 "general_operand" "i")))]
5043 - "TARGET_32BIT"
5044 - "movw%?\t%0, #:lower16:%c1"
5045 - [(set_attr "predicable" "yes")
5046 - (set_attr "length" "4")]
5047 -)
5048 -
5049 (define_insn "*arm_movsi_insn"
5050 [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,rk,m")
5051 - (match_operand:SI 1 "general_operand" "rk, I,K,N,mi,rk"))]
5052 + (match_operand:SI 1 "general_operand" "rk, I,K,j,mi,rk"))]
5053 "TARGET_ARM && ! TARGET_IWMMXT
5054 && !(TARGET_HARD_FLOAT && TARGET_VFP)
5055 && ( register_operand (operands[0], SImode)
5056 @@ -5008,6 +5357,7 @@
5057 ldr%?\\t%0, %1
5058 str%?\\t%1, %0"
5059 [(set_attr "type" "*,*,*,*,load1,store1")
5060 + (set_attr "insn" "mov,mov,mvn,mov,*,*")
5061 (set_attr "predicable" "yes")
5062 (set_attr "pool_range" "*,*,*,*,4096,*")
5063 (set_attr "neg_pool_range" "*,*,*,*,4084,*")]
5064 @@ -5027,6 +5377,19 @@
5065 "
5066 )
5067
5068 +(define_split
5069 + [(set (match_operand:SI 0 "arm_general_register_operand" "")
5070 + (match_operand:SI 1 "general_operand" ""))]
5071 + "TARGET_32BIT
5072 + && TARGET_USE_MOVT && GET_CODE (operands[1]) == SYMBOL_REF
5073 + && !flag_pic && !target_word_relocations
5074 + && !arm_tls_referenced_p (operands[1])"
5075 + [(clobber (const_int 0))]
5076 +{
5077 + arm_emit_movpair (operands[0], operands[1]);
5078 + DONE;
5079 +})
5080 +
5081 (define_insn "*thumb1_movsi_insn"
5082 [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l, m,*lhk")
5083 (match_operand:SI 1 "general_operand" "l, I,J,K,>,l,mi,l,*lhk"))]
5084 @@ -5065,7 +5428,7 @@
5085 (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
5086 "
5087 {
5088 - unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
5089 + unsigned HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffffffffu;
5090 unsigned HOST_WIDE_INT mask = 0xff;
5091 int i;
5092
5093 @@ -5627,6 +5990,7 @@
5094 ldr%(h%)\\t%0, %1\\t%@ movhi"
5095 [(set_attr "type" "*,*,store1,load1")
5096 (set_attr "predicable" "yes")
5097 + (set_attr "insn" "mov,mvn,*,*")
5098 (set_attr "pool_range" "*,*,*,256")
5099 (set_attr "neg_pool_range" "*,*,*,244")]
5100 )
5101 @@ -5638,7 +6002,8 @@
5102 "@
5103 mov%?\\t%0, %1\\t%@ movhi
5104 mvn%?\\t%0, #%B1\\t%@ movhi"
5105 - [(set_attr "predicable" "yes")]
5106 + [(set_attr "predicable" "yes")
5107 + (set_attr "insn" "mov,mvn")]
5108 )
5109
5110 (define_expand "thumb_movhi_clobber"
5111 @@ -5769,6 +6134,7 @@
5112 ldr%(b%)\\t%0, %1
5113 str%(b%)\\t%1, %0"
5114 [(set_attr "type" "*,*,load1,store1")
5115 + (set_attr "insn" "mov,mvn,*,*")
5116 (set_attr "predicable" "yes")]
5117 )
5118
5119 @@ -5787,9 +6153,111 @@
5120 mov\\t%0, %1"
5121 [(set_attr "length" "2")
5122 (set_attr "type" "*,load1,store1,*,*,*")
5123 + (set_attr "insn" "*,*,*,mov,mov,mov")
5124 (set_attr "pool_range" "*,32,*,*,*,*")]
5125 )
5126
5127 +;; HFmode moves
5128 +(define_expand "movhf"
5129 + [(set (match_operand:HF 0 "general_operand" "")
5130 + (match_operand:HF 1 "general_operand" ""))]
5131 + "TARGET_EITHER"
5132 + "
5133 + if (TARGET_32BIT)
5134 + {
5135 + if (GET_CODE (operands[0]) == MEM)
5136 + operands[1] = force_reg (HFmode, operands[1]);
5137 + }
5138 + else /* TARGET_THUMB1 */
5139 + {
5140 + if (can_create_pseudo_p ())
5141 + {
5142 + if (GET_CODE (operands[0]) != REG)
5143 + operands[1] = force_reg (HFmode, operands[1]);
5144 + }
5145 + }
5146 + "
5147 +)
5148 +
5149 +(define_insn "*arm32_movhf"
5150 + [(set (match_operand:HF 0 "nonimmediate_operand" "=r,m,r,r")
5151 + (match_operand:HF 1 "general_operand" " m,r,r,F"))]
5152 + "TARGET_32BIT && !(TARGET_HARD_FLOAT && TARGET_FP16)
5153 + && ( s_register_operand (operands[0], HFmode)
5154 + || s_register_operand (operands[1], HFmode))"
5155 + "*
5156 + switch (which_alternative)
5157 + {
5158 + case 0: /* ARM register from memory */
5159 + return \"ldr%(h%)\\t%0, %1\\t%@ __fp16\";
5160 + case 1: /* memory from ARM register */
5161 + return \"str%(h%)\\t%1, %0\\t%@ __fp16\";
5162 + case 2: /* ARM register from ARM register */
5163 + return \"mov%?\\t%0, %1\\t%@ __fp16\";
5164 + case 3: /* ARM register from constant */
5165 + {
5166 + REAL_VALUE_TYPE r;
5167 + long bits;
5168 + rtx ops[4];
5169 +
5170 + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
5171 + bits = real_to_target (NULL, &r, HFmode);
5172 + ops[0] = operands[0];
5173 + ops[1] = GEN_INT (bits);
5174 + ops[2] = GEN_INT (bits & 0xff00);
5175 + ops[3] = GEN_INT (bits & 0x00ff);
5176 +
5177 + if (arm_arch_thumb2)
5178 + output_asm_insn (\"movw%?\\t%0, %1\", ops);
5179 + else
5180 + output_asm_insn (\"mov%?\\t%0, %2\;orr%?\\t%0, %0, %3\", ops);
5181 + return \"\";
5182 + }
5183 + default:
5184 + gcc_unreachable ();
5185 + }
5186 + "
5187 + [(set_attr "conds" "unconditional")
5188 + (set_attr "type" "load1,store1,*,*")
5189 + (set_attr "length" "4,4,4,8")
5190 + (set_attr "predicable" "yes")
5191 + ]
5192 +)
5193 +
5194 +(define_insn "*thumb1_movhf"
5195 + [(set (match_operand:HF 0 "nonimmediate_operand" "=l,l,m,*r,*h")
5196 + (match_operand:HF 1 "general_operand" "l,mF,l,*h,*r"))]
5197 + "TARGET_THUMB1
5198 + && ( s_register_operand (operands[0], HFmode)
5199 + || s_register_operand (operands[1], HFmode))"
5200 + "*
5201 + switch (which_alternative)
5202 + {
5203 + case 1:
5204 + {
5205 + rtx addr;
5206 + gcc_assert (GET_CODE(operands[1]) == MEM);
5207 + addr = XEXP (operands[1], 0);
5208 + if (GET_CODE (addr) == LABEL_REF
5209 + || (GET_CODE (addr) == CONST
5210 + && GET_CODE (XEXP (addr, 0)) == PLUS
5211 + && GET_CODE (XEXP (XEXP (addr, 0), 0)) == LABEL_REF
5212 + && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT))
5213 + {
5214 + /* Constant pool entry. */
5215 + return \"ldr\\t%0, %1\";
5216 + }
5217 + return \"ldrh\\t%0, %1\";
5218 + }
5219 + case 2: return \"strh\\t%1, %0\";
5220 + default: return \"mov\\t%0, %1\";
5221 + }
5222 + "
5223 + [(set_attr "length" "2")
5224 + (set_attr "type" "*,load1,store1,*,*")
5225 + (set_attr "pool_range" "*,1020,*,*,*")]
5226 +)
5227 +
5228 (define_expand "movsf"
5229 [(set (match_operand:SF 0 "general_operand" "")
5230 (match_operand:SF 1 "general_operand" ""))]
5231 @@ -5842,6 +6310,7 @@
5232 [(set_attr "length" "4,4,4")
5233 (set_attr "predicable" "yes")
5234 (set_attr "type" "*,load1,store1")
5235 + (set_attr "insn" "mov,*,*")
5236 (set_attr "pool_range" "*,4096,*")
5237 (set_attr "neg_pool_range" "*,4084,*")]
5238 )
5239 @@ -6297,7 +6766,7 @@
5240 (match_operand:BLK 1 "general_operand" "")
5241 (match_operand:SI 2 "const_int_operand" "")
5242 (match_operand:SI 3 "const_int_operand" "")]
5243 - "TARGET_EITHER"
5244 + "TARGET_EITHER && !low_irq_latency"
5245 "
5246 if (TARGET_32BIT)
5247 {
5248 @@ -7476,7 +7945,7 @@
5249 (define_expand "cmpdf"
5250 [(match_operand:DF 0 "s_register_operand" "")
5251 (match_operand:DF 1 "arm_float_compare_operand" "")]
5252 - "TARGET_32BIT && TARGET_HARD_FLOAT"
5253 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE"
5254 "
5255 arm_compare_op0 = operands[0];
5256 arm_compare_op1 = operands[1];
5257 @@ -7507,7 +7976,11 @@
5258 (set_attr "shift" "1")
5259 (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
5260 (const_string "alu_shift")
5261 - (const_string "alu_shift_reg")))]
5262 + (const_string "alu_shift_reg")))
5263 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5264 + (eq_attr "fix_janus" "yes"))
5265 + (const_int 8)
5266 + (const_int 4)))]
5267 )
5268
5269 (define_insn "*arm_cmpsi_shiftsi_swp"
5270 @@ -7522,7 +7995,11 @@
5271 (set_attr "shift" "1")
5272 (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
5273 (const_string "alu_shift")
5274 - (const_string "alu_shift_reg")))]
5275 + (const_string "alu_shift_reg")))
5276 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5277 + (eq_attr "fix_janus" "yes"))
5278 + (const_int 8)
5279 + (const_int 4)))]
5280 )
5281
5282 (define_insn "*arm_cmpsi_negshiftsi_si"
5283 @@ -7537,7 +8014,11 @@
5284 [(set_attr "conds" "set")
5285 (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "")
5286 (const_string "alu_shift")
5287 - (const_string "alu_shift_reg")))]
5288 + (const_string "alu_shift_reg")))
5289 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5290 + (eq_attr "fix_janus" "yes"))
5291 + (const_int 8)
5292 + (const_int 4)))]
5293 )
5294
5295 ;; Cirrus SF compare instruction
5296 @@ -7879,77 +8360,77 @@
5297 (define_expand "seq"
5298 [(set (match_operand:SI 0 "s_register_operand" "")
5299 (eq:SI (match_dup 1) (const_int 0)))]
5300 - "TARGET_32BIT"
5301 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5302 "operands[1] = arm_gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1);"
5303 )
5304
5305 (define_expand "sne"
5306 [(set (match_operand:SI 0 "s_register_operand" "")
5307 (ne:SI (match_dup 1) (const_int 0)))]
5308 - "TARGET_32BIT"
5309 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5310 "operands[1] = arm_gen_compare_reg (NE, arm_compare_op0, arm_compare_op1);"
5311 )
5312
5313 (define_expand "sgt"
5314 [(set (match_operand:SI 0 "s_register_operand" "")
5315 (gt:SI (match_dup 1) (const_int 0)))]
5316 - "TARGET_32BIT"
5317 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5318 "operands[1] = arm_gen_compare_reg (GT, arm_compare_op0, arm_compare_op1);"
5319 )
5320
5321 (define_expand "sle"
5322 [(set (match_operand:SI 0 "s_register_operand" "")
5323 (le:SI (match_dup 1) (const_int 0)))]
5324 - "TARGET_32BIT"
5325 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5326 "operands[1] = arm_gen_compare_reg (LE, arm_compare_op0, arm_compare_op1);"
5327 )
5328
5329 (define_expand "sge"
5330 [(set (match_operand:SI 0 "s_register_operand" "")
5331 (ge:SI (match_dup 1) (const_int 0)))]
5332 - "TARGET_32BIT"
5333 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5334 "operands[1] = arm_gen_compare_reg (GE, arm_compare_op0, arm_compare_op1);"
5335 )
5336
5337 (define_expand "slt"
5338 [(set (match_operand:SI 0 "s_register_operand" "")
5339 (lt:SI (match_dup 1) (const_int 0)))]
5340 - "TARGET_32BIT"
5341 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5342 "operands[1] = arm_gen_compare_reg (LT, arm_compare_op0, arm_compare_op1);"
5343 )
5344
5345 (define_expand "sgtu"
5346 [(set (match_operand:SI 0 "s_register_operand" "")
5347 (gtu:SI (match_dup 1) (const_int 0)))]
5348 - "TARGET_32BIT"
5349 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5350 "operands[1] = arm_gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1);"
5351 )
5352
5353 (define_expand "sleu"
5354 [(set (match_operand:SI 0 "s_register_operand" "")
5355 (leu:SI (match_dup 1) (const_int 0)))]
5356 - "TARGET_32BIT"
5357 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5358 "operands[1] = arm_gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1);"
5359 )
5360
5361 (define_expand "sgeu"
5362 [(set (match_operand:SI 0 "s_register_operand" "")
5363 (geu:SI (match_dup 1) (const_int 0)))]
5364 - "TARGET_32BIT"
5365 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5366 "operands[1] = arm_gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1);"
5367 )
5368
5369 (define_expand "sltu"
5370 [(set (match_operand:SI 0 "s_register_operand" "")
5371 (ltu:SI (match_dup 1) (const_int 0)))]
5372 - "TARGET_32BIT"
5373 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5374 "operands[1] = arm_gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1);"
5375 )
5376
5377 (define_expand "sunordered"
5378 [(set (match_operand:SI 0 "s_register_operand" "")
5379 (unordered:SI (match_dup 1) (const_int 0)))]
5380 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
5381 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC"
5382 "operands[1] = arm_gen_compare_reg (UNORDERED, arm_compare_op0,
5383 arm_compare_op1);"
5384 )
5385 @@ -7957,7 +8438,7 @@
5386 (define_expand "sordered"
5387 [(set (match_operand:SI 0 "s_register_operand" "")
5388 (ordered:SI (match_dup 1) (const_int 0)))]
5389 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
5390 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC"
5391 "operands[1] = arm_gen_compare_reg (ORDERED, arm_compare_op0,
5392 arm_compare_op1);"
5393 )
5394 @@ -7965,7 +8446,7 @@
5395 (define_expand "sungt"
5396 [(set (match_operand:SI 0 "s_register_operand" "")
5397 (ungt:SI (match_dup 1) (const_int 0)))]
5398 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
5399 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC"
5400 "operands[1] = arm_gen_compare_reg (UNGT, arm_compare_op0,
5401 arm_compare_op1);"
5402 )
5403 @@ -7973,7 +8454,7 @@
5404 (define_expand "sunge"
5405 [(set (match_operand:SI 0 "s_register_operand" "")
5406 (unge:SI (match_dup 1) (const_int 0)))]
5407 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
5408 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC"
5409 "operands[1] = arm_gen_compare_reg (UNGE, arm_compare_op0,
5410 arm_compare_op1);"
5411 )
5412 @@ -7981,7 +8462,7 @@
5413 (define_expand "sunlt"
5414 [(set (match_operand:SI 0 "s_register_operand" "")
5415 (unlt:SI (match_dup 1) (const_int 0)))]
5416 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
5417 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC"
5418 "operands[1] = arm_gen_compare_reg (UNLT, arm_compare_op0,
5419 arm_compare_op1);"
5420 )
5421 @@ -7989,7 +8470,7 @@
5422 (define_expand "sunle"
5423 [(set (match_operand:SI 0 "s_register_operand" "")
5424 (unle:SI (match_dup 1) (const_int 0)))]
5425 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
5426 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC"
5427 "operands[1] = arm_gen_compare_reg (UNLE, arm_compare_op0,
5428 arm_compare_op1);"
5429 )
5430 @@ -8018,6 +8499,7 @@
5431 "TARGET_ARM"
5432 "mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
5433 [(set_attr "conds" "use")
5434 + (set_attr "insn" "mov")
5435 (set_attr "length" "8")]
5436 )
5437
5438 @@ -8028,6 +8510,7 @@
5439 "TARGET_ARM"
5440 "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
5441 [(set_attr "conds" "use")
5442 + (set_attr "insn" "mov")
5443 (set_attr "length" "8")]
5444 )
5445
5446 @@ -8038,6 +8521,7 @@
5447 "TARGET_ARM"
5448 "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
5449 [(set_attr "conds" "use")
5450 + (set_attr "insn" "mov")
5451 (set_attr "length" "8")]
5452 )
5453
5454 @@ -8241,7 +8725,7 @@
5455 (if_then_else:SI (match_operand 1 "arm_comparison_operator" "")
5456 (match_operand:SI 2 "arm_not_operand" "")
5457 (match_operand:SI 3 "arm_not_operand" "")))]
5458 - "TARGET_32BIT"
5459 + "TARGET_32BIT && !TARGET_NO_COND_EXEC"
5460 "
5461 {
5462 enum rtx_code code = GET_CODE (operands[1]);
5463 @@ -8260,7 +8744,7 @@
5464 (if_then_else:SF (match_operand 1 "arm_comparison_operator" "")
5465 (match_operand:SF 2 "s_register_operand" "")
5466 (match_operand:SF 3 "nonmemory_operand" "")))]
5467 - "TARGET_32BIT && TARGET_HARD_FLOAT"
5468 + "TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_NO_COND_EXEC"
5469 "
5470 {
5471 enum rtx_code code = GET_CODE (operands[1]);
5472 @@ -8285,7 +8769,7 @@
5473 (if_then_else:DF (match_operand 1 "arm_comparison_operator" "")
5474 (match_operand:DF 2 "s_register_operand" "")
5475 (match_operand:DF 3 "arm_float_add_operand" "")))]
5476 - "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
5477 + "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP_DOUBLE) && !TARGET_NO_COND_EXEC"
5478 "
5479 {
5480 enum rtx_code code = GET_CODE (operands[1]);
5481 @@ -8317,7 +8801,8 @@
5482 mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
5483 mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
5484 [(set_attr "length" "4,4,4,4,8,8,8,8")
5485 - (set_attr "conds" "use")]
5486 + (set_attr "conds" "use")
5487 + (set_attr "insn" "mov,mvn,mov,mvn,mov,mov,mvn,mvn")]
5488 )
5489
5490 (define_insn "*movsfcc_soft_insn"
5491 @@ -8330,7 +8815,8 @@
5492 "@
5493 mov%D3\\t%0, %2
5494 mov%d3\\t%0, %1"
5495 - [(set_attr "conds" "use")]
5496 + [(set_attr "conds" "use")
5497 + (set_attr "insn" "mov")]
5498 )
5499
5500 \f
5501 @@ -8733,7 +9219,7 @@
5502 [(match_operand 1 "cc_register" "") (const_int 0)])
5503 (return)
5504 (pc)))]
5505 - "TARGET_ARM && USE_RETURN_INSN (TRUE)"
5506 + "TARGET_ARM && USE_RETURN_INSN (TRUE) && !TARGET_NO_COND_EXEC"
5507 "*
5508 {
5509 if (arm_ccfsm_state == 2)
5510 @@ -8754,7 +9240,7 @@
5511 [(match_operand 1 "cc_register" "") (const_int 0)])
5512 (pc)
5513 (return)))]
5514 - "TARGET_ARM && USE_RETURN_INSN (TRUE)"
5515 + "TARGET_ARM && USE_RETURN_INSN (TRUE) && !TARGET_NO_COND_EXEC"
5516 "*
5517 {
5518 if (arm_ccfsm_state == 2)
5519 @@ -9072,7 +9558,11 @@
5520 (set_attr "shift" "4")
5521 (set (attr "type") (if_then_else (match_operand 5 "const_int_operand" "")
5522 (const_string "alu_shift")
5523 - (const_string "alu_shift_reg")))]
5524 + (const_string "alu_shift_reg")))
5525 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5526 + (eq_attr "fix_janus" "yes"))
5527 + (const_int 8)
5528 + (const_int 4)))]
5529 )
5530
5531 (define_split
5532 @@ -9110,7 +9600,11 @@
5533 (set_attr "shift" "4")
5534 (set (attr "type") (if_then_else (match_operand 5 "const_int_operand" "")
5535 (const_string "alu_shift")
5536 - (const_string "alu_shift_reg")))]
5537 + (const_string "alu_shift_reg")))
5538 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5539 + (eq_attr "fix_janus" "yes"))
5540 + (const_int 8)
5541 + (const_int 4)))]
5542 )
5543
5544 (define_insn "*arith_shiftsi_compare0_scratch"
5545 @@ -9128,7 +9622,11 @@
5546 (set_attr "shift" "4")
5547 (set (attr "type") (if_then_else (match_operand 5 "const_int_operand" "")
5548 (const_string "alu_shift")
5549 - (const_string "alu_shift_reg")))]
5550 + (const_string "alu_shift_reg")))
5551 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5552 + (eq_attr "fix_janus" "yes"))
5553 + (const_int 8)
5554 + (const_int 4)))]
5555 )
5556
5557 (define_insn "*sub_shiftsi"
5558 @@ -9143,7 +9641,11 @@
5559 (set_attr "shift" "3")
5560 (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "")
5561 (const_string "alu_shift")
5562 - (const_string "alu_shift_reg")))]
5563 + (const_string "alu_shift_reg")))
5564 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5565 + (eq_attr "fix_janus" "yes"))
5566 + (const_int 8)
5567 + (const_int 4)))]
5568 )
5569
5570 (define_insn "*sub_shiftsi_compare0"
5571 @@ -9163,7 +9665,11 @@
5572 (set_attr "shift" "3")
5573 (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "")
5574 (const_string "alu_shift")
5575 - (const_string "alu_shift_reg")))]
5576 + (const_string "alu_shift_reg")))
5577 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5578 + (eq_attr "fix_janus" "yes"))
5579 + (const_int 8)
5580 + (const_int 4)))]
5581 )
5582
5583 (define_insn "*sub_shiftsi_compare0_scratch"
5584 @@ -9181,7 +9687,11 @@
5585 (set_attr "shift" "3")
5586 (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "")
5587 (const_string "alu_shift")
5588 - (const_string "alu_shift_reg")))]
5589 + (const_string "alu_shift_reg")))
5590 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5591 + (eq_attr "fix_janus" "yes"))
5592 + (const_int 8)
5593 + (const_int 4)))]
5594 )
5595
5596 \f
5597 @@ -9194,6 +9704,7 @@
5598 "TARGET_ARM"
5599 "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
5600 [(set_attr "conds" "use")
5601 + (set_attr "insn" "mov")
5602 (set_attr "length" "8")]
5603 )
5604
5605 @@ -9207,6 +9718,7 @@
5606 orr%d2\\t%0, %1, #1
5607 mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
5608 [(set_attr "conds" "use")
5609 + (set_attr "insn" "orr")
5610 (set_attr "length" "4,8")]
5611 )
5612
5613 @@ -9216,7 +9728,7 @@
5614 [(match_operand:SI 2 "s_register_operand" "r,r")
5615 (match_operand:SI 3 "arm_add_operand" "rI,L")]))
5616 (clobber (reg:CC CC_REGNUM))]
5617 - "TARGET_ARM"
5618 + "TARGET_ARM && !TARGET_NO_COND_EXEC"
5619 "*
5620 if (operands[3] == const0_rtx)
5621 {
5622 @@ -9271,6 +9783,7 @@
5623 return \"\";
5624 "
5625 [(set_attr "conds" "use")
5626 + (set_attr "insn" "mov")
5627 (set_attr "length" "4,4,8")]
5628 )
5629
5630 @@ -9282,7 +9795,7 @@
5631 (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
5632 (match_operand:SI 1 "s_register_operand" "0,?r")]))
5633 (clobber (reg:CC CC_REGNUM))]
5634 - "TARGET_ARM"
5635 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5636 "*
5637 if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
5638 return \"%i5\\t%0, %1, %2, lsr #31\";
5639 @@ -9678,7 +10191,7 @@
5640 (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
5641 (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
5642 (clobber (reg:CC CC_REGNUM))]
5643 - "TARGET_ARM"
5644 + "TARGET_ARM && !TARGET_NO_COND_EXEC"
5645 "*
5646 if (GET_CODE (operands[5]) == LT
5647 && (operands[4] == const0_rtx))
5648 @@ -9744,7 +10257,7 @@
5649 (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
5650 (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))
5651 (clobber (reg:CC CC_REGNUM))]
5652 - "TARGET_ARM"
5653 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5654 "#"
5655 [(set_attr "conds" "clob")
5656 (set_attr "length" "8,12")]
5657 @@ -9780,7 +10293,7 @@
5658 (match_operand:SI 2 "s_register_operand" "r,r")
5659 (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
5660 (clobber (reg:CC CC_REGNUM))]
5661 - "TARGET_ARM"
5662 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5663 "#"
5664 [(set_attr "conds" "clob")
5665 (set_attr "length" "8,12")]
5666 @@ -9818,7 +10331,7 @@
5667 [(match_operand:SI 3 "s_register_operand" "r")
5668 (match_operand:SI 4 "arm_rhs_operand" "rI")])))
5669 (clobber (reg:CC CC_REGNUM))]
5670 - "TARGET_ARM"
5671 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5672 "#"
5673 [(set_attr "conds" "clob")
5674 (set_attr "length" "12")]
5675 @@ -9968,7 +10481,7 @@
5676 (not:SI
5677 (match_operand:SI 2 "s_register_operand" "r,r"))))
5678 (clobber (reg:CC CC_REGNUM))]
5679 - "TARGET_ARM"
5680 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5681 "#"
5682 [(set_attr "conds" "clob")
5683 (set_attr "length" "8,12")]
5684 @@ -9987,6 +10500,7 @@
5685 mov%d4\\t%0, %1\;mvn%D4\\t%0, %2
5686 mvn%d4\\t%0, #%B1\;mvn%D4\\t%0, %2"
5687 [(set_attr "conds" "use")
5688 + (set_attr "insn" "mvn")
5689 (set_attr "length" "4,8,8")]
5690 )
5691
5692 @@ -10000,7 +10514,7 @@
5693 (match_operand:SI 2 "s_register_operand" "r,r"))
5694 (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
5695 (clobber (reg:CC CC_REGNUM))]
5696 - "TARGET_ARM"
5697 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5698 "#"
5699 [(set_attr "conds" "clob")
5700 (set_attr "length" "8,12")]
5701 @@ -10019,6 +10533,7 @@
5702 mov%D4\\t%0, %1\;mvn%d4\\t%0, %2
5703 mvn%D4\\t%0, #%B1\;mvn%d4\\t%0, %2"
5704 [(set_attr "conds" "use")
5705 + (set_attr "insn" "mvn")
5706 (set_attr "length" "4,8,8")]
5707 )
5708
5709 @@ -10033,7 +10548,7 @@
5710 (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])
5711 (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
5712 (clobber (reg:CC CC_REGNUM))]
5713 - "TARGET_ARM"
5714 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5715 "#"
5716 [(set_attr "conds" "clob")
5717 (set_attr "length" "8,12")]
5718 @@ -10055,10 +10570,23 @@
5719 mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
5720 [(set_attr "conds" "use")
5721 (set_attr "shift" "2")
5722 - (set_attr "length" "4,8,8")
5723 + (set_attr "insn" "mov")
5724 (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "")
5725 (const_string "alu_shift")
5726 - (const_string "alu_shift_reg")))]
5727 + (const_string "alu_shift_reg")))
5728 + (set_attr_alternative "length"
5729 + [(if_then_else (and (eq_attr "type" "alu_shift_reg")
5730 + (eq_attr "fix_janus" "yes"))
5731 + (const_int 8)
5732 + (const_int 4))
5733 + (if_then_else (and (eq_attr "type" "alu_shift_reg")
5734 + (eq_attr "fix_janus" "yes"))
5735 + (const_int 12)
5736 + (const_int 8))
5737 + (if_then_else (and (eq_attr "type" "alu_shift_reg")
5738 + (eq_attr "fix_janus" "yes"))
5739 + (const_int 12)
5740 + (const_int 8))])]
5741 )
5742
5743 (define_insn "*ifcompare_move_shift"
5744 @@ -10072,7 +10600,7 @@
5745 [(match_operand:SI 2 "s_register_operand" "r,r")
5746 (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])))
5747 (clobber (reg:CC CC_REGNUM))]
5748 - "TARGET_ARM"
5749 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5750 "#"
5751 [(set_attr "conds" "clob")
5752 (set_attr "length" "8,12")]
5753 @@ -10094,10 +10622,24 @@
5754 mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
5755 [(set_attr "conds" "use")
5756 (set_attr "shift" "2")
5757 - (set_attr "length" "4,8,8")
5758 + (set_attr "insn" "mov")
5759 (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "")
5760 (const_string "alu_shift")
5761 - (const_string "alu_shift_reg")))]
5762 + (const_string "alu_shift_reg")))
5763 + (set_attr_alternative "length"
5764 + [(if_then_else (and (eq_attr "type" "alu_shift_reg")
5765 + (eq_attr "fix_janus" "yes"))
5766 + (const_int 8)
5767 + (const_int 4))
5768 + (if_then_else (and (eq_attr "type" "alu_shift_reg")
5769 + (eq_attr "fix_janus" "yes"))
5770 + (const_int 12)
5771 + (const_int 8))
5772 + (if_then_else (and (eq_attr "type" "alu_shift_reg")
5773 + (eq_attr "fix_janus" "yes"))
5774 + (const_int 12)
5775 + (const_int 8))])
5776 + (set_attr "insn" "mov")]
5777 )
5778
5779 (define_insn "*ifcompare_shift_shift"
5780 @@ -10113,7 +10655,7 @@
5781 [(match_operand:SI 3 "s_register_operand" "r")
5782 (match_operand:SI 4 "arm_rhs_operand" "rM")])))
5783 (clobber (reg:CC CC_REGNUM))]
5784 - "TARGET_ARM"
5785 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5786 "#"
5787 [(set_attr "conds" "clob")
5788 (set_attr "length" "12")]
5789 @@ -10134,12 +10676,16 @@
5790 "mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
5791 [(set_attr "conds" "use")
5792 (set_attr "shift" "1")
5793 - (set_attr "length" "8")
5794 + (set_attr "insn" "mov")
5795 (set (attr "type") (if_then_else
5796 (and (match_operand 2 "const_int_operand" "")
5797 (match_operand 4 "const_int_operand" ""))
5798 (const_string "alu_shift")
5799 - (const_string "alu_shift_reg")))]
5800 + (const_string "alu_shift_reg")))
5801 + (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg")
5802 + (eq_attr "fix_janus" "yes"))
5803 + (const_int 16)
5804 + (const_int 8)))]
5805 )
5806
5807 (define_insn "*ifcompare_not_arith"
5808 @@ -10153,7 +10699,7 @@
5809 [(match_operand:SI 2 "s_register_operand" "r")
5810 (match_operand:SI 3 "arm_rhs_operand" "rI")])))
5811 (clobber (reg:CC CC_REGNUM))]
5812 - "TARGET_ARM"
5813 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5814 "#"
5815 [(set_attr "conds" "clob")
5816 (set_attr "length" "12")]
5817 @@ -10171,6 +10717,7 @@
5818 "TARGET_ARM"
5819 "mvn%d5\\t%0, %1\;%I6%D5\\t%0, %2, %3"
5820 [(set_attr "conds" "use")
5821 + (set_attr "insn" "mvn")
5822 (set_attr "length" "8")]
5823 )
5824
5825 @@ -10185,7 +10732,7 @@
5826 (match_operand:SI 3 "arm_rhs_operand" "rI")])
5827 (not:SI (match_operand:SI 1 "s_register_operand" "r"))))
5828 (clobber (reg:CC CC_REGNUM))]
5829 - "TARGET_ARM"
5830 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5831 "#"
5832 [(set_attr "conds" "clob")
5833 (set_attr "length" "12")]
5834 @@ -10203,6 +10750,7 @@
5835 "TARGET_ARM"
5836 "mvn%D5\\t%0, %1\;%I6%d5\\t%0, %2, %3"
5837 [(set_attr "conds" "use")
5838 + (set_attr "insn" "mvn")
5839 (set_attr "length" "8")]
5840 )
5841
5842 @@ -10215,7 +10763,7 @@
5843 (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))
5844 (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
5845 (clobber (reg:CC CC_REGNUM))]
5846 - "TARGET_ARM"
5847 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5848 "#"
5849 [(set_attr "conds" "clob")
5850 (set_attr "length" "8,12")]
5851 @@ -10246,7 +10794,7 @@
5852 (match_operand:SI 1 "arm_not_operand" "0,?rIK")
5853 (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))))
5854 (clobber (reg:CC CC_REGNUM))]
5855 - "TARGET_ARM"
5856 + "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC"
5857 "#"
5858 [(set_attr "conds" "clob")
5859 (set_attr "length" "8,12")]
5860 @@ -10614,7 +11162,7 @@
5861 (match_dup 0)
5862 (match_operand 4 "" "")))
5863 (clobber (reg:CC CC_REGNUM))]
5864 - "TARGET_ARM && reload_completed"
5865 + "TARGET_ARM && reload_completed && !TARGET_NO_SINGLE_COND_EXEC"
5866 [(set (match_dup 5) (match_dup 6))
5867 (cond_exec (match_dup 7)
5868 (set (match_dup 0) (match_dup 4)))]
5869 @@ -10642,7 +11190,7 @@
5870 (match_operand 4 "" "")
5871 (match_dup 0)))
5872 (clobber (reg:CC CC_REGNUM))]
5873 - "TARGET_ARM && reload_completed"
5874 + "TARGET_ARM && reload_completed && !TARGET_NO_SINGLE_COND_EXEC"
5875 [(set (match_dup 5) (match_dup 6))
5876 (cond_exec (match_op_dup 1 [(match_dup 5) (const_int 0)])
5877 (set (match_dup 0) (match_dup 4)))]
5878 @@ -10663,7 +11211,7 @@
5879 (match_operand 4 "" "")
5880 (match_operand 5 "" "")))
5881 (clobber (reg:CC CC_REGNUM))]
5882 - "TARGET_ARM && reload_completed"
5883 + "TARGET_ARM && reload_completed && !TARGET_NO_SINGLE_COND_EXEC"
5884 [(set (match_dup 6) (match_dup 7))
5885 (cond_exec (match_op_dup 1 [(match_dup 6) (const_int 0)])
5886 (set (match_dup 0) (match_dup 4)))
5887 @@ -10695,7 +11243,7 @@
5888 (not:SI
5889 (match_operand:SI 5 "s_register_operand" ""))))
5890 (clobber (reg:CC CC_REGNUM))]
5891 - "TARGET_ARM && reload_completed"
5892 + "TARGET_ARM && reload_completed && !TARGET_NO_SINGLE_COND_EXEC"
5893 [(set (match_dup 6) (match_dup 7))
5894 (cond_exec (match_op_dup 1 [(match_dup 6) (const_int 0)])
5895 (set (match_dup 0) (match_dup 4)))
5896 @@ -10730,6 +11278,7 @@
5897 mvn%D4\\t%0, %2
5898 mov%d4\\t%0, %1\;mvn%D4\\t%0, %2"
5899 [(set_attr "conds" "use")
5900 + (set_attr "insn" "mvn")
5901 (set_attr "length" "4,8")]
5902 )
5903
5904 @@ -10864,6 +11413,24 @@
5905 "
5906 )
5907
5908 +(define_insn "align_16"
5909 + [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN16)]
5910 + "TARGET_EITHER"
5911 + "*
5912 + assemble_align (128);
5913 + return \"\";
5914 + "
5915 +)
5916 +
5917 +(define_insn "align_32"
5918 + [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN32)]
5919 + "TARGET_EITHER"
5920 + "*
5921 + assemble_align (256);
5922 + return \"\";
5923 + "
5924 +)
5925 +
5926 (define_insn "consttable_end"
5927 [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)]
5928 "TARGET_EITHER"
5929 @@ -10890,6 +11457,7 @@
5930 "TARGET_THUMB1"
5931 "*
5932 making_const_table = TRUE;
5933 + gcc_assert (GET_MODE_CLASS (GET_MODE (operands[0])) != MODE_FLOAT);
5934 assemble_integer (operands[0], 2, BITS_PER_WORD, 1);
5935 assemble_zeros (2);
5936 return \"\";
5937 @@ -10902,19 +11470,30 @@
5938 "TARGET_EITHER"
5939 "*
5940 {
5941 + rtx x = operands[0];
5942 making_const_table = TRUE;
5943 - switch (GET_MODE_CLASS (GET_MODE (operands[0])))
5944 + switch (GET_MODE_CLASS (GET_MODE (x)))
5945 {
5946 case MODE_FLOAT:
5947 - {
5948 - REAL_VALUE_TYPE r;
5949 - REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
5950 - assemble_real (r, GET_MODE (operands[0]), BITS_PER_WORD);
5951 - break;
5952 - }
5953 + if (GET_MODE (x) == HFmode)
5954 + arm_emit_fp16_const (x);
5955 + else
5956 + {
5957 + REAL_VALUE_TYPE r;
5958 + REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5959 + assemble_real (r, GET_MODE (x), BITS_PER_WORD);
5960 + }
5961 + break;
5962 default:
5963 - assemble_integer (operands[0], 4, BITS_PER_WORD, 1);
5964 - mark_symbol_refs_as_used (operands[0]);
5965 + /* XXX: Sometimes gcc does something really dumb and ends up with
5966 + a HIGH in a constant pool entry, usually because it's trying to
5967 + load into a VFP register. We know this will always be used in
5968 + combination with a LO_SUM which ignores the high bits, so just
5969 + strip off the HIGH. */
5970 + if (GET_CODE (x) == HIGH)
5971 + x = XEXP (x, 0);
5972 + assemble_integer (x, 4, BITS_PER_WORD, 1);
5973 + mark_symbol_refs_as_used (x);
5974 break;
5975 }
5976 return \"\";
5977 @@ -11008,6 +11587,28 @@
5978 [(set_attr "predicable" "yes")
5979 (set_attr "insn" "clz")])
5980
5981 +(define_insn "rbitsi2"
5982 + [(set (match_operand:SI 0 "s_register_operand" "=r")
5983 + (unspec:SI [(match_operand:SI 1 "s_register_operand" "r")] UNSPEC_RBIT))]
5984 + "TARGET_32BIT && arm_arch_thumb2"
5985 + "rbit%?\\t%0, %1"
5986 + [(set_attr "predicable" "yes")
5987 + (set_attr "insn" "clz")])
5988 +
5989 +(define_expand "ctzsi2"
5990 + [(set (match_operand:SI 0 "s_register_operand" "")
5991 + (ctz:SI (match_operand:SI 1 "s_register_operand" "")))]
5992 + "TARGET_32BIT && arm_arch_thumb2"
5993 + "
5994 + {
5995 + rtx tmp = gen_reg_rtx (SImode);
5996 + emit_insn (gen_rbitsi2 (tmp, operands[1]));
5997 + emit_insn (gen_clzsi2 (operands[0], tmp));
5998 + }
5999 + DONE;
6000 + "
6001 +)
6002 +
6003 ;; V5E instructions.
6004
6005 (define_insn "prefetch"
6006 @@ -11017,13 +11618,15 @@
6007 "TARGET_32BIT && arm_arch5e"
6008 "pld\\t%a0")
6009
6010 -;; General predication pattern
6011 +;; General predication pattern.
6012 +;; Conditional branches are available as both arm_cond_branch and
6013 +;; predicated arm_jump, so it doesn't matter if we disable the latter.
6014
6015 (define_cond_exec
6016 [(match_operator 0 "arm_comparison_operator"
6017 [(match_operand 1 "cc_register" "")
6018 (const_int 0)])]
6019 - "TARGET_32BIT"
6020 + "TARGET_32BIT && !TARGET_NO_SINGLE_COND_EXEC"
6021 ""
6022 )
6023
6024 --- a/gcc/config/arm/arm-modes.def
6025 +++ b/gcc/config/arm/arm-modes.def
6026 @@ -25,6 +25,11 @@
6027 FIXME What format is this? */
6028 FLOAT_MODE (XF, 12, 0);
6029
6030 +/* Half-precision floating point */
6031 +FLOAT_MODE (HF, 2, 0);
6032 +ADJUST_FLOAT_FORMAT (HF, ((arm_fp16_format == ARM_FP16_FORMAT_ALTERNATIVE)
6033 + ? &arm_half_format : &ieee_half_format));
6034 +
6035 /* CCFPEmode should be used with floating inequalities,
6036 CCFPmode should be used with floating equalities.
6037 CC_NOOVmode should be used with SImode integer equalities.
6038 @@ -62,6 +67,4 @@
6039 INT_MODE (EI, 24);
6040 INT_MODE (OI, 32);
6041 INT_MODE (CI, 48);
6042 -/* ??? This should actually have 512 bits but the precision only has 9
6043 - bits. */
6044 -FRACTIONAL_INT_MODE (XI, 511, 64);
6045 +INT_MODE (XI, 64);
6046 --- a/gcc/config/arm/arm_neon.h
6047 +++ b/gcc/config/arm/arm_neon.h
6048 @@ -61,7 +61,7 @@
6049 typedef __builtin_neon_usi uint32x4_t __attribute__ ((__vector_size__ (16)));
6050 typedef __builtin_neon_udi uint64x2_t __attribute__ ((__vector_size__ (16)));
6051
6052 -typedef __builtin_neon_sf float32_t;
6053 +typedef float float32_t;
6054 typedef __builtin_neon_poly8 poly8_t;
6055 typedef __builtin_neon_poly16 poly16_t;
6056
6057 @@ -5085,7 +5085,7 @@
6058 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
6059 vset_lane_f32 (float32_t __a, float32x2_t __b, const int __c)
6060 {
6061 - return (float32x2_t)__builtin_neon_vset_lanev2sf (__a, __b, __c);
6062 + return (float32x2_t)__builtin_neon_vset_lanev2sf ((__builtin_neon_sf) __a, __b, __c);
6063 }
6064
6065 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
6066 @@ -5151,7 +5151,7 @@
6067 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
6068 vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __c)
6069 {
6070 - return (float32x4_t)__builtin_neon_vset_lanev4sf (__a, __b, __c);
6071 + return (float32x4_t)__builtin_neon_vset_lanev4sf ((__builtin_neon_sf) __a, __b, __c);
6072 }
6073
6074 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
6075 @@ -5283,7 +5283,7 @@
6076 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
6077 vdup_n_f32 (float32_t __a)
6078 {
6079 - return (float32x2_t)__builtin_neon_vdup_nv2sf (__a);
6080 + return (float32x2_t)__builtin_neon_vdup_nv2sf ((__builtin_neon_sf) __a);
6081 }
6082
6083 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
6084 @@ -5349,7 +5349,7 @@
6085 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
6086 vdupq_n_f32 (float32_t __a)
6087 {
6088 - return (float32x4_t)__builtin_neon_vdup_nv4sf (__a);
6089 + return (float32x4_t)__builtin_neon_vdup_nv4sf ((__builtin_neon_sf) __a);
6090 }
6091
6092 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
6093 @@ -5415,7 +5415,7 @@
6094 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
6095 vmov_n_f32 (float32_t __a)
6096 {
6097 - return (float32x2_t)__builtin_neon_vdup_nv2sf (__a);
6098 + return (float32x2_t)__builtin_neon_vdup_nv2sf ((__builtin_neon_sf) __a);
6099 }
6100
6101 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
6102 @@ -5481,7 +5481,7 @@
6103 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
6104 vmovq_n_f32 (float32_t __a)
6105 {
6106 - return (float32x4_t)__builtin_neon_vdup_nv4sf (__a);
6107 + return (float32x4_t)__builtin_neon_vdup_nv4sf ((__builtin_neon_sf) __a);
6108 }
6109
6110 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
6111 @@ -6591,7 +6591,7 @@
6112 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
6113 vmul_n_f32 (float32x2_t __a, float32_t __b)
6114 {
6115 - return (float32x2_t)__builtin_neon_vmul_nv2sf (__a, __b, 3);
6116 + return (float32x2_t)__builtin_neon_vmul_nv2sf (__a, (__builtin_neon_sf) __b, 3);
6117 }
6118
6119 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
6120 @@ -6621,7 +6621,7 @@
6121 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
6122 vmulq_n_f32 (float32x4_t __a, float32_t __b)
6123 {
6124 - return (float32x4_t)__builtin_neon_vmul_nv4sf (__a, __b, 3);
6125 + return (float32x4_t)__builtin_neon_vmul_nv4sf (__a, (__builtin_neon_sf) __b, 3);
6126 }
6127
6128 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
6129 @@ -6735,7 +6735,7 @@
6130 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
6131 vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
6132 {
6133 - return (float32x2_t)__builtin_neon_vmla_nv2sf (__a, __b, __c, 3);
6134 + return (float32x2_t)__builtin_neon_vmla_nv2sf (__a, __b, (__builtin_neon_sf) __c, 3);
6135 }
6136
6137 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
6138 @@ -6765,7 +6765,7 @@
6139 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
6140 vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
6141 {
6142 - return (float32x4_t)__builtin_neon_vmla_nv4sf (__a, __b, __c, 3);
6143 + return (float32x4_t)__builtin_neon_vmla_nv4sf (__a, __b, (__builtin_neon_sf) __c, 3);
6144 }
6145
6146 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
6147 @@ -6831,7 +6831,7 @@
6148 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
6149 vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
6150 {
6151 - return (float32x2_t)__builtin_neon_vmls_nv2sf (__a, __b, __c, 3);
6152 + return (float32x2_t)__builtin_neon_vmls_nv2sf (__a, __b, (__builtin_neon_sf) __c, 3);
6153 }
6154
6155 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
6156 @@ -6861,7 +6861,7 @@
6157 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
6158 vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
6159 {
6160 - return (float32x4_t)__builtin_neon_vmls_nv4sf (__a, __b, __c, 3);
6161 + return (float32x4_t)__builtin_neon_vmls_nv4sf (__a, __b, (__builtin_neon_sf) __c, 3);
6162 }
6163
6164 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
6165 @@ -7851,7 +7851,7 @@
6166 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
6167 vld1_f32 (const float32_t * __a)
6168 {
6169 - return (float32x2_t)__builtin_neon_vld1v2sf (__a);
6170 + return (float32x2_t)__builtin_neon_vld1v2sf ((const __builtin_neon_sf *) __a);
6171 }
6172
6173 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
6174 @@ -7917,7 +7917,7 @@
6175 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
6176 vld1q_f32 (const float32_t * __a)
6177 {
6178 - return (float32x4_t)__builtin_neon_vld1v4sf (__a);
6179 + return (float32x4_t)__builtin_neon_vld1v4sf ((const __builtin_neon_sf *) __a);
6180 }
6181
6182 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
6183 @@ -7977,7 +7977,7 @@
6184 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
6185 vld1_lane_f32 (const float32_t * __a, float32x2_t __b, const int __c)
6186 {
6187 - return (float32x2_t)__builtin_neon_vld1_lanev2sf (__a, __b, __c);
6188 + return (float32x2_t)__builtin_neon_vld1_lanev2sf ((const __builtin_neon_sf *) __a, __b, __c);
6189 }
6190
6191 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
6192 @@ -8043,7 +8043,7 @@
6193 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
6194 vld1q_lane_f32 (const float32_t * __a, float32x4_t __b, const int __c)
6195 {
6196 - return (float32x4_t)__builtin_neon_vld1_lanev4sf (__a, __b, __c);
6197 + return (float32x4_t)__builtin_neon_vld1_lanev4sf ((const __builtin_neon_sf *) __a, __b, __c);
6198 }
6199
6200 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
6201 @@ -8109,7 +8109,7 @@
6202 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
6203 vld1_dup_f32 (const float32_t * __a)
6204 {
6205 - return (float32x2_t)__builtin_neon_vld1_dupv2sf (__a);
6206 + return (float32x2_t)__builtin_neon_vld1_dupv2sf ((const __builtin_neon_sf *) __a);
6207 }
6208
6209 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
6210 @@ -8175,7 +8175,7 @@
6211 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
6212 vld1q_dup_f32 (const float32_t * __a)
6213 {
6214 - return (float32x4_t)__builtin_neon_vld1_dupv4sf (__a);
6215 + return (float32x4_t)__builtin_neon_vld1_dupv4sf ((const __builtin_neon_sf *) __a);
6216 }
6217
6218 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
6219 @@ -8247,7 +8247,7 @@
6220 __extension__ static __inline void __attribute__ ((__always_inline__))
6221 vst1_f32 (float32_t * __a, float32x2_t __b)
6222 {
6223 - __builtin_neon_vst1v2sf (__a, __b);
6224 + __builtin_neon_vst1v2sf ((__builtin_neon_sf *) __a, __b);
6225 }
6226
6227 __extension__ static __inline void __attribute__ ((__always_inline__))
6228 @@ -8313,7 +8313,7 @@
6229 __extension__ static __inline void __attribute__ ((__always_inline__))
6230 vst1q_f32 (float32_t * __a, float32x4_t __b)
6231 {
6232 - __builtin_neon_vst1v4sf (__a, __b);
6233 + __builtin_neon_vst1v4sf ((__builtin_neon_sf *) __a, __b);
6234 }
6235
6236 __extension__ static __inline void __attribute__ ((__always_inline__))
6237 @@ -8373,7 +8373,7 @@
6238 __extension__ static __inline void __attribute__ ((__always_inline__))
6239 vst1_lane_f32 (float32_t * __a, float32x2_t __b, const int __c)
6240 {
6241 - __builtin_neon_vst1_lanev2sf (__a, __b, __c);
6242 + __builtin_neon_vst1_lanev2sf ((__builtin_neon_sf *) __a, __b, __c);
6243 }
6244
6245 __extension__ static __inline void __attribute__ ((__always_inline__))
6246 @@ -8439,7 +8439,7 @@
6247 __extension__ static __inline void __attribute__ ((__always_inline__))
6248 vst1q_lane_f32 (float32_t * __a, float32x4_t __b, const int __c)
6249 {
6250 - __builtin_neon_vst1_lanev4sf (__a, __b, __c);
6251 + __builtin_neon_vst1_lanev4sf ((__builtin_neon_sf *) __a, __b, __c);
6252 }
6253
6254 __extension__ static __inline void __attribute__ ((__always_inline__))
6255 @@ -8512,7 +8512,7 @@
6256 vld2_f32 (const float32_t * __a)
6257 {
6258 union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
6259 - __rv.__o = __builtin_neon_vld2v2sf (__a);
6260 + __rv.__o = __builtin_neon_vld2v2sf ((const __builtin_neon_sf *) __a);
6261 return __rv.__i;
6262 }
6263
6264 @@ -8600,7 +8600,7 @@
6265 vld2q_f32 (const float32_t * __a)
6266 {
6267 union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
6268 - __rv.__o = __builtin_neon_vld2v4sf (__a);
6269 + __rv.__o = __builtin_neon_vld2v4sf ((const __builtin_neon_sf *) __a);
6270 return __rv.__i;
6271 }
6272
6273 @@ -8676,7 +8676,7 @@
6274 {
6275 union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
6276 union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
6277 - __rv.__o = __builtin_neon_vld2_lanev2sf (__a, __bu.__o, __c);
6278 + __rv.__o = __builtin_neon_vld2_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
6279 return __rv.__i;
6280 }
6281
6282 @@ -8748,7 +8748,7 @@
6283 {
6284 union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
6285 union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
6286 - __rv.__o = __builtin_neon_vld2_lanev4sf (__a, __bu.__o, __c);
6287 + __rv.__o = __builtin_neon_vld2_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
6288 return __rv.__i;
6289 }
6290
6291 @@ -8807,7 +8807,7 @@
6292 vld2_dup_f32 (const float32_t * __a)
6293 {
6294 union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
6295 - __rv.__o = __builtin_neon_vld2_dupv2sf (__a);
6296 + __rv.__o = __builtin_neon_vld2_dupv2sf ((const __builtin_neon_sf *) __a);
6297 return __rv.__i;
6298 }
6299
6300 @@ -8892,7 +8892,7 @@
6301 vst2_f32 (float32_t * __a, float32x2x2_t __b)
6302 {
6303 union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
6304 - __builtin_neon_vst2v2sf (__a, __bu.__o);
6305 + __builtin_neon_vst2v2sf ((__builtin_neon_sf *) __a, __bu.__o);
6306 }
6307
6308 __extension__ static __inline void __attribute__ ((__always_inline__))
6309 @@ -8969,7 +8969,7 @@
6310 vst2q_f32 (float32_t * __a, float32x4x2_t __b)
6311 {
6312 union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
6313 - __builtin_neon_vst2v4sf (__a, __bu.__o);
6314 + __builtin_neon_vst2v4sf ((__builtin_neon_sf *) __a, __bu.__o);
6315 }
6316
6317 __extension__ static __inline void __attribute__ ((__always_inline__))
6318 @@ -9032,7 +9032,7 @@
6319 vst2_lane_f32 (float32_t * __a, float32x2x2_t __b, const int __c)
6320 {
6321 union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
6322 - __builtin_neon_vst2_lanev2sf (__a, __bu.__o, __c);
6323 + __builtin_neon_vst2_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
6324 }
6325
6326 __extension__ static __inline void __attribute__ ((__always_inline__))
6327 @@ -9088,7 +9088,7 @@
6328 vst2q_lane_f32 (float32_t * __a, float32x4x2_t __b, const int __c)
6329 {
6330 union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
6331 - __builtin_neon_vst2_lanev4sf (__a, __bu.__o, __c);
6332 + __builtin_neon_vst2_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
6333 }
6334
6335 __extension__ static __inline void __attribute__ ((__always_inline__))
6336 @@ -9140,7 +9140,7 @@
6337 vld3_f32 (const float32_t * __a)
6338 {
6339 union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
6340 - __rv.__o = __builtin_neon_vld3v2sf (__a);
6341 + __rv.__o = __builtin_neon_vld3v2sf ((const __builtin_neon_sf *) __a);
6342 return __rv.__i;
6343 }
6344
6345 @@ -9228,7 +9228,7 @@
6346 vld3q_f32 (const float32_t * __a)
6347 {
6348 union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
6349 - __rv.__o = __builtin_neon_vld3v4sf (__a);
6350 + __rv.__o = __builtin_neon_vld3v4sf ((const __builtin_neon_sf *) __a);
6351 return __rv.__i;
6352 }
6353
6354 @@ -9304,7 +9304,7 @@
6355 {
6356 union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
6357 union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
6358 - __rv.__o = __builtin_neon_vld3_lanev2sf (__a, __bu.__o, __c);
6359 + __rv.__o = __builtin_neon_vld3_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
6360 return __rv.__i;
6361 }
6362
6363 @@ -9376,7 +9376,7 @@
6364 {
6365 union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
6366 union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
6367 - __rv.__o = __builtin_neon_vld3_lanev4sf (__a, __bu.__o, __c);
6368 + __rv.__o = __builtin_neon_vld3_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
6369 return __rv.__i;
6370 }
6371
6372 @@ -9435,7 +9435,7 @@
6373 vld3_dup_f32 (const float32_t * __a)
6374 {
6375 union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
6376 - __rv.__o = __builtin_neon_vld3_dupv2sf (__a);
6377 + __rv.__o = __builtin_neon_vld3_dupv2sf ((const __builtin_neon_sf *) __a);
6378 return __rv.__i;
6379 }
6380
6381 @@ -9520,7 +9520,7 @@
6382 vst3_f32 (float32_t * __a, float32x2x3_t __b)
6383 {
6384 union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
6385 - __builtin_neon_vst3v2sf (__a, __bu.__o);
6386 + __builtin_neon_vst3v2sf ((__builtin_neon_sf *) __a, __bu.__o);
6387 }
6388
6389 __extension__ static __inline void __attribute__ ((__always_inline__))
6390 @@ -9597,7 +9597,7 @@
6391 vst3q_f32 (float32_t * __a, float32x4x3_t __b)
6392 {
6393 union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
6394 - __builtin_neon_vst3v4sf (__a, __bu.__o);
6395 + __builtin_neon_vst3v4sf ((__builtin_neon_sf *) __a, __bu.__o);
6396 }
6397
6398 __extension__ static __inline void __attribute__ ((__always_inline__))
6399 @@ -9660,7 +9660,7 @@
6400 vst3_lane_f32 (float32_t * __a, float32x2x3_t __b, const int __c)
6401 {
6402 union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
6403 - __builtin_neon_vst3_lanev2sf (__a, __bu.__o, __c);
6404 + __builtin_neon_vst3_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
6405 }
6406
6407 __extension__ static __inline void __attribute__ ((__always_inline__))
6408 @@ -9716,7 +9716,7 @@
6409 vst3q_lane_f32 (float32_t * __a, float32x4x3_t __b, const int __c)
6410 {
6411 union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
6412 - __builtin_neon_vst3_lanev4sf (__a, __bu.__o, __c);
6413 + __builtin_neon_vst3_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
6414 }
6415
6416 __extension__ static __inline void __attribute__ ((__always_inline__))
6417 @@ -9768,7 +9768,7 @@
6418 vld4_f32 (const float32_t * __a)
6419 {
6420 union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
6421 - __rv.__o = __builtin_neon_vld4v2sf (__a);
6422 + __rv.__o = __builtin_neon_vld4v2sf ((const __builtin_neon_sf *) __a);
6423 return __rv.__i;
6424 }
6425
6426 @@ -9856,7 +9856,7 @@
6427 vld4q_f32 (const float32_t * __a)
6428 {
6429 union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
6430 - __rv.__o = __builtin_neon_vld4v4sf (__a);
6431 + __rv.__o = __builtin_neon_vld4v4sf ((const __builtin_neon_sf *) __a);
6432 return __rv.__i;
6433 }
6434
6435 @@ -9932,7 +9932,7 @@
6436 {
6437 union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
6438 union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
6439 - __rv.__o = __builtin_neon_vld4_lanev2sf (__a, __bu.__o, __c);
6440 + __rv.__o = __builtin_neon_vld4_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
6441 return __rv.__i;
6442 }
6443
6444 @@ -10004,7 +10004,7 @@
6445 {
6446 union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
6447 union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
6448 - __rv.__o = __builtin_neon_vld4_lanev4sf (__a, __bu.__o, __c);
6449 + __rv.__o = __builtin_neon_vld4_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
6450 return __rv.__i;
6451 }
6452
6453 @@ -10063,7 +10063,7 @@
6454 vld4_dup_f32 (const float32_t * __a)
6455 {
6456 union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
6457 - __rv.__o = __builtin_neon_vld4_dupv2sf (__a);
6458 + __rv.__o = __builtin_neon_vld4_dupv2sf ((const __builtin_neon_sf *) __a);
6459 return __rv.__i;
6460 }
6461
6462 @@ -10148,7 +10148,7 @@
6463 vst4_f32 (float32_t * __a, float32x2x4_t __b)
6464 {
6465 union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
6466 - __builtin_neon_vst4v2sf (__a, __bu.__o);
6467 + __builtin_neon_vst4v2sf ((__builtin_neon_sf *) __a, __bu.__o);
6468 }
6469
6470 __extension__ static __inline void __attribute__ ((__always_inline__))
6471 @@ -10225,7 +10225,7 @@
6472 vst4q_f32 (float32_t * __a, float32x4x4_t __b)
6473 {
6474 union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
6475 - __builtin_neon_vst4v4sf (__a, __bu.__o);
6476 + __builtin_neon_vst4v4sf ((__builtin_neon_sf *) __a, __bu.__o);
6477 }
6478
6479 __extension__ static __inline void __attribute__ ((__always_inline__))
6480 @@ -10288,7 +10288,7 @@
6481 vst4_lane_f32 (float32_t * __a, float32x2x4_t __b, const int __c)
6482 {
6483 union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
6484 - __builtin_neon_vst4_lanev2sf (__a, __bu.__o, __c);
6485 + __builtin_neon_vst4_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
6486 }
6487
6488 __extension__ static __inline void __attribute__ ((__always_inline__))
6489 @@ -10344,7 +10344,7 @@
6490 vst4q_lane_f32 (float32_t * __a, float32x4x4_t __b, const int __c)
6491 {
6492 union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
6493 - __builtin_neon_vst4_lanev4sf (__a, __bu.__o, __c);
6494 + __builtin_neon_vst4_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
6495 }
6496
6497 __extension__ static __inline void __attribute__ ((__always_inline__))
6498 --- a/gcc/config/arm/arm.opt
6499 +++ b/gcc/config/arm/arm.opt
6500 @@ -78,6 +78,10 @@
6501 mfp=
6502 Target RejectNegative Joined Undocumented Var(target_fpe_name)
6503
6504 +mfp16-format=
6505 +Target RejectNegative Joined Var(target_fp16_format_name)
6506 +Specify the __fp16 floating-point format
6507 +
6508 ;; Now ignored.
6509 mfpe
6510 Target RejectNegative Mask(FPE) Undocumented
6511 @@ -93,6 +97,10 @@
6512 Target RejectNegative
6513 Alias for -mfloat-abi=hard
6514
6515 +mfix-janus-2cc
6516 +Target Report Mask(FIX_JANUS)
6517 +Work around hardware errata for Avalent Janus 2CC cores.
6518 +
6519 mlittle-endian
6520 Target Report RejectNegative InverseMask(BIG_END)
6521 Assume target CPU is configured as little endian
6522 @@ -101,6 +109,10 @@
6523 Target Report Mask(LONG_CALLS)
6524 Generate call insns as indirect calls, if necessary
6525
6526 +mmarvell-div
6527 +Target Report Mask(MARVELL_DIV)
6528 +Generate hardware integer division instructions supported by some Marvell cores.
6529 +
6530 mpic-register=
6531 Target RejectNegative Joined Var(arm_pic_register_string)
6532 Specify the register to be used for PIC addressing
6533 @@ -157,6 +169,10 @@
6534 Target Report Mask(NEON_VECTORIZE_QUAD)
6535 Use Neon quad-word (rather than double-word) registers for vectorization
6536
6537 +mlow-irq-latency
6538 +Target Report Var(low_irq_latency)
6539 +Try to reduce interrupt latency of the generated code
6540 +
6541 mword-relocations
6542 Target Report Var(target_word_relocations) Init(TARGET_DEFAULT_WORD_RELOCATIONS)
6543 Only generate absolute relocations on word sized values.
6544 --- a/gcc/config/arm/arm-protos.h
6545 +++ b/gcc/config/arm/arm-protos.h
6546 @@ -88,7 +88,7 @@
6547
6548 extern int cirrus_memory_offset (rtx);
6549 extern int arm_coproc_mem_operand (rtx, bool);
6550 -extern int neon_vector_mem_operand (rtx, bool);
6551 +extern int neon_vector_mem_operand (rtx, int);
6552 extern int neon_struct_mem_operand (rtx);
6553 extern int arm_no_early_store_addr_dep (rtx, rtx);
6554 extern int arm_no_early_alu_shift_dep (rtx, rtx);
6555 @@ -144,6 +144,7 @@
6556 extern int arm_debugger_arg_offset (int, rtx);
6557 extern bool arm_is_long_call_p (tree);
6558 extern int arm_emit_vector_const (FILE *, rtx);
6559 +extern void arm_emit_fp16_const (rtx c);
6560 extern const char * arm_output_load_gr (rtx *);
6561 extern const char *vfp_output_fstmd (rtx *);
6562 extern void arm_set_return_address (rtx, rtx);
6563 @@ -154,13 +155,15 @@
6564
6565 #if defined TREE_CODE
6566 extern rtx arm_function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
6567 +extern void arm_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
6568 + tree, bool);
6569 extern void arm_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree);
6570 extern bool arm_pad_arg_upward (enum machine_mode, const_tree);
6571 extern bool arm_pad_reg_upward (enum machine_mode, tree, int);
6572 extern bool arm_needs_doubleword_align (enum machine_mode, tree);
6573 -extern rtx arm_function_value(const_tree, const_tree);
6574 #endif
6575 extern int arm_apply_result_size (void);
6576 +extern rtx aapcs_libcall_value (enum machine_mode);
6577
6578 #endif /* RTX_CODE */
6579
6580 --- a/gcc/config/arm/arm-tune.md
6581 +++ b/gcc/config/arm/arm-tune.md
6582 @@ -1,5 +1,5 @@
6583 ;; -*- buffer-read-only: t -*-
6584 ;; Generated automatically by gentune.sh from arm-cores.def
6585 (define_attr "tune"
6586 - "arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,iwmmxt2,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,cortexa8,cortexa9,cortexr4,cortexr4f,cortexm3,cortexm1"
6587 + "arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,iwmmxt2,marvell_f,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,cortexa5,cortexa8,cortexa9,cortexr4,cortexr4f,cortexm3,cortexm1,cortexm0"
6588 (const (symbol_ref "arm_tune")))
6589 --- a/gcc/config/arm/bpabi.h
6590 +++ b/gcc/config/arm/bpabi.h
6591 @@ -30,7 +30,7 @@
6592
6593 /* Section 4.1 of the AAPCS requires the use of VFP format. */
6594 #undef FPUTYPE_DEFAULT
6595 -#define FPUTYPE_DEFAULT FPUTYPE_VFP
6596 +#define FPUTYPE_DEFAULT "vfp"
6597
6598 /* TARGET_BIG_ENDIAN_DEFAULT is set in
6599 config.gcc for big endian configurations. */
6600 @@ -53,6 +53,8 @@
6601
6602 #define TARGET_FIX_V4BX_SPEC " %{mcpu=arm8|mcpu=arm810|mcpu=strongarm*|march=armv4:--fix-v4bx}"
6603
6604 +#define BE8_LINK_SPEC " %{mbig-endian:%{march=armv7-a|mcpu=cortex-a5|mcpu=cortex-a8|mcpu=cortex-a9:%{!r:--be8}}}"
6605 +
6606 /* Tell the assembler to build BPABI binaries. */
6607 #undef SUBTARGET_EXTRA_ASM_SPEC
6608 #define SUBTARGET_EXTRA_ASM_SPEC "%{mabi=apcs-gnu|mabi=atpcs:-meabi=gnu;:-meabi=5}" TARGET_FIX_V4BX_SPEC
6609 @@ -65,7 +67,7 @@
6610 #define BPABI_LINK_SPEC \
6611 "%{mbig-endian:-EB} %{mlittle-endian:-EL} " \
6612 "%{static:-Bstatic} %{shared:-shared} %{symbolic:-Bsymbolic} " \
6613 - "-X" SUBTARGET_EXTRA_LINK_SPEC TARGET_FIX_V4BX_SPEC
6614 + "-X" SUBTARGET_EXTRA_LINK_SPEC TARGET_FIX_V4BX_SPEC BE8_LINK_SPEC
6615
6616 #undef LINK_SPEC
6617 #define LINK_SPEC BPABI_LINK_SPEC
6618 @@ -90,16 +92,22 @@
6619 #define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, lmul)
6620 #endif
6621 #ifdef L_fixdfdi
6622 -#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixdfdi, d2lz)
6623 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixdfdi, d2lz) \
6624 + extern DWtype __fixdfdi (DFtype) __attribute__((pcs("aapcs"))); \
6625 + extern UDWtype __fixunsdfdi (DFtype) __asm__("__aeabi_d2ulz") __attribute__((pcs("aapcs")));
6626 #endif
6627 #ifdef L_fixunsdfdi
6628 -#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunsdfdi, d2ulz)
6629 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunsdfdi, d2ulz) \
6630 + extern UDWtype __fixunsdfdi (DFtype) __attribute__((pcs("aapcs")));
6631 #endif
6632 #ifdef L_fixsfdi
6633 -#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f2lz)
6634 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f2lz) \
6635 + extern DWtype __fixsfdi (SFtype) __attribute__((pcs("aapcs"))); \
6636 + extern UDWtype __fixunssfdi (SFtype) __asm__("__aeabi_f2ulz") __attribute__((pcs("aapcs")));
6637 #endif
6638 #ifdef L_fixunssfdi
6639 -#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f2ulz)
6640 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f2ulz) \
6641 + extern UDWtype __fixunssfdi (SFtype) __attribute__((pcs("aapcs")));
6642 #endif
6643 #ifdef L_floatdidf
6644 #define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, l2d)
6645 --- a/gcc/config/arm/bpabi.S
6646 +++ b/gcc/config/arm/bpabi.S
6647 @@ -64,20 +64,69 @@
6648
6649 #endif /* L_aeabi_ulcmp */
6650
6651 +.macro test_div_by_zero signed
6652 +/* Tail-call to divide-by-zero handlers which may be overridden by the user,
6653 + so unwinding works properly. */
6654 +#if defined(__thumb2__)
6655 + cbnz yyh, 1f
6656 + cbnz yyl, 1f
6657 + cmp xxh, #0
6658 + do_it eq
6659 + cmpeq xxl, #0
6660 + .ifc \signed, unsigned
6661 + beq 2f
6662 + mov xxh, #0xffffffff
6663 + mov xxl, xxh
6664 +2:
6665 + .else
6666 + do_it lt, t
6667 + movlt xxl, #0
6668 + movlt xxh, #0x80000000
6669 + do_it gt, t
6670 + movgt xxh, #0x7fffffff
6671 + movgt xxl, #0xffffffff
6672 + .endif
6673 + b SYM (__aeabi_ldiv0) __PLT__
6674 +1:
6675 +#else
6676 + /* Note: Thumb-1 code calls via an ARM shim on processors which
6677 + support ARM mode. */
6678 + cmp yyh, #0
6679 + cmpeq yyl, #0
6680 + bne 2f
6681 + cmp xxh, #0
6682 + cmpeq xxl, #0
6683 + .ifc \signed, unsigned
6684 + movne xxh, #0xffffffff
6685 + movne xxl, #0xffffffff
6686 + .else
6687 + movlt xxh, #0x80000000
6688 + movlt xxl, #0
6689 + movgt xxh, #0x7fffffff
6690 + movgt xxl, #0xffffffff
6691 + .endif
6692 + b SYM (__aeabi_ldiv0) __PLT__
6693 +2:
6694 +#endif
6695 +.endm
6696 +
6697 #ifdef L_aeabi_ldivmod
6698
6699 ARM_FUNC_START aeabi_ldivmod
6700 + test_div_by_zero signed
6701 +
6702 sub sp, sp, #8
6703 -#if defined(__thumb2__)
6704 +/* Low latency and Thumb-2 do_push implementations can't push sp directly. */
6705 +#if defined(__thumb2__) || defined(__irq_low_latency__)
6706 mov ip, sp
6707 - push {ip, lr}
6708 + do_push (ip, lr)
6709 #else
6710 - do_push {sp, lr}
6711 + stmfd sp!, {sp, lr}
6712 #endif
6713 bl SYM(__gnu_ldivmod_helper) __PLT__
6714 ldr lr, [sp, #4]
6715 add sp, sp, #8
6716 - do_pop {r2, r3}
6717 + do_pop (r2, r3)
6718 RET
6719
6720 #endif /* L_aeabi_ldivmod */
6721 @@ -85,17 +134,20 @@
6722 #ifdef L_aeabi_uldivmod
6723
6724 ARM_FUNC_START aeabi_uldivmod
6725 + test_div_by_zero unsigned
6726 +
6727 sub sp, sp, #8
6728 -#if defined(__thumb2__)
6729 +/* Low latency and Thumb-2 do_push implementations can't push sp directly. */
6730 +#if defined(__thumb2__) || defined(__irq_low_latency__)
6731 mov ip, sp
6732 - push {ip, lr}
6733 + do_push (ip, lr)
6734 #else
6735 - do_push {sp, lr}
6736 + stmfd sp!, {sp, lr}
6737 #endif
6738 bl SYM(__gnu_uldivmod_helper) __PLT__
6739 ldr lr, [sp, #4]
6740 add sp, sp, #8
6741 - do_pop {r2, r3}
6742 + do_pop (r2, r3)
6743 RET
6744
6745 #endif /* L_aeabi_divmod */
6746 --- a/gcc/config/arm/bpabi-v6m.S
6747 +++ b/gcc/config/arm/bpabi-v6m.S
6748 @@ -69,9 +69,52 @@
6749
6750 #endif /* L_aeabi_ulcmp */
6751
6752 +.macro test_div_by_zero signed
6753 + cmp yyh, #0
6754 + bne 7f
6755 + cmp yyl, #0
6756 + bne 7f
6757 + cmp xxh, #0
6758 + bne 2f
6759 + cmp xxl, #0
6760 +2:
6761 + .ifc \signed, unsigned
6762 + beq 3f
6763 + mov xxh, #0
6764 + mvn xxh, xxh @ 0xffffffff
6765 + mov xxl, xxh
6766 +3:
6767 + .else
6768 + beq 5f
6769 + blt 6f
6770 + mov xxl, #0
6771 + mvn xxl, xxl @ 0xffffffff
6772 + lsr xxh, xxl, #1 @ 0x7fffffff
6773 + b 5f
6774 +6: mov xxh, #0x80
6775 + lsl xxh, xxh, #24 @ 0x80000000
6776 + mov xxl, #0
6777 +5:
6778 + .endif
6779 + @ tailcalls are tricky on v6-m.
6780 + push {r0, r1, r2}
6781 + ldr r0, 1f
6782 + adr r1, 1f
6783 + add r0, r1
6784 + str r0, [sp, #8]
6785 + @ We know we are not on armv4t, so pop pc is safe.
6786 + pop {r0, r1, pc}
6787 + .align 2
6788 +1:
6789 + .word __aeabi_ldiv0 - 1b
6790 +7:
6791 +.endm
6792 +
6793 #ifdef L_aeabi_ldivmod
6794
6795 FUNC_START aeabi_ldivmod
6796 + test_div_by_zero signed
6797 +
6798 push {r0, r1}
6799 mov r0, sp
6800 push {r0, lr}
6801 @@ -89,6 +132,8 @@
6802 #ifdef L_aeabi_uldivmod
6803
6804 FUNC_START aeabi_uldivmod
6805 + test_div_by_zero unsigned
6806 +
6807 push {r0, r1}
6808 mov r0, sp
6809 push {r0, lr}
6810 --- a/gcc/config/arm/constraints.md
6811 +++ b/gcc/config/arm/constraints.md
6812 @@ -25,14 +25,15 @@
6813 ;; In ARM state, 'l' is an alias for 'r'
6814
6815 ;; The following normal constraints have been used:
6816 -;; in ARM/Thumb-2 state: G, H, I, J, K, L, M
6817 +;; in ARM/Thumb-2 state: G, H, I, j, J, K, L, M
6818 ;; in Thumb-1 state: I, J, K, L, M, N, O
6819
6820 ;; The following multi-letter normal constraints have been used:
6821 -;; in ARM/Thumb-2 state: Da, Db, Dc, Dn, Dl, DL, Dv
6822 +;; in ARM/Thumb-2 state: Da, Db, Dc, Dn, Dl, DL, Dv, Dy
6823 +;; in Thumb-1 state: Pa, Pb
6824
6825 ;; The following memory constraints have been used:
6826 -;; in ARM/Thumb-2 state: Q, Ut, Uv, Uy, Un, Us
6827 +;; in ARM/Thumb-2 state: Q, Ut, Uv, Uy, Un, Um, Us
6828 ;; in ARM state: Uq
6829
6830
6831 @@ -65,6 +66,13 @@
6832 (define_register_constraint "h" "TARGET_THUMB ? HI_REGS : NO_REGS"
6833 "In Thumb state the core registers @code{r8}-@code{r15}.")
6834
6835 +(define_constraint "j"
6836 + "A constant suitable for a MOVW instruction. (ARM/Thumb-2)"
6837 + (and (match_test "TARGET_32BIT && arm_arch_thumb2")
6838 + (ior (match_code "high")
6839 + (and (match_code "const_int")
6840 + (match_test "(ival & 0xffff0000) == 0")))))
6841 +
6842 (define_register_constraint "k" "STACK_REG"
6843 "@internal The stack register.")
6844
6845 @@ -116,11 +124,9 @@
6846 : ((ival >= 0 && ival <= 1020) && ((ival & 3) == 0))")))
6847
6848 (define_constraint "N"
6849 - "In ARM/Thumb-2 state a constant suitable for a MOVW instruction.
6850 - In Thumb-1 state a constant in the range 0-31."
6851 + "Thumb-1 state a constant in the range 0-31."
6852 (and (match_code "const_int")
6853 - (match_test "TARGET_32BIT ? arm_arch_thumb2 && ((ival & 0xffff0000) == 0)
6854 - : (ival >= 0 && ival <= 31)")))
6855 + (match_test "!TARGET_32BIT && (ival >= 0 && ival <= 31)")))
6856
6857 (define_constraint "O"
6858 "In Thumb-1 state a constant that is a multiple of 4 in the range
6859 @@ -129,6 +135,18 @@
6860 (match_test "TARGET_THUMB1 && ival >= -508 && ival <= 508
6861 && ((ival & 3) == 0)")))
6862
6863 +(define_constraint "Pa"
6864 + "@internal In Thumb-1 state a constant in the range -510 to +510"
6865 + (and (match_code "const_int")
6866 + (match_test "TARGET_THUMB1 && ival >= -510 && ival <= 510
6867 + && (ival > 255 || ival < -255)")))
6868 +
6869 +(define_constraint "Pb"
6870 + "@internal In Thumb-1 state a constant in the range -262 to +262"
6871 + (and (match_code "const_int")
6872 + (match_test "TARGET_THUMB1 && ival >= -262 && ival <= 262
6873 + && (ival > 255 || ival < -255)")))
6874 +
6875 (define_constraint "G"
6876 "In ARM/Thumb-2 state a valid FPA immediate constant."
6877 (and (match_code "const_double")
6878 @@ -189,10 +207,17 @@
6879 (define_constraint "Dv"
6880 "@internal
6881 In ARM/Thumb-2 state a const_double which can be used with a VFP fconsts
6882 - or fconstd instruction."
6883 + instruction."
6884 (and (match_code "const_double")
6885 (match_test "TARGET_32BIT && vfp3_const_double_rtx (op)")))
6886
6887 +(define_constraint "Dy"
6888 + "@internal
6889 + In ARM/Thumb-2 state a const_double which can be used with a VFP fconstd
6890 + instruction."
6891 + (and (match_code "const_double")
6892 + (match_test "TARGET_32BIT && TARGET_VFP_DOUBLE && vfp3_const_double_rtx (op)")))
6893 +
6894 (define_memory_constraint "Ut"
6895 "@internal
6896 In ARM/Thumb-2 state an address valid for loading/storing opaque structure
6897 @@ -214,17 +239,24 @@
6898
6899 (define_memory_constraint "Un"
6900 "@internal
6901 + In ARM/Thumb-2 state a valid address for Neon doubleword vector
6902 + load/store instructions."
6903 + (and (match_code "mem")
6904 + (match_test "TARGET_32BIT && neon_vector_mem_operand (op, 0)")))
6905 +
6906 +(define_memory_constraint "Um"
6907 + "@internal
6908 In ARM/Thumb-2 state a valid address for Neon element and structure
6909 load/store instructions."
6910 (and (match_code "mem")
6911 - (match_test "TARGET_32BIT && neon_vector_mem_operand (op, FALSE)")))
6912 + (match_test "TARGET_32BIT && neon_vector_mem_operand (op, 2)")))
6913
6914 (define_memory_constraint "Us"
6915 "@internal
6916 In ARM/Thumb-2 state a valid address for non-offset loads/stores of
6917 quad-word values in four ARM registers."
6918 (and (match_code "mem")
6919 - (match_test "TARGET_32BIT && neon_vector_mem_operand (op, TRUE)")))
6920 + (match_test "TARGET_32BIT && neon_vector_mem_operand (op, 1)")))
6921
6922 (define_memory_constraint "Uq"
6923 "@internal
6924 --- /dev/null
6925 +++ b/gcc/config/arm/fp16.c
6926 @@ -0,0 +1,145 @@
6927 +/* Half-float conversion routines.
6928 +
6929 + Copyright (C) 2008, 2009 Free Software Foundation, Inc.
6930 + Contributed by CodeSourcery.
6931 +
6932 + This file is free software; you can redistribute it and/or modify it
6933 + under the terms of the GNU General Public License as published by the
6934 + Free Software Foundation; either version 3, or (at your option) any
6935 + later version.
6936 +
6937 + This file is distributed in the hope that it will be useful, but
6938 + WITHOUT ANY WARRANTY; without even the implied warranty of
6939 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
6940 + General Public License for more details.
6941 +
6942 + Under Section 7 of GPL version 3, you are granted additional
6943 + permissions described in the GCC Runtime Library Exception, version
6944 + 3.1, as published by the Free Software Foundation.
6945 +
6946 + You should have received a copy of the GNU General Public License and
6947 + a copy of the GCC Runtime Library Exception along with this program;
6948 + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
6949 + <http://www.gnu.org/licenses/>. */
6950 +
6951 +static inline unsigned short
6952 +__gnu_f2h_internal(unsigned int a, int ieee)
6953 +{
6954 + unsigned short sign = (a >> 16) & 0x8000;
6955 + int aexp = (a >> 23) & 0xff;
6956 + unsigned int mantissa = a & 0x007fffff;
6957 + unsigned int mask;
6958 + unsigned int increment;
6959 +
6960 + if (aexp == 0xff)
6961 + {
6962 + if (!ieee)
6963 + return sign;
6964 + return sign | 0x7e00 | (mantissa >> 13);
6965 + }
6966 +
6967 + if (aexp == 0 && mantissa == 0)
6968 + return sign;
6969 +
6970 + aexp -= 127;
6971 +
6972 + /* Decimal point between bits 22 and 23. */
6973 + mantissa |= 0x00800000;
6974 + if (aexp < -14)
6975 + {
6976 + mask = 0x007fffff;
6977 + if (aexp < -25)
6978 + aexp = -26;
6979 + else if (aexp != -25)
6980 + mask >>= 24 + aexp;
6981 + }
6982 + else
6983 + mask = 0x00001fff;
6984 +
6985 + /* Round. */
6986 + if (mantissa & mask)
6987 + {
6988 + increment = (mask + 1) >> 1;
6989 + if ((mantissa & mask) == increment)
6990 + increment = mantissa & (increment << 1);
6991 + mantissa += increment;
6992 + if (mantissa >= 0x01000000)
6993 + {
6994 + mantissa >>= 1;
6995 + aexp++;
6996 + }
6997 + }
6998 +
6999 + if (ieee)
7000 + {
7001 + if (aexp > 15)
7002 + return sign | 0x7c00;
7003 + }
7004 + else
7005 + {
7006 + if (aexp > 16)
7007 + return sign | 0x7fff;
7008 + }
7009 +
7010 + if (aexp < -24)
7011 + return sign;
7012 +
7013 + if (aexp < -14)
7014 + {
7015 + mantissa >>= -14 - aexp;
7016 + aexp = -14;
7017 + }
7018 +
7019 + /* We leave the leading 1 in the mantissa, and subtract one
7020 + from the exponent bias to compensate. */
7021 + return sign | (((aexp + 14) << 10) + (mantissa >> 13));
7022 +}
7023 +
7024 +unsigned int
7025 +__gnu_h2f_internal(unsigned short a, int ieee)
7026 +{
7027 + unsigned int sign = (unsigned int)(a & 0x8000) << 16;
7028 + int aexp = (a >> 10) & 0x1f;
7029 + unsigned int mantissa = a & 0x3ff;
7030 +
7031 + if (aexp == 0x1f && ieee)
7032 + return sign | 0x7f800000 | (mantissa << 13);
7033 +
7034 + if (aexp == 0)
7035 + {
7036 + int shift;
7037 +
7038 + if (mantissa == 0)
7039 + return sign;
7040 +
7041 + shift = __builtin_clz(mantissa) - 21;
7042 + mantissa <<= shift;
7043 + aexp = -shift;
7044 + }
7045 +
7046 + return sign | (((aexp + 0x70) << 23) + (mantissa << 13));
7047 +}
7048 +
7049 +unsigned short
7050 +__gnu_f2h_ieee(unsigned int a)
7051 +{
7052 + return __gnu_f2h_internal(a, 1);
7053 +}
7054 +
7055 +unsigned int
7056 +__gnu_h2f_ieee(unsigned short a)
7057 +{
7058 + return __gnu_h2f_internal(a, 1);
7059 +}
7060 +
7061 +unsigned short
7062 +__gnu_f2h_alternative(unsigned int x)
7063 +{
7064 + return __gnu_f2h_internal(x, 0);
7065 +}
7066 +
7067 +unsigned int
7068 +__gnu_h2f_alternative(unsigned short a)
7069 +{
7070 + return __gnu_h2f_internal(a, 0);
7071 +}
7072 --- a/gcc/config/arm/fpa.md
7073 +++ b/gcc/config/arm/fpa.md
7074 @@ -599,10 +599,10 @@
7075 {
7076 default:
7077 case 0: return \"mvf%?e\\t%0, %1\";
7078 - case 1: if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
7079 + case 1: if (TARGET_FPA_EMU2)
7080 return \"ldf%?e\\t%0, %1\";
7081 return \"lfm%?\\t%0, 1, %1\";
7082 - case 2: if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
7083 + case 2: if (TARGET_FPA_EMU2)
7084 return \"stf%?e\\t%1, %0\";
7085 return \"sfm%?\\t%1, 1, %0\";
7086 }
7087 --- /dev/null
7088 +++ b/gcc/config/arm/hwdiv.md
7089 @@ -0,0 +1,41 @@
7090 +;; ARM instruction patterns for hardware division
7091 +;; Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
7092 +;; Written by CodeSourcery, LLC.
7093 +;;
7094 +;; This file is part of GCC.
7095 +;;
7096 +;; GCC is free software; you can redistribute it and/or modify it
7097 +;; under the terms of the GNU General Public License as published by
7098 +;; the Free Software Foundation; either version 2, or (at your option)
7099 +;; any later version.
7100 +;;
7101 +;; GCC is distributed in the hope that it will be useful, but
7102 +;; WITHOUT ANY WARRANTY; without even the implied warranty of
7103 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
7104 +;; General Public License for more details.
7105 +;;
7106 +;; You should have received a copy of the GNU General Public License
7107 +;; along with GCC; see the file COPYING. If not, write to
7108 +;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
7109 +;; Boston, MA 02110-1301, USA.
7110 +
7111 +(define_insn "divsi3"
7112 + [(set (match_operand:SI 0 "s_register_operand" "=r")
7113 + (div:SI (match_operand:SI 1 "s_register_operand" "r")
7114 + (match_operand:SI 2 "s_register_operand" "r")))]
7115 + "arm_arch_hwdiv"
7116 + "sdiv%?\t%0, %1, %2"
7117 + [(set_attr "predicable" "yes")
7118 + (set_attr "insn" "sdiv")]
7119 +)
7120 +
7121 +(define_insn "udivsi3"
7122 + [(set (match_operand:SI 0 "s_register_operand" "=r")
7123 + (udiv:SI (match_operand:SI 1 "s_register_operand" "r")
7124 + (match_operand:SI 2 "s_register_operand" "r")))]
7125 + "arm_arch_hwdiv"
7126 + "udiv%?\t%0, %1, %2"
7127 + [(set_attr "predicable" "yes")
7128 + (set_attr "insn" "udiv")]
7129 +)
7130 +
7131 --- a/gcc/config/arm/ieee754-df.S
7132 +++ b/gcc/config/arm/ieee754-df.S
7133 @@ -83,7 +83,7 @@
7134 ARM_FUNC_START adddf3
7135 ARM_FUNC_ALIAS aeabi_dadd adddf3
7136
7137 -1: do_push {r4, r5, lr}
7138 +1: do_push (r4, r5, lr)
7139
7140 @ Look for zeroes, equal values, INF, or NAN.
7141 shift1 lsl, r4, xh, #1
7142 @@ -427,7 +427,7 @@
7143 do_it eq, t
7144 moveq r1, #0
7145 RETc(eq)
7146 - do_push {r4, r5, lr}
7147 + do_push (r4, r5, lr)
7148 mov r4, #0x400 @ initial exponent
7149 add r4, r4, #(52-1 - 1)
7150 mov r5, #0 @ sign bit is 0
7151 @@ -447,7 +447,7 @@
7152 do_it eq, t
7153 moveq r1, #0
7154 RETc(eq)
7155 - do_push {r4, r5, lr}
7156 + do_push (r4, r5, lr)
7157 mov r4, #0x400 @ initial exponent
7158 add r4, r4, #(52-1 - 1)
7159 ands r5, r0, #0x80000000 @ sign bit in r5
7160 @@ -481,7 +481,7 @@
7161 RETc(eq) @ we are done already.
7162
7163 @ value was denormalized. We can normalize it now.
7164 - do_push {r4, r5, lr}
7165 + do_push (r4, r5, lr)
7166 mov r4, #0x380 @ setup corresponding exponent
7167 and r5, xh, #0x80000000 @ move sign bit in r5
7168 bic xh, xh, #0x80000000
7169 @@ -508,9 +508,9 @@
7170 @ compatibility.
7171 adr ip, LSYM(f0_ret)
7172 @ Push pc as well so that RETLDM works correctly.
7173 - do_push {r4, r5, ip, lr, pc}
7174 + do_push (r4, r5, ip, lr, pc)
7175 #else
7176 - do_push {r4, r5, lr}
7177 + do_push (r4, r5, lr)
7178 #endif
7179
7180 mov r5, #0
7181 @@ -534,9 +534,9 @@
7182 @ compatibility.
7183 adr ip, LSYM(f0_ret)
7184 @ Push pc as well so that RETLDM works correctly.
7185 - do_push {r4, r5, ip, lr, pc}
7186 + do_push (r4, r5, ip, lr, pc)
7187 #else
7188 - do_push {r4, r5, lr}
7189 + do_push (r4, r5, lr)
7190 #endif
7191
7192 ands r5, ah, #0x80000000 @ sign bit in r5
7193 @@ -585,7 +585,7 @@
7194 @ Legacy code expects the result to be returned in f0. Copy it
7195 @ there as well.
7196 LSYM(f0_ret):
7197 - do_push {r0, r1}
7198 + do_push (r0, r1)
7199 ldfd f0, [sp], #8
7200 RETLDM
7201
7202 @@ -602,7 +602,7 @@
7203
7204 ARM_FUNC_START muldf3
7205 ARM_FUNC_ALIAS aeabi_dmul muldf3
7206 - do_push {r4, r5, r6, lr}
7207 + do_push (r4, r5, r6, lr)
7208
7209 @ Mask out exponents, trap any zero/denormal/INF/NAN.
7210 mov ip, #0xff
7211 @@ -910,7 +910,7 @@
7212 ARM_FUNC_START divdf3
7213 ARM_FUNC_ALIAS aeabi_ddiv divdf3
7214
7215 - do_push {r4, r5, r6, lr}
7216 + do_push (r4, r5, r6, lr)
7217
7218 @ Mask out exponents, trap any zero/denormal/INF/NAN.
7219 mov ip, #0xff
7220 @@ -1117,7 +1117,7 @@
7221 ARM_FUNC_ALIAS eqdf2 cmpdf2
7222 mov ip, #1 @ how should we specify unordered here?
7223
7224 -1: str ip, [sp, #-4]
7225 +1: str ip, [sp, #-4]!
7226
7227 @ Trap any INF/NAN first.
7228 mov ip, xh, lsl #1
7229 @@ -1129,7 +1129,8 @@
7230
7231 @ Test for equality.
7232 @ Note that 0.0 is equal to -0.0.
7233 -2: orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
7234 +2: add sp, sp, #4
7235 + orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
7236 do_it eq, e
7237 COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0
7238 teqne xh, yh @ or xh == yh
7239 @@ -1168,7 +1169,7 @@
7240 bne 2b
7241 orrs ip, yl, yh, lsl #12
7242 beq 2b @ y is not NAN
7243 -5: ldr r0, [sp, #-4] @ unordered return code
7244 +5: ldr r0, [sp], #4 @ unordered return code
7245 RET
7246
7247 FUNC_END gedf2
7248 @@ -1194,7 +1195,7 @@
7249
7250 @ The status-returning routines are required to preserve all
7251 @ registers except ip, lr, and cpsr.
7252 -6: do_push {r0, lr}
7253 +6: do_push (r0, lr)
7254 ARM_CALL cmpdf2
7255 @ Set the Z flag correctly, and the C flag unconditionally.
7256 cmp r0, #0
7257 --- a/gcc/config/arm/ieee754-sf.S
7258 +++ b/gcc/config/arm/ieee754-sf.S
7259 @@ -481,7 +481,7 @@
7260 and r3, ip, #0x80000000
7261
7262 @ Well, no way to make it shorter without the umull instruction.
7263 - do_push {r3, r4, r5}
7264 + do_push (r3, r4, r5)
7265 mov r4, r0, lsr #16
7266 mov r5, r1, lsr #16
7267 bic r0, r0, r4, lsl #16
7268 @@ -492,7 +492,7 @@
7269 mla r0, r4, r1, r0
7270 adds r3, r3, r0, lsl #16
7271 adc r1, ip, r0, lsr #16
7272 - do_pop {r0, r4, r5}
7273 + do_pop (r0, r4, r5)
7274
7275 #else
7276
7277 @@ -822,7 +822,7 @@
7278 ARM_FUNC_ALIAS eqsf2 cmpsf2
7279 mov ip, #1 @ how should we specify unordered here?
7280
7281 -1: str ip, [sp, #-4]
7282 +1: str ip, [sp, #-4]!
7283
7284 @ Trap any INF/NAN first.
7285 mov r2, r0, lsl #1
7286 @@ -834,7 +834,8 @@
7287
7288 @ Compare values.
7289 @ Note that 0.0 is equal to -0.0.
7290 -2: orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag
7291 +2: add sp, sp, #4
7292 + orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag
7293 do_it ne
7294 teqne r0, r1 @ if not 0 compare sign
7295 do_it pl
7296 @@ -858,7 +859,7 @@
7297 bne 2b
7298 movs ip, r1, lsl #9
7299 beq 2b @ r1 is not NAN
7300 -5: ldr r0, [sp, #-4] @ return unordered code.
7301 +5: ldr r0, [sp], #4 @ return unordered code.
7302 RET
7303
7304 FUNC_END gesf2
7305 @@ -881,7 +882,7 @@
7306
7307 @ The status-returning routines are required to preserve all
7308 @ registers except ip, lr, and cpsr.
7309 -6: do_push {r0, r1, r2, r3, lr}
7310 +6: do_push (r0, r1, r2, r3, lr)
7311 ARM_CALL cmpsf2
7312 @ Set the Z flag correctly, and the C flag unconditionally.
7313 cmp r0, #0
7314 --- a/gcc/config/arm/lib1funcs.asm
7315 +++ b/gcc/config/arm/lib1funcs.asm
7316 @@ -27,8 +27,17 @@
7317 #if defined(__ELF__) && defined(__linux__)
7318 .section .note.GNU-stack,"",%progbits
7319 .previous
7320 -#endif
7321 +#endif /* __ELF__ and __linux__ */
7322
7323 +#ifdef __ARM_EABI__
7324 +/* Some attributes that are common to all routines in this file. */
7325 + /* Tag_ABI_align8_needed: This code does not require 8-byte
7326 + alignment from the caller. */
7327 + /* .eabi_attribute 24, 0 -- default setting. */
7328 + /* Tag_ABI_align8_preserved: This code preserves 8-byte
7329 + alignment in any callee. */
7330 + .eabi_attribute 25, 1
7331 +#endif /* __ARM_EABI__ */
7332 /* ------------------------------------------------------------------------ */
7333
7334 /* We need to know what prefix to add to function names. */
7335 @@ -233,8 +242,8 @@
7336 .macro shift1 op, arg0, arg1, arg2
7337 \op \arg0, \arg1, \arg2
7338 .endm
7339 -#define do_push push
7340 -#define do_pop pop
7341 +#define do_push(...) push {__VA_ARGS__}
7342 +#define do_pop(...) pop {__VA_ARGS__}
7343 #define COND(op1, op2, cond) op1 ## op2 ## cond
7344 /* Perform an arithmetic operation with a variable shift operand. This
7345 requires two instructions and a scratch register on Thumb-2. */
7346 @@ -248,24 +257,133 @@
7347 .macro shift1 op, arg0, arg1, arg2
7348 mov \arg0, \arg1, \op \arg2
7349 .endm
7350 -#define do_push stmfd sp!,
7351 -#define do_pop ldmfd sp!,
7352 +#if defined(__low_irq_latency__)
7353 +#define do_push(...) \
7354 + _buildN1(do_push, _buildC1(__VA_ARGS__))( __VA_ARGS__)
7355 +#define _buildN1(BASE, X) _buildN2(BASE, X)
7356 +#define _buildN2(BASE, X) BASE##X
7357 +#define _buildC1(...) _buildC2(__VA_ARGS__,9,8,7,6,5,4,3,2,1)
7358 +#define _buildC2(a1,a2,a3,a4,a5,a6,a7,a8,a9,c,...) c
7359 +
7360 +#define do_push1(r1) str r1, [sp, #-4]!
7361 +#define do_push2(r1, r2) str r2, [sp, #-4]! ; str r1, [sp, #-4]!
7362 +#define do_push3(r1, r2, r3) str r3, [sp, #-4]! ; str r2, [sp, #-4]!; str r1, [sp, #-4]!
7363 +#define do_push4(r1, r2, r3, r4) \
7364 + do_push3 (r2, r3, r4);\
7365 + do_push1 (r1)
7366 +#define do_push5(r1, r2, r3, r4, r5) \
7367 + do_push4 (r2, r3, r4, r5);\
7368 + do_push1 (r1)
7369 +
7370 +#define do_pop(...) \
7371 +_buildN1(do_pop, _buildC1(__VA_ARGS__))( __VA_ARGS__)
7372 +
7373 +#define do_pop1(r1) ldr r1, [sp], #4
7374 +#define do_pop2(r1, r2) ldr r1, [sp], #4 ; ldr r2, [sp], #4
7375 +#define do_pop3(r1, r2, r3) ldr r1, [sp], #4 ; ldr r2, [sp], #4; ldr r3, [sp], #4
7376 +#define do_pop4(r1, r2, r3, r4) \
7377 + do_pop1 (r1);\
7378 + do_pop3 (r2, r3, r4)
7379 +#define do_pop5(r1, r2, r3, r4, r5) \
7380 + do_pop1 (r1);\
7381 + do_pop4 (r2, r3, r4, r5)
7382 +#else
7383 +#define do_push(...) stmfd sp!, { __VA_ARGS__}
7384 +#define do_pop(...) ldmfd sp!, {__VA_ARGS__}
7385 +#endif
7386 +
7387 +
7388 #define COND(op1, op2, cond) op1 ## cond ## op2
7389 .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
7390 \name \dest, \src1, \src2, \shiftop \shiftreg
7391 .endm
7392 #endif
7393
7394 -.macro ARM_LDIV0 name
7395 +#ifdef __ARM_EABI__
7396 +.macro ARM_LDIV0 name signed
7397 + cmp r0, #0
7398 + .ifc \signed, unsigned
7399 + movne r0, #0xffffffff
7400 + .else
7401 + movgt r0, #0x7fffffff
7402 + movlt r0, #0x80000000
7403 + .endif
7404 + b SYM (__aeabi_idiv0) __PLT__
7405 +.endm
7406 +#else
7407 +.macro ARM_LDIV0 name signed
7408 str lr, [sp, #-8]!
7409 98: cfi_push 98b - __\name, 0xe, -0x8, 0x8
7410 bl SYM (__div0) __PLT__
7411 mov r0, #0 @ About as wrong as it could be.
7412 RETLDM unwind=98b
7413 .endm
7414 +#endif
7415
7416
7417 -.macro THUMB_LDIV0 name
7418 +#ifdef __ARM_EABI__
7419 +.macro THUMB_LDIV0 name signed
7420 +#if defined(__ARM_ARCH_6M__)
7421 + .ifc \signed, unsigned
7422 + cmp r0, #0
7423 + beq 1f
7424 + mov r0, #0
7425 + mvn r0, r0 @ 0xffffffff
7426 +1:
7427 + .else
7428 + cmp r0, #0
7429 + beq 2f
7430 + blt 3f
7431 + mov r0, #0
7432 + mvn r0, r0
7433 + lsr r0, r0, #1 @ 0x7fffffff
7434 + b 2f
7435 +3: mov r0, #0x80
7436 + lsl r0, r0, #24 @ 0x80000000
7437 +2:
7438 + .endif
7439 + push {r0, r1, r2}
7440 + ldr r0, 4f
7441 + adr r1, 4f
7442 + add r0, r1
7443 + str r0, [sp, #8]
7444 + @ We know we are not on armv4t, so pop pc is safe.
7445 + pop {r0, r1, pc}
7446 + .align 2
7447 +4:
7448 + .word __aeabi_idiv0 - 4b
7449 +#elif defined(__thumb2__)
7450 + .syntax unified
7451 + .ifc \signed, unsigned
7452 + cbz r0, 1f
7453 + mov r0, #0xffffffff
7454 +1:
7455 + .else
7456 + cmp r0, #0
7457 + do_it gt
7458 + movgt r0, #0x7fffffff
7459 + do_it lt
7460 + movlt r0, #0x80000000
7461 + .endif
7462 + b.w SYM(__aeabi_idiv0) __PLT__
7463 +#else
7464 + .align 2
7465 + bx pc
7466 + nop
7467 + .arm
7468 + cmp r0, #0
7469 + .ifc \signed, unsigned
7470 + movne r0, #0xffffffff
7471 + .else
7472 + movgt r0, #0x7fffffff
7473 + movlt r0, #0x80000000
7474 + .endif
7475 + b SYM(__aeabi_idiv0) __PLT__
7476 + .thumb
7477 +#endif
7478 +.endm
7479 +#else
7480 +.macro THUMB_LDIV0 name signed
7481 push { r1, lr }
7482 98: cfi_push 98b - __\name, 0xe, -0x4, 0x8
7483 bl SYM (__div0)
7484 @@ -277,18 +395,19 @@
7485 pop { r1, pc }
7486 #endif
7487 .endm
7488 +#endif
7489
7490 .macro FUNC_END name
7491 SIZE (__\name)
7492 .endm
7493
7494 -.macro DIV_FUNC_END name
7495 +.macro DIV_FUNC_END name signed
7496 cfi_start __\name, LSYM(Lend_div0)
7497 LSYM(Ldiv0):
7498 #ifdef __thumb__
7499 - THUMB_LDIV0 \name
7500 + THUMB_LDIV0 \name \signed
7501 #else
7502 - ARM_LDIV0 \name
7503 + ARM_LDIV0 \name \signed
7504 #endif
7505 cfi_end LSYM(Lend_div0)
7506 FUNC_END \name
7507 @@ -413,6 +532,12 @@
7508 #define yyl r2
7509 #endif
7510
7511 +#ifdef __ARM_EABI__
7512 +.macro WEAK name
7513 + .weak SYM (__\name)
7514 +.endm
7515 +#endif
7516 +
7517 #ifdef __thumb__
7518 /* Register aliases. */
7519
7520 @@ -437,6 +562,43 @@
7521
7522 #if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
7523
7524 +#if defined (__thumb2__)
7525 + clz \curbit, \dividend
7526 + clz \result, \divisor
7527 + sub \curbit, \result, \curbit
7528 + rsb \curbit, \curbit, #31
7529 + adr \result, 1f
7530 + add \curbit, \result, \curbit, lsl #4
7531 + mov \result, #0
7532 + mov pc, \curbit
7533 +.p2align 3
7534 +1:
7535 + .set shift, 32
7536 + .rept 32
7537 + .set shift, shift - 1
7538 + cmp.w \dividend, \divisor, lsl #shift
7539 + nop.n
7540 + adc.w \result, \result, \result
7541 + it cs
7542 + subcs.w \dividend, \dividend, \divisor, lsl #shift
7543 + .endr
7544 +#elif defined(__ARM_TUNE_MARVELL_F__)
7545 + clz \curbit, \dividend
7546 + clz \result, \divisor
7547 + sub \curbit, \result, \curbit
7548 + mov \divisor, \divisor, lsl \curbit
7549 + rsb \curbit, \curbit, #31
7550 + mov \curbit, \curbit, lsl #2
7551 + mov \result, #0
7552 + add pc, pc, \curbit, lsl #2
7553 + nop
7554 + .rept 32
7555 + cmp \dividend, \divisor
7556 + subcs \dividend, \dividend, \divisor
7557 + mov \divisor, \divisor, lsr #1
7558 + adc \result, \result, \result
7559 + .endr
7560 +#else /* ! defined(__ARM_TUNE_MARVELL_F__) */
7561 clz \curbit, \dividend
7562 clz \result, \divisor
7563 sub \curbit, \result, \curbit
7564 @@ -452,6 +614,7 @@
7565 adc \result, \result, \result
7566 subcs \dividend, \dividend, \divisor, lsl #shift
7567 .endr
7568 +#endif /* defined(__ARM_TUNE_MARVELL_F__) */
7569
7570 #else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
7571 #if __ARM_ARCH__ >= 5
7572 @@ -499,18 +662,23 @@
7573
7574 @ Division loop
7575 1: cmp \dividend, \divisor
7576 + do_it hs, t
7577 subhs \dividend, \dividend, \divisor
7578 orrhs \result, \result, \curbit
7579 cmp \dividend, \divisor, lsr #1
7580 + do_it hs, t
7581 subhs \dividend, \dividend, \divisor, lsr #1
7582 orrhs \result, \result, \curbit, lsr #1
7583 cmp \dividend, \divisor, lsr #2
7584 + do_it hs, t
7585 subhs \dividend, \dividend, \divisor, lsr #2
7586 orrhs \result, \result, \curbit, lsr #2
7587 cmp \dividend, \divisor, lsr #3
7588 + do_it hs, t
7589 subhs \dividend, \dividend, \divisor, lsr #3
7590 orrhs \result, \result, \curbit, lsr #3
7591 cmp \dividend, #0 @ Early termination?
7592 + do_it ne, t
7593 movnes \curbit, \curbit, lsr #4 @ No, any more bits to do?
7594 movne \divisor, \divisor, lsr #4
7595 bne 1b
7596 @@ -799,13 +967,14 @@
7597 /* ------------------------------------------------------------------------ */
7598 #ifdef L_udivsi3
7599
7600 +#if defined(__ARM_ARCH_6M__)
7601 +
7602 FUNC_START udivsi3
7603 FUNC_ALIAS aeabi_uidiv udivsi3
7604
7605 -#ifdef __thumb__
7606 -
7607 cmp divisor, #0
7608 beq LSYM(Ldiv0)
7609 +LSYM(udivsi3_nodiv0):
7610 mov curbit, #1
7611 mov result, #0
7612
7613 @@ -819,9 +988,16 @@
7614 pop { work }
7615 RET
7616
7617 -#else /* ARM version. */
7618 +#else /* ARM/Thumb-2 version. */
7619 +
7620 + ARM_FUNC_START udivsi3
7621 + ARM_FUNC_ALIAS aeabi_uidiv udivsi3
7622
7623 + /* Note: if called via udivsi3_nodiv0, this will unnecessarily check
7624 + for division-by-zero a second time. */
7625 +LSYM(udivsi3_nodiv0):
7626 subs r2, r1, #1
7627 + do_it eq
7628 RETc(eq)
7629 bcc LSYM(Ldiv0)
7630 cmp r0, r1
7631 @@ -834,7 +1010,8 @@
7632 mov r0, r2
7633 RET
7634
7635 -11: moveq r0, #1
7636 +11: do_it eq, e
7637 + moveq r0, #1
7638 movne r0, #0
7639 RET
7640
7641 @@ -845,19 +1022,24 @@
7642
7643 #endif /* ARM version */
7644
7645 - DIV_FUNC_END udivsi3
7646 + DIV_FUNC_END udivsi3 unsigned
7647
7648 +#if defined(__ARM_ARCH_6M__)
7649 FUNC_START aeabi_uidivmod
7650 -#ifdef __thumb__
7651 + cmp r1, #0
7652 + beq LSYM(Ldiv0)
7653 push {r0, r1, lr}
7654 - bl SYM(__udivsi3)
7655 + bl LSYM(udivsi3_nodiv0)
7656 POP {r1, r2, r3}
7657 mul r2, r0
7658 sub r1, r1, r2
7659 bx r3
7660 #else
7661 +ARM_FUNC_START aeabi_uidivmod
7662 + cmp r1, #0
7663 + beq LSYM(Ldiv0)
7664 stmfd sp!, { r0, r1, lr }
7665 - bl SYM(__udivsi3)
7666 + bl LSYM(udivsi3_nodiv0)
7667 ldmfd sp!, { r1, r2, lr }
7668 mul r3, r2, r0
7669 sub r1, r1, r3
7670 @@ -904,19 +1086,20 @@
7671
7672 #endif /* ARM version. */
7673
7674 - DIV_FUNC_END umodsi3
7675 + DIV_FUNC_END umodsi3 unsigned
7676
7677 #endif /* L_umodsi3 */
7678 /* ------------------------------------------------------------------------ */
7679 #ifdef L_divsi3
7680
7681 +#if defined(__ARM_ARCH_6M__)
7682 +
7683 FUNC_START divsi3
7684 FUNC_ALIAS aeabi_idiv divsi3
7685
7686 -#ifdef __thumb__
7687 cmp divisor, #0
7688 beq LSYM(Ldiv0)
7689 -
7690 +LSYM(divsi3_nodiv0):
7691 push { work }
7692 mov work, dividend
7693 eor work, divisor @ Save the sign of the result.
7694 @@ -945,15 +1128,21 @@
7695 pop { work }
7696 RET
7697
7698 -#else /* ARM version. */
7699 +#else /* ARM/Thumb-2 version. */
7700
7701 + ARM_FUNC_START divsi3
7702 + ARM_FUNC_ALIAS aeabi_idiv divsi3
7703 +
7704 cmp r1, #0
7705 - eor ip, r0, r1 @ save the sign of the result.
7706 beq LSYM(Ldiv0)
7707 +LSYM(divsi3_nodiv0):
7708 + eor ip, r0, r1 @ save the sign of the result.
7709 + do_it mi
7710 rsbmi r1, r1, #0 @ loops below use unsigned.
7711 subs r2, r1, #1 @ division by 1 or -1 ?
7712 beq 10f
7713 movs r3, r0
7714 + do_it mi
7715 rsbmi r3, r0, #0 @ positive dividend value
7716 cmp r3, r1
7717 bls 11f
7718 @@ -963,14 +1152,18 @@
7719 ARM_DIV_BODY r3, r1, r0, r2
7720
7721 cmp ip, #0
7722 + do_it mi
7723 rsbmi r0, r0, #0
7724 RET
7725
7726 10: teq ip, r0 @ same sign ?
7727 + do_it mi
7728 rsbmi r0, r0, #0
7729 RET
7730
7731 -11: movlo r0, #0
7732 +11: do_it lo
7733 + movlo r0, #0
7734 + do_it eq,t
7735 moveq r0, ip, asr #31
7736 orreq r0, r0, #1
7737 RET
7738 @@ -979,24 +1172,30 @@
7739
7740 cmp ip, #0
7741 mov r0, r3, lsr r2
7742 + do_it mi
7743 rsbmi r0, r0, #0
7744 RET
7745
7746 #endif /* ARM version */
7747
7748 - DIV_FUNC_END divsi3
7749 + DIV_FUNC_END divsi3 signed
7750
7751 +#if defined(__ARM_ARCH_6M__)
7752 FUNC_START aeabi_idivmod
7753 -#ifdef __thumb__
7754 + cmp r1, #0
7755 + beq LSYM(Ldiv0)
7756 push {r0, r1, lr}
7757 - bl SYM(__divsi3)
7758 + bl LSYM(divsi3_nodiv0)
7759 POP {r1, r2, r3}
7760 mul r2, r0
7761 sub r1, r1, r2
7762 bx r3
7763 #else
7764 +ARM_FUNC_START aeabi_idivmod
7765 + cmp r1, #0
7766 + beq LSYM(Ldiv0)
7767 stmfd sp!, { r0, r1, lr }
7768 - bl SYM(__divsi3)
7769 + bl LSYM(divsi3_nodiv0)
7770 ldmfd sp!, { r1, r2, lr }
7771 mul r3, r2, r0
7772 sub r1, r1, r3
7773 @@ -1062,21 +1261,25 @@
7774
7775 #endif /* ARM version */
7776
7777 - DIV_FUNC_END modsi3
7778 + DIV_FUNC_END modsi3 signed
7779
7780 #endif /* L_modsi3 */
7781 /* ------------------------------------------------------------------------ */
7782 #ifdef L_dvmd_tls
7783
7784 - FUNC_START div0
7785 - FUNC_ALIAS aeabi_idiv0 div0
7786 - FUNC_ALIAS aeabi_ldiv0 div0
7787 -
7788 +#ifdef __ARM_EABI__
7789 + WEAK aeabi_idiv0
7790 + WEAK aeabi_ldiv0
7791 + FUNC_START aeabi_idiv0
7792 + FUNC_START aeabi_ldiv0
7793 RET
7794 -
7795 FUNC_END aeabi_ldiv0
7796 FUNC_END aeabi_idiv0
7797 +#else
7798 + FUNC_START div0
7799 + RET
7800 FUNC_END div0
7801 +#endif
7802
7803 #endif /* L_divmodsi_tools */
7804 /* ------------------------------------------------------------------------ */
7805 @@ -1086,16 +1289,49 @@
7806 /* Constant taken from <asm/signal.h>. */
7807 #define SIGFPE 8
7808
7809 +#ifdef __ARM_EABI__
7810 + WEAK aeabi_idiv0
7811 + WEAK aeabi_ldiv0
7812 + ARM_FUNC_START aeabi_idiv0
7813 + ARM_FUNC_START aeabi_ldiv0
7814 +#else
7815 ARM_FUNC_START div0
7816 +#endif
7817
7818 - do_push {r1, lr}
7819 + do_push (r1, lr)
7820 mov r0, #SIGFPE
7821 bl SYM(raise) __PLT__
7822 RETLDM r1
7823
7824 +#ifdef __ARM_EABI__
7825 + FUNC_END aeabi_ldiv0
7826 + FUNC_END aeabi_idiv0
7827 +#else
7828 FUNC_END div0
7829 +#endif
7830
7831 #endif /* L_dvmd_lnx */
7832 +#ifdef L_clear_cache
7833 +#if defined __ARM_EABI__ && defined __linux__
7834 +@ EABI GNU/Linux call to cacheflush syscall.
7835 + ARM_FUNC_START clear_cache
7836 + do_push (r7)
7837 +#if __ARM_ARCH__ >= 7 || defined(__ARM_ARCH_6T2__)
7838 + movw r7, #2
7839 + movt r7, #0xf
7840 +#else
7841 + mov r7, #0xf0000
7842 + add r7, r7, #2
7843 +#endif
7844 + mov r2, #0
7845 + swi 0
7846 + do_pop (r7)
7847 + RET
7848 + FUNC_END clear_cache
7849 +#else
7850 +#error "This is only for ARM EABI GNU/Linux"
7851 +#endif
7852 +#endif /* L_clear_cache */
7853 /* ------------------------------------------------------------------------ */
7854 /* Dword shift operations. */
7855 /* All the following Dword shift variants rely on the fact that
7856 @@ -1292,7 +1528,7 @@
7857 push {r4, lr}
7858 # else
7859 ARM_FUNC_START clzdi2
7860 - do_push {r4, lr}
7861 + do_push (r4, lr)
7862 # endif
7863 cmp xxh, #0
7864 bne 1f
7865 --- a/gcc/config/arm/linux-eabi.h
7866 +++ b/gcc/config/arm/linux-eabi.h
7867 @@ -66,22 +66,14 @@
7868 /* At this point, bpabi.h will have clobbered LINK_SPEC. We want to
7869 use the GNU/Linux version, not the generic BPABI version. */
7870 #undef LINK_SPEC
7871 -#define LINK_SPEC LINUX_TARGET_LINK_SPEC
7872 +#define LINK_SPEC LINUX_TARGET_LINK_SPEC BE8_LINK_SPEC
7873
7874 /* Use the default LIBGCC_SPEC, not the version in linux-elf.h, as we
7875 do not use -lfloat. */
7876 #undef LIBGCC_SPEC
7877
7878 -/* Clear the instruction cache from `beg' to `end'. This makes an
7879 - inline system call to SYS_cacheflush. */
7880 +/* Clear the instruction cache from `beg' to `end'. This is
7881 + implemented in lib1funcs.asm, so ensure an error if this definition
7882 + is used. */
7883 #undef CLEAR_INSN_CACHE
7884 -#define CLEAR_INSN_CACHE(BEG, END) \
7885 -{ \
7886 - register unsigned long _beg __asm ("a1") = (unsigned long) (BEG); \
7887 - register unsigned long _end __asm ("a2") = (unsigned long) (END); \
7888 - register unsigned long _flg __asm ("a3") = 0; \
7889 - register unsigned long _scno __asm ("r7") = 0xf0002; \
7890 - __asm __volatile ("swi 0 @ sys_cacheflush" \
7891 - : "=r" (_beg) \
7892 - : "0" (_beg), "r" (_end), "r" (_flg), "r" (_scno)); \
7893 -}
7894 +#define CLEAR_INSN_CACHE(BEG, END) not used
7895 --- a/gcc/config/arm/linux-elf.h
7896 +++ b/gcc/config/arm/linux-elf.h
7897 @@ -98,7 +98,7 @@
7898
7899 /* NWFPE always understands FPA instructions. */
7900 #undef FPUTYPE_DEFAULT
7901 -#define FPUTYPE_DEFAULT FPUTYPE_FPA_EMU3
7902 +#define FPUTYPE_DEFAULT "fpe3"
7903
7904 /* Call the function profiler with a given profile label. */
7905 #undef ARM_FUNCTION_PROFILER
7906 --- /dev/null
7907 +++ b/gcc/config/arm/marvell-f.md
7908 @@ -0,0 +1,365 @@
7909 +;; Marvell 2850 pipeline description
7910 +;; Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
7911 +;; Written by Marvell and CodeSourcery, Inc.
7912 +
7913 +;; This file is part of GCC.
7914 +
7915 +;; GCC is free software; you can redistribute it and/or modify it
7916 +;; under the terms of the GNU General Public License as published
7917 +;; by the Free Software Foundation; either version 2, or (at your
7918 +;; option) any later version.
7919 +
7920 +;; GCC is distributed in the hope that it will be useful, but WITHOUT
7921 +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
7922 +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
7923 +;; License for more details.
7924 +
7925 +;; You should have received a copy of the GNU General Public License
7926 +;; along with GCC; see the file COPYING. If not, write to
7927 +;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
7928 +;; Boston, MA 02110-1301, USA.
7929 +
7930 +;; This automaton provides a pipeline description for the Marvell
7931 +;; 2850 core.
7932 +;;
7933 +;; The model given here assumes that the condition for all conditional
7934 +;; instructions is "true", i.e., that all of the instructions are
7935 +;; actually executed.
7936 +
7937 +(define_automaton "marvell_f")
7938 +
7939 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
7940 +;; Pipelines
7941 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
7942 +
7943 +;; This is a dual-issue processor with three pipelines:
7944 +;;
7945 +;; 1. Arithmetic and load/store pipeline A1.
7946 +;; Issue | E1 | E2 | OF | WR | WB for load-store instructions
7947 +;; Issue | E1 | E2 | WB for arithmetic instructions
7948 +;;
7949 +;; 2. Arithmetic pipeline A2.
7950 +;; Issue | E1 | E2 | WB
7951 +;;
7952 +;; 3. Multiply and multiply-accumulate pipeline.
7953 +;; Issue | MAC1 | MAC2 | MAC3 | WB
7954 +;;
7955 +;; There are various bypasses modelled to a greater or lesser extent.
7956 +;;
7957 +;; Latencies in this file correspond to the number of cycles after
7958 +;; the issue stage that it takes for the result of the instruction to
7959 +;; be computed, or for its side-effects to occur.
7960 +
7961 +(define_cpu_unit "a1_e1,a1_e2,a1_of,a1_wr,a1_wb" "marvell_f") ; ALU 1
7962 +(define_cpu_unit "a2_e1,a2_e2,a2_wb" "marvell_f") ; ALU 2
7963 +(define_cpu_unit "m_1,m_2,m_3,m_wb" "marvell_f") ; MAC
7964 +
7965 +;; We define an SRAM cpu unit to enable us to describe conflicts
7966 +;; between loads at the E2 stage and stores at the WR stage.
7967 +
7968 +(define_cpu_unit "sram" "marvell_f")
7969 +
7970 +;; Handling of dual-issue constraints.
7971 +;;
7972 +;; Certain pairs of instructions can be issued in parallel, and certain
7973 +;; pairs cannot. We divide a subset of the instructions into groups as
7974 +;; follows.
7975 +;;
7976 +;; - data processing 1 (mov, mvn);
7977 +;; - data processing 2 (adc, add, and, bic, cmn, cmp, eor, orr, rsb,
7978 +;; rsc, sbc, sub, teq, tst);
7979 +;; - load single (ldr, ldrb, ldrbt, ldrt, ldrh, ldrsb, ldrsh);
7980 +;; - store single (str, strb, strbt, strt, strh);
7981 +;; - swap (swp, swpb);
7982 +;; - pld;
7983 +;; - count leading zeros and DSP add/sub (clz, qadd, qdadd, qsub, qdsub);
7984 +;; - multiply 2 (mul, muls, smull, umull, smulxy, smulls, umulls);
7985 +;; - multiply 3 (mla, mlas, smlal, umlal, smlaxy, smlalxy, smlawx,
7986 +;; smlawy, smlals, umlals);
7987 +;; - branches (b, bl, blx, bx).
7988 +;;
7989 +;; Ignoring conditional execution, it is a good approximation to the core
7990 +;; to model that two instructions may only be issued in parallel if the
7991 +;; following conditions are met.
7992 +;; I. The instructions both fall into one of the above groups and their
7993 +;; corresponding groups have an entry in the matrix below that is not X.
7994 +;; II. The second instruction does not read any register updated by the
7995 +;; first instruction (already enforced by the GCC scheduler).
7996 +;; III. The second instruction does not need the carry flag updated by the
7997 +;; first instruction. Currently we do not model this.
7998 +;;
7999 +;; First Second instruction group
8000 +;; insn
8001 +;; DP1 DP2 L S SWP PLD CLZ M2 M3 B
8002 +;;
8003 +;; DP1 ok ok ok ok ok ok ok ok ok ok
8004 +;; DP2(1) ok ok ok ok ok ok ok ok ok ok
8005 +;; DP2(2) ok (2) ok (4) ok ok ok ok X ok
8006 +;; L }
8007 +;; SWP } ok ok X X X X ok ok ok ok
8008 +;; PLD }
8009 +;; S(3) ok ok X X X X ok ok ok ok
8010 +;; S(4) ok (2) X X X X ok ok X ok
8011 +;; CLZ ok ok ok ok ok ok ok ok ok ok
8012 +;; M2 ok ok ok ok ok ok ok X X ok
8013 +;; M3 ok (2) ok (4) ok ok ok X X ok
8014 +;; B ok ok ok ok ok ok ok ok ok ok
8015 +;;
8016 +;; (1) without register shift
8017 +;; (2) with register shift
8018 +;; (3) with immediate offset
8019 +;; (4) with register offset
8020 +;;
8021 +;; We define a fake cpu unit "reg_shift_lock" to enforce constraints
8022 +;; between instructions in groups DP2(2) and M3. All other
8023 +;; constraints are enforced automatically by virtue of the limited
8024 +;; number of pipelines available for the various operations, with
8025 +;; the exception of constraints involving S(4) that we do not model.
8026 +
8027 +(define_cpu_unit "reg_shift_lock" "marvell_f")
8028 +
8029 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
8030 +;; ALU instructions
8031 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
8032 +
8033 +;; 1. Certain logic operations can be retired after the E1 stage if
8034 +;; the pipeline is not already retiring another instruction. In this
8035 +;; model we assume this behaviour always holds for mov, mvn, and, orr, eor
8036 +;; instructions. If a register shift is involved and the instruction is
8037 +;; not mov or mvn, then a dual-issue constraint must be enforced.
8038 +
8039 +;; The first two cases are separate so they can be identified for
8040 +;; bypasses below.
8041 +
8042 +(define_insn_reservation "marvell_f_alu_early_retire" 1
8043 + (and (eq_attr "tune" "marvell_f")
8044 + (and (eq_attr "type" "alu")
8045 + (eq_attr "insn" "mov,mvn,and,orr,eor")))
8046 + "(a1_e1,a1_wb)|(a2_e1,a2_wb)")
8047 +
8048 +(define_insn_reservation "marvell_f_alu_early_retire_shift" 1
8049 + (and (eq_attr "tune" "marvell_f")
8050 + (and (eq_attr "type" "alu_shift_reg")
8051 + (eq_attr "insn" "mov,mvn,and,orr,eor")))
8052 + "(a1_e1,a1_wb)|(a2_e1,a2_wb)")
8053 +
8054 +(define_insn_reservation "marvell_f_alu_early_retire_reg_shift1" 1
8055 + (and (eq_attr "tune" "marvell_f")
8056 + (and (eq_attr "type" "alu_shift_reg")
8057 + (eq_attr "insn" "mov,mvn")))
8058 + "(a1_e1,a1_wb)|(a2_e1,a2_wb)")
8059 +
8060 +(define_insn_reservation "marvell_f_alu_early_retire_reg_shift2" 1
8061 + (and (eq_attr "tune" "marvell_f")
8062 + (and (eq_attr "type" "alu_shift_reg")
8063 + (eq_attr "insn" "and,orr,eor")))
8064 + "(reg_shift_lock+a1_e1,a1_wb)|(reg_shift_lock+a2_e1,a2_wb)")
8065 +
8066 +;; 2. ALU operations with no shifted operand. These bypass the E1 stage if
8067 +;; the E2 stage of the corresponding pipeline is clear; here, we always
8068 +;; model this scenario [*]. We give the operation a latency of 1 yet reserve
8069 +;; both E1 and E2 for it (thus preventing the GCC scheduler, in the case
8070 +;; where both E1 and E2 of one pipeline are clear, from issuing one
8071 +;; instruction to each).
8072 +;;
8073 +;; [*] The non-bypass case is a latency of two, reserving E1 on the first
8074 +;; cycle and E2 on the next. Due to the way the scheduler works we
8075 +;; have to choose between taking this as the default and taking the
8076 +;; above case (with latency one) as the default; we choose the latter.
8077 +
8078 +(define_insn_reservation "marvell_f_alu_op_bypass_e1" 1
8079 + (and (eq_attr "tune" "marvell_f")
8080 + (and (eq_attr "type" "alu")
8081 + (not (eq_attr "insn" "mov,mvn,and,orr,eor"))))
8082 + "(a1_e1+a1_e2,a1_wb)|(a2_e1+a2_e2,a2_wb)")
8083 +
8084 +;; 3. ALU operations with a shift-by-constant operand.
8085 +
8086 +(define_insn_reservation "marvell_f_alu_shift_op" 2
8087 + (and (eq_attr "tune" "marvell_f")
8088 + (and (eq_attr "type" "alu_shift")
8089 + (not (eq_attr "insn" "mov,mvn,and,orr,eor"))))
8090 + "(a1_e1,a1_e2,a1_wb)|(a2_e1,a2_e2,a2_wb)")
8091 +
8092 +;; 4. ALU operations with a shift-by-register operand. Since the
8093 +;; instruction is never mov or mvn, a dual-issue constraint must
8094 +;; be enforced.
8095 +
8096 +(define_insn_reservation "marvell_f_alu_shift_reg_op" 2
8097 + (and (eq_attr "tune" "marvell_f")
8098 + (and (eq_attr "type" "alu_shift_reg")
8099 + (not (eq_attr "insn" "mov,mvn,and,orr,eor"))))
8100 + "(reg_shift_lock+a1_e1,a1_e2,a1_wb)|(reg_shift_lock+a2_e1,a2_e2,a2_wb)")
8101 +
8102 +;; Given an ALU operation with shift (I1) followed by another ALU
8103 +;; operation (I2), with I2 depending on the destination register Rd of I1
8104 +;; and with I2 not using that value as the amount or the starting value for
8105 +;; a shift, then I1 and I2 may be issued to the same pipeline on
8106 +;; consecutive cycles. In terms of this model that corresponds to I1
8107 +;; having a latency of one cycle. There are three cases for various
8108 +;; I1 and I2 as follows.
8109 +
8110 +;; (a) I1 has a constant or register shift and I2 doesn't have a shift at all.
8111 +(define_bypass 1 "marvell_f_alu_shift_op,\
8112 + marvell_f_alu_shift_reg_op"
8113 + "marvell_f_alu_op_bypass_e1,marvell_f_alu_early_retire")
8114 +
8115 +;; (b) I1 has a constant or register shift and I2 has a constant shift.
8116 +;; Rd must not provide the starting value for the shift.
8117 +(define_bypass 1 "marvell_f_alu_shift_op,\
8118 + marvell_f_alu_shift_reg_op"
8119 + "marvell_f_alu_shift_op,marvell_f_alu_early_retire_shift"
8120 + "arm_no_early_alu_shift_value_dep")
8121 +
8122 +;; (c) I1 has a constant or register shift and I2 has a register shift.
8123 +;; Rd must not provide the amount by which to shift.
8124 +(define_bypass 1 "marvell_f_alu_shift_op,\
8125 + marvell_f_alu_shift_reg_op"
8126 + "marvell_f_alu_shift_reg_op,\
8127 + marvell_f_alu_early_retire_reg_shift1,\
8128 + marvell_f_alu_early_retire_reg_shift2"
8129 + "arm_no_early_alu_shift_dep")
8130 +
8131 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
8132 +;; Multiplication instructions
8133 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
8134 +
8135 +;; Multiplication instructions in group "Multiply 2".
8136 +
8137 +(define_insn_reservation "marvell_f_multiply_2" 3
8138 + (and (eq_attr "tune" "marvell_f")
8139 + (eq_attr "insn" "mul,muls,smull,umull,smulxy,smulls,umulls"))
8140 + "m_1,m_2,m_3,m_wb")
8141 +
8142 +;; Multiplication instructions in group "Multiply 3". There is a
8143 +;; dual-issue constraint with non-multiplication ALU instructions
8144 +;; to be respected here.
8145 +
8146 +(define_insn_reservation "marvell_f_multiply_3" 3
8147 + (and (eq_attr "tune" "marvell_f")
8148 + (eq_attr "insn" "mla,mlas,smlal,umlal,smlaxy,smlalxy,smlawx,\
8149 + smlawy,smlals,umlals"))
8150 + "reg_shift_lock+m_1,m_2,m_3,m_wb")
8151 +
8152 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
8153 +;; Branch instructions
8154 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
8155 +
8156 +;; Conditional backward b instructions can have a zero-cycle penalty, and
8157 +;; other conditional b and bl instructions have a one-cycle penalty if
8158 +;; predicted correctly. Currently we model the zero-cycle case for all
8159 +;; branches.
8160 +
8161 +(define_insn_reservation "marvell_f_branches" 0
8162 + (and (eq_attr "tune" "marvell_f")
8163 + (eq_attr "type" "branch"))
8164 + "nothing")
8165 +
8166 +;; Call latencies are not predictable; a semi-arbitrary very large
8167 +;; number is used as "positive infinity" for such latencies.
8168 +
8169 +(define_insn_reservation "marvell_f_call" 32
8170 + (and (eq_attr "tune" "marvell_f")
8171 + (eq_attr "type" "call"))
8172 + "nothing")
8173 +
8174 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
8175 +;; Load/store instructions
8176 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
8177 +
8178 +;; The models for load/store instructions do not accurately describe
8179 +;; the difference between operations with a base register writeback.
8180 +;; These models assume that all memory references hit in dcache.
8181 +
8182 +;; 1. Load/store for single registers.
8183 +
8184 +;; The worst case for a load is when the load result is needed in E1
8185 +;; (for example for a register shift), giving a latency of four. Loads
8186 +;; skip E1 and access memory at the E2 stage.
8187 +
8188 +(define_insn_reservation "marvell_f_load1" 4
8189 + (and (eq_attr "tune" "marvell_f")
8190 + (eq_attr "type" "load1,load_byte"))
8191 + "a1_e2+sram,a1_of,a1_wr,a1_wb")
8192 +
8193 +;; The result for a load may be bypassed (to be available at the same
8194 +;; time as the load arrives in the WR stage, so effectively at the OF
8195 +;; stage) to the Rn operand at E2 with a latency of two. The result may
8196 +;; be bypassed to a non-Rn operand at E2 with a latency of three. For
8197 +;; instructions without shifts, detection of an Rn bypass situation is
8198 +;; difficult (because some of the instruction patterns switch their
8199 +;; operands), and so we do not model that here. For instructions with
8200 +;; shifts, the operand used at E2 will always be Rn, and so we can
8201 +;; model the latency-two bypass for these.
8202 +
8203 +(define_bypass 2 "marvell_f_load1"
8204 + "marvell_f_alu_shift_op"
8205 + "arm_no_early_alu_shift_value_dep")
8206 +
8207 +(define_bypass 2 "marvell_f_load1"
8208 + "marvell_f_alu_shift_reg_op"
8209 + "arm_no_early_alu_shift_dep")
8210 +
8211 +;; Stores write at the WR stage and loads read at the E2 stage, giving
8212 +;; a store latency of three.
8213 +
8214 +(define_insn_reservation "marvell_f_store1" 3
8215 + (and (eq_attr "tune" "marvell_f")
8216 + (eq_attr "type" "store1"))
8217 + "a1_e2,a1_of,a1_wr+sram,a1_wb")
8218 +
8219 +;; 2. Load/store for two consecutive registers. These may be dealt
8220 +;; with in the same number of cycles as single loads and stores.
8221 +
8222 +(define_insn_reservation "marvell_f_load2" 4
8223 + (and (eq_attr "tune" "marvell_f")
8224 + (eq_attr "type" "load2"))
8225 + "a1_e2+sram,a1_of,a1_wr,a1_wb")
8226 +
8227 +(define_insn_reservation "marvell_f_store2" 3
8228 + (and (eq_attr "tune" "marvell_f")
8229 + (eq_attr "type" "store2"))
8230 + "a1_e2,a1_of,a1_wr+sram,a1_wb")
8231 +
8232 +;; The first word of a doubleword load is eligible for the latency-two
8233 +;; bypass described above for single loads, but this is not modelled here.
8234 +;; We do however assume that either word may also be bypassed with
8235 +;; latency three for ALU operations with shifts (where the shift value and
8236 +;; amount do not depend on the loaded value) and latency four for ALU
8237 +;; operations without shifts. The latency four case is of course the default.
8238 +
8239 +(define_bypass 3 "marvell_f_load2"
8240 + "marvell_f_alu_shift_op"
8241 + "arm_no_early_alu_shift_value_dep")
8242 +
8243 +(define_bypass 3 "marvell_f_load2"
8244 + "marvell_f_alu_shift_reg_op"
8245 + "arm_no_early_alu_shift_dep")
8246 +
8247 +;; 3. Load/store for more than two registers.
8248 +
8249 +;; These instructions stall for an extra cycle in the decode stage;
8250 +;; individual load/store instructions for each register are then issued.
8251 +;; The load/store multiple instruction itself is removed from the decode
8252 +;; stage at the same time as the final load/store instruction is issued.
8253 +;; To complicate matters, pairs of loads/stores referencing two
8254 +;; consecutive registers will be issued together as doubleword operations.
8255 +;; We model a 3-word load as an LDR plus an LDRD, and a 4-word load
8256 +;; as two LDRDs; thus, these are allocated the same latencies (the
8257 +;; latency for two consecutive loads plus one for the setup stall).
8258 +;; The extra stall is modelled by reserving E1.
8259 +
8260 +(define_insn_reservation "marvell_f_load3_4" 6
8261 + (and (eq_attr "tune" "marvell_f")
8262 + (eq_attr "type" "load3,load4"))
8263 + "a1_e1,a1_e1+a1_e2+sram,a1_e2+sram+a1_of,a1_of+a1_wr,a1_wr+a1_wb,a1_wb")
8264 +
8265 +;; Bypasses are possible for ldm as for single loads, but we do not
8266 +;; model them here since the order of the constituent loads is
8267 +;; difficult to predict.
8268 +
8269 +(define_insn_reservation "marvell_f_store3_4" 5
8270 + (and (eq_attr "tune" "marvell_f")
8271 + (eq_attr "type" "store3,store4"))
8272 + "a1_e1,a1_e1+a1_e2,a1_e2+a1_of,a1_of+a1_wr+sram,a1_wr+sram+a1_wb,a1_wb")
8273 +
8274 --- /dev/null
8275 +++ b/gcc/config/arm/marvell-f-vfp.md
8276 @@ -0,0 +1,153 @@
8277 +;; Marvell 2850 VFP pipeline description
8278 +;; Copyright (C) 2007 Free Software Foundation, Inc.
8279 +;; Written by CodeSourcery, Inc.
8280 +
8281 +;; This file is part of GCC.
8282 +
8283 +;; GCC is distributed in the hope that it will be useful, but WITHOUT
8284 +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
8285 +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
8286 +;; License for more details.
8287 +
8288 +;; You should have received a copy of the GNU General Public License
8289 +;; along with GCC; see the file COPYING. If not, write to
8290 +;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
8291 +;; Boston, MA 02110-1301, USA.
8292 +
8293 +;; This automaton provides a pipeline description for the Marvell
8294 +;; 2850 core.
8295 +;;
8296 +;; The model given here assumes that the condition for all conditional
8297 +;; instructions is "true", i.e., that all of the instructions are
8298 +;; actually executed.
8299 +
8300 +(define_automaton "marvell_f_vfp")
8301 +
8302 +;; This is a single-issue VFPv2 implementation with the following execution
8303 +;; units:
8304 +;;
8305 +;; 1. Addition/subtraction unit; takes three cycles, pipelined.
8306 +;; 2. Multiplication unit; takes four cycles, pipelined.
8307 +;; 3. Add buffer, used for multiply-accumulate (see below).
8308 +;; 4. Divide/square root unit, not pipelined.
8309 +;; For single-precision: takes sixteen cycles, can accept another insn
8310 +;; after fifteen cycles.
8311 +;; For double-precision: takes thirty-one cycles, can accept another insn
8312 +;; after thirty cycles.
8313 +;; 5. Single-cycle unit, pipelined.
8314 +;; This does absolute value/copy/negate/compare in one cycle and
8315 +;; conversion in two cycles.
8316 +;;
8317 +;; When all three operands of a multiply-accumulate instruction are ready,
8318 +;; one is issued to the add buffer (which can hold six operands in a FIFO)
8319 +;; and the two to be multiplied are issued to the multiply unit. After
8320 +;; four cycles in the multiply unit, one cycle is taken to issue the
8321 +;; operand from the add buffer plus the multiplication result to the
8322 +;; addition/subtraction unit. That issue takes priority over any add/sub
8323 +;; instruction waiting at the normal issue stage, but may be performed in
8324 +;; parallel with the issue of a non-add/sub instruction. The total time
8325 +;; for a multiply-accumulate instruction to pass through the execution
8326 +;; units is hence eight cycles.
8327 +;;
8328 +;; We do not need to explicitly model the add buffer because it can
8329 +;; always issue the instruction at the head of its FIFO (due to the above
8330 +;; priority rule) and there are more spaces in the add buffer (six) than
8331 +;; there are stages (four) in the multiplication unit.
8332 +;;
8333 +;; Two instructions may be retired at once from the head of an 8-entry
8334 +;; reorder buffer. Data from these first two instructions only may be
8335 +;; forwarded to the inputs of the issue unit. We assume that the
8336 +;; pressure on the reorder buffer will be sufficiently low that every
8337 +;; instruction entering it will be eligible for data forwarding. Since
8338 +;; data is forwarded to the issue unit and not the execution units (so
8339 +;; for example single-cycle instructions cannot be issued back-to-back),
8340 +;; the latencies given below are the cycle counts above plus one.
8341 +
8342 +(define_cpu_unit "mf_vfp_issue" "marvell_f_vfp")
8343 +(define_cpu_unit "mf_vfp_add" "marvell_f_vfp")
8344 +(define_cpu_unit "mf_vfp_mul" "marvell_f_vfp")
8345 +(define_cpu_unit "mf_vfp_div" "marvell_f_vfp")
8346 +(define_cpu_unit "mf_vfp_single_cycle" "marvell_f_vfp")
8347 +
8348 +;; An attribute to indicate whether our reservations are applicable.
8349 +
8350 +(define_attr "marvell_f_vfp" "yes,no"
8351 + (const (if_then_else (and (eq_attr "tune" "marvell_f")
8352 + (eq_attr "fpu" "vfp"))
8353 + (const_string "yes") (const_string "no"))))
8354 +
8355 +;; Reservations of functional units. The nothing*2 reservations at the
8356 +;; start of many of the reservation strings correspond to the decode
8357 +;; stages. We need to have these reservations so that we can correctly
8358 +;; reserve parts of the core's A1 pipeline for loads and stores. For
8359 +;; that case (since loads skip E1) the pipelines line up thus:
8360 +;; A1 pipe: Issue E2 OF WR WB ...
8361 +;; VFP pipe: Fetch Decode1 Decode2 Issue Execute1 ...
8362 +;; For a load, we need to make a reservation of E2, and thus we must
8363 +;; use Decode1 as the starting point for all VFP reservations here.
8364 +;;
8365 +;; For reservations of pipelined VFP execution units we only reserve
8366 +;; the execution unit for the first execution cycle, omitting any trailing
8367 +;; "nothing" reservations.
8368 +
8369 +(define_insn_reservation "marvell_f_vfp_add" 4
8370 + (and (eq_attr "marvell_f_vfp" "yes")
8371 + (eq_attr "type" "farith"))
8372 + "nothing*2,mf_vfp_issue,mf_vfp_add")
8373 +
8374 +(define_insn_reservation "marvell_f_vfp_mul" 5
8375 + (and (eq_attr "marvell_f_vfp" "yes")
8376 + (eq_attr "type" "fmuls,fmuld"))
8377 + "nothing*2,mf_vfp_issue,mf_vfp_mul")
8378 +
8379 +(define_insn_reservation "marvell_f_vfp_divs" 17
8380 + (and (eq_attr "marvell_f_vfp" "yes")
8381 + (eq_attr "type" "fdivs"))
8382 + "nothing*2,mf_vfp_issue,mf_vfp_div*15")
8383 +
8384 +(define_insn_reservation "marvell_f_vfp_divd" 32
8385 + (and (eq_attr "marvell_f_vfp" "yes")
8386 + (eq_attr "type" "fdivd"))
8387 + "nothing*2,mf_vfp_issue,mf_vfp_div*30")
8388 +
8389 +;; The DFA lookahead is small enough that the "add" reservation here
8390 +;; will always take priority over any addition/subtraction instruction
8391 +;; issued five cycles after the multiply-accumulate instruction, as
8392 +;; required.
8393 +(define_insn_reservation "marvell_f_vfp_mac" 9
8394 + (and (eq_attr "marvell_f_vfp" "yes")
8395 + (eq_attr "type" "fmacs,fmacd"))
8396 + "nothing*2,mf_vfp_issue,mf_vfp_mul,nothing*4,mf_vfp_add")
8397 +
8398 +(define_insn_reservation "marvell_f_vfp_single" 2
8399 + (and (eq_attr "marvell_f_vfp" "yes")
8400 + (eq_attr "type" "ffarith"))
8401 + "nothing*2,mf_vfp_issue,mf_vfp_single_cycle")
8402 +
8403 +(define_insn_reservation "marvell_f_vfp_convert" 3
8404 + (and (eq_attr "marvell_f_vfp" "yes")
8405 + (eq_attr "type" "f_cvt"))
8406 + "nothing*2,mf_vfp_issue,mf_vfp_single_cycle")
8407 +
8408 +(define_insn_reservation "marvell_f_vfp_load" 2
8409 + (and (eq_attr "marvell_f_vfp" "yes")
8410 + (eq_attr "type" "f_loads,f_loadd"))
8411 + "a1_e2+sram,a1_of,a1_wr+mf_vfp_issue,a1_wb+mf_vfp_single_cycle")
8412 +
8413 +(define_insn_reservation "marvell_f_vfp_from_core" 2
8414 + (and (eq_attr "marvell_f_vfp" "yes")
8415 + (eq_attr "type" "r_2_f"))
8416 + "a1_e2,a1_of,a1_wr+mf_vfp_issue,a1_wb+mf_vfp_single_cycle")
8417 +
8418 +;; The interaction between the core and VFP pipelines during VFP
8419 +;; store operations and core <-> VFP moves is not clear, so we guess.
8420 +(define_insn_reservation "marvell_f_vfp_store" 3
8421 + (and (eq_attr "marvell_f_vfp" "yes")
8422 + (eq_attr "type" "f_stores,f_stored"))
8423 + "a1_e2,a1_of,mf_vfp_issue,a1_wr+sram+mf_vfp_single_cycle")
8424 +
8425 +(define_insn_reservation "marvell_f_vfp_to_core" 4
8426 + (and (eq_attr "marvell_f_vfp" "yes")
8427 + (eq_attr "type" "f_2_r"))
8428 + "a1_e2,a1_of,a1_wr+mf_vfp_issue,a1_wb+mf_vfp_single_cycle")
8429 +
8430 --- /dev/null
8431 +++ b/gcc/config/arm/montavista-linux.h
8432 @@ -0,0 +1,33 @@
8433 +/* MontaVista GNU/Linux Configuration.
8434 + Copyright (C) 2009
8435 + Free Software Foundation, Inc.
8436 +
8437 +This file is part of GCC.
8438 +
8439 +GCC is free software; you can redistribute it and/or modify
8440 +it under the terms of the GNU General Public License as published by
8441 +the Free Software Foundation; either version 3, or (at your option)
8442 +any later version.
8443 +
8444 +GCC is distributed in the hope that it will be useful,
8445 +but WITHOUT ANY WARRANTY; without even the implied warranty of
8446 +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8447 +GNU General Public License for more details.
8448 +
8449 +You should have received a copy of the GNU General Public License
8450 +along with GCC; see the file COPYING3. If not see
8451 +<http://www.gnu.org/licenses/>. */
8452 +
8453 +/* Add -tarmv6 and -tthumb2 options for convenience in generating multilibs.
8454 +*/
8455 +#undef CC1_SPEC
8456 +#define CC1_SPEC " \
8457 + %{tarmv6: -march=armv6 -mfloat-abi=softfp ; \
8458 + tthumb2: -mthumb -march=armv7-a -mfloat-abi=softfp ; \
8459 + : -march=armv5t}"
8460 +
8461 +/* The various C libraries each have their own subdirectory. */
8462 +#undef SYSROOT_SUFFIX_SPEC
8463 +#define SYSROOT_SUFFIX_SPEC \
8464 + "%{tarmv6:/armv6 ; \
8465 + tthumb2:/thumb2}"
8466 --- a/gcc/config/arm/neon-gen.ml
8467 +++ b/gcc/config/arm/neon-gen.ml
8468 @@ -122,6 +122,7 @@
8469 | T_uint16 | T_int16 -> T_intHI
8470 | T_uint32 | T_int32 -> T_intSI
8471 | T_uint64 | T_int64 -> T_intDI
8472 + | T_float32 -> T_floatSF
8473 | T_poly8 -> T_intQI
8474 | T_poly16 -> T_intHI
8475 | T_arrayof (n, elt) -> T_arrayof (n, signed_ctype elt)
8476 @@ -320,7 +321,7 @@
8477 typeinfo;
8478 Format.print_newline ();
8479 (* Extra types not in <stdint.h>. *)
8480 - Format.printf "typedef __builtin_neon_sf float32_t;\n";
8481 + Format.printf "typedef float float32_t;\n";
8482 Format.printf "typedef __builtin_neon_poly8 poly8_t;\n";
8483 Format.printf "typedef __builtin_neon_poly16 poly16_t;\n"
8484
8485 --- a/gcc/config/arm/neon.md
8486 +++ b/gcc/config/arm/neon.md
8487 @@ -159,7 +159,8 @@
8488 (UNSPEC_VUZP1 201)
8489 (UNSPEC_VUZP2 202)
8490 (UNSPEC_VZIP1 203)
8491 - (UNSPEC_VZIP2 204)])
8492 + (UNSPEC_VZIP2 204)
8493 + (UNSPEC_MISALIGNED_ACCESS 205)])
8494
8495 ;; Double-width vector modes.
8496 (define_mode_iterator VD [V8QI V4HI V2SI V2SF])
8497 @@ -459,7 +460,9 @@
8498 "=w,Uv,w, w, ?r,?w,?r,?r, ?Us")
8499 (match_operand:VD 1 "general_operand"
8500 " w,w, Dn,Uvi, w, r, r, Usi,r"))]
8501 - "TARGET_NEON"
8502 + "TARGET_NEON
8503 + && (register_operand (operands[0], <MODE>mode)
8504 + || register_operand (operands[1], <MODE>mode))"
8505 {
8506 if (which_alternative == 2)
8507 {
8508 @@ -481,7 +484,7 @@
8509
8510 /* FIXME: If the memory layout is changed in big-endian mode, output_move_vfp
8511 below must be changed to output_move_neon (which will use the
8512 - element/structure loads/stores), and the constraint changed to 'Un' instead
8513 + element/structure loads/stores), and the constraint changed to 'Um' instead
8514 of 'Uv'. */
8515
8516 switch (which_alternative)
8517 @@ -506,7 +509,9 @@
8518 "=w,Un,w, w, ?r,?w,?r,?r, ?Us")
8519 (match_operand:VQXMOV 1 "general_operand"
8520 " w,w, Dn,Uni, w, r, r, Usi, r"))]
8521 - "TARGET_NEON"
8522 + "TARGET_NEON
8523 + && (register_operand (operands[0], <MODE>mode)
8524 + || register_operand (operands[1], <MODE>mode))"
8525 {
8526 if (which_alternative == 2)
8527 {
8528 @@ -549,6 +554,11 @@
8529 (match_operand:TI 1 "general_operand" ""))]
8530 "TARGET_NEON"
8531 {
8532 + if (can_create_pseudo_p ())
8533 + {
8534 + if (GET_CODE (operands[0]) != REG)
8535 + operands[1] = force_reg (TImode, operands[1]);
8536 + }
8537 })
8538
8539 (define_expand "mov<mode>"
8540 @@ -556,12 +566,19 @@
8541 (match_operand:VSTRUCT 1 "general_operand" ""))]
8542 "TARGET_NEON"
8543 {
8544 + if (can_create_pseudo_p ())
8545 + {
8546 + if (GET_CODE (operands[0]) != REG)
8547 + operands[1] = force_reg (<MODE>mode, operands[1]);
8548 + }
8549 })
8550
8551 (define_insn "*neon_mov<mode>"
8552 [(set (match_operand:VSTRUCT 0 "nonimmediate_operand" "=w,Ut,w")
8553 (match_operand:VSTRUCT 1 "general_operand" " w,w, Ut"))]
8554 - "TARGET_NEON"
8555 + "TARGET_NEON
8556 + && (register_operand (operands[0], <MODE>mode)
8557 + || register_operand (operands[1], <MODE>mode))"
8558 {
8559 switch (which_alternative)
8560 {
8561 @@ -658,6 +675,49 @@
8562 neon_disambiguate_copy (operands, dest, src, 4);
8563 })
8564
8565 +(define_expand "movmisalign<mode>"
8566 + [(set (match_operand:VDQX 0 "nonimmediate_operand" "")
8567 + (unspec:VDQX [(match_operand:VDQX 1 "general_operand" "")]
8568 + UNSPEC_MISALIGNED_ACCESS))]
8569 + "TARGET_NEON && !BYTES_BIG_ENDIAN"
8570 +{
8571 + if (!s_register_operand (operands[0], <MODE>mode)
8572 + && !s_register_operand (operands[1], <MODE>mode))
8573 + FAIL;
8574 +})
8575 +
8576 +(define_insn "*movmisalign<mode>_neon_store"
8577 + [(set (match_operand:VDX 0 "memory_operand" "=Um")
8578 + (unspec:VDX [(match_operand:VDX 1 "s_register_operand" " w")]
8579 + UNSPEC_MISALIGNED_ACCESS))]
8580 + "TARGET_NEON && !BYTES_BIG_ENDIAN"
8581 + "vst1.<V_sz_elem>\t{%P1}, %A0"
8582 + [(set_attr "neon_type" "neon_vst1_1_2_regs_vst2_2_regs")])
8583 +
8584 +(define_insn "*movmisalign<mode>_neon_load"
8585 + [(set (match_operand:VDX 0 "s_register_operand" "=w")
8586 + (unspec:VDX [(match_operand:VDX 1 "memory_operand" " Um")]
8587 + UNSPEC_MISALIGNED_ACCESS))]
8588 + "TARGET_NEON && !BYTES_BIG_ENDIAN"
8589 + "vld1.<V_sz_elem>\t{%P0}, %A1"
8590 + [(set_attr "neon_type" "neon_vld1_1_2_regs")])
8591 +
8592 +(define_insn "*movmisalign<mode>_neon_store"
8593 + [(set (match_operand:VQX 0 "memory_operand" "=Um")
8594 + (unspec:VQX [(match_operand:VQX 1 "s_register_operand" " w")]
8595 + UNSPEC_MISALIGNED_ACCESS))]
8596 + "TARGET_NEON && !BYTES_BIG_ENDIAN"
8597 + "vst1.<V_sz_elem>\t{%q1}, %A0"
8598 + [(set_attr "neon_type" "neon_vst1_1_2_regs_vst2_2_regs")])
8599 +
8600 +(define_insn "*movmisalign<mode>_neon_load"
8601 + [(set (match_operand:VQX 0 "s_register_operand" "=w")
8602 + (unspec:VQX [(match_operand:VQX 1 "general_operand" " Um")]
8603 + UNSPEC_MISALIGNED_ACCESS))]
8604 + "TARGET_NEON && !BYTES_BIG_ENDIAN"
8605 + "vld1.<V_sz_elem>\t{%q0}, %A1"
8606 + [(set_attr "neon_type" "neon_vld1_1_2_regs")])
8607 +
8608 (define_insn "vec_set<mode>_internal"
8609 [(set (match_operand:VD 0 "s_register_operand" "=w")
8610 (vec_merge:VD
8611 @@ -862,6 +922,50 @@
8612 (const_string "neon_mul_qqq_8_16_32_ddd_32")))))]
8613 )
8614
8615 +(define_insn "*mul<mode>3add<mode>_neon"
8616 + [(set (match_operand:VDQ 0 "s_register_operand" "=w")
8617 + (plus:VDQ (mult:VDQ (match_operand:VDQ 2 "s_register_operand" "w")
8618 + (match_operand:VDQ 3 "s_register_operand" "w"))
8619 + (match_operand:VDQ 1 "s_register_operand" "0")))]
8620 + "TARGET_NEON"
8621 + "vmla.<V_if_elem>\t%<V_reg>0, %<V_reg>2, %<V_reg>3"
8622 + [(set (attr "neon_type")
8623 + (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
8624 + (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
8625 + (const_string "neon_fp_vmla_ddd")
8626 + (const_string "neon_fp_vmla_qqq"))
8627 + (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
8628 + (if_then_else
8629 + (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
8630 + (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
8631 + (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long"))
8632 + (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
8633 + (const_string "neon_mla_qqq_8_16")
8634 + (const_string "neon_mla_qqq_32_qqd_32_scalar")))))]
8635 +)
8636 +
8637 +(define_insn "*mul<mode>3neg<mode>add<mode>_neon"
8638 + [(set (match_operand:VDQ 0 "s_register_operand" "=w")
8639 + (minus:VDQ (match_operand:VDQ 1 "s_register_operand" "0")
8640 + (mult:VDQ (match_operand:VDQ 2 "s_register_operand" "w")
8641 + (match_operand:VDQ 3 "s_register_operand" "w"))))]
8642 + "TARGET_NEON"
8643 + "vmls.<V_if_elem>\t%<V_reg>0, %<V_reg>2, %<V_reg>3"
8644 + [(set (attr "neon_type")
8645 + (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
8646 + (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
8647 + (const_string "neon_fp_vmla_ddd")
8648 + (const_string "neon_fp_vmla_qqq"))
8649 + (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
8650 + (if_then_else
8651 + (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
8652 + (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
8653 + (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long"))
8654 + (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
8655 + (const_string "neon_mla_qqq_8_16")
8656 + (const_string "neon_mla_qqq_32_qqd_32_scalar")))))]
8657 +)
8658 +
8659 (define_insn "ior<mode>3"
8660 [(set (match_operand:VDQ 0 "s_register_operand" "=w,w")
8661 (ior:VDQ (match_operand:VDQ 1 "s_register_operand" "w,0")
8662 @@ -3611,7 +3715,8 @@
8663 UNSPEC_VSHLL_N))]
8664 "TARGET_NEON"
8665 {
8666 - neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
8667 + /* The boundaries are: 0 < imm <= size. */
8668 + neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode) + 1);
8669 return "vshll.%T3%#<V_sz_elem>\t%q0, %P1, %2";
8670 }
8671 [(set_attr "neon_type" "neon_shift_1")]
8672 --- a/gcc/config/arm/neon.ml
8673 +++ b/gcc/config/arm/neon.ml
8674 @@ -50,7 +50,7 @@
8675 | T_ptrto of vectype | T_const of vectype
8676 | T_void | T_intQI
8677 | T_intHI | T_intSI
8678 - | T_intDI
8679 + | T_intDI | T_floatSF
8680
8681 (* The meanings of the following are:
8682 TImode : "Tetra", two registers (four words).
8683 @@ -1693,6 +1693,7 @@
8684 | T_intHI -> "__builtin_neon_hi"
8685 | T_intSI -> "__builtin_neon_si"
8686 | T_intDI -> "__builtin_neon_di"
8687 + | T_floatSF -> "__builtin_neon_sf"
8688 | T_arrayof (num, base) ->
8689 let basename = name (fun x -> x) base in
8690 affix (Printf.sprintf "%sx%d" basename num)
8691 --- a/gcc/config/arm/neon-testgen.ml
8692 +++ b/gcc/config/arm/neon-testgen.ml
8693 @@ -51,8 +51,8 @@
8694 Printf.fprintf chan "/* This file was autogenerated by neon-testgen. */\n\n";
8695 Printf.fprintf chan "/* { dg-do assemble } */\n";
8696 Printf.fprintf chan "/* { dg-require-effective-target arm_neon_ok } */\n";
8697 - Printf.fprintf chan
8698 - "/* { dg-options \"-save-temps -O0 -mfpu=neon -mfloat-abi=softfp\" } */\n";
8699 + Printf.fprintf chan "/* { dg-options \"-save-temps -O0\" } */\n";
8700 + Printf.fprintf chan "/* { dg-add-options arm_neon } */\n";
8701 Printf.fprintf chan "\n#include \"arm_neon.h\"\n\n";
8702 Printf.fprintf chan "void test_%s (void)\n{\n" test_name
8703
8704 --- a/gcc/config/arm/netbsd-elf.h
8705 +++ b/gcc/config/arm/netbsd-elf.h
8706 @@ -153,5 +153,5 @@
8707 while (0)
8708
8709 #undef FPUTYPE_DEFAULT
8710 -#define FPUTYPE_DEFAULT FPUTYPE_VFP
8711 +#define FPUTYPE_DEFAULT "vfp"
8712
8713 --- /dev/null
8714 +++ b/gcc/config/arm/nocrt0.h
8715 @@ -0,0 +1,25 @@
8716 +/* Definitions for generic libgloss-based configs where crt0 is supplied by
8717 + the linker script.
8718 + Copyright (C) 2006 Free Software Foundation, Inc.
8719 +
8720 + This file is part of GCC.
8721 +
8722 + GCC is free software; you can redistribute it and/or modify it
8723 + under the terms of the GNU General Public License as published
8724 + by the Free Software Foundation; either version 3, or (at your
8725 + option) any later version.
8726 +
8727 + GCC is distributed in the hope that it will be useful, but WITHOUT
8728 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
8729 + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
8730 + License for more details.
8731 +
8732 + You should have received a copy of the GNU General Public License
8733 + along with GCC; see the file COPYING3. If not see
8734 + <http://www.gnu.org/licenses/>. */
8735 +
8736 +#undef STARTFILE_SPEC
8737 +#define STARTFILE_SPEC " crti%O%s crtbegin%O%s"
8738 +
8739 +#undef LIB_SPEC
8740 +#define LIB_SPEC "-lc"
8741 --- a/gcc/config/arm/predicates.md
8742 +++ b/gcc/config/arm/predicates.md
8743 @@ -73,6 +73,10 @@
8744 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
8745 })
8746
8747 +(define_special_predicate "subreg_lowpart_operator"
8748 + (and (match_code "subreg")
8749 + (match_test "subreg_lowpart_p (op)")))
8750 +
8751 ;; Reg, subreg(reg) or const_int.
8752 (define_predicate "reg_or_int_operand"
8753 (ior (match_code "const_int")
8754 @@ -168,6 +172,11 @@
8755 (and (match_code "plus,minus,ior,xor,and")
8756 (match_test "mode == GET_MODE (op)")))
8757
8758 +;; True for plus/minus operators
8759 +(define_special_predicate "plusminus_operator"
8760 + (and (match_code "plus,minus")
8761 + (match_test "mode == GET_MODE (op)")))
8762 +
8763 ;; True for logical binary operators.
8764 (define_special_predicate "logical_binary_operator"
8765 (and (match_code "ior,xor,and")
8766 @@ -295,6 +304,9 @@
8767 HOST_WIDE_INT i = 1, base = 0;
8768 rtx elt;
8769
8770 + if (low_irq_latency)
8771 + return false;
8772 +
8773 if (count <= 1
8774 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
8775 return false;
8776 @@ -352,6 +364,9 @@
8777 HOST_WIDE_INT i = 1, base = 0;
8778 rtx elt;
8779
8780 + if (low_irq_latency)
8781 + return false;
8782 +
8783 if (count <= 1
8784 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
8785 return false;
8786 --- a/gcc/config/arm/sfp-machine.h
8787 +++ b/gcc/config/arm/sfp-machine.h
8788 @@ -14,9 +14,11 @@
8789 #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
8790 #define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
8791
8792 +#define _FP_NANFRAC_H ((_FP_QNANBIT_H << 1) - 1)
8793 #define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
8794 #define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
8795 #define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
8796 +#define _FP_NANSIGN_H 0
8797 #define _FP_NANSIGN_S 0
8798 #define _FP_NANSIGN_D 0
8799 #define _FP_NANSIGN_Q 0
8800 @@ -92,5 +94,7 @@
8801 #define __fixdfdi __aeabi_d2lz
8802 #define __fixunsdfdi __aeabi_d2ulz
8803 #define __floatdidf __aeabi_l2d
8804 +#define __extendhfsf2 __gnu_h2f_ieee
8805 +#define __truncsfhf2 __gnu_f2h_ieee
8806
8807 #endif /* __ARM_EABI__ */
8808 --- a/gcc/config/arm/t-arm
8809 +++ b/gcc/config/arm/t-arm
8810 @@ -13,7 +13,9 @@
8811 $(srcdir)/config/arm/iwmmxt.md \
8812 $(srcdir)/config/arm/vfp.md \
8813 $(srcdir)/config/arm/neon.md \
8814 - $(srcdir)/config/arm/thumb2.md
8815 + $(srcdir)/config/arm/thumb2.md \
8816 + $(srcdir)/config/arm/marvell-f.md \
8817 + $(srcdir)/config/arm/hwdiv.md
8818
8819 s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
8820 s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
8821 --- a/gcc/config/arm/t-arm-elf
8822 +++ b/gcc/config/arm/t-arm-elf
8823 @@ -24,10 +24,18 @@
8824 #MULTILIB_MATCHES += march?armv7=march?armv7-a
8825 #MULTILIB_MATCHES += march?armv7=march?armv7-r
8826 #MULTILIB_MATCHES += march?armv7=march?armv7-m
8827 +#MULTILIB_MATCHES += march?armv7=march?armv7e-m
8828 #MULTILIB_MATCHES += march?armv7=mcpu?cortex-a8
8829 #MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4
8830 #MULTILIB_MATCHES += march?armv7=mcpu?cortex-m3
8831
8832 +# Not quite true. We can support hard-vfp calling in Thumb2, but how do we
8833 +# express that here? Also, we really need architecture v5e or later
8834 +# (mcrr etc).
8835 +MULTILIB_OPTIONS += mfloat-abi=hard
8836 +MULTILIB_DIRNAMES += fpu
8837 +MULTILIB_EXCEPTIONS += *mthumb/*mfloat-abi=hard*
8838 +
8839 # MULTILIB_OPTIONS += mcpu=ep9312
8840 # MULTILIB_DIRNAMES += ep9312
8841 # MULTILIB_EXCEPTIONS += *mthumb/*mcpu=ep9312*
8842 --- a/gcc/config/arm/t-bpabi
8843 +++ b/gcc/config/arm/t-bpabi
8844 @@ -1,10 +1,13 @@
8845 # Add the bpabi.S functions.
8846 -LIB1ASMFUNCS += _aeabi_lcmp _aeabi_ulcmp _aeabi_ldivmod _aeabi_uldivmod
8847 +LIB1ASMFUNCS += _aeabi_lcmp _aeabi_ulcmp _aeabi_ldivmod _aeabi_uldivmod \
8848 + _aeabi_idiv0 _aeabi_ldiv0
8849
8850 # Add the BPABI C functions.
8851 LIB2FUNCS_EXTRA = $(srcdir)/config/arm/bpabi.c \
8852 $(srcdir)/config/arm/unaligned-funcs.c
8853
8854 +LIB2FUNCS_STATIC_EXTRA = $(srcdir)/config/arm/fp16.c
8855 +
8856 UNWIND_H = $(srcdir)/config/arm/unwind-arm.h
8857 LIB2ADDEH = $(srcdir)/config/arm/unwind-arm.c \
8858 $(srcdir)/config/arm/libunwind.S \
8859 --- a/gcc/config/arm/thumb2.md
8860 +++ b/gcc/config/arm/thumb2.md
8861 @@ -24,6 +24,8 @@
8862 ;; changes made in armv5t as "thumb2". These are considered part
8863 ;; the 16-bit Thumb-1 instruction set.
8864
8865 +(include "hwdiv.md")
8866 +
8867 (define_insn "*thumb2_incscc"
8868 [(set (match_operand:SI 0 "s_register_operand" "=r,r")
8869 (plus:SI (match_operator:SI 2 "arm_comparison_operator"
8870 @@ -172,34 +174,6 @@
8871 (set_attr "length" "8")]
8872 )
8873
8874 -(define_insn "*thumb2_abssi2"
8875 - [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
8876 - (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
8877 - (clobber (reg:CC CC_REGNUM))]
8878 - "TARGET_THUMB2"
8879 - "@
8880 - cmp\\t%0, #0\;it\tlt\;rsblt\\t%0, %0, #0
8881 - eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
8882 - [(set_attr "conds" "clob,*")
8883 - (set_attr "shift" "1")
8884 - ;; predicable can't be set based on the variant, so left as no
8885 - (set_attr "length" "10,8")]
8886 -)
8887 -
8888 -(define_insn "*thumb2_neg_abssi2"
8889 - [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
8890 - (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
8891 - (clobber (reg:CC CC_REGNUM))]
8892 - "TARGET_THUMB2"
8893 - "@
8894 - cmp\\t%0, #0\;it\\tgt\;rsbgt\\t%0, %0, #0
8895 - eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
8896 - [(set_attr "conds" "clob,*")
8897 - (set_attr "shift" "1")
8898 - ;; predicable can't be set based on the variant, so left as no
8899 - (set_attr "length" "10,8")]
8900 -)
8901 -
8902 (define_insn "*thumb2_movdi"
8903 [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r, r, r, m")
8904 (match_operand:DI 1 "di_operand" "rDa,Db,Dc,mi,r"))]
8905 @@ -223,9 +197,14 @@
8906 (set_attr "neg_pool_range" "*,*,*,0,*")]
8907 )
8908
8909 +;; We have two alternatives here for memory loads (and similarly for stores)
8910 +;; to reflect the fact that the permissible constant pool ranges differ
8911 +;; between ldr instructions taking low regs and ldr instructions taking high
8912 +;; regs. The high register alternatives are not taken into account when
8913 +;; choosing register preferences in order to reflect their expense.
8914 (define_insn "*thumb2_movsi_insn"
8915 - [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,rk,m")
8916 - (match_operand:SI 1 "general_operand" "rk ,I,K,N,mi,rk"))]
8917 + [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,l,*hk,m,*m")
8918 + (match_operand:SI 1 "general_operand" "rk ,I,K,j,mi,*mi,l,*hk"))]
8919 "TARGET_THUMB2 && ! TARGET_IWMMXT
8920 && !(TARGET_HARD_FLOAT && TARGET_VFP)
8921 && ( register_operand (operands[0], SImode)
8922 @@ -236,11 +215,13 @@
8923 mvn%?\\t%0, #%B1
8924 movw%?\\t%0, %1
8925 ldr%?\\t%0, %1
8926 + ldr%?\\t%0, %1
8927 + str%?\\t%1, %0
8928 str%?\\t%1, %0"
8929 - [(set_attr "type" "*,*,*,*,load1,store1")
8930 + [(set_attr "type" "*,*,*,*,load1,load1,store1,store1")
8931 (set_attr "predicable" "yes")
8932 - (set_attr "pool_range" "*,*,*,*,4096,*")
8933 - (set_attr "neg_pool_range" "*,*,*,*,0,*")]
8934 + (set_attr "pool_range" "*,*,*,*,1020,4096,*,*")
8935 + (set_attr "neg_pool_range" "*,*,*,*,0,0,*,*")]
8936 )
8937
8938 ;; ??? We can probably do better with thumb2
8939 @@ -1128,27 +1109,7 @@
8940 return \"add%!\\t%0, %1, %2\";
8941 "
8942 [(set_attr "predicable" "yes")
8943 - (set_attr "length" "2")]
8944 -)
8945 -
8946 -(define_insn "divsi3"
8947 - [(set (match_operand:SI 0 "s_register_operand" "=r")
8948 - (div:SI (match_operand:SI 1 "s_register_operand" "r")
8949 - (match_operand:SI 2 "s_register_operand" "r")))]
8950 - "TARGET_THUMB2 && arm_arch_hwdiv"
8951 - "sdiv%?\t%0, %1, %2"
8952 - [(set_attr "predicable" "yes")
8953 - (set_attr "insn" "sdiv")]
8954 -)
8955 -
8956 -(define_insn "udivsi3"
8957 - [(set (match_operand:SI 0 "s_register_operand" "=r")
8958 - (udiv:SI (match_operand:SI 1 "s_register_operand" "r")
8959 - (match_operand:SI 2 "s_register_operand" "r")))]
8960 - "TARGET_THUMB2 && arm_arch_hwdiv"
8961 - "udiv%?\t%0, %1, %2"
8962 - [(set_attr "predicable" "yes")
8963 - (set_attr "insn" "udiv")]
8964 + (set_attr "length" "4")]
8965 )
8966
8967 (define_insn "*thumb2_subsi_short"
8968 @@ -1162,6 +1123,71 @@
8969 (set_attr "length" "2")]
8970 )
8971
8972 +;; 16-bit encodings of "muls" and "mul<c>". We only use these when
8973 +;; optimizing for size since "muls" is slow on all known
8974 +;; implementations and since "mul<c>" will be generated by
8975 +;; "*arm_mulsi3_v6" anyhow. The assembler will use a 16-bit encoding
8976 +;; for "mul<c>" whenever possible anyhow.
8977 +(define_peephole2
8978 + [(set (match_operand:SI 0 "low_register_operand" "")
8979 + (mult:SI (match_operand:SI 1 "low_register_operand" "")
8980 + (match_dup 0)))]
8981 + "TARGET_THUMB2 && optimize_size && peep2_regno_dead_p (0, CC_REGNUM)"
8982 + [(parallel
8983 + [(set (match_dup 0)
8984 + (mult:SI (match_dup 0) (match_dup 1)))
8985 + (clobber (reg:CC CC_REGNUM))])]
8986 + ""
8987 +)
8988 +
8989 +(define_peephole2
8990 + [(set (match_operand:SI 0 "low_register_operand" "")
8991 + (mult:SI (match_dup 0)
8992 + (match_operand:SI 1 "low_register_operand" "")))]
8993 + "TARGET_THUMB2 && optimize_size && peep2_regno_dead_p (0, CC_REGNUM)"
8994 + [(parallel
8995 + [(set (match_dup 0)
8996 + (mult:SI (match_dup 0) (match_dup 1)))
8997 + (clobber (reg:CC CC_REGNUM))])]
8998 + ""
8999 +)
9000 +
9001 +(define_insn "*thumb2_mulsi_short"
9002 + [(set (match_operand:SI 0 "low_register_operand" "=l")
9003 + (mult:SI (match_operand:SI 1 "low_register_operand" "%0")
9004 + (match_operand:SI 2 "low_register_operand" "l")))
9005 + (clobber (reg:CC CC_REGNUM))]
9006 + "TARGET_THUMB2 && optimize_size && reload_completed"
9007 + "mul%!\\t%0, %2, %0"
9008 + [(set_attr "predicable" "yes")
9009 + (set_attr "length" "2")
9010 + (set_attr "insn" "muls")])
9011 +
9012 +(define_insn "*thumb2_mulsi_short_compare0"
9013 + [(set (reg:CC_NOOV CC_REGNUM)
9014 + (compare:CC_NOOV
9015 + (mult:SI (match_operand:SI 1 "register_operand" "%0")
9016 + (match_operand:SI 2 "register_operand" "l"))
9017 + (const_int 0)))
9018 + (set (match_operand:SI 0 "register_operand" "=l")
9019 + (mult:SI (match_dup 1) (match_dup 2)))]
9020 + "TARGET_THUMB2 && optimize_size"
9021 + "muls\\t%0, %2, %0"
9022 + [(set_attr "length" "2")
9023 + (set_attr "insn" "muls")])
9024 +
9025 +(define_insn "*thumb2_mulsi_short_compare0_scratch"
9026 + [(set (reg:CC_NOOV CC_REGNUM)
9027 + (compare:CC_NOOV
9028 + (mult:SI (match_operand:SI 1 "register_operand" "%0")
9029 + (match_operand:SI 2 "register_operand" "l"))
9030 + (const_int 0)))
9031 + (clobber (match_scratch:SI 0 "=r"))]
9032 + "TARGET_THUMB2 && optimize_size"
9033 + "muls\\t%0, %2, %0"
9034 + [(set_attr "length" "2")
9035 + (set_attr "insn" "muls")])
9036 +
9037 (define_insn "*thumb2_cbz"
9038 [(set (pc) (if_then_else
9039 (eq (match_operand:SI 0 "s_register_operand" "l,?r")
9040 @@ -1171,7 +1197,7 @@
9041 (clobber (reg:CC CC_REGNUM))]
9042 "TARGET_THUMB2"
9043 "*
9044 - if (get_attr_length (insn) == 2 && which_alternative == 0)
9045 + if (get_attr_length (insn) == 2)
9046 return \"cbz\\t%0, %l1\";
9047 else
9048 return \"cmp\\t%0, #0\;beq\\t%l1\";
9049 @@ -1179,7 +1205,8 @@
9050 [(set (attr "length")
9051 (if_then_else
9052 (and (ge (minus (match_dup 1) (pc)) (const_int 2))
9053 - (le (minus (match_dup 1) (pc)) (const_int 128)))
9054 + (le (minus (match_dup 1) (pc)) (const_int 128))
9055 + (eq (symbol_ref ("which_alternative")) (const_int 0)))
9056 (const_int 2)
9057 (const_int 8)))]
9058 )
9059 @@ -1193,7 +1220,7 @@
9060 (clobber (reg:CC CC_REGNUM))]
9061 "TARGET_THUMB2"
9062 "*
9063 - if (get_attr_length (insn) == 2 && which_alternative == 0)
9064 + if (get_attr_length (insn) == 2)
9065 return \"cbnz\\t%0, %l1\";
9066 else
9067 return \"cmp\\t%0, #0\;bne\\t%l1\";
9068 @@ -1201,7 +1228,8 @@
9069 [(set (attr "length")
9070 (if_then_else
9071 (and (ge (minus (match_dup 1) (pc)) (const_int 2))
9072 - (le (minus (match_dup 1) (pc)) (const_int 128)))
9073 + (le (minus (match_dup 1) (pc)) (const_int 128))
9074 + (eq (symbol_ref ("which_alternative")) (const_int 0)))
9075 (const_int 2)
9076 (const_int 8)))]
9077 )
9078 --- a/gcc/config/arm/t-linux-eabi
9079 +++ b/gcc/config/arm/t-linux-eabi
9080 @@ -6,8 +6,8 @@
9081 MULTILIB_OPTIONS =
9082 MULTILIB_DIRNAMES =
9083
9084 -# Use a version of div0 which raises SIGFPE.
9085 -LIB1ASMFUNCS := $(filter-out _dvmd_tls,$(LIB1ASMFUNCS)) _dvmd_lnx
9086 +# Use a version of div0 which raises SIGFPE, and a special __clear_cache.
9087 +LIB1ASMFUNCS := $(filter-out _dvmd_tls,$(LIB1ASMFUNCS)) _dvmd_lnx _clear_cache
9088
9089 # Multilib the standard Linux files. Don't include crti.o or crtn.o,
9090 # which are provided by glibc.
9091 --- a/gcc/config/arm/t-symbian
9092 +++ b/gcc/config/arm/t-symbian
9093 @@ -17,6 +17,9 @@
9094 LIB2ADDEH = $(srcdir)/unwind-c.c $(srcdir)/config/arm/pr-support.c
9095 LIB2ADDEHDEP = $(UNWIND_H)
9096
9097 +# Include half-float helpers.
9098 +LIB2FUNCS_STATIC_EXTRA = $(srcdir)/config/arm/fp16.c
9099 +
9100 # Create a multilib for processors with VFP floating-point, and a
9101 # multilib for those without -- using the soft-float ABI in both
9102 # cases. Symbian OS object should be compiled with interworking
9103 --- a/gcc/config/arm/uclinux-eabi.h
9104 +++ b/gcc/config/arm/uclinux-eabi.h
9105 @@ -50,6 +50,10 @@
9106 #undef ARM_DEFAULT_ABI
9107 #define ARM_DEFAULT_ABI ARM_ABI_AAPCS_LINUX
9108
9109 +#undef LINK_GCC_C_SEQUENCE_SPEC
9110 +#define LINK_GCC_C_SEQUENCE_SPEC \
9111 + "--start-group %G %L --end-group"
9112 +
9113 /* Clear the instruction cache from `beg' to `end'. This makes an
9114 inline system call to SYS_cacheflush. */
9115 #undef CLEAR_INSN_CACHE
9116 --- a/gcc/config/arm/unwind-arm.c
9117 +++ b/gcc/config/arm/unwind-arm.c
9118 @@ -1000,7 +1000,6 @@
9119 while (code != _URC_END_OF_STACK
9120 && code != _URC_FAILURE);
9121
9122 - finish:
9123 restore_non_core_regs (&saved_vrs);
9124 return code;
9125 }
9126 @@ -1168,6 +1167,9 @@
9127 {
9128 matched = (void *)(ucbp + 1);
9129 rtti = _Unwind_decode_target2 ((_uw) &data[i + 1]);
9130 + /* There is no way to encode an exception
9131 + specification for 'class X * &', so
9132 + always pass false for is_reference. */
9133 if (__cxa_type_match (ucbp, (type_info *) rtti, 0,
9134 &matched))
9135 break;
9136 @@ -1197,8 +1199,6 @@
9137 ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1];
9138
9139 if (data[0] & uint32_highbit)
9140 - phase2_call_unexpected_after_unwind = 1;
9141 - else
9142 {
9143 data += rtti_count + 1;
9144 /* Setup for entry to the handler. */
9145 @@ -1208,6 +1208,8 @@
9146 _Unwind_SetGR (context, 0, (_uw) ucbp);
9147 return _URC_INSTALL_CONTEXT;
9148 }
9149 + else
9150 + phase2_call_unexpected_after_unwind = 1;
9151 }
9152 if (data[0] & uint32_highbit)
9153 data++;
9154 --- a/gcc/config/arm/unwind-arm.h
9155 +++ b/gcc/config/arm/unwind-arm.h
9156 @@ -229,9 +229,10 @@
9157 return 0;
9158
9159 #if (defined(linux) && !defined(__uClinux__)) || defined(__NetBSD__)
9160 - /* Pc-relative indirect. */
9161 + /* Pc-relative indirect. Propagate the bottom 2 bits, which can
9162 + contain referenceness information in gnu unwinding tables. */
9163 tmp += ptr;
9164 - tmp = *(_Unwind_Word *) tmp;
9165 + tmp = *(_Unwind_Word *) (tmp & ~(_Unwind_Word)3) | (tmp & 3);
9166 #elif defined(__symbian__) || defined(__uClinux__)
9167 /* Absolute pointer. Nothing more to do. */
9168 #else
9169 --- a/gcc/config/arm/vec-common.md
9170 +++ b/gcc/config/arm/vec-common.md
9171 @@ -38,6 +38,11 @@
9172 "TARGET_NEON
9173 || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
9174 {
9175 + if (can_create_pseudo_p ())
9176 + {
9177 + if (GET_CODE (operands[0]) != REG)
9178 + operands[1] = force_reg (<MODE>mode, operands[1]);
9179 + }
9180 })
9181
9182 ;; Vector arithmetic. Expanders are blank, then unnamed insns implement
9183 --- a/gcc/config/arm/vfp.md
9184 +++ b/gcc/config/arm/vfp.md
9185 @@ -51,7 +51,7 @@
9186 ;; problems because small constants get converted into adds.
9187 (define_insn "*arm_movsi_vfp"
9188 [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,rk,m ,*t,r,*t,*t, *Uv")
9189 - (match_operand:SI 1 "general_operand" "rk, I,K,N,mi,rk,r,*t,*t,*Uvi,*t"))]
9190 + (match_operand:SI 1 "general_operand" "rk, I,K,j,mi,rk,r,*t,*t,*Uvi,*t"))]
9191 "TARGET_ARM && TARGET_VFP && TARGET_HARD_FLOAT
9192 && ( s_register_operand (operands[0], SImode)
9193 || s_register_operand (operands[1], SImode))"
9194 @@ -82,13 +82,17 @@
9195 "
9196 [(set_attr "predicable" "yes")
9197 (set_attr "type" "*,*,*,*,load1,store1,r_2_f,f_2_r,fcpys,f_loads,f_stores")
9198 + (set_attr "neon_type" "*,*,*,*,*,*,neon_mcr,neon_mrc,neon_vmov,*,*")
9199 + (set_attr "insn" "mov,mov,mvn,mov,*,*,*,*,*,*,*")
9200 (set_attr "pool_range" "*,*,*,*,4096,*,*,*,*,1020,*")
9201 (set_attr "neg_pool_range" "*,*,*,*,4084,*,*,*,*,1008,*")]
9202 )
9203
9204 +;; See thumb2.md:thumb2_movsi_insn for an explanation of the split
9205 +;; high/low register alternatives for loads and stores here.
9206 (define_insn "*thumb2_movsi_vfp"
9207 - [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,rk,m,*t,r, *t,*t, *Uv")
9208 - (match_operand:SI 1 "general_operand" "rk, I,K,N,mi,rk,r,*t,*t,*Uvi,*t"))]
9209 + [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,l,*hk,m,*m,*t,r, *t,*t, *Uv")
9210 + (match_operand:SI 1 "general_operand" "rk, I,K,j,mi,*mi,l,*hk,r,*t,*t,*Uvi,*t"))]
9211 "TARGET_THUMB2 && TARGET_VFP && TARGET_HARD_FLOAT
9212 && ( s_register_operand (operands[0], SImode)
9213 || s_register_operand (operands[1], SImode))"
9214 @@ -102,25 +106,29 @@
9215 case 3:
9216 return \"movw%?\\t%0, %1\";
9217 case 4:
9218 - return \"ldr%?\\t%0, %1\";
9219 case 5:
9220 - return \"str%?\\t%1, %0\";
9221 + return \"ldr%?\\t%0, %1\";
9222 case 6:
9223 - return \"fmsr%?\\t%0, %1\\t%@ int\";
9224 case 7:
9225 - return \"fmrs%?\\t%0, %1\\t%@ int\";
9226 + return \"str%?\\t%1, %0\";
9227 case 8:
9228 + return \"fmsr%?\\t%0, %1\\t%@ int\";
9229 + case 9:
9230 + return \"fmrs%?\\t%0, %1\\t%@ int\";
9231 + case 10:
9232 return \"fcpys%?\\t%0, %1\\t%@ int\";
9233 - case 9: case 10:
9234 + case 11: case 12:
9235 return output_move_vfp (operands);
9236 default:
9237 gcc_unreachable ();
9238 }
9239 "
9240 [(set_attr "predicable" "yes")
9241 - (set_attr "type" "*,*,*,*,load1,store1,r_2_f,f_2_r,fcpys,f_load,f_store")
9242 - (set_attr "pool_range" "*,*,*,*,4096,*,*,*,*,1020,*")
9243 - (set_attr "neg_pool_range" "*,*,*,*, 0,*,*,*,*,1008,*")]
9244 + (set_attr "type" "*,*,*,*,load1,load1,store1,store1,r_2_f,f_2_r,fcpys,f_load,f_store")
9245 + (set_attr "neon_type" "*,*,*,*,*,*,*,*,neon_mcr,neon_mrc,neon_vmov,*,*")
9246 + (set_attr "insn" "mov,mov,mvn,mov,*,*,*,*,*,*,*,*,*")
9247 + (set_attr "pool_range" "*,*,*,*,1020,4096,*,*,*,*,*,1020,*")
9248 + (set_attr "neg_pool_range" "*,*,*,*, 0, 0,*,*,*,*,*,1008,*")]
9249 )
9250
9251
9252 @@ -145,7 +153,10 @@
9253 case 4:
9254 return \"fmrrd%?\\t%Q0, %R0, %P1\\t%@ int\";
9255 case 5:
9256 - return \"fcpyd%?\\t%P0, %P1\\t%@ int\";
9257 + if (TARGET_VFP_SINGLE)
9258 + return \"fcpys%?\\t%0, %1\\t%@ int\;fcpys%?\\t%p0, %p1\\t%@ int\";
9259 + else
9260 + return \"fcpyd%?\\t%P0, %P1\\t%@ int\";
9261 case 6: case 7:
9262 return output_move_vfp (operands);
9263 default:
9264 @@ -153,7 +164,14 @@
9265 }
9266 "
9267 [(set_attr "type" "*,load2,store2,r_2_f,f_2_r,ffarithd,f_loadd,f_stored")
9268 - (set_attr "length" "8,8,8,4,4,4,4,4")
9269 + (set_attr "neon_type" "*,*,*,neon_mcr_2_mcrr,neon_mrrc,neon_vmov,*,*")
9270 + (set (attr "length") (cond [(eq_attr "alternative" "0,1,2") (const_int 8)
9271 + (eq_attr "alternative" "5")
9272 + (if_then_else
9273 + (eq (symbol_ref "TARGET_VFP_SINGLE") (const_int 1))
9274 + (const_int 8)
9275 + (const_int 4))]
9276 + (const_int 4)))
9277 (set_attr "pool_range" "*,1020,*,*,*,*,1020,*")
9278 (set_attr "neg_pool_range" "*,1008,*,*,*,*,1008,*")]
9279 )
9280 @@ -172,7 +190,10 @@
9281 case 4:
9282 return \"fmrrd%?\\t%Q0, %R0, %P1\\t%@ int\";
9283 case 5:
9284 - return \"fcpyd%?\\t%P0, %P1\\t%@ int\";
9285 + if (TARGET_VFP_SINGLE)
9286 + return \"fcpys%?\\t%0, %1\\t%@ int\;fcpys%?\\t%p0, %p1\\t%@ int\";
9287 + else
9288 + return \"fcpyd%?\\t%P0, %P1\\t%@ int\";
9289 case 6: case 7:
9290 return output_move_vfp (operands);
9291 default:
9292 @@ -180,11 +201,123 @@
9293 }
9294 "
9295 [(set_attr "type" "*,load2,store2,r_2_f,f_2_r,ffarithd,f_load,f_store")
9296 - (set_attr "length" "8,8,8,4,4,4,4,4")
9297 + (set_attr "neon_type" "*,*,*,neon_mcr_2_mcrr,neon_mrrc,neon_vmov,*,*")
9298 + (set (attr "length") (cond [(eq_attr "alternative" "0,1,2") (const_int 8)
9299 + (eq_attr "alternative" "5")
9300 + (if_then_else
9301 + (eq (symbol_ref "TARGET_VFP_SINGLE") (const_int 1))
9302 + (const_int 8)
9303 + (const_int 4))]
9304 + (const_int 4)))
9305 (set_attr "pool_range" "*,4096,*,*,*,*,1020,*")
9306 (set_attr "neg_pool_range" "*, 0,*,*,*,*,1008,*")]
9307 )
9308
9309 +;; HFmode moves
9310 +(define_insn "*movhf_vfp_neon"
9311 + [(set (match_operand:HF 0 "nonimmediate_operand" "= t,Um,r,m,t,r,t,r,r")
9312 + (match_operand:HF 1 "general_operand" " Um, t,m,r,t,r,r,t,F"))]
9313 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_NEON_FP16
9314 + && ( s_register_operand (operands[0], HFmode)
9315 + || s_register_operand (operands[1], HFmode))"
9316 + "*
9317 + switch (which_alternative)
9318 + {
9319 + case 0: /* S register from memory */
9320 + return \"vld1.16\\t{%z0}, %A1\";
9321 + case 1: /* memory from S register */
9322 + return \"vst1.16\\t{%z1}, %A0\";
9323 + case 2: /* ARM register from memory */
9324 + return \"ldrh\\t%0, %1\\t%@ __fp16\";
9325 + case 3: /* memory from ARM register */
9326 + return \"strh\\t%1, %0\\t%@ __fp16\";
9327 + case 4: /* S register from S register */
9328 + return \"fcpys\\t%0, %1\";
9329 + case 5: /* ARM register from ARM register */
9330 + return \"mov\\t%0, %1\\t%@ __fp16\";
9331 + case 6: /* S register from ARM register */
9332 + return \"fmsr\\t%0, %1\";
9333 + case 7: /* ARM register from S register */
9334 + return \"fmrs\\t%0, %1\";
9335 + case 8: /* ARM register from constant */
9336 + {
9337 + REAL_VALUE_TYPE r;
9338 + long bits;
9339 + rtx ops[4];
9340 +
9341 + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
9342 + bits = real_to_target (NULL, &r, HFmode);
9343 + ops[0] = operands[0];
9344 + ops[1] = GEN_INT (bits);
9345 + ops[2] = GEN_INT (bits & 0xff00);
9346 + ops[3] = GEN_INT (bits & 0x00ff);
9347 +
9348 + if (arm_arch_thumb2)
9349 + output_asm_insn (\"movw\\t%0, %1\", ops);
9350 + else
9351 + output_asm_insn (\"mov\\t%0, %2\;orr\\t%0, %0, %3\", ops);
9352 + return \"\";
9353 + }
9354 + default:
9355 + gcc_unreachable ();
9356 + }
9357 + "
9358 + [(set_attr "conds" "unconditional")
9359 + (set_attr "type" "*,*,load1,store1,fcpys,*,r_2_f,f_2_r,*")
9360 + (set_attr "neon_type" "neon_vld1_1_2_regs,neon_vst1_1_2_regs_vst2_2_regs,*,*,*,*,*,*,*")
9361 + (set_attr "length" "4,4,4,4,4,4,4,4,8")]
9362 +)
9363 +
9364 +;; FP16 without element load/store instructions.
9365 +(define_insn "*movhf_vfp"
9366 + [(set (match_operand:HF 0 "nonimmediate_operand" "=r,m,t,r,t,r,r")
9367 + (match_operand:HF 1 "general_operand" " m,r,t,r,r,t,F"))]
9368 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FP16 && !TARGET_NEON_FP16
9369 + && ( s_register_operand (operands[0], HFmode)
9370 + || s_register_operand (operands[1], HFmode))"
9371 + "*
9372 + switch (which_alternative)
9373 + {
9374 + case 0: /* ARM register from memory */
9375 + return \"ldrh\\t%0, %1\\t%@ __fp16\";
9376 + case 1: /* memory from ARM register */
9377 + return \"strh\\t%1, %0\\t%@ __fp16\";
9378 + case 2: /* S register from S register */
9379 + return \"fcpys\\t%0, %1\";
9380 + case 3: /* ARM register from ARM register */
9381 + return \"mov\\t%0, %1\\t%@ __fp16\";
9382 + case 4: /* S register from ARM register */
9383 + return \"fmsr\\t%0, %1\";
9384 + case 5: /* ARM register from S register */
9385 + return \"fmrs\\t%0, %1\";
9386 + case 6: /* ARM register from constant */
9387 + {
9388 + REAL_VALUE_TYPE r;
9389 + long bits;
9390 + rtx ops[4];
9391 +
9392 + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
9393 + bits = real_to_target (NULL, &r, HFmode);
9394 + ops[0] = operands[0];
9395 + ops[1] = GEN_INT (bits);
9396 + ops[2] = GEN_INT (bits & 0xff00);
9397 + ops[3] = GEN_INT (bits & 0x00ff);
9398 +
9399 + if (arm_arch_thumb2)
9400 + output_asm_insn (\"movw\\t%0, %1\", ops);
9401 + else
9402 + output_asm_insn (\"mov\\t%0, %2\;orr\\t%0, %0, %3\", ops);
9403 + return \"\";
9404 + }
9405 + default:
9406 + gcc_unreachable ();
9407 + }
9408 + "
9409 + [(set_attr "conds" "unconditional")
9410 + (set_attr "type" "load1,store1,fcpys,*,r_2_f,f_2_r,*")
9411 + (set_attr "length" "4,4,4,4,4,4,8")]
9412 +)
9413 +
9414
9415 ;; SFmode moves
9416 ;; Disparage the w<->r cases because reloading an invalid address is
9417 @@ -222,6 +355,8 @@
9418 [(set_attr "predicable" "yes")
9419 (set_attr "type"
9420 "r_2_f,f_2_r,fconsts,f_loads,f_stores,load1,store1,fcpys,*")
9421 + (set_attr "neon_type" "neon_mcr,neon_mrc,*,*,*,*,*,neon_vmov,*")
9422 + (set_attr "insn" "*,*,*,*,*,*,*,*,mov")
9423 (set_attr "pool_range" "*,*,*,1020,*,4096,*,*,*")
9424 (set_attr "neg_pool_range" "*,*,*,1008,*,4080,*,*,*")]
9425 )
9426 @@ -258,6 +393,8 @@
9427 [(set_attr "predicable" "yes")
9428 (set_attr "type"
9429 "r_2_f,f_2_r,fconsts,f_load,f_store,load1,store1,fcpys,*")
9430 + (set_attr "neon_type" "neon_mcr,neon_mrc,*,*,*,*,*,neon_vmov,*")
9431 + (set_attr "insn" "*,*,*,*,*,*,*,*,mov")
9432 (set_attr "pool_range" "*,*,*,1020,*,4092,*,*,*")
9433 (set_attr "neg_pool_range" "*,*,*,1008,*,0,*,*,*")]
9434 )
9435 @@ -267,7 +404,7 @@
9436
9437 (define_insn "*movdf_vfp"
9438 [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,r, m,w ,Uv,w,r")
9439 - (match_operand:DF 1 "soft_df_operand" " ?r,w,Dv,mF,r,UvF,w, w,r"))]
9440 + (match_operand:DF 1 "soft_df_operand" " ?r,w,Dy,mF,r,UvF,w, w,r"))]
9441 "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP
9442 && ( register_operand (operands[0], DFmode)
9443 || register_operand (operands[1], DFmode))"
9444 @@ -280,13 +417,17 @@
9445 case 1:
9446 return \"fmrrd%?\\t%Q0, %R0, %P1\";
9447 case 2:
9448 + gcc_assert (TARGET_VFP_DOUBLE);
9449 return \"fconstd%?\\t%P0, #%G1\";
9450 case 3: case 4:
9451 return output_move_double (operands);
9452 case 5: case 6:
9453 return output_move_vfp (operands);
9454 case 7:
9455 - return \"fcpyd%?\\t%P0, %P1\";
9456 + if (TARGET_VFP_SINGLE)
9457 + return \"fcpys%?\\t%0, %1\;fcpys%?\\t%p0, %p1\";
9458 + else
9459 + return \"fcpyd%?\\t%P0, %P1\";
9460 case 8:
9461 return \"#\";
9462 default:
9463 @@ -296,14 +437,21 @@
9464 "
9465 [(set_attr "type"
9466 "r_2_f,f_2_r,fconstd,f_loadd,f_stored,load2,store2,ffarithd,*")
9467 - (set_attr "length" "4,4,4,8,8,4,4,4,8")
9468 + (set_attr "neon_type" "neon_mcr_2_mcrr,neon_mrrc,*,*,*,*,*,neon_vmov,*")
9469 + (set (attr "length") (cond [(eq_attr "alternative" "3,4,8") (const_int 8)
9470 + (eq_attr "alternative" "7")
9471 + (if_then_else
9472 + (eq (symbol_ref "TARGET_VFP_SINGLE") (const_int 1))
9473 + (const_int 8)
9474 + (const_int 4))]
9475 + (const_int 4)))
9476 (set_attr "pool_range" "*,*,*,1020,*,1020,*,*,*")
9477 (set_attr "neg_pool_range" "*,*,*,1008,*,1008,*,*,*")]
9478 )
9479
9480 (define_insn "*thumb2_movdf_vfp"
9481 [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,r, m,w ,Uv,w,r")
9482 - (match_operand:DF 1 "soft_df_operand" " ?r,w,Dv,mF,r,UvF,w, w,r"))]
9483 + (match_operand:DF 1 "soft_df_operand" " ?r,w,Dy,mF,r,UvF,w, w,r"))]
9484 "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
9485 "*
9486 {
9487 @@ -314,13 +462,17 @@
9488 case 1:
9489 return \"fmrrd%?\\t%Q0, %R0, %P1\";
9490 case 2:
9491 + gcc_assert (TARGET_VFP_DOUBLE);
9492 return \"fconstd%?\\t%P0, #%G1\";
9493 case 3: case 4: case 8:
9494 return output_move_double (operands);
9495 case 5: case 6:
9496 return output_move_vfp (operands);
9497 case 7:
9498 - return \"fcpyd%?\\t%P0, %P1\";
9499 + if (TARGET_VFP_SINGLE)
9500 + return \"fcpys%?\\t%0, %1\;fcpys%?\\t%p0, %p1\";
9501 + else
9502 + return \"fcpyd%?\\t%P0, %P1\";
9503 default:
9504 abort ();
9505 }
9506 @@ -328,7 +480,14 @@
9507 "
9508 [(set_attr "type"
9509 "r_2_f,f_2_r,fconstd,load2,store2,f_load,f_store,ffarithd,*")
9510 - (set_attr "length" "4,4,4,8,8,4,4,4,8")
9511 + (set_attr "neon_type" "neon_mcr_2_mcrr,neon_mrrc,*,*,*,*,*,neon_vmov,*")
9512 + (set (attr "length") (cond [(eq_attr "alternative" "3,4,8") (const_int 8)
9513 + (eq_attr "alternative" "7")
9514 + (if_then_else
9515 + (eq (symbol_ref "TARGET_VFP_SINGLE") (const_int 1))
9516 + (const_int 8)
9517 + (const_int 4))]
9518 + (const_int 4)))
9519 (set_attr "pool_range" "*,*,*,4096,*,1020,*,*,*")
9520 (set_attr "neg_pool_range" "*,*,*,0,*,1008,*,*,*")]
9521 )
9522 @@ -356,7 +515,8 @@
9523 fmrs%D3\\t%0, %2\;fmrs%d3\\t%0, %1"
9524 [(set_attr "conds" "use")
9525 (set_attr "length" "4,4,8,4,4,8,4,4,8")
9526 - (set_attr "type" "fcpys,fcpys,fcpys,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
9527 + (set_attr "type" "fcpys,fcpys,fcpys,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")
9528 + (set_attr "neon_type" "neon_vmov,neon_vmov,neon_vmov,neon_mcr,neon_mcr,neon_mcr,neon_mrc,neon_mrc,neon_mrc")]
9529 )
9530
9531 (define_insn "*thumb2_movsfcc_vfp"
9532 @@ -379,7 +539,8 @@
9533 ite\\t%D3\;fmrs%D3\\t%0, %2\;fmrs%d3\\t%0, %1"
9534 [(set_attr "conds" "use")
9535 (set_attr "length" "6,6,10,6,6,10,6,6,10")
9536 - (set_attr "type" "fcpys,fcpys,fcpys,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
9537 + (set_attr "type" "fcpys,fcpys,fcpys,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")
9538 + (set_attr "neon_type" "neon_vmov,neon_vmov,neon_vmov,neon_mcr,neon_mcr,neon_mcr,neon_mrc,neon_mrc,neon_mrc")]
9539 )
9540
9541 (define_insn "*movdfcc_vfp"
9542 @@ -389,7 +550,7 @@
9543 [(match_operand 4 "cc_register" "") (const_int 0)])
9544 (match_operand:DF 1 "s_register_operand" "0,w,w,0,?r,?r,0,w,w")
9545 (match_operand:DF 2 "s_register_operand" "w,0,w,?r,0,?r,w,0,w")))]
9546 - "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
9547 + "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9548 "@
9549 fcpyd%D3\\t%P0, %P2
9550 fcpyd%d3\\t%P0, %P1
9551 @@ -402,7 +563,8 @@
9552 fmrrd%D3\\t%Q0, %R0, %P2\;fmrrd%d3\\t%Q0, %R0, %P1"
9553 [(set_attr "conds" "use")
9554 (set_attr "length" "4,4,8,4,4,8,4,4,8")
9555 - (set_attr "type" "ffarithd,ffarithd,ffarithd,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
9556 + (set_attr "type" "ffarithd,ffarithd,ffarithd,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")
9557 + (set_attr "neon_type" "neon_vmov,neon_vmov,neon_vmov,neon_mcr_2_mcrr,neon_mcr_2_mcrr,neon_mcr_2_mcrr,neon_mrrc,neon_mrrc,neon_mrrc")]
9558 )
9559
9560 (define_insn "*thumb2_movdfcc_vfp"
9561 @@ -412,7 +574,7 @@
9562 [(match_operand 4 "cc_register" "") (const_int 0)])
9563 (match_operand:DF 1 "s_register_operand" "0,w,w,0,?r,?r,0,w,w")
9564 (match_operand:DF 2 "s_register_operand" "w,0,w,?r,0,?r,w,0,w")))]
9565 - "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
9566 + "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9567 "@
9568 it\\t%D3\;fcpyd%D3\\t%P0, %P2
9569 it\\t%d3\;fcpyd%d3\\t%P0, %P1
9570 @@ -425,7 +587,8 @@
9571 ite\\t%D3\;fmrrd%D3\\t%Q0, %R0, %P2\;fmrrd%d3\\t%Q0, %R0, %P1"
9572 [(set_attr "conds" "use")
9573 (set_attr "length" "6,6,10,6,6,10,6,6,10")
9574 - (set_attr "type" "ffarithd,ffarithd,ffarithd,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
9575 + (set_attr "type" "ffarithd,ffarithd,ffarithd,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")
9576 + (set_attr "neon_type" "neon_vmov,neon_vmov,neon_vmov,neon_mcr_2_mcrr,neon_mcr_2_mcrr,neon_mcr_2_mcrr,neon_mrrc,neon_mrrc,neon_mrrc")]
9577 )
9578
9579
9580 @@ -443,7 +606,7 @@
9581 (define_insn "*absdf2_vfp"
9582 [(set (match_operand:DF 0 "s_register_operand" "=w")
9583 (abs:DF (match_operand:DF 1 "s_register_operand" "w")))]
9584 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9585 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9586 "fabsd%?\\t%P0, %P1"
9587 [(set_attr "predicable" "yes")
9588 (set_attr "type" "ffarithd")]
9589 @@ -463,12 +626,12 @@
9590 (define_insn_and_split "*negdf2_vfp"
9591 [(set (match_operand:DF 0 "s_register_operand" "=w,?r,?r")
9592 (neg:DF (match_operand:DF 1 "s_register_operand" "w,0,r")))]
9593 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9594 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9595 "@
9596 fnegd%?\\t%P0, %P1
9597 #
9598 #"
9599 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && reload_completed
9600 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE && reload_completed
9601 && arm_general_register_operand (operands[0], DFmode)"
9602 [(set (match_dup 0) (match_dup 1))]
9603 "
9604 @@ -523,7 +686,7 @@
9605 [(set (match_operand:DF 0 "s_register_operand" "=w")
9606 (plus:DF (match_operand:DF 1 "s_register_operand" "w")
9607 (match_operand:DF 2 "s_register_operand" "w")))]
9608 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9609 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9610 "faddd%?\\t%P0, %P1, %P2"
9611 [(set_attr "predicable" "yes")
9612 (set_attr "type" "faddd")]
9613 @@ -544,7 +707,7 @@
9614 [(set (match_operand:DF 0 "s_register_operand" "=w")
9615 (minus:DF (match_operand:DF 1 "s_register_operand" "w")
9616 (match_operand:DF 2 "s_register_operand" "w")))]
9617 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9618 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9619 "fsubd%?\\t%P0, %P1, %P2"
9620 [(set_attr "predicable" "yes")
9621 (set_attr "type" "faddd")]
9622 @@ -567,7 +730,7 @@
9623 [(set (match_operand:DF 0 "s_register_operand" "+w")
9624 (div:DF (match_operand:DF 1 "s_register_operand" "w")
9625 (match_operand:DF 2 "s_register_operand" "w")))]
9626 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9627 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9628 "fdivd%?\\t%P0, %P1, %P2"
9629 [(set_attr "predicable" "yes")
9630 (set_attr "type" "fdivd")]
9631 @@ -590,7 +753,7 @@
9632 [(set (match_operand:DF 0 "s_register_operand" "+w")
9633 (mult:DF (match_operand:DF 1 "s_register_operand" "w")
9634 (match_operand:DF 2 "s_register_operand" "w")))]
9635 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9636 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9637 "fmuld%?\\t%P0, %P1, %P2"
9638 [(set_attr "predicable" "yes")
9639 (set_attr "type" "fmuld")]
9640 @@ -611,7 +774,7 @@
9641 [(set (match_operand:DF 0 "s_register_operand" "+w")
9642 (mult:DF (neg:DF (match_operand:DF 1 "s_register_operand" "w"))
9643 (match_operand:DF 2 "s_register_operand" "w")))]
9644 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9645 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9646 "fnmuld%?\\t%P0, %P1, %P2"
9647 [(set_attr "predicable" "yes")
9648 (set_attr "type" "fmuld")]
9649 @@ -626,7 +789,8 @@
9650 (plus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "t")
9651 (match_operand:SF 3 "s_register_operand" "t"))
9652 (match_operand:SF 1 "s_register_operand" "0")))]
9653 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9654 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP
9655 + && (!arm_tune_marvell_f || optimize_size)"
9656 "fmacs%?\\t%0, %2, %3"
9657 [(set_attr "predicable" "yes")
9658 (set_attr "type" "fmacs")]
9659 @@ -637,7 +801,8 @@
9660 (plus:DF (mult:DF (match_operand:DF 2 "s_register_operand" "w")
9661 (match_operand:DF 3 "s_register_operand" "w"))
9662 (match_operand:DF 1 "s_register_operand" "0")))]
9663 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9664 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE
9665 + && (!arm_tune_marvell_f || optimize_size)"
9666 "fmacd%?\\t%P0, %P2, %P3"
9667 [(set_attr "predicable" "yes")
9668 (set_attr "type" "fmacd")]
9669 @@ -649,7 +814,8 @@
9670 (minus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "t")
9671 (match_operand:SF 3 "s_register_operand" "t"))
9672 (match_operand:SF 1 "s_register_operand" "0")))]
9673 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9674 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP
9675 + && (!arm_tune_marvell_f || optimize_size)"
9676 "fmscs%?\\t%0, %2, %3"
9677 [(set_attr "predicable" "yes")
9678 (set_attr "type" "fmacs")]
9679 @@ -660,7 +826,8 @@
9680 (minus:DF (mult:DF (match_operand:DF 2 "s_register_operand" "w")
9681 (match_operand:DF 3 "s_register_operand" "w"))
9682 (match_operand:DF 1 "s_register_operand" "0")))]
9683 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9684 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE
9685 + && (!arm_tune_marvell_f || optimize_size)"
9686 "fmscd%?\\t%P0, %P2, %P3"
9687 [(set_attr "predicable" "yes")
9688 (set_attr "type" "fmacd")]
9689 @@ -672,7 +839,8 @@
9690 (minus:SF (match_operand:SF 1 "s_register_operand" "0")
9691 (mult:SF (match_operand:SF 2 "s_register_operand" "t")
9692 (match_operand:SF 3 "s_register_operand" "t"))))]
9693 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9694 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP
9695 + && (!arm_tune_marvell_f || optimize_size)"
9696 "fnmacs%?\\t%0, %2, %3"
9697 [(set_attr "predicable" "yes")
9698 (set_attr "type" "fmacs")]
9699 @@ -683,7 +851,8 @@
9700 (minus:DF (match_operand:DF 1 "s_register_operand" "0")
9701 (mult:DF (match_operand:DF 2 "s_register_operand" "w")
9702 (match_operand:DF 3 "s_register_operand" "w"))))]
9703 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9704 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE
9705 + && (!arm_tune_marvell_f || optimize_size)"
9706 "fnmacd%?\\t%P0, %P2, %P3"
9707 [(set_attr "predicable" "yes")
9708 (set_attr "type" "fmacd")]
9709 @@ -697,7 +866,8 @@
9710 (neg:SF (match_operand:SF 2 "s_register_operand" "t"))
9711 (match_operand:SF 3 "s_register_operand" "t"))
9712 (match_operand:SF 1 "s_register_operand" "0")))]
9713 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9714 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP
9715 + && (!arm_tune_marvell_f || optimize_size)"
9716 "fnmscs%?\\t%0, %2, %3"
9717 [(set_attr "predicable" "yes")
9718 (set_attr "type" "fmacs")]
9719 @@ -709,7 +879,8 @@
9720 (neg:DF (match_operand:DF 2 "s_register_operand" "w"))
9721 (match_operand:DF 3 "s_register_operand" "w"))
9722 (match_operand:DF 1 "s_register_operand" "0")))]
9723 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9724 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE
9725 + && (!arm_tune_marvell_f || optimize_size)"
9726 "fnmscd%?\\t%P0, %P2, %P3"
9727 [(set_attr "predicable" "yes")
9728 (set_attr "type" "fmacd")]
9729 @@ -721,7 +892,7 @@
9730 (define_insn "*extendsfdf2_vfp"
9731 [(set (match_operand:DF 0 "s_register_operand" "=w")
9732 (float_extend:DF (match_operand:SF 1 "s_register_operand" "t")))]
9733 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9734 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9735 "fcvtds%?\\t%P0, %1"
9736 [(set_attr "predicable" "yes")
9737 (set_attr "type" "f_cvt")]
9738 @@ -730,12 +901,30 @@
9739 (define_insn "*truncdfsf2_vfp"
9740 [(set (match_operand:SF 0 "s_register_operand" "=t")
9741 (float_truncate:SF (match_operand:DF 1 "s_register_operand" "w")))]
9742 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9743 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9744 "fcvtsd%?\\t%0, %P1"
9745 [(set_attr "predicable" "yes")
9746 (set_attr "type" "f_cvt")]
9747 )
9748
9749 +(define_insn "extendhfsf2"
9750 + [(set (match_operand:SF 0 "s_register_operand" "=t")
9751 + (float_extend:SF (match_operand:HF 1 "s_register_operand" "t")))]
9752 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FP16"
9753 + "vcvtb%?.f32.f16\\t%0, %1"
9754 + [(set_attr "predicable" "yes")
9755 + (set_attr "type" "f_cvt")]
9756 +)
9757 +
9758 +(define_insn "truncsfhf2"
9759 + [(set (match_operand:HF 0 "s_register_operand" "=t")
9760 + (float_truncate:HF (match_operand:SF 1 "s_register_operand" "t")))]
9761 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FP16"
9762 + "vcvtb%?.f16.f32\\t%0, %1"
9763 + [(set_attr "predicable" "yes")
9764 + (set_attr "type" "f_cvt")]
9765 +)
9766 +
9767 (define_insn "*truncsisf2_vfp"
9768 [(set (match_operand:SI 0 "s_register_operand" "=t")
9769 (fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" "t"))))]
9770 @@ -748,7 +937,7 @@
9771 (define_insn "*truncsidf2_vfp"
9772 [(set (match_operand:SI 0 "s_register_operand" "=t")
9773 (fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" "w"))))]
9774 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9775 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9776 "ftosizd%?\\t%0, %P1"
9777 [(set_attr "predicable" "yes")
9778 (set_attr "type" "f_cvt")]
9779 @@ -767,7 +956,7 @@
9780 (define_insn "fixuns_truncdfsi2"
9781 [(set (match_operand:SI 0 "s_register_operand" "=t")
9782 (unsigned_fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" "t"))))]
9783 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9784 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9785 "ftouizd%?\\t%0, %P1"
9786 [(set_attr "predicable" "yes")
9787 (set_attr "type" "f_cvt")]
9788 @@ -786,7 +975,7 @@
9789 (define_insn "*floatsidf2_vfp"
9790 [(set (match_operand:DF 0 "s_register_operand" "=w")
9791 (float:DF (match_operand:SI 1 "s_register_operand" "t")))]
9792 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9793 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9794 "fsitod%?\\t%P0, %1"
9795 [(set_attr "predicable" "yes")
9796 (set_attr "type" "f_cvt")]
9797 @@ -805,7 +994,7 @@
9798 (define_insn "floatunssidf2"
9799 [(set (match_operand:DF 0 "s_register_operand" "=w")
9800 (unsigned_float:DF (match_operand:SI 1 "s_register_operand" "t")))]
9801 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9802 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9803 "fuitod%?\\t%P0, %1"
9804 [(set_attr "predicable" "yes")
9805 (set_attr "type" "f_cvt")]
9806 @@ -826,7 +1015,7 @@
9807 (define_insn "*sqrtdf2_vfp"
9808 [(set (match_operand:DF 0 "s_register_operand" "=w")
9809 (sqrt:DF (match_operand:DF 1 "s_register_operand" "w")))]
9810 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9811 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9812 "fsqrtd%?\\t%P0, %P1"
9813 [(set_attr "predicable" "yes")
9814 (set_attr "type" "fdivd")]
9815 @@ -878,9 +1067,9 @@
9816 [(set (reg:CCFP CC_REGNUM)
9817 (compare:CCFP (match_operand:DF 0 "s_register_operand" "w")
9818 (match_operand:DF 1 "vfp_compare_operand" "wG")))]
9819 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9820 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9821 "#"
9822 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9823 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9824 [(set (reg:CCFP VFPCC_REGNUM)
9825 (compare:CCFP (match_dup 0)
9826 (match_dup 1)))
9827 @@ -893,9 +1082,9 @@
9828 [(set (reg:CCFPE CC_REGNUM)
9829 (compare:CCFPE (match_operand:DF 0 "s_register_operand" "w")
9830 (match_operand:DF 1 "vfp_compare_operand" "wG")))]
9831 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9832 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9833 "#"
9834 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9835 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9836 [(set (reg:CCFPE VFPCC_REGNUM)
9837 (compare:CCFPE (match_dup 0)
9838 (match_dup 1)))
9839 @@ -935,7 +1124,7 @@
9840 [(set (reg:CCFP VFPCC_REGNUM)
9841 (compare:CCFP (match_operand:DF 0 "s_register_operand" "w,w")
9842 (match_operand:DF 1 "vfp_compare_operand" "w,G")))]
9843 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9844 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9845 "@
9846 fcmpd%?\\t%P0, %P1
9847 fcmpzd%?\\t%P0"
9848 @@ -947,7 +1136,7 @@
9849 [(set (reg:CCFPE VFPCC_REGNUM)
9850 (compare:CCFPE (match_operand:DF 0 "s_register_operand" "w,w")
9851 (match_operand:DF 1 "vfp_compare_operand" "w,G")))]
9852 - "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
9853 + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
9854 "@
9855 fcmped%?\\t%P0, %P1
9856 fcmpezd%?\\t%P0"
9857 --- /dev/null
9858 +++ b/gcc/config/i386/atom.md
9859 @@ -0,0 +1,795 @@
9860 +;; Atom Scheduling
9861 +;; Copyright (C) 2009 Free Software Foundation, Inc.
9862 +;;
9863 +;; This file is part of GCC.
9864 +;;
9865 +;; GCC is free software; you can redistribute it and/or modify
9866 +;; it under the terms of the GNU General Public License as published by
9867 +;; the Free Software Foundation; either version 3, or (at your option)
9868 +;; any later version.
9869 +;;
9870 +;; GCC is distributed in the hope that it will be useful,
9871 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
9872 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9873 +;; GNU General Public License for more details.
9874 +;;
9875 +;; You should have received a copy of the GNU General Public License
9876 +;; along with GCC; see the file COPYING3. If not see
9877 +;; <http://www.gnu.org/licenses/>.
9878 +;;
9879 +;; Atom is an in-order core with two integer pipelines.
9880 +
9881 +
9882 +(define_attr "atom_unit" "sishuf,simul,jeu,complex,other"
9883 + (const_string "other"))
9884 +
9885 +(define_attr "atom_sse_attr" "rcp,movdup,lfence,fence,prefetch,sqrt,mxcsr,other"
9886 + (const_string "other"))
9887 +
9888 +(define_automaton "atom")
9889 +
9890 +;; Atom has two ports: port 0 and port 1 connecting to all execution units
9891 +(define_cpu_unit "atom-port-0,atom-port-1" "atom")
9892 +
9893 +;; EU: Execution Unit
9894 +;; Atom EUs are connected by port 0 or port 1.
9895 +
9896 +(define_cpu_unit "atom-eu-0, atom-eu-1,
9897 + atom-imul-1, atom-imul-2, atom-imul-3, atom-imul-4"
9898 + "atom")
9899 +
9900 +;; Some EUs have duplicated copies and can be accessed via either
9901 +;; port 0 or port 1
9902 +;; (define_reservation "atom-port-either" "(atom-port-0 | atom-port-1)")
9903 +
9904 +;;; Some instructions are dual-pipe execution and need both ports
9905 +;;; Complex multi-op macro-instructions need both ports and all EUs
9906 +(define_reservation "atom-port-dual" "(atom-port-0 + atom-port-1)")
9907 +(define_reservation "atom-all-eu" "(atom-eu-0 + atom-eu-1 +
9908 + atom-imul-1 + atom-imul-2 + atom-imul-3 +
9909 + atom-imul-4)")
9910 +
9911 +;;; Most simple instructions have 1 cycle latency.  Some of them
9912 +;;; issue in port 0, some in port 1 and some in either port.
9913 +(define_reservation "atom-simple-0" "(atom-port-0 + atom-eu-0)")
9914 +(define_reservation "atom-simple-1" "(atom-port-1 + atom-eu-1)")
9915 +(define_reservation "atom-simple-either" "(atom-simple-0 | atom-simple-1)")
9916 +
9917 +;;; Some insn issues in port 0 with 3 cycle latency and 1 cycle tput
9918 +(define_reservation "atom-eu-0-3-1" "(atom-port-0 + atom-eu-0, nothing*2)")
9919 +
9920 +;;; fmul insn can have 4 or 5 cycles latency
9921 +(define_reservation "atom-fmul-5c" "(atom-port-0 + atom-eu-0), nothing*4")
9922 +(define_reservation "atom-fmul-4c" "(atom-port-0 + atom-eu-0), nothing*3")
9923 +
9924 +;;; fadd can have 5 cycle latency depending on the instruction form
9925 +(define_reservation "atom-fadd-5c" "(atom-port-1 + atom-eu-1), nothing*5")
9926 +
9927 +;;; imul insn has 5 cycles latency
9928 +(define_reservation "atom-imul-32"
9929 + "atom-imul-1, atom-imul-2, atom-imul-3, atom-imul-4,
9930 + atom-port-0")
9931 +;;; imul instruction excludes other non-FP instructions.
9932 +(exclusion_set "atom-eu-0, atom-eu-1"
9933 + "atom-imul-1, atom-imul-2, atom-imul-3, atom-imul-4")
9934 +
9935 +;;; dual-execution instructions can have 1,2,4,5 cycle latency depending on
9936 +;;; the instruction form
9937 +(define_reservation "atom-dual-1c" "(atom-port-dual + atom-eu-0 + atom-eu-1)")
9938 +(define_reservation "atom-dual-2c"
9939 + "(atom-port-dual + atom-eu-0 + atom-eu-1, nothing)")
9940 +(define_reservation "atom-dual-5c"
9941 + "(atom-port-dual + atom-eu-0 + atom-eu-1, nothing*4)")
9942 +
9943 +;;; Complex macro-instruction has variants of latency, and uses both ports.
9944 +(define_reservation "atom-complex" "(atom-port-dual + atom-all-eu)")
9945 +
9946 +(define_insn_reservation "atom_other" 9
9947 + (and (eq_attr "cpu" "atom")
9948 + (and (eq_attr "type" "other")
9949 + (eq_attr "atom_unit" "!jeu")))
9950 + "atom-complex, atom-all-eu*8")
9951 +
9952 +;; return has type "other" with atom_unit "jeu"
9953 +(define_insn_reservation "atom_other_2" 1
9954 + (and (eq_attr "cpu" "atom")
9955 + (and (eq_attr "type" "other")
9956 + (eq_attr "atom_unit" "jeu")))
9957 + "atom-dual-1c")
9958 +
9959 +(define_insn_reservation "atom_multi" 9
9960 + (and (eq_attr "cpu" "atom")
9961 + (eq_attr "type" "multi"))
9962 + "atom-complex, atom-all-eu*8")
9963 +
9964 +;; Normal alu insns without carry
9965 +(define_insn_reservation "atom_alu" 1
9966 + (and (eq_attr "cpu" "atom")
9967 + (and (eq_attr "type" "alu")
9968 + (and (eq_attr "memory" "none")
9969 + (eq_attr "use_carry" "0"))))
9970 + "atom-simple-either")
9971 +
9972 +;; Normal alu insns without carry, with memory operand
9973 +(define_insn_reservation "atom_alu_mem" 1
9974 + (and (eq_attr "cpu" "atom")
9975 + (and (eq_attr "type" "alu")
9976 + (and (eq_attr "memory" "!none")
9977 + (eq_attr "use_carry" "0"))))
9978 + "atom-simple-either")
9979 +
9980 +;; Alu insn consuming CF, such as adc/sbb
9981 +(define_insn_reservation "atom_alu_carry" 1
9982 + (and (eq_attr "cpu" "atom")
9983 + (and (eq_attr "type" "alu")
9984 + (and (eq_attr "memory" "none")
9985 + (eq_attr "use_carry" "1"))))
9986 + "atom-simple-either")
9987 +
9988 +;; Alu insn consuming CF, such as adc/sbb, with memory operand
9989 +(define_insn_reservation "atom_alu_carry_mem" 1
9990 + (and (eq_attr "cpu" "atom")
9991 + (and (eq_attr "type" "alu")
9992 + (and (eq_attr "memory" "!none")
9993 + (eq_attr "use_carry" "1"))))
9994 + "atom-simple-either")
9995 +
9996 +(define_insn_reservation "atom_alu1" 1
9997 + (and (eq_attr "cpu" "atom")
9998 + (and (eq_attr "type" "alu1")
9999 + (eq_attr "memory" "none")))
10000 + "atom-simple-either")
10001 +
10002 +(define_insn_reservation "atom_alu1_mem" 1
10003 + (and (eq_attr "cpu" "atom")
10004 + (and (eq_attr "type" "alu1")
10005 + (eq_attr "memory" "!none")))
10006 + "atom-simple-either")
10007 +
10008 +(define_insn_reservation "atom_negnot" 1
10009 + (and (eq_attr "cpu" "atom")
10010 + (and (eq_attr "type" "negnot")
10011 + (eq_attr "memory" "none")))
10012 + "atom-simple-either")
10013 +
10014 +(define_insn_reservation "atom_negnot_mem" 1
10015 + (and (eq_attr "cpu" "atom")
10016 + (and (eq_attr "type" "negnot")
10017 + (eq_attr "memory" "!none")))
10018 + "atom-simple-either")
10019 +
10020 +(define_insn_reservation "atom_imov" 1
10021 + (and (eq_attr "cpu" "atom")
10022 + (and (eq_attr "type" "imov")
10023 + (eq_attr "memory" "none")))
10024 + "atom-simple-either")
10025 +
10026 +(define_insn_reservation "atom_imov_mem" 1
10027 + (and (eq_attr "cpu" "atom")
10028 + (and (eq_attr "type" "imov")
10029 + (eq_attr "memory" "!none")))
10030 + "atom-simple-either")
10031 +
10032 +;; 16<-16, 32<-32
10033 +(define_insn_reservation "atom_imovx" 1
10034 + (and (eq_attr "cpu" "atom")
10035 + (and (eq_attr "type" "imovx")
10036 + (and (eq_attr "memory" "none")
10037 + (ior (and (match_operand:HI 0 "register_operand")
10038 + (match_operand:HI 1 "general_operand"))
10039 + (and (match_operand:SI 0 "register_operand")
10040 + (match_operand:SI 1 "general_operand"))))))
10041 + "atom-simple-either")
10042 +
10043 +;; 16<-16, 32<-32, mem
10044 +(define_insn_reservation "atom_imovx_mem" 1
10045 + (and (eq_attr "cpu" "atom")
10046 + (and (eq_attr "type" "imovx")
10047 + (and (eq_attr "memory" "!none")
10048 + (ior (and (match_operand:HI 0 "register_operand")
10049 + (match_operand:HI 1 "general_operand"))
10050 + (and (match_operand:SI 0 "register_operand")
10051 + (match_operand:SI 1 "general_operand"))))))
10052 + "atom-simple-either")
10053 +
10054 +;; 32<-16, 32<-8, 64<-16, 64<-8, 64<-32, 8<-8
10055 +(define_insn_reservation "atom_imovx_2" 1
10056 + (and (eq_attr "cpu" "atom")
10057 + (and (eq_attr "type" "imovx")
10058 + (and (eq_attr "memory" "none")
10059 + (ior (match_operand:QI 0 "register_operand")
10060 + (ior (and (match_operand:SI 0 "register_operand")
10061 + (not (match_operand:SI 1 "general_operand")))
10062 + (match_operand:DI 0 "register_operand"))))))
10063 + "atom-simple-0")
10064 +
10065 +;; 32<-16, 32<-8, 64<-16, 64<-8, 64<-32, 8<-8, mem
10066 +(define_insn_reservation "atom_imovx_2_mem" 1
10067 + (and (eq_attr "cpu" "atom")
10068 + (and (eq_attr "type" "imovx")
10069 + (and (eq_attr "memory" "!none")
10070 + (ior (match_operand:QI 0 "register_operand")
10071 + (ior (and (match_operand:SI 0 "register_operand")
10072 + (not (match_operand:SI 1 "general_operand")))
10073 + (match_operand:DI 0 "register_operand"))))))
10074 + "atom-simple-0")
10075 +
10076 +;; 16<-8
10077 +(define_insn_reservation "atom_imovx_3" 3
10078 + (and (eq_attr "cpu" "atom")
10079 + (and (eq_attr "type" "imovx")
10080 + (and (match_operand:HI 0 "register_operand")
10081 + (match_operand:QI 1 "general_operand"))))
10082 + "atom-complex, atom-all-eu*2")
10083 +
10084 +(define_insn_reservation "atom_lea" 1
10085 + (and (eq_attr "cpu" "atom")
10086 + (and (eq_attr "type" "lea")
10087 + (eq_attr "mode" "!HI")))
10088 + "atom-simple-either")
10089 +
10090 +;; lea 16bit address is complex insn
10091 +(define_insn_reservation "atom_lea_2" 2
10092 + (and (eq_attr "cpu" "atom")
10093 + (and (eq_attr "type" "lea")
10094 + (eq_attr "mode" "HI")))
10095 + "atom-complex, atom-all-eu")
10096 +
10097 +(define_insn_reservation "atom_incdec" 1
10098 + (and (eq_attr "cpu" "atom")
10099 + (and (eq_attr "type" "incdec")
10100 + (eq_attr "memory" "none")))
10101 + "atom-simple-either")
10102 +
10103 +(define_insn_reservation "atom_incdec_mem" 1
10104 + (and (eq_attr "cpu" "atom")
10105 + (and (eq_attr "type" "incdec")
10106 + (eq_attr "memory" "!none")))
10107 + "atom-simple-either")
10108 +
10109 +;; simple shift instructions use the SHIFT eu; no memory operand
10110 +(define_insn_reservation "atom_ishift" 1
10111 + (and (eq_attr "cpu" "atom")
10112 + (and (eq_attr "type" "ishift")
10113 + (and (eq_attr "memory" "none") (eq_attr "prefix_0f" "0"))))
10114 + "atom-simple-0")
10115 +
10116 +;; simple shift instructions use the SHIFT eu; with memory operand
10117 +(define_insn_reservation "atom_ishift_mem" 1
10118 + (and (eq_attr "cpu" "atom")
10119 + (and (eq_attr "type" "ishift")
10120 + (and (eq_attr "memory" "!none") (eq_attr "prefix_0f" "0"))))
10121 + "atom-simple-0")
10122 +
10123 +;; DF shift (prefixed with 0f) is complex insn with latency of 7 cycles
10124 +(define_insn_reservation "atom_ishift_3" 7
10125 + (and (eq_attr "cpu" "atom")
10126 + (and (eq_attr "type" "ishift")
10127 + (eq_attr "prefix_0f" "1")))
10128 + "atom-complex, atom-all-eu*6")
10129 +
10130 +(define_insn_reservation "atom_ishift1" 1
10131 + (and (eq_attr "cpu" "atom")
10132 + (and (eq_attr "type" "ishift1")
10133 + (eq_attr "memory" "none")))
10134 + "atom-simple-0")
10135 +
10136 +(define_insn_reservation "atom_ishift1_mem" 1
10137 + (and (eq_attr "cpu" "atom")
10138 + (and (eq_attr "type" "ishift1")
10139 + (eq_attr "memory" "!none")))
10140 + "atom-simple-0")
10141 +
10142 +(define_insn_reservation "atom_rotate" 1
10143 + (and (eq_attr "cpu" "atom")
10144 + (and (eq_attr "type" "rotate")
10145 + (eq_attr "memory" "none")))
10146 + "atom-simple-0")
10147 +
10148 +(define_insn_reservation "atom_rotate_mem" 1
10149 + (and (eq_attr "cpu" "atom")
10150 + (and (eq_attr "type" "rotate")
10151 + (eq_attr "memory" "!none")))
10152 + "atom-simple-0")
10153 +
10154 +(define_insn_reservation "atom_rotate1" 1
10155 + (and (eq_attr "cpu" "atom")
10156 + (and (eq_attr "type" "rotate1")
10157 + (eq_attr "memory" "none")))
10158 + "atom-simple-0")
10159 +
10160 +(define_insn_reservation "atom_rotate1_mem" 1
10161 + (and (eq_attr "cpu" "atom")
10162 + (and (eq_attr "type" "rotate1")
10163 + (eq_attr "memory" "!none")))
10164 + "atom-simple-0")
10165 +
10166 +(define_insn_reservation "atom_imul" 5
10167 + (and (eq_attr "cpu" "atom")
10168 + (and (eq_attr "type" "imul")
10169 + (and (eq_attr "memory" "none") (eq_attr "mode" "SI"))))
10170 + "atom-imul-32")
10171 +
10172 +(define_insn_reservation "atom_imul_mem" 5
10173 + (and (eq_attr "cpu" "atom")
10174 + (and (eq_attr "type" "imul")
10175 + (and (eq_attr "memory" "!none") (eq_attr "mode" "SI"))))
10176 + "atom-imul-32")
10177 +
10178 +;; latency set to 10 as common 64x64 imul
10179 +(define_insn_reservation "atom_imul_3" 10
10180 + (and (eq_attr "cpu" "atom")
10181 + (and (eq_attr "type" "imul")
10182 + (eq_attr "mode" "!SI")))
10183 + "atom-complex, atom-all-eu*9")
10184 +
10185 +(define_insn_reservation "atom_idiv" 65
10186 + (and (eq_attr "cpu" "atom")
10187 + (eq_attr "type" "idiv"))
10188 + "atom-complex, atom-all-eu*32, nothing*32")
10189 +
10190 +(define_insn_reservation "atom_icmp" 1
10191 + (and (eq_attr "cpu" "atom")
10192 + (and (eq_attr "type" "icmp")
10193 + (eq_attr "memory" "none")))
10194 + "atom-simple-either")
10195 +
10196 +(define_insn_reservation "atom_icmp_mem" 1
10197 + (and (eq_attr "cpu" "atom")
10198 + (and (eq_attr "type" "icmp")
10199 + (eq_attr "memory" "!none")))
10200 + "atom-simple-either")
10201 +
10202 +(define_insn_reservation "atom_test" 1
10203 + (and (eq_attr "cpu" "atom")
10204 + (and (eq_attr "type" "test")
10205 + (eq_attr "memory" "none")))
10206 + "atom-simple-either")
10207 +
10208 +(define_insn_reservation "atom_test_mem" 1
10209 + (and (eq_attr "cpu" "atom")
10210 + (and (eq_attr "type" "test")
10211 + (eq_attr "memory" "!none")))
10212 + "atom-simple-either")
10213 +
10214 +(define_insn_reservation "atom_ibr" 1
10215 + (and (eq_attr "cpu" "atom")
10216 + (and (eq_attr "type" "ibr")
10217 + (eq_attr "memory" "!load")))
10218 + "atom-simple-1")
10219 +
10220 +;; complex if the jump target is loaded from memory
10221 +(define_insn_reservation "atom_ibr_2" 2
10222 + (and (eq_attr "cpu" "atom")
10223 + (and (eq_attr "type" "ibr")
10224 + (eq_attr "memory" "load")))
10225 + "atom-complex, atom-all-eu")
10226 +
10227 +(define_insn_reservation "atom_setcc" 1
10228 + (and (eq_attr "cpu" "atom")
10229 + (and (eq_attr "type" "setcc")
10230 + (eq_attr "memory" "!store")))
10231 + "atom-simple-either")
10232 +
10233 +;; 2 cycles complex if target is in memory
10234 +(define_insn_reservation "atom_setcc_2" 2
10235 + (and (eq_attr "cpu" "atom")
10236 + (and (eq_attr "type" "setcc")
10237 + (eq_attr "memory" "store")))
10238 + "atom-complex, atom-all-eu")
10239 +
10240 +(define_insn_reservation "atom_icmov" 1
10241 + (and (eq_attr "cpu" "atom")
10242 + (and (eq_attr "type" "icmov")
10243 + (eq_attr "memory" "none")))
10244 + "atom-simple-either")
10245 +
10246 +(define_insn_reservation "atom_icmov_mem" 1
10247 + (and (eq_attr "cpu" "atom")
10248 + (and (eq_attr "type" "icmov")
10249 + (eq_attr "memory" "!none")))
10250 + "atom-simple-either")
10251 +
10252 +;; UCODE if segreg, ignored
10253 +(define_insn_reservation "atom_push" 2
10254 + (and (eq_attr "cpu" "atom")
10255 + (eq_attr "type" "push"))
10256 + "atom-dual-2c")
10257 +
10258 +;; pop r64 is 1 cycle. UCODE if segreg, ignored
10259 +(define_insn_reservation "atom_pop" 1
10260 + (and (eq_attr "cpu" "atom")
10261 + (and (eq_attr "type" "pop")
10262 + (eq_attr "mode" "DI")))
10263 + "atom-dual-1c")
10264 +
10265 +;; pop non-r64 is 2 cycles. UCODE if segreg, ignored
10266 +(define_insn_reservation "atom_pop_2" 2
10267 + (and (eq_attr "cpu" "atom")
10268 + (and (eq_attr "type" "pop")
10269 + (eq_attr "mode" "!DI")))
10270 + "atom-dual-2c")
10271 +
10272 +;; UCODE if segreg, ignored
10273 +(define_insn_reservation "atom_call" 1
10274 + (and (eq_attr "cpu" "atom")
10275 + (eq_attr "type" "call"))
10276 + "atom-dual-1c")
10277 +
10278 +(define_insn_reservation "atom_callv" 1
10279 + (and (eq_attr "cpu" "atom")
10280 + (eq_attr "type" "callv"))
10281 + "atom-dual-1c")
10282 +
10283 +(define_insn_reservation "atom_leave" 3
10284 + (and (eq_attr "cpu" "atom")
10285 + (eq_attr "type" "leave"))
10286 + "atom-complex, atom-all-eu*2")
10287 +
10288 +(define_insn_reservation "atom_str" 3
10289 + (and (eq_attr "cpu" "atom")
10290 + (eq_attr "type" "str"))
10291 + "atom-complex, atom-all-eu*2")
10292 +
10293 +(define_insn_reservation "atom_sselog" 1
10294 + (and (eq_attr "cpu" "atom")
10295 + (and (eq_attr "type" "sselog")
10296 + (eq_attr "memory" "none")))
10297 + "atom-simple-either")
10298 +
10299 +(define_insn_reservation "atom_sselog_mem" 1
10300 + (and (eq_attr "cpu" "atom")
10301 + (and (eq_attr "type" "sselog")
10302 + (eq_attr "memory" "!none")))
10303 + "atom-simple-either")
10304 +
10305 +(define_insn_reservation "atom_sselog1" 1
10306 + (and (eq_attr "cpu" "atom")
10307 + (and (eq_attr "type" "sselog1")
10308 + (eq_attr "memory" "none")))
10309 + "atom-simple-0")
10310 +
10311 +(define_insn_reservation "atom_sselog1_mem" 1
10312 + (and (eq_attr "cpu" "atom")
10313 + (and (eq_attr "type" "sselog1")
10314 + (eq_attr "memory" "!none")))
10315 + "atom-simple-0")
10316 +
10317 +;; not pmad, not psad
10318 +(define_insn_reservation "atom_sseiadd" 1
10319 + (and (eq_attr "cpu" "atom")
10320 + (and (eq_attr "type" "sseiadd")
10321 + (and (not (match_operand:V2DI 0 "register_operand"))
10322 + (and (eq_attr "atom_unit" "!simul")
10323 + (eq_attr "atom_unit" "!complex")))))
10324 + "atom-simple-either")
10325 +
10326 +;; pmad, psad and 64
10327 +(define_insn_reservation "atom_sseiadd_2" 4
10328 + (and (eq_attr "cpu" "atom")
10329 + (and (eq_attr "type" "sseiadd")
10330 + (and (not (match_operand:V2DI 0 "register_operand"))
10331 + (and (eq_attr "atom_unit" "simul" )
10332 + (eq_attr "mode" "DI")))))
10333 + "atom-fmul-4c")
10334 +
10335 +;; pmad, psad and 128
10336 +(define_insn_reservation "atom_sseiadd_3" 5
10337 + (and (eq_attr "cpu" "atom")
10338 + (and (eq_attr "type" "sseiadd")
10339 + (and (not (match_operand:V2DI 0 "register_operand"))
10340 + (and (eq_attr "atom_unit" "simul" )
10341 + (eq_attr "mode" "TI")))))
10342 + "atom-fmul-5c")
10343 +
10344 +;; if paddq(64 bit op), phadd/phsub
10345 +(define_insn_reservation "atom_sseiadd_4" 6
10346 + (and (eq_attr "cpu" "atom")
10347 + (and (eq_attr "type" "sseiadd")
10348 + (ior (match_operand:V2DI 0 "register_operand")
10349 + (eq_attr "atom_unit" "complex"))))
10350 + "atom-complex, atom-all-eu*5")
10351 +
10352 +;; if immediate op.
10353 +(define_insn_reservation "atom_sseishft" 1
10354 + (and (eq_attr "cpu" "atom")
10355 + (and (eq_attr "type" "sseishft")
10356 + (and (eq_attr "atom_unit" "!sishuf")
10357 + (match_operand 2 "immediate_operand"))))
10358 + "atom-simple-either")
10359 +
10360 +;; if palignr or psrldq
10361 +(define_insn_reservation "atom_sseishft_2" 1
10362 + (and (eq_attr "cpu" "atom")
10363 + (and (eq_attr "type" "sseishft")
10364 + (and (eq_attr "atom_unit" "sishuf")
10365 + (match_operand 2 "immediate_operand"))))
10366 + "atom-simple-0")
10367 +
10368 +;; if reg/mem op
10369 +(define_insn_reservation "atom_sseishft_3" 2
10370 + (and (eq_attr "cpu" "atom")
10371 + (and (eq_attr "type" "sseishft")
10372 + (not (match_operand 2 "immediate_operand"))))
10373 + "atom-complex, atom-all-eu")
10374 +
10375 +(define_insn_reservation "atom_sseimul" 1
10376 + (and (eq_attr "cpu" "atom")
10377 + (eq_attr "type" "sseimul"))
10378 + "atom-simple-0")
10379 +
10380 +;; rcpss or rsqrtss
10381 +(define_insn_reservation "atom_sse" 4
10382 + (and (eq_attr "cpu" "atom")
10383 + (and (eq_attr "type" "sse")
10384 + (and (eq_attr "atom_sse_attr" "rcp") (eq_attr "mode" "SF"))))
10385 + "atom-fmul-4c")
10386 +
10387 +;; movshdup, movsldup; arguably these belong under type sseishft
10388 +(define_insn_reservation "atom_sse_2" 1
10389 + (and (eq_attr "cpu" "atom")
10390 + (and (eq_attr "type" "sse")
10391 + (eq_attr "atom_sse_attr" "movdup")))
10392 + "atom-simple-0")
10393 +
10394 +;; lfence
10395 +(define_insn_reservation "atom_sse_3" 1
10396 + (and (eq_attr "cpu" "atom")
10397 + (and (eq_attr "type" "sse")
10398 + (eq_attr "atom_sse_attr" "lfence")))
10399 + "atom-simple-either")
10400 +
10401 +;; sfence,clflush,mfence, prefetch
10402 +(define_insn_reservation "atom_sse_4" 1
10403 + (and (eq_attr "cpu" "atom")
10404 + (and (eq_attr "type" "sse")
10405 + (ior (eq_attr "atom_sse_attr" "fence")
10406 + (eq_attr "atom_sse_attr" "prefetch"))))
10407 + "atom-simple-0")
10408 +
10409 +;; rcpps, rsqrtss, sqrt, ldmxcsr
10410 +(define_insn_reservation "atom_sse_5" 7
10411 + (and (eq_attr "cpu" "atom")
10412 + (and (eq_attr "type" "sse")
10413 + (ior (ior (eq_attr "atom_sse_attr" "sqrt")
10414 + (eq_attr "atom_sse_attr" "mxcsr"))
10415 + (and (eq_attr "atom_sse_attr" "rcp")
10416 + (eq_attr "mode" "V4SF")))))
10417 + "atom-complex, atom-all-eu*6")
10418 +
10419 +;; xmm->xmm
10420 +(define_insn_reservation "atom_ssemov" 1
10421 + (and (eq_attr "cpu" "atom")
10422 + (and (eq_attr "type" "ssemov")
10423 + (and (match_operand 0 "register_operand" "xy") (match_operand 1 "register_operand" "xy"))))
10424 + "atom-simple-either")
10425 +
10426 +;; reg->xmm
10427 +(define_insn_reservation "atom_ssemov_2" 1
10428 + (and (eq_attr "cpu" "atom")
10429 + (and (eq_attr "type" "ssemov")
10430 + (and (match_operand 0 "register_operand" "xy") (match_operand 1 "register_operand" "r"))))
10431 + "atom-simple-0")
10432 +
10433 +;; xmm->reg
10434 +(define_insn_reservation "atom_ssemov_3" 3
10435 + (and (eq_attr "cpu" "atom")
10436 + (and (eq_attr "type" "ssemov")
10437 + (and (match_operand 0 "register_operand" "r") (match_operand 1 "register_operand" "xy"))))
10438 + "atom-eu-0-3-1")
10439 +
10440 +;; mov mem
10441 +(define_insn_reservation "atom_ssemov_4" 1
10442 + (and (eq_attr "cpu" "atom")
10443 + (and (eq_attr "type" "ssemov")
10444 + (and (eq_attr "movu" "0") (eq_attr "memory" "!none"))))
10445 + "atom-simple-0")
10446 +
10447 +;; movu mem
10448 +(define_insn_reservation "atom_ssemov_5" 2
10449 + (and (eq_attr "cpu" "atom")
10450 + (and (eq_attr "type" "ssemov")
10451 + (ior (eq_attr "movu" "1") (eq_attr "memory" "!none"))))
10452 + "atom-complex, atom-all-eu")
10453 +
10454 +;; no memory simple
10455 +(define_insn_reservation "atom_sseadd" 5
10456 + (and (eq_attr "cpu" "atom")
10457 + (and (eq_attr "type" "sseadd")
10458 + (and (eq_attr "memory" "none")
10459 + (and (eq_attr "mode" "!V2DF")
10460 + (eq_attr "atom_unit" "!complex")))))
10461 + "atom-fadd-5c")
10462 +
10463 +;; memory simple
10464 +(define_insn_reservation "atom_sseadd_mem" 5
10465 + (and (eq_attr "cpu" "atom")
10466 + (and (eq_attr "type" "sseadd")
10467 + (and (eq_attr "memory" "!none")
10468 + (and (eq_attr "mode" "!V2DF")
10469 + (eq_attr "atom_unit" "!complex")))))
10470 + "atom-dual-5c")
10471 +
10472 +;; maxps, minps, *pd, hadd, hsub
10473 +(define_insn_reservation "atom_sseadd_3" 8
10474 + (and (eq_attr "cpu" "atom")
10475 + (and (eq_attr "type" "sseadd")
10476 + (ior (eq_attr "mode" "V2DF") (eq_attr "atom_unit" "complex"))))
10477 + "atom-complex, atom-all-eu*7")
10478 +
10479 +;; Except dppd/dpps
10480 +(define_insn_reservation "atom_ssemul" 5
10481 + (and (eq_attr "cpu" "atom")
10482 + (and (eq_attr "type" "ssemul")
10483 + (eq_attr "mode" "!SF")))
10484 + "atom-fmul-5c")
10485 +
10486 +;; Except dppd/dpps, 4 cycle if mulss
10487 +(define_insn_reservation "atom_ssemul_2" 4
10488 + (and (eq_attr "cpu" "atom")
10489 + (and (eq_attr "type" "ssemul")
10490 + (eq_attr "mode" "SF")))
10491 + "atom-fmul-4c")
10492 +
10493 +(define_insn_reservation "atom_ssecmp" 1
10494 + (and (eq_attr "cpu" "atom")
10495 + (eq_attr "type" "ssecmp"))
10496 + "atom-simple-either")
10497 +
10498 +(define_insn_reservation "atom_ssecomi" 10
10499 + (and (eq_attr "cpu" "atom")
10500 + (eq_attr "type" "ssecomi"))
10501 + "atom-complex, atom-all-eu*9")
10502 +
10503 +;; no memory and cvtpi2ps, cvtps2pi, cvttps2pi
10504 +(define_insn_reservation "atom_ssecvt" 5
10505 + (and (eq_attr "cpu" "atom")
10506 + (and (eq_attr "type" "ssecvt")
10507 + (ior (and (match_operand:V2SI 0 "register_operand")
10508 + (match_operand:V4SF 1 "register_operand"))
10509 + (and (match_operand:V4SF 0 "register_operand")
10510 + (match_operand:V2SI 1 "register_operand")))))
10511 + "atom-fadd-5c")
10512 +
10513 +;; memory and cvtpi2ps, cvtps2pi, cvttps2pi
10514 +(define_insn_reservation "atom_ssecvt_2" 5
10515 + (and (eq_attr "cpu" "atom")
10516 + (and (eq_attr "type" "ssecvt")
10517 + (ior (and (match_operand:V2SI 0 "register_operand")
10518 + (match_operand:V4SF 1 "memory_operand"))
10519 + (and (match_operand:V4SF 0 "register_operand")
10520 + (match_operand:V2SI 1 "memory_operand")))))
10521 + "atom-dual-5c")
10522 +
10523 +;; otherwise. 7 cycles average for cvtss2sd
10524 +(define_insn_reservation "atom_ssecvt_3" 7
10525 + (and (eq_attr "cpu" "atom")
10526 + (and (eq_attr "type" "ssecvt")
10527 + (not (ior (and (match_operand:V2SI 0 "register_operand")
10528 + (match_operand:V4SF 1 "nonimmediate_operand"))
10529 + (and (match_operand:V4SF 0 "register_operand")
10530 + (match_operand:V2SI 1 "nonimmediate_operand"))))))
10531 + "atom-complex, atom-all-eu*6")
10532 +
10533 +;; memory and cvtsi2sd
10534 +(define_insn_reservation "atom_sseicvt" 5
10535 + (and (eq_attr "cpu" "atom")
10536 + (and (eq_attr "type" "sseicvt")
10537 + (and (match_operand:V2DF 0 "register_operand")
10538 + (match_operand:SI 1 "memory_operand"))))
10539 + "atom-dual-5c")
10540 +
10541 +;; otherwise. 8 cycles average for cvtsd2si
10542 +(define_insn_reservation "atom_sseicvt_2" 8
10543 + (and (eq_attr "cpu" "atom")
10544 + (and (eq_attr "type" "sseicvt")
10545 + (not (and (match_operand:V2DF 0 "register_operand")
10546 + (match_operand:SI 1 "memory_operand")))))
10547 + "atom-complex, atom-all-eu*7")
10548 +
10549 +(define_insn_reservation "atom_ssediv" 62
10550 + (and (eq_attr "cpu" "atom")
10551 + (eq_attr "type" "ssediv"))
10552 + "atom-complex, atom-all-eu*12, nothing*49")
10553 +
10554 +;; simple for fmov
10555 +(define_insn_reservation "atom_fmov" 1
10556 + (and (eq_attr "cpu" "atom")
10557 + (and (eq_attr "type" "fmov")
10558 + (eq_attr "memory" "none")))
10559 + "atom-simple-either")
10560 +
10561 +;; simple for fmov
10562 +(define_insn_reservation "atom_fmov_mem" 1
10563 + (and (eq_attr "cpu" "atom")
10564 + (and (eq_attr "type" "fmov")
10565 + (eq_attr "memory" "!none")))
10566 + "atom-simple-either")
10567 +
10568 +;; Define bypass here
10569 +
10570 +;; There will be no stall from lea to non-mem EX insns
10571 +(define_bypass 0 "atom_lea"
10572 + "atom_alu_carry,
10573 + atom_alu,atom_alu1,atom_negnot,atom_imov,atom_imovx,
10574 + atom_incdec, atom_setcc, atom_icmov, atom_pop")
10575 +
10576 +(define_bypass 0 "atom_lea"
10577 + "atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
10578 + atom_imovx_mem, atom_imovx_2_mem,
10579 + atom_imov_mem, atom_icmov_mem, atom_fmov_mem"
10580 + "!ix86_agi_dependent")
10581 +
10582 +;; There will be a 3-cycle stall from EX insns to the AGEN (address-generation) insn LEA
10583 +(define_bypass 4 "atom_alu_carry,
10584 + atom_alu,atom_alu1,atom_negnot,atom_imov,atom_imovx,
10585 + atom_incdec,atom_ishift,atom_ishift1,atom_rotate,
10586 + atom_rotate1, atom_setcc, atom_icmov, atom_pop,
10587 + atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
10588 + atom_imovx_mem, atom_imovx_2_mem,
10589 + atom_imov_mem, atom_icmov_mem, atom_fmov_mem"
10590 + "atom_lea")
10591 +
10592 +;; There will be a 3-cycle stall from EX insns to insns that need address calculation
10593 +(define_bypass 4 "atom_alu_carry,
10594 + atom_alu,atom_alu1,atom_negnot,atom_imov,atom_imovx,
10595 + atom_incdec,atom_ishift,atom_ishift1,atom_rotate,
10596 + atom_rotate1, atom_setcc, atom_icmov, atom_pop,
10597 + atom_imovx_mem, atom_imovx_2_mem,
10598 + atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
10599 + atom_imov_mem, atom_icmov_mem, atom_fmov_mem"
10600 + "atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
10601 + atom_negnot_mem, atom_imov_mem, atom_incdec_mem,
10602 + atom_imovx_mem, atom_imovx_2_mem,
10603 + atom_imul_mem, atom_icmp_mem,
10604 + atom_test_mem, atom_icmov_mem, atom_sselog_mem,
10605 + atom_sselog1_mem, atom_fmov_mem, atom_sseadd_mem,
10606 + atom_ishift_mem, atom_ishift1_mem,
10607 + atom_rotate_mem, atom_rotate1_mem"
10608 + "ix86_agi_dependent")
10609 +
10610 +;; Stall from imul to lea is 8 cycles.
10611 +(define_bypass 9 "atom_imul, atom_imul_mem" "atom_lea")
10612 +
10613 +;; Stall from imul to memory address is 8 cycles.
10614 +(define_bypass 9 "atom_imul, atom_imul_mem"
10615 + "atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
10616 + atom_negnot_mem, atom_imov_mem, atom_incdec_mem,
10617 + atom_ishift_mem, atom_ishift1_mem, atom_rotate_mem,
10618 + atom_rotate1_mem, atom_imul_mem, atom_icmp_mem,
10619 + atom_test_mem, atom_icmov_mem, atom_sselog_mem,
10620 + atom_sselog1_mem, atom_fmov_mem, atom_sseadd_mem"
10621 + "ix86_agi_dependent")
10622 +
10623 +;; There will be 0 cycle stall from cmp/test to jcc
10624 +
10625 +;; There will be 1 cycle stall from flag producer to cmov and adc/sbb
10626 +(define_bypass 2 "atom_icmp, atom_test, atom_alu, atom_alu_carry,
10627 + atom_alu1, atom_negnot, atom_incdec, atom_ishift,
10628 + atom_ishift1, atom_rotate, atom_rotate1"
10629 + "atom_icmov, atom_alu_carry")
10630 +
10631 +;; lea to shift count stall is 2 cycles
10632 +(define_bypass 3 "atom_lea"
10633 + "atom_ishift, atom_ishift1, atom_rotate, atom_rotate1,
10634 + atom_ishift_mem, atom_ishift1_mem,
10635 + atom_rotate_mem, atom_rotate1_mem"
10636 + "ix86_dep_by_shift_count")
10637 +
10638 +;; lea to shift source stall is 1 cycle
10639 +(define_bypass 2 "atom_lea"
10640 + "atom_ishift, atom_ishift1, atom_rotate, atom_rotate1"
10641 + "!ix86_dep_by_shift_count")
10642 +
10643 +;; non-lea to shift count stall is 1 cycle
10644 +(define_bypass 2 "atom_alu_carry,
10645 + atom_alu,atom_alu1,atom_negnot,atom_imov,atom_imovx,
10646 + atom_incdec,atom_ishift,atom_ishift1,atom_rotate,
10647 + atom_rotate1, atom_setcc, atom_icmov, atom_pop,
10648 + atom_alu_mem, atom_alu_carry_mem, atom_alu1_mem,
10649 + atom_imovx_mem, atom_imovx_2_mem,
10650 + atom_imov_mem, atom_icmov_mem, atom_fmov_mem"
10651 + "atom_ishift, atom_ishift1, atom_rotate, atom_rotate1,
10652 + atom_ishift_mem, atom_ishift1_mem,
10653 + atom_rotate_mem, atom_rotate1_mem"
10654 + "ix86_dep_by_shift_count")
10655 --- a/gcc/config/i386/cpuid.h
10656 +++ b/gcc/config/i386/cpuid.h
10657 @@ -29,6 +29,7 @@
10658 #define bit_CMPXCHG16B (1 << 13)
10659 #define bit_SSE4_1 (1 << 19)
10660 #define bit_SSE4_2 (1 << 20)
10661 +#define bit_MOVBE (1 << 22)
10662 #define bit_POPCNT (1 << 23)
10663 #define bit_AES (1 << 25)
10664 #define bit_XSAVE (1 << 26)
10665 --- a/gcc/config/i386/cygming.h
10666 +++ b/gcc/config/i386/cygming.h
10667 @@ -34,7 +34,7 @@
10668 #endif
10669
10670 #undef TARGET_64BIT_MS_ABI
10671 -#define TARGET_64BIT_MS_ABI (!cfun ? DEFAULT_ABI == MS_ABI : TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
10672 +#define TARGET_64BIT_MS_ABI (!cfun ? ix86_abi == MS_ABI : TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
10673
10674 #undef DEFAULT_ABI
10675 #define DEFAULT_ABI (TARGET_64BIT ? MS_ABI : SYSV_ABI)
10676 @@ -203,7 +203,7 @@
10677 #define CHECK_STACK_LIMIT 4000
10678
10679 #undef STACK_BOUNDARY
10680 -#define STACK_BOUNDARY (DEFAULT_ABI == MS_ABI ? 128 : BITS_PER_WORD)
10681 +#define STACK_BOUNDARY (ix86_abi == MS_ABI ? 128 : BITS_PER_WORD)
10682
10683 /* By default, target has a 80387, uses IEEE compatible arithmetic,
10684 returns float values in the 387 and needs stack probes.
10685 --- a/gcc/config/i386/cygming.opt
10686 +++ b/gcc/config/i386/cygming.opt
10687 @@ -45,3 +45,7 @@
10688 mwindows
10689 Target
10690 Create GUI application
10691 +
10692 +mpe-aligned-commons
10693 +Target Var(use_pe_aligned_common) Init(HAVE_GAS_ALIGNED_COMM)
10694 +Use the GNU extension to the PE format for aligned common data
10695 --- a/gcc/config/i386/driver-i386.c
10696 +++ b/gcc/config/i386/driver-i386.c
10697 @@ -378,7 +378,7 @@
10698 /* Extended features */
10699 unsigned int has_lahf_lm = 0, has_sse4a = 0;
10700 unsigned int has_longmode = 0, has_3dnowp = 0, has_3dnow = 0;
10701 - unsigned int has_sse4_1 = 0, has_sse4_2 = 0;
10702 + unsigned int has_movbe = 0, has_sse4_1 = 0, has_sse4_2 = 0;
10703 unsigned int has_popcnt = 0, has_aes = 0, has_avx = 0;
10704 unsigned int has_pclmul = 0;
10705
10706 @@ -398,9 +398,22 @@
10707
10708 __cpuid (1, eax, ebx, ecx, edx);
10709
10710 - /* We don't care for extended family. */
10711 model = (eax >> 4) & 0x0f;
10712 family = (eax >> 8) & 0x0f;
10713 + if (vendor == SIG_INTEL)
10714 + {
10715 + unsigned int extended_model, extended_family;
10716 +
10717 + extended_model = (eax >> 12) & 0xf0;
10718 + extended_family = (eax >> 20) & 0xff;
10719 + if (family == 0x0f)
10720 + {
10721 + family += extended_family;
10722 + model += extended_model;
10723 + }
10724 + else if (family == 0x06)
10725 + model += extended_model;
10726 + }
10727
10728 has_sse3 = ecx & bit_SSE3;
10729 has_ssse3 = ecx & bit_SSSE3;
10730 @@ -408,6 +421,7 @@
10731 has_sse4_2 = ecx & bit_SSE4_2;
10732 has_avx = ecx & bit_AVX;
10733 has_cmpxchg16b = ecx & bit_CMPXCHG16B;
10734 + has_movbe = ecx & bit_MOVBE;
10735 has_popcnt = ecx & bit_POPCNT;
10736 has_aes = ecx & bit_AES;
10737 has_pclmul = ecx & bit_PCLMUL;
10738 @@ -505,8 +519,8 @@
10739 break;
10740 case PROCESSOR_PENTIUMPRO:
10741 if (has_longmode)
10742 - /* It is Core 2 Duo. */
10743 - cpu = "core2";
10744 + /* It is Core 2 or Atom. */
10745 + cpu = (model == 28) ? "atom" : "core2";
10746 else if (arch)
10747 {
10748 if (has_sse3)
10749 @@ -597,6 +611,8 @@
10750 options = concat (options, "-mcx16 ", NULL);
10751 if (has_lahf_lm)
10752 options = concat (options, "-msahf ", NULL);
10753 + if (has_movbe)
10754 + options = concat (options, "-mmovbe ", NULL);
10755 if (has_aes)
10756 options = concat (options, "-maes ", NULL);
10757 if (has_pclmul)
10758 --- a/gcc/config/i386/i386.c
10759 +++ b/gcc/config/i386/i386.c
10760 @@ -1036,6 +1036,79 @@
10761 1, /* cond_not_taken_branch_cost. */
10762 };
10763
10764 +static const
10765 +struct processor_costs atom_cost = {
10766 + COSTS_N_INSNS (1), /* cost of an add instruction */
10767 + COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
10768 + COSTS_N_INSNS (1), /* variable shift costs */
10769 + COSTS_N_INSNS (1), /* constant shift costs */
10770 + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
10771 + COSTS_N_INSNS (4), /* HI */
10772 + COSTS_N_INSNS (3), /* SI */
10773 + COSTS_N_INSNS (4), /* DI */
10774 + COSTS_N_INSNS (2)}, /* other */
10775 + 0, /* cost of multiply per each bit set */
10776 + {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
10777 + COSTS_N_INSNS (26), /* HI */
10778 + COSTS_N_INSNS (42), /* SI */
10779 + COSTS_N_INSNS (74), /* DI */
10780 + COSTS_N_INSNS (74)}, /* other */
10781 + COSTS_N_INSNS (1), /* cost of movsx */
10782 + COSTS_N_INSNS (1), /* cost of movzx */
10783 + 8, /* "large" insn */
10784 + 17, /* MOVE_RATIO */
10785 + 2, /* cost for loading QImode using movzbl */
10786 + {4, 4, 4}, /* cost of loading integer registers
10787 + in QImode, HImode and SImode.
10788 + Relative to reg-reg move (2). */
10789 + {4, 4, 4}, /* cost of storing integer registers */
10790 + 4, /* cost of reg,reg fld/fst */
10791 + {12, 12, 12}, /* cost of loading fp registers
10792 + in SFmode, DFmode and XFmode */
10793 + {6, 6, 8}, /* cost of storing fp registers
10794 + in SFmode, DFmode and XFmode */
10795 + 2, /* cost of moving MMX register */
10796 + {8, 8}, /* cost of loading MMX registers
10797 + in SImode and DImode */
10798 + {8, 8}, /* cost of storing MMX registers
10799 + in SImode and DImode */
10800 + 2, /* cost of moving SSE register */
10801 + {8, 8, 8}, /* cost of loading SSE registers
10802 + in SImode, DImode and TImode */
10803 + {8, 8, 8}, /* cost of storing SSE registers
10804 + in SImode, DImode and TImode */
10805 + 5, /* MMX or SSE register to integer */
10806 + 32, /* size of l1 cache. */
10807 + 256, /* size of l2 cache. */
10808 + 64, /* size of prefetch block */
10809 + 6, /* number of parallel prefetches */
10810 + 3, /* Branch cost */
10811 + COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
10812 + COSTS_N_INSNS (8), /* cost of FMUL instruction. */
10813 + COSTS_N_INSNS (20), /* cost of FDIV instruction. */
10814 + COSTS_N_INSNS (8), /* cost of FABS instruction. */
10815 + COSTS_N_INSNS (8), /* cost of FCHS instruction. */
10816 + COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
10817 + {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
10818 + {libcall, {{32, loop}, {64, rep_prefix_4_byte},
10819 + {8192, rep_prefix_8_byte}, {-1, libcall}}}},
10820 + {{libcall, {{8, loop}, {15, unrolled_loop},
10821 + {2048, rep_prefix_4_byte}, {-1, libcall}}},
10822 + {libcall, {{24, loop}, {32, unrolled_loop},
10823 + {8192, rep_prefix_8_byte}, {-1, libcall}}}},
10824 + 1, /* scalar_stmt_cost. */
10825 + 1, /* scalar load_cost. */
10826 + 1, /* scalar_store_cost. */
10827 + 1, /* vec_stmt_cost. */
10828 + 1, /* vec_to_scalar_cost. */
10829 + 1, /* scalar_to_vec_cost. */
10830 + 1, /* vec_align_load_cost. */
10831 + 2, /* vec_unalign_load_cost. */
10832 + 1, /* vec_store_cost. */
10833 + 3, /* cond_taken_branch_cost. */
10834 + 1, /* cond_not_taken_branch_cost. */
10835 +};
10836 +
10837 /* Generic64 should produce code tuned for Nocona and K8. */
10838 static const
10839 struct processor_costs generic64_cost = {
10840 @@ -1194,6 +1267,7 @@
10841 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
10842 #define m_NOCONA (1<<PROCESSOR_NOCONA)
10843 #define m_CORE2 (1<<PROCESSOR_CORE2)
10844 +#define m_ATOM (1<<PROCESSOR_ATOM)
10845
10846 #define m_GEODE (1<<PROCESSOR_GEODE)
10847 #define m_K6 (1<<PROCESSOR_K6)
10848 @@ -1231,10 +1305,11 @@
10849 m_486 | m_PENT,
10850
10851 /* X86_TUNE_UNROLL_STRLEN */
10852 - m_486 | m_PENT | m_PPRO | m_AMD_MULTIPLE | m_K6 | m_CORE2 | m_GENERIC,
10853 + m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
10854 + | m_CORE2 | m_GENERIC,
10855
10856 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
10857 - m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
10858 + m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
10859
10860 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
10861 on simulation result. But after P4 was made, no performance benefit
10862 @@ -1246,12 +1321,12 @@
10863 ~m_386,
10864
10865 /* X86_TUNE_USE_SAHF */
10866 - m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
10867 + m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
10868 | m_NOCONA | m_CORE2 | m_GENERIC,
10869
10870 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
10871 partial dependencies. */
10872 - m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA
10873 + m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
10874 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
10875
10876 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
10877 @@ -1271,13 +1346,13 @@
10878 m_386 | m_486 | m_K6_GEODE,
10879
10880 /* X86_TUNE_USE_SIMODE_FIOP */
10881 - ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_CORE2 | m_GENERIC),
10882 + ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
10883
10884 /* X86_TUNE_USE_MOV0 */
10885 m_K6,
10886
10887 /* X86_TUNE_USE_CLTD */
10888 - ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),
10889 + ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
10890
10891 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
10892 m_PENT4,
10893 @@ -1292,8 +1367,8 @@
10894 ~(m_PENT | m_PPRO),
10895
10896 /* X86_TUNE_PROMOTE_QIMODE */
10897 - m_K6_GEODE | m_PENT | m_386 | m_486 | m_AMD_MULTIPLE | m_CORE2
10898 - | m_GENERIC /* | m_PENT4 ? */,
10899 + m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
10900 + | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
10901
10902 /* X86_TUNE_FAST_PREFIX */
10903 ~(m_PENT | m_486 | m_386),
10904 @@ -1317,26 +1392,28 @@
10905 m_PPRO,
10906
10907 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
10908 - m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
10909 + m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
10910 + | m_CORE2 | m_GENERIC,
10911
10912 /* X86_TUNE_ADD_ESP_8 */
10913 - m_AMD_MULTIPLE | m_PPRO | m_K6_GEODE | m_386
10914 + m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
10915 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
10916
10917 /* X86_TUNE_SUB_ESP_4 */
10918 - m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
10919 + m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
10920 + | m_GENERIC,
10921
10922 /* X86_TUNE_SUB_ESP_8 */
10923 - m_AMD_MULTIPLE | m_PPRO | m_386 | m_486
10924 + m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
10925 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
10926
10927 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
10928 for DFmode copies */
10929 - ~(m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
10930 + ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
10931 | m_GENERIC | m_GEODE),
10932
10933 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
10934 - m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
10935 + m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
10936
10937 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
10938 conflict here in between PPro/Pentium4 based chips that thread 128bit
10939 @@ -1347,7 +1424,8 @@
10940 shows that disabling this option on P4 brings over 20% SPECfp regression,
10941 while enabling it on K8 brings roughly 2.4% regression that can be partly
10942 masked by careful scheduling of moves. */
10943 - m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,
10944 + m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
10945 + | m_AMDFAM10,
10946
10947 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
10948 m_AMDFAM10,
10949 @@ -1365,13 +1443,13 @@
10950 m_PPRO | m_PENT4 | m_NOCONA,
10951
10952 /* X86_TUNE_MEMORY_MISMATCH_STALL */
10953 - m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
10954 + m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
10955
10956 /* X86_TUNE_PROLOGUE_USING_MOVE */
10957 - m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,
10958 + m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
10959
10960 /* X86_TUNE_EPILOGUE_USING_MOVE */
10961 - m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,
10962 + m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
10963
10964 /* X86_TUNE_SHIFT1 */
10965 ~m_486,
10966 @@ -1380,29 +1458,32 @@
10967 m_AMD_MULTIPLE,
10968
10969 /* X86_TUNE_INTER_UNIT_MOVES */
10970 - ~(m_AMD_MULTIPLE | m_GENERIC),
10971 + ~(m_AMD_MULTIPLE | m_ATOM | m_GENERIC),
10972
10973 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
10974 ~(m_AMDFAM10),
10975
10976 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
10977 than 4 branch instructions in the 16 byte window. */
10978 - m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
10979 + m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
10980 + | m_GENERIC,
10981
10982 /* X86_TUNE_SCHEDULE */
10983 - m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,
10984 + m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
10985 + | m_GENERIC,
10986
10987 /* X86_TUNE_USE_BT */
10988 - m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
10989 + m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
10990
10991 /* X86_TUNE_USE_INCDEC */
10992 - ~(m_PENT4 | m_NOCONA | m_GENERIC),
10993 + ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
10994
10995 /* X86_TUNE_PAD_RETURNS */
10996 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
10997
10998 /* X86_TUNE_EXT_80387_CONSTANTS */
10999 - m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,
11000 + m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
11001 + | m_CORE2 | m_GENERIC,
11002
11003 /* X86_TUNE_SHORTEN_X87_SSE */
11004 ~m_K8,
11005 @@ -1447,6 +1528,10 @@
11006 with a subsequent conditional jump instruction into a single
11007 compare-and-branch uop. */
11008 m_CORE2,
11009 +
11010 + /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
11011 + will impact LEA instruction selection. */
11012 + m_ATOM,
11013 };
11014
11015 /* Feature tests against the various architecture variations. */
11016 @@ -1472,10 +1557,11 @@
11017 };
11018
11019 static const unsigned int x86_accumulate_outgoing_args
11020 - = m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
11021 + = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
11022 + | m_GENERIC;
11023
11024 static const unsigned int x86_arch_always_fancy_math_387
11025 - = m_PENT | m_PPRO | m_AMD_MULTIPLE | m_PENT4
11026 + = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
11027 | m_NOCONA | m_CORE2 | m_GENERIC;
11028
11029 static enum stringop_alg stringop_alg = no_stringop;
11030 @@ -1743,6 +1829,9 @@
11031 /* Alignment for incoming stack boundary in bits. */
11032 unsigned int ix86_incoming_stack_boundary;
11033
11034 +/* The abi used by target. */
11035 +enum calling_abi ix86_abi;
11036 +
11037 /* Values 1-5: see jump.c */
11038 int ix86_branch_cost;
11039
11040 @@ -1819,6 +1908,8 @@
11041 static bool ix86_can_inline_p (tree, tree);
11042 static void ix86_set_current_function (tree);
11043
11044 +static enum calling_abi ix86_function_abi (const_tree);
11045 +
11046 \f
11047 /* The svr4 ABI for the i386 says that records and unions are returned
11048 in memory. */
11049 @@ -1877,9 +1968,11 @@
11050
11051 #define OPTION_MASK_ISA_ABM_SET \
11052 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
11053 +
11054 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
11055 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
11056 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
11057 +#define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
11058
11059 /* Define a set of ISAs which aren't available when a given ISA is
11060 disabled. MMX and SSE ISAs are handled separately. */
11061 @@ -1921,6 +2014,7 @@
11062 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
11063 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
11064 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
11065 +#define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
11066
11067 /* Vectorization library interface and handlers. */
11068 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
11069 @@ -1953,7 +2047,8 @@
11070 {&core2_cost, 16, 10, 16, 10, 16},
11071 {&generic32_cost, 16, 7, 16, 7, 16},
11072 {&generic64_cost, 16, 10, 16, 10, 16},
11073 - {&amdfam10_cost, 32, 24, 32, 7, 32}
11074 + {&amdfam10_cost, 32, 24, 32, 7, 32},
11075 + {&atom_cost, 16, 7, 16, 7, 16}
11076 };
11077
11078 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
11079 @@ -1971,6 +2066,7 @@
11080 "prescott",
11081 "nocona",
11082 "core2",
11083 + "atom",
11084 "geode",
11085 "k6",
11086 "k6-2",
11087 @@ -2209,6 +2305,19 @@
11088 }
11089 return true;
11090
11091 + case OPT_mmovbe:
11092 + if (value)
11093 + {
11094 + ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
11095 + ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
11096 + }
11097 + else
11098 + {
11099 + ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
11100 + ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
11101 + }
11102 + return true;
11103 +
11104 case OPT_maes:
11105 if (value)
11106 {
11107 @@ -2271,6 +2380,7 @@
11108 { "-mmmx", OPTION_MASK_ISA_MMX },
11109 { "-mabm", OPTION_MASK_ISA_ABM },
11110 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
11111 + { "-mmovbe", OPTION_MASK_ISA_MOVBE },
11112 { "-maes", OPTION_MASK_ISA_AES },
11113 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
11114 };
11115 @@ -2487,7 +2597,8 @@
11116 PTA_AES = 1 << 17,
11117 PTA_PCLMUL = 1 << 18,
11118 PTA_AVX = 1 << 19,
11119 - PTA_FMA = 1 << 20
11120 + PTA_FMA = 1 << 20,
11121 + PTA_MOVBE = 1 << 21
11122 };
11123
11124 static struct pta
11125 @@ -2529,6 +2640,9 @@
11126 {"core2", PROCESSOR_CORE2, CPU_CORE2,
11127 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
11128 | PTA_SSSE3 | PTA_CX16},
11129 + {"atom", PROCESSOR_ATOM, CPU_ATOM,
11130 + PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
11131 + | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
11132 {"geode", PROCESSOR_GEODE, CPU_GEODE,
11133 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
11134 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
11135 @@ -2716,6 +2830,20 @@
11136 error ("bad value (%s) for %sarch=%s %s",
11137 ix86_arch_string, prefix, suffix, sw);
11138
11139 + /* Validate -mabi= value. */
11140 + if (ix86_abi_string)
11141 + {
11142 + if (strcmp (ix86_abi_string, "sysv") == 0)
11143 + ix86_abi = SYSV_ABI;
11144 + else if (strcmp (ix86_abi_string, "ms") == 0)
11145 + ix86_abi = MS_ABI;
11146 + else
11147 + error ("unknown ABI (%s) for %sabi=%s %s",
11148 + ix86_abi_string, prefix, suffix, sw);
11149 + }
11150 + else
11151 + ix86_abi = DEFAULT_ABI;
11152 +
11153 if (ix86_cmodel_string != 0)
11154 {
11155 if (!strcmp (ix86_cmodel_string, "small"))
11156 @@ -2828,6 +2956,9 @@
11157 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
11158 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
11159 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
11160 + if (processor_alias_table[i].flags & PTA_MOVBE
11161 + && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
11162 + ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
11163 if (processor_alias_table[i].flags & PTA_AES
11164 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
11165 ix86_isa_flags |= OPTION_MASK_ISA_AES;
11166 @@ -4592,14 +4723,14 @@
11167 default ABI. */
11168
11169 /* RAX is used as hidden argument to va_arg functions. */
11170 - if (DEFAULT_ABI == SYSV_ABI && regno == AX_REG)
11171 + if (ix86_abi == SYSV_ABI && regno == AX_REG)
11172 return true;
11173
11174 - if (DEFAULT_ABI == MS_ABI)
11175 + if (ix86_abi == MS_ABI)
11176 parm_regs = x86_64_ms_abi_int_parameter_registers;
11177 else
11178 parm_regs = x86_64_int_parameter_registers;
11179 - for (i = 0; i < (DEFAULT_ABI == MS_ABI ? X64_REGPARM_MAX
11180 + for (i = 0; i < (ix86_abi == MS_ABI ? X64_REGPARM_MAX
11181 : X86_64_REGPARM_MAX); i++)
11182 if (regno == parm_regs[i])
11183 return true;
11184 @@ -4627,7 +4758,7 @@
11185 int
11186 ix86_reg_parm_stack_space (const_tree fndecl)
11187 {
11188 - int call_abi = SYSV_ABI;
11189 + enum calling_abi call_abi = SYSV_ABI;
11190 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
11191 call_abi = ix86_function_abi (fndecl);
11192 else
11193 @@ -4639,37 +4770,39 @@
11194
11195 /* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the
11196 call abi used. */
11197 -int
11198 +enum calling_abi
11199 ix86_function_type_abi (const_tree fntype)
11200 {
11201 if (TARGET_64BIT && fntype != NULL)
11202 {
11203 - int abi;
11204 - if (DEFAULT_ABI == SYSV_ABI)
11205 - abi = lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)) ? MS_ABI : SYSV_ABI;
11206 - else
11207 - abi = lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)) ? SYSV_ABI : MS_ABI;
11208 -
11209 + enum calling_abi abi = ix86_abi;
11210 + if (abi == SYSV_ABI)
11211 + {
11212 + if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
11213 + abi = MS_ABI;
11214 + }
11215 + else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
11216 + abi = SYSV_ABI;
11217 return abi;
11218 }
11219 - return DEFAULT_ABI;
11220 + return ix86_abi;
11221 }
11222
11223 -int
11224 +static enum calling_abi
11225 ix86_function_abi (const_tree fndecl)
11226 {
11227 if (! fndecl)
11228 - return DEFAULT_ABI;
11229 + return ix86_abi;
11230 return ix86_function_type_abi (TREE_TYPE (fndecl));
11231 }
11232
11233 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
11234 call abi used. */
11235 -int
11236 +enum calling_abi
11237 ix86_cfun_abi (void)
11238 {
11239 if (! cfun || ! TARGET_64BIT)
11240 - return DEFAULT_ABI;
11241 + return ix86_abi;
11242 return cfun->machine->call_abi;
11243 }
11244
11245 @@ -4683,7 +4816,7 @@
11246 ix86_call_abi_override (const_tree fndecl)
11247 {
11248 if (fndecl == NULL_TREE)
11249 - cfun->machine->call_abi = DEFAULT_ABI;
11250 + cfun->machine->call_abi = ix86_abi;
11251 else
11252 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
11253 }
11254 @@ -4724,8 +4857,8 @@
11255 cum->nregs = ix86_regparm;
11256 if (TARGET_64BIT)
11257 {
11258 - if (cum->call_abi != DEFAULT_ABI)
11259 - cum->nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX
11260 + if (cum->call_abi != ix86_abi)
11261 + cum->nregs = ix86_abi != SYSV_ABI ? X86_64_REGPARM_MAX
11262 : X64_REGPARM_MAX;
11263 }
11264 if (TARGET_SSE)
11265 @@ -4733,8 +4866,8 @@
11266 cum->sse_nregs = SSE_REGPARM_MAX;
11267 if (TARGET_64BIT)
11268 {
11269 - if (cum->call_abi != DEFAULT_ABI)
11270 - cum->sse_nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
11271 + if (cum->call_abi != ix86_abi)
11272 + cum->sse_nregs = ix86_abi != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
11273 : X64_SSE_REGPARM_MAX;
11274 }
11275 }
11276 @@ -5700,7 +5833,7 @@
11277 if (type)
11278 mode = type_natural_mode (type, NULL);
11279
11280 - if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
11281 + if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
11282 function_arg_advance_ms_64 (cum, bytes, words);
11283 else if (TARGET_64BIT)
11284 function_arg_advance_64 (cum, mode, type, words, named);
11285 @@ -5846,9 +5979,9 @@
11286 if (mode == VOIDmode)
11287 return GEN_INT (cum->maybe_vaarg
11288 ? (cum->sse_nregs < 0
11289 - ? (cum->call_abi == DEFAULT_ABI
11290 + ? (cum->call_abi == ix86_abi
11291 ? SSE_REGPARM_MAX
11292 - : (DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
11293 + : (ix86_abi != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
11294 : X64_SSE_REGPARM_MAX))
11295 : cum->sse_regno)
11296 : -1);
11297 @@ -5942,7 +6075,7 @@
11298 if (type && TREE_CODE (type) == VECTOR_TYPE)
11299 mode = type_natural_mode (type, cum);
11300
11301 - if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
11302 + if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
11303 return function_arg_ms_64 (cum, mode, omode, named, bytes);
11304 else if (TARGET_64BIT)
11305 return function_arg_64 (cum, mode, omode, type, named);
11306 @@ -5962,7 +6095,7 @@
11307 const_tree type, bool named ATTRIBUTE_UNUSED)
11308 {
11309 /* See Windows x64 Software Convention. */
11310 - if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
11311 + if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
11312 {
11313 int msize = (int) GET_MODE_SIZE (mode);
11314 if (type)
11315 @@ -6102,7 +6235,7 @@
11316 /* TODO: The function should depend on current function ABI but
11317 builtins.c would need updating then. Therefore we use the
11318 default ABI. */
11319 - if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
11320 + if (TARGET_64BIT && ix86_abi == MS_ABI)
11321 return false;
11322 return TARGET_FLOAT_RETURNS_IN_80387;
11323
11324 @@ -6498,13 +6631,13 @@
11325 static tree
11326 ix86_build_builtin_va_list (void)
11327 {
11328 - tree ret = ix86_build_builtin_va_list_abi (DEFAULT_ABI);
11329 + tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
11330
11331 /* Initialize abi specific va_list builtin types. */
11332 if (TARGET_64BIT)
11333 {
11334 tree t;
11335 - if (DEFAULT_ABI == MS_ABI)
11336 + if (ix86_abi == MS_ABI)
11337 {
11338 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
11339 if (TREE_CODE (t) != RECORD_TYPE)
11340 @@ -6518,7 +6651,7 @@
11341 t = build_variant_type_copy (t);
11342 sysv_va_list_type_node = t;
11343 }
11344 - if (DEFAULT_ABI != MS_ABI)
11345 + if (ix86_abi != MS_ABI)
11346 {
11347 t = ix86_build_builtin_va_list_abi (MS_ABI);
11348 if (TREE_CODE (t) != RECORD_TYPE)
11349 @@ -6551,8 +6684,8 @@
11350 int i;
11351 int regparm = ix86_regparm;
11352
11353 - if (cum->call_abi != DEFAULT_ABI)
11354 - regparm = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
11355 + if (cum->call_abi != ix86_abi)
11356 + regparm = ix86_abi != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
11357
11358 /* GPR size of varargs save area. */
11359 if (cfun->va_list_gpr_size)
11360 @@ -6705,7 +6838,7 @@
11361 return true;
11362 canonic = ix86_canonical_va_list_type (type);
11363 return (canonic == ms_va_list_type_node
11364 - || (DEFAULT_ABI == MS_ABI && canonic == va_list_type_node));
11365 + || (ix86_abi == MS_ABI && canonic == va_list_type_node));
11366 }
11367
11368 /* Implement va_start. */
11369 @@ -12987,6 +13120,316 @@
11370 emit_move_insn (operands[0], dst);
11371 }
11372
11373 +#define LEA_SEARCH_THRESHOLD 12
11374 +
11375 +/* Search backward for non-agu definition of register number REGNO1
11376 + or register number REGNO2 in INSN's basic block until
11377 + 1. Pass LEA_SEARCH_THRESHOLD instructions, or
11378 + 2. Reach BB boundary, or
11379 + 3. Reach agu definition.
11380 + Returns the distance between the non-agu definition point and INSN.
11381 + If no definition point, returns -1. */
11382 +
11383 +static int
11384 +distance_non_agu_define (unsigned int regno1, unsigned int regno2,
11385 + rtx insn)
11386 +{
11387 + basic_block bb = BLOCK_FOR_INSN (insn);
11388 + int distance = 0;
11389 + df_ref *def_rec;
11390 + enum attr_type insn_type;
11391 +
11392 + if (insn != BB_HEAD (bb))
11393 + {
11394 + rtx prev = PREV_INSN (insn);
11395 + while (prev && distance < LEA_SEARCH_THRESHOLD)
11396 + {
11397 + if (INSN_P (prev))
11398 + {
11399 + distance++;
11400 + for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
11401 + if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
11402 + && !DF_REF_IS_ARTIFICIAL (*def_rec)
11403 + && (regno1 == DF_REF_REGNO (*def_rec)
11404 + || regno2 == DF_REF_REGNO (*def_rec)))
11405 + {
11406 + insn_type = get_attr_type (prev);
11407 + if (insn_type != TYPE_LEA)
11408 + goto done;
11409 + }
11410 + }
11411 + if (prev == BB_HEAD (bb))
11412 + break;
11413 + prev = PREV_INSN (prev);
11414 + }
11415 + }
11416 +
11417 + if (distance < LEA_SEARCH_THRESHOLD)
11418 + {
11419 + edge e;
11420 + edge_iterator ei;
11421 + bool simple_loop = false;
11422 +
11423 + FOR_EACH_EDGE (e, ei, bb->preds)
11424 + if (e->src == bb)
11425 + {
11426 + simple_loop = true;
11427 + break;
11428 + }
11429 +
11430 + if (simple_loop)
11431 + {
11432 + rtx prev = BB_END (bb);
11433 + while (prev
11434 + && prev != insn
11435 + && distance < LEA_SEARCH_THRESHOLD)
11436 + {
11437 + if (INSN_P (prev))
11438 + {
11439 + distance++;
11440 + for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
11441 + if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
11442 + && !DF_REF_IS_ARTIFICIAL (*def_rec)
11443 + && (regno1 == DF_REF_REGNO (*def_rec)
11444 + || regno2 == DF_REF_REGNO (*def_rec)))
11445 + {
11446 + insn_type = get_attr_type (prev);
11447 + if (insn_type != TYPE_LEA)
11448 + goto done;
11449 + }
11450 + }
11451 + prev = PREV_INSN (prev);
11452 + }
11453 + }
11454 + }
11455 +
11456 + distance = -1;
11457 +
11458 +done:
11459 + /* get_attr_type may modify recog data. We want to make sure
11460 + that recog data is valid for instruction INSN, on which
11461 + distance_non_agu_define is called. INSN is unchanged here. */
11462 + extract_insn_cached (insn);
11463 + return distance;
11464 +}
11465 +
11466 +/* Return the distance between INSN and the next insn that uses
11467 +   register number REGNO0 in memory address. Return -1 if no such
11468 +   use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set. */
11469 +
11470 +static int
11471 +distance_agu_use (unsigned int regno0, rtx insn)
11472 +{
11473 + basic_block bb = BLOCK_FOR_INSN (insn);
11474 + int distance = 0;
11475 + df_ref *def_rec;
11476 + df_ref *use_rec;
11477 +
11478 + if (insn != BB_END (bb))
11479 + {
11480 + rtx next = NEXT_INSN (insn);
11481 + while (next && distance < LEA_SEARCH_THRESHOLD)
11482 + {
11483 + if (INSN_P (next))
11484 + {
11485 + distance++;
11486 +
11487 + for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
11488 + if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
11489 + || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
11490 + && regno0 == DF_REF_REGNO (*use_rec))
11491 + {
11492 + /* Return DISTANCE if OP0 is used in memory
11493 + address in NEXT. */
11494 + return distance;
11495 + }
11496 +
11497 + for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
11498 + if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
11499 + && !DF_REF_IS_ARTIFICIAL (*def_rec)
11500 + && regno0 == DF_REF_REGNO (*def_rec))
11501 + {
11502 + /* Return -1 if OP0 is set in NEXT. */
11503 + return -1;
11504 + }
11505 + }
11506 + if (next == BB_END (bb))
11507 + break;
11508 + next = NEXT_INSN (next);
11509 + }
11510 + }
11511 +
11512 + if (distance < LEA_SEARCH_THRESHOLD)
11513 + {
11514 + edge e;
11515 + edge_iterator ei;
11516 + bool simple_loop = false;
11517 +
11518 + FOR_EACH_EDGE (e, ei, bb->succs)
11519 + if (e->dest == bb)
11520 + {
11521 + simple_loop = true;
11522 + break;
11523 + }
11524 +
11525 + if (simple_loop)
11526 + {
11527 + rtx next = BB_HEAD (bb);
11528 + while (next
11529 + && next != insn
11530 + && distance < LEA_SEARCH_THRESHOLD)
11531 + {
11532 + if (INSN_P (next))
11533 + {
11534 + distance++;
11535 +
11536 + for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
11537 + if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
11538 + || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
11539 + && regno0 == DF_REF_REGNO (*use_rec))
11540 + {
11541 + /* Return DISTANCE if OP0 is used in memory
11542 + address in NEXT. */
11543 + return distance;
11544 + }
11545 +
11546 + for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
11547 + if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
11548 + && !DF_REF_IS_ARTIFICIAL (*def_rec)
11549 + && regno0 == DF_REF_REGNO (*def_rec))
11550 + {
11551 + /* Return -1 if OP0 is set in NEXT. */
11552 + return -1;
11553 + }
11554 +
11555 + }
11556 + next = NEXT_INSN (next);
11557 + }
11558 + }
11559 + }
11560 +
11561 + return -1;
11562 +}
11563 +
11564 +/* Define this macro to tune LEA priority vs ADD; it takes effect when
11565 +   there is a dilemma of choosing LEA or ADD
11566 +   Negative value: ADD is more preferred than LEA
11567 +   Zero: Neutral
11568 +   Positive value: LEA is more preferred than ADD.  */
11569 +#define IX86_LEA_PRIORITY 2
11570 +
11571 +/* Return true if it is ok to optimize an ADD operation to LEA
11572 +   operation to avoid flag register consumption. For the processors
11573 + like ATOM, if the destination register of LEA holds an actual
11574 + address which will be used soon, LEA is better and otherwise ADD
11575 + is better. */
11576 +
11577 +bool
11578 +ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
11579 + rtx insn, rtx operands[])
11580 +{
11581 + unsigned int regno0 = true_regnum (operands[0]);
11582 + unsigned int regno1 = true_regnum (operands[1]);
11583 + unsigned int regno2;
11584 +
11585 + if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
11586 + return regno0 != regno1;
11587 +
11588 + regno2 = true_regnum (operands[2]);
11589 +
11590 + /* If a = b + c, (a!=b && a!=c), must use lea form. */
11591 + if (regno0 != regno1 && regno0 != regno2)
11592 + return true;
11593 + else
11594 + {
11595 + int dist_define, dist_use;
11596 + dist_define = distance_non_agu_define (regno1, regno2, insn);
11597 + if (dist_define <= 0)
11598 + return true;
11599 +
11600 + /* If this insn has both backward non-agu dependence and forward
11601 +       agu dependence, the one with short distance takes effect. */
11602 + dist_use = distance_agu_use (regno0, insn);
11603 + if (dist_use <= 0
11604 + || (dist_define + IX86_LEA_PRIORITY) < dist_use)
11605 + return false;
11606 +
11607 + return true;
11608 + }
11609 +}
11610 +
11611 +/* Return true if destination reg of SET_BODY is shift count of
11612 + USE_BODY. */
11613 +
11614 +static bool
11615 +ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
11616 +{
11617 + rtx set_dest;
11618 + rtx shift_rtx;
11619 + int i;
11620 +
11621 + /* Retrieve destination of SET_BODY. */
11622 + switch (GET_CODE (set_body))
11623 + {
11624 + case SET:
11625 + set_dest = SET_DEST (set_body);
11626 + if (!set_dest || !REG_P (set_dest))
11627 + return false;
11628 + break;
11629 + case PARALLEL:
11630 + for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
11631 + if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
11632 + use_body))
11633 + return true;
11634 + default:
11635 + return false;
11636 + break;
11637 + }
11638 +
11639 + /* Retrieve shift count of USE_BODY. */
11640 + switch (GET_CODE (use_body))
11641 + {
11642 + case SET:
11643 + shift_rtx = XEXP (use_body, 1);
11644 + break;
11645 + case PARALLEL:
11646 + for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
11647 + if (ix86_dep_by_shift_count_body (set_body,
11648 + XVECEXP (use_body, 0, i)))
11649 + return true;
11650 + default:
11651 + return false;
11652 + break;
11653 + }
11654 +
11655 + if (shift_rtx
11656 + && (GET_CODE (shift_rtx) == ASHIFT
11657 + || GET_CODE (shift_rtx) == LSHIFTRT
11658 + || GET_CODE (shift_rtx) == ASHIFTRT
11659 + || GET_CODE (shift_rtx) == ROTATE
11660 + || GET_CODE (shift_rtx) == ROTATERT))
11661 + {
11662 + rtx shift_count = XEXP (shift_rtx, 1);
11663 +
11664 + /* Return true if shift count is dest of SET_BODY. */
11665 + if (REG_P (shift_count)
11666 + && true_regnum (set_dest) == true_regnum (shift_count))
11667 + return true;
11668 + }
11669 +
11670 + return false;
11671 +}
11672 +
11673 +/* Return true if destination reg of SET_INSN is shift count of
11674 + USE_INSN. */
11675 +
11676 +bool
11677 +ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
11678 +{
11679 + return ix86_dep_by_shift_count_body (PATTERN (set_insn),
11680 + PATTERN (use_insn));
11681 +}
11682 +
11683 /* Return TRUE or FALSE depending on whether the unary operator meets the
11684 appropriate constraints. */
11685
11686 @@ -18838,7 +19281,7 @@
11687 f = GGC_CNEW (struct machine_function);
11688 f->use_fast_prologue_epilogue_nregs = -1;
11689 f->tls_descriptor_call_expanded_p = 0;
11690 - f->call_abi = DEFAULT_ABI;
11691 + f->call_abi = ix86_abi;
11692
11693 return f;
11694 }
11695 @@ -19099,6 +19542,7 @@
11696 switch (ix86_tune)
11697 {
11698 case PROCESSOR_PENTIUM:
11699 + case PROCESSOR_ATOM:
11700 case PROCESSOR_K6:
11701 return 2;
11702
11703 @@ -19165,41 +19609,21 @@
11704 return 1;
11705 }
11706
11707 -/* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
11708 - address with operands set by DEP_INSN. */
11709 +/* Return true iff USE_INSN has a memory address with operands set by
11710 + SET_INSN. */
11711
11712 -static int
11713 -ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
11714 +bool
11715 +ix86_agi_dependent (rtx set_insn, rtx use_insn)
11716 {
11717 - rtx addr;
11718 -
11719 - if (insn_type == TYPE_LEA
11720 - && TARGET_PENTIUM)
11721 - {
11722 - addr = PATTERN (insn);
11723 -
11724 - if (GET_CODE (addr) == PARALLEL)
11725 - addr = XVECEXP (addr, 0, 0);
11726 -
11727 - gcc_assert (GET_CODE (addr) == SET);
11728 -
11729 - addr = SET_SRC (addr);
11730 - }
11731 - else
11732 - {
11733 - int i;
11734 - extract_insn_cached (insn);
11735 - for (i = recog_data.n_operands - 1; i >= 0; --i)
11736 - if (MEM_P (recog_data.operand[i]))
11737 - {
11738 - addr = XEXP (recog_data.operand[i], 0);
11739 - goto found;
11740 - }
11741 - return 0;
11742 - found:;
11743 - }
11744 -
11745 - return modified_in_p (addr, dep_insn);
11746 + int i;
11747 + extract_insn_cached (use_insn);
11748 + for (i = recog_data.n_operands - 1; i >= 0; --i)
11749 + if (MEM_P (recog_data.operand[i]))
11750 + {
11751 + rtx addr = XEXP (recog_data.operand[i], 0);
11752 + return modified_in_p (addr, set_insn) != 0;
11753 + }
11754 + return false;
11755 }
11756
11757 static int
11758 @@ -19227,7 +19651,20 @@
11759 {
11760 case PROCESSOR_PENTIUM:
11761 /* Address Generation Interlock adds a cycle of latency. */
11762 - if (ix86_agi_dependent (insn, dep_insn, insn_type))
11763 + if (insn_type == TYPE_LEA)
11764 + {
11765 + rtx addr = PATTERN (insn);
11766 +
11767 + if (GET_CODE (addr) == PARALLEL)
11768 + addr = XVECEXP (addr, 0, 0);
11769 +
11770 + gcc_assert (GET_CODE (addr) == SET);
11771 +
11772 + addr = SET_SRC (addr);
11773 + if (modified_in_p (addr, dep_insn))
11774 + cost += 1;
11775 + }
11776 + else if (ix86_agi_dependent (dep_insn, insn))
11777 cost += 1;
11778
11779 /* ??? Compares pair with jump/setcc. */
11780 @@ -19237,7 +19674,7 @@
11781 /* Floating point stores require value to be ready one cycle earlier. */
11782 if (insn_type == TYPE_FMOV
11783 && get_attr_memory (insn) == MEMORY_STORE
11784 - && !ix86_agi_dependent (insn, dep_insn, insn_type))
11785 + && !ix86_agi_dependent (dep_insn, insn))
11786 cost += 1;
11787 break;
11788
11789 @@ -19260,7 +19697,7 @@
11790 in parallel with previous instruction in case
11791 previous instruction is not needed to compute the address. */
11792 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11793 - && !ix86_agi_dependent (insn, dep_insn, insn_type))
11794 + && !ix86_agi_dependent (dep_insn, insn))
11795 {
11796 /* Claim moves to take one cycle, as core can issue one load
11797 at time and the next load can start cycle later. */
11798 @@ -19289,7 +19726,7 @@
11799 in parallel with previous instruction in case
11800 previous instruction is not needed to compute the address. */
11801 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11802 - && !ix86_agi_dependent (insn, dep_insn, insn_type))
11803 + && !ix86_agi_dependent (dep_insn, insn))
11804 {
11805 /* Claim moves to take one cycle, as core can issue one load
11806 at time and the next load can start cycle later. */
11807 @@ -19306,6 +19743,7 @@
11808 case PROCESSOR_ATHLON:
11809 case PROCESSOR_K8:
11810 case PROCESSOR_AMDFAM10:
11811 + case PROCESSOR_ATOM:
11812 case PROCESSOR_GENERIC32:
11813 case PROCESSOR_GENERIC64:
11814 memory = get_attr_memory (insn);
11815 @@ -19314,7 +19752,7 @@
11816 in parallel with previous instruction in case
11817 previous instruction is not needed to compute the address. */
11818 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11819 - && !ix86_agi_dependent (insn, dep_insn, insn_type))
11820 + && !ix86_agi_dependent (dep_insn, insn))
11821 {
11822 enum attr_unit unit = get_attr_unit (insn);
11823 int loadcost = 3;
11824 @@ -29594,14 +30032,11 @@
11825 tree
11826 ix86_fn_abi_va_list (tree fndecl)
11827 {
11828 - int abi;
11829 -
11830 if (!TARGET_64BIT)
11831 return va_list_type_node;
11832 gcc_assert (fndecl != NULL_TREE);
11833 - abi = ix86_function_abi ((const_tree) fndecl);
11834
11835 - if (abi == MS_ABI)
11836 + if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
11837 return ms_va_list_type_node;
11838 else
11839 return sysv_va_list_type_node;
11840 --- a/gcc/config/i386/i386-c.c
11841 +++ b/gcc/config/i386/i386-c.c
11842 @@ -119,6 +119,10 @@
11843 def_or_undef (parse_in, "__core2");
11844 def_or_undef (parse_in, "__core2__");
11845 break;
11846 + case PROCESSOR_ATOM:
11847 + def_or_undef (parse_in, "__atom");
11848 + def_or_undef (parse_in, "__atom__");
11849 + break;
11850 /* use PROCESSOR_max to not set/unset the arch macro. */
11851 case PROCESSOR_max:
11852 break;
11853 @@ -187,6 +191,9 @@
11854 case PROCESSOR_CORE2:
11855 def_or_undef (parse_in, "__tune_core2__");
11856 break;
11857 + case PROCESSOR_ATOM:
11858 + def_or_undef (parse_in, "__tune_atom__");
11859 + break;
11860 case PROCESSOR_GENERIC32:
11861 case PROCESSOR_GENERIC64:
11862 break;
11863 --- a/gcc/config/i386/i386.h
11864 +++ b/gcc/config/i386/i386.h
11865 @@ -59,6 +59,7 @@
11866 #define TARGET_ABM OPTION_ISA_ABM
11867 #define TARGET_POPCNT OPTION_ISA_POPCNT
11868 #define TARGET_SAHF OPTION_ISA_SAHF
11869 +#define TARGET_MOVBE OPTION_ISA_MOVBE
11870 #define TARGET_AES OPTION_ISA_AES
11871 #define TARGET_PCLMUL OPTION_ISA_PCLMUL
11872 #define TARGET_CMPXCHG16B OPTION_ISA_CX16
11873 @@ -236,6 +237,7 @@
11874 #define TARGET_GENERIC64 (ix86_tune == PROCESSOR_GENERIC64)
11875 #define TARGET_GENERIC (TARGET_GENERIC32 || TARGET_GENERIC64)
11876 #define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10)
11877 +#define TARGET_ATOM (ix86_tune == PROCESSOR_ATOM)
11878
11879 /* Feature tests against the various tunings. */
11880 enum ix86_tune_indices {
11881 @@ -300,6 +302,7 @@
11882 X86_TUNE_USE_VECTOR_FP_CONVERTS,
11883 X86_TUNE_USE_VECTOR_CONVERTS,
11884 X86_TUNE_FUSE_CMP_AND_BRANCH,
11885 + X86_TUNE_OPT_AGU,
11886
11887 X86_TUNE_LAST
11888 };
11889 @@ -387,6 +390,7 @@
11890 ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS]
11891 #define TARGET_FUSE_CMP_AND_BRANCH \
11892 ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH]
11893 +#define TARGET_OPT_AGU ix86_tune_features[X86_TUNE_OPT_AGU]
11894
11895 /* Feature tests against the various architecture variations. */
11896 enum ix86_arch_indices {
11897 @@ -470,7 +474,10 @@
11898 MS_ABI = 1
11899 };
11900
11901 -/* The default abi form used by target. */
11902 +/* The abi used by target. */
11903 +extern enum calling_abi ix86_abi;
11904 +
11905 +/* The default abi used by target. */
11906 #define DEFAULT_ABI SYSV_ABI
11907
11908 /* Subtargets may reset this to 1 in order to enable 96-bit long double
11909 @@ -569,6 +576,7 @@
11910 TARGET_CPU_DEFAULT_prescott,
11911 TARGET_CPU_DEFAULT_nocona,
11912 TARGET_CPU_DEFAULT_core2,
11913 + TARGET_CPU_DEFAULT_atom,
11914
11915 TARGET_CPU_DEFAULT_geode,
11916 TARGET_CPU_DEFAULT_k6,
11917 @@ -658,7 +666,7 @@
11918
11919 /* Boundary (in *bits*) on which stack pointer should be aligned. */
11920 #define STACK_BOUNDARY \
11921 - (TARGET_64BIT && DEFAULT_ABI == MS_ABI ? 128 : BITS_PER_WORD)
11922 + (TARGET_64BIT && ix86_abi == MS_ABI ? 128 : BITS_PER_WORD)
11923
11924 /* Stack boundary of the main function guaranteed by OS. */
11925 #define MAIN_STACK_BOUNDARY (TARGET_64BIT ? 128 : 32)
11926 @@ -1584,7 +1592,7 @@
11927 int maybe_vaarg; /* true for calls to possibly vardic fncts. */
11928 int float_in_sse; /* 1 if in 32-bit mode SFmode (2 for DFmode) should
11929 be passed in SSE registers. Otherwise 0. */
11930 - int call_abi; /* Set to SYSV_ABI for sysv abi. Otherwise
11931 + enum calling_abi call_abi; /* Set to SYSV_ABI for sysv abi. Otherwise
11932 MS_ABI for ms abi. */
11933 } CUMULATIVE_ARGS;
11934
11935 @@ -2230,6 +2238,7 @@
11936 PROCESSOR_GENERIC32,
11937 PROCESSOR_GENERIC64,
11938 PROCESSOR_AMDFAM10,
11939 + PROCESSOR_ATOM,
11940 PROCESSOR_max
11941 };
11942
11943 @@ -2403,7 +2412,7 @@
11944 int tls_descriptor_call_expanded_p;
11945 /* This value is used for amd64 targets and specifies the current abi
11946 to be used. MS_ABI means ms abi. Otherwise SYSV_ABI means sysv abi. */
11947 - int call_abi;
11948 + enum calling_abi call_abi;
11949 };
11950
11951 #define ix86_stack_locals (cfun->machine->stack_locals)
11952 --- a/gcc/config/i386/i386.md
11953 +++ b/gcc/config/i386/i386.md
11954 @@ -316,7 +316,7 @@
11955
11956 \f
11957 ;; Processor type.
11958 -(define_attr "cpu" "none,pentium,pentiumpro,geode,k6,athlon,k8,core2,
11959 +(define_attr "cpu" "none,pentium,pentiumpro,geode,k6,athlon,k8,core2,atom,
11960 generic64,amdfam10"
11961 (const (symbol_ref "ix86_schedule")))
11962
11963 @@ -612,6 +612,12 @@
11964 (define_attr "i387_cw" "trunc,floor,ceil,mask_pm,uninitialized,any"
11965 (const_string "any"))
11966
11967 +;; Define attribute to classify add/sub insns that consumes carry flag (CF)
11968 +(define_attr "use_carry" "0,1" (const_string "0"))
11969 +
11970 +;; Define attribute to indicate unaligned ssemov insns
11971 +(define_attr "movu" "0,1" (const_string "0"))
11972 +
11973 ;; Describe a user's asm statement.
11974 (define_asm_attributes
11975 [(set_attr "length" "128")
11976 @@ -727,6 +733,7 @@
11977 (include "k6.md")
11978 (include "athlon.md")
11979 (include "geode.md")
11980 +(include "atom.md")
11981
11982 \f
11983 ;; Operand and operator predicates and constraints
11984 @@ -5790,6 +5797,7 @@
11985 "TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)"
11986 "adc{q}\t{%2, %0|%0, %2}"
11987 [(set_attr "type" "alu")
11988 + (set_attr "use_carry" "1")
11989 (set_attr "pent_pair" "pu")
11990 (set_attr "mode" "DI")])
11991
11992 @@ -5864,6 +5872,7 @@
11993 "ix86_binary_operator_ok (PLUS, QImode, operands)"
11994 "adc{b}\t{%2, %0|%0, %2}"
11995 [(set_attr "type" "alu")
11996 + (set_attr "use_carry" "1")
11997 (set_attr "pent_pair" "pu")
11998 (set_attr "mode" "QI")])
11999
12000 @@ -5876,6 +5885,7 @@
12001 "ix86_binary_operator_ok (PLUS, HImode, operands)"
12002 "adc{w}\t{%2, %0|%0, %2}"
12003 [(set_attr "type" "alu")
12004 + (set_attr "use_carry" "1")
12005 (set_attr "pent_pair" "pu")
12006 (set_attr "mode" "HI")])
12007
12008 @@ -5888,6 +5898,7 @@
12009 "ix86_binary_operator_ok (PLUS, SImode, operands)"
12010 "adc{l}\t{%2, %0|%0, %2}"
12011 [(set_attr "type" "alu")
12012 + (set_attr "use_carry" "1")
12013 (set_attr "pent_pair" "pu")
12014 (set_attr "mode" "SI")])
12015
12016 @@ -5901,6 +5912,7 @@
12017 "TARGET_64BIT && ix86_binary_operator_ok (PLUS, SImode, operands)"
12018 "adc{l}\t{%2, %k0|%k0, %2}"
12019 [(set_attr "type" "alu")
12020 + (set_attr "use_carry" "1")
12021 (set_attr "pent_pair" "pu")
12022 (set_attr "mode" "SI")])
12023
12024 @@ -6130,9 +6142,9 @@
12025 (set_attr "mode" "SI")])
12026
12027 (define_insn "*adddi_1_rex64"
12028 - [(set (match_operand:DI 0 "nonimmediate_operand" "=r,rm,r")
12029 - (plus:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0,r")
12030 - (match_operand:DI 2 "x86_64_general_operand" "rme,re,le")))
12031 + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,rm,r,r")
12032 + (plus:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0,r,r")
12033 + (match_operand:DI 2 "x86_64_general_operand" "rme,re,0,le")))
12034 (clobber (reg:CC FLAGS_REG))]
12035 "TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)"
12036 {
12037 @@ -6153,6 +6165,10 @@
12038 }
12039
12040 default:
12041 + /* Use add as much as possible to replace lea for AGU optimization. */
12042 + if (which_alternative == 2 && TARGET_OPT_AGU)
12043 + return "add{q}\t{%1, %0|%0, %1}";
12044 +
12045 gcc_assert (rtx_equal_p (operands[0], operands[1]));
12046
12047 /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
12048 @@ -6171,8 +6187,11 @@
12049 }
12050 }
12051 [(set (attr "type")
12052 - (cond [(eq_attr "alternative" "2")
12053 + (cond [(and (eq_attr "alternative" "2")
12054 + (eq (symbol_ref "TARGET_OPT_AGU") (const_int 0)))
12055 (const_string "lea")
12056 + (eq_attr "alternative" "3")
12057 + (const_string "lea")
12058 ; Current assemblers are broken and do not allow @GOTOFF in
12059 ; ought but a memory context.
12060 (match_operand:DI 2 "pic_symbolic_operand" "")
12061 @@ -6189,8 +6208,8 @@
12062 (plus:DI (match_operand:DI 1 "register_operand" "")
12063 (match_operand:DI 2 "x86_64_nonmemory_operand" "")))
12064 (clobber (reg:CC FLAGS_REG))]
12065 - "TARGET_64BIT && reload_completed
12066 - && true_regnum (operands[0]) != true_regnum (operands[1])"
12067 + "TARGET_64BIT && reload_completed
12068 + && ix86_lea_for_add_ok (PLUS, insn, operands)"
12069 [(set (match_dup 0)
12070 (plus:DI (match_dup 1)
12071 (match_dup 2)))]
12072 @@ -6394,9 +6413,9 @@
12073
12074
12075 (define_insn "*addsi_1"
12076 - [(set (match_operand:SI 0 "nonimmediate_operand" "=r,rm,r")
12077 - (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0,r")
12078 - (match_operand:SI 2 "general_operand" "g,ri,li")))
12079 + [(set (match_operand:SI 0 "nonimmediate_operand" "=r,rm,r,r")
12080 + (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0,r,r")
12081 + (match_operand:SI 2 "general_operand" "g,ri,0,li")))
12082 (clobber (reg:CC FLAGS_REG))]
12083 "ix86_binary_operator_ok (PLUS, SImode, operands)"
12084 {
12085 @@ -6417,6 +6436,10 @@
12086 }
12087
12088 default:
12089 + /* Use add as much as possible to replace lea for AGU optimization. */
12090 + if (which_alternative == 2 && TARGET_OPT_AGU)
12091 + return "add{l}\t{%1, %0|%0, %1}";
12092 +
12093 gcc_assert (rtx_equal_p (operands[0], operands[1]));
12094
12095 /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
12096 @@ -6433,7 +6456,10 @@
12097 }
12098 }
12099 [(set (attr "type")
12100 - (cond [(eq_attr "alternative" "2")
12101 + (cond [(and (eq_attr "alternative" "2")
12102 + (eq (symbol_ref "TARGET_OPT_AGU") (const_int 0)))
12103 + (const_string "lea")
12104 + (eq_attr "alternative" "3")
12105 (const_string "lea")
12106 ; Current assemblers are broken and do not allow @GOTOFF in
12107 ; ought but a memory context.
12108 @@ -6451,8 +6477,7 @@
12109 (plus (match_operand 1 "register_operand" "")
12110 (match_operand 2 "nonmemory_operand" "")))
12111 (clobber (reg:CC FLAGS_REG))]
12112 - "reload_completed
12113 - && true_regnum (operands[0]) != true_regnum (operands[1])"
12114 + "reload_completed && ix86_lea_for_add_ok (PLUS, insn, operands)"
12115 [(const_int 0)]
12116 {
12117 rtx pat;
12118 @@ -7553,6 +7578,7 @@
12119 "TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands)"
12120 "sbb{q}\t{%2, %0|%0, %2}"
12121 [(set_attr "type" "alu")
12122 + (set_attr "use_carry" "1")
12123 (set_attr "pent_pair" "pu")
12124 (set_attr "mode" "DI")])
12125
12126 @@ -7601,6 +7627,7 @@
12127 "ix86_binary_operator_ok (MINUS, QImode, operands)"
12128 "sbb{b}\t{%2, %0|%0, %2}"
12129 [(set_attr "type" "alu")
12130 + (set_attr "use_carry" "1")
12131 (set_attr "pent_pair" "pu")
12132 (set_attr "mode" "QI")])
12133
12134 @@ -7613,6 +7640,7 @@
12135 "ix86_binary_operator_ok (MINUS, HImode, operands)"
12136 "sbb{w}\t{%2, %0|%0, %2}"
12137 [(set_attr "type" "alu")
12138 + (set_attr "use_carry" "1")
12139 (set_attr "pent_pair" "pu")
12140 (set_attr "mode" "HI")])
12141
12142 @@ -7625,6 +7653,7 @@
12143 "ix86_binary_operator_ok (MINUS, SImode, operands)"
12144 "sbb{l}\t{%2, %0|%0, %2}"
12145 [(set_attr "type" "alu")
12146 + (set_attr "use_carry" "1")
12147 (set_attr "pent_pair" "pu")
12148 (set_attr "mode" "SI")])
12149
12150 @@ -15155,7 +15184,7 @@
12151 ? gen_rtx_REG (XCmode, FIRST_FLOAT_REG) : NULL),
12152 operands[0], const0_rtx,
12153 GEN_INT ((TARGET_64BIT
12154 - ? (DEFAULT_ABI == SYSV_ABI
12155 + ? (ix86_abi == SYSV_ABI
12156 ? X86_64_SSE_REGPARM_MAX
12157 : X64_SSE_REGPARM_MAX)
12158 : X86_32_SSE_REGPARM_MAX)
12159 @@ -15235,6 +15264,7 @@
12160 "reload_completed"
12161 "ret"
12162 [(set_attr "length" "1")
12163 + (set_attr "atom_unit" "jeu")
12164 (set_attr "length_immediate" "0")
12165 (set_attr "modrm" "0")])
12166
12167 @@ -15247,6 +15277,7 @@
12168 "reload_completed"
12169 "rep\;ret"
12170 [(set_attr "length" "1")
12171 + (set_attr "atom_unit" "jeu")
12172 (set_attr "length_immediate" "0")
12173 (set_attr "prefix_rep" "1")
12174 (set_attr "modrm" "0")])
12175 @@ -15257,6 +15288,7 @@
12176 "reload_completed"
12177 "ret\t%0"
12178 [(set_attr "length" "3")
12179 + (set_attr "atom_unit" "jeu")
12180 (set_attr "length_immediate" "2")
12181 (set_attr "modrm" "0")])
12182
12183 @@ -15610,7 +15642,7 @@
12184 (bswap:SI (match_operand:SI 1 "register_operand" "")))]
12185 ""
12186 {
12187 - if (!TARGET_BSWAP)
12188 + if (!(TARGET_BSWAP || TARGET_MOVBE))
12189 {
12190 rtx x = operands[0];
12191
12192 @@ -15622,6 +15654,21 @@
12193 }
12194 })
12195
12196 +(define_insn "*bswapsi_movbe"
12197 + [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,m")
12198 + (bswap:SI (match_operand:SI 1 "nonimmediate_operand" "0,m,r")))]
12199 + "TARGET_MOVBE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
12200 + "@
12201 + bswap\t%0
12202 + movbe\t{%1, %0|%0, %1}
12203 + movbe\t{%1, %0|%0, %1}"
12204 + [(set_attr "type" "*,imov,imov")
12205 + (set_attr "modrm" "*,1,1")
12206 + (set_attr "prefix_0f" "1")
12207 + (set_attr "prefix_extra" "*,1,1")
12208 + (set_attr "length" "2,*,*")
12209 + (set_attr "mode" "SI")])
12210 +
12211 (define_insn "*bswapsi_1"
12212 [(set (match_operand:SI 0 "register_operand" "=r")
12213 (bswap:SI (match_operand:SI 1 "register_operand" "0")))]
12214 @@ -15650,7 +15697,29 @@
12215 [(set_attr "length" "4")
12216 (set_attr "mode" "HI")])
12217
12218 -(define_insn "bswapdi2"
12219 +(define_expand "bswapdi2"
12220 + [(set (match_operand:DI 0 "register_operand" "")
12221 + (bswap:DI (match_operand:DI 1 "register_operand" "")))]
12222 + "TARGET_64BIT"
12223 + "")
12224 +
12225 +(define_insn "*bswapdi_movbe"
12226 + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,m")
12227 + (bswap:DI (match_operand:DI 1 "nonimmediate_operand" "0,m,r")))]
12228 + "TARGET_64BIT && TARGET_MOVBE
12229 + && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
12230 + "@
12231 + bswap\t%0
12232 + movbe\t{%1, %0|%0, %1}
12233 + movbe\t{%1, %0|%0, %1}"
12234 + [(set_attr "type" "*,imov,imov")
12235 + (set_attr "modrm" "*,1,1")
12236 + (set_attr "prefix_0f" "1")
12237 + (set_attr "prefix_extra" "*,1,1")
12238 + (set_attr "length" "3,*,*")
12239 + (set_attr "mode" "DI")])
12240 +
12241 +(define_insn "*bswapdi_1"
12242 [(set (match_operand:DI 0 "register_operand" "=r")
12243 (bswap:DI (match_operand:DI 1 "register_operand" "0")))]
12244 "TARGET_64BIT"
12245 @@ -16378,6 +16447,7 @@
12246 "TARGET_SSE_MATH"
12247 "%vrcpss\t{%1, %d0|%d0, %1}"
12248 [(set_attr "type" "sse")
12249 + (set_attr "atom_sse_attr" "rcp")
12250 (set_attr "prefix" "maybe_vex")
12251 (set_attr "mode" "SF")])
12252
12253 @@ -16729,6 +16799,7 @@
12254 "TARGET_SSE_MATH"
12255 "%vrsqrtss\t{%1, %d0|%d0, %1}"
12256 [(set_attr "type" "sse")
12257 + (set_attr "atom_sse_attr" "rcp")
12258 (set_attr "prefix" "maybe_vex")
12259 (set_attr "mode" "SF")])
12260
12261 @@ -16749,6 +16820,7 @@
12262 "SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH"
12263 "%vsqrts<ssemodefsuffix>\t{%1, %d0|%d0, %1}"
12264 [(set_attr "type" "sse")
12265 + (set_attr "atom_sse_attr" "sqrt")
12266 (set_attr "prefix" "maybe_vex")
12267 (set_attr "mode" "<MODE>")
12268 (set_attr "athlon_decode" "*")
12269 @@ -19802,6 +19874,7 @@
12270 ; Since we don't have the proper number of operands for an alu insn,
12271 ; fill in all the blanks.
12272 [(set_attr "type" "alu")
12273 + (set_attr "use_carry" "1")
12274 (set_attr "pent_pair" "pu")
12275 (set_attr "memory" "none")
12276 (set_attr "imm_disp" "false")
12277 @@ -19817,6 +19890,7 @@
12278 ""
12279 "sbb{q}\t%0, %0"
12280 [(set_attr "type" "alu")
12281 + (set_attr "use_carry" "1")
12282 (set_attr "pent_pair" "pu")
12283 (set_attr "memory" "none")
12284 (set_attr "imm_disp" "false")
12285 @@ -19860,6 +19934,7 @@
12286 ; Since we don't have the proper number of operands for an alu insn,
12287 ; fill in all the blanks.
12288 [(set_attr "type" "alu")
12289 + (set_attr "use_carry" "1")
12290 (set_attr "pent_pair" "pu")
12291 (set_attr "memory" "none")
12292 (set_attr "imm_disp" "false")
12293 @@ -19875,6 +19950,7 @@
12294 ""
12295 "sbb{l}\t%0, %0"
12296 [(set_attr "type" "alu")
12297 + (set_attr "use_carry" "1")
12298 (set_attr "pent_pair" "pu")
12299 (set_attr "memory" "none")
12300 (set_attr "imm_disp" "false")
12301 @@ -20207,7 +20283,8 @@
12302 }
12303 }
12304 [(set (attr "type")
12305 - (cond [(eq_attr "alternative" "0")
12306 + (cond [(and (eq_attr "alternative" "0")
12307 + (eq (symbol_ref "TARGET_OPT_AGU") (const_int 0)))
12308 (const_string "alu")
12309 (match_operand:SI 2 "const0_operand" "")
12310 (const_string "imov")
12311 @@ -20250,7 +20327,8 @@
12312 }
12313 }
12314 [(set (attr "type")
12315 - (cond [(eq_attr "alternative" "0")
12316 + (cond [(and (eq_attr "alternative" "0")
12317 + (eq (symbol_ref "TARGET_OPT_AGU") (const_int 0)))
12318 (const_string "alu")
12319 (match_operand:DI 2 "const0_operand" "")
12320 (const_string "imov")
12321 @@ -21734,6 +21812,7 @@
12322 return patterns[locality];
12323 }
12324 [(set_attr "type" "sse")
12325 + (set_attr "atom_sse_attr" "prefetch")
12326 (set_attr "memory" "none")])
12327
12328 (define_insn "*prefetch_sse_rex"
12329 @@ -21752,6 +21831,7 @@
12330 return patterns[locality];
12331 }
12332 [(set_attr "type" "sse")
12333 + (set_attr "atom_sse_attr" "prefetch")
12334 (set_attr "memory" "none")])
12335
12336 (define_insn "*prefetch_3dnow"
12337 --- a/gcc/config/i386/i386.opt
12338 +++ b/gcc/config/i386/i386.opt
12339 @@ -228,6 +228,10 @@
12340 Target RejectNegative Joined Var(ix86_tune_string)
12341 Schedule code for given CPU
12342
12343 +mabi=
12344 +Target RejectNegative Joined Var(ix86_abi_string)
12345 +Generate code that conforms to the given ABI
12346 +
12347 mveclibabi=
12348 Target RejectNegative Joined Var(ix86_veclibabi_string)
12349 Vector library ABI to use
12350 @@ -335,6 +339,10 @@
12351 Target Report Mask(ISA_SAHF) Var(ix86_isa_flags) VarExists Save
12352 Support code generation of sahf instruction in 64bit x86-64 code.
12353
12354 +mmovbe
12355 +Target Report Mask(ISA_MOVBE) Var(ix86_isa_flags) VarExists Save
12356 +Support code generation of movbe instruction.
12357 +
12358 maes
12359 Target Report Mask(ISA_AES) Var(ix86_isa_flags) VarExists Save
12360 Support AES built-in functions and code generation
12361 --- a/gcc/config/i386/i386-protos.h
12362 +++ b/gcc/config/i386/i386-protos.h
12363 @@ -86,6 +86,9 @@
12364 extern void ix86_expand_binary_operator (enum rtx_code,
12365 enum machine_mode, rtx[]);
12366 extern int ix86_binary_operator_ok (enum rtx_code, enum machine_mode, rtx[]);
12367 +extern bool ix86_lea_for_add_ok (enum rtx_code, rtx, rtx[]);
12368 +extern bool ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn);
12369 +extern bool ix86_agi_dependent (rtx set_insn, rtx use_insn);
12370 extern void ix86_expand_unary_operator (enum rtx_code, enum machine_mode,
12371 rtx[]);
12372 extern rtx ix86_build_const_vector (enum machine_mode, bool, rtx);
12373 @@ -140,9 +143,8 @@
12374 extern bool ix86_sol10_return_in_memory (const_tree,const_tree);
12375 extern rtx ix86_force_to_memory (enum machine_mode, rtx);
12376 extern void ix86_free_from_memory (enum machine_mode);
12377 -extern int ix86_cfun_abi (void);
12378 -extern int ix86_function_abi (const_tree);
12379 -extern int ix86_function_type_abi (const_tree);
12380 +extern enum calling_abi ix86_cfun_abi (void);
12381 +extern enum calling_abi ix86_function_type_abi (const_tree);
12382 extern void ix86_call_abi_override (const_tree);
12383 extern tree ix86_fn_abi_va_list (tree);
12384 extern tree ix86_canonical_va_list_type (tree);
12385 --- a/gcc/config/i386/mingw32.h
12386 +++ b/gcc/config/i386/mingw32.h
12387 @@ -38,7 +38,7 @@
12388 builtin_define_std ("WINNT"); \
12389 builtin_define_with_int_value ("_INTEGRAL_MAX_BITS", \
12390 TYPE_PRECISION (intmax_type_node));\
12391 - if (TARGET_64BIT && DEFAULT_ABI == MS_ABI) \
12392 + if (TARGET_64BIT && ix86_abi == MS_ABI) \
12393 { \
12394 builtin_define ("__MINGW64__"); \
12395 builtin_define_std ("WIN64"); \
12396 --- a/gcc/config/i386/sse.md
12397 +++ b/gcc/config/i386/sse.md
12398 @@ -338,6 +338,7 @@
12399 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
12400 "vmovup<avxmodesuffixf2c>\t{%1, %0|%0, %1}"
12401 [(set_attr "type" "ssemov")
12402 + (set_attr "movu" "1")
12403 (set_attr "prefix" "vex")
12404 (set_attr "mode" "<MODE>")])
12405
12406 @@ -363,6 +364,7 @@
12407 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
12408 "movup<ssemodesuffixf2c>\t{%1, %0|%0, %1}"
12409 [(set_attr "type" "ssemov")
12410 + (set_attr "movu" "1")
12411 (set_attr "mode" "<MODE>")])
12412
12413 (define_insn "avx_movdqu<avxmodesuffix>"
12414 @@ -373,6 +375,7 @@
12415 "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
12416 "vmovdqu\t{%1, %0|%0, %1}"
12417 [(set_attr "type" "ssemov")
12418 + (set_attr "movu" "1")
12419 (set_attr "prefix" "vex")
12420 (set_attr "mode" "<avxvecmode>")])
12421
12422 @@ -383,6 +386,7 @@
12423 "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
12424 "movdqu\t{%1, %0|%0, %1}"
12425 [(set_attr "type" "ssemov")
12426 + (set_attr "movu" "1")
12427 (set_attr "prefix_data16" "1")
12428 (set_attr "mode" "TI")])
12429
12430 @@ -424,7 +428,7 @@
12431 UNSPEC_MOVNT))]
12432 "TARGET_SSE2"
12433 "movntdq\t{%1, %0|%0, %1}"
12434 - [(set_attr "type" "ssecvt")
12435 + [(set_attr "type" "ssemov")
12436 (set_attr "prefix_data16" "1")
12437 (set_attr "mode" "TI")])
12438
12439 @@ -434,7 +438,7 @@
12440 UNSPEC_MOVNT))]
12441 "TARGET_SSE2"
12442 "movnti\t{%1, %0|%0, %1}"
12443 - [(set_attr "type" "ssecvt")
12444 + [(set_attr "type" "ssemov")
12445 (set_attr "mode" "V2DF")])
12446
12447 (define_insn "avx_lddqu<avxmodesuffix>"
12448 @@ -445,6 +449,7 @@
12449 "TARGET_AVX"
12450 "vlddqu\t{%1, %0|%0, %1}"
12451 [(set_attr "type" "ssecvt")
12452 + (set_attr "movu" "1")
12453 (set_attr "prefix" "vex")
12454 (set_attr "mode" "<avxvecmode>")])
12455
12456 @@ -454,7 +459,8 @@
12457 UNSPEC_LDDQU))]
12458 "TARGET_SSE3"
12459 "lddqu\t{%1, %0|%0, %1}"
12460 - [(set_attr "type" "ssecvt")
12461 + [(set_attr "type" "ssemov")
12462 + (set_attr "movu" "1")
12463 (set_attr "prefix_rep" "1")
12464 (set_attr "mode" "TI")])
12465
12466 @@ -761,6 +767,7 @@
12467 "TARGET_SSE"
12468 "%vrcpps\t{%1, %0|%0, %1}"
12469 [(set_attr "type" "sse")
12470 + (set_attr "atom_sse_attr" "rcp")
12471 (set_attr "prefix" "maybe_vex")
12472 (set_attr "mode" "V4SF")])
12473
12474 @@ -787,6 +794,7 @@
12475 "TARGET_SSE"
12476 "rcpss\t{%1, %0|%0, %1}"
12477 [(set_attr "type" "sse")
12478 + (set_attr "atom_sse_attr" "rcp")
12479 (set_attr "mode" "SF")])
12480
12481 (define_expand "sqrtv8sf2"
12482 @@ -832,6 +840,7 @@
12483 "TARGET_SSE"
12484 "%vsqrtps\t{%1, %0|%0, %1}"
12485 [(set_attr "type" "sse")
12486 + (set_attr "atom_sse_attr" "sqrt")
12487 (set_attr "prefix" "maybe_vex")
12488 (set_attr "mode" "V4SF")])
12489
12490 @@ -876,6 +885,7 @@
12491 "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
12492 "sqrts<ssemodesuffixf2c>\t{%1, %0|%0, %1}"
12493 [(set_attr "type" "sse")
12494 + (set_attr "atom_sse_attr" "sqrt")
12495 (set_attr "mode" "<ssescalarmode>")])
12496
12497 (define_expand "rsqrtv8sf2"
12498 @@ -1039,7 +1049,7 @@
12499 (const_int 1)))]
12500 "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
12501 "<maxminfprefix>s<ssemodesuffixf2c>\t{%2, %0|%0, %2}"
12502 - [(set_attr "type" "sse")
12503 + [(set_attr "type" "sseadd")
12504 (set_attr "mode" "<ssescalarmode>")])
12505
12506 ;; These versions of the min/max patterns implement exactly the operations
12507 @@ -1175,6 +1185,7 @@
12508 "TARGET_SSE3"
12509 "addsubpd\t{%2, %0|%0, %2}"
12510 [(set_attr "type" "sseadd")
12511 + (set_attr "atom_unit" "complex")
12512 (set_attr "mode" "V2DF")])
12513
12514 (define_insn "avx_h<plusminus_insn>v4df3"
12515 @@ -1298,6 +1309,7 @@
12516 "TARGET_SSE3"
12517 "h<plusminus_mnemonic>ps\t{%2, %0|%0, %2}"
12518 [(set_attr "type" "sseadd")
12519 + (set_attr "atom_unit" "complex")
12520 (set_attr "prefix_rep" "1")
12521 (set_attr "mode" "V4SF")])
12522
12523 @@ -5066,6 +5078,7 @@
12524 "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
12525 "pmaddwd\t{%2, %0|%0, %2}"
12526 [(set_attr "type" "sseiadd")
12527 + (set_attr "atom_unit" "simul")
12528 (set_attr "prefix_data16" "1")
12529 (set_attr "mode" "TI")])
12530
12531 @@ -7025,6 +7038,7 @@
12532 movq\t{%H1, %0|%0, %H1}
12533 mov{q}\t{%H1, %0|%0, %H1}"
12534 [(set_attr "type" "ssemov,sseishft,ssemov,imov")
12535 + (set_attr "atom_unit" "*,sishuf,*,*")
12536 (set_attr "memory" "*,none,*,*")
12537 (set_attr "mode" "V2SF,TI,TI,DI")])
12538
12539 @@ -7057,6 +7071,7 @@
12540 psrldq\t{$8, %0|%0, 8}
12541 movq\t{%H1, %0|%0, %H1}"
12542 [(set_attr "type" "ssemov,sseishft,ssemov")
12543 + (set_attr "atom_unit" "*,sishuf,*")
12544 (set_attr "memory" "*,none,*")
12545 (set_attr "mode" "V2SF,TI,TI")])
12546
12547 @@ -7614,6 +7629,7 @@
12548 "TARGET_SSE2"
12549 "psadbw\t{%2, %0|%0, %2}"
12550 [(set_attr "type" "sseiadd")
12551 + (set_attr "atom_unit" "simul")
12552 (set_attr "prefix_data16" "1")
12553 (set_attr "mode" "TI")])
12554
12555 @@ -7635,7 +7651,7 @@
12556 UNSPEC_MOVMSK))]
12557 "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
12558 "%vmovmskp<ssemodesuffixf2c>\t{%1, %0|%0, %1}"
12559 - [(set_attr "type" "ssecvt")
12560 + [(set_attr "type" "ssemov")
12561 (set_attr "prefix" "maybe_vex")
12562 (set_attr "mode" "<MODE>")])
12563
12564 @@ -7645,7 +7661,7 @@
12565 UNSPEC_MOVMSK))]
12566 "TARGET_SSE2"
12567 "%vpmovmskb\t{%1, %0|%0, %1}"
12568 - [(set_attr "type" "ssecvt")
12569 + [(set_attr "type" "ssemov")
12570 (set_attr "prefix_data16" "1")
12571 (set_attr "prefix" "maybe_vex")
12572 (set_attr "mode" "SI")])
12573 @@ -7668,7 +7684,7 @@
12574 "TARGET_SSE2 && !TARGET_64BIT"
12575 ;; @@@ check ordering of operands in intel/nonintel syntax
12576 "%vmaskmovdqu\t{%2, %1|%1, %2}"
12577 - [(set_attr "type" "ssecvt")
12578 + [(set_attr "type" "ssemov")
12579 (set_attr "prefix_data16" "1")
12580 (set_attr "prefix" "maybe_vex")
12581 (set_attr "mode" "TI")])
12582 @@ -7682,7 +7698,7 @@
12583 "TARGET_SSE2 && TARGET_64BIT"
12584 ;; @@@ check ordering of operands in intel/nonintel syntax
12585 "%vmaskmovdqu\t{%2, %1|%1, %2}"
12586 - [(set_attr "type" "ssecvt")
12587 + [(set_attr "type" "ssemov")
12588 (set_attr "prefix_data16" "1")
12589 (set_attr "prefix" "maybe_vex")
12590 (set_attr "mode" "TI")])
12591 @@ -7693,6 +7709,7 @@
12592 "TARGET_SSE"
12593 "%vldmxcsr\t%0"
12594 [(set_attr "type" "sse")
12595 + (set_attr "atom_sse_attr" "mxcsr")
12596 (set_attr "prefix" "maybe_vex")
12597 (set_attr "memory" "load")])
12598
12599 @@ -7702,6 +7719,7 @@
12600 "TARGET_SSE"
12601 "%vstmxcsr\t%0"
12602 [(set_attr "type" "sse")
12603 + (set_attr "atom_sse_attr" "mxcsr")
12604 (set_attr "prefix" "maybe_vex")
12605 (set_attr "memory" "store")])
12606
12607 @@ -7720,6 +7738,7 @@
12608 "TARGET_SSE || TARGET_3DNOW_A"
12609 "sfence"
12610 [(set_attr "type" "sse")
12611 + (set_attr "atom_sse_attr" "fence")
12612 (set_attr "memory" "unknown")])
12613
12614 (define_insn "sse2_clflush"
12615 @@ -7728,6 +7747,7 @@
12616 "TARGET_SSE2"
12617 "clflush\t%a0"
12618 [(set_attr "type" "sse")
12619 + (set_attr "atom_sse_attr" "fence")
12620 (set_attr "memory" "unknown")])
12621
12622 (define_expand "sse2_mfence"
12623 @@ -7745,6 +7765,7 @@
12624 "TARGET_64BIT || TARGET_SSE2"
12625 "mfence"
12626 [(set_attr "type" "sse")
12627 + (set_attr "atom_sse_attr" "fence")
12628 (set_attr "memory" "unknown")])
12629
12630 (define_expand "sse2_lfence"
12631 @@ -7762,6 +7783,7 @@
12632 "TARGET_SSE2"
12633 "lfence"
12634 [(set_attr "type" "sse")
12635 + (set_attr "atom_sse_attr" "lfence")
12636 (set_attr "memory" "unknown")])
12637
12638 (define_insn "sse3_mwait"
12639 @@ -7885,6 +7907,7 @@
12640 "TARGET_SSSE3"
12641 "phaddw\t{%2, %0|%0, %2}"
12642 [(set_attr "type" "sseiadd")
12643 + (set_attr "atom_unit" "complex")
12644 (set_attr "prefix_data16" "1")
12645 (set_attr "prefix_extra" "1")
12646 (set_attr "mode" "TI")])
12647 @@ -7913,6 +7936,7 @@
12648 "TARGET_SSSE3"
12649 "phaddw\t{%2, %0|%0, %2}"
12650 [(set_attr "type" "sseiadd")
12651 + (set_attr "atom_unit" "complex")
12652 (set_attr "prefix_extra" "1")
12653 (set_attr "mode" "DI")])
12654
12655 @@ -7967,6 +7991,7 @@
12656 "TARGET_SSSE3"
12657 "phaddd\t{%2, %0|%0, %2}"
12658 [(set_attr "type" "sseiadd")
12659 + (set_attr "atom_unit" "complex")
12660 (set_attr "prefix_data16" "1")
12661 (set_attr "prefix_extra" "1")
12662 (set_attr "mode" "TI")])
12663 @@ -7987,6 +8012,7 @@
12664 "TARGET_SSSE3"
12665 "phaddd\t{%2, %0|%0, %2}"
12666 [(set_attr "type" "sseiadd")
12667 + (set_attr "atom_unit" "complex")
12668 (set_attr "prefix_extra" "1")
12669 (set_attr "mode" "DI")])
12670
12671 @@ -8073,6 +8099,7 @@
12672 "TARGET_SSSE3"
12673 "phaddsw\t{%2, %0|%0, %2}"
12674 [(set_attr "type" "sseiadd")
12675 + (set_attr "atom_unit" "complex")
12676 (set_attr "prefix_data16" "1")
12677 (set_attr "prefix_extra" "1")
12678 (set_attr "mode" "TI")])
12679 @@ -8101,6 +8128,7 @@
12680 "TARGET_SSSE3"
12681 "phaddsw\t{%2, %0|%0, %2}"
12682 [(set_attr "type" "sseiadd")
12683 + (set_attr "atom_unit" "complex")
12684 (set_attr "prefix_extra" "1")
12685 (set_attr "mode" "DI")])
12686
12687 @@ -8187,6 +8215,7 @@
12688 "TARGET_SSSE3"
12689 "phsubw\t{%2, %0|%0, %2}"
12690 [(set_attr "type" "sseiadd")
12691 + (set_attr "atom_unit" "complex")
12692 (set_attr "prefix_data16" "1")
12693 (set_attr "prefix_extra" "1")
12694 (set_attr "mode" "TI")])
12695 @@ -8215,6 +8244,7 @@
12696 "TARGET_SSSE3"
12697 "phsubw\t{%2, %0|%0, %2}"
12698 [(set_attr "type" "sseiadd")
12699 + (set_attr "atom_unit" "complex")
12700 (set_attr "prefix_extra" "1")
12701 (set_attr "mode" "DI")])
12702
12703 @@ -8269,6 +8299,7 @@
12704 "TARGET_SSSE3"
12705 "phsubd\t{%2, %0|%0, %2}"
12706 [(set_attr "type" "sseiadd")
12707 + (set_attr "atom_unit" "complex")
12708 (set_attr "prefix_data16" "1")
12709 (set_attr "prefix_extra" "1")
12710 (set_attr "mode" "TI")])
12711 @@ -8289,6 +8320,7 @@
12712 "TARGET_SSSE3"
12713 "phsubd\t{%2, %0|%0, %2}"
12714 [(set_attr "type" "sseiadd")
12715 + (set_attr "atom_unit" "complex")
12716 (set_attr "prefix_extra" "1")
12717 (set_attr "mode" "DI")])
12718
12719 @@ -8375,6 +8407,7 @@
12720 "TARGET_SSSE3"
12721 "phsubsw\t{%2, %0|%0, %2}"
12722 [(set_attr "type" "sseiadd")
12723 + (set_attr "atom_unit" "complex")
12724 (set_attr "prefix_data16" "1")
12725 (set_attr "prefix_extra" "1")
12726 (set_attr "mode" "TI")])
12727 @@ -8403,6 +8436,7 @@
12728 "TARGET_SSSE3"
12729 "phsubsw\t{%2, %0|%0, %2}"
12730 [(set_attr "type" "sseiadd")
12731 + (set_attr "atom_unit" "complex")
12732 (set_attr "prefix_extra" "1")
12733 (set_attr "mode" "DI")])
12734
12735 @@ -8509,6 +8543,7 @@
12736 "TARGET_SSSE3"
12737 "pmaddubsw\t{%2, %0|%0, %2}"
12738 [(set_attr "type" "sseiadd")
12739 + (set_attr "atom_unit" "simul")
12740 (set_attr "prefix_data16" "1")
12741 (set_attr "prefix_extra" "1")
12742 (set_attr "mode" "TI")])
12743 @@ -8547,6 +8582,7 @@
12744 "TARGET_SSSE3"
12745 "pmaddubsw\t{%2, %0|%0, %2}"
12746 [(set_attr "type" "sseiadd")
12747 + (set_attr "atom_unit" "simul")
12748 (set_attr "prefix_extra" "1")
12749 (set_attr "mode" "DI")])
12750
12751 @@ -8754,6 +8790,7 @@
12752 return "palignr\t{%3, %2, %0|%0, %2, %3}";
12753 }
12754 [(set_attr "type" "sseishft")
12755 + (set_attr "atom_unit" "sishuf")
12756 (set_attr "prefix_data16" "1")
12757 (set_attr "prefix_extra" "1")
12758 (set_attr "mode" "TI")])
12759 @@ -8770,6 +8807,7 @@
12760 return "palignr\t{%3, %2, %0|%0, %2, %3}";
12761 }
12762 [(set_attr "type" "sseishft")
12763 + (set_attr "atom_unit" "sishuf")
12764 (set_attr "prefix_extra" "1")
12765 (set_attr "mode" "DI")])
12766
12767 @@ -8956,7 +8994,7 @@
12768 UNSPEC_MOVNTDQA))]
12769 "TARGET_SSE4_1"
12770 "%vmovntdqa\t{%1, %0|%0, %1}"
12771 - [(set_attr "type" "ssecvt")
12772 + [(set_attr "type" "ssemov")
12773 (set_attr "prefix_extra" "1")
12774 (set_attr "prefix" "maybe_vex")
12775 (set_attr "mode" "TI")])
12776 --- a/gcc/config/i386/winnt.c
12777 +++ b/gcc/config/i386/winnt.c
12778 @@ -499,8 +499,11 @@
12779 {
12780 HOST_WIDE_INT rounded;
12781
12782 - /* Compute as in assemble_noswitch_variable, since we don't actually
12783 - support aligned common. */
12784 + /* Compute as in assemble_noswitch_variable, since we don't have
12785 + support for aligned common on older binutils. We must also
12786 + avoid emitting a common symbol of size zero, as this is the
12787 + overloaded representation that indicates an undefined external
12788 + symbol in the PE object file format. */
12789 rounded = size ? size : 1;
12790 rounded += (BIGGEST_ALIGNMENT / BITS_PER_UNIT) - 1;
12791 rounded = (rounded / (BIGGEST_ALIGNMENT / BITS_PER_UNIT)
12792 @@ -510,9 +513,13 @@
12793
12794 fprintf (stream, "\t.comm\t");
12795 assemble_name (stream, name);
12796 - fprintf (stream, ", " HOST_WIDE_INT_PRINT_DEC "\t" ASM_COMMENT_START
12797 - " " HOST_WIDE_INT_PRINT_DEC "\n",
12798 - rounded, size);
12799 + if (use_pe_aligned_common)
12800 + fprintf (stream, ", " HOST_WIDE_INT_PRINT_DEC ", %d\n",
12801 + size ? size : (HOST_WIDE_INT) 1,
12802 + exact_log2 (align) - exact_log2 (CHAR_BIT));
12803 + else
12804 + fprintf (stream, ", " HOST_WIDE_INT_PRINT_DEC "\t" ASM_COMMENT_START
12805 + " " HOST_WIDE_INT_PRINT_DEC "\n", rounded, size);
12806 }
12807 \f
12808 /* The Microsoft linker requires that every function be marked as
12809 --- a/gcc/config/m68k/constraints.md
12810 +++ b/gcc/config/m68k/constraints.md
12811 @@ -124,6 +124,11 @@
12812 (and (match_code "const_int")
12813 (match_test "ival < -0x8000 || ival > 0x7FFF")))
12814
12815 +(define_constraint "Cu"
12816 + "16-bit offset for wrapped symbols"
12817 + (and (match_code "const")
12818 + (match_test "m68k_unwrap_symbol (op, false) != op")))
12819 +
12820 (define_constraint "CQ"
12821 "Integers valid for mvq."
12822 (and (match_code "const_int")
12823 --- a/gcc/config/m68k/lb1sf68.asm
12824 +++ b/gcc/config/m68k/lb1sf68.asm
12825 @@ -163,6 +163,8 @@
12826 #if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
12827 lea \addr-.-8,a0
12828 jsr pc@(a0)
12829 +#elif defined (__mcfisab__) || defined (__mcfisac__)
12830 + bsr.l \addr
12831 #else
12832 bsr \addr
12833 #endif
12834 @@ -202,6 +204,8 @@
12835 #if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
12836 lea \addr-.-8,a0
12837 jsr pc@(a0)
12838 +#elif defined (__mcfisab__) || defined (__mcfisac__)
12839 + bsr.l \addr
12840 #else
12841 bsr \addr
12842 #endif
12843 --- a/gcc/config/m68k/linux-unwind.h
12844 +++ b/gcc/config/m68k/linux-unwind.h
12845 @@ -77,9 +77,15 @@
12846 fs->regs.reg[9].how = REG_SAVED_OFFSET;
12847 fs->regs.reg[9].loc.offset = (long) &sc->sc_a1 - cfa;
12848
12849 +#ifdef __uClinux__
12850 + fs->regs.reg[13].how = REG_SAVED_OFFSET;
12851 + fs->regs.reg[13].loc.offset = (long) &sc->sc_a5 - cfa;
12852 +#endif
12853 +
12854 fs->regs.reg[24].how = REG_SAVED_OFFSET;
12855 fs->regs.reg[24].loc.offset = (long) &sc->sc_pc - cfa;
12856
12857 +#if defined __mcffpu__ && !defined __uClinux__
12858 if (*(int *) sc->sc_fpstate)
12859 {
12860 int *fpregs = (int *) sc->sc_fpregs;
12861 @@ -89,11 +95,19 @@
12862 fs->regs.reg[17].how = REG_SAVED_OFFSET;
12863 fs->regs.reg[17].loc.offset = (long) &fpregs[M68K_FP_SIZE/4] - cfa;
12864 }
12865 +#elif defined __mcffpu__
12866 +# error Implement this when uClinux kernel is ported to an FPU architecture
12867 +#endif
12868 }
12869 #ifdef __mcoldfire__
12870 /* move.l #__NR_rt_sigreturn,%d0; trap #0 */
12871 - else if (pc[0] == 0x203c && pc[1] == 0x0000 &&
12872 - pc[2] == 0x00ad && pc[3] == 0x4e40)
12873 + else if ((pc[0] == 0x203c && pc[1] == 0x0000 &&
12874 + pc[2] == 0x00ad && pc[3] == 0x4e40) ||
12875 + /* Don't ask me why, this is just what some kernels do:
12876 + moveq #-__NR_rt_sigreturn,%d0; andil 0xff,%d0; trap #0;
12877 + Sigh... */
12878 + (pc[0] == 0x70ad && pc[1] == 0x0280 && pc[2] == 0x0000 &&
12879 + pc[3] == 0x00ff && pc[4] == 0x4e40 && pc[5] == 0x0000))
12880 #else
12881 /* moveq #~__NR_rt_sigreturn,%d0; not.b %d0; trap #0 */
12882 else if (pc[0] == 0x7052 && pc[1] == 0x4600 && pc[2] == 0x4e40)
12883 --- a/gcc/config/m68k/m68k.c
12884 +++ b/gcc/config/m68k/m68k.c
12885 @@ -46,6 +46,7 @@
12886 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
12887 #include "sched-int.h"
12888 #include "insn-codes.h"
12889 +#include "ggc.h"
12890
12891 enum reg_class regno_reg_class[] =
12892 {
12893 @@ -146,10 +147,12 @@
12894 static void m68k_compute_frame_layout (void);
12895 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
12896 static bool m68k_ok_for_sibcall_p (tree, tree);
12897 +static bool m68k_tls_symbol_p (rtx);
12898 static bool m68k_rtx_costs (rtx, int, int, int *, bool);
12899 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
12900 static bool m68k_return_in_memory (const_tree, const_tree);
12901 #endif
12902 +static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
12903 \f
12904
12905 /* Specify the identification number of the library being built */
12906 @@ -252,6 +255,14 @@
12907 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
12908 #endif
12909
12910 +#ifdef HAVE_AS_TLS
12911 +#undef TARGET_HAVE_TLS
12912 +#define TARGET_HAVE_TLS (true)
12913 +
12914 +#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
12915 +#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
12916 +#endif
12917 +
12918 static const struct attribute_spec m68k_attribute_table[] =
12919 {
12920 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
12921 @@ -1150,8 +1161,7 @@
12922 current_frame.reg_mask, true, true));
12923 }
12924
12925 - if (flag_pic
12926 - && !TARGET_SEP_DATA
12927 + if (!TARGET_SEP_DATA
12928 && crtl->uses_pic_offset_table)
12929 insn = emit_insn (gen_load_got (pic_offset_table_rtx));
12930 }
12931 @@ -1425,6 +1435,86 @@
12932 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
12933 }
12934
12935 +/* Convert X to a legitimate address and return it if successful. Otherwise
12936 + return X.
12937 +
12938 + For the 68000, we handle X+REG by loading X into a register R and
12939 + using R+REG. R will go in an address reg and indexing will be used.
12940 + However, if REG is a broken-out memory address or multiplication,
12941 + nothing needs to be done because REG can certainly go in an address reg. */
12942 +
12943 +rtx
12944 +m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
12945 +{
12946 + if (m68k_tls_symbol_p (x))
12947 + return m68k_legitimize_tls_address (x);
12948 +
12949 + if (GET_CODE (x) == PLUS)
12950 + {
12951 + int ch = (x) != (oldx);
12952 + int copied = 0;
12953 +
12954 +#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
12955 +
12956 + if (GET_CODE (XEXP (x, 0)) == MULT)
12957 + {
12958 + COPY_ONCE (x);
12959 + XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
12960 + }
12961 + if (GET_CODE (XEXP (x, 1)) == MULT)
12962 + {
12963 + COPY_ONCE (x);
12964 + XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
12965 + }
12966 + if (ch)
12967 + {
12968 + if (GET_CODE (XEXP (x, 1)) == REG
12969 + && GET_CODE (XEXP (x, 0)) == REG)
12970 + {
12971 + if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
12972 + {
12973 + COPY_ONCE (x);
12974 + x = force_operand (x, 0);
12975 + }
12976 + return x;
12977 + }
12978 + if (memory_address_p (mode, x))
12979 + return x;
12980 + }
12981 + if (GET_CODE (XEXP (x, 0)) == REG
12982 + || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
12983 + && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
12984 + && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
12985 + {
12986 + rtx temp = gen_reg_rtx (Pmode);
12987 + rtx val = force_operand (XEXP (x, 1), 0);
12988 + emit_move_insn (temp, val);
12989 + COPY_ONCE (x);
12990 + XEXP (x, 1) = temp;
12991 + if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
12992 + && GET_CODE (XEXP (x, 0)) == REG)
12993 + x = force_operand (x, 0);
12994 + }
12995 + else if (GET_CODE (XEXP (x, 1)) == REG
12996 + || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
12997 + && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
12998 + && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
12999 + {
13000 + rtx temp = gen_reg_rtx (Pmode);
13001 + rtx val = force_operand (XEXP (x, 0), 0);
13002 + emit_move_insn (temp, val);
13003 + COPY_ONCE (x);
13004 + XEXP (x, 0) = temp;
13005 + if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
13006 + && GET_CODE (XEXP (x, 1)) == REG)
13007 + x = force_operand (x, 0);
13008 + }
13009 + }
13010 +
13011 + return x;
13012 +}
13013 +
13014 +
13015 /* Output a dbCC; jCC sequence. Note we do not handle the
13016 floating point version of this sequence (Fdbcc). We also
13017 do not handle alternative conditions when CC_NO_OVERFLOW is
13018 @@ -1713,15 +1803,16 @@
13019 whether we need strict checking. */
13020
13021 bool
13022 -m68k_legitimate_index_reg_p (rtx x, bool strict_p)
13023 +m68k_legitimate_index_reg_p (enum machine_mode mode, rtx x, bool strict_p)
13024 {
13025 if (!strict_p && GET_CODE (x) == SUBREG)
13026 x = SUBREG_REG (x);
13027
13028 return (REG_P (x)
13029 && (strict_p
13030 - ? REGNO_OK_FOR_INDEX_P (REGNO (x))
13031 - : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
13032 + ? REGNO_MODE_OK_FOR_INDEX_P (REGNO (x), mode)
13033 + : (MODE_OK_FOR_INDEX_P (mode)
13034 + && REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x)))));
13035 }
13036
13037 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
13038 @@ -1729,7 +1820,8 @@
13039 ADDRESS if so. STRICT_P says whether we need strict checking. */
13040
13041 static bool
13042 -m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
13043 +m68k_decompose_index (enum machine_mode mode, rtx x, bool strict_p,
13044 + struct m68k_address *address)
13045 {
13046 int scale;
13047
13048 @@ -1753,7 +1845,7 @@
13049 && GET_MODE (XEXP (x, 0)) == HImode)
13050 x = XEXP (x, 0);
13051
13052 - if (m68k_legitimate_index_reg_p (x, strict_p))
13053 + if (m68k_legitimate_index_reg_p (mode, x, strict_p))
13054 {
13055 address->scale = scale;
13056 address->index = x;
13057 @@ -1777,7 +1869,7 @@
13058 && !offset_within_block_p (base, INTVAL (offset)))
13059 return true;
13060 }
13061 - return false;
13062 + return m68k_tls_reference_p (x, false);
13063 }
13064
13065 /* Return true if X is a legitimate constant address that can reach
13066 @@ -1805,7 +1897,7 @@
13067 return false;
13068 }
13069
13070 - return true;
13071 + return !m68k_tls_reference_p (x, false);
13072 }
13073
13074 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
13075 @@ -1872,15 +1964,17 @@
13076 /* Check for GOT loads. These are (bd,An,Xn) addresses if
13077 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
13078 addresses. */
13079 - if (flag_pic
13080 - && GET_CODE (x) == PLUS
13081 - && XEXP (x, 0) == pic_offset_table_rtx
13082 - && (GET_CODE (XEXP (x, 1)) == SYMBOL_REF
13083 - || GET_CODE (XEXP (x, 1)) == LABEL_REF))
13084 + if (GET_CODE (x) == PLUS
13085 + && XEXP (x, 0) == pic_offset_table_rtx)
13086 {
13087 - address->base = XEXP (x, 0);
13088 - address->offset = XEXP (x, 1);
13089 - return true;
13090 + /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
13091 + they are invalid in this context. */
13092 + if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
13093 + {
13094 + address->base = XEXP (x, 0);
13095 + address->offset = XEXP (x, 1);
13096 + return true;
13097 + }
13098 }
13099
13100 /* The ColdFire FPU only accepts addressing modes 2-5. */
13101 @@ -1905,7 +1999,7 @@
13102 accesses to unplaced labels in other cases. */
13103 if (GET_CODE (x) == PLUS
13104 && m68k_jump_table_ref_p (XEXP (x, 1))
13105 - && m68k_decompose_index (XEXP (x, 0), strict_p, address))
13106 + && m68k_decompose_index (mode, XEXP (x, 0), strict_p, address))
13107 {
13108 address->offset = XEXP (x, 1);
13109 return true;
13110 @@ -1937,7 +2031,7 @@
13111 worse code. */
13112 if (address->offset
13113 && symbolic_operand (address->offset, VOIDmode)
13114 - && m68k_decompose_index (x, strict_p, address))
13115 + && m68k_decompose_index (mode, x, strict_p, address))
13116 return true;
13117 }
13118 else
13119 @@ -1956,14 +2050,14 @@
13120 if (GET_CODE (x) == PLUS)
13121 {
13122 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
13123 - && m68k_decompose_index (XEXP (x, 1), strict_p, address))
13124 + && m68k_decompose_index (mode, XEXP (x, 1), strict_p, address))
13125 {
13126 address->base = XEXP (x, 0);
13127 return true;
13128 }
13129
13130 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
13131 - && m68k_decompose_index (XEXP (x, 0), strict_p, address))
13132 + && m68k_decompose_index (mode, XEXP (x, 0), strict_p, address))
13133 {
13134 address->base = XEXP (x, 1);
13135 return true;
13136 @@ -2025,6 +2119,243 @@
13137 && !address.index);
13138 }
13139
13140 +/* Return GOT pointer. */
13141 +
13142 +static rtx
13143 +m68k_get_gp (void)
13144 +{
13145 + if (pic_offset_table_rtx == NULL_RTX)
13146 + pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
13147 +
13148 + crtl->uses_pic_offset_table = 1;
13149 +
13150 + return pic_offset_table_rtx;
13151 +}
13152 +
13153 +/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
13154 + wrappers. */
13155 +enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
13156 + RELOC_TLSIE, RELOC_TLSLE };
13157 +
13158 +#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
13159 +
13160 +/* Wrap symbol X into unspec representing relocation RELOC.
13161 + BASE_REG - register that should be added to the result.
13162 + TEMP_REG - if non-null, temporary register. */
13163 +
13164 +static rtx
13165 +m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
13166 +{
13167 + bool use_x_p;
13168 +
13169 + use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
13170 +
13171 + if (TARGET_COLDFIRE && use_x_p)
13172 + /* When compiling with -mx{got, tls} switch the code will look like this:
13173 +
13174 + move.l <X>@<RELOC>,<TEMP_REG>
13175 + add.l <BASE_REG>,<TEMP_REG> */
13176 + {
13177 + /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
13178 + to put @RELOC after reference. */
13179 + x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
13180 + UNSPEC_RELOC32);
13181 + x = gen_rtx_CONST (Pmode, x);
13182 +
13183 + if (temp_reg == NULL)
13184 + {
13185 + gcc_assert (can_create_pseudo_p ());
13186 + temp_reg = gen_reg_rtx (Pmode);
13187 + }
13188 +
13189 + emit_move_insn (temp_reg, x);
13190 + emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
13191 + x = temp_reg;
13192 + }
13193 + else
13194 + {
13195 + x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
13196 + UNSPEC_RELOC16);
13197 + x = gen_rtx_CONST (Pmode, x);
13198 +
13199 + x = gen_rtx_PLUS (Pmode, base_reg, x);
13200 + }
13201 +
13202 + return x;
13203 +}
13204 +
13205 +/* Helper for m68k_unwrap_symbol.
13206 + Also, if unwrapping was successful (that is if (ORIG != <return value>)),
13207 + sets *RELOC_PTR to relocation type for the symbol. */
13208 +
13209 +static rtx
13210 +m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
13211 + enum m68k_reloc *reloc_ptr)
13212 +{
13213 + if (GET_CODE (orig) == CONST)
13214 + {
13215 + rtx x;
13216 + enum m68k_reloc dummy;
13217 +
13218 + x = XEXP (orig, 0);
13219 +
13220 + if (reloc_ptr == NULL)
13221 + reloc_ptr = &dummy;
13222 +
13223 + /* Handle an addend. */
13224 + if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
13225 + && CONST_INT_P (XEXP (x, 1)))
13226 + x = XEXP (x, 0);
13227 +
13228 + if (GET_CODE (x) == UNSPEC)
13229 + {
13230 + switch (XINT (x, 1))
13231 + {
13232 + case UNSPEC_RELOC16:
13233 + orig = XVECEXP (x, 0, 0);
13234 + *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
13235 + break;
13236 +
13237 + case UNSPEC_RELOC32:
13238 + if (unwrap_reloc32_p)
13239 + {
13240 + orig = XVECEXP (x, 0, 0);
13241 + *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
13242 + }
13243 + break;
13244 +
13245 + default:
13246 + break;
13247 + }
13248 + }
13249 + }
13250 +
13251 + return orig;
13252 +}
13253 +
13254 +/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
13255 + UNSPEC_RELOC32 wrappers. */
13256 +
13257 +rtx
13258 +m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
13259 +{
13260 + return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
13261 +}
13262 +
13263 +/* Helper for m68k_final_prescan_insn. */
13264 +
13265 +static int
13266 +m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
13267 +{
13268 + rtx x = *x_ptr;
13269 +
13270 + if (m68k_unwrap_symbol (x, true) != x)
13271 + /* For rationale of the below, see comment in m68k_final_prescan_insn. */
13272 + {
13273 + rtx plus;
13274 +
13275 + gcc_assert (GET_CODE (x) == CONST);
13276 + plus = XEXP (x, 0);
13277 +
13278 + if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
13279 + {
13280 + rtx unspec;
13281 + rtx addend;
13282 +
13283 + unspec = XEXP (plus, 0);
13284 + gcc_assert (GET_CODE (unspec) == UNSPEC);
13285 + addend = XEXP (plus, 1);
13286 + gcc_assert (CONST_INT_P (addend));
13287 +
13288 + /* We now have all the pieces, rearrange them. */
13289 +
13290 + /* Move symbol to plus. */
13291 + XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
13292 +
13293 + /* Move plus inside unspec. */
13294 + XVECEXP (unspec, 0, 0) = plus;
13295 +
13296 + /* Move unspec to top level of const. */
13297 + XEXP (x, 0) = unspec;
13298 + }
13299 +
13300 + return -1;
13301 + }
13302 +
13303 + return 0;
13304 +}
13305 +
13306 +/* Prescan insn before outputing assembler for it. */
13307 +
13308 +void
13309 +m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
13310 + rtx *operands, int n_operands)
13311 +{
13312 + int i;
13313 +
13314 + /* Combine and, possibly, other optimizations may do good job
13315 + converting
13316 + (const (unspec [(symbol)]))
13317 + into
13318 + (const (plus (unspec [(symbol)])
13319 + (const_int N))).
13320 + The problem with this is emitting @TLS or @GOT decorations.
13321 + The decoration is emitted when processing (unspec), so the
13322 + result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
13323 +
13324 + It seems that the easiest solution to this is to convert such
13325 + operands to
13326 + (const (unspec [(plus (symbol)
13327 + (const_int N))])).
13328 + Note, that the top level of operand remains intact, so we don't have
13329 + to patch up anything outside of the operand. */
13330 +
13331 + for (i = 0; i < n_operands; ++i)
13332 + {
13333 + rtx op;
13334 +
13335 + op = operands[i];
13336 +
13337 + for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
13338 + }
13339 +}
13340 +
13341 +/* Move X to a register and add REG_EQUAL note pointing to ORIG.
13342 + If REG is non-null, use it; generate new pseudo otherwise. */
13343 +
13344 +static rtx
13345 +m68k_move_to_reg (rtx x, rtx orig, rtx reg)
13346 +{
13347 + rtx insn;
13348 +
13349 + if (reg == NULL_RTX)
13350 + {
13351 + gcc_assert (can_create_pseudo_p ());
13352 + reg = gen_reg_rtx (Pmode);
13353 + }
13354 +
13355 + insn = emit_move_insn (reg, x);
13356 + /* Put a REG_EQUAL note on this insn, so that it can be optimized
13357 + by loop. */
13358 + set_unique_reg_note (insn, REG_EQUAL, orig);
13359 +
13360 + return reg;
13361 +}
13362 +
13363 +/* Does the same as m68k_wrap_symbol, but returns a memory reference to
13364 + GOT slot. */
13365 +
13366 +static rtx
13367 +m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
13368 +{
13369 + x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
13370 +
13371 + x = gen_rtx_MEM (Pmode, x);
13372 + MEM_READONLY_P (x) = 1;
13373 +
13374 + return x;
13375 +}
13376 +
13377 /* Legitimize PIC addresses. If the address is already
13378 position-independent, we return ORIG. Newly generated
13379 position-independent addresses go to REG. If we need more
13380 @@ -2076,42 +2407,15 @@
13381 {
13382 gcc_assert (reg);
13383
13384 - if (TARGET_COLDFIRE && TARGET_XGOT)
13385 - /* When compiling with -mxgot switch the code for the above
13386 - example will look like this:
13387 -
13388 - movel a5, a0
13389 - addl _foo@GOT, a0
13390 - movel a0@, a0
13391 - movel #12345, a0@ */
13392 - {
13393 - rtx pic_offset;
13394 -
13395 - /* Wrap ORIG in UNSPEC_GOTOFF to tip m68k_output_addr_const_extra
13396 - to put @GOT after reference. */
13397 - pic_offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, orig),
13398 - UNSPEC_GOTOFF);
13399 - pic_offset = gen_rtx_CONST (Pmode, pic_offset);
13400 - emit_move_insn (reg, pic_offset);
13401 - emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
13402 - pic_ref = gen_rtx_MEM (Pmode, reg);
13403 - }
13404 - else
13405 - pic_ref = gen_rtx_MEM (Pmode,
13406 - gen_rtx_PLUS (Pmode,
13407 - pic_offset_table_rtx, orig));
13408 - crtl->uses_pic_offset_table = 1;
13409 - MEM_READONLY_P (pic_ref) = 1;
13410 - emit_move_insn (reg, pic_ref);
13411 - return reg;
13412 + pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
13413 + pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
13414 }
13415 else if (GET_CODE (orig) == CONST)
13416 {
13417 rtx base;
13418
13419 /* Make sure this has not already been legitimized. */
13420 - if (GET_CODE (XEXP (orig, 0)) == PLUS
13421 - && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
13422 + if (m68k_unwrap_symbol (orig, true) != orig)
13423 return orig;
13424
13425 gcc_assert (reg);
13426 @@ -2124,13 +2428,257 @@
13427 base == reg ? 0 : reg);
13428
13429 if (GET_CODE (orig) == CONST_INT)
13430 - return plus_constant (base, INTVAL (orig));
13431 - pic_ref = gen_rtx_PLUS (Pmode, base, orig);
13432 - /* Likewise, should we set special REG_NOTEs here? */
13433 + pic_ref = plus_constant (base, INTVAL (orig));
13434 + else
13435 + pic_ref = gen_rtx_PLUS (Pmode, base, orig);
13436 }
13437 +
13438 return pic_ref;
13439 }
13440
13441 +/* The __tls_get_addr symbol. */
13442 +static GTY(()) rtx m68k_tls_get_addr;
13443 +
13444 +/* Return SYMBOL_REF for __tls_get_addr. */
13445 +
13446 +static rtx
13447 +m68k_get_tls_get_addr (void)
13448 +{
13449 + if (m68k_tls_get_addr == NULL_RTX)
13450 + m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
13451 +
13452 + return m68k_tls_get_addr;
13453 +}
13454 +
13455 +/* Return libcall result in A0 instead of usual D0. */
13456 +static bool m68k_libcall_value_in_a0_p = false;
13457 +
13458 +/* Emit instruction sequence that calls __tls_get_addr. X is
13459 + the TLS symbol we are referencing and RELOC is the symbol type to use
13460 + (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
13461 + emitted. A pseudo register with result of __tls_get_addr call is
13462 + returned. */
13463 +
13464 +static rtx
13465 +m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
13466 +{
13467 + rtx a0;
13468 + rtx insns;
13469 + rtx dest;
13470 +
13471 + /* Emit the call sequence. */
13472 + start_sequence ();
13473 +
13474 + /* FIXME: Unfortunately, emit_library_call_value does not
13475 + consider (plus (%a5) (const (unspec))) to be a good enough
13476 + operand for push, so it forces it into a register. The bad
13477 + thing about this is that combiner, due to copy propagation and other
13478 + optimizations, sometimes can not later fix this. As a consequence,
13479 + additional register may be allocated resulting in a spill.
13480 + For reference, see args processing loops in
13481 + calls.c:emit_library_call_value_1.
13482 + For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
13483 + x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
13484 +
13485 + /* __tls_get_addr() is not a libcall, but emitting a libcall_value
13486 + is the simpliest way of generating a call. The difference between
13487 + __tls_get_addr() and libcall is that the result is returned in D0
13488 + instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
13489 + which temporarily switches returning the result to A0. */
13490 +
13491 + m68k_libcall_value_in_a0_p = true;
13492 + a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
13493 + Pmode, 1, x, Pmode);
13494 + m68k_libcall_value_in_a0_p = false;
13495 +
13496 + insns = get_insns ();
13497 + end_sequence ();
13498 +
13499 + gcc_assert (can_create_pseudo_p ());
13500 + dest = gen_reg_rtx (Pmode);
13501 + emit_libcall_block (insns, dest, a0, eqv);
13502 +
13503 + return dest;
13504 +}
13505 +
13506 +/* The __tls_get_addr symbol. */
13507 +static GTY(()) rtx m68k_read_tp;
13508 +
13509 +/* Return SYMBOL_REF for __m68k_read_tp. */
13510 +
13511 +static rtx
13512 +m68k_get_m68k_read_tp (void)
13513 +{
13514 + if (m68k_read_tp == NULL_RTX)
13515 + m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
13516 +
13517 + return m68k_read_tp;
13518 +}
13519 +
13520 +/* Emit instruction sequence that calls __m68k_read_tp.
13521 + A pseudo register with result of __m68k_read_tp call is returned. */
13522 +
13523 +static rtx
13524 +m68k_call_m68k_read_tp (void)
13525 +{
13526 + rtx a0;
13527 + rtx eqv;
13528 + rtx insns;
13529 + rtx dest;
13530 +
13531 + start_sequence ();
13532 +
13533 + /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
13534 + is the simpliest way of generating a call. The difference between
13535 + __m68k_read_tp() and libcall is that the result is returned in D0
13536 + instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
13537 + which temporarily switches returning the result to A0. */
13538 +
13539 + /* Emit the call sequence. */
13540 + m68k_libcall_value_in_a0_p = true;
13541 + a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
13542 + Pmode, 0);
13543 + m68k_libcall_value_in_a0_p = false;
13544 + insns = get_insns ();
13545 + end_sequence ();
13546 +
13547 + /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
13548 + share the m68k_read_tp result with other IE/LE model accesses. */
13549 + eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
13550 +
13551 + gcc_assert (can_create_pseudo_p ());
13552 + dest = gen_reg_rtx (Pmode);
13553 + emit_libcall_block (insns, dest, a0, eqv);
13554 +
13555 + return dest;
13556 +}
13557 +
13558 +/* Return a legitimized address for accessing TLS SYMBOL_REF X.
13559 + For explanations on instructions sequences see TLS/NPTL ABI for m68k and
13560 + ColdFire. */
13561 +
13562 +rtx
13563 +m68k_legitimize_tls_address (rtx orig)
13564 +{
13565 + switch (SYMBOL_REF_TLS_MODEL (orig))
13566 + {
13567 + case TLS_MODEL_GLOBAL_DYNAMIC:
13568 + orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
13569 + break;
13570 +
13571 + case TLS_MODEL_LOCAL_DYNAMIC:
13572 + {
13573 + rtx eqv;
13574 + rtx a0;
13575 + rtx x;
13576 +
13577 + /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
13578 + share the LDM result with other LD model accesses. */
13579 + eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
13580 + UNSPEC_RELOC32);
13581 +
13582 + a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
13583 +
13584 + x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
13585 +
13586 + if (can_create_pseudo_p ())
13587 + x = m68k_move_to_reg (x, orig, NULL_RTX);
13588 +
13589 + orig = x;
13590 + break;
13591 + }
13592 +
13593 + case TLS_MODEL_INITIAL_EXEC:
13594 + {
13595 + rtx a0;
13596 + rtx x;
13597 +
13598 + a0 = m68k_call_m68k_read_tp ();
13599 +
13600 + x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
13601 + x = gen_rtx_PLUS (Pmode, x, a0);
13602 +
13603 + if (can_create_pseudo_p ())
13604 + x = m68k_move_to_reg (x, orig, NULL_RTX);
13605 +
13606 + orig = x;
13607 + break;
13608 + }
13609 +
13610 + case TLS_MODEL_LOCAL_EXEC:
13611 + {
13612 + rtx a0;
13613 + rtx x;
13614 +
13615 + a0 = m68k_call_m68k_read_tp ();
13616 +
13617 + x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
13618 +
13619 + if (can_create_pseudo_p ())
13620 + x = m68k_move_to_reg (x, orig, NULL_RTX);
13621 +
13622 + orig = x;
13623 + break;
13624 + }
13625 +
13626 + default:
13627 + gcc_unreachable ();
13628 + }
13629 +
13630 + return orig;
13631 +}
13632 +
13633 +/* Return true if X is a TLS symbol. */
13634 +
13635 +static bool
13636 +m68k_tls_symbol_p (rtx x)
13637 +{
13638 + if (!TARGET_HAVE_TLS)
13639 + return false;
13640 +
13641 + if (GET_CODE (x) != SYMBOL_REF)
13642 + return false;
13643 +
13644 + return SYMBOL_REF_TLS_MODEL (x) != 0;
13645 +}
13646 +
13647 +/* Helper for m68k_tls_referenced_p. */
13648 +
13649 +static int
13650 +m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
13651 +{
13652 + /* Note: this is not the same as m68k_tls_symbol_p. */
13653 + if (GET_CODE (*x_ptr) == SYMBOL_REF)
13654 + return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
13655 +
13656 + /* Don't recurse into legitimate TLS references. */
13657 + if (m68k_tls_reference_p (*x_ptr, true))
13658 + return -1;
13659 +
13660 + return 0;
13661 +}
13662 +
13663 +/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
13664 + though illegitimate one.
13665 + If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
13666 +
13667 +bool
13668 +m68k_tls_reference_p (rtx x, bool legitimate_p)
13669 +{
13670 + if (!TARGET_HAVE_TLS)
13671 + return false;
13672 +
13673 + if (!legitimate_p)
13674 + return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
13675 + else
13676 + {
13677 + enum m68k_reloc reloc = RELOC_GOT;
13678 +
13679 + return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
13680 + && TLS_RELOC_P (reloc));
13681 + }
13682 +}
13683 +
13684 \f
13685
13686 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
13687 @@ -3918,18 +4466,92 @@
13688 }
13689 }
13690
13691 +/* Return string for TLS relocation RELOC. */
13692 +
13693 +static const char *
13694 +m68k_get_reloc_decoration (enum m68k_reloc reloc)
13695 +{
13696 + /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
13697 + gcc_assert (MOTOROLA || reloc == RELOC_GOT);
13698 +
13699 + switch (reloc)
13700 + {
13701 + case RELOC_GOT:
13702 + if (MOTOROLA)
13703 + {
13704 + if (flag_pic == 1 && TARGET_68020)
13705 + return "@GOT.w";
13706 + else
13707 + return "@GOT";
13708 + }
13709 + else
13710 + {
13711 + if (TARGET_68020)
13712 + {
13713 + switch (flag_pic)
13714 + {
13715 + case 1:
13716 + return ":w";
13717 + case 2:
13718 + return ":l";
13719 + default:
13720 + return "";
13721 + }
13722 + }
13723 + }
13724 +
13725 + case RELOC_TLSGD:
13726 + return "@TLSGD";
13727 +
13728 + case RELOC_TLSLDM:
13729 + return "@TLSLDM";
13730 +
13731 + case RELOC_TLSLDO:
13732 + return "@TLSLDO";
13733 +
13734 + case RELOC_TLSIE:
13735 + return "@TLSIE";
13736 +
13737 + case RELOC_TLSLE:
13738 + return "@TLSLE";
13739 +
13740 + default:
13741 + gcc_unreachable ();
13742 + }
13743 +}
13744 +
13745 /* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */
13746
13747 bool
13748 m68k_output_addr_const_extra (FILE *file, rtx x)
13749 {
13750 - if (GET_CODE (x) != UNSPEC || XINT (x, 1) != UNSPEC_GOTOFF)
13751 - return false;
13752 + if (GET_CODE (x) == UNSPEC)
13753 + {
13754 + switch (XINT (x, 1))
13755 + {
13756 + case UNSPEC_RELOC16:
13757 + case UNSPEC_RELOC32:
13758 + output_addr_const (file, XVECEXP (x, 0, 0));
13759 + fputs (m68k_get_reloc_decoration (INTVAL (XVECEXP (x, 0, 1))), file);
13760 + return true;
13761
13762 - output_addr_const (file, XVECEXP (x, 0, 0));
13763 - /* ??? What is the non-MOTOROLA syntax? */
13764 - fputs ("@GOT", file);
13765 - return true;
13766 + default:
13767 + break;
13768 + }
13769 + }
13770 +
13771 + return false;
13772 +}
13773 +
13774 +/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
13775 +
13776 +static void
13777 +m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
13778 +{
13779 + gcc_assert (size == 4);
13780 + fputs ("\t.long\t", file);
13781 + output_addr_const (file, x);
13782 + fputs ("@TLSLDO+0x8000", file);
13783 }
13784
13785 \f
13786 @@ -4019,15 +4641,8 @@
13787 else
13788 {
13789 if (address.offset)
13790 - {
13791 - output_addr_const (file, address.offset);
13792 - if (flag_pic && address.base == pic_offset_table_rtx)
13793 - {
13794 - fprintf (file, "@GOT");
13795 - if (flag_pic == 1 && TARGET_68020)
13796 - fprintf (file, ".w");
13797 - }
13798 - }
13799 + output_addr_const (file, address.offset);
13800 +
13801 putc ('(', file);
13802 if (address.base)
13803 fputs (M68K_REGNAME (REGNO (address.base)), file);
13804 @@ -4060,19 +4675,7 @@
13805 fputs (M68K_REGNAME (REGNO (address.base)), file);
13806 fprintf (file, "@(");
13807 if (address.offset)
13808 - {
13809 - output_addr_const (file, address.offset);
13810 - if (address.base == pic_offset_table_rtx && TARGET_68020)
13811 - switch (flag_pic)
13812 - {
13813 - case 1:
13814 - fprintf (file, ":w"); break;
13815 - case 2:
13816 - fprintf (file, ":l"); break;
13817 - default:
13818 - break;
13819 - }
13820 - }
13821 + output_addr_const (file, address.offset);
13822 }
13823 /* Print the ",index" component, if any. */
13824 if (address.index)
13825 @@ -4580,7 +5183,8 @@
13826 default:
13827 break;
13828 }
13829 - return gen_rtx_REG (mode, D0_REG);
13830 +
13831 + return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
13832 }
13833
13834 rtx
13835 @@ -4846,9 +5450,8 @@
13836 return OP_TYPE_IMM_L;
13837
13838 default:
13839 - if (GET_CODE (op) == SYMBOL_REF)
13840 - /* ??? Just a guess. Probably we can guess better using length
13841 - attribute of the instructions. */
13842 + if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
13843 + /* Just a guess. */
13844 return OP_TYPE_IMM_W;
13845
13846 return OP_TYPE_IMM_L;
13847 @@ -5793,3 +6396,5 @@
13848 return 0;
13849 }
13850 }
13851 +
13852 +#include "gt-m68k.h"
13853 --- a/gcc/config/m68k/m68k-devices.def
13854 +++ b/gcc/config/m68k/m68k-devices.def
13855 @@ -72,8 +72,8 @@
13856 /* 680x0 series processors. */
13857 M68K_DEVICE ("68000", m68000, "68000", "68000", 68000, isa_00, 0)
13858 M68K_DEVICE ("68010", m68010, "68010", "68000", 68010, isa_10, 0)
13859 -M68K_DEVICE ("68020", m68020, "68020", "68020", 68020, isa_20, FL_MMU)
13860 -M68K_DEVICE ("68030", m68030, "68030", "68020", 68030, isa_20, FL_MMU)
13861 +M68K_DEVICE ("68020", m68020, "68020", "68020", 68020, isa_20, FL_MMU | FL_UCLINUX)
13862 +M68K_DEVICE ("68030", m68030, "68030", "68020", 68030, isa_20, FL_MMU | FL_UCLINUX)
13863 M68K_DEVICE ("68040", m68040, "68040", "68040", 68040, isa_40, FL_MMU)
13864 M68K_DEVICE ("68060", m68060, "68060", "68060", 68060, isa_40, FL_MMU)
13865 M68K_DEVICE ("68302", m68302, "68302", "68000", 68000, isa_00, FL_MMU)
13866 @@ -81,7 +81,13 @@
13867 M68K_DEVICE ("cpu32", cpu32, "cpu32", "cpu32", cpu32, isa_cpu32, FL_MMU)
13868
13869 /* ColdFire CFV1 processor. */
13870 -M68K_DEVICE ("51qe", mcf51qe, "51qe", "51qe", cfv1, isa_c, FL_CF_USP)
13871 +/* For historical reasons, the 51 multilib is named 51qe. */
13872 +M68K_DEVICE ("51", mcf51, "51", "51qe", cfv1, isa_c, FL_CF_USP)
13873 +M68K_DEVICE ("51ac", mcf51ac, "51", "51qe", cfv1, isa_c, FL_CF_USP)
13874 +M68K_DEVICE ("51cn", mcf51cn, "51", "51qe", cfv1, isa_c, FL_CF_USP)
13875 +M68K_DEVICE ("51em", mcf51em, "51", "51qe", cfv1, isa_c, FL_CF_USP | FL_CF_MAC)
13876 +M68K_DEVICE ("51jm", mcf51jm, "51", "51qe", cfv1, isa_c, FL_CF_USP)
13877 +M68K_DEVICE ("51qe", mcf51qe, "51", "51qe", cfv1, isa_c, FL_CF_USP)
13878
13879 /* ColdFire CFV2 processors. */
13880 M68K_DEVICE ("5202", mcf5202, "5206", "5206", cfv2, isa_a, 0)
13881 @@ -97,6 +103,7 @@
13882 M68K_DEVICE ("5213", mcf5213, "5213", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
13883 M68K_DEVICE ("5214", mcf5214, "5216", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13884 M68K_DEVICE ("5216", mcf5216, "5216", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13885 +M68K_DEVICE ("5221x", mcf5221x, "5221x", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
13886 M68K_DEVICE ("52221", mcf52221, "52223", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
13887 M68K_DEVICE ("52223", mcf52223, "52223", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
13888 M68K_DEVICE ("52230", mcf52230, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13889 @@ -107,6 +114,14 @@
13890 M68K_DEVICE ("52235", mcf52235, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13891 M68K_DEVICE ("5224", mcf5224, "5225", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
13892 M68K_DEVICE ("5225", mcf5225, "5225", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC)
13893 +M68K_DEVICE ("52252", mcf52252, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13894 +M68K_DEVICE ("52254", mcf52254, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13895 +M68K_DEVICE ("52255", mcf52255, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13896 +M68K_DEVICE ("52256", mcf52256, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13897 +M68K_DEVICE ("52258", mcf52258, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13898 +M68K_DEVICE ("52259", mcf52259, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13899 +M68K_DEVICE ("52274", mcf52274, "52277", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13900 +M68K_DEVICE ("52277", mcf52277, "52277", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13901 M68K_DEVICE ("5232", mcf5232, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13902 M68K_DEVICE ("5233", mcf5233, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13903 M68K_DEVICE ("5234", mcf5234, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13904 @@ -126,6 +141,13 @@
13905 M68K_DEVICE ("528x", mcf528x, "5282", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13906
13907 /* CFV3 processors. */
13908 +M68K_DEVICE ("53011", mcf53011, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13909 +M68K_DEVICE ("53012", mcf53012, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13910 +M68K_DEVICE ("53013", mcf53013, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13911 +M68K_DEVICE ("53014", mcf53014, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13912 +M68K_DEVICE ("53015", mcf53015, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13913 +M68K_DEVICE ("53016", mcf53016, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13914 +M68K_DEVICE ("53017", mcf53017, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13915 M68K_DEVICE ("5307", mcf5307, "5307", "5307", cfv3, isa_a, FL_CF_HWDIV | FL_CF_MAC)
13916 M68K_DEVICE ("5327", mcf5327, "5329", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13917 M68K_DEVICE ("5328", mcf5328, "5329", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC)
13918 @@ -137,12 +159,17 @@
13919
13920 /* CFV4/CFV4e processors. */
13921 M68K_DEVICE ("5407", mcf5407, "5407", "5407", cfv4, isa_b, FL_CF_MAC)
13922 -M68K_DEVICE ("54450", mcf54450, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU)
13923 -M68K_DEVICE ("54451", mcf54451, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU)
13924 -M68K_DEVICE ("54452", mcf54452, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU)
13925 -M68K_DEVICE ("54453", mcf54453, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU)
13926 -M68K_DEVICE ("54454", mcf54454, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU)
13927 -M68K_DEVICE ("54455", mcf54455, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU)
13928 +M68K_DEVICE ("54410", mcf54410, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13929 +M68K_DEVICE ("54415", mcf54415, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13930 +M68K_DEVICE ("54416", mcf54416, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13931 +M68K_DEVICE ("54417", mcf54417, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13932 +M68K_DEVICE ("54418", mcf54418, "54418", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13933 +M68K_DEVICE ("54450", mcf54450, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13934 +M68K_DEVICE ("54451", mcf54451, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13935 +M68K_DEVICE ("54452", mcf54452, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13936 +M68K_DEVICE ("54453", mcf54453, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13937 +M68K_DEVICE ("54454", mcf54454, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13938 +M68K_DEVICE ("54455", mcf54455, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX)
13939 M68K_DEVICE ("5470", mcf5470, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
13940 M68K_DEVICE ("5471", mcf5471, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
13941 M68K_DEVICE ("5472", mcf5472, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU)
13942 --- a/gcc/config/m68k/m68k.h
13943 +++ b/gcc/config/m68k/m68k.h
13944 @@ -232,6 +232,7 @@
13945 #define FL_ISA_C (1 << 16)
13946 #define FL_FIDOA (1 << 17)
13947 #define FL_MMU 0 /* Used by multilib machinery. */
13948 +#define FL_UCLINUX 0 /* Used by multilib machinery. */
13949
13950 #define TARGET_68010 ((m68k_cpu_flags & FL_ISA_68010) != 0)
13951 #define TARGET_68020 ((m68k_cpu_flags & FL_ISA_68020) != 0)
13952 @@ -501,7 +502,8 @@
13953
13954 extern enum reg_class regno_reg_class[];
13955 #define REGNO_REG_CLASS(REGNO) (regno_reg_class[(REGNO)])
13956 -#define INDEX_REG_CLASS GENERAL_REGS
13957 +#define MODE_INDEX_REG_CLASS(MODE) \
13958 + (MODE_OK_FOR_INDEX_P (MODE) ? GENERAL_REGS : NO_REGS)
13959 #define BASE_REG_CLASS ADDR_REGS
13960
13961 #define PREFERRED_RELOAD_CLASS(X,CLASS) \
13962 @@ -644,7 +646,7 @@
13963 (though the operand list is empty). */
13964 #define TRANSFER_FROM_TRAMPOLINE \
13965 void \
13966 -__transfer_from_trampoline () \
13967 +__transfer_from_trampoline (void) \
13968 { \
13969 register char *a0 asm (M68K_STATIC_CHAIN_REG_NAME); \
13970 asm (GLOBAL_ASM_OP "___trampoline"); \
13971 @@ -675,6 +677,10 @@
13972 #define HAVE_POST_INCREMENT 1
13973 #define HAVE_PRE_DECREMENT 1
13974
13975 +/* Return true if addresses of mode MODE can have an index register. */
13976 +#define MODE_OK_FOR_INDEX_P(MODE) \
13977 + (!TARGET_COLDFIRE_FPU || GET_MODE_CLASS (MODE) != MODE_FLOAT)
13978 +
13979 /* Macros to check register numbers against specific register classes. */
13980
13981 /* True for data registers, D0 through D7. */
13982 @@ -689,9 +695,10 @@
13983 /* True for floating point registers, FP0 through FP7. */
13984 #define FP_REGNO_P(REGNO) IN_RANGE (REGNO, 16, 23)
13985
13986 -#define REGNO_OK_FOR_INDEX_P(REGNO) \
13987 - (INT_REGNO_P (REGNO) \
13988 - || INT_REGNO_P (reg_renumber[REGNO]))
13989 +#define REGNO_MODE_OK_FOR_INDEX_P(REGNO, MODE) \
13990 + (MODE_OK_FOR_INDEX_P (MODE) \
13991 + && (INT_REGNO_P (REGNO) \
13992 + || INT_REGNO_P (reg_renumber[REGNO])))
13993
13994 #define REGNO_OK_FOR_BASE_P(REGNO) \
13995 (ADDRESS_REGNO_P (REGNO) \
13996 @@ -751,13 +758,14 @@
13997
13998 #define LEGITIMATE_PIC_OPERAND_P(X) \
13999 (!symbolic_operand (X, VOIDmode) \
14000 - || (TARGET_PCREL && REG_STRICT_P))
14001 + || (TARGET_PCREL && REG_STRICT_P) \
14002 + || m68k_tls_reference_p (X, true))
14003
14004 #define REG_OK_FOR_BASE_P(X) \
14005 m68k_legitimate_base_reg_p (X, REG_STRICT_P)
14006
14007 -#define REG_OK_FOR_INDEX_P(X) \
14008 - m68k_legitimate_index_reg_p (X, REG_STRICT_P)
14009 +#define REG_MODE_OK_FOR_INDEX_P(X, MODE) \
14010 + m68k_legitimate_index_reg_p (MODE, X, REG_STRICT_P)
14011
14012 #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
14013 do \
14014 @@ -770,52 +778,19 @@
14015 /* This address is OK as it stands. */
14016 #define PIC_CASE_VECTOR_ADDRESS(index) index
14017 \f
14018 -/* For the 68000, we handle X+REG by loading X into a register R and
14019 - using R+REG. R will go in an address reg and indexing will be used.
14020 - However, if REG is a broken-out memory address or multiplication,
14021 - nothing needs to be done because REG can certainly go in an address reg. */
14022 -#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
14023 -#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
14024 -{ register int ch = (X) != (OLDX); \
14025 - if (GET_CODE (X) == PLUS) \
14026 - { int copied = 0; \
14027 - if (GET_CODE (XEXP (X, 0)) == MULT) \
14028 - { COPY_ONCE (X); XEXP (X, 0) = force_operand (XEXP (X, 0), 0);} \
14029 - if (GET_CODE (XEXP (X, 1)) == MULT) \
14030 - { COPY_ONCE (X); XEXP (X, 1) = force_operand (XEXP (X, 1), 0);} \
14031 - if (ch && GET_CODE (XEXP (X, 1)) == REG \
14032 - && GET_CODE (XEXP (X, 0)) == REG) \
14033 - { if (TARGET_COLDFIRE_FPU \
14034 - && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
14035 - { COPY_ONCE (X); X = force_operand (X, 0);} \
14036 - goto WIN; } \
14037 - if (ch) { GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN); } \
14038 - if (GET_CODE (XEXP (X, 0)) == REG \
14039 - || (GET_CODE (XEXP (X, 0)) == SIGN_EXTEND \
14040 - && GET_CODE (XEXP (XEXP (X, 0), 0)) == REG \
14041 - && GET_MODE (XEXP (XEXP (X, 0), 0)) == HImode)) \
14042 - { register rtx temp = gen_reg_rtx (Pmode); \
14043 - register rtx val = force_operand (XEXP (X, 1), 0); \
14044 - emit_move_insn (temp, val); \
14045 - COPY_ONCE (X); \
14046 - XEXP (X, 1) = temp; \
14047 - if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (MODE) == MODE_FLOAT \
14048 - && GET_CODE (XEXP (X, 0)) == REG) \
14049 - X = force_operand (X, 0); \
14050 - goto WIN; } \
14051 - else if (GET_CODE (XEXP (X, 1)) == REG \
14052 - || (GET_CODE (XEXP (X, 1)) == SIGN_EXTEND \
14053 - && GET_CODE (XEXP (XEXP (X, 1), 0)) == REG \
14054 - && GET_MODE (XEXP (XEXP (X, 1), 0)) == HImode)) \
14055 - { register rtx temp = gen_reg_rtx (Pmode); \
14056 - register rtx val = force_operand (XEXP (X, 0), 0); \
14057 - emit_move_insn (temp, val); \
14058 - COPY_ONCE (X); \
14059 - XEXP (X, 0) = temp; \
14060 - if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (MODE) == MODE_FLOAT \
14061 - && GET_CODE (XEXP (X, 1)) == REG) \
14062 - X = force_operand (X, 0); \
14063 - goto WIN; }}}
14064 +#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
14065 +do { \
14066 + rtx __x; \
14067 + \
14068 + __x = m68k_legitimize_address (X, OLDX, MODE); \
14069 + if (__x != NULL_RTX) \
14070 + { \
14071 + X = __x; \
14072 + \
14073 + if (memory_address_p (MODE, X)) \
14074 + goto WIN; \
14075 + } \
14076 +} while (0)
14077
14078 /* On the 68000, only predecrement and postincrement address depend thus
14079 (the amount of decrement or increment being the length of the operand).
14080 @@ -1028,6 +1003,9 @@
14081 assemble_name ((FILE), (NAME)), \
14082 fprintf ((FILE), ",%u\n", (int)(ROUNDED)))
14083
14084 +#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
14085 + m68k_final_prescan_insn (INSN, OPVEC, NOPERANDS)
14086 +
14087 /* On the 68000, we use several CODE characters:
14088 '.' for dot needed in Motorola-style opcode names.
14089 '-' for an operand pushing on the stack:
14090 --- a/gcc/config/m68k/m68k.md
14091 +++ b/gcc/config/m68k/m68k.md
14092 @@ -116,7 +116,8 @@
14093 (UNSPEC_GOT 3)
14094 (UNSPEC_IB 4)
14095 (UNSPEC_TIE 5)
14096 - (UNSPEC_GOTOFF 6)
14097 + (UNSPEC_RELOC16 6)
14098 + (UNSPEC_RELOC32 7)
14099 ])
14100
14101 ;; UNSPEC_VOLATILE usage:
14102 @@ -414,7 +415,7 @@
14103
14104 (define_insn "tst<mode>_cf"
14105 [(set (cc0)
14106 - (match_operand:FP 0 "general_operand" "f<FP:dreg><Q>U"))]
14107 + (match_operand:FP 0 "general_operand" "f<FP:dreg>m"))]
14108 "TARGET_COLDFIRE_FPU"
14109 {
14110 cc_status.flags = CC_IN_68881;
14111 @@ -570,8 +571,8 @@
14112
14113 (define_insn "*cmp<mode>_cf"
14114 [(set (cc0)
14115 - (compare (match_operand:FP 0 "fp_src_operand" "f,f,<FP:dreg><Q>U")
14116 - (match_operand:FP 1 "fp_src_operand" "f,<FP:dreg><Q>U,f")))]
14117 + (compare (match_operand:FP 0 "fp_src_operand" "f,f,<FP:dreg>m")
14118 + (match_operand:FP 1 "fp_src_operand" "f,<FP:dreg>m,f")))]
14119 "TARGET_COLDFIRE_FPU
14120 && (register_operand (operands[0], <MODE>mode)
14121 || register_operand (operands[1], <MODE>mode))"
14122 @@ -779,7 +780,41 @@
14123 {
14124 rtx tmp, base, offset;
14125
14126 - if (flag_pic && !TARGET_PCREL && symbolic_operand (operands[1], SImode))
14127 + /* Recognize the case where operand[1] is a reference to thread-local
14128 + data and load its address to a register. */
14129 + if (!TARGET_PCREL && m68k_tls_reference_p (operands[1], false))
14130 + {
14131 + rtx tmp = operands[1];
14132 + rtx addend = NULL;
14133 +
14134 + if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
14135 + {
14136 + addend = XEXP (XEXP (tmp, 0), 1);
14137 + tmp = XEXP (XEXP (tmp, 0), 0);
14138 + }
14139 +
14140 + gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
14141 + gcc_assert (SYMBOL_REF_TLS_MODEL (tmp) != 0);
14142 +
14143 + tmp = m68k_legitimize_tls_address (tmp);
14144 +
14145 + if (addend)
14146 + {
14147 + if (!REG_P (tmp))
14148 + {
14149 + rtx reg;
14150 +
14151 + reg = gen_reg_rtx (Pmode);
14152 + emit_move_insn (reg, tmp);
14153 + tmp = reg;
14154 + }
14155 +
14156 + tmp = gen_rtx_PLUS (SImode, tmp, addend);
14157 + }
14158 +
14159 + operands[1] = tmp;
14160 + }
14161 + else if (flag_pic && !TARGET_PCREL && symbolic_operand (operands[1], SImode))
14162 {
14163 /* The source is an address which requires PIC relocation.
14164 Call legitimize_pic_address with the source, mode, and a relocation
14165 @@ -1070,10 +1105,8 @@
14166 ;; SFmode MEMs are restricted to modes 2-4 if TARGET_COLDFIRE_FPU.
14167 ;; The move instructions can handle all combinations.
14168 (define_insn "movsf_cf_hard"
14169 - [(set (match_operand:SF 0 "nonimmediate_operand" "=r<Q>U, f, f,mr,f,r<Q>,f
14170 -,m")
14171 - (match_operand:SF 1 "general_operand" " f, r<Q>U,f,rm,F,F, m
14172 -,f"))]
14173 + [(set (match_operand:SF 0 "nonimmediate_operand" "=rm,f, f,rm,f,r<Q>,f,m")
14174 + (match_operand:SF 1 "general_operand" " f, rm,f,rm,F,F, m,f"))]
14175 "TARGET_COLDFIRE_FPU"
14176 {
14177 if (which_alternative == 4 || which_alternative == 5) {
14178 @@ -1215,8 +1248,8 @@
14179 })
14180
14181 (define_insn "movdf_cf_hard"
14182 - [(set (match_operand:DF 0 "nonimmediate_operand" "=f, <Q>U,r,f,r,r,m,f")
14183 - (match_operand:DF 1 "general_operand" " f<Q>U,f, f,r,r,m,r,E"))]
14184 + [(set (match_operand:DF 0 "nonimmediate_operand" "=f, m,r,f,r,r,m,f")
14185 + (match_operand:DF 1 "general_operand" " fm,f,f,r,r,m,r,E"))]
14186 "TARGET_COLDFIRE_FPU"
14187 {
14188 rtx xoperands[3];
14189 @@ -1857,7 +1890,7 @@
14190 (define_insn "extendsfdf2_cf"
14191 [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f")
14192 (float_extend:DF
14193 - (match_operand:SF 1 "general_operand" "f,<Q>U")))]
14194 + (match_operand:SF 1 "general_operand" "f,m")))]
14195 "TARGET_COLDFIRE_FPU"
14196 {
14197 if (FP_REG_P (operands[0]) && FP_REG_P (operands[1]))
14198 @@ -1897,9 +1930,9 @@
14199 })
14200
14201 (define_insn "truncdfsf2_cf"
14202 - [(set (match_operand:SF 0 "nonimmediate_operand" "=f,d<Q>U")
14203 + [(set (match_operand:SF 0 "nonimmediate_operand" "=f,dm")
14204 (float_truncate:SF
14205 - (match_operand:DF 1 "general_operand" "<Q>U,f")))]
14206 + (match_operand:DF 1 "general_operand" "m,f")))]
14207 "TARGET_COLDFIRE_FPU"
14208 "@
14209 fsmove%.d %1,%0
14210 @@ -2045,7 +2078,7 @@
14211
14212 (define_insn "ftrunc<mode>2_cf"
14213 [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
14214 - (fix:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U")))]
14215 + (fix:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m")))]
14216 "TARGET_COLDFIRE_FPU"
14217 {
14218 if (FP_REG_P (operands[1]))
14219 @@ -2338,9 +2371,9 @@
14220 "* return output_addsi3 (operands);")
14221
14222 (define_insn_and_split "*addsi3_5200"
14223 - [(set (match_operand:SI 0 "nonimmediate_operand" "=mr,mr,a,m,r, ?a, ?a,?a,?a")
14224 - (plus:SI (match_operand:SI 1 "general_operand" "%0, 0, 0,0,0, a, a, r, a")
14225 - (match_operand:SI 2 "general_src_operand" " I, L, J,d,mrKi,Cj, r, a, J")))]
14226 + [(set (match_operand:SI 0 "nonimmediate_operand" "=mr,mr,a, m,r, ?a, ?a,?a,?a")
14227 + (plus:SI (match_operand:SI 1 "general_operand" "%0, 0, 0, 0,0, a, a, r, a")
14228 + (match_operand:SI 2 "general_src_operand" " I, L, JCu,d,mrKi,Cj, r, a, JCu")))]
14229 "TARGET_COLDFIRE"
14230 {
14231 switch (which_alternative)
14232 @@ -2382,9 +2415,9 @@
14233 (plus:SI (match_dup 0)
14234 (match_dup 1)))]
14235 ""
14236 - [(set_attr "type" "aluq_l,aluq_l,lea,alu_l,alu_l,*,lea,lea,lea")
14237 - (set_attr "opy" "2,2,*,2,2,*,*,*,*")
14238 - (set_attr "opy_type" "*,*,mem5,*,*,*,mem6,mem6,mem5")])
14239 + [(set_attr "type" "aluq_l,aluq_l,lea, alu_l,alu_l,*,lea, lea, lea")
14240 + (set_attr "opy" "2, 2, *, 2, 2, *,*, *, *")
14241 + (set_attr "opy_type" "*, *, mem5,*, *, *,mem6,mem6,mem5")])
14242
14243 (define_insn ""
14244 [(set (match_operand:SI 0 "nonimmediate_operand" "=a")
14245 @@ -2666,7 +2699,7 @@
14246 (define_insn "add<mode>3_cf"
14247 [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
14248 (plus:FP (match_operand:FP 1 "general_operand" "%0")
14249 - (match_operand:FP 2 "general_operand" "f<FP:dreg><Q>U")))]
14250 + (match_operand:FP 2 "general_operand" "f<FP:dreg>m")))]
14251 "TARGET_COLDFIRE_FPU"
14252 {
14253 if (FP_REG_P (operands[2]))
14254 @@ -2889,7 +2922,7 @@
14255 (define_insn "sub<mode>3_cf"
14256 [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
14257 (minus:FP (match_operand:FP 1 "general_operand" "0")
14258 - (match_operand:FP 2 "general_operand" "f<FP:dreg><Q>U")))]
14259 + (match_operand:FP 2 "general_operand" "f<FP:dreg>m")))]
14260 "TARGET_COLDFIRE_FPU"
14261 {
14262 if (FP_REG_P (operands[2]))
14263 @@ -3245,7 +3278,7 @@
14264 (define_insn "fmul<mode>3_cf"
14265 [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
14266 (mult:FP (match_operand:FP 1 "general_operand" "%0")
14267 - (match_operand:FP 2 "general_operand" "f<Q>U<FP:dreg>")))]
14268 + (match_operand:FP 2 "general_operand" "fm<FP:dreg>")))]
14269 "TARGET_COLDFIRE_FPU"
14270 {
14271 if (FP_REG_P (operands[2]))
14272 @@ -3315,7 +3348,7 @@
14273 (define_insn "div<mode>3_cf"
14274 [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
14275 (div:FP (match_operand:FP 1 "general_operand" "0")
14276 - (match_operand:FP 2 "general_operand" "f<Q>U<FP:dreg>")))]
14277 + (match_operand:FP 2 "general_operand" "fm<FP:dreg>")))]
14278 "TARGET_COLDFIRE_FPU"
14279 {
14280 if (FP_REG_P (operands[2]))
14281 @@ -4163,7 +4196,7 @@
14282
14283 (define_insn "neg<mode>2_cf"
14284 [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
14285 - (neg:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U,0")))]
14286 + (neg:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m,0")))]
14287 "TARGET_COLDFIRE_FPU"
14288 {
14289 if (DATA_REG_P (operands[0]))
14290 @@ -4197,7 +4230,7 @@
14291
14292 (define_insn "sqrt<mode>2_cf"
14293 [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
14294 - (sqrt:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U")))]
14295 + (sqrt:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m")))]
14296 "TARGET_COLDFIRE_FPU"
14297 {
14298 if (FP_REG_P (operands[1]))
14299 @@ -4316,7 +4349,7 @@
14300
14301 (define_insn "abs<mode>2_cf"
14302 [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
14303 - (abs:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U,0")))]
14304 + (abs:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m,0")))]
14305 "TARGET_COLDFIRE_FPU"
14306 {
14307 if (DATA_REG_P (operands[0]))
14308 --- a/gcc/config/m68k/m68k.opt
14309 +++ b/gcc/config/m68k/m68k.opt
14310 @@ -182,3 +182,7 @@
14311 mxgot
14312 Target Report Mask(XGOT)
14313 Support more than 8192 GOT entries on ColdFire
14314 +
14315 +mxtls
14316 +Target Report Mask(XTLS)
14317 +Support TLS segment larger than 64K
14318 --- a/gcc/config/m68k/m68k-protos.h
14319 +++ b/gcc/config/m68k/m68k-protos.h
14320 @@ -54,19 +54,27 @@
14321 extern bool m68k_output_addr_const_extra (FILE *, rtx);
14322 extern void notice_update_cc (rtx, rtx);
14323 extern bool m68k_legitimate_base_reg_p (rtx, bool);
14324 -extern bool m68k_legitimate_index_reg_p (rtx, bool);
14325 +extern bool m68k_legitimate_index_reg_p (enum machine_mode, rtx, bool);
14326 extern bool m68k_illegitimate_symbolic_constant_p (rtx);
14327 extern bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
14328 extern bool m68k_matches_q_p (rtx);
14329 extern bool m68k_matches_u_p (rtx);
14330 extern rtx legitimize_pic_address (rtx, enum machine_mode, rtx);
14331 +extern rtx m68k_legitimize_tls_address (rtx);
14332 +extern bool m68k_tls_reference_p (rtx, bool);
14333 +extern rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
14334 extern int valid_dbcc_comparison_p_2 (rtx, enum machine_mode);
14335 extern rtx m68k_libcall_value (enum machine_mode);
14336 extern rtx m68k_function_value (const_tree, const_tree);
14337 extern int emit_move_sequence (rtx *, enum machine_mode, rtx);
14338 extern bool m68k_movem_pattern_p (rtx, rtx, HOST_WIDE_INT, bool);
14339 extern const char *m68k_output_movem (rtx *, rtx, HOST_WIDE_INT, bool);
14340 +extern void m68k_final_prescan_insn (rtx, rtx *, int);
14341
14342 +/* Functions from m68k.c used in constraints.md. */
14343 +extern rtx m68k_unwrap_symbol (rtx, bool);
14344 +
14345 +/* Functions from m68k.c used in genattrtab. */
14346 #ifdef HAVE_ATTR_cpu
14347 extern enum attr_cpu m68k_sched_cpu;
14348 extern enum attr_mac m68k_sched_mac;
14349 --- a/gcc/config/m68k/predicates.md
14350 +++ b/gcc/config/m68k/predicates.md
14351 @@ -130,7 +130,9 @@
14352 (match_code "sign_extend,zero_extend"))
14353
14354 ;; Returns true if OP is either a symbol reference or a sum of a
14355 -;; symbol reference and a constant.
14356 +;; symbol reference and a constant. This predicate is for "raw"
14357 +;; symbol references not yet processed by legitimize*_address,
14358 +;; hence we do not handle UNSPEC_{XGOT, TLS, XTLS} here.
14359
14360 (define_predicate "symbolic_operand"
14361 (match_code "symbol_ref,label_ref,const")
14362 --- a/gcc/config/m68k/t-uclinux
14363 +++ b/gcc/config/m68k/t-uclinux
14364 @@ -1,8 +1,8 @@
14365 # crti and crtn are provided by uClibc.
14366 EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o
14367
14368 -# Only include multilibs for the 68020 and for CPUs without an MMU.
14369 -M68K_MLIB_CPU += && (MLIB == "68020" || !match(FLAGS, "FL_MMU"))
14370 +# Include multilibs for CPUs without an MMU or with FL_UCLINUX
14371 +M68K_MLIB_CPU += && (!match(FLAGS, "FL_MMU") || match(FLAGS, "FL_UCLINUX"))
14372
14373 # Add multilibs for execute-in-place and shared-library code.
14374 M68K_MLIB_OPTIONS += msep-data/mid-shared-library
14375 --- a/gcc/config/mips/74k.md
14376 +++ b/gcc/config/mips/74k.md
14377 @@ -118,8 +118,7 @@
14378 ;; stores
14379 (define_insn_reservation "r74k_int_store" 1
14380 (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
14381 - (and (eq_attr "type" "store")
14382 - (eq_attr "mode" "!unknown")))
14383 + (eq_attr "type" "store"))
14384 "r74k_agen")
14385
14386
14387 @@ -145,33 +144,123 @@
14388 ;; load->load base: 4 cycles
14389 ;; load->store base: 4 cycles
14390 (define_bypass 4 "r74k_int_load" "r74k_int_load")
14391 -(define_bypass 4 "r74k_int_load" "r74k_int_store" "!store_data_bypass_p")
14392 +(define_bypass 4 "r74k_int_load" "r74k_int_store" "!mips_store_data_bypass_p")
14393
14394 ;; logical/move/slt/signext->next use : 1 cycles (Default)
14395 ;; logical/move/slt/signext->load base: 2 cycles
14396 ;; logical/move/slt/signext->store base: 2 cycles
14397 (define_bypass 2 "r74k_int_logical" "r74k_int_load")
14398 -(define_bypass 2 "r74k_int_logical" "r74k_int_store" "!store_data_bypass_p")
14399 +(define_bypass 2 "r74k_int_logical" "r74k_int_store"
14400 + "!mips_store_data_bypass_p")
14401
14402 ;; arith->next use : 2 cycles (Default)
14403 ;; arith->load base: 3 cycles
14404 ;; arith->store base: 3 cycles
14405 (define_bypass 3 "r74k_int_arith" "r74k_int_load")
14406 -(define_bypass 3 "r74k_int_arith" "r74k_int_store" "!store_data_bypass_p")
14407 +(define_bypass 3 "r74k_int_arith" "r74k_int_store" "!mips_store_data_bypass_p")
14408
14409 ;; cmove->next use : 4 cycles (Default)
14410 ;; cmove->load base: 5 cycles
14411 ;; cmove->store base: 5 cycles
14412 (define_bypass 5 "r74k_int_cmove" "r74k_int_load")
14413 -(define_bypass 5 "r74k_int_cmove" "r74k_int_store" "!store_data_bypass_p")
14414 +(define_bypass 5 "r74k_int_cmove" "r74k_int_store"
14415 + "!mips_store_data_bypass_p")
14416
14417 ;; mult/madd/msub->int_mfhilo : 4 cycles (default)
14418 ;; mult->madd/msub : 1 cycles
14419 ;; madd/msub->madd/msub : 1 cycles
14420 -(define_bypass 1 "r74k_int_mult,r74k_int_mul3" "r74k_int_madd"
14421 - "mips_linked_madd_p")
14422 -(define_bypass 1 "r74k_int_madd" "r74k_int_madd"
14423 - "mips_linked_madd_p")
14424 +(define_bypass 1 "r74k_int_mult" "r74k_int_madd")
14425 +(define_bypass 1 "r74k_int_madd" "r74k_int_madd")
14426 +
14427 +(define_bypass 1 "r74k_int_mul3" "r74k_int_madd"
14428 + "mips_mult_madd_chain_bypass_p")
14429 +
14430 +
14431 +;; --------------------------------------------------------------
14432 +;; DSP instructions
14433 +;; --------------------------------------------------------------
14434 +
14435 +;; Non-saturating insns have the same latency as normal ALU operations.
14436 +(define_insn_reservation "r74k_dsp_alu" 2
14437 + (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
14438 + (eq_attr "type" "dspalu"))
14439 + "r74k_alu")
14440 +
14441 +;; Saturating insn takes an extra cycle.
14442 +(define_insn_reservation "r74k_dsp_alu_sat" 3
14443 + (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
14444 + (eq_attr "type" "dspalusat"))
14445 + "r74k_alu")
14446 +
14447 +;; dpaq_s, dpau, dpsq_s, dpsu, maq_s, mulsaq
14448 +;; - delivers result to hi/lo in 6 cycle (bypass at M4)
14449 +(define_insn_reservation "r74k_dsp_mac" 6
14450 + (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
14451 + (eq_attr "type" "dspmac"))
14452 + "r74k_alu+r74k_mul")
14453 +
14454 +;; dpaq_sa, dpsq_sa, maq_sa
14455 +;; - delivers result to hi/lo in 7 cycle (bypass at WB)
14456 +(define_insn_reservation "r74k_dsp_mac_sat" 7
14457 + (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
14458 + (eq_attr "type" "dspmacsat"))
14459 + "r74k_alu+r74k_mul")
14460 +
14461 +;; extp, extpdp, extpdpv, extpv, extr, extrv
14462 +;; - same latency as "mul"
14463 +(define_insn_reservation "r74k_dsp_acc_ext" 7
14464 + (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
14465 + (eq_attr "type" "accext"))
14466 + "r74k_alu+r74k_mul")
14467 +
14468 +;; mthlip, shilo, shilov
14469 +;; - same latency as "mul"
14470 +(define_insn_reservation "r74k_dsp_acc_mod" 7
14471 + (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
14472 + (eq_attr "type" "accmod"))
14473 + "r74k_alu+r74k_mul")
14474 +
14475 +;; dspalu ->load/store base
14476 +;; dspalusat->load/store base
14477 +;; - we should never see these in real life.
14478 +
14479 +;; dsp_mac->dsp_mac : 1 cycles (repeat rate of 1)
14480 +;; dsp_mac->dsp_mac_sat : 1 cycles (repeat rate of 1)
14481 +(define_bypass 1 "r74k_dsp_mac" "r74k_dsp_mac")
14482 +(define_bypass 1 "r74k_dsp_mac" "r74k_dsp_mac_sat")
14483 +
14484 +;; dsp_mac_sat->dsp_mac_sat : 2 cycles (repeat rate of 2)
14485 +;; dsp_mac_sat->dsp_mac : 2 cycles (repeat rate of 2)
14486 +(define_bypass 2 "r74k_dsp_mac_sat" "r74k_dsp_mac_sat")
14487 +(define_bypass 2 "r74k_dsp_mac_sat" "r74k_dsp_mac")
14488 +
14489 +(define_bypass 1 "r74k_int_mult" "r74k_dsp_mac")
14490 +(define_bypass 1 "r74k_int_mult" "r74k_dsp_mac_sat")
14491 +
14492 +;; Before reload, all multipliers are registered as imul3 (which has a long
14493 +;; latency).  We temporarily jig the latency such that the macc groups
14494 +;; are scheduled closely together during the first scheduler pass.
14495 +(define_bypass 1 "r74k_int_mul3" "r74k_dsp_mac"
14496 + "mips_mult_madd_chain_bypass_p")
14497 +(define_bypass 1 "r74k_int_mul3" "r74k_dsp_mac_sat"
14498 + "mips_mult_madd_chain_bypass_p")
14499 +
14500 +;; Assuming the following is true (bypass at M4)
14501 +;; AP AF AM MB M1 M2 M3 M4 WB GR GC
14502 +;; AP AF AM MB M1 M2 M3 M4 WB GR GC
14503 +;; dsp_mac->dsp_acc_ext : 4 cycles
14504 +;; dsp_mac->dsp_acc_mod : 4 cycles
14505 +(define_bypass 4 "r74k_dsp_mac" "r74k_dsp_acc_ext")
14506 +(define_bypass 4 "r74k_dsp_mac" "r74k_dsp_acc_mod")
14507 +
14508 +;; Assuming the following is true (bypass at WB)
14509 +;; AP AF AM MB M1 M2 M3 M4 WB GR GC
14510 +;; AP AF AM MB M1 M2 M3 M4 WB GR GC
14511 +;; dsp_mac_sat->dsp_acc_ext : 5 cycles
14512 +;; dsp_mac_sat->dsp_acc_mod : 5 cycles
14513 +(define_bypass 5 "r74k_dsp_mac_sat" "r74k_dsp_acc_ext")
14514 +(define_bypass 5 "r74k_dsp_mac_sat" "r74k_dsp_acc_mod")
14515 +
14516
14517 ;; --------------------------------------------------------------
14518 ;; Floating Point Instructions
14519 --- /dev/null
14520 +++ b/gcc/config/mips/crtfastmath.c
14521 @@ -0,0 +1,53 @@
14522 +/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
14523 +
14524 + This file is part of GCC.
14525 +
14526 + GCC is free software; you can redistribute it and/or modify it
14527 + under the terms of the GNU General Public License as published by
14528 + the Free Software Foundation; either version 3, or (at your option)
14529 + any later version.
14530 +
14531 + GCC is distributed in the hope that it will be useful, but WITHOUT
14532 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14533 + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14534 + License for more details.
14535 +
14536 + Under Section 7 of GPL version 3, you are granted additional
14537 + permissions described in the GCC Runtime Library Exception, version
14538 + 3.1, as published by the Free Software Foundation.
14539 +
14540 + You should have received a copy of the GNU General Public License
14541 + and a copy of the GCC Runtime Library Exception along with this
14542 + program; see the files COPYING3 and COPYING.RUNTIME respectively.
14543 + If not, see <http://www.gnu.org/licenses/>. */
14544 +
14545 +#ifdef __mips_hard_float
14546 +
14547 +/* flush denormalized numbers to zero */
14548 +#define _FPU_FLUSH_TZ 0x1000000
14549 +
14550 +/* rounding control */
14551 +#define _FPU_RC_NEAREST 0x0 /* RECOMMENDED */
14552 +#define _FPU_RC_ZERO 0x1
14553 +#define _FPU_RC_UP 0x2
14554 +#define _FPU_RC_DOWN 0x3
14555 +
14556 +/* enable interrupts for IEEE exceptions */
14557 +#define _FPU_IEEE 0x00000F80
14558 +
14559 +/* Macros for accessing the hardware control word. */
14560 +#define _FPU_GETCW(cw) __asm__ ("cfc1 %0,$31" : "=r" (cw))
14561 +#define _FPU_SETCW(cw) __asm__ ("ctc1 %0,$31" : : "r" (cw))
14562 +
14563 +static void __attribute__((constructor))
14564 +set_fast_math (void)
14565 +{
14566 + unsigned int fcr;
14567 +
14568 + /* fastmath: flush to zero, round to nearest, ieee exceptions disabled */
14569 + fcr = _FPU_FLUSH_TZ | _FPU_RC_NEAREST;
14570 +
14571 + _FPU_SETCW(fcr);
14572 +}
14573 +
14574 +#endif /* __mips_hard_float */
14575 --- a/gcc/config/mips/linux64.h
14576 +++ b/gcc/config/mips/linux64.h
14577 @@ -69,3 +69,9 @@
14578 ieee_quad_format is the default, but let's put this here to make
14579 sure nobody thinks we just forgot to set it to something else. */
14580 #define MIPS_TFMODE_FORMAT mips_quad_format
14581 +
14582 +/* Similar to standard Linux, but adding -ffast-math support. */
14583 +#undef ENDFILE_SPEC
14584 +#define ENDFILE_SPEC \
14585 + "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
14586 + %{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
14587 --- a/gcc/config/mips/linux.h
14588 +++ b/gcc/config/mips/linux.h
14589 @@ -147,3 +147,17 @@
14590 #define DRIVER_SELF_SPECS \
14591 BASE_DRIVER_SELF_SPECS, \
14592 LINUX_DRIVER_SELF_SPECS
14593 +
14594 +/* Similar to standard Linux, but adding -ffast-math support. */
14595 +#undef ENDFILE_SPEC
14596 +#define ENDFILE_SPEC \
14597 + "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
14598 + %{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
14599 +
14600 +#undef SUBTARGET_OVERRIDE_OPTIONS
14601 +#define SUBTARGET_OVERRIDE_OPTIONS \
14602 +do { \
14603 + /* __thread_support is not supported by uClibc. */ \
14604 + if (linux_uclibc) \
14605 + targetm.have_tls = 0; \
14606 +} while (0)
14607 --- a/gcc/config/mips/mips.c
14608 +++ b/gcc/config/mips/mips.c
14609 @@ -261,18 +261,29 @@
14610 /* Likewise FPR X. */
14611 unsigned int fmask;
14612
14613 - /* The number of GPRs and FPRs saved. */
14614 + /* Likewise doubleword accumulator X ($acX). */
14615 + unsigned int acc_mask;
14616 +
14617 + /* The number of GPRs, FPRs, doubleword accumulators and COP0
14618 + registers saved. */
14619 unsigned int num_gp;
14620 unsigned int num_fp;
14621 + unsigned int num_acc;
14622 + unsigned int num_cop0_regs;
14623
14624 - /* The offset of the topmost GPR and FPR save slots from the top of
14625 - the frame, or zero if no such slots are needed. */
14626 + /* The offset of the topmost GPR, FPR, accumulator and COP0-register
14627 + save slots from the top of the frame, or zero if no such slots are
14628 + needed. */
14629 HOST_WIDE_INT gp_save_offset;
14630 HOST_WIDE_INT fp_save_offset;
14631 + HOST_WIDE_INT acc_save_offset;
14632 + HOST_WIDE_INT cop0_save_offset;
14633
14634 /* Likewise, but giving offsets from the bottom of the frame. */
14635 HOST_WIDE_INT gp_sp_offset;
14636 HOST_WIDE_INT fp_sp_offset;
14637 + HOST_WIDE_INT acc_sp_offset;
14638 + HOST_WIDE_INT cop0_sp_offset;
14639
14640 /* The offset of arg_pointer_rtx from frame_pointer_rtx. */
14641 HOST_WIDE_INT arg_pointer_offset;
14642 @@ -310,6 +321,20 @@
14643 /* True if we have emitted an instruction to initialize
14644 mips16_gp_pseudo_rtx. */
14645 bool initialized_mips16_gp_pseudo_p;
14646 +
14647 + /* True if this is an interrupt handler. */
14648 + bool interrupt_handler_p;
14649 +
14650 + /* True if this is an interrupt handler that uses shadow registers. */
14651 + bool use_shadow_register_set_p;
14652 +
14653 + /* True if this is an interrupt handler that should keep interrupts
14654 + masked. */
14655 + bool keep_interrupts_masked_p;
14656 +
14657 + /* True if this is an interrupt handler that should use DERET
14658 + instead of ERET. */
14659 + bool use_debug_exception_return_p;
14660 };
14661
14662 /* Information about a single argument. */
14663 @@ -542,9 +567,16 @@
14664 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
14665 };
14666
14667 +#ifdef CVMX_SHARED_BSS_FLAGS
14668 +static tree octeon_handle_cvmx_shared_attribute (tree *, tree, tree, int, bool *);
14669 +#endif
14670 +
14671 /* The value of TARGET_ATTRIBUTE_TABLE. */
14672 const struct attribute_spec mips_attribute_table[] = {
14673 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
14674 +#ifdef CVMX_SHARED_BSS_FLAGS
14675 + { "cvmx_shared", 0, 0, true, false, false, octeon_handle_cvmx_shared_attribute },
14676 +#endif
14677 { "long_call", 0, 0, false, true, true, NULL },
14678 { "far", 0, 0, false, true, true, NULL },
14679 { "near", 0, 0, false, true, true, NULL },
14680 @@ -554,6 +586,11 @@
14681 code generation but don't carry other semantics. */
14682 { "mips16", 0, 0, true, false, false, NULL },
14683 { "nomips16", 0, 0, true, false, false, NULL },
14684 + /* Allow functions to be specified as interrupt handlers */
14685 + { "interrupt", 0, 0, false, true, true, NULL },
14686 + { "use_shadow_register_set", 0, 0, false, true, true, NULL },
14687 + { "keep_interrupts_masked", 0, 0, false, true, true, NULL },
14688 + { "use_debug_exception_return", 0, 0, false, true, true, NULL },
14689 { NULL, 0, 0, false, false, false, NULL }
14690 };
14691 \f
14692 @@ -659,6 +696,11 @@
14693 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
14694 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
14695
14696 + { "1004kc", PROCESSOR_24KC, 33, 0 }, /* 1004K with MT/DSP. */
14697 + { "1004kf2_1", PROCESSOR_24KF2_1, 33, 0 },
14698 + { "1004kf", PROCESSOR_24KF2_1, 33, 0 },
14699 + { "1004kf1_1", PROCESSOR_24KF1_1, 33, 0 },
14700 +
14701 /* MIPS64 processors. */
14702 { "5kc", PROCESSOR_5KC, 64, 0 },
14703 { "5kf", PROCESSOR_5KF, 64, 0 },
14704 @@ -1064,13 +1106,7 @@
14705 DEFAULT_COSTS
14706 },
14707 { /* XLR */
14708 - /* Need to replace first five with the costs of calling the appropriate
14709 - libgcc routine. */
14710 - COSTS_N_INSNS (256), /* fp_add */
14711 - COSTS_N_INSNS (256), /* fp_mult_sf */
14712 - COSTS_N_INSNS (256), /* fp_mult_df */
14713 - COSTS_N_INSNS (256), /* fp_div_sf */
14714 - COSTS_N_INSNS (256), /* fp_div_df */
14715 + SOFT_FP_COSTS,
14716 COSTS_N_INSNS (8), /* int_mult_si */
14717 COSTS_N_INSNS (8), /* int_mult_di */
14718 COSTS_N_INSNS (72), /* int_div_si */
14719 @@ -1172,6 +1208,42 @@
14720 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
14721 }
14722
14723 +/* Check if the interrupt attribute is set for a function. */
14724 +
14725 +static bool
14726 +mips_interrupt_type_p (tree type)
14727 +{
14728 + return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL;
14729 +}
14730 +
14731 +/* Check if the attribute to use shadow register set is set for a function. */
14732 +
14733 +static bool
14734 +mips_use_shadow_register_set_p (tree type)
14735 +{
14736 + return lookup_attribute ("use_shadow_register_set",
14737 + TYPE_ATTRIBUTES (type)) != NULL;
14738 +}
14739 +
14740 +/* Check if the attribute to keep interrupts masked is set for a function. */
14741 +
14742 +static bool
14743 +mips_keep_interrupts_masked_p (tree type)
14744 +{
14745 + return lookup_attribute ("keep_interrupts_masked",
14746 + TYPE_ATTRIBUTES (type)) != NULL;
14747 +}
14748 +
14749 +/* Check if the attribute to use debug exception return is set for
14750 + a function. */
14751 +
14752 +static bool
14753 +mips_use_debug_exception_return_p (tree type)
14754 +{
14755 + return lookup_attribute ("use_debug_exception_return",
14756 + TYPE_ATTRIBUTES (type)) != NULL;
14757 +}
14758 +
14759 /* Return true if function DECL is a MIPS16 function. Return the ambient
14760 setting if DECL is null. */
14761
14762 @@ -2795,7 +2867,7 @@
14763 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
14764 {
14765 rtx base, addr;
14766 - HOST_WIDE_INT offset;
14767 + HOST_WIDE_INT intval, high, offset;
14768
14769 if (mips_tls_symbol_p (*xloc))
14770 {
14771 @@ -2820,6 +2892,32 @@
14772 *xloc = mips_force_address (addr, mode);
14773 return true;
14774 }
14775 +
14776 + /* Handle references to constant addresses by loading the high part
14777 + into a register and using an offset for the low part. */
14778 + if (GET_CODE (base) == CONST_INT)
14779 + {
14780 + intval = INTVAL (base);
14781 + high = trunc_int_for_mode (CONST_HIGH_PART (intval), Pmode);
14782 + offset = CONST_LOW_PART (intval);
14783 + /* Ignore cases in which a positive address would be accessed by a
14784 + negative offset from a negative address. The required wraparound
14785 + does not occur for 32-bit addresses on 64-bit targets, and it is
14786 + very unlikely that such an access would occur in real code anyway.
14787 +
14788 + If the low offset is not legitimate for MODE, prefer to load
14789 + the constant normally, instead of using mips_force_address on
14790 + the legitimized address. The latter option would cause us to
14791 + use (D)ADDIU unconditionally, but LUI/ORI is more efficient
14792 + than LUI/ADDIU on some targets. */
14793 + if ((intval < 0 || high > 0)
14794 + && mips_valid_offset_p (GEN_INT (offset), mode))
14795 + {
14796 + base = mips_force_temporary (NULL, GEN_INT (high));
14797 + *xloc = plus_constant (base, offset);
14798 + return true;
14799 + }
14800 + }
14801 return false;
14802 }
14803
14804 @@ -6188,6 +6286,11 @@
14805 if (!TARGET_SIBCALLS)
14806 return false;
14807
14808 + /* Interrupt handlers need special epilogue code and therefore can't
14809 + use sibcalls. */
14810 + if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
14811 + return false;
14812 +
14813 /* We can't do a sibcall if the called function is a MIPS16 function
14814 because there is no direct "jx" instruction equivalent to "jalx" to
14815 switch the ISA mode. We only care about cases where the sibling
14816 @@ -6608,6 +6711,15 @@
14817 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
14818 return false;
14819
14820 + if (ISA_HAS_UL_US)
14821 + {
14822 + if (GET_MODE (dest) == DImode)
14823 + emit_insn (gen_mov_uld (dest, src, left));
14824 + else
14825 + emit_insn (gen_mov_ulw (dest, src, left));
14826 + return true;
14827 + }
14828 +
14829 temp = gen_reg_rtx (GET_MODE (dest));
14830 if (GET_MODE (dest) == DImode)
14831 {
14832 @@ -6642,6 +6754,16 @@
14833
14834 mode = mode_for_size (width, MODE_INT, 0);
14835 src = gen_lowpart (mode, src);
14836 +
14837 + if (ISA_HAS_UL_US)
14838 + {
14839 + if (GET_MODE (src) == DImode)
14840 + emit_insn (gen_mov_usd (dest, src, left));
14841 + else
14842 + emit_insn (gen_mov_usw (dest, src, left));
14843 + return true;
14844 + }
14845 +
14846 if (mode == DImode)
14847 {
14848 emit_insn (gen_mov_sdl (dest, src, left));
14849 @@ -7229,7 +7351,11 @@
14850 || (letter == 'L' && TARGET_BIG_ENDIAN)
14851 || letter == 'D')
14852 regno++;
14853 - fprintf (file, "%s", reg_names[regno]);
14854 + /* We need to print $0 .. $31 for COP0 registers. */
14855 + if (COP0_REG_P (regno))
14856 + fprintf (file, "$%s", &reg_names[regno][4]);
14857 + else
14858 + fprintf (file, "%s", reg_names[regno]);
14859 }
14860 break;
14861
14862 @@ -7369,6 +7495,12 @@
14863 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
14864 return false;
14865
14866 +#ifdef CVMX_SHARED_BSS_FLAGS
14867 + if (TARGET_OCTEON && TREE_CODE (decl) == VAR_DECL
14868 + && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES (decl)))
14869 + return false;
14870 +#endif
14871 +
14872 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
14873 {
14874 const char *name;
14875 @@ -7595,6 +7727,37 @@
14876 return NULL_RTX;
14877 }
14878
14879 +/* DSP ALU can bypass data with no delays for the following pairs. */
14880 +enum insn_code dspalu_bypass_table[][2] =
14881 +{
14882 + {CODE_FOR_mips_addsc, CODE_FOR_mips_addwc},
14883 + {CODE_FOR_mips_cmpu_eq_qb, CODE_FOR_mips_pick_qb},
14884 + {CODE_FOR_mips_cmpu_lt_qb, CODE_FOR_mips_pick_qb},
14885 + {CODE_FOR_mips_cmpu_le_qb, CODE_FOR_mips_pick_qb},
14886 + {CODE_FOR_mips_cmp_eq_ph, CODE_FOR_mips_pick_ph},
14887 + {CODE_FOR_mips_cmp_lt_ph, CODE_FOR_mips_pick_ph},
14888 + {CODE_FOR_mips_cmp_le_ph, CODE_FOR_mips_pick_ph},
14889 + {CODE_FOR_mips_wrdsp, CODE_FOR_mips_insv}
14890 +};
14891 +
14892 +int
14893 +mips_dspalu_bypass_p (rtx out_insn, rtx in_insn)
14894 +{
14895 + int i;
14896 + int num_bypass = (sizeof (dspalu_bypass_table)
14897 + / (2 * sizeof (enum insn_code)));
14898 + enum insn_code out_icode = INSN_CODE (out_insn);
14899 + enum insn_code in_icode = INSN_CODE (in_insn);
14900 +
14901 + for (i = 0; i < num_bypass; i++)
14902 + {
14903 + if (out_icode == dspalu_bypass_table[i][0]
14904 + && in_icode == dspalu_bypass_table[i][1])
14905 + return true;
14906 + }
14907 +
14908 + return false;
14909 +}
14910 /* Implement ASM_OUTPUT_ASCII. */
14911
14912 void
14913 @@ -7819,11 +7982,19 @@
14914 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
14915
14916 #ifdef HAVE_AS_GNU_ATTRIBUTE
14917 +#ifdef TARGET_MIPS_SDEMTK
14918 + fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
14919 + (!TARGET_NO_FLOAT
14920 + ? (TARGET_HARD_FLOAT
14921 + ? (TARGET_DOUBLE_FLOAT
14922 + ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3) : 0));
14923 +#else
14924 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
14925 (TARGET_HARD_FLOAT_ABI
14926 ? (TARGET_DOUBLE_FLOAT
14927 ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
14928 #endif
14929 +#endif
14930 }
14931
14932 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
14933 @@ -8436,12 +8607,53 @@
14934 return GLOBAL_POINTER_REGNUM;
14935 }
14936
14937 +/* Return true if REGNO is a register that is ordinarily call-clobbered
14938 + but must nevertheless be preserved by an interrupt handler. */
14939 +
14940 +static bool
14941 +mips_interrupt_extra_call_saved_reg_p (unsigned int regno)
14942 +{
14943 + if (MD_REG_P (regno))
14944 + return true;
14945 +
14946 + if (TARGET_DSP && DSP_ACC_REG_P (regno))
14947 + return true;
14948 +
14949 + if (GP_REG_P (regno) && !cfun->machine->use_shadow_register_set_p)
14950 + {
14951 + /* $0 is hard-wired. */
14952 + if (regno == GP_REG_FIRST)
14953 + return false;
14954 +
14955 + /* The interrupt handler can treat kernel registers as
14956 + scratch registers. */
14957 + if (KERNEL_REG_P (regno))
14958 + return false;
14959 +
14960 + /* The function will return the stack pointer to its original value
14961 + anyway. */
14962 + if (regno == STACK_POINTER_REGNUM)
14963 + return false;
14964 +
14965 + /* Otherwise, return true for registers that aren't ordinarily
14966 + call-clobbered. */
14967 + return call_really_used_regs[regno];
14968 + }
14969 +
14970 + return false;
14971 +}
14972 +
14973 /* Return true if the current function should treat register REGNO
14974 as call-saved. */
14975
14976 static bool
14977 mips_cfun_call_saved_reg_p (unsigned int regno)
14978 {
14979 + /* Interrupt handlers need to save extra registers. */
14980 + if (cfun->machine->interrupt_handler_p
14981 + && mips_interrupt_extra_call_saved_reg_p (regno))
14982 + return true;
14983 +
14984 /* call_insns preserve $28 unless they explicitly say otherwise,
14985 so call_really_used_regs[] treats $28 as call-saved. However,
14986 we want the ABI property rather than the default call_insn
14987 @@ -8490,6 +8702,13 @@
14988 if (regno == GP_REG_FIRST + 31 && mips16_cfun_returns_in_fpr_p ())
14989 return true;
14990
14991 + /* If REGNO is ordinarily call-clobbered, we must assume that any
14992 + called function could modify it. */
14993 + if (cfun->machine->interrupt_handler_p
14994 + && !current_function_is_leaf
14995 + && mips_interrupt_extra_call_saved_reg_p (regno))
14996 + return true;
14997 +
14998 return false;
14999 }
15000
15001 @@ -8545,6 +8764,14 @@
15002 C | callee-allocated save area |
15003 | for register varargs |
15004 | |
15005 + +-------------------------------+ <-- frame_pointer_rtx
15006 + | | + cop0_sp_offset
15007 + | COP0 reg save area | + UNITS_PER_WORD
15008 + | |
15009 + +-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
15010 + | | + UNITS_PER_WORD
15011 + | accumulator save area |
15012 + | |
15013 +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
15014 | | + UNITS_PER_HWFPVALUE
15015 | FPR save area |
15016 @@ -8588,6 +8815,28 @@
15017 HOST_WIDE_INT offset, size;
15018 unsigned int regno, i;
15019
15020 + /* Set this function's interrupt properties. */
15021 + if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
15022 + {
15023 + if (!ISA_MIPS32R2)
15024 + error ("the %<interrupt%> attribute requires a MIPS32r2 processor");
15025 + else if (TARGET_HARD_FLOAT)
15026 + error ("the %<interrupt%> attribute requires %<-msoft-float%>");
15027 + else if (TARGET_MIPS16)
15028 + error ("interrupt handlers cannot be MIPS16 functions");
15029 + else
15030 + {
15031 + cfun->machine->interrupt_handler_p = true;
15032 + cfun->machine->use_shadow_register_set_p =
15033 + mips_use_shadow_register_set_p (TREE_TYPE (current_function_decl));
15034 + cfun->machine->keep_interrupts_masked_p =
15035 + mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl));
15036 + cfun->machine->use_debug_exception_return_p =
15037 + mips_use_debug_exception_return_p (TREE_TYPE
15038 + (current_function_decl));
15039 + }
15040 + }
15041 +
15042 frame = &cfun->machine->frame;
15043 memset (frame, 0, sizeof (*frame));
15044 size = get_frame_size ();
15045 @@ -8657,7 +8906,7 @@
15046 }
15047
15048 /* Find out which FPRs we need to save. This loop must iterate over
15049 - the same space as its companion in mips_for_each_saved_reg. */
15050 + the same space as its companion in mips_for_each_saved_gpr_and_fpr. */
15051 if (TARGET_HARD_FLOAT)
15052 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
15053 if (mips_save_reg_p (regno))
15054 @@ -8673,6 +8922,47 @@
15055 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
15056 }
15057
15058 + /* Add in space for the interrupt context information. */
15059 + if (cfun->machine->interrupt_handler_p)
15060 + {
15061 + /* Check HI/LO. */
15062 + if (mips_save_reg_p (LO_REGNUM) || mips_save_reg_p (HI_REGNUM))
15063 + {
15064 + frame->num_acc++;
15065 + frame->acc_mask |= (1 << 0);
15066 + }
15067 +
15068 + /* Check accumulators 1, 2, 3. */
15069 + for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
15070 + if (mips_save_reg_p (i) || mips_save_reg_p (i + 1))
15071 + {
15072 + frame->num_acc++;
15073 + frame->acc_mask |= 1 << (((i - DSP_ACC_REG_FIRST) / 2) + 1);
15074 + }
15075 +
15076 + /* All interrupt context functions need space to preserve STATUS. */
15077 + frame->num_cop0_regs++;
15078 +
15079 + /* If we don't keep interrupts masked, we need to save EPC. */
15080 + if (!cfun->machine->keep_interrupts_masked_p)
15081 + frame->num_cop0_regs++;
15082 + }
15083 +
15084 + /* Move above the accumulator save area. */
15085 + if (frame->num_acc > 0)
15086 + {
15087 + /* Each accumulator needs 2 words. */
15088 + offset += frame->num_acc * 2 * UNITS_PER_WORD;
15089 + frame->acc_sp_offset = offset - UNITS_PER_WORD;
15090 + }
15091 +
15092 + /* Move above the COP0 register save area. */
15093 + if (frame->num_cop0_regs > 0)
15094 + {
15095 + offset += frame->num_cop0_regs * UNITS_PER_WORD;
15096 + frame->cop0_sp_offset = offset - UNITS_PER_WORD;
15097 + }
15098 +
15099 /* Move above the callee-allocated varargs save area. */
15100 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
15101 frame->arg_pointer_offset = offset;
15102 @@ -8686,6 +8976,10 @@
15103 frame->gp_save_offset = frame->gp_sp_offset - offset;
15104 if (frame->fp_sp_offset > 0)
15105 frame->fp_save_offset = frame->fp_sp_offset - offset;
15106 + if (frame->acc_sp_offset > 0)
15107 + frame->acc_save_offset = frame->acc_sp_offset - offset;
15108 + if (frame->num_cop0_regs > 0)
15109 + frame->cop0_save_offset = frame->cop0_sp_offset - offset;
15110
15111 /* MIPS16 code offsets the frame pointer by the size of the outgoing
15112 arguments. This tends to increase the chances of using unextended
15113 @@ -8882,12 +9176,41 @@
15114 fn (gen_rtx_REG (mode, regno), mem);
15115 }
15116
15117 +/* Call FN for each accumulator that is saved by the current function.
15118 + SP_OFFSET is the offset of the current stack pointer from the start
15119 + of the frame. */
15120 +
15121 +static void
15122 +mips_for_each_saved_acc (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
15123 +{
15124 + HOST_WIDE_INT offset;
15125 + int regno;
15126 +
15127 + offset = cfun->machine->frame.acc_sp_offset - sp_offset;
15128 + if (BITSET_P (cfun->machine->frame.acc_mask, 0))
15129 + {
15130 + mips_save_restore_reg (word_mode, LO_REGNUM, offset, fn);
15131 + offset -= UNITS_PER_WORD;
15132 + mips_save_restore_reg (word_mode, HI_REGNUM, offset, fn);
15133 + offset -= UNITS_PER_WORD;
15134 + }
15135 +
15136 + for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
15137 + if (BITSET_P (cfun->machine->frame.acc_mask,
15138 + ((regno - DSP_ACC_REG_FIRST) / 2) + 1))
15139 + {
15140 + mips_save_restore_reg (word_mode, regno, offset, fn);
15141 + offset -= UNITS_PER_WORD;
15142 + }
15143 +}
15144 +
15145 /* Call FN for each register that is saved by the current function.
15146 SP_OFFSET is the offset of the current stack pointer from the start
15147 of the frame. */
15148
15149 static void
15150 -mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
15151 +mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
15152 + mips_save_restore_fn fn)
15153 {
15154 enum machine_mode fpr_mode;
15155 HOST_WIDE_INT offset;
15156 @@ -9075,13 +9398,24 @@
15157 }
15158 else
15159 {
15160 - if (TARGET_MIPS16
15161 - && REGNO (reg) != GP_REG_FIRST + 31
15162 - && !M16_REG_P (REGNO (reg)))
15163 - {
15164 - /* Save a non-MIPS16 register by moving it through a temporary.
15165 - We don't need to do this for $31 since there's a special
15166 - instruction for it. */
15167 + if (REGNO (reg) == HI_REGNUM)
15168 + {
15169 + if (TARGET_64BIT)
15170 + emit_insn (gen_mfhidi_ti (MIPS_PROLOGUE_TEMP (DImode),
15171 + gen_rtx_REG (TImode, MD_REG_FIRST)));
15172 + else
15173 + emit_insn (gen_mfhisi_di (MIPS_PROLOGUE_TEMP (SImode),
15174 + gen_rtx_REG (DImode, MD_REG_FIRST)));
15175 + mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
15176 + }
15177 + else if ((TARGET_MIPS16
15178 + && REGNO (reg) != GP_REG_FIRST + 31
15179 + && !M16_REG_P (REGNO (reg)))
15180 + || ACC_REG_P (REGNO (reg)))
15181 + {
15182 + /* If the register has no direct store instruction, move it
15183 + through a temporary. Note that there's a special MIPS16
15184 + instruction to save $31. */
15185 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
15186 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
15187 }
15188 @@ -9153,6 +9487,14 @@
15189 emit_insn (gen_loadgp_blockage ());
15190 }
15191
15192 +/* A for_each_rtx callback. Stop the search if *X is a kernel register. */
15193 +
15194 +static int
15195 +mips_kernel_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
15196 +{
15197 + return GET_CODE (*x) == REG && KERNEL_REG_P (REGNO (*x));
15198 +}
15199 +
15200 /* Expand the "prologue" pattern. */
15201
15202 void
15203 @@ -9172,7 +9514,8 @@
15204 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
15205 bytes beforehand; this is enough to cover the register save area
15206 without going out of range. */
15207 - if ((frame->mask | frame->fmask) != 0)
15208 + if (((frame->mask | frame->fmask | frame->acc_mask) != 0)
15209 + || frame->num_cop0_regs > 0)
15210 {
15211 HOST_WIDE_INT step1;
15212
15213 @@ -9203,12 +9546,97 @@
15214 }
15215 else
15216 {
15217 - insn = gen_add3_insn (stack_pointer_rtx,
15218 - stack_pointer_rtx,
15219 - GEN_INT (-step1));
15220 - RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
15221 - size -= step1;
15222 - mips_for_each_saved_reg (size, mips_save_reg);
15223 + if (cfun->machine->interrupt_handler_p)
15224 + {
15225 + HOST_WIDE_INT offset;
15226 + rtx mem;
15227 +
15228 + /* If this interrupt is using a shadow register set, we need to
15229 + get the stack pointer from the previous register set. */
15230 + if (cfun->machine->use_shadow_register_set_p)
15231 + emit_insn (gen_mips_rdpgpr (stack_pointer_rtx,
15232 + stack_pointer_rtx));
15233 +
15234 + if (!cfun->machine->keep_interrupts_masked_p)
15235 + {
15236 + /* Move from COP0 Cause to K0. */
15237 + emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K0_REG_NUM),
15238 + gen_rtx_REG (SImode,
15239 + COP0_CAUSE_REG_NUM)));
15240 + /* Move from COP0 EPC to K1. */
15241 + emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
15242 + gen_rtx_REG (SImode,
15243 + COP0_EPC_REG_NUM)));
15244 + }
15245 +
15246 + /* Allocate the first part of the frame. */
15247 + insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
15248 + GEN_INT (-step1));
15249 + RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
15250 + size -= step1;
15251 +
15252 + /* Start at the uppermost location for saving. */
15253 + offset = frame->cop0_sp_offset - size;
15254 + if (!cfun->machine->keep_interrupts_masked_p)
15255 + {
15256 + /* Push EPC into its stack slot. */
15257 + mem = gen_frame_mem (word_mode,
15258 + plus_constant (stack_pointer_rtx,
15259 + offset));
15260 + mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
15261 + offset -= UNITS_PER_WORD;
15262 + }
15263 +
15264 + /* Move from COP0 Status to K1. */
15265 + emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
15266 + gen_rtx_REG (SImode,
15267 + COP0_STATUS_REG_NUM)));
15268 +
15269 + /* Right justify the RIPL in k0. */
15270 + if (!cfun->machine->keep_interrupts_masked_p)
15271 + emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode, K0_REG_NUM),
15272 + gen_rtx_REG (SImode, K0_REG_NUM),
15273 + GEN_INT (CAUSE_IPL)));
15274 +
15275 + /* Push Status into its stack slot. */
15276 + mem = gen_frame_mem (word_mode,
15277 + plus_constant (stack_pointer_rtx, offset));
15278 + mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
15279 + offset -= UNITS_PER_WORD;
15280 +
15281 + /* Insert the RIPL into our copy of SR (k1) as the new IPL. */
15282 + if (!cfun->machine->keep_interrupts_masked_p)
15283 + emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
15284 + GEN_INT (6),
15285 + GEN_INT (SR_IPL),
15286 + gen_rtx_REG (SImode, K0_REG_NUM)));
15287 +
15288 + if (!cfun->machine->keep_interrupts_masked_p)
15289 + /* Enable interrupts by clearing the KSU ERL and EXL bits.
15290 + IE is already the correct value, so we don't have to do
15291 + anything explicit. */
15292 + emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
15293 + GEN_INT (4),
15294 + GEN_INT (SR_EXL),
15295 + gen_rtx_REG (SImode, GP_REG_FIRST)));
15296 + else
15297 + /* Disable interrupts by clearing the KSU, ERL, EXL,
15298 + and IE bits. */
15299 + emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
15300 + GEN_INT (5),
15301 + GEN_INT (SR_IE),
15302 + gen_rtx_REG (SImode, GP_REG_FIRST)));
15303 + }
15304 + else
15305 + {
15306 + insn = gen_add3_insn (stack_pointer_rtx,
15307 + stack_pointer_rtx,
15308 + GEN_INT (-step1));
15309 + RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
15310 + size -= step1;
15311 + }
15312 + mips_for_each_saved_acc (size, mips_save_reg);
15313 + mips_for_each_saved_gpr_and_fpr (size, mips_save_reg);
15314 }
15315 }
15316
15317 @@ -9293,6 +9721,20 @@
15318 pic_offset_table_rtx);
15319 }
15320
15321 + /* We need to search back to the last use of K0 or K1. */
15322 + if (cfun->machine->interrupt_handler_p)
15323 + {
15324 + for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
15325 + if (INSN_P (insn)
15326 + && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
15327 + break;
15328 + /* Emit a move from K1 to COP0 Status after insn. */
15329 + gcc_assert (insn != NULL_RTX);
15330 + emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
15331 + gen_rtx_REG (SImode, K1_REG_NUM)),
15332 + insn);
15333 + }
15334 +
15335 /* If we are profiling, make sure no instructions are scheduled before
15336 the call to mcount. */
15337 if (crtl->profile)
15338 @@ -9309,7 +9751,20 @@
15339 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
15340 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
15341
15342 - if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
15343 + if (REGNO (reg) == HI_REGNUM)
15344 + {
15345 + mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
15346 + if (TARGET_64BIT)
15347 + emit_insn (gen_mthisi_di (gen_rtx_REG (TImode, MD_REG_FIRST),
15348 + MIPS_EPILOGUE_TEMP (DImode),
15349 + gen_rtx_REG (DImode, LO_REGNUM)));
15350 + else
15351 + emit_insn (gen_mthisi_di (gen_rtx_REG (DImode, MD_REG_FIRST),
15352 + MIPS_EPILOGUE_TEMP (SImode),
15353 + gen_rtx_REG (SImode, LO_REGNUM)));
15354 + }
15355 + else if ((TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
15356 + || ACC_REG_P (REGNO (reg)))
15357 {
15358 /* Can't restore directly; move through a temporary. */
15359 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
15360 @@ -9345,7 +9800,7 @@
15361 {
15362 const struct mips_frame_info *frame;
15363 HOST_WIDE_INT step1, step2;
15364 - rtx base, target;
15365 + rtx base, target, insn;
15366
15367 if (!sibcall_p && mips_can_use_return_insn ())
15368 {
15369 @@ -9378,7 +9833,8 @@
15370
15371 /* If we need to restore registers, deallocate as much stack as
15372 possible in the second step without going out of range. */
15373 - if ((frame->mask | frame->fmask) != 0)
15374 + if ((frame->mask | frame->fmask | frame->acc_mask) != 0
15375 + || frame->num_cop0_regs > 0)
15376 {
15377 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
15378 step1 -= step2;
15379 @@ -9440,13 +9896,53 @@
15380 else
15381 {
15382 /* Restore the registers. */
15383 - mips_for_each_saved_reg (frame->total_size - step2, mips_restore_reg);
15384 + mips_for_each_saved_acc (frame->total_size - step2, mips_restore_reg);
15385 + mips_for_each_saved_gpr_and_fpr (frame->total_size - step2,
15386 + mips_restore_reg);
15387
15388 - /* Deallocate the final bit of the frame. */
15389 - if (step2 > 0)
15390 - emit_insn (gen_add3_insn (stack_pointer_rtx,
15391 - stack_pointer_rtx,
15392 - GEN_INT (step2)));
15393 + if (cfun->machine->interrupt_handler_p)
15394 + {
15395 + HOST_WIDE_INT offset;
15396 + rtx mem;
15397 +
15398 + offset = frame->cop0_sp_offset - (frame->total_size - step2);
15399 + if (!cfun->machine->keep_interrupts_masked_p)
15400 + {
15401 + /* Restore the original EPC. */
15402 + mem = gen_frame_mem (word_mode,
15403 + plus_constant (stack_pointer_rtx, offset));
15404 + mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
15405 + offset -= UNITS_PER_WORD;
15406 +
15407 + /* Move to COP0 EPC. */
15408 + emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_EPC_REG_NUM),
15409 + gen_rtx_REG (SImode, K0_REG_NUM)));
15410 + }
15411 +
15412 + /* Restore the original Status. */
15413 + mem = gen_frame_mem (word_mode,
15414 + plus_constant (stack_pointer_rtx, offset));
15415 + mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
15416 + offset -= UNITS_PER_WORD;
15417 +
15418 +      /* If we don't use the shadow register set, we need to update SP.  */
15419 + if (!cfun->machine->use_shadow_register_set_p && step2 > 0)
15420 + emit_insn (gen_add3_insn (stack_pointer_rtx,
15421 + stack_pointer_rtx,
15422 + GEN_INT (step2)));
15423 +
15424 + /* Move to COP0 Status. */
15425 + emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
15426 + gen_rtx_REG (SImode, K0_REG_NUM)));
15427 + }
15428 + else
15429 + {
15430 + /* Deallocate the final bit of the frame. */
15431 + if (step2 > 0)
15432 + emit_insn (gen_add3_insn (stack_pointer_rtx,
15433 + stack_pointer_rtx,
15434 + GEN_INT (step2)));
15435 + }
15436 }
15437
15438 /* Add in the __builtin_eh_return stack adjustment. We need to
15439 @@ -9469,18 +9965,44 @@
15440
15441 if (!sibcall_p)
15442 {
15443 - unsigned int regno;
15444 -
15445 - /* When generating MIPS16 code, the normal mips_for_each_saved_reg
15446 - path will restore the return address into $7 rather than $31. */
15447 - if (TARGET_MIPS16
15448 - && !GENERATE_MIPS16E_SAVE_RESTORE
15449 - && BITSET_P (frame->mask, 31))
15450 - regno = GP_REG_FIRST + 7;
15451 - else
15452 - regno = GP_REG_FIRST + 31;
15453 mips_expand_before_return ();
15454 - emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
15455 + if (cfun->machine->interrupt_handler_p)
15456 + {
15457 + /* Interrupt handlers generate eret or deret. */
15458 + if (cfun->machine->use_debug_exception_return_p)
15459 + emit_jump_insn (gen_mips_deret ());
15460 + else
15461 + emit_jump_insn (gen_mips_eret ());
15462 + }
15463 + else
15464 + {
15465 + unsigned int regno;
15466 +
15467 + /* When generating MIPS16 code, the normal
15468 + mips_for_each_saved_gpr_and_fpr path will restore the return
15469 + address into $7 rather than $31. */
15470 + if (TARGET_MIPS16
15471 + && !GENERATE_MIPS16E_SAVE_RESTORE
15472 + && BITSET_P (frame->mask, 31))
15473 + regno = GP_REG_FIRST + 7;
15474 + else
15475 + regno = GP_REG_FIRST + 31;
15476 + emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
15477 + }
15478 + }
15479 +
15480 + /* Search from the beginning to the first use of K0 or K1. */
15481 + if (cfun->machine->interrupt_handler_p
15482 + && !cfun->machine->keep_interrupts_masked_p)
15483 + {
15484 + for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
15485 + if (INSN_P (insn)
15486 + && for_each_rtx (&PATTERN(insn), mips_kernel_reg_p, NULL))
15487 + break;
15488 + gcc_assert (insn != NULL_RTX);
15489 +      /* Insert code to disable interrupts before the first use of K0 or K1.  */
15490 + emit_insn_before (gen_mips_di (), insn);
15491 + emit_insn_before (gen_mips_ehb (), insn);
15492 }
15493 }
15494 \f
15495 @@ -9491,6 +10013,10 @@
15496 bool
15497 mips_can_use_return_insn (void)
15498 {
15499 + /* Interrupt handlers need to go through the epilogue. */
15500 + if (cfun->machine->interrupt_handler_p)
15501 + return false;
15502 +
15503 if (!reload_completed)
15504 return false;
15505
15506 @@ -10422,10 +10948,15 @@
15507 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
15508 }
15509 else if (GENERATE_DIVIDE_TRAPS)
15510 - {
15511 - output_asm_insn (s, operands);
15512 - s = "teq\t%2,%.,7";
15513 - }
15514 + {
15515 + if (TUNE_74K)
15516 + output_asm_insn ("teq\t%2,%.,7", operands);
15517 + else
15518 + {
15519 + output_asm_insn (s, operands);
15520 + s = "teq\t%2,%.,7";
15521 + }
15522 + }
15523 else
15524 {
15525 output_asm_insn ("%(bne\t%2,%.,1f", operands);
15526 @@ -10737,7 +11268,17 @@
15527 ready[pos2] = temp;
15528 }
15529 }
15530 -\f
15531 +
15532 +int
15533 +mips_mult_madd_chain_bypass_p (rtx out_insn ATTRIBUTE_UNUSED,
15534 + rtx in_insn ATTRIBUTE_UNUSED)
15535 +{
15536 + if (reload_completed)
15537 + return false;
15538 + else
15539 + return true;
15540 +}
15541 +
15542 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
15543 that may clobber hi or lo. */
15544 static rtx mips_macc_chains_last_hilo;
15545 @@ -13910,6 +14451,14 @@
15546 long as any indirect jumps use $25. */
15547 flag_pic = 1;
15548
15549 + /* For SDE, switch on ABICALLS mode if -fpic or -fpie were used, and the
15550 + user hasn't explicitly disabled these modes. */
15551 + if (TARGET_MIPS_SDE
15552 + && (flag_pic || flag_pie) && !TARGET_ABICALLS
15553 + && !((target_flags_explicit & MASK_ABICALLS))
15554 + && mips_abi != ABI_EABI)
15555 + target_flags |= MASK_ABICALLS;
15556 +
15557 /* -mvr4130-align is a "speed over size" optimization: it usually produces
15558 faster code, but at the expense of more nops. Enable it at -O3 and
15559 above. */
15560 @@ -13984,26 +14533,46 @@
15561 if (TARGET_DSPR2)
15562 target_flags |= MASK_DSP;
15563
15564 - /* .eh_frame addresses should be the same width as a C pointer.
15565 - Most MIPS ABIs support only one pointer size, so the assembler
15566 - will usually know exactly how big an .eh_frame address is.
15567 -
15568 - Unfortunately, this is not true of the 64-bit EABI. The ABI was
15569 - originally defined to use 64-bit pointers (i.e. it is LP64), and
15570 - this is still the default mode. However, we also support an n32-like
15571 - ILP32 mode, which is selected by -mlong32. The problem is that the
15572 - assembler has traditionally not had an -mlong option, so it has
15573 - traditionally not known whether we're using the ILP32 or LP64 form.
15574 -
15575 - As it happens, gas versions up to and including 2.19 use _32-bit_
15576 - addresses for EABI64 .cfi_* directives. This is wrong for the
15577 - default LP64 mode, so we can't use the directives by default.
15578 - Moreover, since gas's current behavior is at odds with gcc's
15579 - default behavior, it seems unwise to rely on future versions
15580 - of gas behaving the same way. We therefore avoid using .cfi
15581 - directives for -mlong32 as well. */
15582 - if (mips_abi == ABI_EABI && TARGET_64BIT)
15583 - flag_dwarf2_cfi_asm = 0;
15584 + /* Use the traditional method of generating .eh_frames.
15585 + We need this for two reasons:
15586 +
15587 + - .eh_frame addresses should be the same width as a C pointer.
15588 + Most MIPS ABIs support only one pointer size, so the assembler
15589 + will usually know exactly how big an .eh_frame address is.
15590 +
15591 + Unfortunately, this is not true of the 64-bit EABI. The ABI was
15592 + originally defined to use 64-bit pointers (i.e. it is LP64), and
15593 + this is still the default mode. However, we also support an n32-like
15594 + ILP32 mode, which is selected by -mlong32. The problem is that the
15595 + assembler has traditionally not had an -mlong option, so it has
15596 + traditionally not known whether we're using the ILP32 or LP64 form.
15597 +
15598 + As it happens, gas versions up to and including 2.19 use _32-bit_
15599 + addresses for EABI64 .cfi_* directives. This is wrong for the
15600 + default LP64 mode, so we can't use the directives by default.
15601 + Moreover, since gas's current behavior is at odds with gcc's
15602 + default behavior, it seems unwise to rely on future versions
15603 + of gas behaving the same way. We therefore avoid using .cfi
15604 + directives for -mlong32 as well.
15605 +
15606 + - .cfi* directives generate read-only .eh_frame sections.
15607 + However, MIPS has traditionally not allowed directives like:
15608 +
15609 + .long x-.
15610 +
15611 + in cases where "x" is in a different section, or is not defined
15612 + in the same assembly file. We have therefore traditionally
15613 + used absolute addresses and a writable .eh_frame instead.
15614 +
15615 + The linker is able to convert most of these absolute addresses
15616 + into PC-relative form where doing so is necessary to avoid
15617 + relocations. However, until 2.21, it wasn't able to do this
15618 + for indirect encodings or personality routines.
15619 +
15620 + GNU ld 2.21 and GCC 4.5 have support for read-only .eh_frames,
15621 + but for the time being, we should stick to the approach used
15622 + in 4.3 and earlier. */
15623 + flag_dwarf2_cfi_asm = 0;
15624
15625 mips_init_print_operand_punct ();
15626
15627 @@ -14242,6 +14811,178 @@
15628 reg_alloc_order[24] = 0;
15629 }
15630 }
15631 +
15632 +/* Implement EPILOGUE_USES. */
15633 +
15634 +bool
15635 +mips_epilogue_uses (unsigned int regno)
15636 +{
15637 + /* Say that the epilogue uses the return address register. Note that
15638 + in the case of sibcalls, the values "used by the epilogue" are
15639 + considered live at the start of the called function. */
15640 + if (regno == 31)
15641 + return true;
15642 +
15643 + /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
15644 + See the comment above load_call<mode> for details. */
15645 + if (TARGET_USE_GOT && (regno) == GOT_VERSION_REGNUM)
15646 + return true;
15647 +
15648 + /* An interrupt handler must preserve some registers that are
15649 + ordinarily call-clobbered. */
15650 + if (cfun->machine->interrupt_handler_p
15651 + && mips_interrupt_extra_call_saved_reg_p (regno))
15652 + return true;
15653 +
15654 + return false;
15655 +}
15656 +\f
15657 +#ifdef CVMX_SHARED_BSS_FLAGS
15658 +/* Handle a "cvmx_shared" attribute; arguments as in
15659 + struct attribute_spec.handler. */
15660 +
15661 +static tree
15662 +octeon_handle_cvmx_shared_attribute (tree *node, tree name,
15663 + tree args ATTRIBUTE_UNUSED,
15664 + int flags ATTRIBUTE_UNUSED,
15665 + bool *no_add_attrs)
15666 +{
15667 + if (TREE_CODE (*node) != VAR_DECL)
15668 + {
15669 + warning (OPT_Wattributes, "%qs attribute only applies to variables",
15670 + IDENTIFIER_POINTER (name));
15671 + *no_add_attrs = true;
15672 + }
15673 +
15674 + return NULL_TREE;
15675 +}
15676 +\f
15677 +/* Switch to the appropriate section for output of DECL.
15678 + DECL is either a `VAR_DECL' node or a constant of some sort.
15679 + RELOC indicates whether forming the initial value of DECL requires
15680 + link-time relocations. */
15681 +
15682 +static section *
15683 +octeon_select_section (tree decl, int reloc, unsigned HOST_WIDE_INT align)
15684 +{
15685 + if (decl && TREE_CODE (decl) == VAR_DECL
15686 + && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES (decl)))
15687 + {
15688 + const char *sname = NULL;
15689 + unsigned int flags = SECTION_WRITE;
15690 +
15691 + switch (categorize_decl_for_section (decl, reloc))
15692 + {
15693 + case SECCAT_DATA:
15694 + case SECCAT_SDATA:
15695 + case SECCAT_RODATA:
15696 + case SECCAT_SRODATA:
15697 + case SECCAT_RODATA_MERGE_STR:
15698 + case SECCAT_RODATA_MERGE_STR_INIT:
15699 + case SECCAT_RODATA_MERGE_CONST:
15700 + case SECCAT_DATA_REL:
15701 + case SECCAT_DATA_REL_LOCAL:
15702 + case SECCAT_DATA_REL_RO:
15703 + case SECCAT_DATA_REL_RO_LOCAL:
15704 + sname = ".cvmx_shared";
15705 + break;
15706 + case SECCAT_BSS:
15707 + case SECCAT_SBSS:
15708 + sname = ".cvmx_shared_bss";
15709 + flags |= SECTION_BSS;
15710 + break;
15711 + case SECCAT_TEXT:
15712 + case SECCAT_TDATA:
15713 + case SECCAT_TBSS:
15714 + break;
15715 + }
15716 + if (sname)
15717 + {
15718 + return get_section (sname, flags, decl);
15719 + }
15720 + }
15721 + return default_elf_select_section (decl, reloc, align);
15722 +}
15723 +\f
15724 +/* Build up a unique section name, expressed as a
15725 + STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
15726 + RELOC indicates whether the initial value of EXP requires
15727 + link-time relocations. */
15728 +
15729 +static void
15730 +octeon_unique_section (tree decl, int reloc)
15731 +{
15732 + if (decl && TREE_CODE (decl) == VAR_DECL
15733 + && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES (decl)))
15734 + {
15735 + const char *sname = NULL;
15736 +
15737 + if (! DECL_ONE_ONLY (decl))
15738 + {
15739 + section *sect;
15740 + sect = octeon_select_section (decl, reloc, DECL_ALIGN (decl));
15741 + DECL_SECTION_NAME (decl) = build_string (strlen (sect->named.name),
15742 + sect->named.name);
15743 + return;
15744 + }
15745 +
15746 + switch (categorize_decl_for_section (decl, reloc))
15747 + {
15748 + case SECCAT_BSS:
15749 + case SECCAT_SBSS:
15750 + sname = ".cvmx_shared_bss.linkonce.";
15751 + break;
15752 + case SECCAT_SDATA:
15753 + case SECCAT_DATA:
15754 + case SECCAT_DATA_REL:
15755 + case SECCAT_DATA_REL_LOCAL:
15756 + case SECCAT_DATA_REL_RO:
15757 + case SECCAT_DATA_REL_RO_LOCAL:
15758 + case SECCAT_RODATA:
15759 + case SECCAT_SRODATA:
15760 + case SECCAT_RODATA_MERGE_STR:
15761 + case SECCAT_RODATA_MERGE_STR_INIT:
15762 + case SECCAT_RODATA_MERGE_CONST:
15763 + sname = ".cvmx_shared.linkonce.";
15764 + break;
15765 + case SECCAT_TEXT:
15766 + case SECCAT_TDATA:
15767 + case SECCAT_TBSS:
15768 + break;
15769 + }
15770 + if (sname)
15771 + {
15772 + const char *name;
15773 + size_t plen, nlen;
15774 + char *string;
15775 + plen = strlen (sname);
15776 +
15777 + name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
15778 + name = targetm.strip_name_encoding (name);
15779 + nlen = strlen (name);
15780 +
15781 + string = alloca (plen + nlen + 1);
15782 + memcpy (string, sname, plen);
15783 + memcpy (string + plen, name, nlen + 1);
15784 + DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
15785 + return;
15786 + }
15787 + }
15788 + default_unique_section (decl, reloc);
15789 +}
15790 +\f
15791 +/* Emit an uninitialized cvmx_shared variable. */
15792 +void
15793 +octeon_output_shared_variable (FILE *stream, tree decl, const char *name,
15794 + unsigned HOST_WIDE_INT size, int align)
15795 +{
15796 + switch_to_section (get_section (".cvmx_shared_bss", CVMX_SHARED_BSS_FLAGS,
15797 + NULL_TREE));
15798 + ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
15799 + ASM_DECLARE_OBJECT_NAME (stream, name, decl);
15800 + ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
15801 +}
15802 +#endif
15803 \f
15804 /* Initialize the GCC target structure. */
15805 #undef TARGET_ASM_ALIGNED_HI_OP
15806 --- a/gcc/config/mips/mips-dsp.md
15807 +++ b/gcc/config/mips/mips-dsp.md
15808 @@ -42,9 +42,9 @@
15809 (match_operand:DSPV 2 "register_operand" "d")))
15810 (set (reg:CCDSP CCDSP_OU_REGNUM)
15811 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDQ))])]
15812 - ""
15813 + "ISA_HAS_DSP"
15814 "add<DSPV:dspfmt1>.<DSPV:dspfmt2>\t%0,%1,%2"
15815 - [(set_attr "type" "arith")
15816 + [(set_attr "type" "dspalu")
15817 (set_attr "mode" "SI")])
15818
15819 (define_insn "mips_add<DSP:dspfmt1>_s_<DSP:dspfmt2>"
15820 @@ -55,9 +55,9 @@
15821 UNSPEC_ADDQ_S))
15822 (set (reg:CCDSP CCDSP_OU_REGNUM)
15823 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDQ_S))])]
15824 - ""
15825 + "ISA_HAS_DSP"
15826 "add<DSP:dspfmt1>_s.<DSP:dspfmt2>\t%0,%1,%2"
15827 - [(set_attr "type" "arith")
15828 + [(set_attr "type" "dspalusat")
15829 (set_attr "mode" "SI")])
15830
15831 ;; SUBQ*
15832 @@ -70,7 +70,7 @@
15833 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBQ))])]
15834 "ISA_HAS_DSP"
15835 "sub<DSPV:dspfmt1>.<DSPV:dspfmt2>\t%0,%1,%2"
15836 - [(set_attr "type" "arith")
15837 + [(set_attr "type" "dspalu")
15838 (set_attr "mode" "SI")])
15839
15840 (define_insn "mips_sub<DSP:dspfmt1>_s_<DSP:dspfmt2>"
15841 @@ -83,7 +83,7 @@
15842 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBQ_S))])]
15843 "ISA_HAS_DSP"
15844 "sub<DSP:dspfmt1>_s.<DSP:dspfmt2>\t%0,%1,%2"
15845 - [(set_attr "type" "arith")
15846 + [(set_attr "type" "dspalusat")
15847 (set_attr "mode" "SI")])
15848
15849 ;; ADDSC
15850 @@ -97,7 +97,7 @@
15851 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDSC))])]
15852 "ISA_HAS_DSP"
15853 "addsc\t%0,%1,%2"
15854 - [(set_attr "type" "arith")
15855 + [(set_attr "type" "dspalu")
15856 (set_attr "mode" "SI")])
15857
15858 ;; ADDWC
15859 @@ -112,7 +112,7 @@
15860 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDWC))])]
15861 "ISA_HAS_DSP"
15862 "addwc\t%0,%1,%2"
15863 - [(set_attr "type" "arith")
15864 + [(set_attr "type" "dspalu")
15865 (set_attr "mode" "SI")])
15866
15867 ;; MODSUB
15868 @@ -123,7 +123,7 @@
15869 UNSPEC_MODSUB))]
15870 "ISA_HAS_DSP"
15871 "modsub\t%0,%1,%2"
15872 - [(set_attr "type" "arith")
15873 + [(set_attr "type" "dspalu")
15874 (set_attr "mode" "SI")])
15875
15876 ;; RADDU*
15877 @@ -133,7 +133,7 @@
15878 UNSPEC_RADDU_W_QB))]
15879 "ISA_HAS_DSP"
15880 "raddu.w.qb\t%0,%1"
15881 - [(set_attr "type" "arith")
15882 + [(set_attr "type" "dspalu")
15883 (set_attr "mode" "SI")])
15884
15885 ;; ABSQ*
15886 @@ -146,7 +146,7 @@
15887 (unspec:CCDSP [(match_dup 1)] UNSPEC_ABSQ_S))])]
15888 "ISA_HAS_DSP"
15889 "absq_s.<DSPQ:dspfmt2>\t%0,%1"
15890 - [(set_attr "type" "arith")
15891 + [(set_attr "type" "dspalusat")
15892 (set_attr "mode" "SI")])
15893
15894 ;; PRECRQ*
15895 @@ -157,7 +157,7 @@
15896 UNSPEC_PRECRQ_QB_PH))]
15897 "ISA_HAS_DSP"
15898 "precrq.qb.ph\t%0,%1,%2"
15899 - [(set_attr "type" "arith")
15900 + [(set_attr "type" "dspalu")
15901 (set_attr "mode" "SI")])
15902
15903 (define_insn "mips_precrq_ph_w"
15904 @@ -167,7 +167,7 @@
15905 UNSPEC_PRECRQ_PH_W))]
15906 "ISA_HAS_DSP"
15907 "precrq.ph.w\t%0,%1,%2"
15908 - [(set_attr "type" "arith")
15909 + [(set_attr "type" "dspalu")
15910 (set_attr "mode" "SI")])
15911
15912 (define_insn "mips_precrq_rs_ph_w"
15913 @@ -181,7 +181,7 @@
15914 UNSPEC_PRECRQ_RS_PH_W))])]
15915 "ISA_HAS_DSP"
15916 "precrq_rs.ph.w\t%0,%1,%2"
15917 - [(set_attr "type" "arith")
15918 + [(set_attr "type" "dspalu")
15919 (set_attr "mode" "SI")])
15920
15921 ;; PRECRQU*
15922 @@ -196,7 +196,7 @@
15923 UNSPEC_PRECRQU_S_QB_PH))])]
15924 "ISA_HAS_DSP"
15925 "precrqu_s.qb.ph\t%0,%1,%2"
15926 - [(set_attr "type" "arith")
15927 + [(set_attr "type" "dspalusat")
15928 (set_attr "mode" "SI")])
15929
15930 ;; PRECEQ*
15931 @@ -206,7 +206,7 @@
15932 UNSPEC_PRECEQ_W_PHL))]
15933 "ISA_HAS_DSP"
15934 "preceq.w.phl\t%0,%1"
15935 - [(set_attr "type" "arith")
15936 + [(set_attr "type" "dspalu")
15937 (set_attr "mode" "SI")])
15938
15939 (define_insn "mips_preceq_w_phr"
15940 @@ -215,7 +215,7 @@
15941 UNSPEC_PRECEQ_W_PHR))]
15942 "ISA_HAS_DSP"
15943 "preceq.w.phr\t%0,%1"
15944 - [(set_attr "type" "arith")
15945 + [(set_attr "type" "dspalu")
15946 (set_attr "mode" "SI")])
15947
15948 ;; PRECEQU*
15949 @@ -225,7 +225,7 @@
15950 UNSPEC_PRECEQU_PH_QBL))]
15951 "ISA_HAS_DSP"
15952 "precequ.ph.qbl\t%0,%1"
15953 - [(set_attr "type" "arith")
15954 + [(set_attr "type" "dspalu")
15955 (set_attr "mode" "SI")])
15956
15957 (define_insn "mips_precequ_ph_qbr"
15958 @@ -234,7 +234,7 @@
15959 UNSPEC_PRECEQU_PH_QBR))]
15960 "ISA_HAS_DSP"
15961 "precequ.ph.qbr\t%0,%1"
15962 - [(set_attr "type" "arith")
15963 + [(set_attr "type" "dspalu")
15964 (set_attr "mode" "SI")])
15965
15966 (define_insn "mips_precequ_ph_qbla"
15967 @@ -243,7 +243,7 @@
15968 UNSPEC_PRECEQU_PH_QBLA))]
15969 "ISA_HAS_DSP"
15970 "precequ.ph.qbla\t%0,%1"
15971 - [(set_attr "type" "arith")
15972 + [(set_attr "type" "dspalu")
15973 (set_attr "mode" "SI")])
15974
15975 (define_insn "mips_precequ_ph_qbra"
15976 @@ -252,7 +252,7 @@
15977 UNSPEC_PRECEQU_PH_QBRA))]
15978 "ISA_HAS_DSP"
15979 "precequ.ph.qbra\t%0,%1"
15980 - [(set_attr "type" "arith")
15981 + [(set_attr "type" "dspalu")
15982 (set_attr "mode" "SI")])
15983
15984 ;; PRECEU*
15985 @@ -262,7 +262,7 @@
15986 UNSPEC_PRECEU_PH_QBL))]
15987 "ISA_HAS_DSP"
15988 "preceu.ph.qbl\t%0,%1"
15989 - [(set_attr "type" "arith")
15990 + [(set_attr "type" "dspalu")
15991 (set_attr "mode" "SI")])
15992
15993 (define_insn "mips_preceu_ph_qbr"
15994 @@ -271,7 +271,7 @@
15995 UNSPEC_PRECEU_PH_QBR))]
15996 "ISA_HAS_DSP"
15997 "preceu.ph.qbr\t%0,%1"
15998 - [(set_attr "type" "arith")
15999 + [(set_attr "type" "dspalu")
16000 (set_attr "mode" "SI")])
16001
16002 (define_insn "mips_preceu_ph_qbla"
16003 @@ -280,7 +280,7 @@
16004 UNSPEC_PRECEU_PH_QBLA))]
16005 "ISA_HAS_DSP"
16006 "preceu.ph.qbla\t%0,%1"
16007 - [(set_attr "type" "arith")
16008 + [(set_attr "type" "dspalu")
16009 (set_attr "mode" "SI")])
16010
16011 (define_insn "mips_preceu_ph_qbra"
16012 @@ -289,7 +289,7 @@
16013 UNSPEC_PRECEU_PH_QBRA))]
16014 "ISA_HAS_DSP"
16015 "preceu.ph.qbra\t%0,%1"
16016 - [(set_attr "type" "arith")
16017 + [(set_attr "type" "dspalu")
16018 (set_attr "mode" "SI")])
16019
16020 ;; Table 2-2. MIPS DSP ASE Instructions: Shift
16021 @@ -313,7 +313,7 @@
16022 }
16023 return "shllv.<DSPV:dspfmt2>\t%0,%1,%2";
16024 }
16025 - [(set_attr "type" "shift")
16026 + [(set_attr "type" "dspalu")
16027 (set_attr "mode" "SI")])
16028
16029 (define_insn "mips_shll_s_<DSPQ:dspfmt2>"
16030 @@ -335,7 +335,7 @@
16031 }
16032 return "shllv_s.<DSPQ:dspfmt2>\t%0,%1,%2";
16033 }
16034 - [(set_attr "type" "shift")
16035 + [(set_attr "type" "dspalusat")
16036 (set_attr "mode" "SI")])
16037
16038 ;; SHRL*
16039 @@ -354,7 +354,7 @@
16040 }
16041 return "shrlv.qb\t%0,%1,%2";
16042 }
16043 - [(set_attr "type" "shift")
16044 + [(set_attr "type" "dspalu")
16045 (set_attr "mode" "SI")])
16046
16047 ;; SHRA*
16048 @@ -373,7 +373,7 @@
16049 }
16050 return "shrav.ph\t%0,%1,%2";
16051 }
16052 - [(set_attr "type" "shift")
16053 + [(set_attr "type" "dspalu")
16054 (set_attr "mode" "SI")])
16055
16056 (define_insn "mips_shra_r_<DSPQ:dspfmt2>"
16057 @@ -392,7 +392,7 @@
16058 }
16059 return "shrav_r.<DSPQ:dspfmt2>\t%0,%1,%2";
16060 }
16061 - [(set_attr "type" "shift")
16062 + [(set_attr "type" "dspalu")
16063 (set_attr "mode" "SI")])
16064
16065 ;; Table 2-3. MIPS DSP ASE Instructions: Multiply
16066 @@ -478,7 +478,7 @@
16067 UNSPEC_DPAU_H_QBL))]
16068 "ISA_HAS_DSP && !TARGET_64BIT"
16069 "dpau.h.qbl\t%q0,%2,%3"
16070 - [(set_attr "type" "imadd")
16071 + [(set_attr "type" "dspmac")
16072 (set_attr "mode" "SI")])
16073
16074 (define_insn "mips_dpau_h_qbr"
16075 @@ -489,7 +489,7 @@
16076 UNSPEC_DPAU_H_QBR))]
16077 "ISA_HAS_DSP && !TARGET_64BIT"
16078 "dpau.h.qbr\t%q0,%2,%3"
16079 - [(set_attr "type" "imadd")
16080 + [(set_attr "type" "dspmac")
16081 (set_attr "mode" "SI")])
16082
16083 ;; DPSU*
16084 @@ -501,7 +501,7 @@
16085 UNSPEC_DPSU_H_QBL))]
16086 "ISA_HAS_DSP && !TARGET_64BIT"
16087 "dpsu.h.qbl\t%q0,%2,%3"
16088 - [(set_attr "type" "imadd")
16089 + [(set_attr "type" "dspmac")
16090 (set_attr "mode" "SI")])
16091
16092 (define_insn "mips_dpsu_h_qbr"
16093 @@ -512,7 +512,7 @@
16094 UNSPEC_DPSU_H_QBR))]
16095 "ISA_HAS_DSP && !TARGET_64BIT"
16096 "dpsu.h.qbr\t%q0,%2,%3"
16097 - [(set_attr "type" "imadd")
16098 + [(set_attr "type" "dspmac")
16099 (set_attr "mode" "SI")])
16100
16101 ;; DPAQ*
16102 @@ -528,7 +528,7 @@
16103 UNSPEC_DPAQ_S_W_PH))])]
16104 "ISA_HAS_DSP && !TARGET_64BIT"
16105 "dpaq_s.w.ph\t%q0,%2,%3"
16106 - [(set_attr "type" "imadd")
16107 + [(set_attr "type" "dspmac")
16108 (set_attr "mode" "SI")])
16109
16110 ;; DPSQ*
16111 @@ -544,7 +544,7 @@
16112 UNSPEC_DPSQ_S_W_PH))])]
16113 "ISA_HAS_DSP && !TARGET_64BIT"
16114 "dpsq_s.w.ph\t%q0,%2,%3"
16115 - [(set_attr "type" "imadd")
16116 + [(set_attr "type" "dspmac")
16117 (set_attr "mode" "SI")])
16118
16119 ;; MULSAQ*
16120 @@ -560,7 +560,7 @@
16121 UNSPEC_MULSAQ_S_W_PH))])]
16122 "ISA_HAS_DSP && !TARGET_64BIT"
16123 "mulsaq_s.w.ph\t%q0,%2,%3"
16124 - [(set_attr "type" "imadd")
16125 + [(set_attr "type" "dspmac")
16126 (set_attr "mode" "SI")])
16127
16128 ;; DPAQ*
16129 @@ -576,7 +576,7 @@
16130 UNSPEC_DPAQ_SA_L_W))])]
16131 "ISA_HAS_DSP && !TARGET_64BIT"
16132 "dpaq_sa.l.w\t%q0,%2,%3"
16133 - [(set_attr "type" "imadd")
16134 + [(set_attr "type" "dspmacsat")
16135 (set_attr "mode" "SI")])
16136
16137 ;; DPSQ*
16138 @@ -592,7 +592,7 @@
16139 UNSPEC_DPSQ_SA_L_W))])]
16140 "ISA_HAS_DSP && !TARGET_64BIT"
16141 "dpsq_sa.l.w\t%q0,%2,%3"
16142 - [(set_attr "type" "imadd")
16143 + [(set_attr "type" "dspmacsat")
16144 (set_attr "mode" "SI")])
16145
16146 ;; MAQ*
16147 @@ -608,7 +608,7 @@
16148 UNSPEC_MAQ_S_W_PHL))])]
16149 "ISA_HAS_DSP && !TARGET_64BIT"
16150 "maq_s.w.phl\t%q0,%2,%3"
16151 - [(set_attr "type" "imadd")
16152 + [(set_attr "type" "dspmac")
16153 (set_attr "mode" "SI")])
16154
16155 (define_insn "mips_maq_s_w_phr"
16156 @@ -623,7 +623,7 @@
16157 UNSPEC_MAQ_S_W_PHR))])]
16158 "ISA_HAS_DSP && !TARGET_64BIT"
16159 "maq_s.w.phr\t%q0,%2,%3"
16160 - [(set_attr "type" "imadd")
16161 + [(set_attr "type" "dspmac")
16162 (set_attr "mode" "SI")])
16163
16164 ;; MAQ_SA*
16165 @@ -639,7 +639,7 @@
16166 UNSPEC_MAQ_SA_W_PHL))])]
16167 "ISA_HAS_DSP && !TARGET_64BIT"
16168 "maq_sa.w.phl\t%q0,%2,%3"
16169 - [(set_attr "type" "imadd")
16170 + [(set_attr "type" "dspmacsat")
16171 (set_attr "mode" "SI")])
16172
16173 (define_insn "mips_maq_sa_w_phr"
16174 @@ -654,7 +654,7 @@
16175 UNSPEC_MAQ_SA_W_PHR))])]
16176 "ISA_HAS_DSP && !TARGET_64BIT"
16177 "maq_sa.w.phr\t%q0,%2,%3"
16178 - [(set_attr "type" "imadd")
16179 + [(set_attr "type" "dspmacsat")
16180 (set_attr "mode" "SI")])
16181
16182 ;; Table 2-4. MIPS DSP ASE Instructions: General Bit/Manipulation
16183 @@ -665,7 +665,7 @@
16184 UNSPEC_BITREV))]
16185 "ISA_HAS_DSP"
16186 "bitrev\t%0,%1"
16187 - [(set_attr "type" "arith")
16188 + [(set_attr "type" "dspalu")
16189 (set_attr "mode" "SI")])
16190
16191 ;; INSV
16192 @@ -678,7 +678,7 @@
16193 UNSPEC_INSV))]
16194 "ISA_HAS_DSP"
16195 "insv\t%0,%2"
16196 - [(set_attr "type" "arith")
16197 + [(set_attr "type" "dspalu")
16198 (set_attr "mode" "SI")])
16199
16200 ;; REPL*
16201 @@ -696,7 +696,7 @@
16202 }
16203 return "replv.qb\t%0,%1";
16204 }
16205 - [(set_attr "type" "arith")
16206 + [(set_attr "type" "dspalu")
16207 (set_attr "mode" "SI")])
16208
16209 (define_insn "mips_repl_ph"
16210 @@ -707,7 +707,7 @@
16211 "@
16212 repl.ph\t%0,%1
16213 replv.ph\t%0,%1"
16214 - [(set_attr "type" "arith")
16215 + [(set_attr "type" "dspalu")
16216 (set_attr "mode" "SI")])
16217
16218 ;; Table 2-5. MIPS DSP ASE Instructions: Compare-Pick
16219 @@ -720,7 +720,7 @@
16220 UNSPEC_CMP_EQ))]
16221 "ISA_HAS_DSP"
16222 "cmp<DSPV:dspfmt1_1>.eq.<DSPV:dspfmt2>\t%0,%1"
16223 - [(set_attr "type" "arith")
16224 + [(set_attr "type" "dspalu")
16225 (set_attr "mode" "SI")])
16226
16227 (define_insn "mips_cmp<DSPV:dspfmt1_1>_lt_<DSPV:dspfmt2>"
16228 @@ -731,7 +731,7 @@
16229 UNSPEC_CMP_LT))]
16230 "ISA_HAS_DSP"
16231 "cmp<DSPV:dspfmt1_1>.lt.<DSPV:dspfmt2>\t%0,%1"
16232 - [(set_attr "type" "arith")
16233 + [(set_attr "type" "dspalu")
16234 (set_attr "mode" "SI")])
16235
16236 (define_insn "mips_cmp<DSPV:dspfmt1_1>_le_<DSPV:dspfmt2>"
16237 @@ -742,7 +742,7 @@
16238 UNSPEC_CMP_LE))]
16239 "ISA_HAS_DSP"
16240 "cmp<DSPV:dspfmt1_1>.le.<DSPV:dspfmt2>\t%0,%1"
16241 - [(set_attr "type" "arith")
16242 + [(set_attr "type" "dspalu")
16243 (set_attr "mode" "SI")])
16244
16245 (define_insn "mips_cmpgu_eq_qb"
16246 @@ -752,7 +752,7 @@
16247 UNSPEC_CMPGU_EQ_QB))]
16248 "ISA_HAS_DSP"
16249 "cmpgu.eq.qb\t%0,%1,%2"
16250 - [(set_attr "type" "arith")
16251 + [(set_attr "type" "dspalu")
16252 (set_attr "mode" "SI")])
16253
16254 (define_insn "mips_cmpgu_lt_qb"
16255 @@ -762,7 +762,7 @@
16256 UNSPEC_CMPGU_LT_QB))]
16257 "ISA_HAS_DSP"
16258 "cmpgu.lt.qb\t%0,%1,%2"
16259 - [(set_attr "type" "arith")
16260 + [(set_attr "type" "dspalu")
16261 (set_attr "mode" "SI")])
16262
16263 (define_insn "mips_cmpgu_le_qb"
16264 @@ -772,7 +772,7 @@
16265 UNSPEC_CMPGU_LE_QB))]
16266 "ISA_HAS_DSP"
16267 "cmpgu.le.qb\t%0,%1,%2"
16268 - [(set_attr "type" "arith")
16269 + [(set_attr "type" "dspalu")
16270 (set_attr "mode" "SI")])
16271
16272 ;; PICK*
16273 @@ -784,7 +784,7 @@
16274 UNSPEC_PICK))]
16275 "ISA_HAS_DSP"
16276 "pick.<DSPV:dspfmt2>\t%0,%1,%2"
16277 - [(set_attr "type" "arith")
16278 + [(set_attr "type" "dspalu")
16279 (set_attr "mode" "SI")])
16280
16281 ;; PACKRL*
16282 @@ -795,7 +795,7 @@
16283 UNSPEC_PACKRL_PH))]
16284 "ISA_HAS_DSP"
16285 "packrl.ph\t%0,%1,%2"
16286 - [(set_attr "type" "arith")
16287 + [(set_attr "type" "dspalu")
16288 (set_attr "mode" "SI")])
16289
16290 ;; Table 2-6. MIPS DSP ASE Instructions: Accumulator and DSPControl Access
16291 @@ -818,7 +818,7 @@
16292 }
16293 return "extrv.w\t%0,%q1,%2";
16294 }
16295 - [(set_attr "type" "mfhilo")
16296 + [(set_attr "type" "accext")
16297 (set_attr "mode" "SI")])
16298
16299 (define_insn "mips_extr_r_w"
16300 @@ -839,7 +839,7 @@
16301 }
16302 return "extrv_r.w\t%0,%q1,%2";
16303 }
16304 - [(set_attr "type" "mfhilo")
16305 + [(set_attr "type" "accext")
16306 (set_attr "mode" "SI")])
16307
16308 (define_insn "mips_extr_rs_w"
16309 @@ -860,7 +860,7 @@
16310 }
16311 return "extrv_rs.w\t%0,%q1,%2";
16312 }
16313 - [(set_attr "type" "mfhilo")
16314 + [(set_attr "type" "accext")
16315 (set_attr "mode" "SI")])
16316
16317 ;; EXTR*_S.H
16318 @@ -882,7 +882,7 @@
16319 }
16320 return "extrv_s.h\t%0,%q1,%2";
16321 }
16322 - [(set_attr "type" "mfhilo")
16323 + [(set_attr "type" "accext")
16324 (set_attr "mode" "SI")])
16325
16326 ;; EXTP*
16327 @@ -905,7 +905,7 @@
16328 }
16329 return "extpv\t%0,%q1,%2";
16330 }
16331 - [(set_attr "type" "mfhilo")
16332 + [(set_attr "type" "accext")
16333 (set_attr "mode" "SI")])
16334
16335 (define_insn "mips_extpdp"
16336 @@ -930,7 +930,7 @@
16337 }
16338 return "extpdpv\t%0,%q1,%2";
16339 }
16340 - [(set_attr "type" "mfhilo")
16341 + [(set_attr "type" "accext")
16342 (set_attr "mode" "SI")])
16343
16344 ;; SHILO*
16345 @@ -949,7 +949,7 @@
16346 }
16347 return "shilov\t%q0,%2";
16348 }
16349 - [(set_attr "type" "mfhilo")
16350 + [(set_attr "type" "accmod")
16351 (set_attr "mode" "SI")])
16352
16353 ;; MTHLIP*
16354 @@ -965,7 +965,7 @@
16355 (reg:CCDSP CCDSP_PO_REGNUM)] UNSPEC_MTHLIP))])]
16356 "ISA_HAS_DSP && !TARGET_64BIT"
16357 "mthlip\t%2,%q0"
16358 - [(set_attr "type" "mfhilo")
16359 + [(set_attr "type" "accmod")
16360 (set_attr "mode" "SI")])
16361
16362 ;; WRDSP
16363 @@ -987,7 +987,7 @@
16364 (unspec:CCDSP [(match_dup 0) (match_dup 1)] UNSPEC_WRDSP))])]
16365 "ISA_HAS_DSP"
16366 "wrdsp\t%0,%1"
16367 - [(set_attr "type" "arith")
16368 + [(set_attr "type" "dspalu")
16369 (set_attr "mode" "SI")])
16370
16371 ;; RDDSP
16372 @@ -1003,7 +1003,7 @@
16373 UNSPEC_RDDSP))]
16374 "ISA_HAS_DSP"
16375 "rddsp\t%0,%1"
16376 - [(set_attr "type" "arith")
16377 + [(set_attr "type" "dspalu")
16378 (set_attr "mode" "SI")])
16379
16380 ;; Table 2-7. MIPS DSP ASE Instructions: Indexed-Load
16381 --- a/gcc/config/mips/mips-dspr2.md
16382 +++ b/gcc/config/mips/mips-dspr2.md
16383 @@ -9,7 +9,7 @@
16384 (unspec:CCDSP [(match_dup 1)] UNSPEC_ABSQ_S_QB))])]
16385 "ISA_HAS_DSPR2"
16386 "absq_s.qb\t%0,%z1"
16387 - [(set_attr "type" "arith")
16388 + [(set_attr "type" "dspalusat")
16389 (set_attr "mode" "SI")])
16390
16391 (define_insn "mips_addu_ph"
16392 @@ -21,7 +21,7 @@
16393 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDU_PH))])]
16394 "ISA_HAS_DSPR2"
16395 "addu.ph\t%0,%z1,%z2"
16396 - [(set_attr "type" "arith")
16397 + [(set_attr "type" "dspalu")
16398 (set_attr "mode" "SI")])
16399
16400 (define_insn "mips_addu_s_ph"
16401 @@ -34,7 +34,7 @@
16402 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDU_S_PH))])]
16403 "ISA_HAS_DSPR2"
16404 "addu_s.ph\t%0,%z1,%z2"
16405 - [(set_attr "type" "arith")
16406 + [(set_attr "type" "dspalusat")
16407 (set_attr "mode" "SI")])
16408
16409 (define_insn "mips_adduh_qb"
16410 @@ -44,7 +44,7 @@
16411 UNSPEC_ADDUH_QB))]
16412 "ISA_HAS_DSPR2"
16413 "adduh.qb\t%0,%z1,%z2"
16414 - [(set_attr "type" "arith")
16415 + [(set_attr "type" "dspalu")
16416 (set_attr "mode" "SI")])
16417
16418 (define_insn "mips_adduh_r_qb"
16419 @@ -54,7 +54,7 @@
16420 UNSPEC_ADDUH_R_QB))]
16421 "ISA_HAS_DSPR2"
16422 "adduh_r.qb\t%0,%z1,%z2"
16423 - [(set_attr "type" "arith")
16424 + [(set_attr "type" "dspalusat")
16425 (set_attr "mode" "SI")])
16426
16427 (define_insn "mips_append"
16428 @@ -69,7 +69,7 @@
16429 operands[2] = GEN_INT (INTVAL (operands[2]) & 31);
16430 return "append\t%0,%z2,%3";
16431 }
16432 - [(set_attr "type" "arith")
16433 + [(set_attr "type" "dspalu")
16434 (set_attr "mode" "SI")])
16435
16436 (define_insn "mips_balign"
16437 @@ -84,7 +84,7 @@
16438 operands[2] = GEN_INT (INTVAL (operands[2]) & 3);
16439 return "balign\t%0,%z2,%3";
16440 }
16441 - [(set_attr "type" "arith")
16442 + [(set_attr "type" "dspalu")
16443 (set_attr "mode" "SI")])
16444
16445 (define_insn "mips_cmpgdu_eq_qb"
16446 @@ -99,7 +99,7 @@
16447 UNSPEC_CMPGDU_EQ_QB))])]
16448 "ISA_HAS_DSPR2"
16449 "cmpgdu.eq.qb\t%0,%z1,%z2"
16450 - [(set_attr "type" "arith")
16451 + [(set_attr "type" "dspalu")
16452 (set_attr "mode" "SI")])
16453
16454 (define_insn "mips_cmpgdu_lt_qb"
16455 @@ -114,7 +114,7 @@
16456 UNSPEC_CMPGDU_LT_QB))])]
16457 "ISA_HAS_DSPR2"
16458 "cmpgdu.lt.qb\t%0,%z1,%z2"
16459 - [(set_attr "type" "arith")
16460 + [(set_attr "type" "dspalu")
16461 (set_attr "mode" "SI")])
16462
16463 (define_insn "mips_cmpgdu_le_qb"
16464 @@ -129,7 +129,7 @@
16465 UNSPEC_CMPGDU_LE_QB))])]
16466 "ISA_HAS_DSPR2"
16467 "cmpgdu.le.qb\t%0,%z1,%z2"
16468 - [(set_attr "type" "arith")
16469 + [(set_attr "type" "dspalu")
16470 (set_attr "mode" "SI")])
16471
16472 (define_insn "mips_dpa_w_ph"
16473 @@ -140,7 +140,7 @@
16474 UNSPEC_DPA_W_PH))]
16475 "ISA_HAS_DSPR2 && !TARGET_64BIT"
16476 "dpa.w.ph\t%q0,%z2,%z3"
16477 - [(set_attr "type" "imadd")
16478 + [(set_attr "type" "dspmac")
16479 (set_attr "mode" "SI")])
16480
16481 (define_insn "mips_dps_w_ph"
16482 @@ -151,7 +151,7 @@
16483 UNSPEC_DPS_W_PH))]
16484 "ISA_HAS_DSPR2 && !TARGET_64BIT"
16485 "dps.w.ph\t%q0,%z2,%z3"
16486 - [(set_attr "type" "imadd")
16487 + [(set_attr "type" "dspmac")
16488 (set_attr "mode" "SI")])
16489
16490 (define_expand "mips_madd<u>"
16491 @@ -247,7 +247,7 @@
16492 UNSPEC_MULSA_W_PH))]
16493 "ISA_HAS_DSPR2 && !TARGET_64BIT"
16494 "mulsa.w.ph\t%q0,%z2,%z3"
16495 - [(set_attr "type" "imadd")
16496 + [(set_attr "type" "dspmac")
16497 (set_attr "mode" "SI")])
16498
16499 (define_insn "mips_mult"
16500 @@ -277,7 +277,7 @@
16501 UNSPEC_PRECR_QB_PH))]
16502 "ISA_HAS_DSPR2"
16503 "precr.qb.ph\t%0,%z1,%z2"
16504 - [(set_attr "type" "arith")
16505 + [(set_attr "type" "dspalu")
16506 (set_attr "mode" "SI")])
16507
16508 (define_insn "mips_precr_sra_ph_w"
16509 @@ -292,7 +292,7 @@
16510 operands[2] = GEN_INT (INTVAL (operands[2]) & 31);
16511 return "precr_sra.ph.w\t%0,%z2,%3";
16512 }
16513 - [(set_attr "type" "arith")
16514 + [(set_attr "type" "dspalu")
16515 (set_attr "mode" "SI")])
16516
16517 (define_insn "mips_precr_sra_r_ph_w"
16518 @@ -307,7 +307,7 @@
16519 operands[2] = GEN_INT (INTVAL (operands[2]) & 31);
16520 return "precr_sra_r.ph.w\t%0,%z2,%3";
16521 }
16522 - [(set_attr "type" "arith")
16523 + [(set_attr "type" "dspalu")
16524 (set_attr "mode" "SI")])
16525
16526 (define_insn "mips_prepend"
16527 @@ -322,7 +322,7 @@
16528 operands[2] = GEN_INT (INTVAL (operands[2]) & 31);
16529 return "prepend\t%0,%z2,%3";
16530 }
16531 - [(set_attr "type" "arith")
16532 + [(set_attr "type" "dspalu")
16533 (set_attr "mode" "SI")])
16534
16535 (define_insn "mips_shra_qb"
16536 @@ -340,7 +340,7 @@
16537 }
16538 return "shrav.qb\t%0,%z1,%2";
16539 }
16540 - [(set_attr "type" "shift")
16541 + [(set_attr "type" "dspalu")
16542 (set_attr "mode" "SI")])
16543
16544
16545 @@ -359,7 +359,7 @@
16546 }
16547 return "shrav_r.qb\t%0,%z1,%2";
16548 }
16549 - [(set_attr "type" "shift")
16550 + [(set_attr "type" "dspalu")
16551 (set_attr "mode" "SI")])
16552
16553 (define_insn "mips_shrl_ph"
16554 @@ -377,7 +377,7 @@
16555 }
16556 return "shrlv.ph\t%0,%z1,%2";
16557 }
16558 - [(set_attr "type" "shift")
16559 + [(set_attr "type" "dspalu")
16560 (set_attr "mode" "SI")])
16561
16562 (define_insn "mips_subu_ph"
16563 @@ -390,7 +390,7 @@
16564 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBU_PH))])]
16565 "ISA_HAS_DSPR2"
16566 "subu.ph\t%0,%z1,%z2"
16567 - [(set_attr "type" "arith")
16568 + [(set_attr "type" "dspalu")
16569 (set_attr "mode" "SI")])
16570
16571 (define_insn "mips_subu_s_ph"
16572 @@ -403,7 +403,7 @@
16573 (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBU_S_PH))])]
16574 "ISA_HAS_DSPR2"
16575 "subu_s.ph\t%0,%z1,%z2"
16576 - [(set_attr "type" "arith")
16577 + [(set_attr "type" "dspalusat")
16578 (set_attr "mode" "SI")])
16579
16580 (define_insn "mips_subuh_qb"
16581 @@ -413,7 +413,7 @@
16582 UNSPEC_SUBUH_QB))]
16583 "ISA_HAS_DSPR2"
16584 "subuh.qb\t%0,%z1,%z2"
16585 - [(set_attr "type" "arith")
16586 + [(set_attr "type" "dspalu")
16587 (set_attr "mode" "SI")])
16588
16589 (define_insn "mips_subuh_r_qb"
16590 @@ -423,7 +423,7 @@
16591 UNSPEC_SUBUH_R_QB))]
16592 "ISA_HAS_DSPR2"
16593 "subuh_r.qb\t%0,%z1,%z2"
16594 - [(set_attr "type" "arith")
16595 + [(set_attr "type" "dspalu")
16596 (set_attr "mode" "SI")])
16597
16598 (define_insn "mips_addqh_ph"
16599 @@ -433,7 +433,7 @@
16600 UNSPEC_ADDQH_PH))]
16601 "ISA_HAS_DSPR2"
16602 "addqh.ph\t%0,%z1,%z2"
16603 - [(set_attr "type" "arith")
16604 + [(set_attr "type" "dspalu")
16605 (set_attr "mode" "SI")])
16606
16607 (define_insn "mips_addqh_r_ph"
16608 @@ -443,7 +443,7 @@
16609 UNSPEC_ADDQH_R_PH))]
16610 "ISA_HAS_DSPR2"
16611 "addqh_r.ph\t%0,%z1,%z2"
16612 - [(set_attr "type" "arith")
16613 + [(set_attr "type" "dspalu")
16614 (set_attr "mode" "SI")])
16615
16616 (define_insn "mips_addqh_w"
16617 @@ -453,7 +453,7 @@
16618 UNSPEC_ADDQH_W))]
16619 "ISA_HAS_DSPR2"
16620 "addqh.w\t%0,%z1,%z2"
16621 - [(set_attr "type" "arith")
16622 + [(set_attr "type" "dspalu")
16623 (set_attr "mode" "SI")])
16624
16625 (define_insn "mips_addqh_r_w"
16626 @@ -463,7 +463,7 @@
16627 UNSPEC_ADDQH_R_W))]
16628 "ISA_HAS_DSPR2"
16629 "addqh_r.w\t%0,%z1,%z2"
16630 - [(set_attr "type" "arith")
16631 + [(set_attr "type" "dspalu")
16632 (set_attr "mode" "SI")])
16633
16634 (define_insn "mips_subqh_ph"
16635 @@ -473,7 +473,7 @@
16636 UNSPEC_SUBQH_PH))]
16637 "ISA_HAS_DSPR2"
16638 "subqh.ph\t%0,%z1,%z2"
16639 - [(set_attr "type" "arith")
16640 + [(set_attr "type" "dspalu")
16641 (set_attr "mode" "SI")])
16642
16643 (define_insn "mips_subqh_r_ph"
16644 @@ -483,7 +483,7 @@
16645 UNSPEC_SUBQH_R_PH))]
16646 "ISA_HAS_DSPR2"
16647 "subqh_r.ph\t%0,%z1,%z2"
16648 - [(set_attr "type" "arith")
16649 + [(set_attr "type" "dspalu")
16650 (set_attr "mode" "SI")])
16651
16652 (define_insn "mips_subqh_w"
16653 @@ -493,7 +493,7 @@
16654 UNSPEC_SUBQH_W))]
16655 "ISA_HAS_DSPR2"
16656 "subqh.w\t%0,%z1,%z2"
16657 - [(set_attr "type" "arith")
16658 + [(set_attr "type" "dspalu")
16659 (set_attr "mode" "SI")])
16660
16661 (define_insn "mips_subqh_r_w"
16662 @@ -503,7 +503,7 @@
16663 UNSPEC_SUBQH_R_W))]
16664 "ISA_HAS_DSPR2"
16665 "subqh_r.w\t%0,%z1,%z2"
16666 - [(set_attr "type" "arith")
16667 + [(set_attr "type" "dspalu")
16668 (set_attr "mode" "SI")])
16669
16670 (define_insn "mips_dpax_w_ph"
16671 @@ -514,7 +514,7 @@
16672 UNSPEC_DPAX_W_PH))]
16673 "ISA_HAS_DSPR2 && !TARGET_64BIT"
16674 "dpax.w.ph\t%q0,%z2,%z3"
16675 - [(set_attr "type" "imadd")
16676 + [(set_attr "type" "dspmac")
16677 (set_attr "mode" "SI")])
16678
16679 (define_insn "mips_dpsx_w_ph"
16680 @@ -525,7 +525,7 @@
16681 UNSPEC_DPSX_W_PH))]
16682 "ISA_HAS_DSPR2 && !TARGET_64BIT"
16683 "dpsx.w.ph\t%q0,%z2,%z3"
16684 - [(set_attr "type" "imadd")
16685 + [(set_attr "type" "dspmac")
16686 (set_attr "mode" "SI")])
16687
16688 (define_insn "mips_dpaqx_s_w_ph"
16689 @@ -540,7 +540,7 @@
16690 UNSPEC_DPAQX_S_W_PH))])]
16691 "ISA_HAS_DSPR2 && !TARGET_64BIT"
16692 "dpaqx_s.w.ph\t%q0,%z2,%z3"
16693 - [(set_attr "type" "imadd")
16694 + [(set_attr "type" "dspmac")
16695 (set_attr "mode" "SI")])
16696
16697 (define_insn "mips_dpaqx_sa_w_ph"
16698 @@ -555,7 +555,7 @@
16699 UNSPEC_DPAQX_SA_W_PH))])]
16700 "ISA_HAS_DSPR2 && !TARGET_64BIT"
16701 "dpaqx_sa.w.ph\t%q0,%z2,%z3"
16702 - [(set_attr "type" "imadd")
16703 + [(set_attr "type" "dspmacsat")
16704 (set_attr "mode" "SI")])
16705
16706 (define_insn "mips_dpsqx_s_w_ph"
16707 @@ -570,7 +570,7 @@
16708 UNSPEC_DPSQX_S_W_PH))])]
16709 "ISA_HAS_DSPR2 && !TARGET_64BIT"
16710 "dpsqx_s.w.ph\t%q0,%z2,%z3"
16711 - [(set_attr "type" "imadd")
16712 + [(set_attr "type" "dspmac")
16713 (set_attr "mode" "SI")])
16714
16715 (define_insn "mips_dpsqx_sa_w_ph"
16716 @@ -585,5 +585,43 @@
16717 UNSPEC_DPSQX_SA_W_PH))])]
16718 "ISA_HAS_DSPR2 && !TARGET_64BIT"
16719 "dpsqx_sa.w.ph\t%q0,%z2,%z3"
16720 - [(set_attr "type" "imadd")
16721 + [(set_attr "type" "dspmacsat")
16722 + (set_attr "mode" "SI")])
16723 +
16724 +;; Convert mtlo $ac[1-3],$0 => mult $ac[1-3],$0,$0
16725 +;; mthi $ac[1-3],$0
16726 +(define_peephole2
16727 + [(set (match_operand:SI 0 "register_operand" "")
16728 + (const_int 0))
16729 + (set (match_operand:SI 1 "register_operand" "")
16730 + (const_int 0))]
16731 + "ISA_HAS_DSPR2
16732 + && !TARGET_MIPS16
16733 + && !TARGET_64BIT
16734 + && (((true_regnum (operands[0]) == AC1LO_REGNUM
16735 + && true_regnum (operands[1]) == AC1HI_REGNUM)
16736 + || (true_regnum (operands[0]) == AC1HI_REGNUM
16737 + && true_regnum (operands[1]) == AC1LO_REGNUM))
16738 + || ((true_regnum (operands[0]) == AC2LO_REGNUM
16739 + && true_regnum (operands[1]) == AC2HI_REGNUM)
16740 + || (true_regnum (operands[0]) == AC2HI_REGNUM
16741 + && true_regnum (operands[1]) == AC2LO_REGNUM))
16742 + || ((true_regnum (operands[0]) == AC3LO_REGNUM
16743 + && true_regnum (operands[1]) == AC3HI_REGNUM)
16744 + || (true_regnum (operands[0]) == AC3HI_REGNUM
16745 + && true_regnum (operands[1]) == AC3LO_REGNUM)))"
16746 + [(parallel [(set (match_dup 0) (const_int 0))
16747 + (set (match_dup 1) (const_int 0))])]
16748 +)
16749 +
16750 +(define_insn "*mips_acc_init"
16751 + [(parallel [(set (match_operand:SI 0 "register_operand" "=a")
16752 + (const_int 0))
16753 + (set (match_operand:SI 1 "register_operand" "=a")
16754 + (const_int 0))])]
16755 + "ISA_HAS_DSPR2
16756 + && !TARGET_MIPS16
16757 + && !TARGET_64BIT"
16758 + "mult\t%q0,$0,$0\t\t# Clear ACC HI/LO"
16759 + [(set_attr "type" "imul")
16760 (set_attr "mode" "SI")])
16761 --- a/gcc/config/mips/mips.h
16762 +++ b/gcc/config/mips/mips.h
16763 @@ -342,6 +342,9 @@
16764 #define TARGET_IRIX 0
16765 #define TARGET_IRIX6 0
16766
16767 +/* SDE specific stuff. */
16768 +#define TARGET_MIPS_SDE 0
16769 +
16770 /* Define preprocessor macros for the -march and -mtune options.
16771 PREFIX is either _MIPS_ARCH or _MIPS_TUNE, INFO is the selected
16772 processor. If INFO's canonical name is "foo", define PREFIX to
16773 @@ -708,8 +711,9 @@
16774 |march=r10000|march=r12000|march=r14000|march=r16000:-mips4} \
16775 %{march=mips32|march=4kc|march=4km|march=4kp|march=4ksc:-mips32} \
16776 %{march=mips32r2|march=m4k|march=4ke*|march=4ksd|march=24k* \
16777 - |march=34k*|march=74k*: -mips32r2} \
16778 - %{march=mips64|march=5k*|march=20k*|march=sb1*|march=sr71000: -mips64} \
16779 + |march=34k*|march=74k*|march=1004k*: -mips32r2} \
16780 + %{march=mips64|march=5k*|march=20k*|march=sb1*|march=sr71000 \
16781 + |march=xlr: -mips64} \
16782 %{march=mips64r2|march=octeon: -mips64r2} \
16783 %{!march=*: -" MULTILIB_ISA_DEFAULT "}}"
16784
16785 @@ -720,7 +724,8 @@
16786 #define MIPS_ARCH_FLOAT_SPEC \
16787 "%{mhard-float|msoft-float|march=mips*:; \
16788 march=vr41*|march=m4k|march=4k*|march=24kc|march=24kec \
16789 - |march=34kc|march=74kc|march=5kc|march=octeon: -msoft-float; \
16790 + |march=34kc|march=74kc|march=1004kc|march=5kc \
16791 + |march=octeon|march=xlr: -msoft-float; \
16792 march=*: -mhard-float}"
16793
16794 /* A spec condition that matches 32-bit options. It only works if
16795 @@ -731,8 +736,9 @@
16796
16797 /* Support for a compile-time default CPU, et cetera. The rules are:
16798 --with-arch is ignored if -march is specified or a -mips is specified
16799 - (other than -mips16).
16800 - --with-tune is ignored if -mtune is specified.
16801 + (other than -mips16); likewise --with-arch-32 and --with-arch-64.
16802 + --with-tune is ignored if -mtune is specified; likewise
16803 + --with-tune-32 and --with-tune-64.
16804 --with-abi is ignored if -mabi is specified.
16805 --with-float is ignored if -mhard-float or -msoft-float are
16806 specified.
16807 @@ -740,7 +746,11 @@
16808 specified. */
16809 #define OPTION_DEFAULT_SPECS \
16810 {"arch", "%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}" }, \
16811 + {"arch_32", "%{!mabi=*|mabi=32:%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \
16812 + {"arch_64", "%{mabi=n32|mabi=64:%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \
16813 {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
16814 + {"tune_32", "%{!mabi=*|mabi=32:%{!mtune=*:-mtune=%(VALUE)}}" }, \
16815 + {"tune_64", "%{mabi=n32|mabi=64:%{!mtune=*:-mtune=%(VALUE)}}" }, \
16816 {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \
16817 {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \
16818 {"divide", "%{!mdivide-traps:%{!mdivide-breaks:-mdivide-%(VALUE)}}" }, \
16819 @@ -750,7 +760,7 @@
16820
16821 /* A spec that infers the -mdsp setting from an -march argument. */
16822 #define BASE_DRIVER_SELF_SPECS \
16823 - "%{!mno-dsp:%{march=24ke*|march=34k*|march=74k*: -mdsp}}"
16824 + "%{!mno-dsp:%{march=24ke*|march=34k*|march=74k*|march=1004k*: -mdsp}}"
16825
16826 #define DRIVER_SELF_SPECS BASE_DRIVER_SELF_SPECS
16827
16828 @@ -1038,6 +1048,11 @@
16829 /* ISA includes the bbit* instructions. */
16830 #define ISA_HAS_BBIT (TARGET_OCTEON && !TARGET_MIPS16)
16831
16832 +/* ISA has single-instruction unaligned load/store support. */
16833 +#define ISA_HAS_UL_US (TARGET_OCTEON \
16834 + && TARGET_OCTEON_UNALIGNED \
16835 + && !TARGET_MIPS16)
16836 +
16837 /* ISA includes the cins instruction. */
16838 #define ISA_HAS_CINS (TARGET_OCTEON && !TARGET_MIPS16)
16839
16840 @@ -1055,6 +1070,7 @@
16841
16842 /* The CACHE instruction is available. */
16843 #define ISA_HAS_CACHE (TARGET_CACHE_BUILTIN && !TARGET_MIPS16)
16844 +
16845 \f
16846 /* Add -G xx support. */
16847
16848 @@ -1152,6 +1168,7 @@
16849 %{mshared} %{mno-shared} \
16850 %{msym32} %{mno-sym32} \
16851 %{mtune=*} %{v} \
16852 +%{mocteon-useun} %{mno-octeon-useun} \
16853 %(subtarget_asm_spec)"
16854
16855 /* Extra switches sometimes passed to the linker. */
16856 @@ -1622,6 +1639,9 @@
16857 #define GP_REG_LAST 31
16858 #define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
16859 #define GP_DBX_FIRST 0
16860 +#define K0_REG_NUM (GP_REG_FIRST + 26)
16861 +#define K1_REG_NUM (GP_REG_FIRST + 27)
16862 +#define KERNEL_REG_P(REGNO) (IN_RANGE (REGNO, K0_REG_NUM, K1_REG_NUM))
16863
16864 #define FP_REG_FIRST 32
16865 #define FP_REG_LAST 63
16866 @@ -1649,6 +1669,10 @@
16867 #define COP0_REG_LAST 111
16868 #define COP0_REG_NUM (COP0_REG_LAST - COP0_REG_FIRST + 1)
16869
16870 +#define COP0_STATUS_REG_NUM (COP0_REG_FIRST + 12)
16871 +#define COP0_CAUSE_REG_NUM (COP0_REG_FIRST + 13)
16872 +#define COP0_EPC_REG_NUM (COP0_REG_FIRST + 14)
16873 +
16874 #define COP2_REG_FIRST 112
16875 #define COP2_REG_LAST 143
16876 #define COP2_REG_NUM (COP2_REG_LAST - COP2_REG_FIRST + 1)
16877 @@ -1666,6 +1690,29 @@
16878 #define AT_REGNUM (GP_REG_FIRST + 1)
16879 #define HI_REGNUM (TARGET_BIG_ENDIAN ? MD_REG_FIRST : MD_REG_FIRST + 1)
16880 #define LO_REGNUM (TARGET_BIG_ENDIAN ? MD_REG_FIRST + 1 : MD_REG_FIRST)
16881 +#define AC1HI_REGNUM (TARGET_BIG_ENDIAN \
16882 + ? DSP_ACC_REG_FIRST : DSP_ACC_REG_FIRST + 1)
16883 +#define AC1LO_REGNUM (TARGET_BIG_ENDIAN \
16884 + ? DSP_ACC_REG_FIRST + 1 : DSP_ACC_REG_FIRST)
16885 +#define AC2HI_REGNUM (TARGET_BIG_ENDIAN \
16886 + ? DSP_ACC_REG_FIRST + 2 : DSP_ACC_REG_FIRST + 3)
16887 +#define AC2LO_REGNUM (TARGET_BIG_ENDIAN \
16888 + ? DSP_ACC_REG_FIRST + 3 : DSP_ACC_REG_FIRST + 2)
16889 +#define AC3HI_REGNUM (TARGET_BIG_ENDIAN \
16890 + ? DSP_ACC_REG_FIRST + 4 : DSP_ACC_REG_FIRST + 5)
16891 +#define AC3LO_REGNUM (TARGET_BIG_ENDIAN \
16892 + ? DSP_ACC_REG_FIRST + 5 : DSP_ACC_REG_FIRST + 4)
16893 +
16894 +/* A few bitfield locations for the coprocessor registers. */
16895 +/* Request Interrupt Priority Level is from bit 10 to bit 15 of
16896 + the cause register for the EIC interrupt mode. */
16897 +#define CAUSE_IPL 10
16898 +/* Interrupt Priority Level is from bit 10 to bit 15 of the status register. */
16899 +#define SR_IPL 10
16900 +/* Exception Level is at bit 1 of the status register. */
16901 +#define SR_EXL 1
16902 +/* Interrupt Enable is at bit 0 of the status register. */
16903 +#define SR_IE 0
16904
16905 /* FPSW_REGNUM is the single condition code used if !ISA_HAS_8CC.
16906 If ISA_HAS_8CC, it should not be used, and an arbitrary ST_REG
16907 @@ -1754,11 +1801,18 @@
16908 incoming arguments, the static chain pointer, or the frame pointer.
16909 The epilogue temporary mustn't conflict with the return registers,
16910 the PIC call register ($25), the frame pointer, the EH stack adjustment,
16911 - or the EH data registers. */
16912 + or the EH data registers.
16913 +
16914 + If we're generating interrupt handlers, we use K0 as a temporary register
16915 + in prologue/epilogue code. */
16916
16917 #define MIPS16_PIC_TEMP_REGNUM (GP_REG_FIRST + 2)
16918 -#define MIPS_PROLOGUE_TEMP_REGNUM (GP_REG_FIRST + 3)
16919 -#define MIPS_EPILOGUE_TEMP_REGNUM (GP_REG_FIRST + (TARGET_MIPS16 ? 6 : 8))
16920 +#define MIPS_PROLOGUE_TEMP_REGNUM \
16921 + (cfun->machine->interrupt_handler_p ? K0_REG_NUM : GP_REG_FIRST + 3)
16922 +#define MIPS_EPILOGUE_TEMP_REGNUM \
16923 + (cfun->machine->interrupt_handler_p \
16924 + ? K0_REG_NUM \
16925 + : GP_REG_FIRST + (TARGET_MIPS16 ? 6 : 8))
16926
16927 #define MIPS16_PIC_TEMP gen_rtx_REG (Pmode, MIPS16_PIC_TEMP_REGNUM)
16928 #define MIPS_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, MIPS_PROLOGUE_TEMP_REGNUM)
16929 @@ -2284,14 +2338,7 @@
16930 (mips_abi == ABI_EABI && UNITS_PER_FPVALUE >= UNITS_PER_DOUBLE)
16931
16932 \f
16933 -/* Say that the epilogue uses the return address register. Note that
16934 - in the case of sibcalls, the values "used by the epilogue" are
16935 - considered live at the start of the called function.
16936 -
16937 - If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
16938 - See the comment above load_call<mode> for details. */
16939 -#define EPILOGUE_USES(REGNO) \
16940 - ((REGNO) == 31 || (TARGET_USE_GOT && (REGNO) == GOT_VERSION_REGNUM))
16941 +#define EPILOGUE_USES(REGNO) mips_epilogue_uses (REGNO)
16942
16943 /* Treat LOC as a byte offset from the stack pointer and round it up
16944 to the next fully-aligned offset. */
16945 --- a/gcc/config/mips/mips.md
16946 +++ b/gcc/config/mips/mips.md
16947 @@ -67,7 +67,16 @@
16948 (UNSPEC_SET_GOT_VERSION 46)
16949 (UNSPEC_UPDATE_GOT_VERSION 47)
16950 (UNSPEC_COPYGP 48)
16951 + (UNSPEC_ERET 49)
16952 + (UNSPEC_DERET 50)
16953 + (UNSPEC_DI 51)
16954 + (UNSPEC_EHB 52)
16955 + (UNSPEC_RDPGPR 53)
16956 + (UNSPEC_COP0 54)
16957
16958 + (UNSPEC_UNALIGNED_LOAD 60)
16959 + (UNSPEC_UNALIGNED_STORE 61)
16960 +
16961 (UNSPEC_ADDRESS_FIRST 100)
16962
16963 (TLS_GET_TP_REGNUM 3)
16964 @@ -372,6 +381,12 @@
16965 ;; frsqrt floating point reciprocal square root
16966 ;; frsqrt1 floating point reciprocal square root step1
16967 ;; frsqrt2 floating point reciprocal square root step2
16968 +;; dspmac DSP MAC instructions not saturating the accumulator
16969 +;; dspmacsat DSP MAC instructions that saturate the accumulator
16970 +;; accext DSP accumulator extract instructions
16971 +;; accmod DSP accumulator modify instructions
16972 +;; dspalu DSP ALU instructions not saturating the result
16973 +;; dspalusat DSP ALU instructions that saturate the result
16974 ;; multi multiword sequence (or user asm statements)
16975 ;; nop no operation
16976 ;; ghost an instruction that produces no real code
16977 @@ -380,7 +395,7 @@
16978 prefetch,prefetchx,condmove,mtc,mfc,mthilo,mfhilo,const,arith,logical,
16979 shift,slt,signext,clz,pop,trap,imul,imul3,imul3nc,imadd,idiv,idiv3,move,
16980 fmove,fadd,fmul,fmadd,fdiv,frdiv,frdiv1,frdiv2,fabs,fneg,fcmp,fcvt,fsqrt,
16981 - frsqrt,frsqrt1,frsqrt2,multi,nop,ghost"
16982 + frsqrt,frsqrt1,frsqrt2,dspmac,dspmacsat,accext,accmod,dspalu,dspalusat,multi,nop,ghost"
16983 (cond [(eq_attr "jal" "!unset") (const_string "call")
16984 (eq_attr "got" "load") (const_string "load")
16985
16986 @@ -3565,7 +3580,9 @@
16987 (unspec:GPR [(match_operand:BLK 1 "memory_operand" "m")
16988 (match_operand:QI 2 "memory_operand" "m")]
16989 UNSPEC_LOAD_LEFT))]
16990 - "!TARGET_MIPS16 && mips_mem_fits_mode_p (<MODE>mode, operands[1])"
16991 + "!TARGET_MIPS16
16992 + && !ISA_HAS_UL_US
16993 + && mips_mem_fits_mode_p (<MODE>mode, operands[1])"
16994 "<load>l\t%0,%2"
16995 [(set_attr "move_type" "load")
16996 (set_attr "mode" "<MODE>")])
16997 @@ -3576,7 +3593,9 @@
16998 (match_operand:QI 2 "memory_operand" "m")
16999 (match_operand:GPR 3 "register_operand" "0")]
17000 UNSPEC_LOAD_RIGHT))]
17001 - "!TARGET_MIPS16 && mips_mem_fits_mode_p (<MODE>mode, operands[1])"
17002 + "!TARGET_MIPS16
17003 + && !ISA_HAS_UL_US
17004 + && mips_mem_fits_mode_p (<MODE>mode, operands[1])"
17005 "<load>r\t%0,%2"
17006 [(set_attr "move_type" "load")
17007 (set_attr "mode" "<MODE>")])
17008 @@ -3586,7 +3605,9 @@
17009 (unspec:BLK [(match_operand:GPR 1 "reg_or_0_operand" "dJ")
17010 (match_operand:QI 2 "memory_operand" "m")]
17011 UNSPEC_STORE_LEFT))]
17012 - "!TARGET_MIPS16 && mips_mem_fits_mode_p (<MODE>mode, operands[0])"
17013 + "!TARGET_MIPS16
17014 + && !ISA_HAS_UL_US
17015 + && mips_mem_fits_mode_p (<MODE>mode, operands[0])"
17016 "<store>l\t%z1,%2"
17017 [(set_attr "move_type" "store")
17018 (set_attr "mode" "<MODE>")])
17019 @@ -3602,6 +3623,28 @@
17020 [(set_attr "move_type" "store")
17021 (set_attr "mode" "<MODE>")])
17022
17023 +;; Unaligned load and store patterns.
17024 +
17025 +(define_insn "mov_u<load>"
17026 + [(set (match_operand:GPR 0 "register_operand" "=d")
17027 + (unspec:GPR [(match_operand:BLK 1 "memory_operand" "m")
17028 + (match_operand:QI 2 "memory_operand" "m")]
17029 + UNSPEC_UNALIGNED_LOAD))]
17030 + "ISA_HAS_UL_US && mips_mem_fits_mode_p (<MODE>mode, operands[1])"
17031 + "u<load>\t%0,%2"
17032 + [(set_attr "type" "load")
17033 + (set_attr "mode" "<MODE>")])
17034 +
17035 +(define_insn "mov_u<store>"
17036 + [(set (match_operand:BLK 0 "memory_operand" "=m")
17037 + (unspec:BLK [(match_operand:GPR 1 "reg_or_0_operand" "dJ")
17038 + (match_operand:QI 2 "memory_operand" "m")]
17039 + UNSPEC_UNALIGNED_STORE))]
17040 + "ISA_HAS_UL_US && mips_mem_fits_mode_p (<MODE>mode, operands[0])"
17041 + "u<store>\t%z1,%2"
17042 + [(set_attr "type" "store")
17043 + (set_attr "mode" "<MODE>")])
17044 +
17045 ;; An instruction to calculate the high part of a 64-bit SYMBOL_ABSOLUTE.
17046 ;; The required value is:
17047 ;;
17048 @@ -5472,6 +5515,26 @@
17049 return "%*b\t%l0%/";
17050 else
17051 {
17052 + if (final_sequence && (mips_abi == ABI_32 || mips_abi == ABI_O64))
17053 + {
17054 + /* If the delay slot contains a $gp restore, we need to
17055 + do that first, because we need it for the load
17056 + label. Other ABIs do not have caller-save $gp. */
17057 + rtx next = NEXT_INSN (insn);
17058 + if (INSN_P (next) && !INSN_DELETED_P (next))
17059 + {
17060 + rtx pat = PATTERN (next);
17061 + if (GET_CODE (pat) == SET
17062 + && REG_P (SET_DEST (pat))
17063 + && REGNO (SET_DEST (pat)) == PIC_OFFSET_TABLE_REGNUM)
17064 + {
17065 + rtx ops[2];
17066 + ops[0] = SET_DEST (pat);
17067 + ops[1] = SET_SRC (pat);
17068 + output_asm_insn (mips_output_move (ops[0], ops[1]), ops);
17069 + }
17070 + }
17071 + }
17072 output_asm_insn (mips_output_load_label (), operands);
17073 return "%*jr\t%@%/%]";
17074 }
17075 @@ -5490,7 +5553,13 @@
17076 (lt (abs (minus (match_dup 0)
17077 (plus (pc) (const_int 4))))
17078 (const_int 131072)))
17079 - (const_int 4) (const_int 16)))])
17080 + (const_int 4)
17081 + (if_then_else
17082 + ;; for these two ABIs we may need to move a restore of $gp
17083 + (ior (eq (symbol_ref "mips_abi") (symbol_ref "ABI_32"))
17084 + (eq (symbol_ref "mips_abi") (symbol_ref "ABI_O64")))
17085 + (const_int 20)
17086 + (const_int 16))))])
17087
17088 ;; We need a different insn for the mips16, because a mips16 branch
17089 ;; does not have a delay slot.
17090 @@ -5679,6 +5748,60 @@
17091 [(set_attr "type" "jump")
17092 (set_attr "mode" "none")])
17093
17094 +;; Exception return.
17095 +(define_insn "mips_eret"
17096 + [(return)
17097 + (unspec_volatile [(const_int 0)] UNSPEC_ERET)]
17098 + ""
17099 + "eret"
17100 + [(set_attr "type" "trap")
17101 + (set_attr "mode" "none")])
17102 +
17103 +;; Debug exception return.
17104 +(define_insn "mips_deret"
17105 + [(return)
17106 + (unspec_volatile [(const_int 0)] UNSPEC_DERET)]
17107 + ""
17108 + "deret"
17109 + [(set_attr "type" "trap")
17110 + (set_attr "mode" "none")])
17111 +
17112 +;; Disable interrupts.
17113 +(define_insn "mips_di"
17114 + [(unspec_volatile [(const_int 0)] UNSPEC_DI)]
17115 + ""
17116 + "di"
17117 + [(set_attr "type" "trap")
17118 + (set_attr "mode" "none")])
17119 +
17120 +;; Execution hazard barrier.
17121 +(define_insn "mips_ehb"
17122 + [(unspec_volatile [(const_int 0)] UNSPEC_EHB)]
17123 + ""
17124 + "ehb"
17125 + [(set_attr "type" "trap")
17126 + (set_attr "mode" "none")])
17127 +
17128 +;; Read GPR from previous shadow register set.
17129 +(define_insn "mips_rdpgpr"
17130 + [(set (match_operand:SI 0 "register_operand" "=d")
17131 + (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "d")]
17132 + UNSPEC_RDPGPR))]
17133 + ""
17134 + "rdpgpr\t%0,%1"
17135 + [(set_attr "type" "move")
17136 + (set_attr "mode" "SI")])
17137 +
17138 +;; Move involving COP0 registers.
17139 +(define_insn "cop0_move"
17140 + [(set (match_operand:SI 0 "register_operand" "=B,d")
17141 + (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "d,B")]
17142 + UNSPEC_COP0))]
17143 + ""
17144 +{ return mips_output_move (operands[0], operands[1]); }
17145 + [(set_attr "type" "mtc,mfc")
17146 + (set_attr "mode" "SI")])
17147 +
17148 ;; This is used in compiling the unwind routines.
17149 (define_expand "eh_return"
17150 [(use (match_operand 0 "general_operand"))]
17151 --- a/gcc/config/mips/mips.opt
17152 +++ b/gcc/config/mips/mips.opt
17153 @@ -184,6 +184,10 @@
17154 Target Report RejectNegative Mask(MIPS16)
17155 Generate MIPS16 code
17156
17157 +mips16e
17158 +Target Report RejectNegative Mask(MIPS16) MaskExists
17159 +Deprecated; alias for -mips16
17160 +
17161 mips3d
17162 Target Report RejectNegative Mask(MIPS3D)
17163 Use MIPS-3D instructions
17164 @@ -236,6 +240,10 @@
17165 Target Report RejectNegative InverseMask(MIPS3D)
17166 Do not use MIPS-3D instructions
17167
17168 +mocteon-useun
17169 +Target Report Mask(OCTEON_UNALIGNED)
17170 +Use Octeon-specific unaligned loads/stores for 32/64-bit data
17171 +
17172 mpaired-single
17173 Target Report Mask(PAIRED_SINGLE_FLOAT)
17174 Use paired-single floating-point instructions
17175 --- a/gcc/config/mips/mips-protos.h
17176 +++ b/gcc/config/mips/mips-protos.h
17177 @@ -261,6 +261,8 @@
17178 extern void mips_output_external (FILE *, tree, const char *);
17179 extern void mips_output_filename (FILE *, const char *);
17180 extern void mips_output_ascii (FILE *, const char *, size_t);
17181 +extern void octeon_output_shared_variable (FILE *, tree, const char *,
17182 + unsigned HOST_WIDE_INT, int);
17183 extern void mips_output_aligned_decl_common (FILE *, tree, const char *,
17184 unsigned HOST_WIDE_INT,
17185 unsigned int);
17186 @@ -307,6 +309,8 @@
17187 extern bool mips_linked_madd_p (rtx, rtx);
17188 extern bool mips_store_data_bypass_p (rtx, rtx);
17189 extern rtx mips_prefetch_cookie (rtx, rtx);
17190 +extern int mips_mult_madd_chain_bypass_p (rtx, rtx);
17191 +extern int mips_dspalu_bypass_p (rtx, rtx);
17192
17193 extern void irix_asm_output_align (FILE *, unsigned);
17194 extern const char *current_section_name (void);
17195 @@ -332,4 +336,6 @@
17196
17197 extern void mips_expand_vector_init (rtx, rtx);
17198
17199 +extern bool mips_epilogue_uses (unsigned int);
17200 +
17201 #endif /* ! GCC_MIPS_PROTOS_H */
17202 --- /dev/null
17203 +++ b/gcc/config/mips/octeon-elf.h
17204 @@ -0,0 +1,98 @@
17205 +/* Macros for mips*-octeon-elf target.
17206 + Copyright (C) 2004, 2005, 2006 Cavium Networks.
17207 +
17208 +This file is part of GCC.
17209 +
17210 +GCC is free software; you can redistribute it and/or modify
17211 +it under the terms of the GNU General Public License as published by
17212 +the Free Software Foundation; either version 2, or (at your option)
17213 +any later version.
17214 +
17215 +GCC is distributed in the hope that it will be useful,
17216 +but WITHOUT ANY WARRANTY; without even the implied warranty of
17217 +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17218 +GNU General Public License for more details.
17219 +
17220 +You should have received a copy of the GNU General Public License
17221 +along with GCC; see the file COPYING. If not, write to
17222 +the Free Software Foundation, 51 Franklin Street, Fifth Floor,
17223 +Boston, MA 02110-1301, USA. */
17224 +
17225 +/* Add MASK_SOFT_FLOAT and MASK_OCTEON_UNALIGNED. */
17226 +
17227 +#undef TARGET_DEFAULT
17228 +#define TARGET_DEFAULT (MASK_SOFT_FLOAT_ABI | MASK_OCTEON_UNALIGNED)
17229 +
17230 +/* Forward -m*octeon-useun. */
17231 +
17232 +#undef SUBTARGET_ASM_SPEC
17233 +#define SUBTARGET_ASM_SPEC "%{mno-octeon-useun} %{!mno-octeon-useun:-mocteon-useun}"
17234 +
17235 +/* Enable backtrace including on machine exceptions by default. */
17236 +
17237 +#undef SUBTARGET_CC1_SPEC
17238 +#define SUBTARGET_CC1_SPEC "%{!fno-asynchronous-unwind-tables:-fasynchronous-unwind-tables}"
17239 +
17240 +/* Without ASM_PREFERRED_EH_DATA_FORMAT, output_call_frame_info emits
17241 + pointer-sized addresses for FDE addresses. For 64-bit targets, it does
17242 + it without properly "switching over" to 64-bit as described in the DWARF3
17243 + spec. GDB can fall back on .eh_frames and misinterpret FDE addresses.
17244 + Instead let's be explicit and use augmentation to describe the encoding if
17245 + pointer size is 64. */
17246 +
17247 +#undef ASM_PREFERRED_EH_DATA_FORMAT
17248 +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
17249 + ((CODE) == 1 && POINTER_SIZE == 64 \
17250 + ? (ABI_HAS_64BIT_SYMBOLS ? DW_EH_PE_udata8 : DW_EH_PE_udata4) \
17251 + : DW_EH_PE_absptr)
17252 +
17253 +/* Link to libc library. */
17254 +
17255 +#undef LIB_SPEC
17256 +#define LIB_SPEC "-lc"
17257 +
17258 +/* Link to startup file. */
17259 +
17260 +#undef STARTFILE_SPEC
17261 +#define STARTFILE_SPEC "crti%O%s crtbegin%O%s crt0%O%s"
17262 +
17263 +/* Default our test-only n64 configuration to -G0 since that is what
17264 + the kernel uses. */
17265 +
17266 +#undef SUBTARGET_SELF_SPECS
17267 +#define SUBTARGET_SELF_SPECS \
17268 +"%{mabi=64:%{!G*: -G0}}"
17269 +
17270 +/* Pass linker emulation mode for N32. */
17271 +
17272 +#undef LINK_SPEC
17273 +#define LINK_SPEC "\
17274 +%(endian_spec) \
17275 +%{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32} %{mips32r2} %{mips64} \
17276 +%{mips64r2} %{bestGnum} %{shared} %{non_shared} \
17277 +%{mabi=n32:-melf32e%{!EL:b}%{EL:l}octeonn32} \
17278 +%{mabi=64:-melf64e%{!EL:b}%{EL:l}octeon}"
17279 +
17280 +/* Override because of N32. */
17281 +
17282 +#undef LOCAL_LABEL_PREFIX
17283 +#define LOCAL_LABEL_PREFIX ((mips_abi == ABI_N32) ? "." : "$")
17284 +
17285 +/* Append the core number to the GCOV filename FN. */
17286 +
17287 +#define GCOV_TARGET_SUFFIX_LENGTH 2
17288 +#define ADD_GCOV_TARGET_SUFFIX(FN) \
17289 +do \
17290 + { \
17291 + char *fn = FN; \
17292 + int core; \
17293 + char s[3]; \
17294 + \
17295 + asm ("rdhwr %0, $0" : "=r"(core)); \
17296 + sprintf (s, "%d", core); \
17297 + strcat (fn, s); \
17298 + } \
17299 +while (0)
17300 +
17301 +/* Code to unwind through the exception frame. */
17302 +#define MD_UNWIND_SUPPORT "config/mips/octeon-elf-unwind.h"
17303 --- /dev/null
17304 +++ b/gcc/config/mips/octeon-elf-unwind.h
17305 @@ -0,0 +1,57 @@
17306 +/* Stack unwinding support through the first exception frame.
17307 + Copyright (C) 2007 Cavium Networks.
17308 +
17309 +This file is part of GCC.
17310 +
17311 +GCC is free software; you can redistribute it and/or modify
17312 +it under the terms of the GNU General Public License as published by
17313 +the Free Software Foundation; either version 2, or (at your option)
17314 +any later version.
17315 +
17316 +GCC is distributed in the hope that it will be useful,
17317 +but WITHOUT ANY WARRANTY; without even the implied warranty of
17318 +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17319 +GNU General Public License for more details.
17320 +
17321 +You should have received a copy of the GNU General Public License
17322 +along with GCC; see the file COPYING. If not, write to
17323 +the Free Software Foundation, 51 Franklin Street, Fifth Floor,
17324 +Boston, MA 02110-1301, USA. */
17325 +
17326 +#define MD_FALLBACK_FRAME_STATE_FOR octeon_elf_fallback_frame_state
17327 +
17328 +/* Check whether this is the cvmx_interrupt_stage2 frame. If the
17329 + function call was dispatched via k0 assume we are in
17330 + cvmx_interrupt_stage2. In this case sp points to the saved
17331 + register array. */
17332 +
17333 +static _Unwind_Reason_Code
17334 +octeon_elf_fallback_frame_state (struct _Unwind_Context *context,
17335 + _Unwind_FrameState *fs)
17336 +{
17337 + unsigned i;
17338 + unsigned *pc = context->ra;
17339 +
17340 + /* Look for "jalr k0". */
17341 + if (pc[-2] != 0x0340f809)
17342 + return _URC_END_OF_STACK;
17343 +
17344 + for (i = 0; i < 32; i++)
17345 + {
17346 + fs->regs.reg[i].how = REG_SAVED_OFFSET;
17347 + fs->regs.reg[i].loc.offset = 8 * i;
17348 + }
17349 +
17350 + /* Keep the next frame's sp. This way we have a CFA that points
17351 + exactly to the register array. */
17352 + fs->regs.cfa_how = CFA_REG_OFFSET;
17353 + fs->regs.cfa_reg = STACK_POINTER_REGNUM;
17354 + fs->regs.cfa_offset = 0;
17355 +
17356 + /* DEPC is saved as the 35th register. */
17357 + fs->regs.reg[DWARF_ALT_FRAME_RETURN_COLUMN].how = REG_SAVED_OFFSET;
17358 + fs->regs.reg[DWARF_ALT_FRAME_RETURN_COLUMN].loc.offset = 8 * 35;
17359 + fs->retaddr_column = DWARF_ALT_FRAME_RETURN_COLUMN;
17360 +
17361 + return _URC_NO_REASON;
17362 +}
17363 --- /dev/null
17364 +++ b/gcc/config/mips/octeon.h
17365 @@ -0,0 +1,68 @@
17366 +/* Macros for mips*-octeon-* target.
17367 + Copyright (C) 2004, 2005, 2006 Cavium Networks.
17368 +
17369 +This file is part of GCC.
17370 +
17371 +GCC is free software; you can redistribute it and/or modify
17372 +it under the terms of the GNU General Public License as published by
17373 +the Free Software Foundation; either version 2, or (at your option)
17374 +any later version.
17375 +
17376 +GCC is distributed in the hope that it will be useful,
17377 +but WITHOUT ANY WARRANTY; without even the implied warranty of
17378 +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17379 +GNU General Public License for more details.
17380 +
17381 +You should have received a copy of the GNU General Public License
17382 +along with GCC; see the file COPYING. If not, write to
17383 +the Free Software Foundation, 51 Franklin Street, Fifth Floor,
17384 +Boston, MA 02110-1301, USA. */
17385 +
17386 +#define CVMX_SHARED_BSS_FLAGS (SECTION_WRITE | SECTION_BSS)
17387 +
17388 +#undef TARGET_ASM_SELECT_SECTION
17389 +#define TARGET_ASM_SELECT_SECTION octeon_select_section
17390 +
17391 +#undef TARGET_ASM_UNIQUE_SECTION
17392 +#define TARGET_ASM_UNIQUE_SECTION octeon_unique_section
17393 +
17394 +/* Implement ASM_OUTPUT_ALIGNED_DECL_LOCAL. This differs from the
17395 + generic version only in the use of cvmx_shared attribute. */
17396 +
17397 +#undef ASM_OUTPUT_ALIGNED_DECL_LOCAL
17398 +#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(STREAM, DECL, NAME, SIZE, ALIGN) \
17399 + do \
17400 + { \
17401 + if ((DECL) && TREE_CODE ((DECL)) == VAR_DECL \
17402 + && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES (DECL))) \
17403 + { \
17404 + fprintf ((STREAM), "%s", LOCAL_ASM_OP); \
17405 + assemble_name ((STREAM), (NAME)); \
17406 + fprintf ((STREAM), "\n"); \
17407 + octeon_output_shared_variable ((STREAM), (DECL), (NAME), \
17408 + (SIZE), (ALIGN)); \
17409 + } \
17410 + else \
17411 + ASM_OUTPUT_ALIGNED_LOCAL (STREAM, NAME, SIZE, ALIGN); \
17412 + } \
17413 + while (0)
17414 +
17415 +\f
17416 +/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This differs from the mips
17417 + version only in the use of cvmx_shared attribute. */
17418 +
17419 +#undef ASM_OUTPUT_ALIGNED_DECL_COMMON
17420 +#define ASM_OUTPUT_ALIGNED_DECL_COMMON(STREAM, DECL, NAME, SIZE, ALIGN) \
17421 + { \
17422 + if (TREE_CODE ((DECL)) == VAR_DECL \
17423 + && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES ((DECL)))) \
17424 + { \
17425 + if (TREE_PUBLIC ((DECL)) && DECL_NAME ((DECL))) \
17426 + targetm.asm_out.globalize_label (asm_out_file, (NAME)); \
17427 + octeon_output_shared_variable ((STREAM), (DECL), (NAME), \
17428 + (SIZE), (ALIGN)); \
17429 + } \
17430 + else \
17431 + mips_output_aligned_decl_common ((STREAM), (DECL), (NAME), (SIZE), \
17432 + (ALIGN)); \
17433 + }
17434 --- a/gcc/config/mips/predicates.md
17435 +++ b/gcc/config/mips/predicates.md
17436 @@ -211,6 +211,20 @@
17437 }
17438 })
17439
17440 +(define_predicate "mask_low_and_shift_operator"
17441 + (and (match_code "and")
17442 + (match_test "GET_CODE (XEXP (op, 0)) == ASHIFT
17443 + && GET_CODE (XEXP (op, 1)) == CONST_INT
17444 + && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT"))
17445 +{
17446 + int len;
17447 +
17448 + len = mask_low_and_shift_len (GET_MODE (op),
17449 + INTVAL (XEXP (XEXP (op, 0), 1)),
17450 + INTVAL (XEXP (op, 1)));
17451 + return 0 < len && len <= 32;
17452 +})
17453 +
17454 (define_predicate "consttable_operand"
17455 (match_test "CONSTANT_P (op)"))
17456
17457 --- a/gcc/config/mips/sde.h
17458 +++ b/gcc/config/mips/sde.h
17459 @@ -19,6 +19,9 @@
17460 along with GCC; see the file COPYING3. If not see
17461 <http://www.gnu.org/licenses/>. */
17462
17463 +#undef TARGET_MIPS_SDE
17464 +#define TARGET_MIPS_SDE 1
17465 +
17466 #undef DRIVER_SELF_SPECS
17467 #define DRIVER_SELF_SPECS \
17468 /* Make sure a -mips option is present. This helps us to pick \
17469 @@ -90,7 +93,8 @@
17470
17471 /* Use $5 as a temporary for both MIPS16 and non-MIPS16. */
17472 #undef MIPS_EPILOGUE_TEMP_REGNUM
17473 -#define MIPS_EPILOGUE_TEMP_REGNUM (GP_REG_FIRST + 5)
17474 +#define MIPS_EPILOGUE_TEMP_REGNUM \
17475 + (cfun->machine->interrupt_handler_p ? K0_REG_NUM : GP_REG_FIRST + 5)
17476
17477 /* Using long will always be right for size_t and ptrdiff_t, since
17478 sizeof(long) must equal sizeof(void *), following from the setting
17479 --- a/gcc/config/mips/sdemtk.h
17480 +++ b/gcc/config/mips/sdemtk.h
17481 @@ -19,6 +19,8 @@
17482 along with GCC; see the file COPYING3. If not see
17483 <http://www.gnu.org/licenses/>. */
17484
17485 +#define TARGET_MIPS_SDEMTK 1
17486 +
17487 #define TARGET_OS_CPP_BUILTINS() \
17488 do \
17489 { \
17490 @@ -113,3 +115,12 @@
17491 /* ...nor does the call sequence preserve $31. */
17492 #undef MIPS_SAVE_REG_FOR_PROFILING_P
17493 #define MIPS_SAVE_REG_FOR_PROFILING_P(REGNO) ((REGNO) == GP_REG_FIRST + 31)
17494 +
17495 +/* From mips.h, with mno-float option added. */
17496 +
17497 +#undef MIPS_ARCH_FLOAT_SPEC
17498 +#define MIPS_ARCH_FLOAT_SPEC \
17499 + "%{mhard-float|msoft-float|mno-float|march=mips*:; \
17500 + march=vr41*|march=m4k|march=4k*|march=24kc|march=24kec \
17501 + |march=34kc|march=74kc|march=1004kc|march=5kc|march=octeon|march=xlr: -msoft-float; \
17502 + march=*: -mhard-float}"
17503 --- /dev/null
17504 +++ b/gcc/config/mips/t-crtfm
17505 @@ -0,0 +1,9 @@
17506 +
17507 +EXTRA_MULTILIB_PARTS += crtfastmath.o
17508 +
17509 +EXTRA_PARTS += crtfastmath.o
17510 +
17511 +$(T)crtfastmath.o: $(srcdir)/config/mips/crtfastmath.c $(GCC_PASSES)
17512 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
17513 + -c -o $(T)crtfastmath.o $(srcdir)/config/mips/crtfastmath.c
17514 +
17515 --- /dev/null
17516 +++ b/gcc/config/mips/t-octeon-elf
17517 @@ -0,0 +1,41 @@
17518 +# Don't let CTOR_LIST end up in sdata section.
17519 +
17520 +CRTSTUFF_T_CFLAGS = -G 0 -fno-asynchronous-unwind-tables
17521 +
17522 +# Assemble startup files.
17523 +
17524 +$(T)crti.o: $(srcdir)/config/mips/crti.asm $(GCC_PASSES)
17525 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
17526 + -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/mips/crti.asm
17527 +
17528 +$(T)crtn.o: $(srcdir)/config/mips/crtn.asm $(GCC_PASSES)
17529 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
17530 + -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/mips/crtn.asm
17531 +
17532 +# N32 uses TFmode for long double.
17533 +
17534 +TPBIT = tp-bit.c
17535 +
17536 +tp-bit.c: $(srcdir)/config/fp-bit.c
17537 + echo '#ifdef __MIPSEL__' > tp-bit.c
17538 + echo '# define FLOAT_BIT_ORDER_MISMATCH' >> tp-bit.c
17539 + echo '#endif' >> tp-bit.c
17540 + echo '#if __LDBL_MANT_DIG__ == 113' >> tp-bit.c
17541 + echo '#define QUIET_NAN_NEGATED' >> tp-bit.c
17542 + echo '# define TFLOAT' >> tp-bit.c
17543 + cat $(srcdir)/config/fp-bit.c >> tp-bit.c
17544 + echo '#endif' >> tp-bit.c
17545 +
17546 +# We must build libgcc2.a with -G 0, in case the user wants to link
17547 +# without the $gp register.
17548 +
17549 +TARGET_LIBGCC2_CFLAGS = -G 0
17550 +
17551 +# Build both ABIs.
17552 +
17553 +MULTILIB_OPTIONS = mabi=n32/mabi=eabi/mabi=64
17554 +MULTILIB_DIRNAMES = n32 eabi n64
17555 +EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o
17556 +
17557 +LIBGCC = stmp-multilib
17558 +INSTALL_LIBGCC = install-multilib
17559 --- a/gcc/config/mips/xlr.md
17560 +++ b/gcc/config/mips/xlr.md
17561 @@ -1,5 +1,5 @@
17562 ;; DFA-based pipeline description for the XLR.
17563 -;; Copyright (C) 2008 Free Software Foundation, Inc.
17564 +;; Copyright (C) 2008, 2009 Free Software Foundation, Inc.
17565 ;;
17566 ;; xlr.md Machine Description for the RMI XLR Microprocessor
17567 ;; This file is part of GCC.
17568 @@ -31,7 +31,7 @@
17569 ;; Integer arithmetic instructions.
17570 (define_insn_reservation "ir_xlr_alu" 1
17571 (and (eq_attr "cpu" "xlr")
17572 - (eq_attr "type" "arith,shift,clz,const,unknown,multi,nop,trap"))
17573 + (eq_attr "type" "move,arith,shift,clz,logical,signext,const,unknown,multi,nop,trap"))
17574 "xlr_main_pipe")
17575
17576 ;; Integer arithmetic instructions.
17577 --- /dev/null
17578 +++ b/gcc/config/rs6000/e500mc.h
17579 @@ -0,0 +1,46 @@
17580 +/* Core target definitions for GNU compiler
17581 + for IBM RS/6000 PowerPC targeted to embedded ELF systems.
17582 + Copyright (C) 1995, 1996, 2000, 2003, 2004, 2007 Free Software Foundation, Inc.
17583 + Contributed by Cygnus Support.
17584 +
17585 + This file is part of GCC.
17586 +
17587 + GCC is free software; you can redistribute it and/or modify it
17588 + under the terms of the GNU General Public License as published
17589 + by the Free Software Foundation; either version 3, or (at your
17590 + option) any later version.
17591 +
17592 + GCC is distributed in the hope that it will be useful, but WITHOUT
17593 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17594 + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
17595 + License for more details.
17596 +
17597 + You should have received a copy of the GNU General Public License
17598 + along with GCC; see the file COPYING3. If not see
17599 + <http://www.gnu.org/licenses/>. */
17600 +
17601 +/* Add -meabi to target flags. */
17602 +#undef TARGET_DEFAULT
17603 +#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI)
17604 +
17605 +#undef TARGET_VERSION
17606 +#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded)");
17607 +
17608 +#undef TARGET_OS_CPP_BUILTINS
17609 +#define TARGET_OS_CPP_BUILTINS() \
17610 + do \
17611 + { \
17612 + builtin_define_std ("PPC"); \
17613 + builtin_define ("__embedded__"); \
17614 + builtin_assert ("system=embedded"); \
17615 + builtin_assert ("cpu=powerpc"); \
17616 + builtin_assert ("machine=powerpc"); \
17617 + TARGET_OS_SYSV_CPP_BUILTINS (); \
17618 + } \
17619 + while (0)
17620 +
17621 +#undef CC1_EXTRA_SPEC
17622 +#define CC1_EXTRA_SPEC "-maix-struct-return"
17623 +
17624 +#undef ASM_DEFAULT_SPEC
17625 +#define ASM_DEFAULT_SPEC "-mppc%{m64:64} -me500mc"
17626 --- a/gcc/config/rs6000/eabi.asm
17627 +++ b/gcc/config/rs6000/eabi.asm
17628 @@ -230,7 +230,7 @@
17629 r11 has the address of .LCTOC1 in it.
17630 r12 has the value to add to each pointer
17631 r13 .. r31 are unchanged */
17632 -
17633 +#ifdef _RELOCATABLE
17634 FUNC_START(__eabi_convert)
17635 cmplw 1,3,4 /* any pointers to convert? */
17636 subf 5,3,4 /* calculate number of words to convert */
17637 @@ -285,5 +285,5 @@
17638 blr
17639
17640 FUNC_END(__eabi_uconvert)
17641 -
17642 +#endif
17643 #endif
17644 --- a/gcc/config/rs6000/eabi-ci.asm
17645 +++ b/gcc/config/rs6000/eabi-ci.asm
17646 @@ -98,6 +98,7 @@
17647 /* Head of __init function used for static constructors. */
17648 .section ".init","ax"
17649 .align 2
17650 +FUNC_START(_init)
17651 FUNC_START(__init)
17652 stwu 1,-16(1)
17653 mflr 0
17654 @@ -106,6 +107,7 @@
17655 /* Head of __fini function used for static destructors. */
17656 .section ".fini","ax"
17657 .align 2
17658 +FUNC_START(_fini)
17659 FUNC_START(__fini)
17660 stwu 1,-16(1)
17661 mflr 0
17662 --- a/gcc/config/rs6000/eabi.h
17663 +++ b/gcc/config/rs6000/eabi.h
17664 @@ -23,10 +23,6 @@
17665 #undef TARGET_DEFAULT
17666 #define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI)
17667
17668 -/* Invoke an initializer function to set up the GOT. */
17669 -#define NAME__MAIN "__eabi"
17670 -#define INVOKE__main
17671 -
17672 #undef TARGET_VERSION
17673 #define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded)");
17674
17675 @@ -42,3 +38,20 @@
17676 TARGET_OS_SYSV_CPP_BUILTINS (); \
17677 } \
17678 while (0)
17679 +
17680 +/* Add -te500v1 and -te500v2 options for convenience in generating
17681 + multilibs. */
17682 +#undef CC1_EXTRA_SPEC
17683 +#define CC1_EXTRA_SPEC \
17684 + "%{te500v1: -mcpu=8540 -mfloat-gprs=single -mspe=yes -mabi=spe} " \
17685 + "%{te500v2: -mcpu=8548 -mfloat-gprs=double -mspe=yes -mabi=spe} " \
17686 + "%{te600: -mcpu=7400 -maltivec -mabi=altivec}" \
17687 + "%{te500mc: -mcpu=e500mc -maix-struct-return}"
17688 +
17689 +#undef ASM_DEFAULT_SPEC
17690 +#define ASM_DEFAULT_SPEC \
17691 + "%{te500v1:-mppc -mspe -me500 ; \
17692 + te500v2:-mppc -mspe -me500 ; \
17693 + te600:-mppc -maltivec ; \
17694 + te500mc:-mppc -me500mc ; \
17695 + :-mppc%{m64:64}}"
17696 --- a/gcc/config/rs6000/linux.h
17697 +++ b/gcc/config/rs6000/linux.h
17698 @@ -128,3 +128,29 @@
17699 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
17700 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128
17701 #endif
17702 +
17703 +/* Add -te500v1 and -te500v2 options for convenience in generating
17704 + multilibs. */
17705 +#undef CC1_EXTRA_SPEC
17706 +#define CC1_EXTRA_SPEC \
17707 + "%{te500v1: -mcpu=8540 -mfloat-gprs=single -mspe=yes -mabi=spe} " \
17708 + "%{te500v2: -mcpu=8548 -mfloat-gprs=double -mspe=yes -mabi=spe} " \
17709 + "%{te600: -mcpu=7400 -maltivec -mabi=altivec}" \
17710 + "%{te500mc: -mcpu=e500mc}"
17711 +
17712 +#undef ASM_DEFAULT_SPEC
17713 +#define ASM_DEFAULT_SPEC \
17714 + "%{te500v1:-mppc -mspe -me500 ; \
17715 + te500v2:-mppc -mspe -me500 ; \
17716 + te600:-mppc -maltivec ; \
17717 + te500mc:-me500mc ; \
17718 + :-mppc%{m64:64}}"
17719 +
17720 +/* The various C libraries each have their own subdirectory. */
17721 +#undef SYSROOT_SUFFIX_SPEC
17722 +#define SYSROOT_SUFFIX_SPEC \
17723 + "%{msoft-float:/nof ; \
17724 + te600:/te600 ; \
17725 + te500v1:/te500v1 ; \
17726 + te500v2:/te500v2 ; \
17727 + te500mc:/te500mc}"
17728 --- /dev/null
17729 +++ b/gcc/config/rs6000/option-defaults.h
17730 @@ -0,0 +1,64 @@
17731 +/* Definitions of default options for config/rs6000 configurations.
17732 + Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
17733 + 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
17734 + Free Software Foundation, Inc.
17735 +
17736 + This file is part of GCC.
17737 +
17738 + GCC is free software; you can redistribute it and/or modify it
17739 + under the terms of the GNU General Public License as published
17740 + by the Free Software Foundation; either version 3, or (at your
17741 + option) any later version.
17742 +
17743 + GCC is distributed in the hope that it will be useful, but WITHOUT
17744 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17745 + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
17746 + License for more details.
17747 +
17748 + Under Section 7 of GPL version 3, you are granted additional
17749 + permissions described in the GCC Runtime Library Exception, version
17750 + 3.1, as published by the Free Software Foundation.
17751 +
17752 + You should have received a copy of the GNU General Public License and
17753 + a copy of the GCC Runtime Library Exception along with this program;
17754 + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
17755 + <http://www.gnu.org/licenses/>. */
17756 +
17757 +/* This header needs to be included after any other headers affecting
17758 + TARGET_DEFAULT. */
17759 +
17760 +#if TARGET_AIX
17761 +#define OPT_64 "maix64"
17762 +#define OPT_32 "maix32"
17763 +#else
17764 +#define OPT_64 "m64"
17765 +#define OPT_32 "m32"
17766 +#endif
17767 +
17768 +#ifndef MASK_64BIT
17769 +#define MASK_64BIT 0
17770 +#endif
17771 +
17772 +#if TARGET_DEFAULT & MASK_64BIT
17773 +#define OPT_ARCH64 "!"OPT_32
17774 +#define OPT_ARCH32 OPT_32
17775 +#else
17776 +#define OPT_ARCH64 OPT_64
17777 +#define OPT_ARCH32 "!"OPT_64
17778 +#endif
17779 +
17780 +/* Support for a compile-time default CPU, et cetera. The rules are:
17781 + --with-cpu is ignored if -mcpu is specified; likewise --with-cpu-32
17782 + and --with-cpu-64.
17783 + --with-tune is ignored if -mtune is specified; likewise --with-tune-32
17784 + and --with-tune-64.
17785 + --with-float is ignored if -mhard-float or -msoft-float are
17786 + specified. */
17787 +#define OPTION_DEFAULT_SPECS \
17788 + {"cpu", "%{mcpu=*|te500mc|te500v1|te500v2|te600:;:-mcpu=%(VALUE)}" }, \
17789 + {"cpu_32", "%{" OPT_ARCH32 ":%{mcpu=*|te500mc|te500v1|te500v2|te600:;:-mcpu=%(VALUE)}}" }, \
17790 + {"cpu_64", "%{" OPT_ARCH64 ":%{mcpu=*|te500mc|te500v1|te500v2|te600:;:-mcpu=%(VALUE)}}" }, \
17791 + {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
17792 + {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \
17793 + {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \
17794 + {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }
17795 --- a/gcc/config/rs6000/paired.md
17796 +++ b/gcc/config/rs6000/paired.md
17797 @@ -27,7 +27,7 @@
17798 (UNSPEC_EXTODD_V2SF 333)
17799 ])
17800
17801 -(define_insn "negv2sf2"
17802 +(define_insn "paired_negv2sf2"
17803 [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
17804 (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))]
17805 "TARGET_PAIRED_FLOAT"
17806 @@ -41,7 +41,7 @@
17807 "ps_rsqrte %0,%1"
17808 [(set_attr "type" "fp")])
17809
17810 -(define_insn "absv2sf2"
17811 +(define_insn "paired_absv2sf2"
17812 [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
17813 (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))]
17814 "TARGET_PAIRED_FLOAT"
17815 @@ -55,7 +55,7 @@
17816 "ps_nabs %0,%1"
17817 [(set_attr "type" "fp")])
17818
17819 -(define_insn "addv2sf3"
17820 +(define_insn "paired_addv2sf3"
17821 [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
17822 (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
17823 (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
17824 @@ -63,7 +63,7 @@
17825 "ps_add %0,%1,%2"
17826 [(set_attr "type" "fp")])
17827
17828 -(define_insn "subv2sf3"
17829 +(define_insn "paired_subv2sf3"
17830 [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
17831 (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
17832 (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
17833 @@ -71,7 +71,7 @@
17834 "ps_sub %0,%1,%2"
17835 [(set_attr "type" "fp")])
17836
17837 -(define_insn "mulv2sf3"
17838 +(define_insn "paired_mulv2sf3"
17839 [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
17840 (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
17841 (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
17842 @@ -86,7 +86,7 @@
17843 "ps_res %0,%1"
17844 [(set_attr "type" "fp")])
17845
17846 -(define_insn "divv2sf3"
17847 +(define_insn "paired_divv2sf3"
17848 [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
17849 (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
17850 (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
17851 --- a/gcc/config/rs6000/rs6000.c
17852 +++ b/gcc/config/rs6000/rs6000.c
17853 @@ -919,6 +919,7 @@
17854 static bool rs6000_is_opaque_type (const_tree);
17855 static rtx rs6000_dwarf_register_span (rtx);
17856 static void rs6000_init_dwarf_reg_sizes_extra (tree);
17857 +static int rs6000_commutative_operand_precedence (const_rtx, int);
17858 static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
17859 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
17860 static rtx rs6000_tls_get_addr (void);
17861 @@ -1194,6 +1195,10 @@
17862 #undef TARGET_VECTOR_OPAQUE_P
17863 #define TARGET_VECTOR_OPAQUE_P rs6000_is_opaque_type
17864
17865 +#undef TARGET_COMMUTATIVE_OPERAND_PRECEDENCE
17866 +#define TARGET_COMMUTATIVE_OPERAND_PRECEDENCE \
17867 + rs6000_commutative_operand_precedence
17868 +
17869 #undef TARGET_DWARF_REGISTER_SPAN
17870 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
17871
17872 @@ -4682,16 +4687,19 @@
17873 if (TARGET_ALTIVEC)
17874 global_regs[VSCR_REGNO] = 1;
17875
17876 - if (TARGET_ALTIVEC_ABI)
17877 - {
17878 - for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
17879 - call_used_regs[i] = call_really_used_regs[i] = 1;
17880 + /* If we are not using the AltiVec ABI, pretend that the normally
17881 + call-saved registers are also call-used. We could use them
17882 + normally if we saved and restored them in the prologue; that
17883 + would require using the alignment padding around the register
17884 + save area, and some care with unwinding information. */
17885 + if (! TARGET_ALTIVEC_ABI)
17886 + for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
17887 + call_used_regs[i] = call_really_used_regs[i] = 1;
17888
17889 - /* AIX reserves VR20:31 in non-extended ABI mode. */
17890 - if (TARGET_XCOFF)
17891 - for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
17892 - fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
17893 - }
17894 + if (TARGET_ALTIVEC_ABI && TARGET_XCOFF)
17895 + /* AIX reserves VR20:31 in non-extended ABI mode. */
17896 + for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
17897 + fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
17898 }
17899 \f
17900 /* Try to output insns to set TARGET equal to the constant C if it can
17901 @@ -7507,10 +7515,10 @@
17902 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS },
17903 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR },
17904
17905 - { 0, CODE_FOR_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
17906 - { 0, CODE_FOR_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
17907 - { 0, CODE_FOR_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
17908 - { 0, CODE_FOR_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
17909 + { 0, CODE_FOR_paired_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
17910 + { 0, CODE_FOR_paired_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
17911 + { 0, CODE_FOR_paired_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
17912 + { 0, CODE_FOR_paired_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
17913 { 0, CODE_FOR_paired_muls0, "__builtin_paired_muls0", PAIRED_BUILTIN_MULS0 },
17914 { 0, CODE_FOR_paired_muls1, "__builtin_paired_muls1", PAIRED_BUILTIN_MULS1 },
17915 { 0, CODE_FOR_paired_merge00, "__builtin_paired_merge00", PAIRED_BUILTIN_MERGE00 },
17916 @@ -7519,10 +7527,10 @@
17917 { 0, CODE_FOR_paired_merge11, "__builtin_paired_merge11", PAIRED_BUILTIN_MERGE11 },
17918
17919 /* Place holder, leave as first spe builtin. */
17920 - { 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
17921 - { 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
17922 + { 0, CODE_FOR_addv2si3, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
17923 + { 0, CODE_FOR_andv2si3, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
17924 { 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC },
17925 - { 0, CODE_FOR_spe_evdivws, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
17926 + { 0, CODE_FOR_divv2si3, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
17927 { 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU },
17928 { 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV },
17929 { 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD },
17930 @@ -7798,7 +7806,7 @@
17931
17932 /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
17933 end with SPE_BUILTIN_EVSUBFUSIAAW. */
17934 - { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
17935 + { 0, CODE_FOR_absv2si2, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
17936 { 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW },
17937 { 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW },
17938 { 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW },
17939 @@ -7830,9 +7838,9 @@
17940 /* Place-holder. Leave as last unary SPE builtin. */
17941 { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },
17942
17943 - { 0, CODE_FOR_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
17944 + { 0, CODE_FOR_paired_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
17945 { 0, CODE_FOR_nabsv2sf2, "__builtin_paired_nabsv2sf2", PAIRED_BUILTIN_NABSV2SF2 },
17946 - { 0, CODE_FOR_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
17947 + { 0, CODE_FOR_paired_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
17948 { 0, CODE_FOR_sqrtv2sf2, "__builtin_paired_sqrtv2sf2", PAIRED_BUILTIN_SQRTV2SF2 },
17949 { 0, CODE_FOR_resv2sf2, "__builtin_paired_resv2sf2", PAIRED_BUILTIN_RESV2SF2 }
17950 };
17951 @@ -9370,6 +9378,8 @@
17952 static void
17953 rs6000_init_builtins (void)
17954 {
17955 + tree tdecl;
17956 +
17957 V2SI_type_node = build_vector_type (intSI_type_node, 2);
17958 V2SF_type_node = build_vector_type (float_type_node, 2);
17959 V4HI_type_node = build_vector_type (intHI_type_node, 4);
17960 @@ -9407,60 +9417,75 @@
17961 float_type_internal_node = float_type_node;
17962 void_type_internal_node = void_type_node;
17963
17964 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
17965 - get_identifier ("__bool char"),
17966 - bool_char_type_node));
17967 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
17968 - get_identifier ("__bool short"),
17969 - bool_short_type_node));
17970 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
17971 - get_identifier ("__bool int"),
17972 - bool_int_type_node));
17973 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
17974 - get_identifier ("__pixel"),
17975 - pixel_type_node));
17976 + tdecl = build_decl (TYPE_DECL, get_identifier ("__bool char"),
17977 + bool_char_type_node);
17978 + TYPE_NAME (bool_char_type_node) = tdecl;
17979 + (*lang_hooks.decls.pushdecl) (tdecl);
17980 + tdecl = build_decl (TYPE_DECL, get_identifier ("__bool short"),
17981 + bool_short_type_node);
17982 + TYPE_NAME (bool_short_type_node) = tdecl;
17983 + (*lang_hooks.decls.pushdecl) (tdecl);
17984 + tdecl = build_decl (TYPE_DECL, get_identifier ("__bool int"),
17985 + bool_int_type_node);
17986 + TYPE_NAME (bool_int_type_node) = tdecl;
17987 + (*lang_hooks.decls.pushdecl) (tdecl);
17988 + tdecl = build_decl (TYPE_DECL, get_identifier ("__pixel"),
17989 + pixel_type_node);
17990 + TYPE_NAME (pixel_type_node) = tdecl;
17991 + (*lang_hooks.decls.pushdecl) (tdecl);
17992
17993 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
17994 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
17995 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
17996 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
17997
17998 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
17999 - get_identifier ("__vector unsigned char"),
18000 - unsigned_V16QI_type_node));
18001 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18002 - get_identifier ("__vector signed char"),
18003 - V16QI_type_node));
18004 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18005 - get_identifier ("__vector __bool char"),
18006 - bool_V16QI_type_node));
18007 -
18008 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18009 - get_identifier ("__vector unsigned short"),
18010 - unsigned_V8HI_type_node));
18011 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18012 - get_identifier ("__vector signed short"),
18013 - V8HI_type_node));
18014 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18015 - get_identifier ("__vector __bool short"),
18016 - bool_V8HI_type_node));
18017 -
18018 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18019 - get_identifier ("__vector unsigned int"),
18020 - unsigned_V4SI_type_node));
18021 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18022 - get_identifier ("__vector signed int"),
18023 - V4SI_type_node));
18024 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18025 - get_identifier ("__vector __bool int"),
18026 - bool_V4SI_type_node));
18027 -
18028 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18029 - get_identifier ("__vector float"),
18030 - V4SF_type_node));
18031 - (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
18032 - get_identifier ("__vector __pixel"),
18033 - pixel_V8HI_type_node));
18034 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector unsigned char"),
18035 + unsigned_V16QI_type_node);
18036 + TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
18037 + (*lang_hooks.decls.pushdecl) (tdecl);
18038 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector signed char"),
18039 + V16QI_type_node);
18040 + TYPE_NAME (V16QI_type_node) = tdecl;
18041 + (*lang_hooks.decls.pushdecl) (tdecl);
18042 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector __bool char"),
18043 + bool_V16QI_type_node);
18044 + TYPE_NAME ( bool_V16QI_type_node) = tdecl;
18045 + (*lang_hooks.decls.pushdecl) (tdecl);
18046 +
18047 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector unsigned short"),
18048 + unsigned_V8HI_type_node);
18049 + TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
18050 + (*lang_hooks.decls.pushdecl) (tdecl);
18051 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector signed short"),
18052 + V8HI_type_node);
18053 + TYPE_NAME (V8HI_type_node) = tdecl;
18054 + (*lang_hooks.decls.pushdecl) (tdecl);
18055 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector __bool short"),
18056 + bool_V8HI_type_node);
18057 + TYPE_NAME (bool_V8HI_type_node) = tdecl;
18058 + (*lang_hooks.decls.pushdecl) (tdecl);
18059 +
18060 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector unsigned int"),
18061 + unsigned_V4SI_type_node);
18062 + TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
18063 + (*lang_hooks.decls.pushdecl) (tdecl);
18064 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector signed int"),
18065 + V4SI_type_node);
18066 + TYPE_NAME (V4SI_type_node) = tdecl;
18067 + (*lang_hooks.decls.pushdecl) (tdecl);
18068 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector __bool int"),
18069 + bool_V4SI_type_node);
18070 + TYPE_NAME (bool_V4SI_type_node) = tdecl;
18071 + (*lang_hooks.decls.pushdecl) (tdecl);
18072 +
18073 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector float"),
18074 + V4SF_type_node);
18075 + TYPE_NAME (V4SF_type_node) = tdecl;
18076 + (*lang_hooks.decls.pushdecl) (tdecl);
18077 + tdecl = build_decl (TYPE_DECL, get_identifier ("__vector __pixel"),
18078 + pixel_V8HI_type_node);
18079 + TYPE_NAME (pixel_V8HI_type_node) = tdecl;
18080 + (*lang_hooks.decls.pushdecl) (tdecl);
18081
18082 if (TARGET_PAIRED_FLOAT)
18083 paired_init_builtins ();
18084 @@ -15843,7 +15868,7 @@
18085 no_global_regs_above (int first, bool gpr)
18086 {
18087 int i;
18088 - for (i = first; i < gpr ? 32 : 64 ; i++)
18089 + for (i = first; i < (gpr ? 32 : 64); i++)
18090 if (global_regs[i])
18091 return false;
18092 return true;
18093 @@ -15869,11 +15894,11 @@
18094 int regno = gpr ? info->first_gp_reg_save : (info->first_fp_reg_save - 32);
18095 rtx sym;
18096 int select = ((savep ? 1 : 0) << 2
18097 - | (gpr
18098 + | (TARGET_SPE_ABI
18099 /* On the SPE, we never have any FPRs, but we do have
18100 32/64-bit versions of the routines. */
18101 - ? (TARGET_SPE_ABI && info->spe_64bit_regs_used ? 1 : 0)
18102 - : 0) << 1
18103 + ? (info->spe_64bit_regs_used ? 1 : 0)
18104 + : (gpr ? 1 : 0)) << 1
18105 | (exitp ? 1: 0));
18106
18107 /* Don't generate bogus routine names. */
18108 @@ -15908,6 +15933,7 @@
18109
18110 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
18111 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
18112 + SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
18113 }
18114
18115 return sym;
18116 @@ -16098,6 +16124,14 @@
18117 savres_gprs_inline = savres_gprs_inline || using_multiple_p;
18118 }
18119
18120 + /* Code intended for use in shared libraries cannot be reliably linked
18121 + with out-of-line prologues and epilogues. */
18122 + if (flag_pic)
18123 + {
18124 + savres_gprs_inline = 1;
18125 + savres_fprs_inline = 1;
18126 + }
18127 +
18128 return (using_multiple_p
18129 | (savres_fprs_inline << 1)
18130 | (savres_gprs_inline << 2));
18131 @@ -16122,7 +16156,7 @@
18132 int using_store_multiple;
18133 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
18134 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
18135 - && !call_used_regs[STATIC_CHAIN_REGNUM]);
18136 + && call_used_regs[STATIC_CHAIN_REGNUM]);
18137 HOST_WIDE_INT sp_offset = 0;
18138
18139 if (TARGET_FIX_AND_CONTINUE)
18140 @@ -16924,8 +16958,9 @@
18141 || (cfun->calls_alloca
18142 && !frame_pointer_needed));
18143 restore_lr = (info->lr_save_p
18144 - && restoring_GPRs_inline
18145 - && restoring_FPRs_inline);
18146 + && (restoring_GPRs_inline
18147 + || (restoring_FPRs_inline
18148 + && info->first_fp_reg_save < 64)));
18149
18150 if (WORLD_SAVE_P (info))
18151 {
18152 @@ -17197,7 +17232,7 @@
18153
18154 /* Get the old lr if we saved it. If we are restoring registers
18155 out-of-line, then the out-of-line routines can do this for us. */
18156 - if (restore_lr)
18157 + if (restore_lr && restoring_GPRs_inline)
18158 {
18159 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
18160 info->lr_save_offset + sp_offset);
18161 @@ -17216,7 +17251,7 @@
18162 }
18163
18164 /* Set LR here to try to overlap restores below. */
18165 - if (restore_lr)
18166 + if (restore_lr && restoring_GPRs_inline)
18167 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
18168 gen_rtx_REG (Pmode, 0));
18169
18170 @@ -17396,6 +17431,18 @@
18171 }
18172 }
18173
18174 + if (restore_lr && !restoring_GPRs_inline)
18175 + {
18176 + rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
18177 + info->lr_save_offset + sp_offset);
18178 +
18179 + emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
18180 + }
18181 +
18182 + if (restore_lr && !restoring_GPRs_inline)
18183 + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
18184 + gen_rtx_REG (Pmode, 0));
18185 +
18186 /* Restore fpr's if we need to do it without calling a function. */
18187 if (restoring_FPRs_inline)
18188 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
18189 @@ -22163,6 +22210,30 @@
18190 return 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
18191 }
18192
18193 +/* Return a value indicating whether OP, an operand of a commutative
18194 + operation, is preferred as the first or second operand. The higher
18195 + the value, the stronger the preference for being the first operand.
18196 + We use negative values to indicate a preference for the first operand
18197 + and positive values for the second operand.
18198 + VALUE is the default precedence for OP; see rtlanal.c:
18199 + commutative_operand_precedence. */
18200 +
18201 +static int
18202 +rs6000_commutative_operand_precedence (const_rtx op, int value)
18203 +{
18204 + /* Prefer pointer objects over non pointer objects.
18205 + For rationale see PR28690. */
18206 + if (GET_RTX_CLASS (GET_CODE (op)) == RTX_OBJ
18207 + && ((REG_P (op) && REG_POINTER (op))
18208 + || (MEM_P (op) && MEM_POINTER (op))))
18209 + /* value = -1 */;
18210 + else
18211 + /* value = -2 */
18212 + --value;
18213 +
18214 + return value;
18215 +}
18216 +
18217 /* Returns a code for a target-specific builtin that implements
18218 reciprocal of the function, or NULL_TREE if not available. */
18219
18220 @@ -22686,12 +22757,16 @@
18221 static rtx
18222 rs6000_dwarf_register_span (rtx reg)
18223 {
18224 - unsigned regno;
18225 + rtx parts[8];
18226 + int i, words;
18227 + unsigned regno = REGNO (reg);
18228 + enum machine_mode mode = GET_MODE (reg);
18229
18230 if (TARGET_SPE
18231 + && regno < 32
18232 && (SPE_VECTOR_MODE (GET_MODE (reg))
18233 - || (TARGET_E500_DOUBLE
18234 - && (GET_MODE (reg) == DFmode || GET_MODE (reg) == DDmode))))
18235 + || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
18236 + && mode != SFmode && mode != SDmode && mode != SCmode)))
18237 ;
18238 else
18239 return NULL_RTX;
18240 @@ -22701,15 +22776,23 @@
18241 /* The duality of the SPE register size wreaks all kinds of havoc.
18242 This is a way of distinguishing r0 in 32-bits from r0 in
18243 64-bits. */
18244 - return
18245 - gen_rtx_PARALLEL (VOIDmode,
18246 - BYTES_BIG_ENDIAN
18247 - ? gen_rtvec (2,
18248 - gen_rtx_REG (SImode, regno + 1200),
18249 - gen_rtx_REG (SImode, regno))
18250 - : gen_rtvec (2,
18251 - gen_rtx_REG (SImode, regno),
18252 - gen_rtx_REG (SImode, regno + 1200)));
18253 + words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
18254 + gcc_assert (words <= 4);
18255 + for (i = 0; i < words; i++, regno++)
18256 + {
18257 + if (BYTES_BIG_ENDIAN)
18258 + {
18259 + parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
18260 + parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
18261 + }
18262 + else
18263 + {
18264 + parts[2 * i] = gen_rtx_REG (SImode, regno);
18265 + parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
18266 + }
18267 + }
18268 +
18269 + return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
18270 }
18271
18272 /* Fill in sizes for SPE register high parts in table used by unwinder. */
18273 --- a/gcc/config/rs6000/rs6000.h
18274 +++ b/gcc/config/rs6000/rs6000.h
18275 @@ -368,16 +368,6 @@
18276 previous_group
18277 };
18278
18279 -/* Support for a compile-time default CPU, et cetera. The rules are:
18280 - --with-cpu is ignored if -mcpu is specified.
18281 - --with-tune is ignored if -mtune is specified.
18282 - --with-float is ignored if -mhard-float or -msoft-float are
18283 - specified. */
18284 -#define OPTION_DEFAULT_SPECS \
18285 - {"cpu", "%{!mcpu=*:-mcpu=%(VALUE)}" }, \
18286 - {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
18287 - {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }
18288 -
18289 /* rs6000_select[0] is reserved for the default cpu defined via --with-cpu */
18290 struct rs6000_cpu_select
18291 {
18292 @@ -794,8 +784,8 @@
18293 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
18294 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \
18295 /* AltiVec registers. */ \
18296 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
18297 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
18298 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
18299 + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
18300 1, 1 \
18301 , 1, 1, 1 \
18302 }
18303 @@ -813,8 +803,8 @@
18304 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
18305 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \
18306 /* AltiVec registers. */ \
18307 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
18308 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
18309 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
18310 + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
18311 0, 0 \
18312 , 0, 0, 0 \
18313 }
18314 --- a/gcc/config/rs6000/rs6000.md
18315 +++ b/gcc/config/rs6000/rs6000.md
18316 @@ -14703,9 +14703,9 @@
18317 [(match_parallel 0 "any_parallel_operand"
18318 [(clobber (reg:P 65))
18319 (use (match_operand:P 1 "symbol_ref_operand" "s"))
18320 - (use (match_operand:P 2 "gpc_reg_operand" "r"))
18321 - (set (match_operand:P 3 "memory_operand" "=m")
18322 - (match_operand:P 4 "gpc_reg_operand" "r"))])]
18323 + (use (reg:P 11))
18324 + (set (match_operand:P 2 "memory_operand" "=m")
18325 + (match_operand:P 3 "gpc_reg_operand" "r"))])]
18326 ""
18327 "bl %z1"
18328 [(set_attr "type" "branch")
18329 @@ -14715,9 +14715,9 @@
18330 [(match_parallel 0 "any_parallel_operand"
18331 [(clobber (reg:P 65))
18332 (use (match_operand:P 1 "symbol_ref_operand" "s"))
18333 - (use (match_operand:P 2 "gpc_reg_operand" "r"))
18334 - (set (match_operand:DF 3 "memory_operand" "=m")
18335 - (match_operand:DF 4 "gpc_reg_operand" "f"))])]
18336 + (use (reg:P 11))
18337 + (set (match_operand:DF 2 "memory_operand" "=m")
18338 + (match_operand:DF 3 "gpc_reg_operand" "f"))])]
18339 ""
18340 "bl %z1"
18341 [(set_attr "type" "branch")
18342 @@ -14810,9 +14810,9 @@
18343 [(match_parallel 0 "any_parallel_operand"
18344 [(clobber (match_operand:P 1 "register_operand" "=l"))
18345 (use (match_operand:P 2 "symbol_ref_operand" "s"))
18346 - (use (match_operand:P 3 "gpc_reg_operand" "r"))
18347 - (set (match_operand:P 4 "gpc_reg_operand" "=r")
18348 - (match_operand:P 5 "memory_operand" "m"))])]
18349 + (use (reg:P 11))
18350 + (set (match_operand:P 3 "gpc_reg_operand" "=r")
18351 + (match_operand:P 4 "memory_operand" "m"))])]
18352 ""
18353 "bl %z2"
18354 [(set_attr "type" "branch")
18355 @@ -14823,9 +14823,9 @@
18356 [(return)
18357 (clobber (match_operand:P 1 "register_operand" "=l"))
18358 (use (match_operand:P 2 "symbol_ref_operand" "s"))
18359 - (use (match_operand:P 3 "gpc_reg_operand" "r"))
18360 - (set (match_operand:P 4 "gpc_reg_operand" "=r")
18361 - (match_operand:P 5 "memory_operand" "m"))])]
18362 + (use (reg:P 11))
18363 + (set (match_operand:P 3 "gpc_reg_operand" "=r")
18364 + (match_operand:P 4 "memory_operand" "m"))])]
18365 ""
18366 "b %z2"
18367 [(set_attr "type" "branch")
18368 @@ -14836,9 +14836,9 @@
18369 [(return)
18370 (clobber (match_operand:P 1 "register_operand" "=l"))
18371 (use (match_operand:P 2 "symbol_ref_operand" "s"))
18372 - (use (match_operand:P 3 "gpc_reg_operand" "r"))
18373 - (set (match_operand:DF 4 "gpc_reg_operand" "=f")
18374 - (match_operand:DF 5 "memory_operand" "m"))])]
18375 + (use (reg:P 11))
18376 + (set (match_operand:DF 3 "gpc_reg_operand" "=f")
18377 + (match_operand:DF 4 "memory_operand" "m"))])]
18378 ""
18379 "b %z2"
18380 [(set_attr "type" "branch")
18381 @@ -14889,6 +14889,120 @@
18382 }"
18383 [(set_attr "type" "load")])
18384 \f
18385 +;;; Expanders for vector insn patterns shared between the SPE and TARGET_PAIRED systems.
18386 +
18387 +(define_expand "absv2sf2"
18388 + [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
18389 + (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))]
18390 + "TARGET_PAIRED_FLOAT || TARGET_SPE"
18391 + "
18392 +{
18393 + if (TARGET_SPE)
18394 + {
18395 + /* We need to make a note that we clobber SPEFSCR. */
18396 + emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18397 + gen_rtx_ABS (V2SFmode, operands[1])));
18398 + emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO)));
18399 + DONE;
18400 + }
18401 +}")
18402 +
18403 +(define_expand "negv2sf2"
18404 + [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
18405 + (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))]
18406 + "TARGET_PAIRED_FLOAT || TARGET_SPE"
18407 + "
18408 +{
18409 + if (TARGET_SPE)
18410 + {
18411 + /* We need to make a note that we clobber SPEFSCR. */
18412 + emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18413 + gen_rtx_NEG (V2SFmode, operands[1])));
18414 + emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO)));
18415 + DONE;
18416 + }
18417 +}")
18418 +
18419 +(define_expand "addv2sf3"
18420 + [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
18421 + (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
18422 + (match_operand:V2SF 2 "gpc_reg_operand" "")))]
18423 + "TARGET_PAIRED_FLOAT || TARGET_SPE"
18424 + "
18425 +{
18426 + if (TARGET_SPE)
18427 + {
18428 + /* We need to make a note that we clobber SPEFSCR. */
18429 + rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
18430 +
18431 + XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
18432 + gen_rtx_PLUS (V2SFmode, operands[1], operands[2]));
18433 + XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
18434 + emit_insn (par);
18435 + DONE;
18436 + }
18437 +}")
18438 +
18439 +(define_expand "subv2sf3"
18440 + [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
18441 + (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
18442 + (match_operand:V2SF 2 "gpc_reg_operand" "")))]
18443 + "TARGET_PAIRED_FLOAT || TARGET_SPE"
18444 + "
18445 +{
18446 + if (TARGET_SPE)
18447 + {
18448 + /* We need to make a note that we clobber SPEFSCR. */
18449 + rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
18450 +
18451 + XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
18452 + gen_rtx_MINUS (V2SFmode, operands[1], operands[2]));
18453 + XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
18454 + emit_insn (par);
18455 + DONE;
18456 + }
18457 +}")
18458 +
18459 +(define_expand "mulv2sf3"
18460 + [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
18461 + (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
18462 + (match_operand:V2SF 2 "gpc_reg_operand" "")))]
18463 + "TARGET_PAIRED_FLOAT || TARGET_SPE"
18464 + "
18465 +{
18466 + if (TARGET_SPE)
18467 + {
18468 + /* We need to make a note that we clobber SPEFSCR. */
18469 + rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
18470 +
18471 + XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
18472 + gen_rtx_MULT (V2SFmode, operands[1], operands[2]));
18473 + XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
18474 + emit_insn (par);
18475 + DONE;
18476 + }
18477 +}")
18478 +
18479 +(define_expand "divv2sf3"
18480 + [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
18481 + (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
18482 + (match_operand:V2SF 2 "gpc_reg_operand" "")))]
18483 + "TARGET_PAIRED_FLOAT || TARGET_SPE"
18484 + "
18485 +{
18486 + if (TARGET_SPE)
18487 + {
18488 + /* We need to make a note that we clobber SPEFSCR. */
18489 + rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
18490 +
18491 + XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
18492 + gen_rtx_DIV (V2SFmode, operands[1], operands[2]));
18493 + XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
18494 + emit_insn (par);
18495 + DONE;
18496 + }
18497 +}")
18498 +\f
18499
18500 (include "sync.md")
18501 (include "altivec.md")
18502 --- a/gcc/config/rs6000/spe.md
18503 +++ b/gcc/config/rs6000/spe.md
18504 @@ -164,7 +164,7 @@
18505
18506 ;; SPE SIMD instructions
18507
18508 -(define_insn "spe_evabs"
18509 +(define_insn "absv2si2"
18510 [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
18511 (abs:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")))]
18512 "TARGET_SPE"
18513 @@ -181,7 +181,7 @@
18514 [(set_attr "type" "vecsimple")
18515 (set_attr "length" "4")])
18516
18517 -(define_insn "spe_evand"
18518 +(define_insn "andv2si3"
18519 [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
18520 (and:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
18521 (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
18522 @@ -1898,7 +1898,7 @@
18523 [(set_attr "type" "veccomplex")
18524 (set_attr "length" "4")])
18525
18526 -(define_insn "spe_evaddw"
18527 +(define_insn "addv2si3"
18528 [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
18529 (plus:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
18530 (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
18531 @@ -2028,7 +2028,7 @@
18532 [(set_attr "type" "veccomplex")
18533 (set_attr "length" "4")])
18534
18535 -(define_insn "spe_evdivws"
18536 +(define_insn "divv2si3"
18537 [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
18538 (div:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
18539 (match_operand:V2SI 2 "gpc_reg_operand" "r")))
18540 @@ -3156,9 +3156,9 @@
18541 [(match_parallel 0 "any_parallel_operand"
18542 [(clobber (reg:P 65))
18543 (use (match_operand:P 1 "symbol_ref_operand" "s"))
18544 - (use (match_operand:P 2 "gpc_reg_operand" "r"))
18545 - (set (match_operand:V2SI 3 "memory_operand" "=m")
18546 - (match_operand:V2SI 4 "gpc_reg_operand" "r"))])]
18547 + (use (reg:P 11))
18548 + (set (match_operand:V2SI 2 "memory_operand" "=m")
18549 + (match_operand:V2SI 3 "gpc_reg_operand" "r"))])]
18550 "TARGET_SPE_ABI"
18551 "bl %z1"
18552 [(set_attr "type" "branch")
18553 @@ -3168,9 +3168,9 @@
18554 [(match_parallel 0 "any_parallel_operand"
18555 [(clobber (reg:P 65))
18556 (use (match_operand:P 1 "symbol_ref_operand" "s"))
18557 - (use (match_operand:P 2 "gpc_reg_operand" "r"))
18558 - (set (match_operand:V2SI 3 "gpc_reg_operand" "=r")
18559 - (match_operand:V2SI 4 "memory_operand" "m"))])]
18560 + (use (reg:P 11))
18561 + (set (match_operand:V2SI 2 "gpc_reg_operand" "=r")
18562 + (match_operand:V2SI 3 "memory_operand" "m"))])]
18563 "TARGET_SPE_ABI"
18564 "bl %z1"
18565 [(set_attr "type" "branch")
18566 @@ -3181,9 +3181,9 @@
18567 [(return)
18568 (clobber (reg:P 65))
18569 (use (match_operand:P 1 "symbol_ref_operand" "s"))
18570 - (use (match_operand:P 2 "gpc_reg_operand" "r"))
18571 - (set (match_operand:V2SI 3 "gpc_reg_operand" "=r")
18572 - (match_operand:V2SI 4 "memory_operand" "m"))])]
18573 + (use (reg:P 11))
18574 + (set (match_operand:V2SI 2 "gpc_reg_operand" "=r")
18575 + (match_operand:V2SI 3 "memory_operand" "m"))])]
18576 "TARGET_SPE_ABI"
18577 "b %z1"
18578 [(set_attr "type" "branch")
18579 --- a/gcc/config/rs6000/sysv4.h
18580 +++ b/gcc/config/rs6000/sysv4.h
18581 @@ -619,6 +619,9 @@
18582 #define CC1_SECURE_PLT_DEFAULT_SPEC ""
18583 #endif
18584
18585 +#undef CC1_EXTRA_SPEC
18586 +#define CC1_EXTRA_SPEC ""
18587 +
18588 /* Pass -G xxx to the compiler and set correct endian mode. */
18589 #define CC1_SPEC "%{G*} %(cc1_cpu) \
18590 %{mlittle|mlittle-endian: %(cc1_endian_little); \
18591 @@ -643,7 +646,7 @@
18592 %{msdata: -msdata=default} \
18593 %{mno-sdata: -msdata=none} \
18594 %{!mbss-plt: %{!msecure-plt: %(cc1_secure_plt_default)}} \
18595 -%{profile: -p}"
18596 +%{profile: -p}" CC1_EXTRA_SPEC
18597
18598 /* Don't put -Y P,<path> for cross compilers. */
18599 #ifndef CROSS_DIRECTORY_STRUCTURE
18600 @@ -843,15 +846,15 @@
18601 #define CPP_OS_MVME_SPEC ""
18602
18603 /* PowerPC simulator based on netbsd system calls support. */
18604 -#define LIB_SIM_SPEC "--start-group -lsim -lc --end-group"
18605 +#define LIB_SIM_SPEC LIB_DEFAULT_SPEC
18606
18607 -#define STARTFILE_SIM_SPEC "ecrti.o%s sim-crt0.o%s crtbegin.o%s"
18608 +#define STARTFILE_SIM_SPEC "ecrti.o%s crtbegin.o%s"
18609
18610 -#define ENDFILE_SIM_SPEC "crtend.o%s ecrtn.o%s"
18611 +#define ENDFILE_SIM_SPEC "crtend.o%s ecrtn.o%s -Tsim-hosted.ld"
18612
18613 #define LINK_START_SIM_SPEC ""
18614
18615 -#define LINK_OS_SIM_SPEC "-m elf32ppcsim"
18616 +#define LINK_OS_SIM_SPEC ""
18617
18618 #define CPP_OS_SIM_SPEC ""
18619
18620 --- a/gcc/config/rs6000/t-ppccomm
18621 +++ b/gcc/config/rs6000/t-ppccomm
18622 @@ -3,10 +3,23 @@
18623 LIB2FUNCS_EXTRA += tramp.S $(srcdir)/config/rs6000/darwin-ldouble.c
18624
18625 # These can't end up in shared libgcc
18626 -LIB2FUNCS_STATIC_EXTRA = eabi.S
18627 -
18628 -eabi.S: $(srcdir)/config/rs6000/eabi.asm
18629 - cat $(srcdir)/config/rs6000/eabi.asm > eabi.S
18630 +LIB2FUNCS_STATIC_EXTRA = \
18631 + crtsavfpr.S crtresfpr.S \
18632 + crtsavgpr.S crtresgpr.S \
18633 + crtresxfpr.S crtresxgpr.S \
18634 + e500crtres32gpr.S \
18635 + e500crtres64gpr.S \
18636 + e500crtres64gprctr.S \
18637 + e500crtrest32gpr.S \
18638 + e500crtrest64gpr.S \
18639 + e500crtresx32gpr.S \
18640 + e500crtresx64gpr.S \
18641 + e500crtsav32gpr.S \
18642 + e500crtsav64gpr.S \
18643 + e500crtsav64gprctr.S \
18644 + e500crtsavg32gpr.S \
18645 + e500crtsavg64gpr.S \
18646 + e500crtsavg64gprctr.S
18647
18648 tramp.S: $(srcdir)/config/rs6000/tramp.asm
18649 cat $(srcdir)/config/rs6000/tramp.asm > tramp.S
18650 @@ -36,6 +49,63 @@
18651 ncrtn.S: $(srcdir)/config/rs6000/sol-cn.asm
18652 cat $(srcdir)/config/rs6000/sol-cn.asm >ncrtn.S
18653
18654 +crtsavfpr.S: $(srcdir)/config/rs6000/crtsavfpr.asm
18655 + cat $(srcdir)/config/rs6000/crtsavfpr.asm >crtsavfpr.S
18656 +
18657 +crtresfpr.S: $(srcdir)/config/rs6000/crtresfpr.asm
18658 + cat $(srcdir)/config/rs6000/crtresfpr.asm >crtresfpr.S
18659 +
18660 +crtsavgpr.S: $(srcdir)/config/rs6000/crtsavgpr.asm
18661 + cat $(srcdir)/config/rs6000/crtsavgpr.asm >crtsavgpr.S
18662 +
18663 +crtresgpr.S: $(srcdir)/config/rs6000/crtresgpr.asm
18664 + cat $(srcdir)/config/rs6000/crtresgpr.asm >crtresgpr.S
18665 +
18666 +crtresxfpr.S: $(srcdir)/config/rs6000/crtresxfpr.asm
18667 + cat $(srcdir)/config/rs6000/crtresxfpr.asm >crtresxfpr.S
18668 +
18669 +crtresxgpr.S: $(srcdir)/config/rs6000/crtresxgpr.asm
18670 + cat $(srcdir)/config/rs6000/crtresxgpr.asm >crtresxgpr.S
18671 +
18672 +e500crtres32gpr.S: $(srcdir)/config/rs6000/e500crtres32gpr.asm
18673 + cat $(srcdir)/config/rs6000/e500crtres32gpr.asm >e500crtres32gpr.S
18674 +
18675 +e500crtres64gpr.S: $(srcdir)/config/rs6000/e500crtres64gpr.asm
18676 + cat $(srcdir)/config/rs6000/e500crtres64gpr.asm >e500crtres64gpr.S
18677 +
18678 +e500crtres64gprctr.S: $(srcdir)/config/rs6000/e500crtres64gprctr.asm
18679 + cat $(srcdir)/config/rs6000/e500crtres64gprctr.asm >e500crtres64gprctr.S
18680 +
18681 +e500crtrest32gpr.S: $(srcdir)/config/rs6000/e500crtrest32gpr.asm
18682 + cat $(srcdir)/config/rs6000/e500crtrest32gpr.asm >e500crtrest32gpr.S
18683 +
18684 +e500crtrest64gpr.S: $(srcdir)/config/rs6000/e500crtrest64gpr.asm
18685 + cat $(srcdir)/config/rs6000/e500crtrest64gpr.asm >e500crtrest64gpr.S
18686 +
18687 +e500crtresx32gpr.S: $(srcdir)/config/rs6000/e500crtresx32gpr.asm
18688 + cat $(srcdir)/config/rs6000/e500crtresx32gpr.asm >e500crtresx32gpr.S
18689 +
18690 +e500crtresx64gpr.S: $(srcdir)/config/rs6000/e500crtresx64gpr.asm
18691 + cat $(srcdir)/config/rs6000/e500crtresx64gpr.asm >e500crtresx64gpr.S
18692 +
18693 +e500crtsav32gpr.S: $(srcdir)/config/rs6000/e500crtsav32gpr.asm
18694 + cat $(srcdir)/config/rs6000/e500crtsav32gpr.asm >e500crtsav32gpr.S
18695 +
18696 +e500crtsav64gpr.S: $(srcdir)/config/rs6000/e500crtsav64gpr.asm
18697 + cat $(srcdir)/config/rs6000/e500crtsav64gpr.asm >e500crtsav64gpr.S
18698 +
18699 +e500crtsav64gprctr.S: $(srcdir)/config/rs6000/e500crtsav64gprctr.asm
18700 + cat $(srcdir)/config/rs6000/e500crtsav64gprctr.asm >e500crtsav64gprctr.S
18701 +
18702 +e500crtsavg32gpr.S: $(srcdir)/config/rs6000/e500crtsavg32gpr.asm
18703 + cat $(srcdir)/config/rs6000/e500crtsavg32gpr.asm >e500crtsavg32gpr.S
18704 +
18705 +e500crtsavg64gpr.S: $(srcdir)/config/rs6000/e500crtsavg64gpr.asm
18706 + cat $(srcdir)/config/rs6000/e500crtsavg64gpr.asm >e500crtsavg64gpr.S
18707 +
18708 +e500crtsavg64gprctr.S: $(srcdir)/config/rs6000/e500crtsavg64gprctr.asm
18709 + cat $(srcdir)/config/rs6000/e500crtsavg64gprctr.asm >e500crtsavg64gprctr.S
18710 +
18711 # Build multiple copies of ?crt{i,n}.o, one for each target switch.
18712 $(T)ecrti$(objext): ecrti.S
18713 $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ecrti.S -o $(T)ecrti$(objext)
18714 @@ -49,6 +119,63 @@
18715 $(T)ncrtn$(objext): ncrtn.S
18716 $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ncrtn.S -o $(T)ncrtn$(objext)
18717
18718 +$(T)crtsavfpr$(objext): crtsavfpr.S
18719 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavfpr.S -o $(T)crtsavfpr$(objext)
18720 +
18721 +$(T)crtresfpr$(objext): crtresfpr.S
18722 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresfpr.S -o $(T)crtresfpr$(objext)
18723 +
18724 +$(T)crtsavgpr$(objext): crtsavgpr.S
18725 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavgpr.S -o $(T)crtsavgpr$(objext)
18726 +
18727 +$(T)crtresgpr$(objext): crtresgpr.S
18728 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresgpr.S -o $(T)crtresgpr$(objext)
18729 +
18730 +$(T)crtresxfpr$(objext): crtresxfpr.S
18731 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresxfpr.S -o $(T)crtresxfpr$(objext)
18732 +
18733 +$(T)crtresxgpr$(objext): crtresxgpr.S
18734 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresxgpr.S -o $(T)crtresxgpr$(objext)
18735 +
18736 +$(T)e500crtres32gpr$(objext): e500crtres32gpr.S
18737 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtres32gpr.S -o $(T)e500crtres32gpr$(objext)
18738 +
18739 +$(T)e500crtres64gpr$(objext): e500crtres64gpr.S
18740 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtres64gpr.S -o $(T)e500crtres64gpr$(objext)
18741 +
18742 +$(T)e500crtres64gprctr$(objext): e500crtres64gprctr.S
18743 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtres64gprctr.S -o $(T)e500crtres64gprctr$(objext)
18744 +
18745 +$(T)e500crtrest32gpr$(objext): e500crtrest32gpr.S
18746 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtrest32gpr.S -o $(T)e500crtrest32gpr$(objext)
18747 +
18748 +$(T)e500crtrest64gpr$(objext): e500crtrest64gpr.S
18749 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtrest64gpr.S -o $(T)e500crtrest64gpr$(objext)
18750 +
18751 +$(T)e500crtresx32gpr$(objext): e500crtresx32gpr.S
18752 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtresx32gpr.S -o $(T)e500crtresx32gpr$(objext)
18753 +
18754 +$(T)e500crtresx64gpr$(objext): e500crtresx64gpr.S
18755 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtresx64gpr.S -o $(T)e500crtresx64gpr$(objext)
18756 +
18757 +$(T)e500crtsav32gpr$(objext): e500crtsav32gpr.S
18758 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsav32gpr.S -o $(T)e500crtsav32gpr$(objext)
18759 +
18760 +$(T)e500crtsav64gpr$(objext): e500crtsav64gpr.S
18761 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsav64gpr.S -o $(T)e500crtsav64gpr$(objext)
18762 +
18763 +$(T)e500crtsav64gprctr$(objext): e500crtsav64gprctr.S
18764 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsav64gprctr.S -o $(T)e500crtsav64gprctr$(objext)
18765 +
18766 +$(T)e500crtsavg32gpr$(objext): e500crtsavg32gpr.S
18767 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsavg32gpr.S -o $(T)e500crtsavg32gpr$(objext)
18768 +
18769 +$(T)e500crtsavg64gpr$(objext): e500crtsavg64gpr.S
18770 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsavg64gpr.S -o $(T)e500crtsavg64gpr$(objext)
18771 +
18772 +$(T)e500crtsavg64gprctr$(objext): e500crtsavg64gprctr.S
18773 + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsavg64gprctr.S -o $(T)e500crtsavg64gprctr$(objext)
18774 +
18775 # It is important that crtbegin.o, etc., aren't surprised by stuff in .sdata.
18776 CRTSTUFF_T_CFLAGS = -msdata=none
18777 # Make sure crt*.o are built with -fPIC even if configured with
18778 --- /dev/null
18779 +++ b/gcc/config/rs6000/t-ppc-e500mc
18780 @@ -0,0 +1,12 @@
18781 +# Multilibs for powerpc embedded ELF targets.
18782 +
18783 +MULTILIB_OPTIONS =
18784 +
18785 +MULTILIB_DIRNAMES =
18786 +
18787 +MULTILIB_EXCEPTIONS =
18788 +
18789 +MULTILIB_EXTRA_OPTS = mno-eabi mstrict-align
18790 +
18791 +MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT} \
18792 + ${MULTILIB_MATCHES_ENDIAN}
18793 --- a/gcc/config/sh/lib1funcs.asm
18794 +++ b/gcc/config/sh/lib1funcs.asm
18795 @@ -2080,8 +2080,9 @@
18796 GLOBAL(ic_invalidate):
18797 ocbwb @r4
18798 synco
18799 - rts
18800 icbi @r4
18801 + rts
18802 + nop
18803 ENDFUNC(GLOBAL(ic_invalidate))
18804 #elif defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || (defined(__SH4_NOFPU__) && !defined(__SH5__))
18805 /* For system code, we use ic_invalidate_line_i, but user code
18806 @@ -2147,8 +2148,10 @@
18807 GLOBAL(ic_invalidate_array):
18808 add r1,r4
18809 synco
18810 - rts
18811 icbi @r4
18812 + rts
18813 + nop
18814 + .align 2
18815 .long 0
18816 ENDFUNC(GLOBAL(ic_invalidate_array))
18817 #elif defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || (defined(__SH4_NOFPU__) && !defined(__SH5__))
18818 --- a/gcc/config/sh/linux-unwind.h
18819 +++ b/gcc/config/sh/linux-unwind.h
18820 @@ -24,7 +24,10 @@
18821
18822
18823 /* Do code reading to identify a signal frame, and set the frame
18824 - state data appropriately. See unwind-dw2.c for the structs. */
18825 + state data appropriately. See unwind-dw2.c for the structs.
18826 + Don't use this at all if inhibit_libc is used. */
18827 +
18828 +#ifndef inhibit_libc
18829
18830 #include <signal.h>
18831 #include <sys/ucontext.h>
18832 @@ -248,3 +251,5 @@
18833 return _URC_NO_REASON;
18834 }
18835 #endif /* defined (__SH5__) */
18836 +
18837 +#endif /* inhibit_libc */
18838 --- a/gcc/config/sh/sh.h
18839 +++ b/gcc/config/sh/sh.h
18840 @@ -712,8 +712,9 @@
18841 /* Never run scheduling before reload, since that can \
18842 break global alloc, and generates slower code anyway due \
18843 to the pressure on R0. */ \
18844 - /* Enable sched1 for SH4; ready queue will be reordered by \
18845 - the target hooks when pressure is high. We can not do this for \
18846 + /* Enable sched1 for SH4 if the user explicitly requests. \
18847 + When sched1 is enabled, the ready queue will be reordered by \
18848 + the target hooks if pressure is high. We can not do this for \
18849 PIC, SH3 and lower as they give spill failures for R0. */ \
18850 if (!TARGET_HARD_SH4 || flag_pic) \
18851 flag_schedule_insns = 0; \
18852 @@ -728,6 +729,8 @@
18853 warning (0, "ignoring -fschedule-insns because of exception handling bug"); \
18854 flag_schedule_insns = 0; \
18855 } \
18856 + else if (flag_schedule_insns == 2) \
18857 + flag_schedule_insns = 0; \
18858 } \
18859 \
18860 if (align_loops == 0) \
18861 --- a/gcc/config/sol2.h
18862 +++ b/gcc/config/sol2.h
18863 @@ -123,12 +123,12 @@
18864 %{YP,*} \
18865 %{R*} \
18866 %{compat-bsd: \
18867 - %{!YP,*:%{p|pg:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
18868 - %{!p:%{!pg:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}}} \
18869 - -R /usr/ucblib} \
18870 + %{!YP,*:%{p|pg:-Y P,%R/usr/ucblib:%R/usr/ccs/lib/libp:%R/usr/lib/libp:%R/usr/ccs/lib:%R/usr/lib} \
18871 + %{!p:%{!pg:-Y P,%R/usr/ucblib:%R/usr/ccs/lib:%R/usr/lib}}} \
18872 + -R %R/usr/ucblib} \
18873 %{!compat-bsd: \
18874 - %{!YP,*:%{p|pg:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
18875 - %{!p:%{!pg:-Y P,/usr/ccs/lib:/usr/lib}}}}"
18876 + %{!YP,*:%{p|pg:-Y P,%R/usr/ccs/lib/libp:%R/usr/lib/libp:%R/usr/ccs/lib:%R/usr/lib} \
18877 + %{!p:%{!pg:-Y P,%R/usr/ccs/lib:%R/usr/lib}}}}"
18878
18879 #undef LINK_ARCH32_SPEC
18880 #define LINK_ARCH32_SPEC LINK_ARCH32_SPEC_BASE
18881 --- a/gcc/config/sparc/linux64.h
18882 +++ b/gcc/config/sparc/linux64.h
18883 @@ -40,10 +40,15 @@
18884 in a Medium/Low code model environment. */
18885
18886 #undef TARGET_DEFAULT
18887 +#ifdef BIARCH_32BIT_DEFAULT
18888 +#define TARGET_DEFAULT \
18889 + (MASK_APP_REGS + MASK_FPU)
18890 +#else
18891 #define TARGET_DEFAULT \
18892 (MASK_V9 + MASK_PTR64 + MASK_64BIT /* + MASK_HARD_QUAD */ \
18893 + MASK_STACK_BIAS + MASK_APP_REGS + MASK_FPU + MASK_LONG_DOUBLE_128)
18894 #endif
18895 +#endif
18896
18897 /* This must be v9a not just v9 because by default we enable
18898 -mvis. */
18899 --- a/gcc/config/sparc/sol2-bi.h
18900 +++ b/gcc/config/sparc/sol2-bi.h
18901 @@ -172,12 +172,12 @@
18902 %{YP,*} \
18903 %{R*} \
18904 %{compat-bsd: \
18905 - %{!YP,*:%{p|pg:-Y P,/usr/ucblib/sparcv9:/usr/lib/libp/sparcv9:/usr/lib/sparcv9} \
18906 - %{!p:%{!pg:-Y P,/usr/ucblib/sparcv9:/usr/lib/sparcv9}}} \
18907 - -R /usr/ucblib/sparcv9} \
18908 + %{!YP,*:%{p|pg:-Y P,%R/usr/ucblib/sparcv9:%R/usr/lib/libp/sparcv9:%R/usr/lib/sparcv9} \
18909 + %{!p:%{!pg:-Y P,%R/usr/ucblib/sparcv9:%R/usr/lib/sparcv9}}} \
18910 + -R %R/usr/ucblib/sparcv9} \
18911 %{!compat-bsd: \
18912 - %{!YP,*:%{p|pg:-Y P,/usr/lib/libp/sparcv9:/usr/lib/sparcv9} \
18913 - %{!p:%{!pg:-Y P,/usr/lib/sparcv9}}}}"
18914 + %{!YP,*:%{p|pg:-Y P,%R/usr/lib/libp/sparcv9:%R/usr/lib/sparcv9} \
18915 + %{!p:%{!pg:-Y P,%R/usr/lib/sparcv9}}}}"
18916
18917 #define LINK_ARCH64_SPEC LINK_ARCH64_SPEC_BASE
18918
18919 --- a/gcc/config/sparc/sparc.c
18920 +++ b/gcc/config/sparc/sparc.c
18921 @@ -6120,7 +6120,7 @@
18922 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
18923 {
18924 const char *qpfunc;
18925 - rtx slot0, slot1, result, tem, tem2;
18926 + rtx slot0, slot1, result, tem, tem2, libfunc;
18927 enum machine_mode mode;
18928 enum rtx_code new_comparison;
18929
18930 @@ -6183,7 +6183,8 @@
18931 emit_move_insn (slot1, y);
18932 }
18933
18934 - emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
18935 + libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
18936 + emit_library_call (libfunc, LCT_NORMAL,
18937 DImode, 2,
18938 XEXP (slot0, 0), Pmode,
18939 XEXP (slot1, 0), Pmode);
18940 @@ -6191,7 +6192,8 @@
18941 }
18942 else
18943 {
18944 - emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
18945 + libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
18946 + emit_library_call (libfunc, LCT_NORMAL,
18947 SImode, 2,
18948 x, TFmode, y, TFmode);
18949 mode = SImode;
18950 @@ -6202,7 +6204,7 @@
18951 register so reload doesn't clobber the value if it needs
18952 the return register for a spill reg. */
18953 result = gen_reg_rtx (mode);
18954 - emit_move_insn (result, hard_libcall_value (mode));
18955 + emit_move_insn (result, hard_libcall_value (mode, libfunc));
18956
18957 switch (comparison)
18958 {
18959 --- a/gcc/config/spu/spu.h
18960 +++ b/gcc/config/spu/spu.h
18961 @@ -270,7 +270,8 @@
18962
18963 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LINK_REGISTER_REGNUM)
18964
18965 -#define ARG_POINTER_CFA_OFFSET(FNDECL) (-STACK_POINTER_OFFSET)
18966 +#define ARG_POINTER_CFA_OFFSET(FNDECL) \
18967 + (crtl->args.pretend_args_size - STACK_POINTER_OFFSET)
18968
18969 \f
18970 /* Stack Checking */
18971 --- a/gcc/config.gcc
18972 +++ b/gcc/config.gcc
18973 @@ -1088,7 +1088,7 @@
18974 tmake_file="${tmake_file} i386/t-linux64"
18975 need_64bit_hwint=yes
18976 case X"${with_cpu}" in
18977 - Xgeneric|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx)
18978 + Xgeneric|Xatom|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx)
18979 ;;
18980 X)
18981 if test x$with_cpu_64 = x; then
18982 @@ -1097,7 +1097,7 @@
18983 ;;
18984 *)
18985 echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
18986 - echo "generic core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx" 1>&2
18987 + echo "generic atom core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx" 1>&2
18988 exit 1
18989 ;;
18990 esac
18991 @@ -1202,7 +1202,7 @@
18992 # libgcc/configure.ac instead.
18993 need_64bit_hwint=yes
18994 case X"${with_cpu}" in
18995 - Xgeneric|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx)
18996 + Xgeneric|Xatom|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx)
18997 ;;
18998 X)
18999 if test x$with_cpu_64 = x; then
19000 @@ -1211,7 +1211,7 @@
19001 ;;
19002 *)
19003 echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
19004 - echo "generic core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx" 1>&2
19005 + echo "generic atom core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx" 1>&2
19006 exit 1
19007 ;;
19008 esac
19009 @@ -1566,6 +1566,7 @@
19010 tm_defines="${tm_defines} MIPS_ISA_DEFAULT=65"
19011 ;;
19012 esac
19013 + tmake_file="$tmake_file mips/t-crtfm"
19014 gnu_ld=yes
19015 gas=yes
19016 test x$with_llsc != x || with_llsc=yes
19017 @@ -1581,6 +1582,7 @@
19018 tm_defines="${tm_defines} MIPS_ISA_DEFAULT=32"
19019 esac
19020 test x$with_llsc != x || with_llsc=yes
19021 + tmake_file="$tmake_file mips/t-crtfm"
19022 ;;
19023 mips*-*-openbsd*)
19024 tm_defines="${tm_defines} OBSD_HAS_DECLARE_FUNCTION_NAME OBSD_HAS_DECLARE_OBJECT OBSD_HAS_CORRECT_SPECS"
19025 @@ -1796,6 +1798,10 @@
19026 tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h"
19027 extra_options="${extra_options} rs6000/sysv4.opt"
19028 tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm"
19029 + if test x$enable_powerpc_e500mc_elf = xyes; then
19030 + tm_file="${tm_file} rs6000/e500mc.h"
19031 + tmake_file="${tmake_file} rs6000/t-ppc-e500mc"
19032 + fi
19033 ;;
19034 powerpc-*-eabialtivec*)
19035 tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/eabialtivec.h"
19036 @@ -2420,6 +2426,8 @@
19037 i[34567]86-*-* | x86_64-*-*)
19038 tmake_file="${tmake_file} i386/t-gmm_malloc i386/t-i386"
19039 ;;
19040 +powerpc*-*-* | rs6000-*-*)
19041 + tm_file="${tm_file} rs6000/option-defaults.h"
19042 esac
19043
19044 # Support for --with-cpu and related options (and a few unrelated options,
19045 @@ -2646,8 +2654,8 @@
19046 | armv[23456] | armv2a | armv3m | armv4t | armv5t \
19047 | armv5te | armv6j |armv6k | armv6z | armv6zk | armv6-m \
19048 | armv7 | armv7-a | armv7-r | armv7-m \
19049 - | iwmmxt | ep9312)
19050 - # OK
19051 + | iwmmxt | ep9312 | marvell-f )
19052 + # OK
19053 ;;
19054 *)
19055 echo "Unknown arch used in --with-arch=$with_arch" 1>&2
19056 @@ -2668,7 +2676,10 @@
19057
19058 case "$with_fpu" in
19059 "" \
19060 - | fpa | fpe2 | fpe3 | maverick | vfp | vfp3 | neon )
19061 + | fpa | fpe2 | fpe3 | maverick \
19062 + | vfp | vfp3 | vfpv3 | vfpv3-fp16 | vfpv3-d16 \
19063 + | vfpv3-d16-fp16 | vfpv4 | vfpv4-d16 | fpv4-sp-d16 \
19064 + | neon | neon-fp16 | neon-vfpv4 )
19065 # OK
19066 ;;
19067 *)
19068 @@ -2805,7 +2816,7 @@
19069 esac
19070 # OK
19071 ;;
19072 - "" | amdfam10 | barcelona | k8 | opteron | athlon64 | athlon-fx | nocona | core2 | generic)
19073 + "" | amdfam10 | barcelona | k8 | opteron | athlon64 | athlon-fx | nocona | core2 | atom | generic)
19074 # OK
19075 ;;
19076 *)
19077 @@ -2817,7 +2828,7 @@
19078 ;;
19079
19080 mips*-*-*)
19081 - supported_defaults="abi arch float tune divide llsc mips-plt"
19082 + supported_defaults="abi arch arch_32 arch_64 float tune tune_32 tune_64 divide llsc mips-plt"
19083
19084 case ${with_float} in
19085 "" | soft | hard)
19086 @@ -2882,12 +2893,20 @@
19087 ;;
19088
19089 powerpc*-*-* | rs6000-*-*)
19090 - supported_defaults="cpu float tune"
19091 + supported_defaults="cpu cpu_32 cpu_64 float tune tune_32 tune_64"
19092
19093 - for which in cpu tune; do
19094 + for which in cpu cpu_32 cpu_64 tune tune_32 tune_64; do
19095 eval "val=\$with_$which"
19096 case ${val} in
19097 default32 | default64)
19098 + case $which in
19099 + cpu | tune)
19100 + ;;
19101 + *)
19102 + echo "$val only valid for --with-cpu and --with-tune." 1>&2
19103 + exit 1
19104 + ;;
19105 + esac
19106 with_which="with_$which"
19107 eval $with_which=
19108 ;;
19109 --- a/gcc/config.in
19110 +++ b/gcc/config.in
19111 @@ -108,6 +108,12 @@
19112 #endif
19113
19114
19115 +/* Define to warn for use of native system header directories */
19116 +#ifndef USED_FOR_TARGET
19117 +#undef ENABLE_POISON_SYSTEM_DIRECTORIES
19118 +#endif
19119 +
19120 +
19121 /* Define if you want all operations on RTL (the basic data structure of the
19122 optimizer and back end) to be checked for dynamic type safety at runtime.
19123 This is quite expensive. */
19124 @@ -821,6 +827,13 @@
19125 #endif
19126
19127
19128 +/* Define if your assembler supports specifying the alignment of objects
19129 + allocated using the GAS .comm command. */
19130 +#ifndef USED_FOR_TARGET
19131 +#undef HAVE_GAS_ALIGNED_COMM
19132 +#endif
19133 +
19134 +
19135 /* Define if your assembler supports .balign and .p2align. */
19136 #ifndef USED_FOR_TARGET
19137 #undef HAVE_GAS_BALIGN_AND_P2ALIGN
19138 --- a/gcc/configure
19139 +++ b/gcc/configure
19140 @@ -458,7 +458,7 @@
19141 # include <unistd.h>
19142 #endif"
19143
19144 -ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os target_noncanonical build_libsubdir build_subdir host_subdir target_subdir GENINSRC CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT GNATBIND ac_ct_GNATBIND GNATMAKE ac_ct_GNATMAKE NO_MINUS_C_MINUS_O OUTPUT_OPTION CPP EGREP loose_warn strict_warn warn_cflags nocommon_flag TREEBROWSER valgrind_path valgrind_path_defines valgrind_command coverage_flags enable_multilib enable_decimal_float enable_fixed_point enable_shared TARGET_SYSTEM_ROOT TARGET_SYSTEM_ROOT_DEFINE CROSS_SYSTEM_HEADER_DIR onestep PKGVERSION REPORT_BUGS_TO REPORT_BUGS_TEXI datarootdir docdir htmldir SET_MAKE AWK LN_S LN RANLIB ac_ct_RANLIB ranlib_flags INSTALL INSTALL_PROGRAM INSTALL_DATA make_compare_target have_mktemp_command MAKEINFO BUILD_INFO GENERATED_MANPAGES FLEX BISON NM AR COLLECT2_LIBS GNAT_LIBEXC LDEXP_LIB TARGET_GETGROUPS_T LIBICONV LTLIBICONV LIBICONV_DEP manext objext gthread_flags extra_modes_file extra_opt_files USE_NLS LIBINTL LIBINTL_DEP INCINTL XGETTEXT GMSGFMT POSUB CATALOGS DATADIRNAME INSTOBJEXT GENCAT CATOBJEXT CROSS ALL SYSTEM_HEADER_DIR inhibit_libc CC_FOR_BUILD BUILD_CFLAGS BUILD_LDFLAGS STMP_FIXINC STMP_FIXPROTO collect2 LIBTOOL SED FGREP GREP LD DUMPBIN ac_ct_DUMPBIN OBJDUMP ac_ct_OBJDUMP ac_ct_AR STRIP ac_ct_STRIP lt_ECHO DSYMUTIL ac_ct_DSYMUTIL NMEDIT ac_ct_NMEDIT LIPO ac_ct_LIPO OTOOL ac_ct_OTOOL OTOOL64 ac_ct_OTOOL64 objdir enable_fast_install gcc_cv_as ORIGINAL_AS_FOR_TARGET gcc_cv_ld ORIGINAL_LD_FOR_TARGET gcc_cv_nm ORIGINAL_NM_FOR_TARGET gcc_cv_objdump gcc_cv_readelf libgcc_visibility GGC zlibdir zlibinc 
MAINT gcc_tooldir dollar slibdir subdirs srcdir all_compilers all_gtfiles all_lang_makefrags all_lang_makefiles all_languages all_selected_languages build_exeext build_install_headers_dir build_xm_file_list build_xm_include_list build_xm_defines build_file_translate check_languages cpp_install_dir xmake_file tmake_file extra_gcc_objs extra_headers_list extra_objs extra_parts extra_passes extra_programs float_h_file gcc_config_arguments gcc_gxx_include_dir host_exeext host_xm_file_list host_xm_include_list host_xm_defines out_host_hook_obj install lang_opt_files lang_specs_files lang_tree_files local_prefix md_file objc_boehm_gc out_file out_object_file thread_file tm_file_list tm_include_list tm_defines tm_p_file_list tm_p_include_list xm_file_list xm_include_list xm_defines c_target_objs cxx_target_objs fortran_target_objs target_cpu_default GMPLIBS GMPINC PPLLIBS PPLINC CLOOGLIBS CLOOGINC LIBOBJS LTLIBOBJS'
19145 +ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os target_noncanonical licensedir build_libsubdir build_subdir host_subdir target_subdir GENINSRC CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT GNATBIND ac_ct_GNATBIND GNATMAKE ac_ct_GNATMAKE NO_MINUS_C_MINUS_O OUTPUT_OPTION CPP EGREP loose_warn strict_warn warn_cflags nocommon_flag TREEBROWSER valgrind_path valgrind_path_defines valgrind_command coverage_flags enable_multilib enable_decimal_float enable_fixed_point enable_shared TARGET_SYSTEM_ROOT TARGET_SYSTEM_ROOT_DEFINE CROSS_SYSTEM_HEADER_DIR EGLIBC_CONFIGS CONFIGURE_SPECS onestep PKGVERSION REPORT_BUGS_TO REPORT_BUGS_TEXI datarootdir docdir htmldir SET_MAKE AWK LN_S LN RANLIB ac_ct_RANLIB ranlib_flags INSTALL INSTALL_PROGRAM INSTALL_DATA make_compare_target have_mktemp_command MAKEINFO BUILD_INFO GENERATED_MANPAGES FLEX BISON NM AR COLLECT2_LIBS GNAT_LIBEXC LDEXP_LIB TARGET_GETGROUPS_T LIBICONV LTLIBICONV LIBICONV_DEP manext objext gthread_flags extra_modes_file extra_opt_files USE_NLS LIBINTL LIBINTL_DEP INCINTL XGETTEXT GMSGFMT POSUB CATALOGS DATADIRNAME INSTOBJEXT GENCAT CATOBJEXT CROSS ALL SYSTEM_HEADER_DIR inhibit_libc CC_FOR_BUILD BUILD_CFLAGS BUILD_LDFLAGS STMP_FIXINC STMP_FIXPROTO collect2 LIBTOOL SED FGREP GREP LD DUMPBIN ac_ct_DUMPBIN OBJDUMP ac_ct_OBJDUMP ac_ct_AR STRIP ac_ct_STRIP lt_ECHO DSYMUTIL ac_ct_DSYMUTIL NMEDIT ac_ct_NMEDIT LIPO ac_ct_LIPO OTOOL ac_ct_OTOOL OTOOL64 ac_ct_OTOOL64 objdir enable_fast_install gcc_cv_as ORIGINAL_AS_FOR_TARGET gcc_cv_ld ORIGINAL_LD_FOR_TARGET gcc_cv_nm ORIGINAL_NM_FOR_TARGET gcc_cv_objdump gcc_cv_readelf 
libgcc_visibility GGC zlibdir zlibinc MAINT gcc_tooldir dollar slibdir subdirs srcdir all_compilers all_gtfiles all_lang_makefrags all_lang_makefiles all_languages all_selected_languages build_exeext build_install_headers_dir build_xm_file_list build_xm_include_list build_xm_defines build_file_translate check_languages cpp_install_dir xmake_file tmake_file TM_ENDIAN_CONFIG TM_MULTILIB_CONFIG TM_MULTILIB_EXCEPTIONS_CONFIG extra_gcc_objs extra_headers_list extra_objs extra_parts extra_passes extra_programs float_h_file gcc_config_arguments gcc_gxx_include_dir host_exeext host_xm_file_list host_xm_include_list host_xm_defines out_host_hook_obj install lang_opt_files lang_specs_files lang_tree_files local_prefix md_file objc_boehm_gc out_file out_object_file thread_file tm_file_list tm_include_list tm_defines tm_p_file_list tm_p_include_list xm_file_list xm_include_list xm_defines c_target_objs cxx_target_objs fortran_target_objs target_cpu_default GMPLIBS GMPINC PPLLIBS PPLINC CLOOGLIBS CLOOGINC LIBOBJS LTLIBOBJS'
19146 ac_subst_files='language_hooks'
19147 ac_pwd=`pwd`
19148
19149 @@ -1084,6 +1084,8 @@
19150 --enable-version-specific-runtime-libs
19151 specify that runtime libraries should be
19152 installed in a compiler-specific directory
19153 + --enable-poison-system-directories
19154 + warn for use of native system header directories
19155
19156 Optional Packages:
19157 --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
19158 @@ -22108,6 +22110,22 @@
19159 tls_first_minor=16
19160 tls_as_opt='-32 --fatal-warnings'
19161 ;;
19162 + m68k-*-*)
19163 + conftest_s='
19164 + .section .tdata,"awT",@progbits
19165 +x:
19166 + .word 2
19167 + .text
19168 +foo:
19169 + move.l x@TLSGD(%a5),%a0
19170 + move.l x@TLSLDM(%a5),%a0
19171 + move.l x@TLSLDO(%a5),%a0
19172 + move.l x@TLSIE(%a5),%a0
19173 + move.l x@TLSLE(%a5),%a0'
19174 + tls_first_major=2
19175 + tls_first_minor=19
19176 + tls_as_opt='--fatal-warnings'
19177 + ;;
19178 powerpc-*-*)
19179 conftest_s='
19180 .section ".tdata","awT",@progbits
19181 @@ -22739,6 +22757,44 @@
19182 i[34567]86-*-* | x86_64-*-*)
19183 case $target_os in
19184 cygwin* | pe | mingw32*)
19185 + # Recent binutils allows the three-operand form of ".comm" on PE. This
19186 + # definition is used unconditionally to initialise the default state of
19187 + # the target option variable that governs usage of the feature.
19188 + echo "$as_me:$LINENO: checking assembler for .comm with alignment" >&5
19189 +echo $ECHO_N "checking assembler for .comm with alignment... $ECHO_C" >&6
19190 +if test "${gcc_cv_as_comm_has_align+set}" = set; then
19191 + echo $ECHO_N "(cached) $ECHO_C" >&6
19192 +else
19193 + gcc_cv_as_comm_has_align=no
19194 + if test $in_tree_gas = yes; then
19195 + if test $gcc_cv_gas_vers -ge `expr \( \( 2 \* 1000 \) + 19 \) \* 1000 + 52`
19196 + then gcc_cv_as_comm_has_align=yes
19197 +fi
19198 + elif test x$gcc_cv_as != x; then
19199 + echo '.comm foo,1,32' > conftest.s
19200 + if { ac_try='$gcc_cv_as -o conftest.o conftest.s >&5'
19201 + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
19202 + (eval $ac_try) 2>&5
19203 + ac_status=$?
19204 + echo "$as_me:$LINENO: \$? = $ac_status" >&5
19205 + (exit $ac_status); }; }
19206 + then
19207 + gcc_cv_as_comm_has_align=yes
19208 + else
19209 + echo "configure: failed program was" >&5
19210 + cat conftest.s >&5
19211 + fi
19212 + rm -f conftest.o conftest.s
19213 + fi
19214 +fi
19215 +echo "$as_me:$LINENO: result: $gcc_cv_as_comm_has_align" >&5
19216 +echo "${ECHO_T}$gcc_cv_as_comm_has_align" >&6
19217 +
19218 +
19219 +cat >>confdefs.h <<_ACEOF
19220 +#define HAVE_GAS_ALIGNED_COMM `if test $gcc_cv_as_comm_has_align = yes; then echo 1; else echo 0; fi`
19221 +_ACEOF
19222 +
19223 # Used for DWARF 2 in PE
19224 echo "$as_me:$LINENO: checking assembler for .secrel32 relocs" >&5
19225 echo $ECHO_N "checking assembler for .secrel32 relocs... $ECHO_C" >&6
19226 @@ -24711,6 +24767,21 @@
19227 fi;
19228
19229
19230 +# Check whether --enable-poison-system-directories or --disable-poison-system-directories was given.
19231 +if test "${enable_poison_system_directories+set}" = set; then
19232 + enableval="$enable_poison_system_directories"
19233 +
19234 +else
19235 + enable_poison_system_directories=no
19236 +fi;
19237 +if test "x${enable_poison_system_directories}" = "xyes"; then
19238 +
19239 +cat >>confdefs.h <<\_ACEOF
19240 +#define ENABLE_POISON_SYSTEM_DIRECTORIES 1
19241 +_ACEOF
19242 +
19243 +fi
19244 +
19245 # Substitute configuration variables
19246
19247
19248 --- a/gcc/configure.ac
19249 +++ b/gcc/configure.ac
19250 @@ -2576,6 +2576,22 @@
19251 tls_first_minor=16
19252 tls_as_opt='-32 --fatal-warnings'
19253 ;;
19254 + m68k-*-*)
19255 + conftest_s='
19256 + .section .tdata,"awT",@progbits
19257 +x:
19258 + .word 2
19259 + .text
19260 +foo:
19261 + move.l x@TLSGD(%a5),%a0
19262 + move.l x@TLSLDM(%a5),%a0
19263 + move.l x@TLSLDO(%a5),%a0
19264 + move.l x@TLSIE(%a5),%a0
19265 + move.l x@TLSLE(%a5),%a0'
19266 + tls_first_major=2
19267 + tls_first_minor=19
19268 + tls_as_opt='--fatal-warnings'
19269 + ;;
19270 powerpc-*-*)
19271 conftest_s='
19272 .section ".tdata","awT",@progbits
19273 @@ -2944,6 +2960,15 @@
19274 changequote([,])dnl
19275 case $target_os in
19276 cygwin* | pe | mingw32*)
19277 + # Recent binutils allows the three-operand form of ".comm" on PE. This
19278 + # definition is used unconditionally to initialise the default state of
19279 + # the target option variable that governs usage of the feature.
19280 + gcc_GAS_CHECK_FEATURE([.comm with alignment], gcc_cv_as_comm_has_align,
19281 + [2,19,52],,[.comm foo,1,32])
19282 + AC_DEFINE_UNQUOTED(HAVE_GAS_ALIGNED_COMM,
19283 + [`if test $gcc_cv_as_comm_has_align = yes; then echo 1; else echo 0; fi`],
19284 + [Define if your assembler supports specifying the alignment
19285 + of objects allocated using the GAS .comm command.])
19286 # Used for DWARF 2 in PE
19287 gcc_GAS_CHECK_FEATURE([.secrel32 relocs],
19288 gcc_cv_as_ix86_pe_secrel32,
19289 @@ -3891,6 +3916,16 @@
19290 htmldir='$(docdir)')
19291 AC_SUBST(htmldir)
19292
19293 +AC_ARG_ENABLE([poison-system-directories],
19294 + AS_HELP_STRING([--enable-poison-system-directories],
19295 + [warn for use of native system header directories]),,
19296 + [enable_poison_system_directories=no])
19297 +if test "x${enable_poison_system_directories}" = "xyes"; then
19298 + AC_DEFINE([ENABLE_POISON_SYSTEM_DIRECTORIES],
19299 + [1],
19300 + [Define to warn for use of native system header directories])
19301 +fi
19302 +
19303 # Substitute configuration variables
19304 AC_SUBST(subdirs)
19305 AC_SUBST(srcdir)
19306 --- a/gcc/c.opt
19307 +++ b/gcc/c.opt
19308 @@ -716,6 +716,10 @@
19309 C ObjC C++ ObjC++
19310 Treat the input file as already preprocessed
19311
19312 +fremove-local-statics
19313 +C C++ Var(flag_remove_local_statics) Optimization
19314 +Convert function-local static variables to automatic variables when it is safe to do so
19315 +
19316 freplace-objc-classes
19317 ObjC ObjC++
19318 Used in Fix-and-Continue mode to indicate that object files may be swapped in at runtime
19319 --- a/gcc/c-opts.c
19320 +++ b/gcc/c-opts.c
19321 @@ -40,6 +40,7 @@
19322 #include "mkdeps.h"
19323 #include "target.h"
19324 #include "tm_p.h"
19325 +#include "c-tree.h" /* For c_cpp_error. */
19326
19327 #ifndef DOLLARS_IN_IDENTIFIERS
19328 # define DOLLARS_IN_IDENTIFIERS true
19329 @@ -201,6 +202,7 @@
19330 {
19331 static const unsigned int lang_flags[] = {CL_C, CL_ObjC, CL_CXX, CL_ObjCXX};
19332 unsigned int i, result;
19333 + struct cpp_callbacks *cb;
19334
19335 /* This is conditionalized only because that is the way the front
19336 ends used to do it. Maybe this should be unconditional? */
19337 @@ -216,6 +218,8 @@
19338
19339 parse_in = cpp_create_reader (c_dialect_cxx () ? CLK_GNUCXX: CLK_GNUC89,
19340 ident_hash, line_table);
19341 + cb = cpp_get_callbacks (parse_in);
19342 + cb->error = c_cpp_error;
19343
19344 cpp_opts = cpp_get_options (parse_in);
19345 cpp_opts->dollars_in_ident = DOLLARS_IN_IDENTIFIERS;
19346 @@ -333,12 +337,12 @@
19347 or environment var dependency generation is used. */
19348 cpp_opts->deps.style = (code == OPT_M ? DEPS_SYSTEM: DEPS_USER);
19349 flag_no_output = 1;
19350 - cpp_opts->inhibit_warnings = 1;
19351 break;
19352
19353 case OPT_MD:
19354 case OPT_MMD:
19355 cpp_opts->deps.style = (code == OPT_MD ? DEPS_SYSTEM: DEPS_USER);
19356 + cpp_opts->deps.need_preprocessor_output = true;
19357 deps_file = arg;
19358 break;
19359
19360 @@ -444,7 +448,6 @@
19361 break;
19362
19363 case OPT_Werror:
19364 - cpp_opts->warnings_are_errors = value;
19365 global_dc->warning_as_error_requested = value;
19366 break;
19367
19368 @@ -503,10 +506,6 @@
19369 warn_strict_null_sentinel = value;
19370 break;
19371
19372 - case OPT_Wsystem_headers:
19373 - cpp_opts->warn_system_headers = value;
19374 - break;
19375 -
19376 case OPT_Wtraditional:
19377 cpp_opts->warn_traditional = value;
19378 break;
19379 @@ -895,8 +894,6 @@
19380 c_common_post_options, so that a subsequent -Wno-endif-labels
19381 is not overridden. */
19382 case OPT_pedantic_errors:
19383 - cpp_opts->pedantic_errors = 1;
19384 - /* Fall through. */
19385 case OPT_pedantic:
19386 cpp_opts->pedantic = 1;
19387 cpp_opts->warn_endif_labels = 1;
19388 @@ -971,10 +968,6 @@
19389 flag_undef = 1;
19390 break;
19391
19392 - case OPT_w:
19393 - cpp_opts->inhibit_warnings = 1;
19394 - break;
19395 -
19396 case OPT_v:
19397 verbose = true;
19398 break;
19399 @@ -1159,10 +1152,6 @@
19400
19401 input_location = UNKNOWN_LOCATION;
19402
19403 - /* If an error has occurred in cpplib, note it so we fail
19404 - immediately. */
19405 - errorcount += cpp_errors (parse_in);
19406 -
19407 *pfilename = this_input_filename
19408 = cpp_read_main_file (parse_in, in_fnames[0]);
19409 /* Don't do any compilation or preprocessing if there is no input file. */
19410 @@ -1274,7 +1263,8 @@
19411 {
19412 FILE *deps_stream = NULL;
19413
19414 - if (cpp_opts->deps.style != DEPS_NONE)
19415 + /* Don't write the deps file if there are errors. */
19416 + if (cpp_opts->deps.style != DEPS_NONE && errorcount == 0)
19417 {
19418 /* If -M or -MM was seen without -MF, default output to the
19419 output stream. */
19420 @@ -1290,7 +1280,7 @@
19421
19422 /* For performance, avoid tearing down cpplib's internal structures
19423 with cpp_destroy (). */
19424 - errorcount += cpp_finish (parse_in, deps_stream);
19425 + cpp_finish (parse_in, deps_stream);
19426
19427 if (deps_stream && deps_stream != out_stream
19428 && (ferror (deps_stream) || fclose (deps_stream)))
19429 --- a/gcc/cp/class.c
19430 +++ b/gcc/cp/class.c
19431 @@ -6136,7 +6136,7 @@
19432 if (flags & tf_error)
19433 {
19434 error ("no matches converting function %qD to type %q#T",
19435 - DECL_NAME (OVL_FUNCTION (overload)),
19436 + DECL_NAME (OVL_CURRENT (overload)),
19437 target_type);
19438
19439 /* print_candidates expects a chain with the functions in
19440 @@ -6299,13 +6299,8 @@
19441 dependent on overload resolution. */
19442 gcc_assert (TREE_CODE (rhs) == ADDR_EXPR
19443 || TREE_CODE (rhs) == COMPONENT_REF
19444 - || TREE_CODE (rhs) == COMPOUND_EXPR
19445 - || really_overloaded_fn (rhs));
19446 -
19447 - /* We don't overwrite rhs if it is an overloaded function.
19448 - Copying it would destroy the tree link. */
19449 - if (TREE_CODE (rhs) != OVERLOAD)
19450 - rhs = copy_node (rhs);
19451 + || really_overloaded_fn (rhs)
19452 + || (flag_ms_extensions && TREE_CODE (rhs) == FUNCTION_DECL));
19453
19454 /* This should really only be used when attempting to distinguish
19455 what sort of a pointer to function we have. For now, any
19456 @@ -6357,19 +6352,6 @@
19457 /*explicit_targs=*/NULL_TREE,
19458 access_path);
19459
19460 - case COMPOUND_EXPR:
19461 - TREE_OPERAND (rhs, 0)
19462 - = instantiate_type (lhstype, TREE_OPERAND (rhs, 0), flags);
19463 - if (TREE_OPERAND (rhs, 0) == error_mark_node)
19464 - return error_mark_node;
19465 - TREE_OPERAND (rhs, 1)
19466 - = instantiate_type (lhstype, TREE_OPERAND (rhs, 1), flags);
19467 - if (TREE_OPERAND (rhs, 1) == error_mark_node)
19468 - return error_mark_node;
19469 -
19470 - TREE_TYPE (rhs) = lhstype;
19471 - return rhs;
19472 -
19473 case ADDR_EXPR:
19474 {
19475 if (PTRMEM_OK_P (rhs))
19476 --- a/gcc/cp/cp-tree.h
19477 +++ b/gcc/cp/cp-tree.h
19478 @@ -43,9 +43,6 @@
19479 #else
19480 #define ATTRIBUTE_GCC_CXXDIAG(m, n) ATTRIBUTE_NONNULL(m)
19481 #endif
19482 -extern void cp_cpp_error (cpp_reader *, int,
19483 - const char *, va_list *)
19484 - ATTRIBUTE_GCC_CXXDIAG(3,0);
19485 #ifdef GCC_TOPLEV_H
19486 #error \
19487 In order for the format checking to accept the C++ front end diagnostic \
19488 --- a/gcc/cp/cvt.c
19489 +++ b/gcc/cp/cvt.c
19490 @@ -581,6 +581,7 @@
19491 tree e = expr;
19492 enum tree_code code = TREE_CODE (type);
19493 const char *invalid_conv_diag;
19494 + tree e1;
19495
19496 if (error_operand_p (e) || type == error_mark_node)
19497 return error_mark_node;
19498 @@ -629,6 +630,10 @@
19499 }
19500 }
19501
19502 + e1 = targetm.convert_to_type (type, e);
19503 + if (e1)
19504 + return e1;
19505 +
19506 if (code == VOID_TYPE && (convtype & CONV_STATIC))
19507 {
19508 e = convert_to_void (e, /*implicit=*/NULL, tf_warning_or_error);
19509 @@ -1231,11 +1236,18 @@
19510 tree
19511 type_promotes_to (tree type)
19512 {
19513 + tree promoted_type;
19514 +
19515 if (type == error_mark_node)
19516 return error_mark_node;
19517
19518 type = TYPE_MAIN_VARIANT (type);
19519
19520 + /* Check for promotions of target-defined types first. */
19521 + promoted_type = targetm.promoted_type (type);
19522 + if (promoted_type)
19523 + return promoted_type;
19524 +
19525 /* bool always promotes to int (not unsigned), even if it's the same
19526 size. */
19527 if (type == boolean_type_node)
19528 --- a/gcc/cp/decl2.c
19529 +++ b/gcc/cp/decl2.c
19530 @@ -1717,6 +1717,10 @@
19531 || (DECL_ASSEMBLER_NAME_SET_P (decl)
19532 && TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))))
19533 return true;
19534 + /* Functions marked "dllexport" must be emitted so that they are
19535 + visible to other DLLs. */
19536 + if (lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)))
19537 + return true;
19538 /* Otherwise, DECL does not need to be emitted -- yet. A subsequent
19539 reference to DECL might cause it to be emitted later. */
19540 return false;
19541 @@ -3802,6 +3806,15 @@
19542 }
19543
19544 TREE_USED (decl) = 1;
19545 + if (current_function_decl != NULL_TREE
19546 + && (TREE_CODE (decl) == VAR_DECL
19547 + || TREE_CODE (decl) == PARM_DECL
19548 + || TREE_CODE (decl) == FUNCTION_DECL))
19549 + {
19550 + tree context = decl_function_context (decl);
19551 + if (context != NULL_TREE && context != current_function_decl)
19552 + DECL_NONLOCAL (decl) = 1;
19553 + }
19554 if (DECL_CLONED_FUNCTION_P (decl))
19555 TREE_USED (DECL_CLONED_FUNCTION (decl)) = 1;
19556 if (TREE_CODE (decl) == FUNCTION_DECL
19557 --- a/gcc/cp/decl.c
19558 +++ b/gcc/cp/decl.c
19559 @@ -4515,7 +4515,7 @@
19560
19561 cp_apply_type_quals_to_decl (cp_type_quals (TREE_TYPE (decl)), decl);
19562
19563 - layout_decl (decl, 0);
19564 + relayout_decl (decl);
19565 }
19566 }
19567
19568 @@ -7620,6 +7620,7 @@
19569 bool parameter_pack_p = declarator? declarator->parameter_pack_p : false;
19570 bool set_no_warning = false;
19571 bool template_type_arg = false;
19572 + const char *errmsg;
19573
19574 signed_p = declspecs->specs[(int)ds_signed];
19575 unsigned_p = declspecs->specs[(int)ds_unsigned];
19576 @@ -8299,6 +8300,12 @@
19577 type_quals = TYPE_UNQUALIFIED;
19578 set_no_warning = true;
19579 }
19580 + errmsg = targetm.invalid_return_type (type);
19581 + if (errmsg)
19582 + {
19583 + error (errmsg);
19584 + type = integer_type_node;
19585 + }
19586
19587 /* Error about some types functions can't return. */
19588
19589 @@ -8841,8 +8848,13 @@
19590
19591 /* Replace the anonymous name with the real name everywhere. */
19592 for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
19593 - if (TYPE_NAME (t) == oldname)
19594 - TYPE_NAME (t) = decl;
19595 + {
19596 + if (ANON_AGGRNAME_P (TYPE_IDENTIFIER (t)))
19597 + {
19598 + debug_hooks->set_name (t, decl);
19599 + TYPE_NAME (t) = decl;
19600 + }
19601 + }
19602
19603 if (TYPE_LANG_SPECIFIC (type))
19604 TYPE_WAS_ANONYMOUS (type) = 1;
19605 @@ -9679,6 +9691,7 @@
19606 tree type = NULL_TREE;
19607 tree init = TREE_PURPOSE (parm);
19608 tree decl = TREE_VALUE (parm);
19609 + const char *errmsg;
19610
19611 if (parm == void_list_node)
19612 break;
19613 @@ -9712,6 +9725,14 @@
19614 init = NULL_TREE;
19615 }
19616
19617 + if (type != error_mark_node
19618 + && (errmsg = targetm.invalid_parameter_type (type)))
19619 + {
19620 + error (errmsg);
19621 + type = error_mark_node;
19622 + TREE_TYPE (decl) = error_mark_node;
19623 + }
19624 +
19625 if (type != error_mark_node)
19626 {
19627 if (deprecated_state != DEPRECATED_SUPPRESS)
19628 --- a/gcc/cp/error.c
19629 +++ b/gcc/cp/error.c
19630 @@ -2667,39 +2667,6 @@
19631 #undef next_int
19632 }
19633 \f
19634 -/* Callback from cpp_error for PFILE to print diagnostics arising from
19635 - interpreting strings. The diagnostic is of type LEVEL; MSG is the
19636 - translated message and AP the arguments. */
19637 -
19638 -void
19639 -cp_cpp_error (cpp_reader *pfile ATTRIBUTE_UNUSED, int level,
19640 - const char *msg, va_list *ap)
19641 -{
19642 - diagnostic_info diagnostic;
19643 - diagnostic_t dlevel;
19644 - switch (level)
19645 - {
19646 - case CPP_DL_WARNING:
19647 - case CPP_DL_WARNING_SYSHDR:
19648 - dlevel = DK_WARNING;
19649 - break;
19650 - case CPP_DL_PEDWARN:
19651 - dlevel = DK_PEDWARN;
19652 - break;
19653 - case CPP_DL_ERROR:
19654 - dlevel = DK_ERROR;
19655 - break;
19656 - case CPP_DL_ICE:
19657 - dlevel = DK_ICE;
19658 - break;
19659 - default:
19660 - gcc_unreachable ();
19661 - }
19662 - diagnostic_set_info_translated (&diagnostic, msg, ap,
19663 - input_location, dlevel);
19664 - report_diagnostic (&diagnostic);
19665 -}
19666 -
19667 /* Warn about the use of C++0x features when appropriate. */
19668 void
19669 maybe_warn_cpp0x (const char* str)
19670 --- a/gcc/cp/except.c
19671 +++ b/gcc/cp/except.c
19672 @@ -146,14 +146,26 @@
19673 static tree
19674 build_eh_type_type (tree type)
19675 {
19676 - tree exp = eh_type_info (type);
19677 + bool is_ref = TREE_CODE (type) == REFERENCE_TYPE;
19678 + tree exp;
19679 +
19680 + if (is_ref)
19681 + type = TREE_TYPE (type);
19682 +
19683 + exp = eh_type_info (type);
19684
19685 if (!exp)
19686 return NULL;
19687
19688 mark_used (exp);
19689
19690 - return convert (ptr_type_node, build_address (exp));
19691 + exp = build_address (exp);
19692 +
19693 + if (is_ref)
19694 + exp = targetm.cxx.ttype_ref_encode (exp);
19695 +
19696 + exp = convert (ptr_type_node, exp);
19697 + return exp;
19698 }
19699
19700 tree
19701 @@ -495,6 +507,16 @@
19702 initialize_handler_parm (decl, exp);
19703 }
19704
19705 + /* Preserve the reference type on the exception, as this affects
19706 + derived-to-base conversions in catch matching. Only do this when
19707 + the ABI supports it, as originally this case was (incorrectly)
19708 + treated just as catching a pointer-to-class by value. */
19709 + if (targetm.cxx.ttype_ref_encode
19710 + && decl && TREE_CODE (type) == POINTER_TYPE
19711 + && CLASS_TYPE_P (TREE_TYPE (type))
19712 + && TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
19713 + type = build_reference_type (type);
19714 +
19715 return type;
19716 }
19717
19718 @@ -538,10 +560,20 @@
19719 raw_raises && TREE_VALUE (raw_raises);
19720 raw_raises = TREE_CHAIN (raw_raises))
19721 {
19722 - tree type = prepare_eh_type (TREE_VALUE (raw_raises));
19723 + tree orig_type = TREE_VALUE (raw_raises);
19724 + tree type = prepare_eh_type (orig_type);
19725 tree tinfo = eh_type_info (type);
19726
19727 mark_used (tinfo);
19728 + /* Preserve the reference type on the exception, as this affects
19729 + derived-to-base conversions in catch matching. Only do this when
19730 + the ABI supports it, as originally this case was (incorrectly)
19731 + treated just as catching a pointer-to-class by value. */
19732 + if (targetm.cxx.ttype_ref_encode
19733 + && TREE_CODE (orig_type) == REFERENCE_TYPE
19734 + && TREE_CODE (type) == POINTER_TYPE
19735 + && CLASS_TYPE_P (TREE_TYPE (type)))
19736 + type = build_reference_type (type);
19737 raises = tree_cons (NULL_TREE, type, raises);
19738 }
19739
19740 @@ -956,24 +988,40 @@
19741 static int
19742 can_convert_eh (tree to, tree from)
19743 {
19744 - to = non_reference (to);
19745 - from = non_reference (from);
19746 + bool to_ref = TREE_CODE (to) == REFERENCE_TYPE;
19747 + int depth = to_ref;
19748 + bool outer_const = true;
19749
19750 - if (TREE_CODE (to) == POINTER_TYPE && TREE_CODE (from) == POINTER_TYPE)
19751 + if (to_ref)
19752 + to = TREE_TYPE (to);
19753 + from = non_reference (from);
19754 +
19755 + while (TREE_CODE (to) == POINTER_TYPE && TREE_CODE (from) == POINTER_TYPE)
19756 {
19757 + unsigned to_quals, from_quals;
19758 +
19759 + depth++;
19760 +
19761 to = TREE_TYPE (to);
19762 from = TREE_TYPE (from);
19763 + to_quals = TYPE_QUALS (to);
19764 + from_quals = TYPE_QUALS (from);
19765
19766 - if (! at_least_as_qualified_p (to, from))
19767 + if ((from_quals & ~to_quals)
19768 + || (!outer_const && to_quals & ~from_quals))
19769 return 0;
19770 -
19771 - if (TREE_CODE (to) == VOID_TYPE)
19772 - return 1;
19773 -
19774 - /* Else fall through. */
19775 +
19776 + if (!(to_quals & TYPE_QUAL_CONST))
19777 + outer_const = false;
19778 }
19779
19780 - if (CLASS_TYPE_P (to) && CLASS_TYPE_P (from)
19781 + if (same_type_ignoring_top_level_qualifiers_p (from, to))
19782 + return 1;
19783 +
19784 + if (depth == to_ref + 1 && TREE_CODE (to) == VOID_TYPE)
19785 + return 1;
19786 +
19787 + if (depth < 2 && CLASS_TYPE_P (to) && CLASS_TYPE_P (from)
19788 && PUBLICLY_UNIQUELY_DERIVED_P (to, from))
19789 return 1;
19790
19791 --- a/gcc/cp/parser.c
19792 +++ b/gcc/cp/parser.c
19793 @@ -309,8 +309,7 @@
19794
19795 /* Subsequent preprocessor diagnostics should use compiler
19796 diagnostic functions to get the compiler source location. */
19797 - cpp_get_options (parse_in)->client_diagnostic = true;
19798 - cpp_get_callbacks (parse_in)->error = cp_cpp_error;
19799 + done_lexing = true;
19800
19801 gcc_assert (lexer->next_token->type != CPP_PURGED);
19802 return lexer;
19803 --- a/gcc/cp/rtti.c
19804 +++ b/gcc/cp/rtti.c
19805 @@ -393,6 +393,7 @@
19806 return d;
19807 }
19808
19809 + gcc_assert (TREE_CODE (type) != REFERENCE_TYPE);
19810 name = mangle_typeinfo_for_type (type);
19811
19812 d = IDENTIFIER_GLOBAL_VALUE (name);
19813 --- a/gcc/cp/semantics.c
19814 +++ b/gcc/cp/semantics.c
19815 @@ -1120,7 +1120,11 @@
19816 type = expand_start_catch_block (decl);
19817 HANDLER_TYPE (handler) = type;
19818 if (!processing_template_decl && type)
19819 - mark_used (eh_type_info (type));
19820 + {
19821 + if (TREE_CODE (type) == REFERENCE_TYPE)
19822 + type = TREE_TYPE (type);
19823 + mark_used (eh_type_info (type));
19824 + }
19825 }
19826
19827 /* Finish a handler, which may be given by HANDLER. The BLOCKs are
19828 @@ -3243,8 +3247,10 @@
19829
19830 /* If the user wants us to keep all inline functions, then mark
19831 this function as needed so that finish_file will make sure to
19832 - output it later. */
19833 - if (flag_keep_inline_functions && DECL_DECLARED_INLINE_P (fn))
19834 + output it later. Similarly, all dllexport'd functions must
19835 + be emitted; there may be callers in other DLLs. */
19836 + if ((flag_keep_inline_functions && DECL_DECLARED_INLINE_P (fn))
19837 + || lookup_attribute ("dllexport", DECL_ATTRIBUTES (fn)))
19838 mark_needed (fn);
19839 }
19840
19841 --- a/gcc/cp/typeck.c
19842 +++ b/gcc/cp/typeck.c
19843 @@ -1707,10 +1707,14 @@
19844 tree
19845 default_conversion (tree exp)
19846 {
19847 + /* Check for target-specific promotions. */
19848 + tree promoted_type = targetm.promoted_type (TREE_TYPE (exp));
19849 + if (promoted_type)
19850 + exp = cp_convert (promoted_type, exp);
19851 /* Perform the integral promotions first so that bitfield
19852 expressions (which may promote to "int", even if the bitfield is
19853 declared "unsigned") are promoted correctly. */
19854 - if (INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (exp)))
19855 + else if (INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (exp)))
19856 exp = perform_integral_promotions (exp);
19857 /* Perform the other conversions. */
19858 exp = decay_conversion (exp);
19859 @@ -3378,7 +3382,6 @@
19860
19861 /* If an error was already reported for one of the arguments,
19862 avoid reporting another error. */
19863 -
19864 if (code0 == ERROR_MARK || code1 == ERROR_MARK)
19865 return error_mark_node;
19866
19867 @@ -3389,6 +3392,25 @@
19868 return error_mark_node;
19869 }
19870
19871 + /* Issue warnings about peculiar, but valid, uses of NULL. */
19872 + if ((orig_op0 == null_node || orig_op1 == null_node)
19873 + /* It's reasonable to use pointer values as operands of &&
19874 + and ||, so NULL is no exception. */
19875 + && code != TRUTH_ANDIF_EXPR && code != TRUTH_ORIF_EXPR
19876 + && ( /* Both are NULL (or 0) and the operation was not a
19877 + comparison or a pointer subtraction. */
19878 + (null_ptr_cst_p (orig_op0) && null_ptr_cst_p (orig_op1)
19879 + && code != EQ_EXPR && code != NE_EXPR && code != MINUS_EXPR)
19880 + /* Or if one of OP0 or OP1 is neither a pointer nor NULL. */
19881 + || (!null_ptr_cst_p (orig_op0)
19882 + && !TYPE_PTR_P (type0) && !TYPE_PTR_TO_MEMBER_P (type0))
19883 + || (!null_ptr_cst_p (orig_op1)
19884 + && !TYPE_PTR_P (type1) && !TYPE_PTR_TO_MEMBER_P (type1)))
19885 + && (complain & tf_warning))
19886 + /* Some sort of arithmetic operation involving NULL was
19887 + performed. */
19888 + warning (OPT_Wpointer_arith, "NULL used in arithmetic");
19889 +
19890 switch (code)
19891 {
19892 case MINUS_EXPR:
19893 @@ -3979,25 +4001,6 @@
19894 }
19895 }
19896
19897 - /* Issue warnings about peculiar, but valid, uses of NULL. */
19898 - if ((orig_op0 == null_node || orig_op1 == null_node)
19899 - /* It's reasonable to use pointer values as operands of &&
19900 - and ||, so NULL is no exception. */
19901 - && code != TRUTH_ANDIF_EXPR && code != TRUTH_ORIF_EXPR
19902 - && ( /* Both are NULL (or 0) and the operation was not a comparison. */
19903 - (null_ptr_cst_p (orig_op0) && null_ptr_cst_p (orig_op1)
19904 - && code != EQ_EXPR && code != NE_EXPR)
19905 - /* Or if one of OP0 or OP1 is neither a pointer nor NULL. */
19906 - || (!null_ptr_cst_p (orig_op0) && TREE_CODE (TREE_TYPE (op0)) != POINTER_TYPE)
19907 - || (!null_ptr_cst_p (orig_op1) && TREE_CODE (TREE_TYPE (op1)) != POINTER_TYPE))
19908 - && (complain & tf_warning))
19909 - /* Some sort of arithmetic operation involving NULL was
19910 - performed. Note that pointer-difference and pointer-addition
19911 - have already been handled above, and so we don't end up here in
19912 - that case. */
19913 - warning (OPT_Wpointer_arith, "NULL used in arithmetic");
19914 -
19915 -
19916 /* If CONVERTED is zero, both args will be converted to type RESULT_TYPE.
19917 Then the expression will be built.
19918 It will be given type FINAL_TYPE if that is nonzero;
19919 @@ -5024,6 +5027,12 @@
19920 return rhs;
19921 }
19922
19923 + if (type_unknown_p (rhs))
19924 + {
19925 + error ("no context to resolve type of %qE", rhs);
19926 + return error_mark_node;
19927 + }
19928 +
19929 return build2 (COMPOUND_EXPR, TREE_TYPE (rhs), lhs, rhs);
19930 }
19931
19932 --- a/gcc/c-ppoutput.c
19933 +++ b/gcc/c-ppoutput.c
19934 @@ -521,6 +521,7 @@
19935
19936 if (map != NULL)
19937 {
19938 + input_location = map->start_location;
19939 if (print.first_time)
19940 {
19941 /* Avoid printing foo.i when the main file is foo.c. */
19942 --- a/gcc/cse.c
19943 +++ b/gcc/cse.c
19944 @@ -5754,6 +5754,11 @@
19945 validate_change (object, &XEXP (x, i),
19946 cse_process_notes (XEXP (x, i), object, changed), 0);
19947
19948 + /* Rebuild a PLUS expression in canonical form if the first operand
19949 + ends up as a constant. */
19950 + if (code == PLUS && GET_CODE (XEXP (x, 0)) == CONST_INT)
19951 + return plus_constant (XEXP(x, 1), INTVAL (XEXP (x, 0)));
19952 +
19953 return x;
19954 }
19955
19956 --- a/gcc/c-tree.h
19957 +++ b/gcc/c-tree.h
19958 @@ -647,4 +647,8 @@
19959 extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_CDIAG(3,4);
19960 extern void pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_CDIAG(3,4);
19961
19962 +extern bool c_cpp_error (cpp_reader *, int, location_t, unsigned int,
19963 + const char *, va_list *)
19964 + ATTRIBUTE_GCC_CDIAG(5,0);
19965 +
19966 #endif /* ! GCC_C_TREE_H */
19967 --- a/gcc/c-typeck.c
19968 +++ b/gcc/c-typeck.c
19969 @@ -1765,6 +1765,7 @@
19970 tree orig_exp;
19971 tree type = TREE_TYPE (exp);
19972 enum tree_code code = TREE_CODE (type);
19973 + tree promoted_type;
19974
19975 /* Functions and arrays have been converted during parsing. */
19976 gcc_assert (code != FUNCTION_TYPE);
19977 @@ -1801,6 +1802,10 @@
19978 if (exp == error_mark_node)
19979 return error_mark_node;
19980
19981 + promoted_type = targetm.promoted_type (type);
19982 + if (promoted_type)
19983 + return convert (promoted_type, exp);
19984 +
19985 if (INTEGRAL_TYPE_P (type))
19986 return perform_integral_promotions (exp);
19987
19988 --- a/gcc/dbxout.c
19989 +++ b/gcc/dbxout.c
19990 @@ -376,6 +376,7 @@
19991 dbxout_handle_pch, /* handle_pch */
19992 debug_nothing_rtx, /* var_location */
19993 debug_nothing_void, /* switch_text_section */
19994 + debug_nothing_tree_tree, /* set_name */
19995 0 /* start_end_main_source_file */
19996 };
19997 #endif /* DBX_DEBUGGING_INFO */
19998 @@ -408,6 +409,7 @@
19999 dbxout_handle_pch, /* handle_pch */
20000 debug_nothing_rtx, /* var_location */
20001 debug_nothing_void, /* switch_text_section */
20002 + debug_nothing_tree_tree, /* set_name */
20003 0 /* start_end_main_source_file */
20004 };
20005 #endif /* XCOFF_DEBUGGING_INFO */
20006 --- a/gcc/debug.c
20007 +++ b/gcc/debug.c
20008 @@ -50,6 +50,7 @@
20009 debug_nothing_int, /* handle_pch */
20010 debug_nothing_rtx, /* var_location */
20011 debug_nothing_void, /* switch_text_section */
20012 + debug_nothing_tree_tree, /* set_name */
20013 0 /* start_end_main_source_file */
20014 };
20015
20016 @@ -67,6 +68,12 @@
20017 }
20018
20019 void
20020 +debug_nothing_tree_tree (tree t1 ATTRIBUTE_UNUSED,
20021 + tree t2 ATTRIBUTE_UNUSED)
20022 +{
20023 +}
20024 +
20025 +void
20026 debug_nothing_tree_tree_tree_bool (tree t1 ATTRIBUTE_UNUSED,
20027 tree t2 ATTRIBUTE_UNUSED,
20028 tree t3 ATTRIBUTE_UNUSED,
20029 --- a/gcc/debug.h
20030 +++ b/gcc/debug.h
20031 @@ -126,6 +126,10 @@
20032 text sections. */
20033 void (* switch_text_section) (void);
20034
20035 + /* Called from grokdeclarator. Replaces the anonymous name with the
20036 + type name. */
20037 + void (* set_name) (tree, tree);
20038 +
20039 /* This is 1 if the debug writer wants to see start and end commands for the
20040 main source files, and 0 otherwise. */
20041 int start_end_main_source_file;
20042 @@ -140,6 +144,7 @@
20043 extern void debug_nothing_int (unsigned int);
20044 extern void debug_nothing_int_int (unsigned int, unsigned int);
20045 extern void debug_nothing_tree (tree);
20046 +extern void debug_nothing_tree_tree (tree, tree);
20047 extern void debug_nothing_tree_int (tree, int);
20048 extern void debug_nothing_tree_tree_tree_bool (tree, tree, tree, bool);
20049 extern bool debug_true_const_tree (const_tree);
20050 --- a/gcc/defaults.h
20051 +++ b/gcc/defaults.h
20052 @@ -902,7 +902,8 @@
20053
20054 /* On most machines, the CFA coincides with the first incoming parm. */
20055 #ifndef ARG_POINTER_CFA_OFFSET
20056 -#define ARG_POINTER_CFA_OFFSET(FNDECL) FIRST_PARM_OFFSET (FNDECL)
20057 +#define ARG_POINTER_CFA_OFFSET(FNDECL) \
20058 + (FIRST_PARM_OFFSET (FNDECL) + crtl->args.pretend_args_size)
20059 #endif
20060
20061 /* On most machines, we use the CFA as DW_AT_frame_base. */
20062 --- a/gcc/diagnostic.c
20063 +++ b/gcc/diagnostic.c
20064 @@ -126,6 +126,7 @@
20065 diagnostic->message.args_ptr = args;
20066 diagnostic->message.format_spec = msg;
20067 diagnostic->location = location;
20068 + diagnostic->override_column = 0;
20069 diagnostic->kind = kind;
20070 diagnostic->option_index = 0;
20071 }
20072 @@ -153,6 +154,8 @@
20073 };
20074 const char *text = _(diagnostic_kind_text[diagnostic->kind]);
20075 expanded_location s = expand_location (diagnostic->location);
20076 + if (diagnostic->override_column)
20077 + s.column = diagnostic->override_column;
20078 gcc_assert (diagnostic->kind < DK_LAST_DIAGNOSTIC_KIND);
20079
20080 return
20081 --- a/gcc/diagnostic.h
20082 +++ b/gcc/diagnostic.h
20083 @@ -41,6 +41,7 @@
20084 {
20085 text_info message;
20086 location_t location;
20087 + unsigned int override_column;
20088 /* TREE_BLOCK if the diagnostic is to be reported in some inline
20089 function inlined into other function, otherwise NULL. */
20090 tree abstract_origin;
20091 @@ -185,6 +186,10 @@
20092
20093 #define report_diagnostic(D) diagnostic_report_diagnostic (global_dc, D)
20094
20095 +/* Override the column number to be used for reporting a
20096 + diagnostic. */
20097 +#define diagnostic_override_column(DI, COL) (DI)->override_column = (COL)
20098 +
20099 /* Diagnostic related functions. */
20100 extern void diagnostic_initialize (diagnostic_context *);
20101 extern void diagnostic_report_current_module (diagnostic_context *);
20102 --- a/gcc/dwarf2out.c
20103 +++ b/gcc/dwarf2out.c
20104 @@ -2474,6 +2474,12 @@
20105 insn = PATTERN (insn);
20106
20107 dwarf2out_frame_debug_expr (insn, label);
20108 +
20109 + /* Check again. A parallel can save and update the same register.
20110 + We could probably check just once, here, but this is safer than
20111 + removing the check above. */
20112 + if (clobbers_queued_reg_save (insn))
20113 + flush_queued_reg_saves ();
20114 }
20115
20116 #endif
20117 @@ -4598,6 +4604,7 @@
20118 static void dwarf2out_abstract_function (tree);
20119 static void dwarf2out_var_location (rtx);
20120 static void dwarf2out_begin_function (tree);
20121 +static void dwarf2out_set_name (tree, tree);
20122
20123 /* The debug hooks structure. */
20124
20125 @@ -4631,6 +4638,7 @@
20126 debug_nothing_int, /* handle_pch */
20127 dwarf2out_var_location,
20128 dwarf2out_switch_text_section,
20129 + dwarf2out_set_name,
20130 1 /* start_end_main_source_file */
20131 };
20132 #endif
20133 @@ -5975,12 +5983,9 @@
20134 (const char *)x2) == 0;
20135 }
20136
20137 -/* Add a string attribute value to a DIE. */
20138 -
20139 -static inline void
20140 -add_AT_string (dw_die_ref die, enum dwarf_attribute attr_kind, const char *str)
20141 +static struct indirect_string_node *
20142 +find_AT_string (const char *str)
20143 {
20144 - dw_attr_node attr;
20145 struct indirect_string_node *node;
20146 void **slot;
20147
20148 @@ -6001,6 +6006,18 @@
20149 node = (struct indirect_string_node *) *slot;
20150
20151 node->refcount++;
20152 + return node;
20153 +}
20154 +
20155 +/* Add a string attribute value to a DIE. */
20156 +
20157 +static inline void
20158 +add_AT_string (dw_die_ref die, enum dwarf_attribute attr_kind, const char *str)
20159 +{
20160 + dw_attr_node attr;
20161 + struct indirect_string_node *node;
20162 +
20163 + node = find_AT_string (str);
20164
20165 attr.dw_attr = attr_kind;
20166 attr.dw_attr_val.val_class = dw_val_class_str;
20167 @@ -6637,6 +6654,8 @@
20168 static inline var_loc_list *
20169 lookup_decl_loc (const_tree decl)
20170 {
20171 + if (!decl_loc_table)
20172 + return NULL;
20173 return (var_loc_list *)
20174 htab_find_with_hash (decl_loc_table, decl, DECL_UID (decl));
20175 }
20176 @@ -13471,6 +13490,7 @@
20177 tree save_fn;
20178 tree context;
20179 int was_abstract = DECL_ABSTRACT (decl);
20180 + htab_t old_decl_loc_table;
20181
20182 /* Make sure we have the actual abstract inline, not a clone. */
20183 decl = DECL_ORIGIN (decl);
20184 @@ -13480,6 +13500,12 @@
20185 /* We've already generated the abstract instance. */
20186 return;
20187
20188 +  /* We can be called recursively when seeing a block defining an inlined
20189 +     subroutine DIE.  Be sure not to clobber the outer location table nor
20190 +     use it, or we would get locations in abstract instances.  */
20191 + old_decl_loc_table = decl_loc_table;
20192 + decl_loc_table = NULL;
20193 +
20194 /* Be sure we've emitted the in-class declaration DIE (if any) first, so
20195 we don't get confused by DECL_ABSTRACT. */
20196 if (debug_info_level > DINFO_LEVEL_TERSE)
20197 @@ -13501,6 +13527,7 @@
20198 set_decl_abstract_flags (decl, 0);
20199
20200 current_function_decl = save_fn;
20201 + decl_loc_table = old_decl_loc_table;
20202 pop_cfun ();
20203 }
20204
20205 @@ -15796,6 +15823,31 @@
20206 return fd->emitted_number;
20207 }
20208
20209 +/* Replace DW_AT_name for the decl with name. */
20210 +
20211 +static void
20212 +dwarf2out_set_name (tree decl, tree name)
20213 +{
20214 + dw_die_ref die;
20215 + dw_attr_ref attr;
20216 +
20217 + die = TYPE_SYMTAB_DIE (decl);
20218 + if (!die)
20219 + return;
20220 +
20221 + attr = get_AT (die, DW_AT_name);
20222 + if (attr)
20223 + {
20224 + struct indirect_string_node *node;
20225 +
20226 + node = find_AT_string (dwarf2_name (name, 0));
20227 +      /* Replace the string.  */
20228 + attr->dw_attr_val.v.val_str = node;
20229 + }
20230 +
20231 + else
20232 + add_name_attribute (die, dwarf2_name (name, 0));
20233 +}
20234 /* Called by the final INSN scan whenever we see a var location. We
20235 use it to drop labels in the right places, and throw the location in
20236 our lookup table. */
20237 --- a/gcc/except.c
20238 +++ b/gcc/except.c
20239 @@ -3567,6 +3567,12 @@
20240 paths below go through assemble_integer, which would take
20241 care of this for us. */
20242 STRIP_NOPS (type);
20243 + if (TREE_CODE (type) == POINTER_PLUS_EXPR)
20244 + {
20245 + gcc_assert (TREE_CODE (TREE_OPERAND (type, 1)) == INTEGER_CST);
20246 + type = TREE_OPERAND (type, 0);
20247 + STRIP_NOPS (type);
20248 + }
20249 if (TREE_CODE (type) == ADDR_EXPR)
20250 {
20251 type = TREE_OPERAND (type, 0);
20252 --- a/gcc/explow.c
20253 +++ b/gcc/explow.c
20254 @@ -1491,9 +1491,9 @@
20255 in which a scalar value of mode MODE was returned by a library call. */
20256
20257 rtx
20258 -hard_libcall_value (enum machine_mode mode)
20259 +hard_libcall_value (enum machine_mode mode, rtx fun)
20260 {
20261 - return LIBCALL_VALUE (mode);
20262 + return targetm.calls.libcall_value (mode, fun);
20263 }
20264
20265 /* Look up the tree code for a given rtx code
20266 --- a/gcc/expmed.c
20267 +++ b/gcc/expmed.c
20268 @@ -103,7 +103,8 @@
20269 static int neg_cost[2][NUM_MACHINE_MODES];
20270 static int shift_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
20271 static int shiftadd_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
20272 -static int shiftsub_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
20273 +static int shiftsub0_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
20274 +static int shiftsub1_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
20275 static int mul_cost[2][NUM_MACHINE_MODES];
20276 static int sdiv_cost[2][NUM_MACHINE_MODES];
20277 static int udiv_cost[2][NUM_MACHINE_MODES];
20278 @@ -130,7 +131,8 @@
20279 struct rtx_def shift; rtunion shift_fld1;
20280 struct rtx_def shift_mult; rtunion shift_mult_fld1;
20281 struct rtx_def shift_add; rtunion shift_add_fld1;
20282 - struct rtx_def shift_sub; rtunion shift_sub_fld1;
20283 + struct rtx_def shift_sub0; rtunion shift_sub0_fld1;
20284 + struct rtx_def shift_sub1; rtunion shift_sub1_fld1;
20285 } all;
20286
20287 rtx pow2[MAX_BITS_PER_WORD];
20288 @@ -201,9 +203,13 @@
20289 XEXP (&all.shift_add, 0) = &all.shift_mult;
20290 XEXP (&all.shift_add, 1) = &all.reg;
20291
20292 - PUT_CODE (&all.shift_sub, MINUS);
20293 - XEXP (&all.shift_sub, 0) = &all.shift_mult;
20294 - XEXP (&all.shift_sub, 1) = &all.reg;
20295 + PUT_CODE (&all.shift_sub0, MINUS);
20296 + XEXP (&all.shift_sub0, 0) = &all.shift_mult;
20297 + XEXP (&all.shift_sub0, 1) = &all.reg;
20298 +
20299 + PUT_CODE (&all.shift_sub1, MINUS);
20300 + XEXP (&all.shift_sub1, 0) = &all.reg;
20301 + XEXP (&all.shift_sub1, 1) = &all.shift_mult;
20302
20303 for (speed = 0; speed < 2; speed++)
20304 {
20305 @@ -226,7 +232,8 @@
20306 PUT_MODE (&all.shift, mode);
20307 PUT_MODE (&all.shift_mult, mode);
20308 PUT_MODE (&all.shift_add, mode);
20309 - PUT_MODE (&all.shift_sub, mode);
20310 + PUT_MODE (&all.shift_sub0, mode);
20311 + PUT_MODE (&all.shift_sub1, mode);
20312
20313 add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
20314 neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
20315 @@ -254,8 +261,8 @@
20316 }
20317
20318 shift_cost[speed][mode][0] = 0;
20319 - shiftadd_cost[speed][mode][0] = shiftsub_cost[speed][mode][0]
20320 - = add_cost[speed][mode];
20321 + shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
20322 + = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];
20323
20324 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
20325 for (m = 1; m < n; m++)
20326 @@ -265,7 +272,8 @@
20327
20328 shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
20329 shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
20330 - shiftsub_cost[speed][mode][m] = rtx_cost (&all.shift_sub, SET, speed);
20331 + shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
20332 + shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
20333 }
20334 }
20335 }
20336 @@ -2397,6 +2405,7 @@
20337 struct mult_cost best_cost;
20338 struct mult_cost new_limit;
20339 int op_cost, op_latency;
20340 + unsigned HOST_WIDE_INT orig_t = t;
20341 unsigned HOST_WIDE_INT q;
20342 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
20343 int hash_index;
20344 @@ -2542,6 +2551,38 @@
20345 best_alg->log[best_alg->ops] = m;
20346 best_alg->op[best_alg->ops] = alg_shift;
20347 }
20348 +
20349 + /* See if treating ORIG_T as a signed number yields a better
20350 + sequence. Try this sequence only for a negative ORIG_T
20351 + as it would be useless for a non-negative ORIG_T. */
20352 + if ((HOST_WIDE_INT) orig_t < 0)
20353 + {
20354 + /* Shift ORIG_T as follows because a right shift of a
20355 + negative-valued signed type is implementation
20356 + defined. */
20357 + q = ~(~orig_t >> m);
20358 + /* The function expand_shift will choose between a shift
20359 + and a sequence of additions, so the observed cost is
20360 + given as MIN (m * add_cost[speed][mode],
20361 + shift_cost[speed][mode][m]). */
20362 + op_cost = m * add_cost[speed][mode];
20363 + if (shift_cost[speed][mode][m] < op_cost)
20364 + op_cost = shift_cost[speed][mode][m];
20365 + new_limit.cost = best_cost.cost - op_cost;
20366 + new_limit.latency = best_cost.latency - op_cost;
20367 + synth_mult (alg_in, q, &new_limit, mode);
20368 +
20369 + alg_in->cost.cost += op_cost;
20370 + alg_in->cost.latency += op_cost;
20371 + if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
20372 + {
20373 + struct algorithm *x;
20374 + best_cost = alg_in->cost;
20375 + x = alg_in, alg_in = best_alg, best_alg = x;
20376 + best_alg->log[best_alg->ops] = m;
20377 + best_alg->op[best_alg->ops] = alg_shift;
20378 + }
20379 + }
20380 }
20381 if (cache_hit)
20382 goto done;
20383 @@ -2604,6 +2645,29 @@
20384 best_alg->op[best_alg->ops] = alg_add_t_m2;
20385 }
20386 }
20387 +
20388 + /* We may be able to calculate a * -7, a * -15, a * -31, etc
20389 + quickly with a - a * n for some appropriate constant n. */
20390 + m = exact_log2 (-orig_t + 1);
20391 + if (m >= 0 && m < maxm)
20392 + {
20393 + op_cost = shiftsub1_cost[speed][mode][m];
20394 + new_limit.cost = best_cost.cost - op_cost;
20395 + new_limit.latency = best_cost.latency - op_cost;
20396 + synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
20397 +
20398 + alg_in->cost.cost += op_cost;
20399 + alg_in->cost.latency += op_cost;
20400 + if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
20401 + {
20402 + struct algorithm *x;
20403 + best_cost = alg_in->cost;
20404 + x = alg_in, alg_in = best_alg, best_alg = x;
20405 + best_alg->log[best_alg->ops] = m;
20406 + best_alg->op[best_alg->ops] = alg_sub_t_m2;
20407 + }
20408 + }
20409 +
20410 if (cache_hit)
20411 goto done;
20412 }
20413 @@ -2673,9 +2737,9 @@
20414 hardware the shift may be executed concurrently with the
20415 earlier steps in the algorithm. */
20416 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
20417 - if (shiftsub_cost[speed][mode][m] < op_cost)
20418 + if (shiftsub0_cost[speed][mode][m] < op_cost)
20419 {
20420 - op_cost = shiftsub_cost[speed][mode][m];
20421 + op_cost = shiftsub0_cost[speed][mode][m];
20422 op_latency = op_cost;
20423 }
20424 else
20425 @@ -2738,7 +2802,7 @@
20426 m = exact_log2 (q);
20427 if (m >= 0 && m < maxm)
20428 {
20429 - op_cost = shiftsub_cost[speed][mode][m];
20430 + op_cost = shiftsub0_cost[speed][mode][m];
20431 new_limit.cost = best_cost.cost - op_cost;
20432 new_limit.latency = best_cost.latency - op_cost;
20433 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
20434 --- a/gcc/expr.c
20435 +++ b/gcc/expr.c
20436 @@ -4391,6 +4391,29 @@
20437
20438 /* Compute FROM and store the value in the rtx we got. */
20439
20440 + if (TREE_CODE (to) == MISALIGNED_INDIRECT_REF)
20441 + {
20442 + rtx insn;
20443 + rtx from_rtx;
20444 + enum insn_code icode;
20445 + enum machine_mode mode = GET_MODE (to_rtx);
20446 +
20447 + icode = optab_handler (movmisalign_optab, mode)->insn_code;
20448 + gcc_assert (icode != CODE_FOR_nothing);
20449 +
20450 + from_rtx = expand_expr (from, NULL_RTX, mode, EXPAND_NORMAL);
20451 + insn = GEN_FCN (icode) (to_rtx, from_rtx);
20452 + /* If that failed then force the source into a reg and try again. */
20453 + if (!insn)
20454 + {
20455 + from_rtx = copy_to_mode_reg(mode, from_rtx);
20456 + insn = GEN_FCN (icode) (to_rtx, from_rtx);
20457 + gcc_assert(insn);
20458 + }
20459 + emit_insn (insn);
20460 + return;
20461 + }
20462 +
20463 push_temp_slots ();
20464 result = store_expr (from, to_rtx, 0, nontemporal);
20465 preserve_temp_slots (result);
20466 @@ -7291,6 +7314,19 @@
20467 decl_rtl = DECL_RTL (exp);
20468 gcc_assert (decl_rtl);
20469 decl_rtl = copy_rtx (decl_rtl);
20470 + /* Record writes to register variables. */
20471 + if (modifier == EXPAND_WRITE && REG_P(decl_rtl)
20472 + && REGNO(decl_rtl) < FIRST_PSEUDO_REGISTER)
20473 + {
20474 + int i = REGNO(decl_rtl);
20475 + int nregs = hard_regno_nregs[i][GET_MODE(decl_rtl)];
20476 + while (nregs)
20477 + {
20478 + SET_HARD_REG_BIT(crtl->asm_clobbers, i);
20479 + i++;
20480 + nregs--;
20481 + }
20482 + }
20483
20484 /* Ensure variable marked as used even if it doesn't go through
20485 a parser. If it hasn't be used yet, write out an external
20486 @@ -7538,14 +7574,15 @@
20487
20488 /* Resolve the misalignment now, so that we don't have to remember
20489 to resolve it later. Of course, this only works for reads. */
20490 - /* ??? When we get around to supporting writes, we'll have to handle
20491 - this in store_expr directly. The vectorizer isn't generating
20492 - those yet, however. */
20493 if (code == MISALIGNED_INDIRECT_REF)
20494 {
20495 int icode;
20496 rtx reg, insn;
20497
20498 + /* For writes produce a MEM, and expand_assignment will DTRT. */
20499 + if (modifier == EXPAND_WRITE)
20500 + return temp;
20501 +
20502 gcc_assert (modifier == EXPAND_NORMAL
20503 || modifier == EXPAND_STACK_PARM);
20504
20505 --- a/gcc/expr.h
20506 +++ b/gcc/expr.h
20507 @@ -757,7 +757,7 @@
20508
20509 /* Return an rtx that refers to the value returned by a library call
20510 in its original home. This becomes invalid if any more code is emitted. */
20511 -extern rtx hard_libcall_value (enum machine_mode);
20512 +extern rtx hard_libcall_value (enum machine_mode, rtx);
20513
20514 /* Return the mode desired by operand N of a particular bitfield
20515 insert/extract insn, or MAX_MACHINE_MODE if no such insn is
20516 --- a/gcc/final.c
20517 +++ b/gcc/final.c
20518 @@ -891,6 +891,7 @@
20519 if (LABEL_P (insn))
20520 {
20521 rtx next;
20522 + bool next_is_jumptable;
20523
20524 /* Merge in alignments computed by compute_alignments. */
20525 log = LABEL_TO_ALIGNMENT (insn);
20526 @@ -900,31 +901,30 @@
20527 max_skip = LABEL_TO_MAX_SKIP (insn);
20528 }
20529
20530 - log = LABEL_ALIGN (insn);
20531 - if (max_log < log)
20532 + next = next_nonnote_insn (insn);
20533 + next_is_jumptable = next && JUMP_TABLE_DATA_P (next);
20534 + if (!next_is_jumptable)
20535 {
20536 - max_log = log;
20537 - max_skip = LABEL_ALIGN_MAX_SKIP;
20538 + log = LABEL_ALIGN (insn);
20539 + if (max_log < log)
20540 + {
20541 + max_log = log;
20542 + max_skip = LABEL_ALIGN_MAX_SKIP;
20543 + }
20544 }
20545 - next = next_nonnote_insn (insn);
20546 /* ADDR_VECs only take room if read-only data goes into the text
20547 section. */
20548 - if (JUMP_TABLES_IN_TEXT_SECTION
20549 - || readonly_data_section == text_section)
20550 - if (next && JUMP_P (next))
20551 - {
20552 - rtx nextbody = PATTERN (next);
20553 - if (GET_CODE (nextbody) == ADDR_VEC
20554 - || GET_CODE (nextbody) == ADDR_DIFF_VEC)
20555 - {
20556 - log = ADDR_VEC_ALIGN (next);
20557 - if (max_log < log)
20558 - {
20559 - max_log = log;
20560 - max_skip = LABEL_ALIGN_MAX_SKIP;
20561 - }
20562 - }
20563 - }
20564 + if ((JUMP_TABLES_IN_TEXT_SECTION
20565 + || readonly_data_section == text_section)
20566 + && next_is_jumptable)
20567 + {
20568 + log = ADDR_VEC_ALIGN (next);
20569 + if (max_log < log)
20570 + {
20571 + max_log = log;
20572 + max_skip = LABEL_ALIGN_MAX_SKIP;
20573 + }
20574 + }
20575 LABEL_TO_ALIGNMENT (insn) = max_log;
20576 LABEL_TO_MAX_SKIP (insn) = max_skip;
20577 max_log = 0;
20578 @@ -2013,48 +2013,41 @@
20579 app_disable ();
20580
20581 next = next_nonnote_insn (insn);
20582 - if (next != 0 && JUMP_P (next))
20583 + /* If this label is followed by a jump-table, make sure we put
20584 + the label in the read-only section. Also possibly write the
20585 + label and jump table together. */
20586 + if (next != 0 && JUMP_TABLE_DATA_P (next))
20587 {
20588 - rtx nextbody = PATTERN (next);
20589 -
20590 - /* If this label is followed by a jump-table,
20591 - make sure we put the label in the read-only section. Also
20592 - possibly write the label and jump table together. */
20593 -
20594 - if (GET_CODE (nextbody) == ADDR_VEC
20595 - || GET_CODE (nextbody) == ADDR_DIFF_VEC)
20596 - {
20597 #if defined(ASM_OUTPUT_ADDR_VEC) || defined(ASM_OUTPUT_ADDR_DIFF_VEC)
20598 - /* In this case, the case vector is being moved by the
20599 - target, so don't output the label at all. Leave that
20600 - to the back end macros. */
20601 + /* In this case, the case vector is being moved by the
20602 + target, so don't output the label at all. Leave that
20603 + to the back end macros. */
20604 #else
20605 - if (! JUMP_TABLES_IN_TEXT_SECTION)
20606 - {
20607 - int log_align;
20608 + if (! JUMP_TABLES_IN_TEXT_SECTION)
20609 + {
20610 + int log_align;
20611
20612 - switch_to_section (targetm.asm_out.function_rodata_section
20613 - (current_function_decl));
20614 + switch_to_section (targetm.asm_out.function_rodata_section
20615 + (current_function_decl));
20616
20617 #ifdef ADDR_VEC_ALIGN
20618 - log_align = ADDR_VEC_ALIGN (next);
20619 + log_align = ADDR_VEC_ALIGN (next);
20620 #else
20621 - log_align = exact_log2 (BIGGEST_ALIGNMENT / BITS_PER_UNIT);
20622 + log_align = exact_log2 (BIGGEST_ALIGNMENT / BITS_PER_UNIT);
20623 #endif
20624 - ASM_OUTPUT_ALIGN (file, log_align);
20625 - }
20626 - else
20627 - switch_to_section (current_function_section ());
20628 + ASM_OUTPUT_ALIGN (file, log_align);
20629 + }
20630 + else
20631 + switch_to_section (current_function_section ());
20632
20633 #ifdef ASM_OUTPUT_CASE_LABEL
20634 - ASM_OUTPUT_CASE_LABEL (file, "L", CODE_LABEL_NUMBER (insn),
20635 - next);
20636 + ASM_OUTPUT_CASE_LABEL (file, "L", CODE_LABEL_NUMBER (insn),
20637 + next);
20638 #else
20639 - targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (insn));
20640 + targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (insn));
20641 #endif
20642 #endif
20643 - break;
20644 - }
20645 + break;
20646 }
20647 if (LABEL_ALT_ENTRY_P (insn))
20648 output_alternate_entry_point (file, insn);
20649 --- a/gcc/fold-const.c
20650 +++ b/gcc/fold-const.c
20651 @@ -2289,7 +2289,24 @@
20652 real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1));
20653 t = build_real (type, value);
20654
20655 - TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
20656 + /* If converting an infinity or NAN to a representation that doesn't
20657 + have one, set the overflow bit so that we can produce some kind of
20658 + error message at the appropriate point if necessary. It's not the
20659 + most user-friendly message, but it's better than nothing. */
20660 + if (REAL_VALUE_ISINF (TREE_REAL_CST (arg1))
20661 + && !MODE_HAS_INFINITIES (TYPE_MODE (type)))
20662 + TREE_OVERFLOW (t) = 1;
20663 + else if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1))
20664 + && !MODE_HAS_NANS (TYPE_MODE (type)))
20665 + TREE_OVERFLOW (t) = 1;
20666 + /* Regular overflow, conversion produced an infinity in a mode that
20667 + can't represent them. */
20668 + else if (!MODE_HAS_INFINITIES (TYPE_MODE (type))
20669 + && REAL_VALUE_ISINF (value)
20670 + && !REAL_VALUE_ISINF (TREE_REAL_CST (arg1)))
20671 + TREE_OVERFLOW (t) = 1;
20672 + else
20673 + TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
20674 return t;
20675 }
20676
20677 --- a/gcc/fortran/cpp.c
20678 +++ b/gcc/fortran/cpp.c
20679 @@ -137,6 +137,9 @@
20680 static void cb_ident (cpp_reader *, source_location, const cpp_string *);
20681 static void cb_used_define (cpp_reader *, source_location, cpp_hashnode *);
20682 static void cb_used_undef (cpp_reader *, source_location, cpp_hashnode *);
20683 +static bool cb_cpp_error (cpp_reader *, int, location_t, unsigned int,
20684 + const char *, va_list *)
20685 + ATTRIBUTE_GCC_DIAG(5,0);
20686 void pp_dir_change (cpp_reader *, const char *);
20687
20688 static int dump_macro (cpp_reader *, cpp_hashnode *, void *);
20689 @@ -452,7 +455,6 @@
20690 cpp_option->cplusplus_comments = 0;
20691
20692 cpp_option->pedantic = pedantic;
20693 - cpp_option->inhibit_warnings = inhibit_warnings;
20694
20695 cpp_option->dollars_in_ident = gfc_option.flag_dollar_ok;
20696 cpp_option->discard_comments = gfc_cpp_option.discard_comments;
20697 @@ -465,9 +467,6 @@
20698
20699 cpp_post_options (cpp_in);
20700
20701 - /* If an error has occurred in cpplib, note it so we fail immediately. */
20702 - errorcount += cpp_errors (cpp_in);
20703 -
20704 gfc_cpp_register_include_paths ();
20705 }
20706
20707 @@ -482,6 +481,7 @@
20708 cb->line_change = cb_line_change;
20709 cb->ident = cb_ident;
20710 cb->def_pragma = cb_def_pragma;
20711 + cb->error = cb_cpp_error;
20712
20713 if (gfc_cpp_option.dump_includes)
20714 cb->include = cb_include;
20715 @@ -961,6 +961,57 @@
20716 cpp_define_queue = q;
20717 }
20718
20719 +/* Callback from cpp_error for PFILE to print diagnostics from the
20720 + preprocessor. The diagnostic is of type LEVEL, at location
20721 + LOCATION, with column number possibly overridden by COLUMN_OVERRIDE
20722 + if not zero; MSG is the translated message and AP the arguments.
20723 + Returns true if a diagnostic was emitted, false otherwise. */
20724 +
20725 +static bool
20726 +cb_cpp_error (cpp_reader *pfile ATTRIBUTE_UNUSED, int level,
20727 + location_t location, unsigned int column_override,
20728 + const char *msg, va_list *ap)
20729 +{
20730 + diagnostic_info diagnostic;
20731 + diagnostic_t dlevel;
20732 + int save_warn_system_headers = warn_system_headers;
20733 + bool ret;
20734 +
20735 + switch (level)
20736 + {
20737 + case CPP_DL_WARNING_SYSHDR:
20738 + warn_system_headers = 1;
20739 + /* Fall through. */
20740 + case CPP_DL_WARNING:
20741 + dlevel = DK_WARNING;
20742 + break;
20743 + case CPP_DL_PEDWARN:
20744 + dlevel = DK_PEDWARN;
20745 + break;
20746 + case CPP_DL_ERROR:
20747 + dlevel = DK_ERROR;
20748 + break;
20749 + case CPP_DL_ICE:
20750 + dlevel = DK_ICE;
20751 + break;
20752 + case CPP_DL_NOTE:
20753 + dlevel = DK_NOTE;
20754 + break;
20755 + case CPP_DL_FATAL:
20756 + dlevel = DK_FATAL;
20757 + break;
20758 + default:
20759 + gcc_unreachable ();
20760 + }
20761 + diagnostic_set_info_translated (&diagnostic, msg, ap,
20762 + location, dlevel);
20763 + if (column_override)
20764 + diagnostic_override_column (&diagnostic, column_override);
20765 + ret = report_diagnostic (&diagnostic);
20766 + if (level == CPP_DL_WARNING_SYSHDR)
20767 + warn_system_headers = save_warn_system_headers;
20768 + return ret;
20769 +}
20770
20771 /* Callback called when -fworking-director and -E to emit working
20772 directory in cpp output file. */
20773 --- a/gcc/function.c
20774 +++ b/gcc/function.c
20775 @@ -272,7 +272,10 @@
20776 if (! type)
20777 type = lang_hooks.types.type_for_mode (mode, 0);
20778
20779 - return STACK_SLOT_ALIGNMENT (type, mode, alignment);
20780 + return alignment_for_aligned_arrays (type,
20781 + STACK_SLOT_ALIGNMENT (type,
20782 + mode,
20783 + alignment));
20784 }
20785
20786 /* Allocate a stack slot of SIZE bytes and return a MEM rtx for it
20787 @@ -5359,6 +5362,57 @@
20788 {
20789 return IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (cfun->decl));
20790 }
20791 +
20792 +/* This function adjusts alignments as appropriate according to the
20793 + setting of -falign-arrays. If that is specified then the minimum
20794 + alignment for array variables is set to be the largest power of two
20795 + less than or equal to their total storage size, or the biggest
20796 + alignment used on the machine, whichever is smaller. */
20797 +
20798 +unsigned int
20799 +alignment_for_aligned_arrays (tree ty, unsigned int existing_alignment)
20800 +{
20801 + unsigned int min_alignment;
20802 + tree size;
20803 +
20804 + /* Return the existing alignment if not using -falign-arrays or if
20805 + the type is not an array type. */
20806 + if (!flag_align_arrays || !ty || TREE_CODE (ty) != ARRAY_TYPE)
20807 + return existing_alignment;
20808 +
20809 + /* Extract the total storage size of the array in bits. */
20810 + size = TYPE_SIZE (ty);
20811 + gcc_assert (size);
20812 +
20813 + /* At least for variable-length arrays, TREE_CODE (size) might not be an
20814 + integer constant; check it now. If it is not, give the array at
20815 + least BIGGEST_ALIGNMENT just to be safe. Furthermore, we assume that
20816 + alignments always fit into a host integer. So if we can't fit the
20817 + size of the array in bits into a host integer, it must also be large
20818 + enough to deserve at least BIGGEST_ALIGNMENT (see below). */
20819 + if (TREE_CODE (size) != INTEGER_CST || !host_integerp (size, 1))
20820 + min_alignment = BIGGEST_ALIGNMENT;
20821 + else
20822 + {
20823 + unsigned HOST_WIDE_INT bits = TREE_INT_CST_LOW (size);
20824 + bits = (bits ? bits : 1);
20825 +
20826 + /* An array with size greater than BIGGEST_ALIGNMENT is assigned
20827 + at least that alignment. In all other cases the minimum
20828 + alignment of the array is set to be the largest power of two
20829 + less than or equal to the total storage size of the array.
20830 + We assume that BIGGEST_ALIGNMENT fits in "unsigned int"; thus,
20831 + the shift below will not overflow. */
20832 + if (bits >= BIGGEST_ALIGNMENT)
20833 + min_alignment = BIGGEST_ALIGNMENT;
20834 + else
20835 + min_alignment = 1 << (floor_log2 (bits));
20836 + }
20837 +
20838 + /* Having computed the minimum permissible alignment, enlarge it
20839 + if EXISTING_ALIGNMENT is greater. */
20840 + return MAX (min_alignment, existing_alignment);
20841 +}
20842 \f
20843
20844 static unsigned int
20845 --- a/gcc/function.h
20846 +++ b/gcc/function.h
20847 @@ -25,6 +25,7 @@
20848 #include "tree.h"
20849 #include "hashtab.h"
20850 #include "varray.h"
20851 +#include "hard-reg-set.h"
20852
20853 /* Stack of pending (incomplete) sequences saved by `start_sequence'.
20854 Each element describes one pending sequence.
20855 @@ -441,6 +442,8 @@
20856
20857 /* True if dbr_schedule has already been called for this function. */
20858 bool dbr_scheduled_p;
20859 +
20860 + HARD_REG_SET asm_clobbers;
20861 };
20862
20863 #define return_label (crtl->x_return_label)
20864 @@ -687,4 +690,7 @@
20865 extern void used_types_insert (tree);
20866
20867 extern int get_next_funcdef_no (void);
20868 +
20869 +extern unsigned int alignment_for_aligned_arrays (tree, unsigned int);
20870 +
20871 #endif /* GCC_FUNCTION_H */
20872 --- a/gcc/gcc.c
20873 +++ b/gcc/gcc.c
20874 @@ -651,8 +651,32 @@
20875
20876 /* config.h can define SWITCHES_NEED_SPACES to control which options
20877 require spaces between the option and the argument. */
20878 +/* GCC Bugzilla PR11810 indicates that GCC does not correctly handle
20879 + "-ofoo.o", in that it records "-ofoo.o" as a temporary file to
20880 + delete, rather than "foo.o".
20881 +
20882 + Unfortunately, Eclipse's makefile generators use the "-ofoo.o"
20883 + form. See also CS Issue #3433. So, although most users probably
20884 + use "-o foo.o", the "-ofoo.o" form is used in practice.
20885 +
20886 + See this email thread for additional information:
20887 +
20888 + http://gcc.gnu.org/ml/gcc/2008-07/msg00395.html
20889 +
20890 + Therefore, we define SWITCHES_NEED_SPACES to include "o" by
20891 + default. This causes "-ofoo.o" to be split into "-o foo.o" during
20892 + the initial processing of the command-line, before being seen by
20893 + the specs machinery.
20894 +
20895 + A risk of this change is that tools which *require* the "-ofoo.o"
20896 + form will no longer work. However, we know of no such tools, and
20897 + they would not have worked with the "-o foo.o" form anyhow.
20898 +
20899 + If this general strategy is acceptable upstream, the best approach
20900 + might be simply to eliminate this macro, since the only definitions
20901 + in target files are also to the value "o". */
20902 #ifndef SWITCHES_NEED_SPACES
20903 -#define SWITCHES_NEED_SPACES ""
20904 +#define SWITCHES_NEED_SPACES "o"
20905 #endif
20906
20907 /* config.h can define ENDFILE_SPEC to override the default crtn files. */
20908 @@ -728,6 +752,8 @@
20909 %{!fsyntax-only:%{!c:%{!M:%{!MM:%{!E:%{!S:\
20910 %(linker) %l " LINK_PIE_SPEC "%X %{o*} %{A} %{d} %{e*} %{m} %{N} %{n} %{r}\
20911 %{s} %{t} %{u*} %{x} %{z} %{Z} %{!A:%{!nostdlib:%{!nostartfiles:%S}}}\
20912 + %{Wno-poison-system-directories:--no-poison-system-directories}\
20913 + %{Werror=poison-system-directories:--error-poison-system-directories}\
20914 %{static:} %{L*} %(mfwrap) %(link_libgcc) %o\
20915 %{fopenmp|ftree-parallelize-loops=*:%:include(libgomp.spec)%(link_gomp)} %(mflib)\
20916 %{fprofile-arcs|fprofile-generate|coverage:-lgcov}\
20917 @@ -4616,27 +4642,53 @@
20918
20919 if (argbuf_index > 0)
20920 {
20921 - int i, first;
20922 + int i, first, n;
20923
20924 first = n_switches;
20925 - n_switches += argbuf_index;
20926 - switches = XRESIZEVEC (struct switchstr, switches, n_switches + 1);
20927 + n = n_switches + argbuf_index;
20928 + switches = XRESIZEVEC (struct switchstr, switches, n + 1);
20929 + switches[n] = switches[first];
20930
20931 switches[n_switches] = switches[first];
20932 for (i = 0; i < argbuf_index; i++)
20933 {
20934 struct switchstr *sw;
20935 + const char *p = &argbuf[i][1];
20936 + int c = *p;
20937
20938 /* Each switch should start with '-'. */
20939 if (argbuf[i][0] != '-')
20940 fatal ("switch '%s' does not start with '-'", argbuf[i]);
20941
20942 - sw = &switches[i + first];
20943 + sw = &switches[n_switches];
20944 sw->part1 = &argbuf[i][1];
20945 sw->args = 0;
20946 sw->live_cond = 0;
20947 sw->validated = 0;
20948 sw->ordering = 0;
20949 +
20950 + /* Deal with option arguments in separate argv elements. */
20951 + if ((SWITCH_TAKES_ARG (c) > (p[1] != 0))
20952 + || WORD_SWITCH_TAKES_ARG (p))
20953 + {
20954 + int j = 0;
20955 + int n_args = WORD_SWITCH_TAKES_ARG (p);
20956 +
20957 + if (n_args == 0)
20958 + {
20959 + /* Count only the option arguments in separate argv elements. */
20960 + n_args = SWITCH_TAKES_ARG (c) - (p[1] != 0);
20961 + }
20962 + if (i + n_args >= argbuf_index)
20963 + fatal ("argument to '-%s' is missing", p);
20964 + switches[n_switches].args
20965 + = XNEWVEC (const char *, n_args + 1);
20966 + while (j < n_args)
20967 + switches[n_switches].args[j++] = argbuf[++i];
20968 + /* Null-terminate the vector. */
20969 + switches[n_switches].args[j] = 0;
20970 + }
20971 + n_switches++;
20972 }
20973 }
20974 }
20975 --- a/gcc/gcse.c
20976 +++ b/gcc/gcse.c
20977 @@ -172,6 +172,7 @@
20978 #include "hashtab.h"
20979 #include "df.h"
20980 #include "dbgcnt.h"
20981 +#include "target.h"
20982
20983 /* Propagate flow information through back edges and thus enable PRE's
20984 moving loop invariant calculations out of loops.
20985 @@ -1744,7 +1745,9 @@
20986 REG_EQUIV notes and if the argument slot is used somewhere
20987 explicitly, it means address of parameter has been taken,
20988 so we should not extend the lifetime of the pseudo. */
20989 - && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
20990 + && (note == NULL_RTX || ! MEM_P (XEXP (note, 0)))
20991 + && ! (targetm.cannot_copy_insn_p && INSN_P (insn)
20992 + && targetm.cannot_copy_insn_p (insn)))
20993 {
20994 /* An expression is not anticipatable if its operands are
20995 modified before this insn or if this is not the only SET in
20996 --- a/gcc/genautomata.c
20997 +++ b/gcc/genautomata.c
20998 @@ -1,5 +1,5 @@
20999 /* Pipeline hazard description translator.
21000 - Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
21001 + Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009
21002 Free Software Foundation, Inc.
21003
21004 Written by Vladimir Makarov <vmakarov@redhat.com>
21005 @@ -22,21 +22,25 @@
21006
21007 /* References:
21008
21009 - 1. Detecting pipeline structural hazards quickly. T. Proebsting,
21010 + 1. The finite state automaton based pipeline hazard recognizer and
21011 + instruction scheduler in GCC. V. Makarov. Proceedings of GCC
21012 + summit, 2003.
21013 +
21014 + 2. Detecting pipeline structural hazards quickly. T. Proebsting,
21015 C. Fraser. Proceedings of ACM SIGPLAN-SIGACT Symposium on
21016 Principles of Programming Languages, pages 280--286, 1994.
21017
21018 This article is a good start point to understand usage of finite
21019 state automata for pipeline hazard recognizers. But I'd
21020 - recommend the 2nd article for more deep understanding.
21021 + recommend the 1st and 3rd articles for a deeper understanding.
21022
21023 - 2. Efficient Instruction Scheduling Using Finite State Automata:
21024 + 3. Efficient Instruction Scheduling Using Finite State Automata:
21025 V. Bala and N. Rubin, Proceedings of MICRO-28. This is the best
21026 article about usage of finite state automata for pipeline hazard
21027 recognizers.
21028
21029 - The current implementation is different from the 2nd article in the
21030 - following:
21031 + The current implementation is described in the 1st article and it
21032 + is different from the 3rd article in the following:
21033
21034 1. New operator `|' (alternative) is permitted in functional unit
21035 reservation which can be treated deterministically and
21036 @@ -463,7 +467,10 @@
21037 insn. */
21038 int insn_num;
21039 /* The following field value is list of bypasses in which given insn
21040 - is output insn. */
21041 + is output insn. Bypasses with the same input insn stay one after
21042 + another in the list in the same order as their occurrences in the
21043 + description but the bypass without a guard always stays the last
21044 + in a row of bypasses with the same input insn. */
21045 struct bypass_decl *bypass_list;
21046
21047 /* The following fields are defined by automaton generator. */
21048 @@ -2367,18 +2374,67 @@
21049 }
21050
21051
21052 -/* The function searches for bypass with given IN_INSN_RESERV in given
21053 - BYPASS_LIST. */
21054 -static struct bypass_decl *
21055 -find_bypass (struct bypass_decl *bypass_list,
21056 - struct insn_reserv_decl *in_insn_reserv)
21057 -{
21058 - struct bypass_decl *bypass;
21059 -
21060 - for (bypass = bypass_list; bypass != NULL; bypass = bypass->next)
21061 - if (bypass->in_insn_reserv == in_insn_reserv)
21062 - break;
21063 - return bypass;
21064 +/* The function inserts BYPASS in the list of bypasses of the
21065 + corresponding output insn. The order of bypasses in the list is
21066 + described in a comment for member `bypass_list' (see above). If
21067 + there is already the same bypass in the list the function reports
21068 + this and does nothing. */
21069 +static void
21070 +insert_bypass (struct bypass_decl *bypass)
21071 +{
21072 + struct bypass_decl *curr, *last;
21073 + struct insn_reserv_decl *out_insn_reserv = bypass->out_insn_reserv;
21074 + struct insn_reserv_decl *in_insn_reserv = bypass->in_insn_reserv;
21075 +
21076 + for (curr = out_insn_reserv->bypass_list, last = NULL;
21077 + curr != NULL;
21078 + last = curr, curr = curr->next)
21079 + if (curr->in_insn_reserv == in_insn_reserv)
21080 + {
21081 + if ((bypass->bypass_guard_name != NULL
21082 + && curr->bypass_guard_name != NULL
21083 + && ! strcmp (bypass->bypass_guard_name, curr->bypass_guard_name))
21084 + || bypass->bypass_guard_name == curr->bypass_guard_name)
21085 + {
21086 + if (bypass->bypass_guard_name == NULL)
21087 + {
21088 + if (!w_flag)
21089 + error ("the same bypass `%s - %s' is already defined",
21090 + bypass->out_insn_name, bypass->in_insn_name);
21091 + else
21092 + warning (0, "the same bypass `%s - %s' is already defined",
21093 + bypass->out_insn_name, bypass->in_insn_name);
21094 + }
21095 + else if (!w_flag)
21096 + error ("the same bypass `%s - %s' (guard %s) is already defined",
21097 + bypass->out_insn_name, bypass->in_insn_name,
21098 + bypass->bypass_guard_name);
21099 + else
21100 + warning
21101 + (0, "the same bypass `%s - %s' (guard %s) is already defined",
21102 + bypass->out_insn_name, bypass->in_insn_name,
21103 + bypass->bypass_guard_name);
21104 + return;
21105 + }
21106 + if (curr->bypass_guard_name == NULL)
21107 + break;
21108 + if (curr->next == NULL || curr->next->in_insn_reserv != in_insn_reserv)
21109 + {
21110 + last = curr;
21111 + break;
21112 + }
21113 +
21114 + }
21115 + if (last == NULL)
21116 + {
21117 + bypass->next = out_insn_reserv->bypass_list;
21118 + out_insn_reserv->bypass_list = bypass;
21119 + }
21120 + else
21121 + {
21122 + bypass->next = last->next;
21123 + last->next = bypass;
21124 + }
21125 }
21126
21127 /* The function processes pipeline description declarations, checks
21128 @@ -2391,7 +2447,6 @@
21129 decl_t decl_in_table;
21130 decl_t out_insn_reserv;
21131 decl_t in_insn_reserv;
21132 - struct bypass_decl *bypass;
21133 int automaton_presence;
21134 int i;
21135
21136 @@ -2514,36 +2569,7 @@
21137 = DECL_INSN_RESERV (out_insn_reserv);
21138 DECL_BYPASS (decl)->in_insn_reserv
21139 = DECL_INSN_RESERV (in_insn_reserv);
21140 - bypass
21141 - = find_bypass (DECL_INSN_RESERV (out_insn_reserv)->bypass_list,
21142 - DECL_BYPASS (decl)->in_insn_reserv);
21143 - if (bypass != NULL)
21144 - {
21145 - if (DECL_BYPASS (decl)->latency == bypass->latency)
21146 - {
21147 - if (!w_flag)
21148 - error
21149 - ("the same bypass `%s - %s' is already defined",
21150 - DECL_BYPASS (decl)->out_insn_name,
21151 - DECL_BYPASS (decl)->in_insn_name);
21152 - else
21153 - warning
21154 - (0, "the same bypass `%s - %s' is already defined",
21155 - DECL_BYPASS (decl)->out_insn_name,
21156 - DECL_BYPASS (decl)->in_insn_name);
21157 - }
21158 - else
21159 - error ("bypass `%s - %s' is already defined",
21160 - DECL_BYPASS (decl)->out_insn_name,
21161 - DECL_BYPASS (decl)->in_insn_name);
21162 - }
21163 - else
21164 - {
21165 - DECL_BYPASS (decl)->next
21166 - = DECL_INSN_RESERV (out_insn_reserv)->bypass_list;
21167 - DECL_INSN_RESERV (out_insn_reserv)->bypass_list
21168 - = DECL_BYPASS (decl);
21169 - }
21170 + insert_bypass (DECL_BYPASS (decl));
21171 }
21172 }
21173 }
21174 @@ -8159,19 +8185,32 @@
21175 (advance_cycle_insn_decl)->insn_num));
21176 fprintf (output_file, " case %d:\n",
21177 bypass->in_insn_reserv->insn_num);
21178 - if (bypass->bypass_guard_name == NULL)
21179 - fprintf (output_file, " return %d;\n",
21180 - bypass->latency);
21181 - else
21182 + for (;;)
21183 {
21184 - fprintf (output_file,
21185 - " if (%s (%s, %s))\n",
21186 - bypass->bypass_guard_name, INSN_PARAMETER_NAME,
21187 - INSN2_PARAMETER_NAME);
21188 - fprintf (output_file,
21189 - " return %d;\n break;\n",
21190 - bypass->latency);
21191 + if (bypass->bypass_guard_name == NULL)
21192 + {
21193 + gcc_assert (bypass->next == NULL
21194 + || (bypass->in_insn_reserv
21195 + != bypass->next->in_insn_reserv));
21196 + fprintf (output_file, " return %d;\n",
21197 + bypass->latency);
21198 + }
21199 + else
21200 + {
21201 + fprintf (output_file,
21202 + " if (%s (%s, %s))\n",
21203 + bypass->bypass_guard_name, INSN_PARAMETER_NAME,
21204 + INSN2_PARAMETER_NAME);
21205 + fprintf (output_file, " return %d;\n",
21206 + bypass->latency);
21207 + }
21208 + if (bypass->next == NULL
21209 + || bypass->in_insn_reserv != bypass->next->in_insn_reserv)
21210 + break;
21211 + bypass = bypass->next;
21212 }
21213 + if (bypass->bypass_guard_name != NULL)
21214 + fprintf (output_file, " break;\n");
21215 }
21216 fputs (" }\n break;\n", output_file);
21217 }
21218 --- a/gcc/gengtype-lex.c
21219 +++ /dev/null
21220 @@ -1,2638 +0,0 @@
21221 -#line 2 "gengtype-lex.c"
21222 -
21223 -#line 4 "gengtype-lex.c"
21224 -
21225 -#define YY_INT_ALIGNED short int
21226 -
21227 -/* A lexical scanner generated by flex */
21228 -
21229 -#define FLEX_SCANNER
21230 -#define YY_FLEX_MAJOR_VERSION 2
21231 -#define YY_FLEX_MINOR_VERSION 5
21232 -#define YY_FLEX_SUBMINOR_VERSION 35
21233 -#if YY_FLEX_SUBMINOR_VERSION > 0
21234 -#define FLEX_BETA
21235 -#endif
21236 -
21237 -/* First, we deal with platform-specific or compiler-specific issues. */
21238 -
21239 -/* begin standard C headers. */
21240 -#include <stdio.h>
21241 -#include <string.h>
21242 -#include <errno.h>
21243 -#include <stdlib.h>
21244 -
21245 -/* end standard C headers. */
21246 -
21247 -/* flex integer type definitions */
21248 -
21249 -#ifndef FLEXINT_H
21250 -#define FLEXINT_H
21251 -
21252 -/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
21253 -
21254 -#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
21255 -
21256 -/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
21257 - * if you want the limit (max/min) macros for int types.
21258 - */
21259 -#ifndef __STDC_LIMIT_MACROS
21260 -#define __STDC_LIMIT_MACROS 1
21261 -#endif
21262 -
21263 -#include <inttypes.h>
21264 -typedef int8_t flex_int8_t;
21265 -typedef uint8_t flex_uint8_t;
21266 -typedef int16_t flex_int16_t;
21267 -typedef uint16_t flex_uint16_t;
21268 -typedef int32_t flex_int32_t;
21269 -typedef uint32_t flex_uint32_t;
21270 -#else
21271 -typedef signed char flex_int8_t;
21272 -typedef short int flex_int16_t;
21273 -typedef int flex_int32_t;
21274 -typedef unsigned char flex_uint8_t;
21275 -typedef unsigned short int flex_uint16_t;
21276 -typedef unsigned int flex_uint32_t;
21277 -#endif /* ! C99 */
21278 -
21279 -/* Limits of integral types. */
21280 -#ifndef INT8_MIN
21281 -#define INT8_MIN (-128)
21282 -#endif
21283 -#ifndef INT16_MIN
21284 -#define INT16_MIN (-32767-1)
21285 -#endif
21286 -#ifndef INT32_MIN
21287 -#define INT32_MIN (-2147483647-1)
21288 -#endif
21289 -#ifndef INT8_MAX
21290 -#define INT8_MAX (127)
21291 -#endif
21292 -#ifndef INT16_MAX
21293 -#define INT16_MAX (32767)
21294 -#endif
21295 -#ifndef INT32_MAX
21296 -#define INT32_MAX (2147483647)
21297 -#endif
21298 -#ifndef UINT8_MAX
21299 -#define UINT8_MAX (255U)
21300 -#endif
21301 -#ifndef UINT16_MAX
21302 -#define UINT16_MAX (65535U)
21303 -#endif
21304 -#ifndef UINT32_MAX
21305 -#define UINT32_MAX (4294967295U)
21306 -#endif
21307 -
21308 -#endif /* ! FLEXINT_H */
21309 -
21310 -#ifdef __cplusplus
21311 -
21312 -/* The "const" storage-class-modifier is valid. */
21313 -#define YY_USE_CONST
21314 -
21315 -#else /* ! __cplusplus */
21316 -
21317 -/* C99 requires __STDC__ to be defined as 1. */
21318 -#if defined (__STDC__)
21319 -
21320 -#define YY_USE_CONST
21321 -
21322 -#endif /* defined (__STDC__) */
21323 -#endif /* ! __cplusplus */
21324 -
21325 -#ifdef YY_USE_CONST
21326 -#define yyconst const
21327 -#else
21328 -#define yyconst
21329 -#endif
21330 -
21331 -/* Returned upon end-of-file. */
21332 -#define YY_NULL 0
21333 -
21334 -/* Promotes a possibly negative, possibly signed char to an unsigned
21335 - * integer for use as an array index. If the signed char is negative,
21336 - * we want to instead treat it as an 8-bit unsigned char, hence the
21337 - * double cast.
21338 - */
21339 -#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
21340 -
21341 -/* Enter a start condition. This macro really ought to take a parameter,
21342 - * but we do it the disgusting crufty way forced on us by the ()-less
21343 - * definition of BEGIN.
21344 - */
21345 -#define BEGIN (yy_start) = 1 + 2 *
21346 -
21347 -/* Translate the current start state into a value that can be later handed
21348 - * to BEGIN to return to the state. The YYSTATE alias is for lex
21349 - * compatibility.
21350 - */
21351 -#define YY_START (((yy_start) - 1) / 2)
21352 -#define YYSTATE YY_START
21353 -
21354 -/* Action number for EOF rule of a given start state. */
21355 -#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
21356 -
21357 -/* Special action meaning "start processing a new file". */
21358 -#define YY_NEW_FILE yyrestart(yyin )
21359 -
21360 -#define YY_END_OF_BUFFER_CHAR 0
21361 -
21362 -/* Size of default input buffer. */
21363 -#ifndef YY_BUF_SIZE
21364 -#define YY_BUF_SIZE 16384
21365 -#endif
21366 -
21367 -/* The state buf must be large enough to hold one state per character in the main buffer.
21368 - */
21369 -#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
21370 -
21371 -#ifndef YY_TYPEDEF_YY_BUFFER_STATE
21372 -#define YY_TYPEDEF_YY_BUFFER_STATE
21373 -typedef struct yy_buffer_state *YY_BUFFER_STATE;
21374 -#endif
21375 -
21376 -extern int yyleng;
21377 -
21378 -extern FILE *yyin, *yyout;
21379 -
21380 -#define EOB_ACT_CONTINUE_SCAN 0
21381 -#define EOB_ACT_END_OF_FILE 1
21382 -#define EOB_ACT_LAST_MATCH 2
21383 -
21384 - #define YY_LESS_LINENO(n)
21385 -
21386 -/* Return all but the first "n" matched characters back to the input stream. */
21387 -#define yyless(n) \
21388 - do \
21389 - { \
21390 - /* Undo effects of setting up yytext. */ \
21391 - int yyless_macro_arg = (n); \
21392 - YY_LESS_LINENO(yyless_macro_arg);\
21393 - *yy_cp = (yy_hold_char); \
21394 - YY_RESTORE_YY_MORE_OFFSET \
21395 - (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
21396 - YY_DO_BEFORE_ACTION; /* set up yytext again */ \
21397 - } \
21398 - while ( 0 )
21399 -
21400 -#define unput(c) yyunput( c, (yytext_ptr) )
21401 -
21402 -#ifndef YY_TYPEDEF_YY_SIZE_T
21403 -#define YY_TYPEDEF_YY_SIZE_T
21404 -typedef size_t yy_size_t;
21405 -#endif
21406 -
21407 -#ifndef YY_STRUCT_YY_BUFFER_STATE
21408 -#define YY_STRUCT_YY_BUFFER_STATE
21409 -struct yy_buffer_state
21410 - {
21411 - FILE *yy_input_file;
21412 -
21413 - char *yy_ch_buf; /* input buffer */
21414 - char *yy_buf_pos; /* current position in input buffer */
21415 -
21416 - /* Size of input buffer in bytes, not including room for EOB
21417 - * characters.
21418 - */
21419 - yy_size_t yy_buf_size;
21420 -
21421 - /* Number of characters read into yy_ch_buf, not including EOB
21422 - * characters.
21423 - */
21424 - int yy_n_chars;
21425 -
21426 - /* Whether we "own" the buffer - i.e., we know we created it,
21427 - * and can realloc() it to grow it, and should free() it to
21428 - * delete it.
21429 - */
21430 - int yy_is_our_buffer;
21431 -
21432 - /* Whether this is an "interactive" input source; if so, and
21433 - * if we're using stdio for input, then we want to use getc()
21434 - * instead of fread(), to make sure we stop fetching input after
21435 - * each newline.
21436 - */
21437 - int yy_is_interactive;
21438 -
21439 - /* Whether we're considered to be at the beginning of a line.
21440 - * If so, '^' rules will be active on the next match, otherwise
21441 - * not.
21442 - */
21443 - int yy_at_bol;
21444 -
21445 - int yy_bs_lineno; /**< The line count. */
21446 - int yy_bs_column; /**< The column count. */
21447 -
21448 - /* Whether to try to fill the input buffer when we reach the
21449 - * end of it.
21450 - */
21451 - int yy_fill_buffer;
21452 -
21453 - int yy_buffer_status;
21454 -
21455 -#define YY_BUFFER_NEW 0
21456 -#define YY_BUFFER_NORMAL 1
21457 - /* When an EOF's been seen but there's still some text to process
21458 - * then we mark the buffer as YY_EOF_PENDING, to indicate that we
21459 - * shouldn't try reading from the input source any more. We might
21460 - * still have a bunch of tokens to match, though, because of
21461 - * possible backing-up.
21462 - *
21463 - * When we actually see the EOF, we change the status to "new"
21464 - * (via yyrestart()), so that the user can continue scanning by
21465 - * just pointing yyin at a new input file.
21466 - */
21467 -#define YY_BUFFER_EOF_PENDING 2
21468 -
21469 - };
21470 -#endif /* !YY_STRUCT_YY_BUFFER_STATE */
21471 -
21472 -/* Stack of input buffers. */
21473 -static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
21474 -static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
21475 -static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
21476 -
21477 -/* We provide macros for accessing buffer states in case in the
21478 - * future we want to put the buffer states in a more general
21479 - * "scanner state".
21480 - *
21481 - * Returns the top of the stack, or NULL.
21482 - */
21483 -#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
21484 - ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
21485 - : NULL)
21486 -
21487 -/* Same as previous macro, but useful when we know that the buffer stack is not
21488 - * NULL or when we need an lvalue. For internal use only.
21489 - */
21490 -#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
21491 -
21492 -/* yy_hold_char holds the character lost when yytext is formed. */
21493 -static char yy_hold_char;
21494 -static int yy_n_chars; /* number of characters read into yy_ch_buf */
21495 -int yyleng;
21496 -
21497 -/* Points to current character in buffer. */
21498 -static char *yy_c_buf_p = (char *) 0;
21499 -static int yy_init = 0; /* whether we need to initialize */
21500 -static int yy_start = 0; /* start state number */
21501 -
21502 -/* Flag which is used to allow yywrap()'s to do buffer switches
21503 - * instead of setting up a fresh yyin. A bit of a hack ...
21504 - */
21505 -static int yy_did_buffer_switch_on_eof;
21506 -
21507 -void yyrestart (FILE *input_file );
21508 -void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer );
21509 -YY_BUFFER_STATE yy_create_buffer (FILE *file,int size );
21510 -void yy_delete_buffer (YY_BUFFER_STATE b );
21511 -void yy_flush_buffer (YY_BUFFER_STATE b );
21512 -void yypush_buffer_state (YY_BUFFER_STATE new_buffer );
21513 -void yypop_buffer_state (void );
21514 -
21515 -static void yyensure_buffer_stack (void );
21516 -static void yy_load_buffer_state (void );
21517 -static void yy_init_buffer (YY_BUFFER_STATE b,FILE *file );
21518 -
21519 -#define YY_FLUSH_BUFFER yy_flush_buffer(YY_CURRENT_BUFFER )
21520 -
21521 -YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size );
21522 -YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str );
21523 -YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len );
21524 -
21525 -void *yyalloc (yy_size_t );
21526 -void *yyrealloc (void *,yy_size_t );
21527 -void yyfree (void * );
21528 -
21529 -#define yy_new_buffer yy_create_buffer
21530 -
21531 -#define yy_set_interactive(is_interactive) \
21532 - { \
21533 - if ( ! YY_CURRENT_BUFFER ){ \
21534 - yyensure_buffer_stack (); \
21535 - YY_CURRENT_BUFFER_LVALUE = \
21536 - yy_create_buffer(yyin,YY_BUF_SIZE ); \
21537 - } \
21538 - YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
21539 - }
21540 -
21541 -#define yy_set_bol(at_bol) \
21542 - { \
21543 - if ( ! YY_CURRENT_BUFFER ){\
21544 - yyensure_buffer_stack (); \
21545 - YY_CURRENT_BUFFER_LVALUE = \
21546 - yy_create_buffer(yyin,YY_BUF_SIZE ); \
21547 - } \
21548 - YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
21549 - }
21550 -
21551 -#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
21552 -
21553 -/* Begin user sect3 */
21554 -
21555 -#define yywrap(n) 1
21556 -#define YY_SKIP_YYWRAP
21557 -
21558 -typedef unsigned char YY_CHAR;
21559 -
21560 -FILE *yyin = (FILE *) 0, *yyout = (FILE *) 0;
21561 -
21562 -typedef int yy_state_type;
21563 -
21564 -extern int yylineno;
21565 -
21566 -int yylineno = 1;
21567 -
21568 -extern char *yytext;
21569 -#define yytext_ptr yytext
21570 -
21571 -static yy_state_type yy_get_previous_state (void );
21572 -static yy_state_type yy_try_NUL_trans (yy_state_type current_state );
21573 -static int yy_get_next_buffer (void );
21574 -static void yy_fatal_error (yyconst char msg[] );
21575 -
21576 -/* Done after the current pattern has been matched and before the
21577 - * corresponding action - sets up yytext.
21578 - */
21579 -#define YY_DO_BEFORE_ACTION \
21580 - (yytext_ptr) = yy_bp; \
21581 - yyleng = (size_t) (yy_cp - yy_bp); \
21582 - (yy_hold_char) = *yy_cp; \
21583 - *yy_cp = '\0'; \
21584 - (yy_c_buf_p) = yy_cp;
21585 -
21586 -#define YY_NUM_RULES 49
21587 -#define YY_END_OF_BUFFER 50
21588 -/* This struct is not used in this scanner,
21589 - but its presence is necessary. */
21590 -struct yy_trans_info
21591 - {
21592 - flex_int32_t yy_verify;
21593 - flex_int32_t yy_nxt;
21594 - };
21595 -static yyconst flex_int16_t yy_accept[445] =
21596 - { 0,
21597 - 0, 0, 0, 0, 0, 0, 0, 0, 50, 36,
21598 - 36, 33, 45, 36, 45, 34, 36, 36, 34, 34,
21599 - 34, 34, 34, 31, 10, 10, 31, 29, 31, 31,
21600 - 31, 20, 31, 31, 31, 31, 31, 31, 31, 31,
21601 - 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
21602 - 31, 10, 31, 41, 39, 46, 46, 0, 0, 0,
21603 - 37, 0, 0, 0, 38, 32, 34, 0, 0, 0,
21604 - 0, 0, 0, 0, 0, 0, 34, 34, 34, 34,
21605 - 34, 10, 0, 25, 0, 0, 0, 0, 9, 20,
21606 - 24, 0, 0, 0, 0, 0, 0, 0, 0, 26,
21607 -
21608 - 11, 0, 0, 0, 0, 0, 0, 0, 0, 0,
21609 - 0, 0, 0, 0, 0, 10, 0, 0, 0, 0,
21610 - 42, 44, 43, 0, 35, 0, 0, 0, 0, 0,
21611 - 0, 34, 34, 34, 34, 34, 34, 27, 28, 0,
21612 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
21613 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
21614 - 0, 0, 0, 30, 0, 0, 0, 0, 0, 0,
21615 - 0, 0, 0, 0, 34, 34, 34, 34, 34, 34,
21616 - 0, 0, 0, 13, 0, 14, 0, 0, 0, 0,
21617 - 22, 22, 0, 0, 0, 0, 0, 0, 0, 0,
21618 -
21619 - 0, 0, 0, 48, 0, 0, 0, 0, 0, 0,
21620 - 0, 34, 34, 34, 34, 34, 34, 0, 0, 0,
21621 - 0, 0, 17, 0, 0, 0, 0, 0, 0, 0,
21622 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
21623 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 34,
21624 - 34, 34, 34, 34, 3, 0, 0, 0, 0, 12,
21625 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
21626 - 0, 0, 0, 0, 0, 0, 15, 0, 0, 0,
21627 - 0, 0, 0, 0, 34, 4, 5, 2, 34, 0,
21628 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
21629 -
21630 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 16,
21631 - 0, 0, 0, 0, 34, 1, 0, 0, 0, 0,
21632 - 0, 0, 0, 0, 0, 22, 22, 0, 0, 0,
21633 - 0, 0, 0, 0, 0, 0, 0, 34, 34, 34,
21634 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
21635 - 21, 0, 0, 0, 0, 0, 0, 34, 7, 6,
21636 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 18,
21637 - 0, 0, 0, 34, 0, 0, 0, 0, 0, 0,
21638 - 0, 0, 19, 0, 0, 47, 34, 0, 0, 0,
21639 - 0, 0, 0, 0, 0, 0, 0, 34, 0, 0,
21640 -
21641 - 0, 0, 0, 0, 0, 0, 34, 0, 24, 24,
21642 - 0, 0, 0, 0, 0, 0, 0, 34, 0, 0,
21643 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 8,
21644 - 0, 23, 0, 0, 0, 0, 0, 40, 0, 0,
21645 - 0, 0, 0, 0
21646 - } ;
21647 -
21648 -static yyconst flex_int32_t yy_ec[256] =
21649 - { 0,
21650 - 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
21651 - 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
21652 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21653 - 1, 2, 1, 4, 5, 1, 6, 1, 7, 8,
21654 - 9, 10, 1, 6, 6, 11, 12, 13, 13, 13,
21655 - 13, 13, 13, 13, 13, 13, 13, 6, 6, 6,
21656 - 6, 6, 1, 1, 14, 15, 16, 17, 18, 19,
21657 - 20, 21, 22, 23, 23, 24, 25, 26, 27, 28,
21658 - 23, 29, 30, 31, 32, 33, 34, 23, 35, 23,
21659 - 36, 37, 38, 1, 39, 1, 40, 41, 42, 43,
21660 -
21661 - 44, 45, 46, 47, 48, 49, 49, 50, 51, 52,
21662 - 53, 54, 49, 55, 56, 57, 58, 59, 49, 60,
21663 - 61, 62, 6, 6, 6, 1, 1, 1, 1, 1,
21664 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21665 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21666 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21667 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21668 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21669 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21670 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21671 -
21672 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21673 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21674 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21675 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21676 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21677 - 1, 1, 1, 1, 1
21678 - } ;
21679 -
21680 -static yyconst flex_int32_t yy_meta[63] =
21681 - { 0,
21682 - 1, 2, 3, 1, 1, 1, 1, 1, 4, 5,
21683 - 1, 1, 6, 7, 7, 7, 7, 7, 7, 7,
21684 - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
21685 - 7, 7, 7, 7, 7, 8, 1, 1, 9, 9,
21686 - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
21687 - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
21688 - 9, 9
21689 - } ;
21690 -
21691 -static yyconst flex_int16_t yy_base[483] =
21692 - { 0,
21693 - 0, 38, 96, 12, 12, 13, 15, 16, 1028, 1444,
21694 - 32, 51, 20, 990, 1016, 0, 157, 18, 1007, 964,
21695 - 966, 961, 969, 1444, 25, 27, 27, 1444, 983, 1008,
21696 - 1008, 1004, 215, 253, 5, 32, 29, 974, 45, 962,
21697 - 996, 35, 38, 39, 40, 41, 134, 42, 136, 137,
21698 - 138, 75, 996, 0, 1444, 985, 984, 166, 964, 162,
21699 - 1444, 0, 987, 990, 1444, 1444, 0, 186, 165, 974,
21700 - 931, 933, 928, 936, 168, 943, 967, 928, 140, 930,
21701 - 935, 87, 167, 1444, 979, 974, 977, 968, 1444, 950,
21702 - 1444, 935, 934, 145, 52, 46, 148, 165, 922, 1444,
21703 -
21704 - 1444, 152, 156, 155, 170, 173, 175, 182, 183, 185,
21705 - 211, 214, 222, 218, 221, 269, 957, 956, 291, 0,
21706 - 1444, 1444, 1444, 922, 1444, 937, 898, 195, 900, 905,
21707 - 907, 912, 906, 892, 890, 903, 893, 1444, 1444, 209,
21708 - 254, 251, 353, 248, 391, 354, 350, 351, 340, 355,
21709 - 341, 429, 339, 356, 344, 347, 360, 390, 43, 361,
21710 - 391, 395, 429, 1444, 0, 0, 280, 906, 900, 886,
21711 - 884, 897, 872, 876, 890, 867, 873, 878, 876, 866,
21712 - 381, 348, 382, 1444, 384, 1444, 389, 397, 491, 398,
21713 - 1444, 528, 418, 399, 420, 477, 478, 422, 421, 480,
21714 -
21715 - 479, 0, 449, 1444, 884, 861, 867, 872, 870, 860,
21716 - 859, 892, 857, 866, 850, 862, 586, 493, 496, 494,
21717 - 484, 624, 1444, 0, 878, 876, 876, 834, 839, 841,
21718 - 832, 830, 199, 830, 490, 499, 486, 492, 488, 489,
21719 - 662, 0, 863, 828, 837, 821, 833, 0, 832, 859,
21720 - 700, 738, 776, 829, 1444, 431, 258, 437, 515, 1444,
21721 - 846, 844, 841, 817, 829, 809, 319, 815, 813, 478,
21722 - 809, 512, 528, 520, 525, 814, 1444, 0, 833, 0,
21723 - 0, 0, 803, 551, 808, 1444, 1444, 1444, 852, 383,
21724 - 521, 530, 539, 822, 829, 813, 793, 787, 802, 801,
21725 -
21726 - 556, 793, 783, 785, 792, 787, 523, 545, 535, 1444,
21727 - 0, 795, 0, 561, 585, 1444, 555, 343, 581, 584,
21728 - 794, 811, 792, 773, 772, 1444, 0, 771, 783, 772,
21729 - 764, 552, 890, 558, 0, 623, 778, 784, 928, 966,
21730 - 583, 593, 594, 613, 792, 792, 771, 761, 746, 591,
21731 - 1444, 1004, 0, 778, 0, 0, 766, 776, 1444, 1444,
21732 - 620, 621, 626, 627, 653, 777, 769, 775, 1042, 1444,
21733 - 0, 772, 787, 767, 556, 577, 615, 649, 629, 762,
21734 - 753, 774, 1444, 0, 763, 1444, 773, 632, 659, 662,
21735 - 656, 654, 754, 742, 753, 0, 754, 729, 665, 688,
21736 -
21737 - 667, 744, 742, 683, 0, 695, 692, 689, 715, 722,
21738 - 699, 711, 701, 666, 673, 0, 705, 1080, 704, 749,
21739 - 751, 753, 756, 663, 658, 618, 593, 0, 0, 1444,
21740 - 758, 1444, 760, 600, 588, 543, 483, 1444, 439, 386,
21741 - 247, 206, 167, 1444, 1118, 1127, 1136, 1145, 1154, 1158,
21742 - 1167, 1176, 1185, 1194, 1202, 1211, 1220, 1229, 1238, 1247,
21743 - 1256, 1265, 1273, 1282, 1290, 1298, 1306, 1314, 1323, 1331,
21744 - 1340, 1349, 1357, 1365, 1374, 1383, 1392, 1400, 1409, 1417,
21745 - 1426, 1435
21746 - } ;
21747 -
21748 -static yyconst flex_int16_t yy_def[483] =
21749 - { 0,
21750 - 445, 445, 444, 3, 446, 446, 446, 446, 444, 444,
21751 - 444, 444, 447, 448, 449, 450, 444, 444, 450, 450,
21752 - 450, 450, 450, 444, 444, 444, 451, 444, 452, 444,
21753 - 444, 444, 453, 453, 34, 34, 34, 34, 34, 454,
21754 - 444, 34, 34, 34, 34, 34, 34, 34, 34, 34,
21755 - 34, 444, 455, 456, 444, 457, 457, 444, 444, 447,
21756 - 444, 447, 444, 448, 444, 444, 450, 444, 444, 444,
21757 - 444, 444, 444, 444, 444, 444, 450, 450, 450, 450,
21758 - 450, 444, 451, 444, 451, 444, 452, 444, 444, 444,
21759 - 444, 34, 34, 34, 34, 34, 34, 34, 454, 444,
21760 -
21761 - 444, 34, 34, 34, 34, 34, 34, 34, 34, 34,
21762 - 34, 34, 34, 34, 34, 444, 455, 455, 444, 458,
21763 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21764 - 444, 450, 450, 450, 450, 450, 450, 444, 444, 34,
21765 - 34, 34, 453, 34, 453, 34, 34, 34, 34, 34,
21766 - 34, 453, 34, 34, 34, 34, 34, 34, 34, 34,
21767 - 34, 34, 119, 444, 119, 459, 444, 444, 444, 444,
21768 - 444, 444, 444, 444, 450, 450, 450, 450, 450, 450,
21769 - 34, 34, 34, 444, 34, 444, 34, 34, 453, 34,
21770 - 444, 444, 34, 34, 34, 34, 34, 34, 34, 34,
21771 -
21772 - 34, 460, 444, 444, 444, 444, 444, 444, 444, 444,
21773 - 444, 450, 450, 450, 450, 450, 450, 34, 34, 34,
21774 - 34, 453, 444, 192, 444, 444, 444, 444, 444, 444,
21775 - 444, 444, 444, 444, 34, 34, 34, 34, 34, 34,
21776 - 453, 461, 444, 444, 444, 444, 444, 462, 444, 450,
21777 - 450, 450, 450, 450, 444, 34, 34, 34, 34, 444,
21778 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21779 - 444, 34, 34, 34, 34, 453, 444, 463, 444, 464,
21780 - 465, 466, 444, 444, 450, 444, 444, 444, 450, 34,
21781 - 34, 34, 34, 444, 444, 444, 444, 444, 444, 444,
21782 -
21783 - 467, 444, 444, 444, 444, 444, 34, 34, 34, 444,
21784 - 468, 444, 469, 444, 450, 444, 34, 34, 34, 34,
21785 - 444, 444, 444, 444, 444, 444, 192, 444, 444, 444,
21786 - 444, 34, 453, 34, 470, 444, 444, 450, 450, 450,
21787 - 34, 34, 34, 34, 444, 444, 444, 444, 444, 34,
21788 - 444, 453, 471, 444, 472, 473, 444, 450, 444, 444,
21789 - 34, 34, 34, 34, 34, 444, 444, 444, 453, 444,
21790 - 474, 444, 444, 450, 34, 34, 34, 34, 34, 444,
21791 - 444, 444, 444, 475, 444, 444, 450, 34, 34, 34,
21792 - 34, 34, 444, 444, 444, 476, 444, 450, 34, 34,
21793 -
21794 - 34, 444, 444, 444, 477, 444, 450, 34, 444, 478,
21795 - 34, 444, 444, 444, 444, 479, 444, 450, 34, 444,
21796 - 478, 478, 480, 444, 444, 444, 444, 481, 482, 444,
21797 - 444, 444, 480, 444, 444, 444, 444, 444, 444, 444,
21798 - 444, 444, 444, 0, 444, 444, 444, 444, 444, 444,
21799 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21800 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21801 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21802 - 444, 444
21803 - } ;
21804 -
21805 -static yyconst flex_int16_t yy_nxt[1507] =
21806 - { 0,
21807 - 10, 11, 12, 13, 10, 10, 14, 10, 10, 10,
21808 - 10, 15, 10, 52, 55, 55, 53, 55, 55, 75,
21809 - 444, 56, 56, 61, 57, 57, 82, 82, 82, 82,
21810 - 84, 92, 94, 58, 58, 10, 10, 10, 10, 17,
21811 - 12, 13, 18, 10, 14, 10, 10, 10, 10, 15,
21812 - 10, 59, 58, 58, 19, 92, 62, 95, 92, 96,
21813 - 76, 92, 98, 85, 92, 92, 92, 92, 92, 92,
21814 - 59, 92, 92, 10, 10, 10, 116, 82, 92, 117,
21815 - 143, 20, 105, 142, 103, 109, 198, 102, 82, 82,
21816 - 104, 106, 107, 21, 22, 23, 24, 25, 26, 27,
21817 -
21818 - 24, 28, 29, 28, 28, 28, 30, 31, 32, 33,
21819 - 34, 35, 33, 36, 33, 37, 38, 33, 33, 33,
21820 - 33, 33, 33, 33, 33, 33, 33, 33, 39, 33,
21821 - 33, 40, 41, 24, 33, 33, 42, 43, 44, 45,
21822 - 33, 33, 33, 46, 33, 47, 33, 48, 33, 49,
21823 - 33, 50, 33, 51, 33, 33, 33, 33, 68, 58,
21824 - 92, 69, 92, 92, 92, 61, 75, 58, 58, 75,
21825 - 84, 92, 141, 70, 92, 110, 59, 144, 92, 134,
21826 - 145, 92, 92, 112, 113, 59, 108, 68, 58, 115,
21827 - 69, 92, 111, 114, 135, 147, 92, 301, 62, 92,
21828 -
21829 - 71, 92, 70, 85, 146, 59, 148, 76, 92, 92,
21830 - 76, 92, 72, 73, 74, 91, 91, 91, 91, 91,
21831 - 91, 91, 91, 91, 91, 91, 91, 151, 149, 71,
21832 - 150, 152, 181, 153, 170, 92, 301, 92, 154, 155,
21833 - 92, 72, 73, 74, 92, 269, 270, 92, 92, 171,
21834 - 91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
21835 - 91, 91, 91, 91, 91, 156, 157, 158, 161, 182,
21836 - 116, 82, 160, 117, 92, 183, 162, 92, 185, 93,
21837 - 92, 203, 203, 159, 92, 443, 291, 204, 91, 91,
21838 - 91, 163, 163, 164, 163, 163, 163, 163, 163, 163,
21839 -
21840 - 163, 163, 163, 163, 163, 163, 163, 163, 163, 163,
21841 - 163, 163, 163, 163, 163, 163, 163, 163, 163, 163,
21842 - 163, 163, 163, 163, 163, 163, 163, 163, 163, 165,
21843 - 165, 165, 165, 165, 165, 165, 165, 165, 165, 165,
21844 - 165, 165, 165, 165, 165, 165, 165, 165, 165, 165,
21845 - 165, 165, 165, 184, 184, 184, 184, 184, 184, 184,
21846 - 184, 184, 184, 184, 184, 92, 92, 92, 219, 92,
21847 - 92, 300, 342, 92, 92, 301, 92, 92, 188, 190,
21848 - 92, 92, 92, 194, 152, 195, 92, 92, 184, 184,
21849 - 184, 186, 186, 186, 186, 186, 186, 186, 186, 186,
21850 -
21851 - 186, 186, 186, 152, 152, 189, 187, 92, 92, 92,
21852 - 92, 442, 193, 317, 196, 92, 92, 92, 199, 218,
21853 - 220, 92, 221, 92, 92, 92, 186, 186, 186, 191,
21854 - 192, 192, 191, 191, 191, 191, 191, 191, 191, 191,
21855 - 191, 197, 201, 200, 92, 222, 92, 92, 92, 236,
21856 - 203, 203, 290, 152, 152, 441, 204, 92, 292, 237,
21857 - 239, 235, 240, 92, 191, 191, 191, 163, 163, 163,
21858 - 163, 163, 163, 163, 163, 163, 163, 163, 163, 163,
21859 - 163, 163, 163, 163, 163, 163, 163, 163, 163, 163,
21860 - 163, 223, 223, 223, 223, 223, 223, 223, 223, 223,
21861 -
21862 - 223, 223, 223, 92, 92, 92, 92, 256, 258, 257,
21863 - 92, 273, 92, 301, 92, 92, 92, 259, 92, 92,
21864 - 92, 238, 92, 304, 158, 92, 223, 223, 223, 224,
21865 - 224, 241, 272, 152, 152, 275, 293, 274, 92, 305,
21866 - 273, 92, 225, 226, 152, 276, 92, 92, 227, 92,
21867 - 307, 92, 314, 314, 92, 320, 92, 327, 327, 318,
21868 - 319, 92, 314, 314, 440, 92, 274, 308, 228, 229,
21869 - 230, 92, 309, 341, 334, 231, 332, 232, 92, 388,
21870 - 337, 92, 92, 233, 92, 234, 255, 255, 255, 255,
21871 - 255, 255, 255, 255, 255, 255, 255, 255, 338, 343,
21872 -
21873 - 333, 344, 389, 92, 361, 439, 339, 92, 350, 92,
21874 - 92, 340, 340, 352, 362, 363, 301, 92, 437, 92,
21875 - 92, 255, 255, 255, 260, 260, 260, 260, 260, 260,
21876 - 260, 260, 260, 260, 260, 260, 354, 375, 390, 92,
21877 - 376, 92, 364, 377, 355, 369, 92, 92, 152, 356,
21878 - 356, 365, 92, 92, 392, 92, 436, 378, 92, 260,
21879 - 260, 260, 277, 277, 277, 277, 277, 277, 277, 277,
21880 - 277, 277, 277, 277, 379, 92, 399, 401, 400, 92,
21881 - 92, 408, 92, 435, 152, 92, 434, 391, 92, 409,
21882 - 409, 92, 411, 92, 427, 410, 426, 277, 277, 277,
21883 -
21884 - 286, 286, 286, 286, 286, 286, 286, 286, 286, 286,
21885 - 286, 286, 414, 418, 92, 92, 420, 420, 418, 418,
21886 - 425, 415, 421, 422, 422, 92, 429, 419, 424, 152,
21887 - 92, 429, 429, 417, 152, 286, 286, 286, 287, 287,
21888 - 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
21889 - 420, 420, 422, 422, 422, 422, 421, 431, 431, 431,
21890 - 431, 431, 431, 413, 432, 412, 432, 407, 432, 406,
21891 - 404, 403, 402, 287, 287, 287, 288, 288, 288, 288,
21892 - 288, 288, 288, 288, 288, 288, 288, 288, 398, 397,
21893 - 395, 394, 393, 387, 386, 385, 382, 381, 380, 374,
21894 -
21895 - 373, 372, 301, 301, 368, 367, 366, 358, 357, 304,
21896 - 349, 288, 288, 288, 310, 310, 310, 310, 310, 310,
21897 - 310, 310, 310, 310, 310, 310, 348, 301, 301, 301,
21898 - 347, 346, 345, 336, 331, 330, 329, 328, 301, 325,
21899 - 324, 301, 301, 323, 322, 321, 315, 313, 312, 310,
21900 - 310, 310, 316, 316, 316, 316, 316, 316, 316, 316,
21901 - 316, 316, 316, 316, 306, 303, 302, 299, 298, 297,
21902 - 296, 295, 294, 289, 285, 284, 283, 282, 281, 280,
21903 - 279, 271, 268, 267, 266, 265, 264, 316, 316, 316,
21904 - 351, 351, 351, 351, 351, 351, 351, 351, 351, 351,
21905 -
21906 - 351, 351, 263, 262, 261, 254, 253, 252, 251, 250,
21907 - 249, 248, 247, 246, 245, 244, 243, 217, 216, 215,
21908 - 214, 213, 212, 211, 210, 351, 351, 351, 359, 359,
21909 - 359, 359, 359, 359, 359, 359, 359, 359, 359, 359,
21910 - 209, 208, 207, 206, 205, 180, 179, 178, 177, 176,
21911 - 175, 174, 173, 172, 169, 168, 167, 118, 118, 100,
21912 - 140, 92, 90, 359, 359, 359, 360, 360, 360, 360,
21913 - 360, 360, 360, 360, 360, 360, 360, 360, 139, 444,
21914 - 138, 444, 137, 136, 133, 132, 131, 130, 129, 128,
21915 - 127, 126, 444, 125, 124, 123, 122, 118, 101, 100,
21916 -
21917 - 97, 360, 360, 360, 370, 370, 370, 370, 370, 370,
21918 - 370, 370, 370, 370, 370, 370, 90, 89, 88, 87,
21919 - 81, 80, 79, 78, 77, 66, 64, 444, 444, 444,
21920 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 370,
21921 - 370, 370, 383, 383, 383, 383, 383, 383, 383, 383,
21922 - 383, 383, 383, 383, 444, 444, 444, 444, 444, 444,
21923 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21924 - 444, 444, 444, 444, 444, 444, 444, 383, 383, 383,
21925 - 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
21926 - 430, 430, 444, 444, 444, 444, 444, 444, 444, 444,
21927 -
21928 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21929 - 444, 444, 444, 444, 444, 430, 430, 430, 16, 16,
21930 - 16, 16, 16, 16, 16, 16, 16, 54, 54, 54,
21931 - 54, 54, 54, 54, 54, 54, 60, 60, 60, 60,
21932 - 60, 60, 60, 60, 60, 63, 63, 63, 63, 63,
21933 - 63, 63, 63, 63, 65, 65, 65, 65, 65, 65,
21934 - 65, 65, 65, 67, 67, 444, 67, 83, 83, 83,
21935 - 83, 83, 83, 83, 83, 83, 86, 86, 86, 86,
21936 - 86, 86, 86, 86, 86, 92, 92, 92, 92, 92,
21937 - 92, 92, 92, 92, 99, 99, 99, 99, 99, 99,
21938 -
21939 - 99, 444, 99, 119, 444, 444, 444, 444, 444, 444,
21940 - 119, 120, 120, 444, 120, 444, 120, 120, 120, 120,
21941 - 121, 121, 121, 121, 121, 121, 121, 121, 121, 166,
21942 - 166, 444, 166, 444, 166, 166, 166, 166, 202, 202,
21943 - 444, 202, 444, 202, 202, 202, 202, 242, 242, 444,
21944 - 242, 444, 242, 242, 242, 242, 278, 278, 444, 278,
21945 - 444, 278, 278, 278, 278, 255, 255, 255, 255, 255,
21946 - 444, 444, 255, 311, 311, 444, 311, 444, 311, 311,
21947 - 311, 311, 286, 286, 286, 286, 286, 444, 444, 286,
21948 - 287, 287, 287, 287, 287, 444, 444, 287, 288, 288,
21949 -
21950 - 288, 288, 288, 444, 444, 288, 326, 326, 326, 326,
21951 - 326, 444, 444, 326, 335, 335, 444, 335, 444, 335,
21952 - 335, 335, 335, 316, 316, 316, 316, 316, 444, 444,
21953 - 316, 353, 353, 444, 353, 444, 353, 353, 353, 353,
21954 - 371, 371, 444, 371, 444, 371, 371, 371, 371, 359,
21955 - 359, 359, 359, 359, 444, 444, 359, 360, 360, 360,
21956 - 360, 360, 444, 444, 360, 384, 384, 444, 384, 444,
21957 - 384, 384, 384, 384, 396, 396, 444, 396, 444, 396,
21958 - 396, 396, 396, 405, 405, 444, 405, 444, 405, 405,
21959 - 405, 405, 416, 416, 444, 416, 444, 416, 416, 416,
21960 -
21961 - 416, 423, 423, 444, 444, 444, 423, 444, 423, 428,
21962 - 428, 444, 428, 444, 428, 428, 428, 428, 433, 433,
21963 - 433, 444, 433, 433, 444, 433, 438, 438, 444, 438,
21964 - 444, 438, 438, 438, 438, 430, 430, 430, 430, 430,
21965 - 444, 444, 430, 9, 444, 444, 444, 444, 444, 444,
21966 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21967 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21968 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21969 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21970 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
21971 -
21972 - 444, 444, 444, 444, 444, 444
21973 - } ;
21974 -
21975 -static yyconst flex_int16_t yy_chk[1507] =
21976 - { 0,
21977 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
21978 - 1, 1, 1, 4, 5, 6, 4, 7, 8, 18,
21979 - 0, 5, 6, 13, 7, 8, 25, 25, 26, 26,
21980 - 27, 35, 35, 11, 11, 1, 1, 1, 2, 2,
21981 - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
21982 - 2, 11, 12, 12, 2, 37, 13, 36, 36, 37,
21983 - 18, 42, 39, 27, 43, 44, 45, 46, 48, 159,
21984 - 12, 39, 96, 2, 2, 2, 52, 52, 95, 52,
21985 - 96, 2, 44, 95, 43, 48, 159, 42, 82, 82,
21986 - 43, 45, 46, 2, 2, 2, 3, 3, 3, 3,
21987 -
21988 - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
21989 - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
21990 - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
21991 - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
21992 - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
21993 - 3, 3, 3, 3, 3, 3, 3, 3, 17, 17,
21994 - 47, 17, 49, 50, 51, 60, 69, 58, 58, 75,
21995 - 83, 94, 94, 17, 97, 49, 17, 97, 102, 79,
21996 - 98, 104, 103, 50, 50, 58, 47, 68, 68, 51,
21997 - 68, 98, 49, 50, 79, 103, 105, 443, 60, 106,
21998 -
21999 - 17, 107, 68, 83, 102, 68, 104, 69, 108, 109,
22000 - 75, 110, 17, 17, 17, 33, 33, 33, 33, 33,
22001 - 33, 33, 33, 33, 33, 33, 33, 107, 105, 68,
22002 - 106, 107, 140, 108, 128, 140, 442, 111, 109, 110,
22003 - 112, 68, 68, 68, 114, 233, 233, 115, 113, 128,
22004 - 33, 33, 33, 34, 34, 34, 34, 34, 34, 34,
22005 - 34, 34, 34, 34, 34, 111, 112, 113, 115, 141,
22006 - 116, 116, 114, 116, 144, 142, 115, 142, 144, 34,
22007 - 141, 167, 167, 113, 257, 441, 257, 167, 34, 34,
22008 - 34, 119, 119, 119, 119, 119, 119, 119, 119, 119,
22009 -
22010 - 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
22011 - 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
22012 - 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
22013 - 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
22014 - 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
22015 - 119, 119, 119, 143, 143, 143, 143, 143, 143, 143,
22016 - 143, 143, 143, 143, 143, 153, 149, 151, 182, 318,
22017 - 155, 267, 318, 156, 182, 267, 147, 148, 149, 151,
22018 - 146, 150, 154, 155, 153, 156, 157, 160, 143, 143,
22019 - 143, 145, 145, 145, 145, 145, 145, 145, 145, 145,
22020 -
22021 - 145, 145, 145, 146, 147, 150, 148, 181, 183, 290,
22022 - 185, 440, 154, 290, 157, 187, 158, 161, 160, 181,
22023 - 183, 162, 185, 188, 190, 194, 145, 145, 145, 152,
22024 - 152, 152, 152, 152, 152, 152, 152, 152, 152, 152,
22025 - 152, 158, 162, 161, 193, 187, 195, 199, 198, 194,
22026 - 203, 203, 256, 188, 190, 439, 203, 256, 258, 195,
22027 - 198, 193, 199, 258, 152, 152, 152, 163, 163, 163,
22028 - 163, 163, 163, 163, 163, 163, 163, 163, 163, 163,
22029 - 163, 163, 163, 163, 163, 163, 163, 163, 163, 163,
22030 - 163, 189, 189, 189, 189, 189, 189, 189, 189, 189,
22031 -
22032 - 189, 189, 189, 196, 197, 201, 200, 218, 220, 219,
22033 - 221, 236, 237, 437, 239, 240, 235, 221, 238, 218,
22034 - 220, 197, 219, 270, 201, 236, 189, 189, 189, 192,
22035 - 192, 200, 235, 196, 238, 237, 259, 236, 272, 270,
22036 - 273, 259, 192, 192, 239, 240, 274, 291, 192, 307,
22037 - 272, 275, 284, 284, 273, 293, 292, 301, 301, 291,
22038 - 292, 309, 314, 314, 436, 293, 273, 274, 192, 192,
22039 - 192, 308, 275, 317, 309, 192, 307, 192, 332, 375,
22040 - 314, 317, 375, 192, 334, 192, 217, 217, 217, 217,
22041 - 217, 217, 217, 217, 217, 217, 217, 217, 315, 319,
22042 -
22043 - 308, 320, 376, 376, 341, 435, 315, 319, 332, 341,
22044 - 320, 315, 315, 334, 342, 343, 434, 350, 427, 342,
22045 - 343, 217, 217, 217, 222, 222, 222, 222, 222, 222,
22046 - 222, 222, 222, 222, 222, 222, 336, 361, 377, 344,
22047 - 362, 377, 344, 363, 336, 350, 361, 362, 388, 336,
22048 - 336, 344, 363, 364, 379, 379, 426, 364, 388, 222,
22049 - 222, 222, 241, 241, 241, 241, 241, 241, 241, 241,
22050 - 241, 241, 241, 241, 365, 378, 389, 391, 390, 365,
22051 - 392, 399, 391, 425, 392, 389, 424, 378, 390, 400,
22052 - 400, 399, 401, 401, 415, 400, 414, 241, 241, 241,
22053 -
22054 - 251, 251, 251, 251, 251, 251, 251, 251, 251, 251,
22055 - 251, 251, 404, 407, 400, 408, 409, 409, 407, 407,
22056 - 413, 404, 409, 410, 410, 411, 417, 408, 412, 411,
22057 - 419, 417, 417, 406, 419, 251, 251, 251, 252, 252,
22058 - 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
22059 - 420, 420, 421, 421, 422, 422, 420, 423, 423, 431,
22060 - 431, 433, 433, 403, 423, 402, 431, 398, 433, 397,
22061 - 395, 394, 393, 252, 252, 252, 253, 253, 253, 253,
22062 - 253, 253, 253, 253, 253, 253, 253, 253, 387, 385,
22063 - 382, 381, 380, 374, 373, 372, 368, 367, 366, 358,
22064 -
22065 - 357, 354, 349, 348, 347, 346, 345, 338, 337, 331,
22066 - 330, 253, 253, 253, 276, 276, 276, 276, 276, 276,
22067 - 276, 276, 276, 276, 276, 276, 329, 328, 325, 324,
22068 - 323, 322, 321, 312, 306, 305, 304, 303, 302, 300,
22069 - 299, 298, 297, 296, 295, 294, 285, 283, 279, 276,
22070 - 276, 276, 289, 289, 289, 289, 289, 289, 289, 289,
22071 - 289, 289, 289, 289, 271, 269, 268, 266, 265, 264,
22072 - 263, 262, 261, 254, 250, 249, 247, 246, 245, 244,
22073 - 243, 234, 232, 231, 230, 229, 228, 289, 289, 289,
22074 - 333, 333, 333, 333, 333, 333, 333, 333, 333, 333,
22075 -
22076 - 333, 333, 227, 226, 225, 216, 215, 214, 213, 212,
22077 - 211, 210, 209, 208, 207, 206, 205, 180, 179, 178,
22078 - 177, 176, 175, 174, 173, 333, 333, 333, 339, 339,
22079 - 339, 339, 339, 339, 339, 339, 339, 339, 339, 339,
22080 - 172, 171, 170, 169, 168, 137, 136, 135, 134, 133,
22081 - 132, 131, 130, 129, 127, 126, 124, 118, 117, 99,
22082 - 93, 92, 90, 339, 339, 339, 340, 340, 340, 340,
22083 - 340, 340, 340, 340, 340, 340, 340, 340, 88, 87,
22084 - 86, 85, 81, 80, 78, 77, 76, 74, 73, 72,
22085 - 71, 70, 64, 63, 59, 57, 56, 53, 41, 40,
22086 -
22087 - 38, 340, 340, 340, 352, 352, 352, 352, 352, 352,
22088 - 352, 352, 352, 352, 352, 352, 32, 31, 30, 29,
22089 - 23, 22, 21, 20, 19, 15, 14, 9, 0, 0,
22090 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 352,
22091 - 352, 352, 369, 369, 369, 369, 369, 369, 369, 369,
22092 - 369, 369, 369, 369, 0, 0, 0, 0, 0, 0,
22093 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
22094 - 0, 0, 0, 0, 0, 0, 0, 369, 369, 369,
22095 - 418, 418, 418, 418, 418, 418, 418, 418, 418, 418,
22096 - 418, 418, 0, 0, 0, 0, 0, 0, 0, 0,
22097 -
22098 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
22099 - 0, 0, 0, 0, 0, 418, 418, 418, 445, 445,
22100 - 445, 445, 445, 445, 445, 445, 445, 446, 446, 446,
22101 - 446, 446, 446, 446, 446, 446, 447, 447, 447, 447,
22102 - 447, 447, 447, 447, 447, 448, 448, 448, 448, 448,
22103 - 448, 448, 448, 448, 449, 449, 449, 449, 449, 449,
22104 - 449, 449, 449, 450, 450, 0, 450, 451, 451, 451,
22105 - 451, 451, 451, 451, 451, 451, 452, 452, 452, 452,
22106 - 452, 452, 452, 452, 452, 453, 453, 453, 453, 453,
22107 - 453, 453, 453, 453, 454, 454, 454, 454, 454, 454,
22108 -
22109 - 454, 0, 454, 455, 0, 0, 0, 0, 0, 0,
22110 - 455, 456, 456, 0, 456, 0, 456, 456, 456, 456,
22111 - 457, 457, 457, 457, 457, 457, 457, 457, 457, 458,
22112 - 458, 0, 458, 0, 458, 458, 458, 458, 459, 459,
22113 - 0, 459, 0, 459, 459, 459, 459, 460, 460, 0,
22114 - 460, 0, 460, 460, 460, 460, 461, 461, 0, 461,
22115 - 0, 461, 461, 461, 461, 462, 462, 462, 462, 462,
22116 - 0, 0, 462, 463, 463, 0, 463, 0, 463, 463,
22117 - 463, 463, 464, 464, 464, 464, 464, 0, 0, 464,
22118 - 465, 465, 465, 465, 465, 0, 0, 465, 466, 466,
22119 -
22120 - 466, 466, 466, 0, 0, 466, 467, 467, 467, 467,
22121 - 467, 0, 0, 467, 468, 468, 0, 468, 0, 468,
22122 - 468, 468, 468, 469, 469, 469, 469, 469, 0, 0,
22123 - 469, 470, 470, 0, 470, 0, 470, 470, 470, 470,
22124 - 471, 471, 0, 471, 0, 471, 471, 471, 471, 472,
22125 - 472, 472, 472, 472, 0, 0, 472, 473, 473, 473,
22126 - 473, 473, 0, 0, 473, 474, 474, 0, 474, 0,
22127 - 474, 474, 474, 474, 475, 475, 0, 475, 0, 475,
22128 - 475, 475, 475, 476, 476, 0, 476, 0, 476, 476,
22129 - 476, 476, 477, 477, 0, 477, 0, 477, 477, 477,
22130 -
22131 - 477, 478, 478, 0, 0, 0, 478, 0, 478, 479,
22132 - 479, 0, 479, 0, 479, 479, 479, 479, 480, 480,
22133 - 480, 0, 480, 480, 0, 480, 481, 481, 0, 481,
22134 - 0, 481, 481, 481, 481, 482, 482, 482, 482, 482,
22135 - 0, 0, 482, 444, 444, 444, 444, 444, 444, 444,
22136 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
22137 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
22138 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
22139 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
22140 - 444, 444, 444, 444, 444, 444, 444, 444, 444, 444,
22141 -
22142 - 444, 444, 444, 444, 444, 444
22143 - } ;
22144 -
22145 -static yy_state_type yy_last_accepting_state;
22146 -static char *yy_last_accepting_cpos;
22147 -
22148 -extern int yy_flex_debug;
22149 -int yy_flex_debug = 0;
22150 -
22151 -/* The intent behind this definition is that it'll catch
22152 - * any uses of REJECT which flex missed.
22153 - */
22154 -#define REJECT reject_used_but_not_detected
22155 -#define yymore() yymore_used_but_not_detected
22156 -#define YY_MORE_ADJ 0
22157 -#define YY_RESTORE_YY_MORE_OFFSET
22158 -char *yytext;
22159 -#line 1 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22160 -/* -*- indented-text -*- */
22161 -/* Process source files and output type information.
22162 - Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008, 2009
22163 - Free Software Foundation, Inc.
22164 -
22165 -This file is part of GCC.
22166 -
22167 -GCC is free software; you can redistribute it and/or modify it under
22168 -the terms of the GNU General Public License as published by the Free
22169 -Software Foundation; either version 3, or (at your option) any later
22170 -version.
22171 -
22172 -GCC is distributed in the hope that it will be useful, but WITHOUT ANY
22173 -WARRANTY; without even the implied warranty of MERCHANTABILITY or
22174 -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22175 -for more details.
22176 -
22177 -You should have received a copy of the GNU General Public License
22178 -along with GCC; see the file COPYING3. If not see
22179 -<http://www.gnu.org/licenses/>. */
22180 -#line 23 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22181 -#include "bconfig.h"
22182 -#include "system.h"
22183 -
22184 -#define malloc xmalloc
22185 -#define realloc xrealloc
22186 -
22187 -#include "gengtype.h"
22188 -
22189 -#define YY_NO_INPUT
22190 -#define YY_DECL int yylex (const char **yylval)
22191 -#define yyterminate() return EOF_TOKEN
22192 -
22193 -struct fileloc lexer_line;
22194 -int lexer_toplevel_done;
22195 -
22196 -static void
22197 -update_lineno (const char *l, size_t len)
22198 -{
22199 - while (len-- > 0)
22200 - if (*l++ == '\n')
22201 - lexer_line.line++;
22202 -}
22203 -
22204 -
22205 -#line 986 "gengtype-lex.c"
22206 -
22207 -#define INITIAL 0
22208 -#define in_struct 1
22209 -#define in_struct_comment 2
22210 -#define in_comment 3
22211 -
22212 -#ifndef YY_NO_UNISTD_H
22213 -/* Special case for "unistd.h", since it is non-ANSI. We include it way
22214 - * down here because we want the user's section 1 to have been scanned first.
22215 - * The user has a chance to override it with an option.
22216 - */
22217 -#include <unistd.h>
22218 -#endif
22219 -
22220 -#ifndef YY_EXTRA_TYPE
22221 -#define YY_EXTRA_TYPE void *
22222 -#endif
22223 -
22224 -static int yy_init_globals (void );
22225 -
22226 -/* Accessor methods to globals.
22227 - These are made visible to non-reentrant scanners for convenience. */
22228 -
22229 -int yylex_destroy (void );
22230 -
22231 -int yyget_debug (void );
22232 -
22233 -void yyset_debug (int debug_flag );
22234 -
22235 -YY_EXTRA_TYPE yyget_extra (void );
22236 -
22237 -void yyset_extra (YY_EXTRA_TYPE user_defined );
22238 -
22239 -FILE *yyget_in (void );
22240 -
22241 -void yyset_in (FILE * in_str );
22242 -
22243 -FILE *yyget_out (void );
22244 -
22245 -void yyset_out (FILE * out_str );
22246 -
22247 -int yyget_leng (void );
22248 -
22249 -char *yyget_text (void );
22250 -
22251 -int yyget_lineno (void );
22252 -
22253 -void yyset_lineno (int line_number );
22254 -
22255 -/* Macros after this point can all be overridden by user definitions in
22256 - * section 1.
22257 - */
22258 -
22259 -#ifndef YY_SKIP_YYWRAP
22260 -#ifdef __cplusplus
22261 -extern "C" int yywrap (void );
22262 -#else
22263 -extern int yywrap (void );
22264 -#endif
22265 -#endif
22266 -
22267 -#ifndef yytext_ptr
22268 -static void yy_flex_strncpy (char *,yyconst char *,int );
22269 -#endif
22270 -
22271 -#ifdef YY_NEED_STRLEN
22272 -static int yy_flex_strlen (yyconst char * );
22273 -#endif
22274 -
22275 -#ifndef YY_NO_INPUT
22276 -
22277 -#ifdef __cplusplus
22278 -static int yyinput (void );
22279 -#else
22280 -static int input (void );
22281 -#endif
22282 -
22283 -#endif
22284 -
22285 -/* Amount of stuff to slurp up with each read. */
22286 -#ifndef YY_READ_BUF_SIZE
22287 -#define YY_READ_BUF_SIZE 8192
22288 -#endif
22289 -
22290 -/* Copy whatever the last rule matched to the standard output. */
22291 -#ifndef ECHO
22292 -/* This used to be an fputs(), but since the string might contain NUL's,
22293 - * we now use fwrite().
22294 - */
22295 -#define ECHO fwrite( yytext, yyleng, 1, yyout )
22296 -#endif
22297 -
22298 -/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
22299 - * is returned in "result".
22300 - */
22301 -#ifndef YY_INPUT
22302 -#define YY_INPUT(buf,result,max_size) \
22303 - if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
22304 - { \
22305 - int c = '*'; \
22306 - unsigned n; \
22307 - for ( n = 0; n < max_size && \
22308 - (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
22309 - buf[n] = (char) c; \
22310 - if ( c == '\n' ) \
22311 - buf[n++] = (char) c; \
22312 - if ( c == EOF && ferror( yyin ) ) \
22313 - YY_FATAL_ERROR( "input in flex scanner failed" ); \
22314 - result = n; \
22315 - } \
22316 - else \
22317 - { \
22318 - errno=0; \
22319 - while ( (result = fread(buf, 1, max_size, yyin))==0 && ferror(yyin)) \
22320 - { \
22321 - if( errno != EINTR) \
22322 - { \
22323 - YY_FATAL_ERROR( "input in flex scanner failed" ); \
22324 - break; \
22325 - } \
22326 - errno=0; \
22327 - clearerr(yyin); \
22328 - } \
22329 - }\
22330 -\
22331 -
22332 -#endif
22333 -
22334 -/* No semi-colon after return; correct usage is to write "yyterminate();" -
22335 - * we don't want an extra ';' after the "return" because that will cause
22336 - * some compilers to complain about unreachable statements.
22337 - */
22338 -#ifndef yyterminate
22339 -#define yyterminate() return YY_NULL
22340 -#endif
22341 -
22342 -/* Number of entries by which start-condition stack grows. */
22343 -#ifndef YY_START_STACK_INCR
22344 -#define YY_START_STACK_INCR 25
22345 -#endif
22346 -
22347 -/* Report a fatal error. */
22348 -#ifndef YY_FATAL_ERROR
22349 -#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
22350 -#endif
22351 -
22352 -/* end tables serialization structures and prototypes */
22353 -
22354 -/* Default declaration of generated scanner - a define so the user can
22355 - * easily add parameters.
22356 - */
22357 -#ifndef YY_DECL
22358 -#define YY_DECL_IS_OURS 1
22359 -
22360 -extern int yylex (void);
22361 -
22362 -#define YY_DECL int yylex (void)
22363 -#endif /* !YY_DECL */
22364 -
22365 -/* Code executed at the beginning of each rule, after yytext and yyleng
22366 - * have been set up.
22367 - */
22368 -#ifndef YY_USER_ACTION
22369 -#define YY_USER_ACTION
22370 -#endif
22371 -
22372 -/* Code executed at the end of each rule. */
22373 -#ifndef YY_BREAK
22374 -#define YY_BREAK break;
22375 -#endif
22376 -
22377 -#define YY_RULE_SETUP \
22378 - if ( yyleng > 0 ) \
22379 - YY_CURRENT_BUFFER_LVALUE->yy_at_bol = \
22380 - (yytext[yyleng - 1] == '\n'); \
22381 - YY_USER_ACTION
22382 -
22383 -/** The main scanner function which does all the work.
22384 - */
22385 -YY_DECL
22386 -{
22387 - register yy_state_type yy_current_state;
22388 - register char *yy_cp, *yy_bp;
22389 - register int yy_act;
22390 -
22391 -#line 58 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22392 -
22393 - /* Do this on entry to yylex(): */
22394 - *yylval = 0;
22395 - if (lexer_toplevel_done)
22396 - {
22397 - BEGIN(INITIAL);
22398 - lexer_toplevel_done = 0;
22399 - }
22400 -
22401 - /* Things we look for in skipping mode: */
22402 -#line 1183 "gengtype-lex.c"
22403 -
22404 - if ( !(yy_init) )
22405 - {
22406 - (yy_init) = 1;
22407 -
22408 -#ifdef YY_USER_INIT
22409 - YY_USER_INIT;
22410 -#endif
22411 -
22412 - if ( ! (yy_start) )
22413 - (yy_start) = 1; /* first start state */
22414 -
22415 - if ( ! yyin )
22416 - yyin = stdin;
22417 -
22418 - if ( ! yyout )
22419 - yyout = stdout;
22420 -
22421 - if ( ! YY_CURRENT_BUFFER ) {
22422 - yyensure_buffer_stack ();
22423 - YY_CURRENT_BUFFER_LVALUE =
22424 - yy_create_buffer(yyin,YY_BUF_SIZE );
22425 - }
22426 -
22427 - yy_load_buffer_state( );
22428 - }
22429 -
22430 - while ( 1 ) /* loops until end-of-file is reached */
22431 - {
22432 - yy_cp = (yy_c_buf_p);
22433 -
22434 - /* Support of yytext. */
22435 - *yy_cp = (yy_hold_char);
22436 -
22437 - /* yy_bp points to the position in yy_ch_buf of the start of
22438 - * the current run.
22439 - */
22440 - yy_bp = yy_cp;
22441 -
22442 - yy_current_state = (yy_start);
22443 - yy_current_state += YY_AT_BOL();
22444 -yy_match:
22445 - do
22446 - {
22447 - register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)];
22448 - if ( yy_accept[yy_current_state] )
22449 - {
22450 - (yy_last_accepting_state) = yy_current_state;
22451 - (yy_last_accepting_cpos) = yy_cp;
22452 - }
22453 - while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
22454 - {
22455 - yy_current_state = (int) yy_def[yy_current_state];
22456 - if ( yy_current_state >= 445 )
22457 - yy_c = yy_meta[(unsigned int) yy_c];
22458 - }
22459 - yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
22460 - ++yy_cp;
22461 - }
22462 - while ( yy_current_state != 444 );
22463 - yy_cp = (yy_last_accepting_cpos);
22464 - yy_current_state = (yy_last_accepting_state);
22465 -
22466 -yy_find_action:
22467 - yy_act = yy_accept[yy_current_state];
22468 -
22469 - YY_DO_BEFORE_ACTION;
22470 -
22471 -do_action: /* This label is used only to access EOF actions. */
22472 -
22473 - switch ( yy_act )
22474 - { /* beginning of action switch */
22475 - case 0: /* must back up */
22476 - /* undo the effects of YY_DO_BEFORE_ACTION */
22477 - *yy_cp = (yy_hold_char);
22478 - yy_cp = (yy_last_accepting_cpos);
22479 - yy_current_state = (yy_last_accepting_state);
22480 - goto yy_find_action;
22481 -
22482 -case 1:
22483 -/* rule 1 can match eol */
22484 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22485 -(yy_c_buf_p) = yy_cp -= 1;
22486 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22487 -YY_RULE_SETUP
22488 -#line 69 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22489 -{
22490 - BEGIN(in_struct);
22491 - return TYPEDEF;
22492 -}
22493 - YY_BREAK
22494 -case 2:
22495 -/* rule 2 can match eol */
22496 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22497 -(yy_c_buf_p) = yy_cp -= 1;
22498 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22499 -YY_RULE_SETUP
22500 -#line 73 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22501 -{
22502 - BEGIN(in_struct);
22503 - return STRUCT;
22504 -}
22505 - YY_BREAK
22506 -case 3:
22507 -/* rule 3 can match eol */
22508 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22509 -(yy_c_buf_p) = yy_cp -= 1;
22510 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22511 -YY_RULE_SETUP
22512 -#line 77 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22513 -{
22514 - BEGIN(in_struct);
22515 - return UNION;
22516 -}
22517 - YY_BREAK
22518 -case 4:
22519 -/* rule 4 can match eol */
22520 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22521 -(yy_c_buf_p) = yy_cp -= 1;
22522 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22523 -YY_RULE_SETUP
22524 -#line 81 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22525 -{
22526 - BEGIN(in_struct);
22527 - return EXTERN;
22528 -}
22529 - YY_BREAK
22530 -case 5:
22531 -/* rule 5 can match eol */
22532 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22533 -(yy_c_buf_p) = yy_cp -= 1;
22534 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22535 -YY_RULE_SETUP
22536 -#line 85 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22537 -{
22538 - BEGIN(in_struct);
22539 - return STATIC;
22540 -}
22541 - YY_BREAK
22542 -case 6:
22543 -/* rule 6 can match eol */
22544 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22545 -(yy_c_buf_p) = yy_cp -= 1;
22546 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22547 -YY_RULE_SETUP
22548 -#line 90 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22549 -{
22550 - BEGIN(in_struct);
22551 - return DEFVEC_OP;
22552 -}
22553 - YY_BREAK
22554 -case 7:
22555 -/* rule 7 can match eol */
22556 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22557 -(yy_c_buf_p) = yy_cp -= 1;
22558 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22559 -YY_RULE_SETUP
22560 -#line 94 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22561 -{
22562 - BEGIN(in_struct);
22563 - return DEFVEC_I;
22564 -}
22565 - YY_BREAK
22566 -case 8:
22567 -/* rule 8 can match eol */
22568 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22569 -(yy_c_buf_p) = yy_cp -= 1;
22570 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22571 -YY_RULE_SETUP
22572 -#line 98 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22573 -{
22574 - BEGIN(in_struct);
22575 - return DEFVEC_ALLOC;
22576 -}
22577 - YY_BREAK
22578 -
22579 -
22580 -case 9:
22581 -YY_RULE_SETUP
22582 -#line 106 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22583 -{ BEGIN(in_struct_comment); }
22584 - YY_BREAK
22585 -case 10:
22586 -/* rule 10 can match eol */
22587 -YY_RULE_SETUP
22588 -#line 108 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22589 -{ update_lineno (yytext, yyleng); }
22590 - YY_BREAK
22591 -case 11:
22592 -/* rule 11 can match eol */
22593 -YY_RULE_SETUP
22594 -#line 109 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22595 -{ lexer_line.line++; }
22596 - YY_BREAK
22597 -case 12:
22598 -/* rule 12 can match eol */
22599 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22600 -(yy_c_buf_p) = yy_cp = yy_bp + 5;
22601 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22602 -YY_RULE_SETUP
22603 -#line 111 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22604 -/* don't care */
22605 - YY_BREAK
22606 -case 13:
22607 -/* rule 13 can match eol */
22608 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22609 -(yy_c_buf_p) = yy_cp = yy_bp + 3;
22610 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22611 -YY_RULE_SETUP
22612 -#line 112 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22613 -{ return GTY_TOKEN; }
22614 - YY_BREAK
22615 -case 14:
22616 -/* rule 14 can match eol */
22617 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22618 -(yy_c_buf_p) = yy_cp = yy_bp + 3;
22619 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22620 -YY_RULE_SETUP
22621 -#line 113 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22622 -{ return VEC_TOKEN; }
22623 - YY_BREAK
22624 -case 15:
22625 -/* rule 15 can match eol */
22626 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22627 -(yy_c_buf_p) = yy_cp = yy_bp + 5;
22628 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22629 -YY_RULE_SETUP
22630 -#line 114 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22631 -{ return UNION; }
22632 - YY_BREAK
22633 -case 16:
22634 -/* rule 16 can match eol */
22635 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22636 -(yy_c_buf_p) = yy_cp = yy_bp + 6;
22637 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22638 -YY_RULE_SETUP
22639 -#line 115 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22640 -{ return STRUCT; }
22641 - YY_BREAK
22642 -case 17:
22643 -/* rule 17 can match eol */
22644 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22645 -(yy_c_buf_p) = yy_cp = yy_bp + 4;
22646 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22647 -YY_RULE_SETUP
22648 -#line 116 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22649 -{ return ENUM; }
22650 - YY_BREAK
22651 -case 18:
22652 -/* rule 18 can match eol */
22653 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22654 -(yy_c_buf_p) = yy_cp = yy_bp + 9;
22655 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22656 -YY_RULE_SETUP
22657 -#line 117 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22658 -{ return PTR_ALIAS; }
22659 - YY_BREAK
22660 -case 19:
22661 -/* rule 19 can match eol */
22662 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22663 -(yy_c_buf_p) = yy_cp = yy_bp + 10;
22664 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22665 -YY_RULE_SETUP
22666 -#line 118 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22667 -{ return NESTED_PTR; }
22668 - YY_BREAK
22669 -case 20:
22670 -YY_RULE_SETUP
22671 -#line 119 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22672 -{ return NUM; }
22673 - YY_BREAK
22674 -case 21:
22675 -/* rule 21 can match eol */
22676 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22677 -(yy_c_buf_p) = yy_cp -= 1;
22678 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22679 -YY_RULE_SETUP
22680 -#line 120 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22681 -{
22682 - *yylval = XDUPVAR (const char, yytext, yyleng, yyleng+1);
22683 - return PARAM_IS;
22684 -}
22685 - YY_BREAK
22686 -case 22:
22687 -/* rule 22 can match eol */
22688 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22689 -(yy_c_buf_p) = yy_cp -= 1;
22690 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22691 -#line 126 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22692 -case 23:
22693 -/* rule 23 can match eol */
22694 -YY_RULE_SETUP
22695 -#line 126 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22696 -{
22697 - size_t len;
22698 -
22699 - for (len = yyleng; ISSPACE (yytext[len-1]); len--)
22700 - ;
22701 -
22702 - *yylval = XDUPVAR (const char, yytext, len, len+1);
22703 - update_lineno (yytext, yyleng);
22704 - return SCALAR;
22705 -}
22706 - YY_BREAK
22707 -case 24:
22708 -/* rule 24 can match eol */
22709 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22710 -(yy_c_buf_p) = yy_cp -= 1;
22711 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22712 -YY_RULE_SETUP
22713 -#line 138 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22714 -{
22715 - *yylval = XDUPVAR (const char, yytext, yyleng, yyleng+1);
22716 - return ID;
22717 -}
22718 - YY_BREAK
22719 -case 25:
22720 -/* rule 25 can match eol */
22721 -YY_RULE_SETUP
22722 -#line 143 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22723 -{
22724 - *yylval = XDUPVAR (const char, yytext+1, yyleng-2, yyleng-1);
22725 - return STRING;
22726 -}
22727 - YY_BREAK
22728 -/* This "terminal" avoids having to parse integer constant expressions. */
22729 -case 26:
22730 -/* rule 26 can match eol */
22731 -YY_RULE_SETUP
22732 -#line 148 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22733 -{
22734 - *yylval = XDUPVAR (const char, yytext+1, yyleng-2, yyleng-1);
22735 - return ARRAY;
22736 -}
22737 - YY_BREAK
22738 -case 27:
22739 -/* rule 27 can match eol */
22740 -YY_RULE_SETUP
22741 -#line 152 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22742 -{
22743 - *yylval = XDUPVAR (const char, yytext+1, yyleng-2, yyleng);
22744 - return CHAR;
22745 -}
22746 - YY_BREAK
22747 -case 28:
22748 -YY_RULE_SETUP
22749 -#line 157 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22750 -{ return ELLIPSIS; }
22751 - YY_BREAK
22752 -case 29:
22753 -YY_RULE_SETUP
22754 -#line 158 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22755 -{ return yytext[0]; }
22756 - YY_BREAK
22757 -/* ignore pp-directives */
22758 -case 30:
22759 -/* rule 30 can match eol */
22760 -YY_RULE_SETUP
22761 -#line 161 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22762 -{lexer_line.line++;}
22763 - YY_BREAK
22764 -case 31:
22765 -YY_RULE_SETUP
22766 -#line 163 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22767 -{
22768 - error_at_line (&lexer_line, "unexpected character `%s'", yytext);
22769 -}
22770 - YY_BREAK
22771 -
22772 -case 32:
22773 -YY_RULE_SETUP
22774 -#line 168 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22775 -{ BEGIN(in_comment); }
22776 - YY_BREAK
22777 -case 33:
22778 -/* rule 33 can match eol */
22779 -YY_RULE_SETUP
22780 -#line 169 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22781 -{ lexer_line.line++; }
22782 - YY_BREAK
22783 -case 34:
22784 -#line 171 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22785 -case 35:
22786 -/* rule 35 can match eol */
22787 -#line 172 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22788 -case 36:
22789 -/* rule 36 can match eol */
22790 -YY_RULE_SETUP
22791 -#line 172 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22792 -/* do nothing */
22793 - YY_BREAK
22794 -case 37:
22795 -/* rule 37 can match eol */
22796 -YY_RULE_SETUP
22797 -#line 173 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22798 -{ update_lineno (yytext, yyleng); }
22799 - YY_BREAK
22800 -case 38:
22801 -/* rule 38 can match eol */
22802 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22803 -(yy_c_buf_p) = yy_cp = yy_bp + 1;
22804 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22805 -YY_RULE_SETUP
22806 -#line 174 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22807 -/* do nothing */
22808 - YY_BREAK
22809 -
22810 -case 39:
22811 -/* rule 39 can match eol */
22812 -YY_RULE_SETUP
22813 -#line 177 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22814 -{ lexer_line.line++; }
22815 - YY_BREAK
22816 -case 40:
22817 -#line 179 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22818 -case 41:
22819 -YY_RULE_SETUP
22820 -#line 179 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22821 -/* do nothing */
22822 - YY_BREAK
22823 -case 42:
22824 -/* rule 42 can match eol */
22825 -*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */
22826 -(yy_c_buf_p) = yy_cp = yy_bp + 1;
22827 -YY_DO_BEFORE_ACTION; /* set up yytext again */
22828 -YY_RULE_SETUP
22829 -#line 180 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22830 -/* do nothing */
22831 - YY_BREAK
22832 -
22833 -case 43:
22834 -YY_RULE_SETUP
22835 -#line 182 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22836 -{ BEGIN(INITIAL); }
22837 - YY_BREAK
22838 -case 44:
22839 -YY_RULE_SETUP
22840 -#line 183 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22841 -{ BEGIN(in_struct); }
22842 - YY_BREAK
22843 -case 45:
22844 -#line 186 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22845 -case 46:
22846 -YY_RULE_SETUP
22847 -#line 186 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22848 -{
22849 - error_at_line (&lexer_line,
22850 - "unterminated comment or string; unexpected EOF");
22851 -}
22852 - YY_BREAK
22853 -case 47:
22854 -/* rule 47 can match eol */
22855 -YY_RULE_SETUP
22856 -#line 191 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22857 -/* do nothing */
22858 - YY_BREAK
22859 -case 48:
22860 -/* rule 48 can match eol */
22861 -YY_RULE_SETUP
22862 -#line 192 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22863 -{
22864 - error_at_line (&lexer_line, "stray GTY marker");
22865 -}
22866 - YY_BREAK
22867 -case 49:
22868 -YY_RULE_SETUP
22869 -#line 196 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
22870 -YY_FATAL_ERROR( "flex scanner jammed" );
22871 - YY_BREAK
22872 -#line 1653 "gengtype-lex.c"
22873 -case YY_STATE_EOF(INITIAL):
22874 -case YY_STATE_EOF(in_struct):
22875 -case YY_STATE_EOF(in_struct_comment):
22876 -case YY_STATE_EOF(in_comment):
22877 - yyterminate();
22878 -
22879 - case YY_END_OF_BUFFER:
22880 - {
22881 - /* Amount of text matched not including the EOB char. */
22882 - int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
22883 -
22884 - /* Undo the effects of YY_DO_BEFORE_ACTION. */
22885 - *yy_cp = (yy_hold_char);
22886 - YY_RESTORE_YY_MORE_OFFSET
22887 -
22888 - if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
22889 - {
22890 - /* We're scanning a new file or input source. It's
22891 - * possible that this happened because the user
22892 - * just pointed yyin at a new source and called
22893 - * yylex(). If so, then we have to assure
22894 - * consistency between YY_CURRENT_BUFFER and our
22895 - * globals. Here is the right place to do so, because
22896 - * this is the first action (other than possibly a
22897 - * back-up) that will match for the new input source.
22898 - */
22899 - (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
22900 - YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
22901 - YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
22902 - }
22903 -
22904 - /* Note that here we test for yy_c_buf_p "<=" to the position
22905 - * of the first EOB in the buffer, since yy_c_buf_p will
22906 - * already have been incremented past the NUL character
22907 - * (since all states make transitions on EOB to the
22908 - * end-of-buffer state). Contrast this with the test
22909 - * in input().
22910 - */
22911 - if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
22912 - { /* This was really a NUL. */
22913 - yy_state_type yy_next_state;
22914 -
22915 - (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
22916 -
22917 - yy_current_state = yy_get_previous_state( );
22918 -
22919 - /* Okay, we're now positioned to make the NUL
22920 - * transition. We couldn't have
22921 - * yy_get_previous_state() go ahead and do it
22922 - * for us because it doesn't know how to deal
22923 - * with the possibility of jamming (and we don't
22924 - * want to build jamming into it because then it
22925 - * will run more slowly).
22926 - */
22927 -
22928 - yy_next_state = yy_try_NUL_trans( yy_current_state );
22929 -
22930 - yy_bp = (yytext_ptr) + YY_MORE_ADJ;
22931 -
22932 - if ( yy_next_state )
22933 - {
22934 - /* Consume the NUL. */
22935 - yy_cp = ++(yy_c_buf_p);
22936 - yy_current_state = yy_next_state;
22937 - goto yy_match;
22938 - }
22939 -
22940 - else
22941 - {
22942 - yy_cp = (yy_last_accepting_cpos);
22943 - yy_current_state = (yy_last_accepting_state);
22944 - goto yy_find_action;
22945 - }
22946 - }
22947 -
22948 - else switch ( yy_get_next_buffer( ) )
22949 - {
22950 - case EOB_ACT_END_OF_FILE:
22951 - {
22952 - (yy_did_buffer_switch_on_eof) = 0;
22953 -
22954 - if ( yywrap( ) )
22955 - {
22956 - /* Note: because we've taken care in
22957 - * yy_get_next_buffer() to have set up
22958 - * yytext, we can now set up
22959 - * yy_c_buf_p so that if some total
22960 - * hoser (like flex itself) wants to
22961 - * call the scanner after we return the
22962 - * YY_NULL, it'll still work - another
22963 - * YY_NULL will get returned.
22964 - */
22965 - (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
22966 -
22967 - yy_act = YY_STATE_EOF(YY_START);
22968 - goto do_action;
22969 - }
22970 -
22971 - else
22972 - {
22973 - if ( ! (yy_did_buffer_switch_on_eof) )
22974 - YY_NEW_FILE;
22975 - }
22976 - break;
22977 - }
22978 -
22979 - case EOB_ACT_CONTINUE_SCAN:
22980 - (yy_c_buf_p) =
22981 - (yytext_ptr) + yy_amount_of_matched_text;
22982 -
22983 - yy_current_state = yy_get_previous_state( );
22984 -
22985 - yy_cp = (yy_c_buf_p);
22986 - yy_bp = (yytext_ptr) + YY_MORE_ADJ;
22987 - goto yy_match;
22988 -
22989 - case EOB_ACT_LAST_MATCH:
22990 - (yy_c_buf_p) =
22991 - &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
22992 -
22993 - yy_current_state = yy_get_previous_state( );
22994 -
22995 - yy_cp = (yy_c_buf_p);
22996 - yy_bp = (yytext_ptr) + YY_MORE_ADJ;
22997 - goto yy_find_action;
22998 - }
22999 - break;
23000 - }
23001 -
23002 - default:
23003 - YY_FATAL_ERROR(
23004 - "fatal flex scanner internal error--no action found" );
23005 - } /* end of action switch */
23006 - } /* end of scanning one token */
23007 -} /* end of yylex */
23008 -
23009 -/* yy_get_next_buffer - try to read in a new buffer
23010 - *
23011 - * Returns a code representing an action:
23012 - * EOB_ACT_LAST_MATCH -
23013 - * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
23014 - * EOB_ACT_END_OF_FILE - end of file
23015 - */
23016 -static int yy_get_next_buffer (void)
23017 -{
23018 - register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
23019 - register char *source = (yytext_ptr);
23020 - register int number_to_move, i;
23021 - int ret_val;
23022 -
23023 - if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
23024 - YY_FATAL_ERROR(
23025 - "fatal flex scanner internal error--end of buffer missed" );
23026 -
23027 - if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
23028 - { /* Don't try to fill the buffer, so this is an EOF. */
23029 - if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
23030 - {
23031 - /* We matched a single character, the EOB, so
23032 - * treat this as a final EOF.
23033 - */
23034 - return EOB_ACT_END_OF_FILE;
23035 - }
23036 -
23037 - else
23038 - {
23039 - /* We matched some text prior to the EOB, first
23040 - * process it.
23041 - */
23042 - return EOB_ACT_LAST_MATCH;
23043 - }
23044 - }
23045 -
23046 - /* Try to read more data. */
23047 -
23048 - /* First move last chars to start of buffer. */
23049 - number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1;
23050 -
23051 - for ( i = 0; i < number_to_move; ++i )
23052 - *(dest++) = *(source++);
23053 -
23054 - if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
23055 - /* don't do the read, it's not guaranteed to return an EOF,
23056 - * just force an EOF
23057 - */
23058 - YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
23059 -
23060 - else
23061 - {
23062 - int num_to_read =
23063 - YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
23064 -
23065 - while ( num_to_read <= 0 )
23066 - { /* Not enough room in the buffer - grow it. */
23067 -
23068 - /* just a shorter name for the current buffer */
23069 - YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
23070 -
23071 - int yy_c_buf_p_offset =
23072 - (int) ((yy_c_buf_p) - b->yy_ch_buf);
23073 -
23074 - if ( b->yy_is_our_buffer )
23075 - {
23076 - int new_size = b->yy_buf_size * 2;
23077 -
23078 - if ( new_size <= 0 )
23079 - b->yy_buf_size += b->yy_buf_size / 8;
23080 - else
23081 - b->yy_buf_size *= 2;
23082 -
23083 - b->yy_ch_buf = (char *)
23084 - /* Include room in for 2 EOB chars. */
23085 - yyrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 );
23086 - }
23087 - else
23088 - /* Can't grow it, we don't own it. */
23089 - b->yy_ch_buf = 0;
23090 -
23091 - if ( ! b->yy_ch_buf )
23092 - YY_FATAL_ERROR(
23093 - "fatal error - scanner input buffer overflow" );
23094 -
23095 - (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
23096 -
23097 - num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
23098 - number_to_move - 1;
23099 -
23100 - }
23101 -
23102 - if ( num_to_read > YY_READ_BUF_SIZE )
23103 - num_to_read = YY_READ_BUF_SIZE;
23104 -
23105 - /* Read in more data. */
23106 - YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
23107 - (yy_n_chars), (size_t) num_to_read );
23108 -
23109 - YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
23110 - }
23111 -
23112 - if ( (yy_n_chars) == 0 )
23113 - {
23114 - if ( number_to_move == YY_MORE_ADJ )
23115 - {
23116 - ret_val = EOB_ACT_END_OF_FILE;
23117 - yyrestart(yyin );
23118 - }
23119 -
23120 - else
23121 - {
23122 - ret_val = EOB_ACT_LAST_MATCH;
23123 - YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
23124 - YY_BUFFER_EOF_PENDING;
23125 - }
23126 - }
23127 -
23128 - else
23129 - ret_val = EOB_ACT_CONTINUE_SCAN;
23130 -
23131 - if ((yy_size_t) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
23132 - /* Extend the array by 50%, plus the number we really need. */
23133 - yy_size_t new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);
23134 - YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size );
23135 - if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
23136 - YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
23137 - }
23138 -
23139 - (yy_n_chars) += number_to_move;
23140 - YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
23141 - YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
23142 -
23143 - (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
23144 -
23145 - return ret_val;
23146 -}
23147 -
23148 -/* yy_get_previous_state - get the state just before the EOB char was reached */
23149 -
23150 - static yy_state_type yy_get_previous_state (void)
23151 -{
23152 - register yy_state_type yy_current_state;
23153 - register char *yy_cp;
23154 -
23155 - yy_current_state = (yy_start);
23156 - yy_current_state += YY_AT_BOL();
23157 -
23158 - for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
23159 - {
23160 - register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
23161 - if ( yy_accept[yy_current_state] )
23162 - {
23163 - (yy_last_accepting_state) = yy_current_state;
23164 - (yy_last_accepting_cpos) = yy_cp;
23165 - }
23166 - while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
23167 - {
23168 - yy_current_state = (int) yy_def[yy_current_state];
23169 - if ( yy_current_state >= 445 )
23170 - yy_c = yy_meta[(unsigned int) yy_c];
23171 - }
23172 - yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
23173 - }
23174 -
23175 - return yy_current_state;
23176 -}
23177 -
23178 -/* yy_try_NUL_trans - try to make a transition on the NUL character
23179 - *
23180 - * synopsis
23181 - * next_state = yy_try_NUL_trans( current_state );
23182 - */
23183 - static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
23184 -{
23185 - register int yy_is_jam;
23186 - register char *yy_cp = (yy_c_buf_p);
23187 -
23188 - register YY_CHAR yy_c = 1;
23189 - if ( yy_accept[yy_current_state] )
23190 - {
23191 - (yy_last_accepting_state) = yy_current_state;
23192 - (yy_last_accepting_cpos) = yy_cp;
23193 - }
23194 - while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
23195 - {
23196 - yy_current_state = (int) yy_def[yy_current_state];
23197 - if ( yy_current_state >= 445 )
23198 - yy_c = yy_meta[(unsigned int) yy_c];
23199 - }
23200 - yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
23201 - yy_is_jam = (yy_current_state == 444);
23202 -
23203 - return yy_is_jam ? 0 : yy_current_state;
23204 -}
23205 -
23206 -#ifndef YY_NO_INPUT
23207 -#ifdef __cplusplus
23208 - static int yyinput (void)
23209 -#else
23210 - static int input (void)
23211 -#endif
23212 -
23213 -{
23214 - int c;
23215 -
23216 - *(yy_c_buf_p) = (yy_hold_char);
23217 -
23218 - if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
23219 - {
23220 - /* yy_c_buf_p now points to the character we want to return.
23221 - * If this occurs *before* the EOB characters, then it's a
23222 - * valid NUL; if not, then we've hit the end of the buffer.
23223 - */
23224 - if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
23225 - /* This was really a NUL. */
23226 - *(yy_c_buf_p) = '\0';
23227 -
23228 - else
23229 - { /* need more input */
23230 - int offset = (yy_c_buf_p) - (yytext_ptr);
23231 - ++(yy_c_buf_p);
23232 -
23233 - switch ( yy_get_next_buffer( ) )
23234 - {
23235 - case EOB_ACT_LAST_MATCH:
23236 - /* This happens because yy_g_n_b()
23237 - * sees that we've accumulated a
23238 - * token and flags that we need to
23239 - * try matching the token before
23240 - * proceeding. But for input(),
23241 - * there's no matching to consider.
23242 - * So convert the EOB_ACT_LAST_MATCH
23243 - * to EOB_ACT_END_OF_FILE.
23244 - */
23245 -
23246 - /* Reset buffer status. */
23247 - yyrestart(yyin );
23248 -
23249 - /*FALLTHROUGH*/
23250 -
23251 - case EOB_ACT_END_OF_FILE:
23252 - {
23253 - if ( yywrap( ) )
23254 - return EOF;
23255 -
23256 - if ( ! (yy_did_buffer_switch_on_eof) )
23257 - YY_NEW_FILE;
23258 -#ifdef __cplusplus
23259 - return yyinput();
23260 -#else
23261 - return input();
23262 -#endif
23263 - }
23264 -
23265 - case EOB_ACT_CONTINUE_SCAN:
23266 - (yy_c_buf_p) = (yytext_ptr) + offset;
23267 - break;
23268 - }
23269 - }
23270 - }
23271 -
23272 - c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */
23273 - *(yy_c_buf_p) = '\0'; /* preserve yytext */
23274 - (yy_hold_char) = *++(yy_c_buf_p);
23275 -
23276 - YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (c == '\n');
23277 -
23278 - return c;
23279 -}
23280 -#endif /* ifndef YY_NO_INPUT */
23281 -
23282 -/** Immediately switch to a different input stream.
23283 - * @param input_file A readable stream.
23284 - *
23285 - * @note This function does not reset the start condition to @c INITIAL .
23286 - */
23287 - void yyrestart (FILE * input_file )
23288 -{
23289 -
23290 - if ( ! YY_CURRENT_BUFFER ){
23291 - yyensure_buffer_stack ();
23292 - YY_CURRENT_BUFFER_LVALUE =
23293 - yy_create_buffer(yyin,YY_BUF_SIZE );
23294 - }
23295 -
23296 - yy_init_buffer(YY_CURRENT_BUFFER,input_file );
23297 - yy_load_buffer_state( );
23298 -}
23299 -
23300 -/** Switch to a different input buffer.
23301 - * @param new_buffer The new input buffer.
23302 - *
23303 - */
23304 - void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer )
23305 -{
23306 -
23307 - /* TODO. We should be able to replace this entire function body
23308 - * with
23309 - * yypop_buffer_state();
23310 - * yypush_buffer_state(new_buffer);
23311 - */
23312 - yyensure_buffer_stack ();
23313 - if ( YY_CURRENT_BUFFER == new_buffer )
23314 - return;
23315 -
23316 - if ( YY_CURRENT_BUFFER )
23317 - {
23318 - /* Flush out information for old buffer. */
23319 - *(yy_c_buf_p) = (yy_hold_char);
23320 - YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
23321 - YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
23322 - }
23323 -
23324 - YY_CURRENT_BUFFER_LVALUE = new_buffer;
23325 - yy_load_buffer_state( );
23326 -
23327 - /* We don't actually know whether we did this switch during
23328 - * EOF (yywrap()) processing, but the only time this flag
23329 - * is looked at is after yywrap() is called, so it's safe
23330 - * to go ahead and always set it.
23331 - */
23332 - (yy_did_buffer_switch_on_eof) = 1;
23333 -}
23334 -
23335 -static void yy_load_buffer_state (void)
23336 -{
23337 - (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
23338 - (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
23339 - yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
23340 - (yy_hold_char) = *(yy_c_buf_p);
23341 -}
23342 -
23343 -/** Allocate and initialize an input buffer state.
23344 - * @param file A readable stream.
23345 - * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
23346 - *
23347 - * @return the allocated buffer state.
23348 - */
23349 - YY_BUFFER_STATE yy_create_buffer (FILE * file, int size )
23350 -{
23351 - YY_BUFFER_STATE b;
23352 -
23353 - b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) );
23354 - if ( ! b )
23355 - YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
23356 -
23357 - b->yy_buf_size = size;
23358 -
23359 - /* yy_ch_buf has to be 2 characters longer than the size given because
23360 - * we need to put in 2 end-of-buffer characters.
23361 - */
23362 - b->yy_ch_buf = (char *) yyalloc(b->yy_buf_size + 2 );
23363 - if ( ! b->yy_ch_buf )
23364 - YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
23365 -
23366 - b->yy_is_our_buffer = 1;
23367 -
23368 - yy_init_buffer(b,file );
23369 -
23370 - return b;
23371 -}
23372 -
23373 -/** Destroy the buffer.
23374 - * @param b a buffer created with yy_create_buffer()
23375 - *
23376 - */
23377 - void yy_delete_buffer (YY_BUFFER_STATE b )
23378 -{
23379 -
23380 - if ( ! b )
23381 - return;
23382 -
23383 - if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
23384 - YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
23385 -
23386 - if ( b->yy_is_our_buffer )
23387 - yyfree((void *) b->yy_ch_buf );
23388 -
23389 - yyfree((void *) b );
23390 -}
23391 -
23392 -/* Initializes or reinitializes a buffer.
23393 - * This function is sometimes called more than once on the same buffer,
23394 - * such as during a yyrestart() or at EOF.
23395 - */
23396 - static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file )
23397 -
23398 -{
23399 - int oerrno = errno;
23400 -
23401 - yy_flush_buffer(b );
23402 -
23403 - b->yy_input_file = file;
23404 - b->yy_fill_buffer = 1;
23405 -
23406 - /* If b is the current buffer, then yy_init_buffer was _probably_
23407 - * called from yyrestart() or through yy_get_next_buffer.
23408 - * In that case, we don't want to reset the lineno or column.
23409 - */
23410 - if (b != YY_CURRENT_BUFFER){
23411 - b->yy_bs_lineno = 1;
23412 - b->yy_bs_column = 0;
23413 - }
23414 -
23415 - b->yy_is_interactive = 0;
23416 -
23417 - errno = oerrno;
23418 -}
23419 -
23420 -/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
23421 - * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
23422 - *
23423 - */
23424 - void yy_flush_buffer (YY_BUFFER_STATE b )
23425 -{
23426 - if ( ! b )
23427 - return;
23428 -
23429 - b->yy_n_chars = 0;
23430 -
23431 - /* We always need two end-of-buffer characters. The first causes
23432 - * a transition to the end-of-buffer state. The second causes
23433 - * a jam in that state.
23434 - */
23435 - b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
23436 - b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
23437 -
23438 - b->yy_buf_pos = &b->yy_ch_buf[0];
23439 -
23440 - b->yy_at_bol = 1;
23441 - b->yy_buffer_status = YY_BUFFER_NEW;
23442 -
23443 - if ( b == YY_CURRENT_BUFFER )
23444 - yy_load_buffer_state( );
23445 -}
23446 -
23447 -/** Pushes the new state onto the stack. The new state becomes
23448 - * the current state. This function will allocate the stack
23449 - * if necessary.
23450 - * @param new_buffer The new state.
23451 - *
23452 - */
23453 -void yypush_buffer_state (YY_BUFFER_STATE new_buffer )
23454 -{
23455 - if (new_buffer == NULL)
23456 - return;
23457 -
23458 - yyensure_buffer_stack();
23459 -
23460 - /* This block is copied from yy_switch_to_buffer. */
23461 - if ( YY_CURRENT_BUFFER )
23462 - {
23463 - /* Flush out information for old buffer. */
23464 - *(yy_c_buf_p) = (yy_hold_char);
23465 - YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
23466 - YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
23467 - }
23468 -
23469 - /* Only push if top exists. Otherwise, replace top. */
23470 - if (YY_CURRENT_BUFFER)
23471 - (yy_buffer_stack_top)++;
23472 - YY_CURRENT_BUFFER_LVALUE = new_buffer;
23473 -
23474 - /* copied from yy_switch_to_buffer. */
23475 - yy_load_buffer_state( );
23476 - (yy_did_buffer_switch_on_eof) = 1;
23477 -}
23478 -
23479 -/** Removes and deletes the top of the stack, if present.
23480 - * The next element becomes the new top.
23481 - *
23482 - */
23483 -void yypop_buffer_state (void)
23484 -{
23485 - if (!YY_CURRENT_BUFFER)
23486 - return;
23487 -
23488 - yy_delete_buffer(YY_CURRENT_BUFFER );
23489 - YY_CURRENT_BUFFER_LVALUE = NULL;
23490 - if ((yy_buffer_stack_top) > 0)
23491 - --(yy_buffer_stack_top);
23492 -
23493 - if (YY_CURRENT_BUFFER) {
23494 - yy_load_buffer_state( );
23495 - (yy_did_buffer_switch_on_eof) = 1;
23496 - }
23497 -}
23498 -
23499 -/* Allocates the stack if it does not exist.
23500 - * Guarantees space for at least one push.
23501 - */
23502 -static void yyensure_buffer_stack (void)
23503 -{
23504 - int num_to_alloc;
23505 -
23506 - if (!(yy_buffer_stack)) {
23507 -
23508 - /* First allocation is just for 2 elements, since we don't know if this
23509 - * scanner will even need a stack. We use 2 instead of 1 to avoid an
23510 - * immediate realloc on the next call.
23511 - */
23512 - num_to_alloc = 1;
23513 - (yy_buffer_stack) = (struct yy_buffer_state**)yyalloc
23514 - (num_to_alloc * sizeof(struct yy_buffer_state*)
23515 - );
23516 - if ( ! (yy_buffer_stack) )
23517 - YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
23518 -
23519 - memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
23520 -
23521 - (yy_buffer_stack_max) = num_to_alloc;
23522 - (yy_buffer_stack_top) = 0;
23523 - return;
23524 - }
23525 -
23526 - if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
23527 -
23528 - /* Increase the buffer to prepare for a possible push. */
23529 - int grow_size = 8 /* arbitrary grow size */;
23530 -
23531 - num_to_alloc = (yy_buffer_stack_max) + grow_size;
23532 - (yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc
23533 - ((yy_buffer_stack),
23534 - num_to_alloc * sizeof(struct yy_buffer_state*)
23535 - );
23536 - if ( ! (yy_buffer_stack) )
23537 - YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
23538 -
23539 - /* zero only the new slots.*/
23540 - memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
23541 - (yy_buffer_stack_max) = num_to_alloc;
23542 - }
23543 -}
23544 -
23545 -/** Setup the input buffer state to scan directly from a user-specified character buffer.
23546 - * @param base the character buffer
23547 - * @param size the size in bytes of the character buffer
23548 - *
23549 - * @return the newly allocated buffer state object.
23550 - */
23551 -YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size )
23552 -{
23553 - YY_BUFFER_STATE b;
23554 -
23555 - if ( size < 2 ||
23556 - base[size-2] != YY_END_OF_BUFFER_CHAR ||
23557 - base[size-1] != YY_END_OF_BUFFER_CHAR )
23558 - /* They forgot to leave room for the EOB's. */
23559 - return 0;
23560 -
23561 - b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) );
23562 - if ( ! b )
23563 - YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" );
23564 -
23565 - b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */
23566 - b->yy_buf_pos = b->yy_ch_buf = base;
23567 - b->yy_is_our_buffer = 0;
23568 - b->yy_input_file = 0;
23569 - b->yy_n_chars = b->yy_buf_size;
23570 - b->yy_is_interactive = 0;
23571 - b->yy_at_bol = 1;
23572 - b->yy_fill_buffer = 0;
23573 - b->yy_buffer_status = YY_BUFFER_NEW;
23574 -
23575 - yy_switch_to_buffer(b );
23576 -
23577 - return b;
23578 -}
23579 -
23580 -/** Setup the input buffer state to scan a string. The next call to yylex() will
23581 - * scan from a @e copy of @a str.
23582 - * @param yystr a NUL-terminated string to scan
23583 - *
23584 - * @return the newly allocated buffer state object.
23585 - * @note If you want to scan bytes that may contain NUL values, then use
23586 - * yy_scan_bytes() instead.
23587 - */
23588 -YY_BUFFER_STATE yy_scan_string (yyconst char * yystr )
23589 -{
23590 -
23591 - return yy_scan_bytes(yystr,strlen(yystr) );
23592 -}
23593 -
23594 -/** Setup the input buffer state to scan the given bytes. The next call to yylex() will
23595 - * scan from a @e copy of @a bytes.
23596 - * @param bytes the byte buffer to scan
23597 - * @param len the number of bytes in the buffer pointed to by @a bytes.
23598 - *
23599 - * @return the newly allocated buffer state object.
23600 - */
23601 -YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, int _yybytes_len )
23602 -{
23603 - YY_BUFFER_STATE b;
23604 - char *buf;
23605 - yy_size_t n;
23606 - int i;
23607 -
23608 - /* Get memory for full buffer, including space for trailing EOB's. */
23609 - n = _yybytes_len + 2;
23610 - buf = (char *) yyalloc(n );
23611 - if ( ! buf )
23612 - YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
23613 -
23614 - for ( i = 0; i < _yybytes_len; ++i )
23615 - buf[i] = yybytes[i];
23616 -
23617 - buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
23618 -
23619 - b = yy_scan_buffer(buf,n );
23620 - if ( ! b )
23621 - YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" );
23622 -
23623 - /* It's okay to grow etc. this buffer, and we should throw it
23624 - * away when we're done.
23625 - */
23626 - b->yy_is_our_buffer = 1;
23627 -
23628 - return b;
23629 -}
23630 -
23631 -#ifndef YY_EXIT_FAILURE
23632 -#define YY_EXIT_FAILURE 2
23633 -#endif
23634 -
23635 -static void yy_fatal_error (yyconst char* msg )
23636 -{
23637 - (void) fprintf( stderr, "%s\n", msg );
23638 - exit( YY_EXIT_FAILURE );
23639 -}
23640 -
23641 -/* Redefine yyless() so it works in section 3 code. */
23642 -
23643 -#undef yyless
23644 -#define yyless(n) \
23645 - do \
23646 - { \
23647 - /* Undo effects of setting up yytext. */ \
23648 - int yyless_macro_arg = (n); \
23649 - YY_LESS_LINENO(yyless_macro_arg);\
23650 - yytext[yyleng] = (yy_hold_char); \
23651 - (yy_c_buf_p) = yytext + yyless_macro_arg; \
23652 - (yy_hold_char) = *(yy_c_buf_p); \
23653 - *(yy_c_buf_p) = '\0'; \
23654 - yyleng = yyless_macro_arg; \
23655 - } \
23656 - while ( 0 )
23657 -
23658 -/* Accessor methods (get/set functions) to struct members. */
23659 -
23660 -/** Get the current line number.
23661 - *
23662 - */
23663 -int yyget_lineno (void)
23664 -{
23665 -
23666 - return yylineno;
23667 -}
23668 -
23669 -/** Get the input stream.
23670 - *
23671 - */
23672 -FILE *yyget_in (void)
23673 -{
23674 - return yyin;
23675 -}
23676 -
23677 -/** Get the output stream.
23678 - *
23679 - */
23680 -FILE *yyget_out (void)
23681 -{
23682 - return yyout;
23683 -}
23684 -
23685 -/** Get the length of the current token.
23686 - *
23687 - */
23688 -int yyget_leng (void)
23689 -{
23690 - return yyleng;
23691 -}
23692 -
23693 -/** Get the current token.
23694 - *
23695 - */
23696 -
23697 -char *yyget_text (void)
23698 -{
23699 - return yytext;
23700 -}
23701 -
23702 -/** Set the current line number.
23703 - * @param line_number
23704 - *
23705 - */
23706 -void yyset_lineno (int line_number )
23707 -{
23708 -
23709 - yylineno = line_number;
23710 -}
23711 -
23712 -/** Set the input stream. This does not discard the current
23713 - * input buffer.
23714 - * @param in_str A readable stream.
23715 - *
23716 - * @see yy_switch_to_buffer
23717 - */
23718 -void yyset_in (FILE * in_str )
23719 -{
23720 - yyin = in_str ;
23721 -}
23722 -
23723 -void yyset_out (FILE * out_str )
23724 -{
23725 - yyout = out_str ;
23726 -}
23727 -
23728 -int yyget_debug (void)
23729 -{
23730 - return yy_flex_debug;
23731 -}
23732 -
23733 -void yyset_debug (int bdebug )
23734 -{
23735 - yy_flex_debug = bdebug ;
23736 -}
23737 -
23738 -static int yy_init_globals (void)
23739 -{
23740 - /* Initialization is the same as for the non-reentrant scanner.
23741 - * This function is called from yylex_destroy(), so don't allocate here.
23742 - */
23743 -
23744 - (yy_buffer_stack) = 0;
23745 - (yy_buffer_stack_top) = 0;
23746 - (yy_buffer_stack_max) = 0;
23747 - (yy_c_buf_p) = (char *) 0;
23748 - (yy_init) = 0;
23749 - (yy_start) = 0;
23750 -
23751 -/* Defined in main.c */
23752 -#ifdef YY_STDINIT
23753 - yyin = stdin;
23754 - yyout = stdout;
23755 -#else
23756 - yyin = (FILE *) 0;
23757 - yyout = (FILE *) 0;
23758 -#endif
23759 -
23760 - /* For future reference: Set errno on error, since we are called by
23761 - * yylex_init()
23762 - */
23763 - return 0;
23764 -}
23765 -
23766 -/* yylex_destroy is for both reentrant and non-reentrant scanners. */
23767 -int yylex_destroy (void)
23768 -{
23769 -
23770 - /* Pop the buffer stack, destroying each element. */
23771 - while(YY_CURRENT_BUFFER){
23772 - yy_delete_buffer(YY_CURRENT_BUFFER );
23773 - YY_CURRENT_BUFFER_LVALUE = NULL;
23774 - yypop_buffer_state();
23775 - }
23776 -
23777 - /* Destroy the stack itself. */
23778 - yyfree((yy_buffer_stack) );
23779 - (yy_buffer_stack) = NULL;
23780 -
23781 - /* Reset the globals. This is important in a non-reentrant scanner so the next time
23782 - * yylex() is called, initialization will occur. */
23783 - yy_init_globals( );
23784 -
23785 - return 0;
23786 -}
23787 -
23788 -/*
23789 - * Internal utility routines.
23790 - */
23791 -
23792 -#ifndef yytext_ptr
23793 -static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
23794 -{
23795 - register int i;
23796 - for ( i = 0; i < n; ++i )
23797 - s1[i] = s2[i];
23798 -}
23799 -#endif
23800 -
23801 -#ifdef YY_NEED_STRLEN
23802 -static int yy_flex_strlen (yyconst char * s )
23803 -{
23804 - register int n;
23805 - for ( n = 0; s[n]; ++n )
23806 - ;
23807 -
23808 - return n;
23809 -}
23810 -#endif
23811 -
23812 -void *yyalloc (yy_size_t size )
23813 -{
23814 - return (void *) malloc( size );
23815 -}
23816 -
23817 -void *yyrealloc (void * ptr, yy_size_t size )
23818 -{
23819 - /* The cast to (char *) in the following accommodates both
23820 - * implementations that use char* generic pointers, and those
23821 - * that use void* generic pointers. It works with the latter
23822 - * because both ANSI C and C++ allow castless assignment from
23823 - * any pointer type to void*, and deal with argument conversions
23824 - * as though doing an assignment.
23825 - */
23826 - return (void *) realloc( (char *) ptr, size );
23827 -}
23828 -
23829 -void yyfree (void * ptr )
23830 -{
23831 - free( (char *) ptr ); /* see yyrealloc() for (char *) cast */
23832 -}
23833 -
23834 -#define YYTABLES_NAME "yytables"
23835 -
23836 -#line 196 "/d/gcc-4.4.1/gcc-4.4.1/gcc/gengtype-lex.l"
23837 -
23838 -
23839 -
23840 -void
23841 -yybegin (const char *fname)
23842 -{
23843 - yyin = fopen (fname, "r");
23844 - if (yyin == NULL)
23845 - {
23846 - perror (fname);
23847 - exit (1);
23848 - }
23849 - lexer_line.file = fname;
23850 - lexer_line.line = 1;
23851 -}
23852 -
23853 -void
23854 -yyend (void)
23855 -{
23856 - fclose (yyin);
23857 -}
23858 -
23859 --- a/gcc/gengtype-lex.l
23860 +++ b/gcc/gengtype-lex.l
23861 @@ -48,7 +48,7 @@
23862 ID [[:alpha:]_][[:alnum:]_]*
23863 WS [[:space:]]+
23864 HWS [ \t\r\v\f]*
23865 -IWORD short|long|(un)?signed|char|int|HOST_WIDE_INT|HOST_WIDEST_INT|bool|size_t|BOOL_BITFIELD|CPPCHAR_SIGNED_T|ino_t|dev_t
23866 +IWORD short|long|(un)?signed|char|int|HOST_WIDE_INT|HOST_WIDEST_INT|bool|size_t|BOOL_BITFIELD|CPPCHAR_SIGNED_T|ino_t|dev_t|HARD_REG_SET
23867 ITYPE {IWORD}({WS}{IWORD})*
23868 EOID [^[:alnum:]_]
23869
23870 --- a/gcc/haifa-sched.c
23871 +++ b/gcc/haifa-sched.c
23872 @@ -1990,6 +1990,23 @@
23873 SCHED_GROUP_P (insn) = 0;
23874 }
23875
23876 +/* Return true if scheduling INSN will finish current clock cycle. */
23877 +static bool
23878 +insn_finishes_cycle_p (rtx insn)
23879 +{
23880 + if (SCHED_GROUP_P (insn))
23881 + /* After issuing INSN, rest of the sched_group will be forced to issue
23882 + in order. Don't make any plans for the rest of cycle. */
23883 + return true;
23884 +
23885 + /* Finishing the block will, apparently, finish the cycle. */
23886 + if (current_sched_info->insn_finishes_block_p
23887 + && current_sched_info->insn_finishes_block_p (insn))
23888 + return true;
23889 +
23890 + return false;
23891 +}
23892 +
23893 /* The following structure describe an entry of the stack of choices. */
23894 struct choice_entry
23895 {
23896 @@ -2168,7 +2185,10 @@
23897 delay = state_transition (state, insn);
23898 if (delay < 0)
23899 {
23900 - if (state_dead_lock_p (state))
23901 + if (state_dead_lock_p (state)
23902 + || insn_finishes_cycle_p (insn))
23903 + /* We won't issue any more instructions in the next
23904 + choice_state. */
23905 top->rest = 0;
23906 else
23907 top->rest--;
23908 --- a/gcc/hooks.c
23909 +++ b/gcc/hooks.c
23910 @@ -335,3 +335,10 @@
23911 {
23912 return NULL;
23913 }
23914 +
23915 +/* Generic hook that takes a const_tree and returns NULL_TREE. */
23916 +tree
23917 +hook_tree_const_tree_null (const_tree t ATTRIBUTE_UNUSED)
23918 +{
23919 + return NULL;
23920 +}
23921 --- a/gcc/hooks.h
23922 +++ b/gcc/hooks.h
23923 @@ -64,6 +64,8 @@
23924 extern int hook_int_size_t_constcharptr_int_0 (size_t, const char *, int);
23925 extern int hook_int_void_no_regs (void);
23926
23927 +extern tree hook_tree_const_tree_null (const_tree);
23928 +
23929 extern tree hook_tree_tree_tree_null (tree, tree);
23930 extern tree hook_tree_tree_tree_tree_null (tree, tree, tree);
23931 extern tree hook_tree_tree_tree_tree_3rd_identity (tree, tree, tree);
23932 --- a/gcc/incpath.c
23933 +++ b/gcc/incpath.c
23934 @@ -30,6 +30,8 @@
23935 #include "intl.h"
23936 #include "incpath.h"
23937 #include "cppdefault.h"
23938 +#include "flags.h"
23939 +#include "toplev.h"
23940
23941 /* Microsoft Windows does not natively support inodes.
23942 VMS has non-numeric inodes. */
23943 @@ -353,6 +355,24 @@
23944 }
23945 fprintf (stderr, _("End of search list.\n"));
23946 }
23947 +
23948 +#ifdef ENABLE_POISON_SYSTEM_DIRECTORIES
23949 + if (flag_poison_system_directories)
23950 + {
23951 + struct cpp_dir *p;
23952 +
23953 + for (p = heads[QUOTE]; p; p = p->next)
23954 + {
23955 + if ((!strncmp (p->name, "/usr/include", 12))
23956 + || (!strncmp (p->name, "/usr/local/include", 18))
23957 + || (!strncmp (p->name, "/usr/X11R6/include", 18)))
23958 + warning (OPT_Wpoison_system_directories,
23959 + "include location \"%s\" is unsafe for "
23960 + "cross-compilation",
23961 + p->name);
23962 + }
23963 + }
23964 +#endif
23965 }
23966
23967 /* Use given -I paths for #include "..." but not #include <...>, and
23968 --- a/gcc/ira.c
23969 +++ b/gcc/ira.c
23970 @@ -1349,14 +1349,12 @@
23971 return for_each_rtx (&insn, insn_contains_asm_1, NULL);
23972 }
23973
23974 -/* Set up regs_asm_clobbered. */
23975 +/* Add register clobbers from asm statements. */
23976 static void
23977 -compute_regs_asm_clobbered (char *regs_asm_clobbered)
23978 +compute_regs_asm_clobbered (void)
23979 {
23980 basic_block bb;
23981
23982 - memset (regs_asm_clobbered, 0, sizeof (char) * FIRST_PSEUDO_REGISTER);
23983 -
23984 FOR_EACH_BB (bb)
23985 {
23986 rtx insn;
23987 @@ -1377,7 +1375,7 @@
23988 + hard_regno_nregs[dregno][mode] - 1;
23989
23990 for (i = dregno; i <= end; ++i)
23991 - regs_asm_clobbered[i] = 1;
23992 + SET_HARD_REG_BIT(crtl->asm_clobbers, i);
23993 }
23994 }
23995 }
23996 @@ -1415,7 +1413,8 @@
23997 COPY_HARD_REG_SET (ira_no_alloc_regs, no_unit_alloc_regs);
23998 CLEAR_HARD_REG_SET (eliminable_regset);
23999
24000 - compute_regs_asm_clobbered (regs_asm_clobbered);
24001 + compute_regs_asm_clobbered ();
24002 +
24003 /* Build the regset of all eliminable registers and show we can't
24004 use those that we already know won't be eliminated. */
24005 #ifdef ELIMINABLE_REGS
24006 @@ -1425,7 +1424,7 @@
24007 = (! CAN_ELIMINATE (eliminables[i].from, eliminables[i].to)
24008 || (eliminables[i].to == STACK_POINTER_REGNUM && need_fp));
24009
24010 - if (! regs_asm_clobbered[eliminables[i].from])
24011 + if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, eliminables[i].from))
24012 {
24013 SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
24014
24015 @@ -1439,7 +1438,7 @@
24016 df_set_regs_ever_live (eliminables[i].from, true);
24017 }
24018 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
24019 - if (! regs_asm_clobbered[HARD_FRAME_POINTER_REGNUM])
24020 + if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
24021 {
24022 SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
24023 if (need_fp)
24024 @@ -1453,7 +1452,7 @@
24025 #endif
24026
24027 #else
24028 - if (! regs_asm_clobbered[FRAME_POINTER_REGNUM])
24029 + if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
24030 {
24031 SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM);
24032 if (need_fp)
24033 --- a/gcc/ira-costs.c
24034 +++ b/gcc/ira-costs.c
24035 @@ -706,11 +706,11 @@
24036
24037 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
24038 static inline bool
24039 -ok_for_index_p_nonstrict (rtx reg)
24040 +ok_for_index_p_nonstrict (rtx reg, enum machine_mode mode)
24041 {
24042 unsigned regno = REGNO (reg);
24043
24044 - return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
24045 + return regno >= FIRST_PSEUDO_REGISTER || ok_for_index_p_1 (regno, mode);
24046 }
24047
24048 /* A version of regno_ok_for_base_p for use here, when all
24049 @@ -748,7 +748,7 @@
24050 enum reg_class rclass;
24051
24052 if (context == 1)
24053 - rclass = INDEX_REG_CLASS;
24054 + rclass = index_reg_class (mode);
24055 else
24056 rclass = base_reg_class (mode, outer_code, index_code);
24057
24058 @@ -795,7 +795,8 @@
24059 just record registers in any non-constant operands. We
24060 assume here, as well as in the tests below, that all
24061 addresses are in canonical form. */
24062 - else if (INDEX_REG_CLASS == base_reg_class (VOIDmode, PLUS, SCRATCH))
24063 + else if (index_reg_class (mode)
24064 + == base_reg_class (mode, PLUS, SCRATCH))
24065 {
24066 record_address_regs (mode, arg0, context, PLUS, code1, scale);
24067 if (! CONSTANT_P (arg1))
24068 @@ -816,7 +817,7 @@
24069 else if (code0 == REG && code1 == REG
24070 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
24071 && (ok_for_base_p_nonstrict (arg0, mode, PLUS, REG)
24072 - || ok_for_index_p_nonstrict (arg0)))
24073 + || ok_for_index_p_nonstrict (arg0, mode)))
24074 record_address_regs (mode, arg1,
24075 ok_for_base_p_nonstrict (arg0, mode, PLUS, REG)
24076 ? 1 : 0,
24077 @@ -824,7 +825,7 @@
24078 else if (code0 == REG && code1 == REG
24079 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
24080 && (ok_for_base_p_nonstrict (arg1, mode, PLUS, REG)
24081 - || ok_for_index_p_nonstrict (arg1)))
24082 + || ok_for_index_p_nonstrict (arg1, mode)))
24083 record_address_regs (mode, arg0,
24084 ok_for_base_p_nonstrict (arg1, mode, PLUS, REG)
24085 ? 1 : 0,
24086 --- a/gcc/longlong.h
24087 +++ b/gcc/longlong.h
24088 @@ -982,7 +982,7 @@
24089 " or r1,%0" \
24090 : "=r" (q), "=&z" (r) \
24091 : "1" (n1), "r" (n0), "rm" (d), "r" (&__udiv_qrnnd_16) \
24092 - : "r1", "r2", "r4", "r5", "r6", "pr"); \
24093 + : "r1", "r2", "r4", "r5", "r6", "pr", "t"); \
24094 } while (0)
24095
24096 #define UDIV_TIME 80
24097 --- a/gcc/Makefile.in
24098 +++ b/gcc/Makefile.in
24099 @@ -1249,6 +1249,7 @@
24100 tree-ssa-loop-manip.o \
24101 tree-ssa-loop-niter.o \
24102 tree-ssa-loop-prefetch.o \
24103 + tree-ssa-loop-promote.o \
24104 tree-ssa-loop-unswitch.o \
24105 tree-ssa-loop.o \
24106 tree-ssa-math-opts.o \
24107 @@ -1258,6 +1259,7 @@
24108 tree-ssa-pre.o \
24109 tree-ssa-propagate.o \
24110 tree-ssa-reassoc.o \
24111 + tree-ssa-remove-local-statics.o \
24112 tree-ssa-sccvn.o \
24113 tree-ssa-sink.o \
24114 tree-ssa-structalias.o \
24115 @@ -1674,7 +1676,7 @@
24116 $(MACHMODE_H) $(FPBIT) $(DPBIT) $(TPBIT) $(LIB2ADD) \
24117 $(LIB2ADD_ST) $(LIB2ADDEH) $(srcdir)/emutls.c gcov-iov.h $(SFP_MACHINE)
24118
24119 -libgcc.mvars: config.status Makefile $(LIB2ADD) $(LIB2ADD_ST) specs \
24120 +libgcc.mvars: config.status Makefile $(LIB2ADD) $(LIB2ADD_ST) specs $(tmake_file) \
24121 xgcc$(exeext)
24122 : > tmp-libgcc.mvars
24123 echo LIB1ASMFUNCS = '$(LIB1ASMFUNCS)' >> tmp-libgcc.mvars
24124 @@ -1728,7 +1730,7 @@
24125 # driver program needs to select the library directory based on the
24126 # switches.
24127 multilib.h: s-mlib; @true
24128 -s-mlib: $(srcdir)/genmultilib Makefile
24129 +s-mlib: $(srcdir)/genmultilib Makefile $(tmake_file)
24130 if test @enable_multilib@ = yes \
24131 || test -n "$(MULTILIB_OSDIRNAMES)"; then \
24132 $(SHELL) $(srcdir)/genmultilib \
24133 @@ -1816,7 +1818,7 @@
24134
24135 incpath.o: incpath.c incpath.h $(CONFIG_H) $(SYSTEM_H) $(CPPLIB_H) \
24136 intl.h prefix.h coretypes.h $(TM_H) cppdefault.h $(TARGET_H) \
24137 - $(MACHMODE_H)
24138 + $(MACHMODE_H) $(FLAGS_H) toplev.h
24139
24140 c-decl.o : c-decl.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) \
24141 $(RTL_H) $(C_TREE_H) $(GGC_H) $(TARGET_H) $(FLAGS_H) $(FUNCTION_H) output.h \
24142 @@ -1900,7 +1902,7 @@
24143 $(TREE_H) $(C_PRAGMA_H) $(FLAGS_H) $(TOPLEV_H) langhooks.h \
24144 $(TREE_INLINE_H) $(DIAGNOSTIC_H) intl.h debug.h $(C_COMMON_H) \
24145 opts.h options.h $(MKDEPS_H) incpath.h cppdefault.h $(TARGET_H) \
24146 - $(TM_P_H) $(VARRAY_H)
24147 + $(TM_P_H) $(VARRAY_H) $(C_TREE_H)
24148 $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) \
24149 $< $(OUTPUT_OPTION) @TARGET_SYSTEM_ROOT_DEFINE@
24150
24151 @@ -2176,6 +2178,9 @@
24152 alloc-pool.h $(BASIC_BLOCK_H) $(BITMAP_H) $(HASHTAB_H) $(GIMPLE_H) \
24153 $(TREE_INLINE_H) tree-iterator.h tree-ssa-sccvn.h $(PARAMS_H) \
24154 $(DBGCNT_H)
24155 +tree-ssa-remove-local-statics.o: tree-ssa-remove-local-statics.c \
24156 + coretypes.h $(CONFIG_H) $(SYSTEM_H) $(BASIC_BLOCK_H) tree.h tree-pass.h \
24157 + $(TM_H) $(HASHTAB_H) $(BASIC_BLOCK_H)
24158 tree-ssa-sccvn.o : tree-ssa-sccvn.c $(TREE_FLOW_H) $(CONFIG_H) \
24159 $(SYSTEM_H) $(TREE_H) $(GGC_H) $(DIAGNOSTIC_H) $(TIMEVAR_H) $(FIBHEAP_H) \
24160 $(TM_H) coretypes.h $(TREE_DUMP_H) tree-pass.h $(FLAGS_H) $(CFGLOOP_H) \
24161 @@ -2271,6 +2276,12 @@
24162 $(CFGLOOP_H) $(PARAMS_H) langhooks.h $(BASIC_BLOCK_H) hard-reg-set.h \
24163 tree-chrec.h $(TOPLEV_H) langhooks.h $(TREE_INLINE_H) $(TREE_DATA_REF_H) \
24164 $(OPTABS_H)
24165 +tree-ssa-loop-promote.o: tree-ssa-loop-promote.c \
24166 + coretypes.h $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TOPLEV_H) \
24167 + $(RTL_H) $(TM_P_H) hard-reg-set.h $(OBSTACK_H) $(BASIC_BLOCK_H) \
24168 + pointer-set.h intl.h $(TREE_H) $(GIMPLE_H) $(HASHTAB_H) $(DIAGNOSTIC_H) \
24169 + $(TREE_FLOW_H) $(TREE_DUMP_H) $(CFGLOOP_H) $(FLAGS_H) $(TIMEVAR_H) \
24170 + tree-pass.h $(TM_H)
24171 tree-predcom.o: tree-predcom.c $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(TM_P_H) \
24172 $(CFGLOOP_H) $(TREE_FLOW_H) $(GGC_H) $(TREE_DATA_REF_H) $(SCEV_H) \
24173 $(PARAMS_H) $(DIAGNOSTIC_H) tree-pass.h $(TM_H) coretypes.h tree-affine.h \
24174 @@ -2865,7 +2876,7 @@
24175 $(RTL_H) $(REAL_H) $(FLAGS_H) $(EXPR_H) $(OPTABS_H) reload.h $(REGS_H) \
24176 hard-reg-set.h insn-config.h $(BASIC_BLOCK_H) $(RECOG_H) output.h \
24177 $(FUNCTION_H) $(TOPLEV_H) cselib.h $(TM_P_H) except.h $(TREE_H) $(MACHMODE_H) \
24178 - $(OBSTACK_H) $(TIMEVAR_H) tree-pass.h $(DF_H) $(DBGCNT_H)
24179 + $(OBSTACK_H) $(TIMEVAR_H) tree-pass.h addresses.h $(DF_H) $(DBGCNT_H)
24180 postreload-gcse.o : postreload-gcse.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
24181 $(TM_H) $(RTL_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \
24182 $(RECOG_H) $(EXPR_H) $(BASIC_BLOCK_H) $(FUNCTION_H) output.h $(TOPLEV_H) \
24183 @@ -3582,7 +3593,7 @@
24184 # be rebuilt.
24185
24186 # Build the include directories.
24187 -stmp-int-hdrs: $(STMP_FIXINC) $(USER_H) $(UNWIND_H) fixinc_list
24188 +stmp-int-hdrs: $(STMP_FIXINC) $(USER_H) $(UNWIND_H)
24189 # Copy in the headers provided with gcc.
24190 # The sed command gets just the last file name component;
24191 # this is necessary because VPATH could add a dirname.
24192 @@ -3601,21 +3612,23 @@
24193 done
24194 rm -f include/unwind.h
24195 cp $(UNWIND_H) include/unwind.h
24196 - set -e; for ml in `cat fixinc_list`; do \
24197 - sysroot_headers_suffix=`echo $${ml} | sed -e 's/;.*$$//'`; \
24198 - multi_dir=`echo $${ml} | sed -e 's/^[^;]*;//'`; \
24199 - fix_dir=include-fixed$${multi_dir}; \
24200 - if $(LIMITS_H_TEST) ; then \
24201 - cat $(srcdir)/limitx.h $(srcdir)/glimits.h $(srcdir)/limity.h > tmp-xlimits.h; \
24202 - else \
24203 - cat $(srcdir)/glimits.h > tmp-xlimits.h; \
24204 - fi; \
24205 - $(mkinstalldirs) $${fix_dir}; \
24206 - chmod a+rx $${fix_dir} || true; \
24207 - rm -f $${fix_dir}/limits.h; \
24208 - mv tmp-xlimits.h $${fix_dir}/limits.h; \
24209 - chmod a+r $${fix_dir}/limits.h; \
24210 - done
24211 + set -e; if [ -f fixinc_list ] ; then \
24212 + for ml in `cat fixinc_list`; do \
24213 + sysroot_headers_suffix=`echo $${ml} | sed -e 's/;.*$$//'`; \
24214 + multi_dir=`echo $${ml} | sed -e 's/^[^;]*;//'`; \
24215 + fix_dir=include-fixed$${multi_dir}; \
24216 + if $(LIMITS_H_TEST) ; then \
24217 + cat $(srcdir)/limitx.h $(srcdir)/glimits.h $(srcdir)/limity.h > tmp-xlimits.h; \
24218 + else \
24219 + cat $(srcdir)/glimits.h > tmp-xlimits.h; \
24220 + fi; \
24221 + $(mkinstalldirs) $${fix_dir}; \
24222 + chmod a+rx $${fix_dir} || true; \
24223 + rm -f $${fix_dir}/limits.h; \
24224 + mv tmp-xlimits.h $${fix_dir}/limits.h; \
24225 + chmod a+r $${fix_dir}/limits.h; \
24226 + done; \
24227 + fi
24228 # Install the README
24229 rm -f include-fixed/README
24230 cp $(srcdir)/../fixincludes/README-fixinc include-fixed/README
24231 @@ -4340,16 +4353,18 @@
24232
24233 # Install supporting files for fixincludes to be run later.
24234 install-mkheaders: stmp-int-hdrs $(STMP_FIXPROTO) install-itoolsdirs \
24235 - macro_list fixinc_list
24236 + macro_list
24237 $(INSTALL_DATA) $(srcdir)/gsyslimits.h \
24238 $(DESTDIR)$(itoolsdatadir)/gsyslimits.h
24239 $(INSTALL_DATA) macro_list $(DESTDIR)$(itoolsdatadir)/macro_list
24240 - $(INSTALL_DATA) fixinc_list $(DESTDIR)$(itoolsdatadir)/fixinc_list
24241 - set -e; for ml in `cat fixinc_list`; do \
24242 - multi_dir=`echo $${ml} | sed -e 's/^[^;]*;//'`; \
24243 - $(mkinstalldirs) $(DESTDIR)$(itoolsdatadir)/include$${multi_dir}; \
24244 - $(INSTALL_DATA) include-fixed$${multidir}/limits.h $(DESTDIR)$(itoolsdatadir)/include$${multi_dir}/limits.h; \
24245 - done
24246 + set -e; if [ -f fixinc_list ] ; then \
24247 + $(INSTALL_DATA) fixinc_list $(DESTDIR)$(itoolsdatadir)/fixinc_list; \
24248 + for ml in `cat fixinc_list`; do \
24249 + multi_dir=`echo $${ml} | sed -e 's/^[^;]*;//'`; \
24250 + $(mkinstalldirs) $(DESTDIR)$(itoolsdatadir)/include$${multi_dir}; \
24251 + $(INSTALL_DATA) include-fixed$${multi_dir}/limits.h $(DESTDIR)$(itoolsdatadir)/include$${multi_dir}/limits.h; \
24252 + done; \
24253 + fi
24254 $(INSTALL_SCRIPT) $(srcdir)/../mkinstalldirs \
24255 $(DESTDIR)$(itoolsdir)/mkinstalldirs ; \
24256 if [ x$(STMP_FIXPROTO) != x ] ; then \
24257 --- a/gcc/modulo-sched.c
24258 +++ b/gcc/modulo-sched.c
24259 @@ -270,6 +270,7 @@
24260 NULL,
24261 sms_print_insn,
24262 NULL,
24263 + NULL, /* insn_finishes_block_p */
24264 NULL, NULL,
24265 NULL, NULL,
24266 0, 0,
24267 --- a/gcc/optabs.c
24268 +++ b/gcc/optabs.c
24269 @@ -3300,7 +3300,8 @@
24270 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
24271 || unoptab == popcount_optab || unoptab == parity_optab)
24272 outmode
24273 - = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
24274 + = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
24275 + optab_libfunc (unoptab, mode)));
24276
24277 start_sequence ();
24278
24279 @@ -4357,10 +4358,12 @@
24280 mode != VOIDmode;
24281 mode = GET_MODE_WIDER_MODE (mode))
24282 {
24283 - if ((libfunc = optab_libfunc (code_to_optab[comparison], mode)))
24284 + if (code_to_optab[comparison]
24285 + && (libfunc = optab_libfunc (code_to_optab[comparison], mode)))
24286 break;
24287
24288 - if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode)))
24289 + if (code_to_optab[swapped]
24290 + && (libfunc = optab_libfunc (code_to_optab[swapped], mode)))
24291 {
24292 rtx tmp;
24293 tmp = x; x = y; y = tmp;
24294 @@ -4368,7 +4371,8 @@
24295 break;
24296 }
24297
24298 - if ((libfunc = optab_libfunc (code_to_optab[reversed], mode))
24299 + if (code_to_optab[reversed]
24300 + && (libfunc = optab_libfunc (code_to_optab[reversed], mode))
24301 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
24302 {
24303 comparison = reversed;
24304 --- a/gcc/opts.c
24305 +++ b/gcc/opts.c
24306 @@ -904,7 +904,7 @@
24307 flag_tree_vrp = opt2;
24308 flag_tree_builtin_call_dce = opt2;
24309 flag_tree_pre = opt2;
24310 - flag_tree_switch_conversion = 1;
24311 + flag_tree_switch_conversion = opt2;
24312 flag_ipa_cp = opt2;
24313
24314 /* Allow more virtual operators to increase alias precision. */
24315 @@ -928,6 +928,7 @@
24316 flag_gcse_after_reload = opt3;
24317 flag_tree_vectorize = opt3;
24318 flag_ipa_cp_clone = opt3;
24319 + flag_tree_pre_partial_partial = opt3;
24320 if (flag_ipa_cp_clone)
24321 flag_ipa_cp = 1;
24322
24323 @@ -951,10 +952,13 @@
24324 being declared inline. */
24325 flag_inline_functions = 1;
24326
24327 - /* Basic optimization options. */
24328 - optimize_size = 1;
24329 + /* Basic optimization options at -Os are almost the same as -O2. The
24330 + only difference is that we disable PRE, because it sometimes still
24331 + increases code size. If the user wants to run PRE with -Os, he/she
24332 + will have to indicate so explicitly. */
24333 if (optimize > 2)
24334 optimize = 2;
24335 + flag_tree_pre = 0;
24336
24337 /* We want to crossjump as much as possible. */
24338 set_param_value ("min-crossjump-insns", 1);
24339 @@ -2060,6 +2064,10 @@
24340 /* These are no-ops, preserved for backward compatibility. */
24341 break;
24342
24343 + case OPT_feglibc_:
24344 + /* This is a no-op at the moment. */
24345 + break;
24346 +
24347 default:
24348 /* If the flag was handled in a standard way, assume the lack of
24349 processing here is intentional. */
24350 --- a/gcc/passes.c
24351 +++ b/gcc/passes.c
24352 @@ -591,6 +591,7 @@
24353 NEXT_PASS (pass_rename_ssa_copies);
24354 NEXT_PASS (pass_complete_unrolli);
24355 NEXT_PASS (pass_ccp);
24356 + NEXT_PASS (pass_promote_indices);
24357 NEXT_PASS (pass_forwprop);
24358 /* Ideally the function call conditional
24359 dead code elimination phase can be delayed
24360 @@ -605,6 +606,7 @@
24361 alias information also rewrites no longer addressed
24362 locals into SSA form if possible. */
24363 NEXT_PASS (pass_build_alias);
24364 + NEXT_PASS (pass_remove_local_statics);
24365 NEXT_PASS (pass_return_slot);
24366 NEXT_PASS (pass_phiprop);
24367 NEXT_PASS (pass_fre);
24368 --- a/gcc/pointer-set.c
24369 +++ b/gcc/pointer-set.c
24370 @@ -181,6 +181,23 @@
24371 break;
24372 }
24373
24374 +/* Return the number of elements in PSET. */
24375 +
24376 +size_t
24377 +pointer_set_n_elements (struct pointer_set_t *pset)
24378 +{
24379 + return pset->n_elements;
24380 +}
24381 +
24382 +/* Remove all entries from PSET. */
24383 +
24384 +void
24385 +pointer_set_clear (struct pointer_set_t *pset)
24386 +{
24387 + pset->n_elements = 0;
24388 + memset (pset->slots, 0, sizeof (pset->slots[0]) * pset->n_slots);
24389 +}
24390 +
24391 \f
24392 /* A pointer map is represented the same way as a pointer_set, so
24393 the hash code is based on the address of the key, rather than
24394 @@ -301,3 +318,20 @@
24395 if (pmap->keys[i] && !fn (pmap->keys[i], &pmap->values[i], data))
24396 break;
24397 }
24398 +
24399 +/* Return the number of elements in PMAP. */
24400 +
24401 +size_t
24402 +pointer_map_n_elements (struct pointer_map_t *pmap)
24403 +{
24404 + return pmap->n_elements;
24405 +}
24406 +
24407 +/* Remove all entries from PMAP. */
24408 +
24409 +void pointer_map_clear (struct pointer_map_t *pmap)
24410 +{
24411 + pmap->n_elements = 0;
24412 + memset (pmap->keys, 0, sizeof (pmap->keys[0]) * pmap->n_slots);
24413 + memset (pmap->values, 0, sizeof (pmap->values[0]) * pmap->n_slots);
24414 +}
24415 --- a/gcc/pointer-set.h
24416 +++ b/gcc/pointer-set.h
24417 @@ -29,6 +29,8 @@
24418 void pointer_set_traverse (const struct pointer_set_t *,
24419 bool (*) (const void *, void *),
24420 void *);
24421 +size_t pointer_set_n_elements (struct pointer_set_t *);
24422 +void pointer_set_clear (struct pointer_set_t *);
24423
24424 struct pointer_map_t;
24425 struct pointer_map_t *pointer_map_create (void);
24426 @@ -38,5 +40,7 @@
24427 void **pointer_map_insert (struct pointer_map_t *pmap, const void *p);
24428 void pointer_map_traverse (const struct pointer_map_t *,
24429 bool (*) (const void *, void **, void *), void *);
24430 +size_t pointer_map_n_elements (struct pointer_map_t *);
24431 +void pointer_map_clear (struct pointer_map_t *);
24432
24433 #endif /* POINTER_SET_H */
24434 --- a/gcc/postreload.c
24435 +++ b/gcc/postreload.c
24436 @@ -46,6 +46,7 @@
24437 #include "tree.h"
24438 #include "timevar.h"
24439 #include "tree-pass.h"
24440 +#include "addresses.h"
24441 #include "df.h"
24442 #include "dbgcnt.h"
24443
24444 @@ -708,17 +709,19 @@
24445 int last_label_ruid;
24446 int min_labelno, n_labels;
24447 HARD_REG_SET ever_live_at_start, *label_live;
24448 + enum reg_class index_regs;
24449
24450 /* If reg+reg can be used in offsetable memory addresses, the main chunk of
24451 reload has already used it where appropriate, so there is no use in
24452 trying to generate it now. */
24453 - if (double_reg_address_ok && INDEX_REG_CLASS != NO_REGS)
24454 + index_regs = index_reg_class (VOIDmode);
24455 + if (double_reg_address_ok && index_regs != NO_REGS)
24456 return;
24457
24458 /* To avoid wasting too much time later searching for an index register,
24459 determine the minimum and maximum index register numbers. */
24460 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
24461 - if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], r))
24462 + if (TEST_HARD_REG_BIT (reg_class_contents[index_regs], r))
24463 {
24464 if (first_index_reg == -1)
24465 first_index_reg = r;
24466 @@ -826,8 +829,8 @@
24467 substitute uses of REG (typically in MEMs) with.
24468 First check REG and BASE for being index registers;
24469 we can use them even if they are not dead. */
24470 - if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno)
24471 - || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
24472 + if (TEST_HARD_REG_BIT (reg_class_contents[index_regs], regno)
24473 + || TEST_HARD_REG_BIT (reg_class_contents[index_regs],
24474 REGNO (base)))
24475 {
24476 const_reg = reg;
24477 @@ -841,8 +844,7 @@
24478 two registers. */
24479 for (i = first_index_reg; i <= last_index_reg; i++)
24480 {
24481 - if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
24482 - i)
24483 + if (TEST_HARD_REG_BIT (reg_class_contents[index_regs], i)
24484 && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES
24485 && reg_state[i].store_ruid <= reg_state[regno].use_ruid
24486 && hard_regno_nregs[i][GET_MODE (reg)] == 1)
24487 --- a/gcc/real.c
24488 +++ b/gcc/real.c
24489 @@ -4513,6 +4513,167 @@
24490 false
24491 };
24492 \f
24493 +/* Encode half-precision floats. This routine is used both for the IEEE
24494 + and ARM alternative encodings. */
24495 +static void
24496 +encode_ieee_half (const struct real_format *fmt, long *buf,
24497 + const REAL_VALUE_TYPE *r)
24498 +{
24499 + unsigned long image, sig, exp;
24500 + unsigned long sign = r->sign;
24501 + bool denormal = (r->sig[SIGSZ-1] & SIG_MSB) == 0;
24502 +
24503 + image = sign << 15;
24504 + sig = (r->sig[SIGSZ-1] >> (HOST_BITS_PER_LONG - 11)) & 0x3ff;
24505 +
24506 + switch (r->cl)
24507 + {
24508 + case rvc_zero:
24509 + break;
24510 +
24511 + case rvc_inf:
24512 + if (fmt->has_inf)
24513 + image |= 31 << 10;
24514 + else
24515 + image |= 0x7fff;
24516 + break;
24517 +
24518 + case rvc_nan:
24519 + if (fmt->has_nans)
24520 + {
24521 + if (r->canonical)
24522 + sig = (fmt->canonical_nan_lsbs_set ? (1 << 9) - 1 : 0);
24523 + if (r->signalling == fmt->qnan_msb_set)
24524 + sig &= ~(1 << 9);
24525 + else
24526 + sig |= 1 << 9;
24527 + if (sig == 0)
24528 + sig = 1 << 8;
24529 +
24530 + image |= 31 << 10;
24531 + image |= sig;
24532 + }
24533 + else
24534 + image |= 0x3ff;
24535 + break;
24536 +
24537 + case rvc_normal:
24538 + /* Recall that IEEE numbers are interpreted as 1.F x 2**exp,
24539 + whereas the intermediate representation is 0.F x 2**exp.
24540 + Which means we're off by one. */
24541 + if (denormal)
24542 + exp = 0;
24543 + else
24544 + exp = REAL_EXP (r) + 15 - 1;
24545 + image |= exp << 10;
24546 + image |= sig;
24547 + break;
24548 +
24549 + default:
24550 + gcc_unreachable ();
24551 + }
24552 +
24553 + buf[0] = image;
24554 +}
24555 +
24556 +/* Decode half-precision floats. This routine is used both for the IEEE
24557 + and ARM alternative encodings. */
24558 +static void
24559 +decode_ieee_half (const struct real_format *fmt, REAL_VALUE_TYPE *r,
24560 + const long *buf)
24561 +{
24562 + unsigned long image = buf[0] & 0xffff;
24563 + bool sign = (image >> 15) & 1;
24564 + int exp = (image >> 10) & 0x1f;
24565 +
24566 + memset (r, 0, sizeof (*r));
24567 + image <<= HOST_BITS_PER_LONG - 11;
24568 + image &= ~SIG_MSB;
24569 +
24570 + if (exp == 0)
24571 + {
24572 + if (image && fmt->has_denorm)
24573 + {
24574 + r->cl = rvc_normal;
24575 + r->sign = sign;
24576 + SET_REAL_EXP (r, -14);
24577 + r->sig[SIGSZ-1] = image << 1;
24578 + normalize (r);
24579 + }
24580 + else if (fmt->has_signed_zero)
24581 + r->sign = sign;
24582 + }
24583 + else if (exp == 31 && (fmt->has_nans || fmt->has_inf))
24584 + {
24585 + if (image)
24586 + {
24587 + r->cl = rvc_nan;
24588 + r->sign = sign;
24589 + r->signalling = (((image >> (HOST_BITS_PER_LONG - 2)) & 1)
24590 + ^ fmt->qnan_msb_set);
24591 + r->sig[SIGSZ-1] = image;
24592 + }
24593 + else
24594 + {
24595 + r->cl = rvc_inf;
24596 + r->sign = sign;
24597 + }
24598 + }
24599 + else
24600 + {
24601 + r->cl = rvc_normal;
24602 + r->sign = sign;
24603 + SET_REAL_EXP (r, exp - 15 + 1);
24604 + r->sig[SIGSZ-1] = image | SIG_MSB;
24605 + }
24606 +}
24607 +
24608 +/* Half-precision format, as specified in IEEE 754R. */
24609 +const struct real_format ieee_half_format =
24610 + {
24611 + encode_ieee_half,
24612 + decode_ieee_half,
24613 + 2,
24614 + 11,
24615 + 11,
24616 + -13,
24617 + 16,
24618 + 15,
24619 + 15,
24620 + false,
24621 + true,
24622 + true,
24623 + true,
24624 + true,
24625 + true,
24626 + true,
24627 + false
24628 + };
24629 +
24630 +/* ARM's alternative half-precision format, similar to IEEE but with
24631 + no reserved exponent value for NaNs and infinities; rather, it just
24632 + extends the range of exponents by one. */
24633 +const struct real_format arm_half_format =
24634 + {
24635 + encode_ieee_half,
24636 + decode_ieee_half,
24637 + 2,
24638 + 11,
24639 + 11,
24640 + -13,
24641 + 17,
24642 + 15,
24643 + 15,
24644 + false,
24645 + true,
24646 + false,
24647 + false,
24648 + true,
24649 + true,
24650 + false,
24651 + false
24652 + };
24653 +\f
24654 /* A synthetic "format" for internal arithmetic. It's the size of the
24655 internal significand minus the two bits needed for proper rounding.
24656 The encode and decode routines exist only to satisfy our paranoia
24657 --- a/gcc/real.h
24658 +++ b/gcc/real.h
24659 @@ -304,6 +304,8 @@
24660 extern const struct real_format decimal_single_format;
24661 extern const struct real_format decimal_double_format;
24662 extern const struct real_format decimal_quad_format;
24663 +extern const struct real_format ieee_half_format;
24664 +extern const struct real_format arm_half_format;
24665
24666
24667 /* ====================================================================== */
24668 --- a/gcc/regrename.c
24669 +++ b/gcc/regrename.c
24670 @@ -567,14 +567,14 @@
24671 int index_op;
24672 unsigned regno0 = REGNO (op0), regno1 = REGNO (op1);
24673
24674 - if (REGNO_OK_FOR_INDEX_P (regno1)
24675 + if (regno_ok_for_index_p (regno1, mode)
24676 && regno_ok_for_base_p (regno0, mode, PLUS, REG))
24677 index_op = 1;
24678 - else if (REGNO_OK_FOR_INDEX_P (regno0)
24679 + else if (regno_ok_for_index_p (regno0, mode)
24680 && regno_ok_for_base_p (regno1, mode, PLUS, REG))
24681 index_op = 0;
24682 else if (regno_ok_for_base_p (regno0, mode, PLUS, REG)
24683 - || REGNO_OK_FOR_INDEX_P (regno1))
24684 + || regno_ok_for_index_p (regno1, mode))
24685 index_op = 1;
24686 else if (regno_ok_for_base_p (regno1, mode, PLUS, REG))
24687 index_op = 0;
24688 @@ -599,7 +599,7 @@
24689 }
24690
24691 if (locI)
24692 - scan_rtx_address (insn, locI, INDEX_REG_CLASS, action, mode);
24693 + scan_rtx_address (insn, locI, index_reg_class (mode), action, mode);
24694 if (locB)
24695 scan_rtx_address (insn, locB, base_reg_class (mode, PLUS, index_code),
24696 action, mode);
24697 @@ -1488,14 +1488,14 @@
24698 int index_op;
24699 unsigned regno0 = REGNO (op0), regno1 = REGNO (op1);
24700
24701 - if (REGNO_OK_FOR_INDEX_P (regno1)
24702 + if (regno_ok_for_index_p (regno1, mode)
24703 && regno_ok_for_base_p (regno0, mode, PLUS, REG))
24704 index_op = 1;
24705 - else if (REGNO_OK_FOR_INDEX_P (regno0)
24706 + else if (regno_ok_for_index_p (regno0, mode)
24707 && regno_ok_for_base_p (regno1, mode, PLUS, REG))
24708 index_op = 0;
24709 else if (regno_ok_for_base_p (regno0, mode, PLUS, REG)
24710 - || REGNO_OK_FOR_INDEX_P (regno1))
24711 + || regno_ok_for_index_p (regno1, mode))
24712 index_op = 1;
24713 else if (regno_ok_for_base_p (regno1, mode, PLUS, REG))
24714 index_op = 0;
24715 @@ -1520,8 +1520,8 @@
24716 }
24717
24718 if (locI)
24719 - changed |= replace_oldest_value_addr (locI, INDEX_REG_CLASS, mode,
24720 - insn, vd);
24721 + changed |= replace_oldest_value_addr (locI, index_reg_class (mode),
24722 + mode, insn, vd);
24723 if (locB)
24724 changed |= replace_oldest_value_addr (locB,
24725 base_reg_class (mode, PLUS,
24726 --- a/gcc/reload.c
24727 +++ b/gcc/reload.c
24728 @@ -5046,7 +5046,7 @@
24729 loc = &XEXP (*loc, 0);
24730 }
24731
24732 - if (double_reg_address_ok)
24733 + if (double_reg_address_ok && index_reg_class (mode) != NO_REGS)
24734 {
24735 /* Unshare the sum as well. */
24736 *loc = ad = copy_rtx (ad);
24737 @@ -5054,8 +5054,8 @@
24738 /* Reload the displacement into an index reg.
24739 We assume the frame pointer or arg pointer is a base reg. */
24740 find_reloads_address_part (XEXP (ad, 1), &XEXP (ad, 1),
24741 - INDEX_REG_CLASS, GET_MODE (ad), opnum,
24742 - type, ind_levels);
24743 + index_reg_class (mode), GET_MODE (ad),
24744 + opnum, type, ind_levels);
24745 return 0;
24746 }
24747 else
24748 @@ -5448,13 +5448,13 @@
24749 #define REG_OK_FOR_CONTEXT(CONTEXT, REGNO, MODE, OUTER, INDEX) \
24750 ((CONTEXT) == 0 \
24751 ? regno_ok_for_base_p (REGNO, MODE, OUTER, INDEX) \
24752 - : REGNO_OK_FOR_INDEX_P (REGNO))
24753 + : regno_ok_for_index_p (REGNO, MODE))
24754
24755 enum reg_class context_reg_class;
24756 RTX_CODE code = GET_CODE (x);
24757
24758 if (context == 1)
24759 - context_reg_class = INDEX_REG_CLASS;
24760 + context_reg_class = index_reg_class (mode);
24761 else
24762 context_reg_class = base_reg_class (mode, outer_code, index_code);
24763
24764 @@ -5546,17 +5546,17 @@
24765
24766 else if (code0 == REG && code1 == REG)
24767 {
24768 - if (REGNO_OK_FOR_INDEX_P (REGNO (op1))
24769 + if (regno_ok_for_index_p (REGNO (op1), mode)
24770 && regno_ok_for_base_p (REGNO (op0), mode, PLUS, REG))
24771 return 0;
24772 - else if (REGNO_OK_FOR_INDEX_P (REGNO (op0))
24773 + else if (regno_ok_for_index_p (REGNO (op0), mode)
24774 && regno_ok_for_base_p (REGNO (op1), mode, PLUS, REG))
24775 return 0;
24776 else if (regno_ok_for_base_p (REGNO (op0), mode, PLUS, REG))
24777 find_reloads_address_1 (mode, orig_op1, 1, PLUS, SCRATCH,
24778 &XEXP (x, 1), opnum, type, ind_levels,
24779 insn);
24780 - else if (REGNO_OK_FOR_INDEX_P (REGNO (op1)))
24781 + else if (regno_ok_for_index_p (REGNO (op1), mode))
24782 find_reloads_address_1 (mode, orig_op0, 0, PLUS, REG,
24783 &XEXP (x, 0), opnum, type, ind_levels,
24784 insn);
24785 @@ -5564,7 +5564,7 @@
24786 find_reloads_address_1 (mode, orig_op0, 1, PLUS, SCRATCH,
24787 &XEXP (x, 0), opnum, type, ind_levels,
24788 insn);
24789 - else if (REGNO_OK_FOR_INDEX_P (REGNO (op0)))
24790 + else if (regno_ok_for_index_p (REGNO (op0), mode))
24791 find_reloads_address_1 (mode, orig_op1, 0, PLUS, REG,
24792 &XEXP (x, 1), opnum, type, ind_levels,
24793 insn);
24794 @@ -5634,7 +5634,7 @@
24795 need to live longer than a TYPE reload normally would, so be
24796 conservative and class it as RELOAD_OTHER. */
24797 if ((REG_P (XEXP (op1, 1))
24798 - && !REGNO_OK_FOR_INDEX_P (REGNO (XEXP (op1, 1))))
24799 + && !regno_ok_for_index_p (REGNO (XEXP (op1, 1)), mode))
24800 || GET_CODE (XEXP (op1, 1)) == PLUS)
24801 find_reloads_address_1 (mode, XEXP (op1, 1), 1, code, SCRATCH,
24802 &XEXP (op1, 1), opnum, RELOAD_OTHER,
24803 @@ -6128,18 +6128,26 @@
24804 /* For some processors an address may be valid in the
24805 original mode but not in a smaller mode. For
24806 example, ARM accepts a scaled index register in
24807 - SImode but not in HImode. Similarly, the address may
24808 - have been valid before the subreg offset was added,
24809 - but not afterwards. find_reloads_address
24810 - assumes that we pass it a valid address, and doesn't
24811 - force a reload. This will probably be fine if
24812 - find_reloads_address finds some reloads. But if it
24813 - doesn't find any, then we may have just converted a
24814 - valid address into an invalid one. Check for that
24815 - here. */
24816 + SImode but not in HImode. Note that this is only
24817 + a problem if the address in reg_equiv_mem is already
24818 + invalid in the new mode; other cases would be fixed
24819 + by find_reloads_address as usual.
24820 +
24821 + ??? We attempt to handle such cases here by doing an
24822 + additional reload of the full address after the
24823 + usual processing by find_reloads_address. Note that
24824 + this may not work in the general case, but it seems
24825 + to cover the cases where this situation currently
24826 + occurs. A more general fix might be to reload the
24827 + *value* instead of the address, but this would not
24828 + be expected by the callers of this routine as-is.
24829 +
24830 + If find_reloads_address has already replaced
24831 + the address, there is nothing further to do. */
24832 if (reloaded == 0
24833 - && !strict_memory_address_p (GET_MODE (tem),
24834 - XEXP (tem, 0)))
24835 + && reg_equiv_mem[regno] != 0
24836 + && !strict_memory_address_p (GET_MODE (x),
24837 + XEXP (reg_equiv_mem[regno], 0)))
24838 push_reload (XEXP (tem, 0), NULL_RTX, &XEXP (tem, 0), (rtx*) 0,
24839 base_reg_class (GET_MODE (tem), MEM, SCRATCH),
24840 GET_MODE (XEXP (tem, 0)), VOIDmode, 0, 0,
24841 --- a/gcc/rtlanal.c
24842 +++ b/gcc/rtlanal.c
24843 @@ -2913,62 +2913,78 @@
24844 commutative_operand_precedence (rtx op)
24845 {
24846 enum rtx_code code = GET_CODE (op);
24847 + int value;
24848
24849 /* Constants always come the second operand. Prefer "nice" constants. */
24850 if (code == CONST_INT)
24851 - return -8;
24852 - if (code == CONST_DOUBLE)
24853 - return -7;
24854 - if (code == CONST_FIXED)
24855 - return -7;
24856 - op = avoid_constant_pool_reference (op);
24857 - code = GET_CODE (op);
24858 -
24859 - switch (GET_RTX_CLASS (code))
24860 - {
24861 - case RTX_CONST_OBJ:
24862 - if (code == CONST_INT)
24863 - return -6;
24864 - if (code == CONST_DOUBLE)
24865 - return -5;
24866 - if (code == CONST_FIXED)
24867 - return -5;
24868 - return -4;
24869 -
24870 - case RTX_EXTRA:
24871 - /* SUBREGs of objects should come second. */
24872 - if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
24873 - return -3;
24874 - return 0;
24875 + value = -8;
24876 + else if (code == CONST_DOUBLE)
24877 + value = -7;
24878 + else if (code == CONST_FIXED)
24879 + value = -7;
24880 + else
24881 + {
24882 + op = avoid_constant_pool_reference (op);
24883 + code = GET_CODE (op);
24884 +
24885 + switch (GET_RTX_CLASS (code))
24886 + {
24887 + case RTX_CONST_OBJ:
24888 + if (code == CONST_INT)
24889 + value = -6;
24890 + else if (code == CONST_DOUBLE)
24891 + value = -5;
24892 + else if (code == CONST_FIXED)
24893 + value = -5;
24894 + else
24895 + value = -4;
24896 + break;
24897 +
24898 + case RTX_EXTRA:
24899 + /* SUBREGs of objects should come second. */
24900 + if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
24901 + value = -3;
24902 + else
24903 + value = 0;
24904 + break;
24905 +
24906 + case RTX_OBJ:
24907 + /* Complex expressions should be the first, so decrease priority
24908 + of objects. */
24909 + value = -1;
24910 + break;
24911
24912 - case RTX_OBJ:
24913 - /* Complex expressions should be the first, so decrease priority
24914 - of objects. Prefer pointer objects over non pointer objects. */
24915 - if ((REG_P (op) && REG_POINTER (op))
24916 - || (MEM_P (op) && MEM_POINTER (op)))
24917 - return -1;
24918 - return -2;
24919 -
24920 - case RTX_COMM_ARITH:
24921 - /* Prefer operands that are themselves commutative to be first.
24922 - This helps to make things linear. In particular,
24923 - (and (and (reg) (reg)) (not (reg))) is canonical. */
24924 - return 4;
24925 -
24926 - case RTX_BIN_ARITH:
24927 - /* If only one operand is a binary expression, it will be the first
24928 - operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
24929 - is canonical, although it will usually be further simplified. */
24930 - return 2;
24931 + case RTX_COMM_ARITH:
24932 + /* Prefer operands that are themselves commutative to be first.
24933 + This helps to make things linear. In particular,
24934 + (and (and (reg) (reg)) (not (reg))) is canonical. */
24935 + value = 4;
24936 + break;
24937 +
24938 + case RTX_BIN_ARITH:
24939 + /* If only one operand is a binary expression, it will be the first
24940 + operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
24941 + is canonical, although it will usually be further simplified. */
24942 + value = 2;
24943 + break;
24944
24945 - case RTX_UNARY:
24946 - /* Then prefer NEG and NOT. */
24947 - if (code == NEG || code == NOT)
24948 - return 1;
24949 + case RTX_UNARY:
24950 + /* Then prefer NEG and NOT. */
24951 + if (code == NEG || code == NOT)
24952 + value = 1;
24953 + else
24954 + value = 0;
24955 + break;
24956
24957 - default:
24958 - return 0;
24959 + default:
24960 + value = 0;
24961 + }
24962 }
24963 +
24964 + if (targetm.commutative_operand_precedence)
24965 + value = targetm.commutative_operand_precedence (op, value);
24966 +
24967 + return value;
24968 }
24969
24970 /* Return 1 iff it is necessary to swap operands of commutative operation
24971 --- a/gcc/rtl.def
24972 +++ b/gcc/rtl.def
24973 @@ -1088,7 +1088,11 @@
24974 guard for the bypass. The function will get the two insns as
24975 parameters. If the function returns zero the bypass will be
24976 ignored for this case. Additional guard is necessary to recognize
24977 - complicated bypasses, e.g. when consumer is load address. */
24978 + complicated bypasses, e.g. when consumer is load address. If there
24979 + is more than one bypass with the same output and input insns, the
24980 + chosen bypass is the first bypass with a guard in description whose
24981 + guard function returns nonzero. If there is no such bypass, then
24982 + bypass without the guard function is chosen. */
24983 DEF_RTL_EXPR(DEFINE_BYPASS, "define_bypass", "issS", RTX_EXTRA)
24984
24985 /* (define_automaton string) describes names of automata generated and
24986 --- a/gcc/sched-ebb.c
24987 +++ b/gcc/sched-ebb.c
24988 @@ -286,6 +286,7 @@
24989 rank,
24990 ebb_print_insn,
24991 ebb_contributes_to_priority,
24992 + NULL, /* insn_finishes_block_p */
24993
24994 NULL, NULL,
24995 NULL, NULL,
24996 --- a/gcc/sched-int.h
24997 +++ b/gcc/sched-int.h
24998 @@ -558,6 +558,10 @@
24999 calculations. */
25000 int (*contributes_to_priority) (rtx, rtx);
25001
25002 + /* Return true if scheduling insn (passed as the parameter) will trigger
25003 + finish of scheduling current block. */
25004 + bool (*insn_finishes_block_p) (rtx);
25005 +
25006 /* The boundaries of the set of insns to be scheduled. */
25007 rtx prev_head, next_tail;
25008
25009 --- a/gcc/sched-rgn.c
25010 +++ b/gcc/sched-rgn.c
25011 @@ -2338,6 +2338,19 @@
25012 0, 0, 0
25013 };
25014
25015 +/* Return true if scheduling INSN will trigger finish of scheduling
25016 + current block. */
25017 +static bool
25018 +rgn_insn_finishes_block_p (rtx insn)
25019 +{
25020 + if (INSN_BB (insn) == target_bb
25021 + && sched_target_n_insns + 1 == target_n_insns)
25022 + /* INSN is the last not-scheduled instruction in the current block. */
25023 + return true;
25024 +
25025 + return false;
25026 +}
25027 +
25028 /* Used in schedule_insns to initialize current_sched_info for scheduling
25029 regions (or single basic blocks). */
25030
25031 @@ -2350,6 +2363,7 @@
25032 rgn_rank,
25033 rgn_print_insn,
25034 contributes_to_priority,
25035 + rgn_insn_finishes_block_p,
25036
25037 NULL, NULL,
25038 NULL, NULL,
25039 --- a/gcc/sdbout.c
25040 +++ b/gcc/sdbout.c
25041 @@ -337,6 +337,7 @@
25042 debug_nothing_int, /* handle_pch */
25043 debug_nothing_rtx, /* var_location */
25044 debug_nothing_void, /* switch_text_section */
25045 + debug_nothing_tree_tree, /* set_name */
25046 0 /* start_end_main_source_file */
25047 };
25048
25049 --- a/gcc/sel-sched-ir.c
25050 +++ b/gcc/sel-sched-ir.c
25051 @@ -5431,6 +5431,7 @@
25052 NULL, /* rgn_rank */
25053 sel_print_insn, /* rgn_print_insn */
25054 contributes_to_priority,
25055 + NULL, /* insn_finishes_block_p */
25056
25057 NULL, NULL,
25058 NULL, NULL,
25059 --- a/gcc/target-def.h
25060 +++ b/gcc/target-def.h
25061 @@ -84,7 +84,7 @@
25062 #define TARGET_ASM_INTERNAL_LABEL default_internal_label
25063 #endif
25064
25065 -#ifndef TARGET_ARM_TTYPE
25066 +#ifndef TARGET_ASM_TTYPE
25067 #define TARGET_ASM_TTYPE hook_bool_rtx_false
25068 #endif
25069
25070 @@ -208,6 +208,10 @@
25071 #define TARGET_EXTRA_LIVE_ON_ENTRY hook_void_bitmap
25072 #endif
25073
25074 +#ifndef TARGET_WARN_FUNC_RESULT
25075 +#define TARGET_WARN_FUNC_RESULT hook_bool_void_true
25076 +#endif
25077 +
25078 #ifndef TARGET_ASM_FILE_START_APP_OFF
25079 #define TARGET_ASM_FILE_START_APP_OFF false
25080 #endif
25081 @@ -383,6 +387,9 @@
25082 #define TARGET_VECTOR_ALIGNMENT_REACHABLE \
25083 default_builtin_vector_alignment_reachable
25084 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM 0
25085 +#define TARGET_VECTOR_MIN_ALIGNMENT \
25086 + default_vector_min_alignment
25087 +#define TARGET_VECTOR_ALWAYS_MISALIGN hook_bool_const_tree_false
25088
25089 #define TARGET_VECTORIZE \
25090 { \
25091 @@ -393,7 +400,9 @@
25092 TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD, \
25093 TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST, \
25094 TARGET_VECTOR_ALIGNMENT_REACHABLE, \
25095 - TARGET_VECTORIZE_BUILTIN_VEC_PERM \
25096 + TARGET_VECTORIZE_BUILTIN_VEC_PERM, \
25097 + TARGET_VECTOR_MIN_ALIGNMENT, \
25098 + TARGET_VECTOR_ALWAYS_MISALIGN, \
25099 }
25100
25101 #define TARGET_DEFAULT_TARGET_FLAGS 0
25102 @@ -504,6 +513,7 @@
25103 #define TARGET_ALLOCATE_INITIAL_VALUE NULL
25104
25105 #define TARGET_UNSPEC_MAY_TRAP_P default_unspec_may_trap_p
25106 +#define TARGET_COMMUTATIVE_OPERAND_PRECEDENCE NULL
25107
25108 #ifndef TARGET_SET_CURRENT_FUNCTION
25109 #define TARGET_SET_CURRENT_FUNCTION hook_void_tree
25110 @@ -532,6 +542,10 @@
25111 #define TARGET_INVALID_CONVERSION hook_constcharptr_const_tree_const_tree_null
25112 #define TARGET_INVALID_UNARY_OP hook_constcharptr_int_const_tree_null
25113 #define TARGET_INVALID_BINARY_OP hook_constcharptr_int_const_tree_const_tree_null
25114 +#define TARGET_INVALID_PARAMETER_TYPE hook_constcharptr_const_tree_null
25115 +#define TARGET_INVALID_RETURN_TYPE hook_constcharptr_const_tree_null
25116 +#define TARGET_PROMOTED_TYPE hook_tree_const_tree_null
25117 +#define TARGET_CONVERT_TO_TYPE hook_tree_tree_tree_null
25118
25119 #define TARGET_FIXED_CONDITION_CODE_REGS hook_bool_uintp_uintp_false
25120
25121 @@ -590,6 +604,7 @@
25122 #define TARGET_ARG_PARTIAL_BYTES hook_int_CUMULATIVE_ARGS_mode_tree_bool_0
25123
25124 #define TARGET_FUNCTION_VALUE default_function_value
25125 +#define TARGET_LIBCALL_VALUE default_libcall_value
25126 #define TARGET_INTERNAL_ARG_POINTER default_internal_arg_pointer
25127 #define TARGET_UPDATE_STACK_BOUNDARY NULL
25128 #define TARGET_GET_DRAP_RTX NULL
25129 @@ -613,6 +628,7 @@
25130 TARGET_ARG_PARTIAL_BYTES, \
25131 TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN, \
25132 TARGET_FUNCTION_VALUE, \
25133 + TARGET_LIBCALL_VALUE, \
25134 TARGET_INTERNAL_ARG_POINTER, \
25135 TARGET_UPDATE_STACK_BOUNDARY, \
25136 TARGET_GET_DRAP_RTX, \
25137 @@ -716,6 +732,11 @@
25138 #define TARGET_CXX_ADJUST_CLASS_AT_DEFINITION hook_void_tree
25139 #endif
25140
25141 +
25142 +#ifndef TARGET_CXX_TTYPE_REF_ENCODE
25143 +#define TARGET_CXX_TTYPE_REF_ENCODE NULL
25144 +#endif
25145 +
25146 #define TARGET_CXX \
25147 { \
25148 TARGET_CXX_GUARD_TYPE, \
25149 @@ -730,7 +751,8 @@
25150 TARGET_CXX_LIBRARY_RTTI_COMDAT, \
25151 TARGET_CXX_USE_AEABI_ATEXIT, \
25152 TARGET_CXX_USE_ATEXIT_FOR_CXA_ATEXIT, \
25153 - TARGET_CXX_ADJUST_CLASS_AT_DEFINITION \
25154 + TARGET_CXX_ADJUST_CLASS_AT_DEFINITION, \
25155 + TARGET_CXX_TTYPE_REF_ENCODE \
25156 }
25157
25158 /* EMUTLS specific */
25159 @@ -886,6 +908,7 @@
25160 TARGET_ADDRESS_COST, \
25161 TARGET_ALLOCATE_INITIAL_VALUE, \
25162 TARGET_UNSPEC_MAY_TRAP_P, \
25163 + TARGET_COMMUTATIVE_OPERAND_PRECEDENCE, \
25164 TARGET_DWARF_REGISTER_SPAN, \
25165 TARGET_INIT_DWARF_REG_SIZES_EXTRA, \
25166 TARGET_FIXED_CONDITION_CODE_REGS, \
25167 @@ -913,6 +936,10 @@
25168 TARGET_INVALID_CONVERSION, \
25169 TARGET_INVALID_UNARY_OP, \
25170 TARGET_INVALID_BINARY_OP, \
25171 + TARGET_INVALID_PARAMETER_TYPE, \
25172 + TARGET_INVALID_RETURN_TYPE, \
25173 + TARGET_PROMOTED_TYPE, \
25174 + TARGET_CONVERT_TO_TYPE, \
25175 TARGET_IRA_COVER_CLASSES, \
25176 TARGET_SECONDARY_RELOAD, \
25177 TARGET_EXPAND_TO_RTL_HOOK, \
25178 @@ -923,6 +950,7 @@
25179 TARGET_EMUTLS, \
25180 TARGET_OPTION_HOOKS, \
25181 TARGET_EXTRA_LIVE_ON_ENTRY, \
25182 + TARGET_WARN_FUNC_RESULT, \
25183 TARGET_UNWIND_TABLES_DEFAULT, \
25184 TARGET_HAVE_NAMED_SECTIONS, \
25185 TARGET_HAVE_SWITCHABLE_BSS_SECTIONS, \
25186 --- a/gcc/target.h
25187 +++ b/gcc/target.h
25188 @@ -473,7 +473,16 @@
25189
25190 /* Target builtin that implements vector permute. */
25191 tree (* builtin_vec_perm) (tree, tree*);
25192 -} vectorize;
25193 +
25194 + /* Return the minimum alignment required to load or store a
25195 + vector of the given type, which may be less than the
25196 + natural alignment of the type. */
25197 + int (* vector_min_alignment) (const_tree);
25198 +
25199 + /* Return true if "movmisalign" patterns should be used for all
25200 + loads/stores from data arrays. */
25201 + bool (* always_misalign) (const_tree);
25202 + } vectorize;
25203
25204 /* The initial value of target_flags. */
25205 int default_target_flags;
25206 @@ -694,6 +703,10 @@
25207 FLAGS has the same meaning as in rtlanal.c: may_trap_p_1. */
25208 int (* unspec_may_trap_p) (const_rtx x, unsigned flags);
25209
25210 + /* Return a value indicating whether an operand of a commutative
25211 + operation is preferred as the first or second operand. */
25212 + int (* commutative_operand_precedence) (const_rtx, int);
25213 +
25214 /* Given a register, this hook should return a parallel of registers
25215 to represent where to find the register pieces. Define this hook
25216 if the register and its mode are represented in Dwarf in
25217 @@ -870,6 +883,10 @@
25218 rtx (*function_value) (const_tree ret_type, const_tree fn_decl_or_type,
25219 bool outgoing);
25220
25221 + /* Return the rtx for the result of a libcall of mode MODE,
25222 + calling the function FN_NAME. */
25223 + rtx (*libcall_value) (enum machine_mode, rtx);
25224 +
25225 /* Return an rtx for the argument pointer incoming to the
25226 current function. */
25227 rtx (*internal_arg_pointer) (void);
25228 @@ -899,6 +916,24 @@
25229 is not permitted on TYPE1 and TYPE2, NULL otherwise. */
25230 const char *(*invalid_binary_op) (int op, const_tree type1, const_tree type2);
25231
25232 + /* Return the diagnostic message string if TYPE is not valid as a
25233 + function parameter type, NULL otherwise. */
25234 + const char *(*invalid_parameter_type) (const_tree type);
25235 +
25236 + /* Return the diagnostic message string if TYPE is not valid as a
25237 + function return type, NULL otherwise. */
25238 + const char *(*invalid_return_type) (const_tree type);
25239 +
25240 + /* If values of TYPE are promoted to some other type when used in
25241 + expressions (analogous to the integer promotions), return that type,
25242 + or NULL_TREE otherwise. */
25243 + tree (*promoted_type) (const_tree type);
25244 +
25245 + /* Convert EXPR to TYPE, if target-specific types with special conversion
25246 + rules are involved. Return the converted expression, or NULL to apply
25247 + the standard conversion rules. */
25248 + tree (*convert_to_type) (tree type, tree expr);
25249 +
25250 /* Return the array of IRA cover classes for the current target. */
25251 const enum reg_class *(*ira_cover_classes) (void);
25252
25253 @@ -977,6 +1012,11 @@
25254 class (eg, tweak visibility or perform any other required
25255 target modifications). */
25256 void (*adjust_class_at_definition) (tree type);
25257 + /* Encode a reference type info, used for catching pointer
25258 + references. The provided expression will be the address of the
25259 + type info object of the type to which a reference is being
25260 + caught. */
25261 + tree (* ttype_ref_encode) (tree);
25262 } cxx;
25263
25264 /* Functions and data for emulated TLS support. */
25265 @@ -1040,6 +1080,10 @@
25266 bits in the bitmap passed in. */
25267 void (*live_on_entry) (bitmap);
25268
25269 + /* Return false if warnings about missing return statements or suspect
25270 + noreturn attributes should be suppressed for the current function. */
25271 + bool (*warn_func_result) (void);
25272 +
25273 /* True if unwinding tables should be generated by default. */
25274 bool unwind_tables_default;
25275
25276 --- a/gcc/targhooks.c
25277 +++ b/gcc/targhooks.c
25278 @@ -441,6 +441,15 @@
25279 return NULL;
25280 }
25281
25282 +tree
25283 +hook_cxx_ttype_ref_in_bit0 (tree exp)
25284 +{
25285 + exp = convert (build_pointer_type (char_type_node), exp);
25286 + exp = pointer_int_sum (PLUS_EXPR, exp, integer_one_node);
25287 +
25288 + return exp;
25289 +}
25290 +
25291 /* Initialize the stack protection decls. */
25292
25293 /* Stack protection related decls living in libgcc. */
25294 @@ -561,6 +570,12 @@
25295 }
25296
25297 rtx
25298 +default_libcall_value (enum machine_mode mode, rtx fun ATTRIBUTE_UNUSED)
25299 +{
25300 + return LIBCALL_VALUE (mode);
25301 +}
25302 +
25303 +rtx
25304 default_internal_arg_pointer (void)
25305 {
25306 /* If the reg that the virtual arg pointer will be translated into is
25307 @@ -712,6 +727,12 @@
25308 return true;
25309 }
25310
25311 +int
25312 +default_vector_min_alignment (const_tree type)
25313 +{
25314 + return TYPE_ALIGN_UNIT (type);
25315 +}
25316 +
25317 bool
25318 default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
25319 {
25320 --- a/gcc/targhooks.h
25321 +++ b/gcc/targhooks.h
25322 @@ -48,6 +48,7 @@
25323
25324 extern tree default_cxx_guard_type (void);
25325 extern tree default_cxx_get_cookie_size (tree);
25326 +extern tree hook_cxx_ttype_ref_in_bit0 (tree);
25327
25328 extern bool hook_pass_by_reference_must_pass_in_stack
25329 (CUMULATIVE_ARGS *, enum machine_mode mode, const_tree, bool);
25330 @@ -71,6 +72,8 @@
25331
25332 extern bool default_builtin_vector_alignment_reachable (const_tree, bool);
25333
25334 +extern int default_vector_min_alignment (const_tree);
25335 +
25336 /* These are here, and not in hooks.[ch], because not all users of
25337 hooks.h include tm.h, and thus we don't have CUMULATIVE_ARGS. */
25338
25339 @@ -87,6 +90,7 @@
25340 (const_tree, const_tree, const_tree);
25341 extern bool hook_bool_const_rtx_commutative_p (const_rtx, int);
25342 extern rtx default_function_value (const_tree, const_tree, bool);
25343 +extern rtx default_libcall_value (enum machine_mode, rtx);
25344 extern rtx default_internal_arg_pointer (void);
25345 #ifdef IRA_COVER_CLASSES
25346 extern const enum reg_class *default_ira_cover_classes (void);
25347 --- a/gcc/timevar.def
25348 +++ b/gcc/timevar.def
25349 @@ -134,6 +134,7 @@
25350 DEFTIMEVAR (TV_PREDCOM , "predictive commoning")
25351 DEFTIMEVAR (TV_TREE_LOOP_INIT , "tree loop init")
25352 DEFTIMEVAR (TV_TREE_LOOP_FINI , "tree loop fini")
25353 +DEFTIMEVAR (TV_TREE_LOOP_PROMOTE , "tree loop index promotion")
25354 DEFTIMEVAR (TV_TREE_CH , "tree copy headers")
25355 DEFTIMEVAR (TV_TREE_SSA_UNCPROP , "tree SSA uncprop")
25356 DEFTIMEVAR (TV_TREE_SSA_TO_NORMAL , "tree SSA to normal")
25357 @@ -141,6 +142,7 @@
25358 DEFTIMEVAR (TV_TREE_COPY_RENAME , "tree rename SSA copies")
25359 DEFTIMEVAR (TV_TREE_SSA_VERIFY , "tree SSA verifier")
25360 DEFTIMEVAR (TV_TREE_STMT_VERIFY , "tree STMT verifier")
25361 +DEFTIMEVAR (TV_TREE_RLS , "tree local static removal")
25362 DEFTIMEVAR (TV_TREE_SWITCH_CONVERSION, "tree switch initialization conversion")
25363 DEFTIMEVAR (TV_CGRAPH_VERIFY , "callgraph verifier")
25364 DEFTIMEVAR (TV_DOM_FRONTIERS , "dominance frontiers")
25365 --- a/gcc/toplev.h
25366 +++ b/gcc/toplev.h
25367 @@ -139,6 +139,7 @@
25368 extern int flag_unroll_all_loops;
25369 extern int flag_unswitch_loops;
25370 extern int flag_cprop_registers;
25371 +extern int flag_remove_local_statics;
25372 extern int time_report;
25373 extern int flag_ira_coalesce;
25374 extern int flag_ira_move_spills;
25375 --- a/gcc/tree.c
25376 +++ b/gcc/tree.c
25377 @@ -4062,6 +4062,7 @@
25378 bool *no_add_attrs)
25379 {
25380 tree node = *pnode;
25381 + bool is_dllimport;
25382
25383 /* These attributes may apply to structure and union types being created,
25384 but otherwise should pass to the declaration involved. */
25385 @@ -4109,9 +4110,11 @@
25386 return NULL_TREE;
25387 }
25388
25389 + is_dllimport = is_attribute_p ("dllimport", name);
25390 +
25391 /* Report error on dllimport ambiguities seen now before they cause
25392 any damage. */
25393 - else if (is_attribute_p ("dllimport", name))
25394 + if (is_dllimport)
25395 {
25396 /* Honor any target-specific overrides. */
25397 if (!targetm.valid_dllimport_attribute_p (node))
25398 @@ -4153,6 +4156,9 @@
25399 if (*no_add_attrs == false)
25400 DECL_DLLIMPORT_P (node) = 1;
25401 }
25402 + else if (DECL_DECLARED_INLINE_P (node))
25403 + /* An exported function, even if inline, must be emitted. */
25404 + DECL_EXTERNAL (node) = 0;
25405
25406 /* Report error if symbol is not accessible at global scope. */
25407 if (!TREE_PUBLIC (node)
25408 --- a/gcc/tree-cfg.c
25409 +++ b/gcc/tree-cfg.c
25410 @@ -47,6 +47,7 @@
25411 #include "value-prof.h"
25412 #include "pointer-set.h"
25413 #include "tree-inline.h"
25414 +#include "target.h"
25415
25416 /* This file contains functions for building the Control Flow Graph (CFG)
25417 for a function tree. */
25418 @@ -7052,6 +7053,9 @@
25419 edge e;
25420 edge_iterator ei;
25421
25422 + if (!targetm.warn_func_result())
25423 + return 0;
25424 +
25425 /* If we have a path to EXIT, then we do return. */
25426 if (TREE_THIS_VOLATILE (cfun->decl)
25427 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0)
25428 --- a/gcc/tree.h
25429 +++ b/gcc/tree.h
25430 @@ -381,8 +381,10 @@
25431 unsigned lang_flag_5 : 1;
25432 unsigned lang_flag_6 : 1;
25433 unsigned visited : 1;
25434 + unsigned packed_flag : 1;
25435 + unsigned user_align : 1;
25436
25437 - unsigned spare : 23;
25438 + unsigned spare : 21;
25439
25440 union tree_ann_d *ann;
25441 };
25442 @@ -2140,7 +2142,7 @@
25443
25444 /* 1 if the alignment for this type was requested by "aligned" attribute,
25445 0 if it is the default for this type. */
25446 -#define TYPE_USER_ALIGN(NODE) (TYPE_CHECK (NODE)->type.user_align)
25447 +#define TYPE_USER_ALIGN(NODE) (TYPE_CHECK (NODE)->common.base.user_align)
25448
25449 /* The alignment for NODE, in bytes. */
25450 #define TYPE_ALIGN_UNIT(NODE) (TYPE_ALIGN (NODE) / BITS_PER_UNIT)
25451 @@ -2246,7 +2248,7 @@
25452
25453 /* Indicated that objects of this type should be laid out in as
25454 compact a way as possible. */
25455 -#define TYPE_PACKED(NODE) (TYPE_CHECK (NODE)->type.packed_flag)
25456 +#define TYPE_PACKED(NODE) (TYPE_CHECK (NODE)->common.base.packed_flag)
25457
25458 /* Used by type_contains_placeholder_p to avoid recomputation.
25459 Values are: 0 (unknown), 1 (false), 2 (true). Never access
25460 @@ -2265,17 +2267,16 @@
25461 tree attributes;
25462 unsigned int uid;
25463
25464 - unsigned int precision : 9;
25465 - ENUM_BITFIELD(machine_mode) mode : 7;
25466 -
25467 - unsigned string_flag : 1;
25468 + unsigned int precision : 10;
25469 unsigned no_force_blk_flag : 1;
25470 unsigned needs_constructing_flag : 1;
25471 unsigned transparent_union_flag : 1;
25472 - unsigned packed_flag : 1;
25473 unsigned restrict_flag : 1;
25474 unsigned contains_placeholder_bits : 2;
25475
25476 + ENUM_BITFIELD(machine_mode) mode : 8;
25477 +
25478 + unsigned string_flag : 1;
25479 unsigned lang_flag_0 : 1;
25480 unsigned lang_flag_1 : 1;
25481 unsigned lang_flag_2 : 1;
25482 @@ -2283,7 +2284,6 @@
25483 unsigned lang_flag_4 : 1;
25484 unsigned lang_flag_5 : 1;
25485 unsigned lang_flag_6 : 1;
25486 - unsigned user_align : 1;
25487
25488 unsigned int align;
25489 alias_set_type alias_set;
25490 @@ -2584,7 +2584,7 @@
25491 #define DECL_ALIGN_UNIT(NODE) (DECL_ALIGN (NODE) / BITS_PER_UNIT)
25492 /* Set if the alignment of this DECL has been set by the user, for
25493 example with an 'aligned' attribute. */
25494 -#define DECL_USER_ALIGN(NODE) (DECL_COMMON_CHECK (NODE)->decl_common.user_align)
25495 +#define DECL_USER_ALIGN(NODE) (DECL_COMMON_CHECK (NODE)->common.base.user_align)
25496 /* Holds the machine mode corresponding to the declaration of a variable or
25497 field. Always equal to TYPE_MODE (TREE_TYPE (decl)) except for a
25498 FIELD_DECL. */
25499 @@ -2621,7 +2621,7 @@
25500 example, for a FUNCTION_DECL, DECL_SAVED_TREE may be non-NULL and
25501 DECL_EXTERNAL may be true simultaneously; that can be the case for
25502 a C99 "extern inline" function. */
25503 -#define DECL_EXTERNAL(NODE) (DECL_COMMON_CHECK (NODE)->decl_common.decl_flag_2)
25504 +#define DECL_EXTERNAL(NODE) (DECL_COMMON_CHECK (NODE)->decl_common.decl_flag_1)
25505
25506 /* Nonzero in a ..._DECL means this variable is ref'd from a nested function.
25507 For VAR_DECL nodes, PARM_DECL nodes, and FUNCTION_DECL nodes.
25508 @@ -2696,7 +2696,6 @@
25509 unsigned ignored_flag : 1;
25510 unsigned abstract_flag : 1;
25511 unsigned artificial_flag : 1;
25512 - unsigned user_align : 1;
25513 unsigned preserve_flag: 1;
25514 unsigned debug_expr_is_from : 1;
25515
25516 @@ -2712,22 +2711,20 @@
25517 /* In LABEL_DECL, this is DECL_ERROR_ISSUED.
25518 In VAR_DECL and PARM_DECL, this is DECL_REGISTER. */
25519 unsigned decl_flag_0 : 1;
25520 - /* In FIELD_DECL, this is DECL_PACKED. */
25521 - unsigned decl_flag_1 : 1;
25522 /* In FIELD_DECL, this is DECL_BIT_FIELD
25523 In VAR_DECL and FUNCTION_DECL, this is DECL_EXTERNAL.
25524 - In TYPE_DECL, this is TYPE_DECL_SUPRESS_DEBUG. */
25525 - unsigned decl_flag_2 : 1;
25526 + In TYPE_DECL, this is TYPE_DECL_SUPPRESS_DEBUG. */
25527 + unsigned decl_flag_1 : 1;
25528 /* In FIELD_DECL, this is DECL_NONADDRESSABLE_P
25529 - In VAR_DECL and PARM_DECL, this is DECL_HAS_VALUE_EXPR. */
25530 - unsigned decl_flag_3 : 1;
25531 + In VAR_DECL and PARM_DECL, this is DECL_HAS_VALUE_EXPR_P. */
25532 + unsigned decl_flag_2 : 1;
25533 /* Logically, these two would go in a theoretical base shared by var and
25534 parm decl. */
25535 unsigned gimple_reg_flag : 1;
25536 /* In a DECL with pointer type, set if no TBAA should be done. */
25537 unsigned no_tbaa_flag : 1;
25538 /* Padding so that 'align' can be on a 32-bit boundary. */
25539 - unsigned decl_common_unused : 2;
25540 + unsigned decl_common_unused : 4;
25541
25542 unsigned int align : 24;
25543 /* DECL_OFFSET_ALIGN, used only for FIELD_DECLs. */
25544 @@ -2751,7 +2748,7 @@
25545 decl itself. This should only be used for debugging; once this field has
25546 been set, the decl itself may not legitimately appear in the function. */
25547 #define DECL_HAS_VALUE_EXPR_P(NODE) \
25548 - (TREE_CHECK2 (NODE, VAR_DECL, PARM_DECL)->decl_common.decl_flag_3)
25549 + (TREE_CHECK2 (NODE, VAR_DECL, PARM_DECL)->decl_common.decl_flag_2)
25550 #define DECL_VALUE_EXPR(NODE) \
25551 (decl_value_expr_lookup (DECL_WRTL_CHECK (NODE)))
25552 #define SET_DECL_VALUE_EXPR(NODE, VAL) \
25553 @@ -2830,11 +2827,11 @@
25554 #define DECL_FCONTEXT(NODE) (FIELD_DECL_CHECK (NODE)->field_decl.fcontext)
25555
25556 /* In a FIELD_DECL, indicates this field should be bit-packed. */
25557 -#define DECL_PACKED(NODE) (FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_1)
25558 +#define DECL_PACKED(NODE) (FIELD_DECL_CHECK (NODE)->common.base.packed_flag)
25559
25560 /* Nonzero in a FIELD_DECL means it is a bit field, and must be accessed
25561 specially. */
25562 -#define DECL_BIT_FIELD(NODE) (FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_2)
25563 +#define DECL_BIT_FIELD(NODE) (FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_1)
25564
25565 /* Used in a FIELD_DECL to indicate that we cannot form the address of
25566 this component. This makes it possible for Type-Based Alias Analysis
25567 @@ -2852,7 +2849,7 @@
25568 accesses to s.i must not be given the alias set of the type of 'i'
25569 (int) but instead directly that of the type of 's' (struct S). */
25570 #define DECL_NONADDRESSABLE_P(NODE) \
25571 - (FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_3)
25572 + (FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_2)
25573
25574 struct tree_field_decl GTY(())
25575 {
25576 @@ -3337,7 +3334,7 @@
25577 into stabs. Instead it will generate cross reference ('x') of names.
25578 This uses the same flag as DECL_EXTERNAL. */
25579 #define TYPE_DECL_SUPPRESS_DEBUG(NODE) \
25580 - (TYPE_DECL_CHECK (NODE)->decl_common.decl_flag_2)
25581 + (TYPE_DECL_CHECK (NODE)->decl_common.decl_flag_1)
25582
25583 /* Getter of the imported declaration associated to the
25584 IMPORTED_DECL node. */
25585 --- a/gcc/tree-pass.h
25586 +++ b/gcc/tree-pass.h
25587 @@ -323,6 +323,7 @@
25588 extern struct gimple_opt_pass pass_empty_loop;
25589 extern struct gimple_opt_pass pass_record_bounds;
25590 extern struct gimple_opt_pass pass_graphite_transforms;
25591 +extern struct gimple_opt_pass pass_promote_indices;
25592 extern struct gimple_opt_pass pass_if_conversion;
25593 extern struct gimple_opt_pass pass_loop_distribution;
25594 extern struct gimple_opt_pass pass_vectorize;
25595 @@ -388,6 +389,7 @@
25596 extern struct gimple_opt_pass pass_rebuild_cgraph_edges;
25597 extern struct gimple_opt_pass pass_build_cgraph_edges;
25598 extern struct gimple_opt_pass pass_reset_cc_flags;
25599 +extern struct gimple_opt_pass pass_remove_local_statics;
25600
25601 /* IPA Passes */
25602 extern struct ipa_opt_pass pass_ipa_inline;
25603 --- a/gcc/tree-sra.c
25604 +++ b/gcc/tree-sra.c
25605 @@ -274,6 +274,12 @@
25606 != TYPE_PRECISION (TREE_TYPE (t))))
25607 goto fail;
25608
25609 + /* Disable optimization of bitfields on BITS_BIG_ENDIAN
25610 + architectures. SRA doesn't properly handle padding bits
25611 + at the bottom, see issue6713. */
25612 + if (DECL_BIT_FIELD (t) && BITS_BIG_ENDIAN)
25613 + goto fail;
25614 +
25615 saw_one_field = true;
25616 }
25617
25618 --- /dev/null
25619 +++ b/gcc/tree-ssa-loop-promote.c
25620 @@ -0,0 +1,1628 @@
25621 +/* Promotion of shorter-than-word-size loop indices.
25622 + Copyright (C) 2009 Free Software Foundation, Inc.
25623 +
25624 +This file is part of GCC.
25625 +
25626 +GCC is free software; you can redistribute it and/or modify it
25627 +under the terms of the GNU General Public License as published by the
25628 +Free Software Foundation; either version 3, or (at your option) any
25629 +later version.
25630 +
25631 +GCC is distributed in the hope that it will be useful, but WITHOUT
25632 +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
25633 +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
25634 +for more details.
25635 +
25636 +You should have received a copy of the GNU General Public License
25637 +along with GCC; see the file COPYING3. If not see
25638 +<http://www.gnu.org/licenses/>. */
25639 +
25640 +/* This pass finds loop indices that are declared as
25641 + shorter-than-word-size and replaces them with word-sized loop
25642 + indices. (It assumes that word-sized quantities are the most
25643 + efficient type on which to do arithmetic.) The loop optimization
25644 + machinery has a difficult time seeing through the casts required to
25645 + promote such indices to word-sized quantities for memory addressing
25646 + and/or preserving the semantics of the source language (such as C).
25647 + The transformation also helps eliminate unnecessary
25648 + {sign,zero}-extensions required for the same.
25649 +
25650 + Although this is most naturally expressed as a loop optimization
25651 + pass, we choose to place this pass some ways before the loop
25652 + optimization passes proper, so that other scalar optimizations will
25653 + run on our "cleaned-up" code. This decision has the negative of
25654 + requiring us to build and destroy all the loop optimization
25655 + infrastructure.
25656 +
25657 + The algorithm is relatively simple. For each single-exit loop, we
25658 + identify the loop index variable. If the loop index variable is
25659 + shorter than the word size, then we have a candidate for promotion.
25660 + We determine whether the scalar evolution of the loop index fits a
25661 + particular pattern (incremented by 1, compared against a
25662 + similarly-typed loop bound, and only modified by a single increment
25663 + within the loop), as well as examining the uses of the loop index to
25664 + ensure we are able to safely promote those uses (e.g. the loop index
25665 + must not be stored to memory or passed to function calls). If these
25666 + conditions are satisfied, we create an appropriate word-sized type
25667 + and replace all uses and defs of the loop index variable with the new
25668 + variable. */
25669 +
25670 +#include "config.h"
25671 +#include "system.h"
25672 +#include "coretypes.h"
25673 +#include "tm.h"
25674 +
25675 +#include "toplev.h"
25676 +#include "rtl.h"
25677 +#include "tm_p.h"
25678 +#include "hard-reg-set.h"
25679 +#include "obstack.h"
25680 +#include "basic-block.h"
25681 +#include "pointer-set.h"
25682 +#include "intl.h"
25683 +
25684 +#include "tree.h"
25685 +#include "gimple.h"
25686 +#include "hashtab.h"
25687 +#include "diagnostic.h"
25688 +#include "tree-flow.h"
25689 +#include "tree-dump.h"
25690 +#include "cfgloop.h"
25691 +#include "flags.h"
25692 +#include "timevar.h"
25693 +#include "tree-pass.h"
25694 +
25695 +struct promote_info {
25696 + /* The loop being analyzed. */
25697 + struct loop *loop;
25698 +
25699 + /* The GIMPLE_COND controlling exit from the loop. */
25700 + gimple exit_expr;
25701 +
25702 + /* The loop index variable's SSA_NAME that is defined in a phi node in
25703 + LOOP->HEADER. Note that this SSA_NAME may be different than the
25704 + one appearing in EXIT_EXPR. */
25705 + tree loop_index_name;
25706 +
25707 + /* The bound of the loop. */
25708 + tree loop_limit;
25709 +
25710 + /* Whether we've warned about things with
25711 + warn_unsafe_loop_optimizations. */
25712 + bool warned;
25713 +
25714 + /* LOOP_INDEX_NAME's underlying VAR_DECL. */
25715 + tree var_decl;
25716 +
25717 + /* The types to which defs/uses of LOOP_INDEX_NAME are cast via
25718 + NOP_EXPRs. */
25719 + VEC(tree, heap) *cast_types;
25720 +
25721 + /* The number of times we have seen a cast to the corresponding type
25722 + (as determined by types_compatible_p) in CAST_TYPES. */
25723 + VEC(int, heap) *cast_counts;
25724 +
25725 + /* Whether LOOP_INDEX_NAME is suitable for promotion. */
25726 + bool can_be_promoted_p;
25727 +
25728 + /* If CAN_BE_PROMOTED_P, the promoted type. */
25729 + tree promoted_type;
25730 +
25731 + /* If CAN_BE_PROMOTED_P, the promoted VAR_DECL. */
25732 + tree promoted_var;
25733 +};
25734 +
25735 +/* A set of `struct promote_info'. */
25736 +
25737 +static struct pointer_set_t *promotion_info;
25738 +
25739 +/* A set of all potentially promotable SSA_NAMEs, used for quick
25740 +decision-making during analysis. */
25741 +
25742 +static struct pointer_set_t *promotable_names;
25743 +
25744 +/* A map from SSA_NAMEs to the VAR_DECL to which they will be
25745 + promoted. */
25746 +
25747 +static struct pointer_map_t *variable_map;
25748 +
25749 +/* A set of the stmts that we have already rebuilt with promoted variables. */
25750 +
25751 +static struct pointer_set_t *promoted_stmts;
25752 +
25753 +\f
25754 +/* Add CASTED to PI->CAST_TYPES if we haven't seen CASTED before. */
25755 +
25756 +static void
25757 +add_casted_type (struct promote_info *pi, tree casted)
25758 +{
25759 + int i;
25760 + tree type;
25761 +
25762 + /* For this information to be useful later, CASTED must be wider than
25763 + the type of the variable. */
25764 + if (TYPE_PRECISION (casted) <= TYPE_PRECISION (TREE_TYPE (pi->var_decl)))
25765 + return;
25766 +
25767 + for (i = 0; VEC_iterate (tree, pi->cast_types, i, type); i++)
25768 + if (types_compatible_p (casted, type))
25769 + {
25770 + int c = VEC_index(int, pi->cast_counts, i);
25771 + VEC_replace(int, pi->cast_counts, i, ++c);
25772 + return;
25773 + }
25774 +
25775 + /* Haven't seen the type before. */
25776 + VEC_safe_push (tree, heap, pi->cast_types, casted);
25777 + VEC_safe_push (int, heap, pi->cast_counts, 1);
25778 +}
25779 +
25780 +/* Return the most-casted-to type in PI->CAST_TYPES. Return an
25781 + appropriately signed variant of size_type_node if the variable wasn't
25782 + cast in some fashion. */
25783 +
25784 +static tree
25785 +choose_profitable_promoted_type (struct promote_info *pi)
25786 +{
25787 + int i;
25788 + int count;
25789 + tree type = NULL_TREE;
25790 + int maxuse = -1;
25791 +
25792 + for (i = 0; VEC_iterate (int, pi->cast_counts, i, count); i++)
25793 + if (count > maxuse)
25794 + {
25795 + maxuse = count;
25796 + type = VEC_index (tree, pi->cast_types, i);
25797 + }
25798 +
25799 + if (type == NULL_TREE)
25800 + {
25801 + if (dump_file)
25802 + {
25803 + fprintf (dump_file, "Warning, failed to find upcast type for ");
25804 + print_generic_expr (dump_file, pi->loop_index_name, 0);
25805 + fprintf (dump_file, "\n");
25806 + }
25807 + return (TYPE_UNSIGNED (TREE_TYPE (pi->var_decl))
25808 + ? size_type_node
25809 + : signed_type_for (size_type_node));
25810 + }
25811 + else
25812 + return signed_type_for (type);
25813 +}
25814 +
25815 +/* Intuit the loop index for LOOP from PHI. There must be a path that
25816 + only goes through NOP_EXPRs or CONVERT_EXPRs from the result of PHI
25817 + to one of the operands of COND. If such a path cannot be found,
25818 + return NULL_TREE. If LIMIT is not NULL and a path can be found,
25819 + store the other operand of COND into LIMIT. */
25820 +
25821 +static tree
25822 +find_promotion_candidate_from_phi (struct loop *loop, gimple cond,
25823 + gimple phi, tree *limit)
25824 +{
25825 + tree op0, op1;
25826 + tree result, candidate;
25827 +
25828 + result = candidate = PHI_RESULT (phi);
25829 + /* Must be an integer variable. */
25830 + if (TREE_CODE (TREE_TYPE (candidate)) != INTEGER_TYPE)
25831 + return NULL_TREE;
25832 +
25833 + op0 = gimple_cond_lhs (cond);
25834 + op1 = gimple_cond_rhs (cond);
25835 +
25836 + /* See if there's a path from CANDIDATE to an operand of COND. */
25837 + while (true)
25838 + {
25839 + use_operand_p use;
25840 + imm_use_iterator iui;
25841 + gimple use_stmt = NULL;
25842 +
25843 + if (candidate == op0)
25844 + {
25845 + if (limit) *limit = op1;
25846 + break;
25847 + }
25848 + if (candidate == op1)
25849 + {
25850 + if (limit) *limit = op0;
25851 + break;
25852 + }
25853 +
25854 + /* Find a single use in the loop header. Give up if there's
25855 + multiple ones. */
25856 + FOR_EACH_IMM_USE_FAST (use, iui, candidate)
25857 + {
25858 + gimple stmt = USE_STMT (use);
25859 +
25860 + if (gimple_bb (stmt) == loop->header)
25861 + {
25862 + if (use_stmt)
25863 + {
25864 + if (dump_file)
25865 + {
25866 + fprintf (dump_file, "Rejecting ");
25867 + print_generic_expr (dump_file, candidate, 0);
25868 + fprintf (dump_file, " because it has multiple uses in the loop header (bb #%d).\n",
25869 + loop->header->index);
25870 + fprintf (dump_file, "first use: ");
25871 + print_gimple_stmt (dump_file, use_stmt, 0, 0);
25872 + fprintf (dump_file, "\nsecond use: ");
25873 + print_gimple_stmt (dump_file, stmt, 0, 0);
25874 + fprintf (dump_file, "\n(possibly more, but unanalyzed)\n");
25875 + }
25876 + return NULL_TREE;
25877 + }
25878 + else
25879 + use_stmt = stmt;
25880 + }
25881 + }
25882 +
25883 + /* No uses in the loop header, bail. */
25884 + if (use_stmt == NULL)
25885 + return NULL_TREE;
25886 +
25887 + if (gimple_code (use_stmt) != GIMPLE_ASSIGN
25888 + || TREE_CODE (gimple_assign_lhs (use_stmt)) != SSA_NAME
25889 + || (gimple_assign_rhs_code (use_stmt) != NOP_EXPR
25890 + && gimple_assign_rhs_code (use_stmt) != CONVERT_EXPR))
25891 + {
25892 + if (dump_file)
25893 + {
25894 + fprintf (dump_file, "Rejecting ");
25895 + print_generic_expr (dump_file, candidate, 0);
25896 + fprintf (dump_file, " because of use in ");
25897 + print_gimple_stmt (dump_file, use_stmt, 0, 0);
25898 + fprintf (dump_file, "\n");
25899 + }
25900 + return NULL_TREE;
25901 + }
25902 +
25903 + candidate = gimple_assign_lhs (use_stmt);
25904 + }
25905 +
25906 + /* CANDIDATE is now what we believe to be the loop index variable. There
25907 + are two possibilities:
25908 +
25909 + - CANDIDATE is not the "true" loop index variable, but rather is a
25910 + promoted version of RESULT, done for purposes of satisfying a
25911 + language's semantics;
25912 +
25913 + - CANDIDATE is the "true" loop index variable. */
25914 + if (!types_compatible_p (TREE_TYPE (result), TREE_TYPE (candidate)))
25915 + candidate = result;
25916 +
25917 + /* The type of candidate must be "short" to consider promoting it. */
25918 + if (TREE_CODE (TREE_TYPE (candidate)) != INTEGER_TYPE
25919 + || TYPE_PRECISION (TREE_TYPE (candidate)) >= TYPE_PRECISION (size_type_node))
25920 + return NULL_TREE;
25921 +
25922 + return candidate;
25923 +}
25924 +
25925 +/* Find the loop index variable of LOOP. LOOP's exit is controlled by
25926 + the COND_EXPR EXPR. If we can't determine what the loop index
25927 + variable is, or EXPR does not appear to be analyzable, then return
25928 + NULL_TREE. */
25929 +
25930 +static tree
25931 +find_promotion_candidate (struct loop *loop, gimple cond, tree *limit)
25932 +{
25933 + tree candidate = NULL_TREE;
25934 + gimple_stmt_iterator gsi;
25935 +
25936 + switch (gimple_cond_code (cond))
25937 + {
25938 + case GT_EXPR:
25939 + case GE_EXPR:
25940 + case NE_EXPR:
25941 + case LT_EXPR:
25942 + case LE_EXPR:
25943 + break;
25944 +
25945 + default:
25946 + return NULL_TREE;
25947 + }
25948 +
25949 + /* We'd like to examine COND and intuit the loop index variable from
25950 + there. Instead, we're going to start from the phi nodes in BB and
25951 + attempt to work our way forwards to one of the operands of COND,
25952 + since starting from COND might yield an upcast loop index. If we
25953 + find multiple phi nodes whose results reach COND, then give up. */
25954 + for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
25955 + {
25956 + gimple phi = gsi_stmt (gsi);
25957 + tree t = find_promotion_candidate_from_phi (loop, cond, phi, limit);
25958 +
25959 + if (t == NULL_TREE)
25960 + continue;
25961 + else if (candidate == NULL_TREE)
25962 + candidate = t;
25963 + else
25964 + {
25965 + if (dump_file)
25966 + {
25967 + fprintf (dump_file, "Can't find a candidate from ");
25968 + print_gimple_stmt (dump_file, cond, 0, 0);
25969 + fprintf (dump_file, "\n because too many phi node results reach the condition.\n");
25970 + }
25971 + return NULL_TREE;
25972 + }
25973 + }
25974 +
25975 + return candidate;
25976 +}
25977 +
25978 +/* Return true if X is something that could be promoted. */
25979 +
25980 +static bool
25981 +could_be_promoted (tree x)
25982 +{
25983 + return (TREE_CODE (x) == INTEGER_CST
25984 + || (TREE_CODE (x) == SSA_NAME
25985 + && pointer_set_contains (promotable_names, x)));
25986 +}
25987 +
25988 +/* Examine the RHS of STMT's suitability with respect to being able to
25989 + promote VAR. */
25990 +
25991 +static bool
25992 +check_rhs_for_promotability (struct promote_info *pi, tree var, gimple stmt,
25993 + bool is_assign)
25994 +{
25995 + enum tree_code subcode = gimple_assign_rhs_code (stmt);
25996 +
25997 + bool ok = true;
25998 +
25999 + switch (subcode)
26000 + {
26001 + case PLUS_EXPR:
26002 + case MINUS_EXPR:
26003 + case MULT_EXPR:
26004 + case EQ_EXPR:
26005 + case NE_EXPR:
26006 + case LT_EXPR:
26007 + case LE_EXPR:
26008 + case GT_EXPR:
26009 + case GE_EXPR:
26010 + {
26011 + tree op0 = gimple_assign_rhs1 (stmt);
26012 + tree op1 = gimple_assign_rhs2 (stmt);
26013 +
26014 + ok = ((op0 == var && could_be_promoted (op1))
26015 + || (op1 == var && could_be_promoted (op0)));
26016 + break;
26017 + }
26018 + case COND_EXPR:
26019 + if (gimple_expr_type (stmt) == NULL
26020 + || gimple_expr_type (stmt) == void_type_node)
26021 + ok = true;
26022 + else
26023 + /* This is conservative; it's possible that these sorts of nodes
26024 + could be promoted, but we'd have to be very careful about
26025 + checking in which parts of the COND_EXPR the promotable
26026 + variable(s) are. */
26027 + ok = false;
26028 + break;
26029 + case SSA_NAME:
26030 + {
26031 + tree expr = gimple_assign_rhs1 (stmt);
26032 + ok = (expr == var || could_be_promoted (expr));
26033 + }
26034 + break;
26035 + case INTEGER_CST:
26036 + break;
26037 + case NOP_EXPR:
26038 + case CONVERT_EXPR:
26039 + if (!is_assign)
26040 + {
26041 + add_casted_type (pi, gimple_expr_type (stmt));
26042 + break;
26043 + }
26044 + /* Fallthrough. */
26045 + default:
26046 + ok = false;
26047 + break;
26048 + }
26049 +
26050 + return ok;
26051 +}
26052 +
26053 +/* Analyze the loop index VAR for promotability. The rules for
26054 + promotability are:
26055 +
26056 + For uses:
26057 +
26058 + - The underlying variable may be used in NOP_EXPRs.
26059 +
26060 + - The underlying variable may be used in simple arithmetic
26061 + expressions so long as the other parts are potentially promotable
26062 + variables or constants (so we don't go willy-nilly on promoting
26063 + things).
26064 +
26065 + - The underlying variable may not be stored to memory.
26066 +
26067 + - All uses must occur inside the loop.
26068 +
26069 + For defs:
26070 +
26071 + - The underlying variable may not be loaded from memory; and
26072 +
26073 + - The underlying variable may only be formed from expressions
26074 + involving potentially promotable variables or constants.
26075 +
26076 + Note that defs may occur outside of the loop; we do this to handle
26077 + initial conditions before entering the loop. */
26078 +
26079 +static void
26080 +analyze_loop_index_uses (tree var, struct promote_info *pi)
26081 +{
26082 + imm_use_iterator iui;
26083 + use_operand_p use;
26084 + gimple bad_stmt = NULL;
26085 + const char *reason = NULL;
26086 +
26087 + FOR_EACH_IMM_USE_FAST (use, iui, var)
26088 + {
26089 + basic_block bb;
26090 + gimple use_stmt = USE_STMT (use);
26091 +
26092 + /* Uses must exist only within the loop. */
26093 + bb = gimple_bb (use_stmt);
26094 +
26095 + if (dump_file)
26096 + {
26097 + fprintf (dump_file, "Checking ");
26098 + print_gimple_stmt (dump_file, use_stmt, 0, 0);
26099 + fprintf (dump_file, "\n");
26100 + }
26101 +
26102 + if (!flow_bb_inside_loop_p (pi->loop, bb))
26103 + {
26104 + bad_stmt = use_stmt;
26105 + reason = " is involved in stmt outside loop ";
26106 + break;
26107 + }
26108 +
26109 + /* We cannot store the index to memory. */
26110 + if (gimple_references_memory_p (use_stmt))
26111 + {
26112 + bad_stmt = use_stmt;
26113 + reason = " is stored to memory in ";
26114 + break;
26115 + }
26116 +
26117 + if (gimple_code (use_stmt) == GIMPLE_CALL)
26118 + {
26119 + /* We cannot pass the variable to a function. */
26120 + bad_stmt = use_stmt;
26121 + reason = " is passed to function in ";
26122 + break;
26123 + }
26124 + else if (gimple_code (use_stmt) == GIMPLE_ASSIGN)
26125 + {
26126 + tree lhs = gimple_assign_lhs (use_stmt);
26127 +
26128 + if (!check_rhs_for_promotability (pi, var, use_stmt,
26129 + /*is_assign=*/false))
26130 + {
26131 + bad_stmt = use_stmt;
26132 + reason = " is involved in non-promotable expression ";
26133 + break;
26134 + }
26135 + else if ((TREE_CODE_CLASS (gimple_assign_rhs_code (use_stmt)) == tcc_binary
26136 + || gimple_assign_rhs_code (use_stmt) == SSA_NAME)
26137 + && !could_be_promoted (lhs))
26138 + {
26139 + bad_stmt = use_stmt;
26140 + reason = " is being assigned to non-promotable variable ";
26141 + break;
26142 + }
26143 + }
26144 + else if (gimple_code (use_stmt) != GIMPLE_COND
26145 + && gimple_code (use_stmt) != GIMPLE_PHI)
26146 + {
26147 + /* Use of the variable in some statement we don't know how to
26148 + analyze. */
26149 + bad_stmt = use_stmt;
26150 + reason = " is used in unanalyzable expression in ";
26151 + break;
26152 + }
26153 + }
26154 +
26155 + if (bad_stmt && reason)
26156 + {
26157 + if (dump_file)
26158 + {
26159 + fprintf (dump_file, "Loop index ");
26160 + print_generic_expr (dump_file, var, 0);
26161 + fprintf (dump_file, "%s", reason);
26162 + print_gimple_stmt (dump_file, bad_stmt, 0, 0);
26163 + fprintf (dump_file, "\n");
26164 + }
26165 + pi->can_be_promoted_p = false;
26166 + }
26167 +}
26168 +
26169 +/* Check that the uses and def of VAR, defined in STMT, conform to the
26170 + rules given above. */
26171 +
26172 +static bool
26173 +analyze_loop_index (tree var, gimple stmt, void *data)
26174 +{
26175 + struct promote_info *pi = (struct promote_info *) data;
26176 +
26177 + if (dump_file)
26178 + {
26179 + fprintf (dump_file, "Analyzing loop index ");
26180 + print_generic_expr (dump_file, var, 0);
26181 + fprintf (dump_file, " defined in ");
26182 + print_gimple_stmt (dump_file, stmt, 0, 0);
26183 + fprintf (dump_file, "\n");
26184 + }
26185 +
26186 + /* Check the definition. */
26187 + switch (gimple_code (stmt))
26188 + {
26189 + case GIMPLE_PHI:
26190 + /* Phi nodes are OK. */
26191 + break;
26192 +
26193 + case GIMPLE_ASSIGN:
26194 +      if (check_rhs_for_promotability (pi, var, stmt,
26195 + /*is_assign=*/true))
26196 + break;
26197 + /* Fallthrough. */
26198 +
26199 + default:
26200 + /* Something we can't handle or the variable is being loaded from
26201 + memory. */
26202 + pi->can_be_promoted_p = false;
26203 + goto done;
26204 + }
26205 +
26206 + if (gimple_code (stmt) == GIMPLE_PHI)
26207 + {
26208 + unsigned int i;
26209 +
26210 + for (i = 0; i < gimple_phi_num_args (stmt); i++)
26211 + {
26212 + tree arg = PHI_ARG_DEF (stmt, i);
26213 +
26214 + if (TREE_CODE (arg) == SSA_NAME)
26215 + pointer_set_insert (promotable_names, arg);
26216 + }
26217 +
26218 + analyze_loop_index_uses (PHI_RESULT (stmt), pi);
26219 + }
26220 + else
26221 + analyze_loop_index_uses (var, pi);
26222 +
26223 + /* Only worth continuing if we think the loop index can be
26224 + promoted. */
26225 + done:
26226 + if (dump_file)
26227 + {
26228 + fprintf (dump_file, "Done analyzing ");
26229 + print_generic_expr (dump_file, var, 0);
26230 + fprintf (dump_file, " defined in ");
26231 + print_gimple_stmt (dump_file, stmt, 0, 0);
26232 + fprintf (dump_file, "...%s to analyze\n\n",
26233 + pi->can_be_promoted_p ? "continuing" : "not continuing");
26234 + }
26235 + return !pi->can_be_promoted_p;
26236 +}
26237 +
26238 +/* Determine whether T is an INTEGER_CST or a single-use SSA_NAME
26239 + defined as the result of a NOP_EXPR or CONVERT_EXPR. Return the
26240 + operand of the NOP_EXPR or CONVERT_EXPR if so. */
26241 +
26242 +static tree
26243 +upcast_operand_p (tree t)
26244 +{
26245 + gimple def;
26246 +
26247 + if (TREE_CODE (t) == INTEGER_CST)
26248 + return t;
26249 +
26250 + if (TREE_CODE (t) != SSA_NAME
26251 + || !has_single_use (t))
26252 + return NULL_TREE;
26253 +
26254 + def = SSA_NAME_DEF_STMT (t);
26255 + if (gimple_code (def) != GIMPLE_ASSIGN)
26256 + return NULL_TREE;
26257 +
26258 + if (gimple_assign_rhs_code (def) != CONVERT_EXPR
26259 + && gimple_assign_rhs_code (def) != NOP_EXPR)
26260 + return NULL_TREE;
26261 +
26262 + return gimple_assign_rhs1 (def);
26263 +}
26264 +
26265 +/* Check for the idiom:
26266 +
26267 + short x, y;
26268 + unsigned short x.2, y.2, tmp;
26269 + ...
26270 + x.2 = (unsigned short) x;
26271 + y.2 = (unsigned short) y;
26272 + tmp = x.2 + y.2;
26273 + x = (short) tmp;
26274 +
26275 + which is generated by convert for avoiding signed arithmetic
26276 + overflow. RHS is TMP in the above statement. If RHS is
26277 + defined via such an idiom, store x and y into *OP0 and *OP1,
26278 + respectively. We permit y.2 to be a constant if necessary. */
26279 +
26280 +static bool
26281 +signed_arithmetic_overflow_idiom_p (tree rhs, tree *op0, tree *op1)
26282 +{
26283 + gimple op_stmt = SSA_NAME_DEF_STMT (rhs);
26284 + tree x2, y2;
26285 + bool yes = false;
26286 + enum tree_code code;
26287 +
26288 + if (!has_single_use (rhs)
26289 + || gimple_code (op_stmt) != GIMPLE_ASSIGN)
26290 + goto done;
26291 +
26292 + /* This could probably profitably be expanded to consider
26293 + MINUS_EXPR, MULT_EXPR, etc. */
26294 + code = gimple_assign_rhs_code (op_stmt);
26295 + if (code != PLUS_EXPR)
26296 + goto done;
26297 + x2 = gimple_assign_rhs1 (op_stmt);
26298 + y2 = gimple_assign_rhs2 (op_stmt);
26299 +
26300 + x2 = upcast_operand_p (x2);
26301 + if (x2 == NULL_TREE)
26302 + goto done;
26303 + y2 = upcast_operand_p (y2);
26304 + if (y2 == NULL_TREE)
26305 + goto done;
26306 +
26307 + *op0 = x2;
26308 + *op1 = y2;
26309 + yes = true;
26310 +
26311 + done:
26312 + return yes;
26313 +}
26314 +
26315 +/* Simple wrapper around flow_bb_inside_loop_p that handles NULL
26316 + statements and initial definitions of variables. */
26317 +
26318 +static bool
26319 +stmt_in_loop_p (gimple t, struct loop *loop)
26320 +{
26321 + basic_block bb;
26322 +
26323 + if (t == NULL)
26324 + return false;
26325 +
26326 + bb = gimple_bb (t);
26327 + if (bb == NULL)
26328 + return false;
26329 +
26330 + return flow_bb_inside_loop_p (loop, bb);
26331 +}
26332 +
26333 +/* The loop index should have a specific usage pattern:
26334 +
26335 + - It should be defined in a phi node with two incoming values:
26336 +
26337 + LI_phi = PHI (LI_out, LI_in)
26338 +
26339 + - One incoming value, LI_out, should be from outside the loop.
26340 +
26341 + - The other incoming value, LI_in, should be defined thusly:
26342 +
26343 + LI_in = LI_phi + increment
26344 +
26345 + - increment should be 1. We permit other increments with
26346 + -funsafe-loop-optimizations.
26347 +
26348 + - Finally, in the comparison to exit the loop, the loop index must be
26349 + compared against a variable that has a type at least as precise as
26350 + the loop index's type. For instance, something like:
26351 +
26352 + char limit;
26353 + short i;
26354 +
26355 + for (i = 0; i < limit; i++) ...
26356 +
26357 + would not be permitted. */
26358 +
26359 +static bool
26360 +analyze_loop_index_definition_pattern (struct promote_info *pi)
26361 +{
26362 + gimple phi = SSA_NAME_DEF_STMT (pi->loop_index_name);
26363 + bool ok = false, warn = false;
26364 + tree in0, in1;
26365 + bool inside0, inside1;
26366 + gimple def0, def1;
26367 + tree op0, op1, increment = NULL_TREE;
26368 +
26369 + if (gimple_code (phi) != GIMPLE_PHI
26370 + || gimple_phi_num_args (phi) != 2)
26371 + goto done;
26372 +
26373 + in0 = PHI_ARG_DEF (phi, 0);
26374 + in1 = PHI_ARG_DEF (phi, 1);
26375 +
26376 + /* Figure out which value comes from outside the loop. */
26377 + def0 = TREE_CODE (in0) == SSA_NAME ? SSA_NAME_DEF_STMT (in0) : NULL;
26378 + def1 = TREE_CODE (in1) == SSA_NAME ? SSA_NAME_DEF_STMT (in1) : NULL;
26379 +
26380 + inside0 = stmt_in_loop_p (def0, pi->loop);
26381 + inside1 = stmt_in_loop_p (def1, pi->loop);
26382 +
26383 + if (inside0 && inside1)
26384 + goto done;
26385 + else if (inside0)
26386 + {
26387 + tree t = in0;
26388 + gimple g;
26389 + in0 = in1;
26390 + in1 = t;
26391 + g = def0;
26392 + def0 = def1;
26393 + def1 = g;
26394 + }
26395 + else if (!inside1)
26396 + goto done;
26397 +
26398 + /* IN0 comes from outside the loop, IN1 from inside. Analyze IN1. */
26399 + if (gimple_code (def1) != GIMPLE_ASSIGN)
26400 + goto done;
26401 +
26402 + switch (gimple_assign_rhs_code (def1))
26403 + {
26404 + case CONVERT_EXPR:
26405 + case NOP_EXPR:
26406 + if (!signed_arithmetic_overflow_idiom_p (gimple_assign_rhs1 (def1),
26407 + &op0, &op1))
26408 + goto done;
26409 + goto plus;
26410 + case PLUS_EXPR:
26411 + op0 = gimple_assign_rhs1 (def1);
26412 + op1 = gimple_assign_rhs2 (def1);
26413 + plus:
26414 + {
26415 + bool op0_li = op0 == PHI_RESULT (phi);
26416 + bool op1_li = op1 == PHI_RESULT (phi);
26417 + if (op0_li && op1_li)
26418 + /* This is weird, and definitely is not a case we can support
26419 + for promotion. */
26420 + goto done;
26421 + else if (op0_li)
26422 + increment = op1;
26423 + else if (op1_li)
26424 + increment = op0;
26425 + else
26426 + goto done;
26427 + break;
26428 + }
26429 + default:
26430 + break;
26431 + }
26432 +
26433 +
26434 + /* Check that the exit condition for the loop is OK. */
26435 + {
26436 + enum tree_code code = gimple_cond_code (pi->exit_expr);
26437 +
26438 + op0 = gimple_cond_lhs (pi->exit_expr);
26439 + op1 = gimple_cond_rhs (pi->exit_expr);
26440 +
26441 + if (op0 == pi->loop_limit)
26442 + {
26443 + tree t = op0;
26444 + op0 = op1;
26445 + op1 = t;
26446 + code = swap_tree_comparison (code);
26447 + }
26448 +
26449 + if (code != LT_EXPR && code != LE_EXPR)
26450 + goto done;
26451 +
26452 + if (!types_compatible_p (TREE_TYPE (pi->loop_index_name),
26453 + TREE_TYPE (pi->loop_limit)))
26454 + {
26455 + switch (TREE_CODE (pi->loop_limit))
26456 + {
26457 + case INTEGER_CST:
26458 + if (!int_fits_type_p (pi->loop_limit,
26459 + TREE_TYPE (pi->loop_index_name)))
26460 + goto done;
26461 + break;
26462 + case SSA_NAME:
26463 + {
26464 + tree v = pi->loop_limit;
26465 + gimple def = SSA_NAME_DEF_STMT (v);
26466 +
26467 + /* Backtrack through CONVERT_EXPRs and/or NOP_EXPRs to
26468 + determine if the variables "started out" as the same
26469 + type. */
26470 + while (gimple_code (def) == GIMPLE_ASSIGN)
26471 + {
26472 + enum tree_code rhs_code = gimple_assign_rhs_code (def);
26473 +
26474 + if (rhs_code != NOP_EXPR && rhs_code != CONVERT_EXPR)
26475 + break;
26476 +
26477 + v = gimple_assign_rhs1 (def);
26478 + def = SSA_NAME_DEF_STMT (v);
26479 + }
26480 + /* Permit comparisons between non-compatible types with
26481 + flag_unsafe_loop_optimizations, since we can assume the
26482 + loop index does not overflow. */
26483 + if (types_compatible_p (TREE_TYPE (pi->loop_index_name),
26484 + TREE_TYPE (v))
26485 + || flag_unsafe_loop_optimizations)
26486 + break;
26487 + /* Fallthrough. */
26488 + default:
26489 + goto done;
26490 + }
26491 + }
26492 + }
26493 + }
26494 +
26495 + if (increment == NULL_TREE)
26496 + goto done;
26497 + if (TREE_CODE (increment) != INTEGER_CST
26498 + || compare_tree_int (increment, 1) != 0)
26499 + {
26500 + warn = true;
26501 + if (!flag_unsafe_loop_optimizations)
26502 + goto done;
26503 + }
26504 +
26505 + ok = true;
26506 + done:
26507 + if (warn && !pi->warned)
26508 + {
26509 + pi->warned = true;
26510 + /* We can promote unsigned indices only if -funsafe-loop-optimizations
26511 + is in effect, since the user might be depending on the modulo
26512 + wraparound behavior of unsigned types. */
26513 + if (warn_unsafe_loop_optimizations)
26514 + {
26515 + const char *wording;
26516 +
26517 + wording = (flag_unsafe_loop_optimizations
26518 + ? N_("assuming that the loop counter does not overflow")
26519 + : N_("cannot optimize loop, the loop counter may overflow"));
26520 + warning (OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
26521 + }
26522 + }
26523 +
26524 + return ok;
26525 +}
26526 +
26527 +/* Analyze the loop associated with PI_ to see if its loop index can be
26528 + promoted. */
26529 +
26530 +static bool
26531 +analyze_loop (const void *pi_, void *data)
26532 +{
26533 + struct promote_info *pi = CONST_CAST (struct promote_info *,
26534 + (const struct promote_info *) pi_);
26535 + bool *changed = (bool *) data;
26536 +
26537 + /* We previously determined we can't promote this; go ahead and
26538 + continue iterating. */
26539 + if (pi->loop_index_name == NULL_TREE)
26540 + return true;
26541 +
26542 + /* Assume we can always promote the loop index, even if it doesn't
26543 + exist. */
26544 + pi->can_be_promoted_p = true;
26545 +
26546 + if (dump_file)
26547 + {
26548 + fprintf (dump_file, "Analyzing ");
26549 + print_generic_expr (dump_file, pi->loop_index_name, 0);
26550 + fprintf (dump_file, "\n");
26551 + }
26552 +
26553 + if (pi->loop_index_name
26554 + && analyze_loop_index_definition_pattern (pi))
26555 + {
26556 + /* Clear any previously gathered information. */
26557 + VEC_truncate (tree, pi->cast_types, 0);
26558 + VEC_truncate (int, pi->cast_counts, 0);
26559 +
26560 + walk_use_def_chains (pi->loop_index_name, analyze_loop_index, pi, false);
26561 + }
26562 + else
26563 + pi->can_be_promoted_p = false;
26564 +
26565 + /* If we determined the loop index is used in strange ways, clear it
26566 + so we don't examine it again. */
26567 + if (!pi->can_be_promoted_p)
26568 + pi->loop_index_name = NULL_TREE;
26569 +
26570 + /* Let our caller know whether to re-do the analysis. */
26571 + *changed = *changed || !pi->can_be_promoted_p;
26572 + /* Continue if PI is promotable. */
26573 + return pi->can_be_promoted_p;
26574 +}
26575 +
26576 +/* Add PI_->LOOP_INDEX_NAME to the set of variables, DATA, that we are
26577 + considering for promotion. */
26578 +
26579 +static bool
26580 +add_variable (const void *pi_, void *data ATTRIBUTE_UNUSED)
26581 +{
26582 + const struct promote_info *pi = (const struct promote_info *) pi_;
26583 + struct pointer_set_t *pset = (struct pointer_set_t *) data;
26584 + int presentp;
26585 +
26586 + if (pi->loop_index_name != NULL_TREE)
26587 + {
26588 + presentp = pointer_set_insert (pset, pi->loop_index_name);
26589 + gcc_assert (!presentp);
26590 + }
26591 +
26592 + /* Continue traversal. */
26593 + return true;
26594 +}
26595 +
26596 +/* For each promotable variable:
26597 +
26598 + - create a new, promoted VAR_DECL;
26599 +
26600 + - walk through all the uses and defs and create new statements using
26601 + the promoted variables. We don't create new phi nodes; post-pass
26602 + SSA update will handle those for us. */
26603 +
26604 +/* Make dump files readable. */
26605 +#define PROMOTED_VAR_SUFFIX ".promoted"
26606 +
26607 +/* Create a variable NAME with TYPE and do the necessary work to inform
26608 + the SSA machinery about it. */
26609 +
26610 +static tree
26611 +create_pli_var (tree type, char *name)
26612 +{
26613 + tree var = create_tmp_var (type, name);
26614 + create_var_ann (var);
26615 + mark_sym_for_renaming (var);
26616 + add_referenced_var (var);
26617 + return var;
26618 +}
26619 +
26620 +/* Associate the SSA_NAME VAR with the promoted variable DATA. */
26621 +
26622 +static bool
26623 +associate_name_with_var (tree var, gimple def_stmt, void *data)
26624 +{
26625 + tree promoted_var = (tree) data;
26626 + void **p;
26627 +
26628 + gcc_assert (promoted_var != NULL_TREE);
26629 +
26630 + if (gimple_code (def_stmt) == GIMPLE_PHI)
26631 + var = PHI_RESULT (def_stmt);
26632 +
26633 + p = pointer_map_insert (variable_map, var);
26634 +
26635 + if (!*p)
26636 + {
26637 + if (dump_file)
26638 + {
26639 + fprintf (dump_file, "Associating ");
26640 + print_generic_expr (dump_file, var, 0);
26641 + fprintf (dump_file, " with ");
26642 + print_generic_expr (dump_file, promoted_var, 0);
26643 + fprintf (dump_file, "\n\n");
26644 + }
26645 + *(tree *)p = promoted_var;
26646 + }
26647 +
26648 + /* Continue traversal. */
26649 + return false;
26650 +}
26651 +
26652 +/* Create a promoted variable for the variable from PI_. */
26653 +
26654 +static bool
26655 +create_promoted_variable (const void *pi_, void *data ATTRIBUTE_UNUSED)
26656 +{
26657 + struct promote_info *pi = CONST_CAST (struct promote_info *,
26658 + (const struct promote_info *) pi_);
26659 +
26660 + if (pi->can_be_promoted_p)
26661 + {
26662 + tree type = choose_profitable_promoted_type (pi);
26663 + tree orig_name = DECL_NAME (pi->var_decl);
26664 + size_t id_len = IDENTIFIER_LENGTH (orig_name);
26665 + size_t name_len = id_len + strlen (PROMOTED_VAR_SUFFIX) + 1;
26666 + char *name;
26667 +
26668 + name = (char *) alloca (name_len);
26669 + strcpy (name, IDENTIFIER_POINTER (orig_name));
26670 + strcpy (name + id_len, PROMOTED_VAR_SUFFIX);
26671 +
26672 + pi->promoted_type = type;
26673 + pi->promoted_var = create_pli_var (type, name);
26674 +
26675 + if (dump_file)
26676 + {
26677 + fprintf (dump_file, "Created new variable ");
26678 + print_generic_expr (dump_file, pi->promoted_var, 0);
26679 + fprintf (dump_file, " to stand in for ");
26680 + print_generic_expr (dump_file, pi->loop_index_name, 0);
26681 + fprintf (dump_file, "\n\n");
26682 + }
26683 +
26684 + walk_use_def_chains (pi->loop_index_name,
26685 + associate_name_with_var,
26686 + pi->promoted_var, false);
26687 + }
26688 +
26689 + /* Continue traversal. */
26690 + return true;
26691 +}
26692 +
26693 +/* Rebuild T with newly promoted variables; STMT is the original
26694 + statement in which T appeared and may be equivalent to T. TYPE is
26695 + non-null when rebuilding the rhs of a GIMPLE_ASSIGN and indicates the
26696 + type of the lhs. */
26697 +
26698 +static tree
26699 +rebuild_tree_with_promotion (tree t, gimple stmt, tree type,
26700 + gimple_stmt_iterator gsi,
26701 + struct promote_info *pi)
26702 +{
26703 + tree op0, op1;
26704 +
26705 + switch (TREE_CODE (t))
26706 + {
26707 + case NOP_EXPR:
26708 + case CONVERT_EXPR:
26709 + {
26710 + tree pvar = rebuild_tree_with_promotion (TREE_OPERAND (t, 0), stmt, type, gsi, pi);
26711 +
26712 + if (types_compatible_p (type, TREE_TYPE (pvar)))
26713 + return pvar;
26714 + else
26715 + return build1 (TREE_CODE (t), type, pvar);
26716 + }
26717 + case INTEGER_CST:
26718 + {
26719 + return build_int_cst_wide (pi->promoted_type,
26720 + TREE_INT_CST_LOW (t),
26721 + TREE_INT_CST_HIGH (t));
26722 + }
26723 + case COND_EXPR:
26724 + {
26725 + tree orig_op0 = TREE_OPERAND (t, 0);
26726 + op0 = rebuild_tree_with_promotion (orig_op0, stmt, type, gsi, pi);
26727 + gcc_assert (orig_op0 != op0);
26728 + TREE_OPERAND (t, 0) = op0;
26729 + return t;
26730 + }
26731 + case PLUS_EXPR:
26732 + case MINUS_EXPR:
26733 + case MULT_EXPR:
26734 + type = pi->promoted_type;
26735 + goto binary_expr;
26736 + case EQ_EXPR:
26737 + case NE_EXPR:
26738 + case LT_EXPR:
26739 + case LE_EXPR:
26740 + case GT_EXPR:
26741 + case GE_EXPR:
26742 + type = TREE_TYPE (t);
26743 + binary_expr:
26744 + op0 = TREE_OPERAND (t, 0);
26745 + op1 = TREE_OPERAND (t, 1);
26746 + op0 = rebuild_tree_with_promotion (op0, stmt, type, gsi, pi);
26747 + op1 = rebuild_tree_with_promotion (op1, stmt, type, gsi, pi);
26748 + return build2 (TREE_CODE (t), type, op0, op1);
26749 + case SSA_NAME:
26750 + {
26751 + void **p = pointer_map_contains (variable_map, t);
26752 +
26753 + if (p == NULL)
26754 + {
26755 + /* This is unexpected, but it does happen if we were dealing
26756 + with COND_EXPRs and such. Just go ahead and create a
26757 + temporary for it. */
26758 + if (types_compatible_p (TREE_TYPE (t), pi->promoted_type)
26759 + || SSA_NAME_DEF_STMT (t) == stmt)
26760 + return t;
26761 + else
26762 + goto insert_cast;
26763 + }
26764 + else
26765 + return *(tree *)p;
26766 + }
26767 + case VAR_DECL:
26768 + return t;
26769 + default:
26770 + insert_cast:
26771 + {
26772 + gimple cast;
26773 + tree tmp, nop;
26774 + tree to_upcast = t;
26775 +
26776 +	 /* If we are dealing with a memory reference, then we can't
26777 + wrap it in a NOP_EXPR; we need to load the value from memory
26778 + first, then convert it. */
26779 + if (!is_gimple_reg (to_upcast))
26780 + {
26781 + tree tmp = create_pli_var (TREE_TYPE (to_upcast),
26782 + CONST_CAST (char *, "loadtmp"));
26783 + gimple stmt = gimple_build_assign (tmp, to_upcast);
26784 + gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
26785 + to_upcast = tmp;
26786 + }
26787 +
26788 + tmp = create_pli_var (pi->promoted_type,
26789 + CONST_CAST (char *, "promotetmp"));
26790 + nop = build1 (NOP_EXPR, pi->promoted_type, to_upcast);
26791 + cast = gimple_build_assign (tmp, nop);
26792 + if (dump_file)
26793 + {
26794 + fprintf (dump_file, "Inserting cast ");
26795 + print_gimple_stmt (dump_file, cast, 0, 0);
26796 + fprintf (dump_file, " prior to ");
26797 + print_gimple_stmt (dump_file, stmt, 0, 0);
26798 + fprintf (dump_file, "\n");
26799 + }
26800 + gsi_insert_before (&gsi, cast, GSI_SAME_STMT);
26801 + return tmp;
26802 + }
26803 + }
26804 +}
26805 +
26806 +/* Split E and place STMT in the block created by doing so. */
26807 +
26808 +static void
26809 +insert_along_edge (gimple stmt, edge e)
26810 +{
26811 + basic_block bb = split_edge (e);
26812 +
26813 + gimple_set_bb (stmt, bb);
26814 + set_bb_seq (bb, gimple_seq_alloc_with_stmt (stmt));
26815 +}
26816 +
26817 +/* Rebuild STMT, which contains uses or a def of the promotable variable
26818 + associated with PI. */
26819 +
26820 +static void
26821 +rebuild_with_promotion (gimple stmt, struct promote_info *pi)
26822 +{
26823 + gimple_stmt_iterator gsi;
26824 +
26825 + if (pointer_set_insert (promoted_stmts, stmt))
26826 + return;
26827 +
26828 + if (dump_file)
26829 + {
26830 + fprintf (dump_file, "Rebuilding stmt ");
26831 + print_gimple_stmt (dump_file, stmt, 0, 0);
26832 + fprintf (dump_file, "\n");
26833 + }
26834 +
26835 + gsi = gsi_for_stmt (stmt);
26836 +
26837 + switch (gimple_code (stmt))
26838 + {
26839 + case GIMPLE_ASSIGN:
26840 + {
26841 + enum tree_code subcode = gimple_assign_rhs_code (stmt);
26842 + enum tree_code newcode = subcode;
26843 + tree lhs = gimple_assign_lhs (stmt);
26844 + tree rhs1 = gimple_assign_rhs1 (stmt);
26845 + tree rhs2 = gimple_assign_rhs2 (stmt);
26846 + tree x, y;
26847 + void **v;
26848 +
26849 + /* If we are defining a promotable variable, check for special
26850 + idioms. */
26851 + v = pointer_map_contains (variable_map, lhs);
26852 + if (v != NULL
26853 + && *(tree *)v == pi->promoted_var
26854 + && (subcode == NOP_EXPR || subcode == CONVERT_EXPR)
26855 + && signed_arithmetic_overflow_idiom_p (rhs1, &x, &y))
26856 + {
26857 + void **xp;
26858 + void **yp;
26859 + if (TYPE_PRECISION (TREE_TYPE (rhs1))
26860 + >= TYPE_PRECISION (pi->promoted_type))
26861 + goto done;
26862 +
26863 + /* It's possible that we've already promoted the operands of
26864 + one or both of the NOP_EXPRs. In that case, we can
26865 + bypass the logic below and go straight to rebuilding the
26866 + rhs that we really want to transform. */
26867 + if (TREE_CODE (x) == VAR_DECL
26868 + || TREE_CODE (y) == VAR_DECL)
26869 + goto build_fake;
26870 + xp = pointer_map_contains (variable_map, x);
26871 + yp = pointer_map_contains (variable_map, y);
26872 +
26873 + /* Nothing to see here. */
26874 + if (!types_compatible_p (TREE_TYPE (x),
26875 + TREE_TYPE (y))
26876 + || (xp == NULL && yp == NULL))
26877 + goto done;
26878 + x = (xp == NULL ? NULL_TREE : *(tree *)xp);
26879 + y = (yp == NULL ? NULL_TREE : *(tree *)yp);
26880 +
26881 + if (x != pi->promoted_var && y != pi->promoted_var)
26882 + goto done;
26883 +
26884 + build_fake:
26885 + newcode = PLUS_EXPR;
26886 + rhs1 = x;
26887 + rhs2 = y;
26888 + if (dump_file)
26889 + {
26890 + fprintf (dump_file, "Substituting ");
26891 + print_generic_expr (dump_file, x, 0);
26892 + fprintf (dump_file, " + ");
26893 + print_generic_expr (dump_file, y, 0);
26894 + fprintf (dump_file, " for rhs of original statement\n");
26895 + }
26896 +
26897 + done:
26898 + ;
26899 + }
26900 +
26901 + lhs = rebuild_tree_with_promotion (lhs, stmt, NULL, gsi, pi);
26902 + rhs1 = rebuild_tree_with_promotion (rhs1, stmt, NULL, gsi, pi);
26903 + if (rhs2)
26904 + rhs2 = rebuild_tree_with_promotion (rhs2, stmt, NULL, gsi, pi);
26905 +
26906 + if (newcode != subcode)
26907 + {
26908 + gimple newstmt = gimple_build_assign_with_ops (newcode,
26909 + lhs, rhs1, rhs2);
26910 + gsi_replace (&gsi, newstmt, true);
26911 + stmt = newstmt;
26912 + }
26913 + else
26914 + {
26915 + gimple_assign_set_lhs (stmt, lhs);
26916 + gimple_assign_set_rhs1 (stmt, rhs1);
26917 + if (rhs2)
26918 + gimple_assign_set_rhs2 (stmt, rhs2);
26919 + }
26920 + }
26921 + break;
26922 + case GIMPLE_COND:
26923 + {
26924 + tree lhs = gimple_cond_lhs (stmt);
26925 + tree rhs = gimple_cond_rhs (stmt);
26926 +
26927 + lhs = rebuild_tree_with_promotion (lhs, stmt, NULL, gsi, pi);
26928 + rhs = rebuild_tree_with_promotion (rhs, stmt, NULL, gsi, pi);
26929 +
26930 + gimple_cond_set_lhs (stmt, lhs);
26931 + gimple_cond_set_rhs (stmt, rhs);
26932 + }
26933 + break;
26934 + case GIMPLE_PHI:
26935 + {
26936 + unsigned int i;
26937 + bool promoted_result = could_be_promoted (PHI_RESULT (stmt));
26938 +
26939 + for (i = 0; i < gimple_phi_num_args (stmt); i++)
26940 + {
26941 + tree var = gimple_phi_arg_def (stmt, i);
26942 + edge e = gimple_phi_arg_edge (stmt, i);
26943 + gimple assign = NULL;
26944 +
26945 + if (TREE_CODE (var) == INTEGER_CST && promoted_result)
26946 + {
26947 + tree cst = build_int_cst_wide (pi->promoted_type,
26948 + TREE_INT_CST_LOW (var),
26949 + TREE_INT_CST_HIGH (var));
26950 +
26951 + assign = gimple_build_assign (pi->promoted_var, cst);
26952 + insert_along_edge (assign, e);
26953 + }
26954 + else if (TREE_CODE (var) == SSA_NAME
26955 + && SSA_NAME_VAR (var) == pi->var_decl
26956 + && !promoted_result)
26957 + {
26958 + tree t = create_pli_var (TREE_TYPE (PHI_RESULT (stmt)),
26959 + CONST_CAST (char *, "promotetmp"));
26960 + tree name;
26961 + assign = gimple_build_assign_with_ops (CONVERT_EXPR,
26962 + t, pi->promoted_var,
26963 + NULL_TREE);
26964 +
26965 + name = make_ssa_name (t, assign);
26966 + gimple_assign_set_lhs (assign, name);
26967 +
26968 + insert_along_edge (assign, e);
26969 + SET_PHI_ARG_DEF (stmt, i, name);
26970 + }
26971 + }
26972 + }
26973 + break;
26974 + default:
26975 + gcc_unreachable ();
26976 + }
26977 +
26978 + if (dump_file)
26979 + {
26980 + fprintf (dump_file, "Converted stmt ");
26981 + print_gimple_stmt (dump_file, stmt, 0, 0);
26982 + fprintf (dump_file, "\n\n");
26983 + }
26984 + update_stmt (stmt);
26985 +}
26986 +
26987 +/* Helper function for promote_variable that walks over use/def
26988 + chains. */
26989 +
26990 +static bool
26991 +promote_variable_1 (tree var, gimple stmt, void *data)
26992 +{
26993 + struct promote_info *pi = (struct promote_info *) data;
26994 + imm_use_iterator imi;
26995 + gimple use_stmt;
26996 +
26997 + rebuild_with_promotion (stmt, pi);
26998 +
26999 + if (gimple_code (stmt) == GIMPLE_PHI)
27000 + var = PHI_RESULT (stmt);
27001 +
27002 + if (could_be_promoted (var))
27003 + FOR_EACH_IMM_USE_STMT (use_stmt, imi, var)
27004 + {
27005 + rebuild_with_promotion (use_stmt, pi);
27006 + }
27007 +
27008 + return false;
27009 +}
27010 +
27011 +/* Convert all uses and defs of PI_->LOOP_INDEX_NAME as linked by
27012 + use-def chains to uses and defs of PI_->PROMOTED_VAR. */
27013 +
27014 +static bool
27015 +promote_variable (const void *pi_, void *data ATTRIBUTE_UNUSED)
27016 +{
27017 + const struct promote_info *pi = (const struct promote_info *) pi_;
27018 +
27019 + if (pi->can_be_promoted_p)
27020 + {
27021 + walk_use_def_chains (pi->loop_index_name, promote_variable_1,
27022 + CONST_CAST (struct promote_info *, pi), false);
27023 + }
27024 +
27025 + /* Continue traversal. */
27026 + return true;
27027 +}
27028 +
27029 +/* Free PI_ and its associated data. */
27030 +
27031 +static bool
27032 +free_pi_entries (const void *pi_, void *data ATTRIBUTE_UNUSED)
27033 +{
27034 + struct promote_info *pi = CONST_CAST (struct promote_info *,
27035 + (const struct promote_info *) pi_);
27036 +
27037 + VEC_free (tree, heap, pi->cast_types);
27038 + VEC_free (int, heap, pi->cast_counts);
27039 + free (pi);
27040 +
27041 + /* Continue traversal. */
27042 + return true;
27043 +}
27044 +
27045 +/* Collect information about variables that we believe to be loop
27046 + indices in PROMOTION_INFO. */
27047 +
27048 +static void
27049 +collect_promotion_candidates (void)
27050 +{
27051 + loop_iterator li;
27052 + struct loop *loop;
27053 +
27054 + FOR_EACH_LOOP (li, loop, 0)
27055 + {
27056 + basic_block header = loop->header;
27057 + gimple exit_cond = last_stmt (header);
27058 +
27059 + if (exit_cond && gimple_code (exit_cond) == GIMPLE_COND)
27060 + {
27061 + tree loop_index;
27062 + tree limit = NULL_TREE;
27063 + tree decl;
27064 + struct promote_info *pi;
27065 +
27066 + loop_index = find_promotion_candidate (loop, exit_cond, &limit);
27067 + if (loop_index == NULL_TREE)
27068 + continue;
27069 + decl = SSA_NAME_VAR (loop_index);
27070 + if (TREE_ADDRESSABLE (decl))
27071 + continue;
27072 +
27073 + if (dump_file)
27074 + {
27075 + fprintf (dump_file, "Found loop index ");
27076 + print_generic_expr (dump_file, loop_index, 0);
27077 + fprintf (dump_file, " involved in ");
27078 + print_gimple_stmt (dump_file, exit_cond, 0, 0);
27079 + fprintf (dump_file, "\n\n");
27080 + }
27081 +
27082 + pi = XCNEW (struct promote_info);
27083 + pi->loop = loop;
27084 + pi->exit_expr = exit_cond;
27085 + pi->loop_index_name = loop_index;
27086 + pi->loop_limit = limit;
27087 + pi->var_decl = decl;
27088 + /* We think so, anyway... */
27089 + pi->can_be_promoted_p = true;
27090 + pointer_set_insert (promotion_info, pi);
27091 + }
27092 + else if (dump_file)
27093 + {
27094 + fprintf (dump_file, "\nSkipping analysis of loop %d (header bb #%d)\n",
27095 + loop->num, loop->header->index);
27096 + if (exit_cond)
27097 + {
27098 + fprintf (dump_file, "Exit condition was ");
27099 + print_gimple_stmt (dump_file, exit_cond, 0, 0);
27100 + fprintf (dump_file, "\n");
27101 + }
27102 + }
27103 + }
27104 +}
27105 +
27106 +/* Free memory associated with global variables that we used. */
27107 +
27108 +static void
27109 +pli_cleanup (void)
27110 +{
27111 + if (promoted_stmts)
27112 + {
27113 + pointer_set_destroy (promoted_stmts);
27114 + promoted_stmts = NULL;
27115 + }
27116 + if (variable_map)
27117 + {
27118 + pointer_map_destroy (variable_map);
27119 + variable_map = NULL;
27120 + }
27121 + if (promotable_names)
27122 + {
27123 + pointer_set_destroy (promotable_names);
27124 + promotable_names = NULL;
27125 + }
27126 + if (promotion_info)
27127 + {
27128 + pointer_set_traverse (promotion_info, free_pi_entries, NULL);
27129 + pointer_set_destroy (promotion_info);
27130 + promotion_info = NULL;
27131 + }
27132 +}
27133 +
27134 +/* The guts of the pass. */
27135 +
27136 +static unsigned int
27137 +promote_short_indices (void)
27138 +{
27139 + bool did_something = false;
27140 + bool changed;
27141 + size_t max_iterations, i, n_promoted;
27142 +
27143 + promotion_info = pointer_set_create ();
27144 + collect_promotion_candidates ();
27145 +
27146 + if (dump_file)
27147 + fprintf (dump_file, "Found %d candidates for promotion\n",
27148 + (int) pointer_set_n_elements (promotion_info));
27149 +
27150 + /* Nothing to do. */
27151 + if (pointer_set_n_elements (promotion_info) == 0)
27152 + goto cleanup;
27153 +
27154 + /* We have information about which variables are loop index variables.
27155 + We now need to determine the promotability of the loop indices.
27156 + Since the promotability of loop indices may depend on other loop
27157 + indices, we need to repeat this until we reach a fixed point. */
27158 + changed = true;
27159 + max_iterations = pointer_set_n_elements (promotion_info);
27160 + i = 0;
27161 +
27162 + promotable_names = pointer_set_create ();
27163 +
27164 + while (changed)
27165 + {
27166 + changed = false;
27167 + pointer_set_clear (promotable_names);
27168 + pointer_set_traverse (promotion_info, add_variable,
27169 + promotable_names);
27170 + n_promoted = pointer_set_n_elements (promotable_names);
27171 +
27172 + if (dump_file)
27173 + fprintf (dump_file, "\nIteration %d, have %d variables to consider\n",
27174 + (int) i, (int) n_promoted);
27175 +
27176 + if (n_promoted == 0)
27177 + break;
27178 + gcc_assert (i < max_iterations);
27179 + pointer_set_traverse (promotion_info, analyze_loop, &changed);
27180 + i++;
27181 + }
27182 +
27183 + if (dump_file)
27184 + fprintf (dump_file, "Promoting %d variables\n",
27185 + (int) n_promoted);
27186 +
27187 + if (n_promoted != 0)
27188 + {
27189 + did_something = true;
27190 + variable_map = pointer_map_create ();
27191 + promoted_stmts = pointer_set_create ();
27192 + pointer_set_traverse (promotion_info, create_promoted_variable, NULL);
27193 + pointer_set_traverse (promotion_info, promote_variable, NULL);
27194 + }
27195 +
27196 + cleanup:
27197 + pli_cleanup ();
27198 + return did_something ? TODO_update_ssa : 0;
27199 +}
27200 +
27201 +/* Entry point for the short loop index promotion pass. */
27202 +
27203 +static unsigned int
27204 +tree_short_index_promotion (void)
27205 +{
27206 + unsigned int changed = 0;
27207 +
27208 + /* Initialize all the necessary loop infrastructure. */
27209 + loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES | LOOPS_HAVE_RECORDED_EXITS);
27210 + add_noreturn_fake_exit_edges ();
27211 + connect_infinite_loops_to_exit ();
27212 +
27213 + if (number_of_loops () > 1)
27214 + changed = promote_short_indices ();
27215 +
27216 + /* Tear down loop optimization infrastructure. */
27217 + remove_fake_exit_edges ();
27218 + free_numbers_of_iterations_estimates ();
27219 + loop_optimizer_finalize ();
27220 +
27221 + return changed;
27222 +}
27223 +
27224 +static bool
27225 +gate_short_index_promotion (void)
27226 +{
27227 + return optimize > 0 && flag_promote_loop_indices;
27228 +}
27229 +
27230 +struct gimple_opt_pass pass_promote_indices =
27231 +{
27232 + {
27233 + GIMPLE_PASS,
27234 + "promoteshort", /* name */
27235 + gate_short_index_promotion, /* gate */
27236 + tree_short_index_promotion, /* execute */
27237 + NULL, /* sub */
27238 + NULL, /* next */
27239 + 0, /* static_pass_number */
27240 + TV_TREE_LOOP_PROMOTE, /* tv_id */
27241 + PROP_cfg | PROP_ssa, /* properties_required */
27242 + 0, /* properties_provided */
27243 + 0, /* properties_destroyed */
27244 + 0, /* todo_flags_start */
27245 + TODO_dump_func | TODO_verify_loops
27246 + | TODO_ggc_collect /* todo_flags_finish */
27247 + }
27248 +};
27249 --- a/gcc/tree-ssa-pre.c
27250 +++ b/gcc/tree-ssa-pre.c
27251 @@ -104,6 +104,10 @@
27252 In order to make it fully redundant, we insert the expression into
27253 the predecessors where it is not available, but is ANTIC.
27254
27255 + When optimizing for size, we only eliminate the partial redundancy
27256 + if we need to insert in only one predecessor. This avoids almost
27257 + completely the code size increase that PRE usually causes.
27258 +
27259 For the partial anticipation case, we only perform insertion if it
27260 is partially anticipated in some block, and fully available in all
27261 of the predecessors.
27262 @@ -429,6 +433,7 @@
27263 static void bitmap_value_insert_into_set (bitmap_set_t, pre_expr);
27264 static void bitmap_value_replace_in_set (bitmap_set_t, pre_expr);
27265 static void bitmap_set_copy (bitmap_set_t, bitmap_set_t);
27266 +static void bitmap_set_and (bitmap_set_t, bitmap_set_t);
27267 static bool bitmap_set_contains_value (bitmap_set_t, unsigned int);
27268 static void bitmap_insert_into_set (bitmap_set_t, pre_expr);
27269 static void bitmap_insert_into_set_1 (bitmap_set_t, pre_expr, bool);
27270 @@ -2988,13 +2993,6 @@
27271 tree temp;
27272 gimple phi;
27273
27274 - if (dump_file && (dump_flags & TDF_DETAILS))
27275 - {
27276 - fprintf (dump_file, "Found partial redundancy for expression ");
27277 - print_pre_expr (dump_file, expr);
27278 - fprintf (dump_file, " (%04d)\n", val);
27279 - }
27280 -
27281 /* Make sure we aren't creating an induction variable. */
27282 if (block->loop_depth > 0 && EDGE_COUNT (block->preds) == 2
27283 && expr->kind != REFERENCE)
27284 @@ -3192,6 +3190,47 @@
27285 }
27286
27287
27288 +/* Indicate if, when optimizing for speed, it is appropriate to make
27289 + INSERTS_NEEDED insertions in order to make EXPR in BLOCK redundant. */
27290 +static bool
27291 +ppre_n_insert_for_speed_p (pre_expr expr, basic_block block,
27292 + unsigned int inserts_needed)
27293 +{
27294 + /* The more expensive EXPR is, the more we should be prepared to insert
27295 + in the predecessors of BLOCK to make EXPR fully redundant.
27296 + For now, only recognize AND, OR, XOR, PLUS and MINUS of a multiple-use
27297 + SSA_NAME with a constant as cheap. */
27298 + int cost;
27299 +
27300 + if (flag_tree_pre_partial_partial_obliviously)
27301 + return true;
27302 + if (expr->kind == NARY)
27303 + {
27304 + vn_nary_op_t nary = PRE_EXPR_NARY (expr);
27305 + switch (nary->opcode)
27306 + {
27307 + tree name, cnst;
27308 + case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR:
27309 + case PLUS_EXPR: case MINUS_EXPR:
27310 +
27311 + gcc_assert (nary->length == 2);
27312 + name = nary->op[0];
27313 + cnst = nary->op[1];
27314 + if (TREE_CODE (name) != SSA_NAME || has_single_use (name))
27315 + return true;
27316 + if (!is_gimple_min_invariant (cnst))
27317 + return true;
27318 + cost = 1;
27319 + break;
27320 + default:
27321 + return true;
27322 + }
27323 + }
27324 + else
27325 + return true;
27326 + return EDGE_COUNT (block->preds) * cost >= inserts_needed;
27327 +
27328 +}
27329
27330 /* Perform insertion of partially redundant values.
27331 For BLOCK, do the following:
27332 @@ -3226,6 +3265,7 @@
27333 pre_expr *avail;
27334 unsigned int val;
27335 bool by_some = false;
27336 + unsigned int inserts_needed = 0;
27337 bool cant_insert = false;
27338 bool all_same = true;
27339 pre_expr first_s = NULL;
27340 @@ -3280,6 +3320,7 @@
27341 {
27342 avail[bprime->index] = eprime;
27343 all_same = false;
27344 + inserts_needed++;
27345 }
27346 else
27347 {
27348 @@ -3289,6 +3330,11 @@
27349 first_s = edoubleprime;
27350 else if (!pre_expr_eq (first_s, edoubleprime))
27351 all_same = false;
27352 + /* If the available value is not a NAME, PREing this
27353 + value will probably result in a copy on the edge
27354 + to assign the expression to a register. */
27355 + if (edoubleprime->kind != NAME)
27356 + inserts_needed++;
27357 }
27358 }
27359 /* If we can insert it, it's not the same value
27360 @@ -3297,9 +3343,27 @@
27361 partially redundant. */
27362 if (!cant_insert && !all_same && by_some && dbg_cnt (treepre_insert))
27363 {
27364 - if (insert_into_preds_of_block (block, get_expression_id (expr),
27365 - avail))
27366 - new_stuff = true;
27367 + if (dump_file && (dump_flags & TDF_DETAILS))
27368 + {
27369 + fprintf (dump_file,
27370 + "Found partial redundancy for expression ");
27371 + print_pre_expr (dump_file, expr);
27372 + fprintf (dump_file, " (%04d)\n", get_expr_value_id (expr));
27373 + }
27374 +
27375 + /* If optimizing for size, insert at most one
27376 + new expression to avoid increasing code size. */
27377 + if (optimize_function_for_speed_p (cfun)
27378 + ? 1 : EDGE_COUNT (block->preds) - inserts_needed == 1)
27379 + new_stuff |=
27380 + insert_into_preds_of_block (block,
27381 + get_expression_id (expr),
27382 + avail);
27383 + else if (dump_file && (dump_flags & TDF_DETAILS))
27384 + fprintf (dump_file, "Not inserting (optimizing for %s)\n",
27385 + optimize_function_for_speed_p (cfun)
27386 + ? "speed" : "size");
27387 +
27388 }
27389 /* If all edges produce the same value and that value is
27390 an invariant, then the PHI has the same value on all
27391 @@ -3428,9 +3492,28 @@
27392 if (!cant_insert && by_all && dbg_cnt (treepre_insert))
27393 {
27394 pre_stats.pa_insert++;
27395 - if (insert_into_preds_of_block (block, get_expression_id (expr),
27396 - avail))
27397 - new_stuff = true;
27398 + if (dump_file && (dump_flags & TDF_DETAILS))
27399 + {
27400 + fprintf (dump_file,
27401 + "Found partial redundancy for expression ");
27402 + print_pre_expr (dump_file, expr);
27403 + fprintf (dump_file, " (%04d)\n", get_expr_value_id (expr));
27404 + }
27405 + /* Assuming the expression is 50% anticipatable, we have to
27406 + multiply the number of insertions needed by two for a cost
27407 + comparison. */
27408 + if (!optimize_function_for_speed_p (cfun)
27409 + || ppre_n_insert_for_speed_p (expr, block,
27410 + 2 * EDGE_COUNT (block->preds)))
27411 + new_stuff |=
27412 + insert_into_preds_of_block (block,
27413 + get_expression_id (expr),
27414 + avail);
27415 + else if (dump_file && (dump_flags & TDF_DETAILS))
27416 + fprintf (dump_file, "Not inserting (optimizing for %s)\n",
27417 + optimize_function_for_speed_p (cfun)
27418 + ? "speed" : "size");
27419 +
27420 }
27421 free (avail);
27422 }
27423 @@ -3471,7 +3554,9 @@
27424 if (!single_pred_p (block))
27425 {
27426 new_stuff |= do_regular_insertion (block, dom);
27427 - if (do_partial_partial)
27428 + /* Don't bother with partial-partial redundancies when
27429 + optimizing for size. */
27430 + if (do_partial_partial && ! optimize_function_for_size_p (cfun))
27431 new_stuff |= do_partial_partial_insertion (block, dom);
27432 }
27433 }
27434 @@ -4220,11 +4305,11 @@
27435 only wants to do full redundancy elimination. */
27436
27437 static unsigned int
27438 -execute_pre (bool do_fre ATTRIBUTE_UNUSED)
27439 +execute_pre (bool do_fre)
27440 {
27441 unsigned int todo = 0;
27442
27443 - do_partial_partial = optimize > 2;
27444 + do_partial_partial = flag_tree_pre_partial_partial;
27445
27446 /* This has to happen before SCCVN runs because
27447 loop_optimizer_init may create new phis, etc. */
27448 @@ -4297,19 +4382,20 @@
27449 return todo;
27450 }
27451
27452 -/* Gate and execute functions for PRE. */
27453 +/* Gate and execute functions for FRE/PRE. */
27454
27455 static unsigned int
27456 do_pre (void)
27457 {
27458 - return TODO_rebuild_alias | execute_pre (false);
27459 + return TODO_rebuild_alias
27460 + | execute_pre (! flag_tree_pre);
27461 }
27462
27463 static bool
27464 gate_pre (void)
27465 {
27466 - /* PRE tends to generate bigger code. */
27467 - return flag_tree_pre != 0 && optimize_function_for_speed_p (cfun);
27468 + /* Run FRE even if we don't run PRE. */
27469 + return (flag_tree_fre || flag_tree_pre);
27470 }
27471
27472 struct gimple_opt_pass pass_pre =
27473 --- /dev/null
27474 +++ b/gcc/tree-ssa-remove-local-statics.c
27475 @@ -0,0 +1,868 @@
27476 +/* Local static variable elimination pass.
27477 + Copyright (C) 2007 Free Software Foundation, Inc.
27478 + Contributed by Nathan Froyd <froydnj@codesourcery.com>
27479 +
27480 +This file is part of GCC.
27481 +
27482 +GCC is free software; you can redistribute it and/or modify it
27483 +under the terms of the GNU General Public License as published by the
27484 +Free Software Foundation; either version 3, or (at your option) any
27485 +later version.
27486 +
27487 +GCC is distributed in the hope that it will be useful, but WITHOUT
27488 +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27489 +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27490 +for more details.
27491 +
27492 +You should have received a copy of the GNU General Public License
27493 +along with GCC; see the file COPYING3. If not see
27494 +<http://www.gnu.org/licenses/>. */
27495 +
27496 +/* Converting static function-local variables to automatic variables.
27497 +
27498 + The motivating example is a function like:
27499 +
27500 + void
27501 + foo (unsigned n)
27502 + {
27503 + static int var;
27504 + unsigned i;
27505 +
27506 + for (i = 0; i != n; i++)
27507 + {
27508 + var = ...
27509 +
27510 + do other things with var...
27511 + }
27512 + }
27513 +
27514 + Because VAR is static, doing things like code motion to loads and
27515 + stores of VAR is difficult. Furthermore, accesses to VAR are
27516 + inefficient. This pass aims to recognize the cases where it is not
27517 + necessary for VAR to be static and modify the code so that later
27518 + passes will do the appropriate optimizations.
27519 +
27520 + The criteria for a static function-local variable V in a function F
27521 + being converted to an automatic variable are:
27522 +
27523 + 1. F does not call setjmp; and
27524 + 2. V's address is never taken; and
27525 + 3. V is not declared volatile; and
27526 + 4. V is not used in any nested function;
27527 + 5. V is not an aggregate value (union, struct, array, etc.); and
27528 + 6. Every use of V is defined along all paths leading to the use.
27529 +
27530 + NOTE: For ease of implementation, we currently treat a function call
27531 + as killing all previous definitions of static variables, since we
27532 + could have:
27533 +
27534 + static void
27535 + foo (...)
27536 + {
27537 + static int x;
27538 +
27539 + x = ...; (1)
27540 +
27541 + f (...); (2)
27542 +
27543 + ... = x; (3)
27544 + }
27545 +
27546 + The use at (3) needs to pick up a possible definition made by the
27547 + call at (2). If the call at (2) does not call back into 'foo',
27548 + then the call is not a killing call. We currently treat it as
27549 + though it is. */
27550 +
27551 +#include "config.h"
27552 +#include "system.h"
27553 +#include "coretypes.h"
27554 +#include "tm.h"
27555 +
27556 +#include "rtl.h"
27557 +#include "tm_p.h"
27558 +#include "hard-reg-set.h"
27559 +#include "obstack.h"
27560 +#include "basic-block.h"
27561 +
27562 +#include "tree.h"
27563 +#include "gimple.h"
27564 +#include "hashtab.h"
27565 +#include "diagnostic.h"
27566 +#include "tree-flow.h"
27567 +#include "tree-dump.h"
27568 +#include "flags.h"
27569 +#include "timevar.h"
27570 +#include "tree-pass.h"
27571 +
27572 +struct rls_decl_info
27573 +{
27574 + /* The variable declaration. */
27575 + tree orig_var;
27576 +
27577 + /* Its index in rls_block_local_data. */
27578 + int index;
27579 +
27580 + /* Whether we can optimize this variable. */
27581 + bool optimizable_p;
27582 +
27583 + /* The new variable declaration, if we can optimize away the staticness
27584 + of 'orig_var'. */
27585 + tree new_var;
27586 +};
27587 +
27588 +/* Filled with 'struct rls_decl_info'; keyed off ORIG_VAR. */
27589 +static htab_t static_variables;
27590 +
27591 +struct rls_stmt_info
27592 +{
27593 + /* The variable declaration. */
27594 + tree var;
27595 +
27596 + /* The statement in which we found a def or a use of the variable. */
27597 + gimple stmt;
27598 +
27599 + /* Whether STMT represents a use of VAR. */
27600 + bool use_p;
27601 +
27602 + /* A bitmap whose entries denote what variables have been defined
27603 + when execution arrives at STMT. This field is only used when
27604 + USE_P is true. */
27605 + sbitmap defined;
27606 +};
27607 +
27608 +/* Filled with 'struct rls_stmt_info'; keyed off STMT. */
27609 +static htab_t defuse_statements;
27610 +
27611 +static struct
27612 +{
27613 + /* The number of static variables we found. */
27614 + size_t n_statics;
27615 +
27616 + /* The number of optimizable variables we found. */
27617 + size_t n_optimizable;
27618 +} stats;
27619 +
27620 +struct rls_block_dataflow_data {
27621 + /* A bitmap whose entries denote what variables have been defined on
27622 + entry to this block. */
27623 + sbitmap defined_in;
27624 +
27625 + /* A bitmap whose entries denote what variables have been defined on
27626 + exit from this block. */
27627 + sbitmap defined_out;
27628 +};
27629 +
27630 +/* Parameters for the 'static_variables' hash table. */
27631 +
27632 +static hashval_t
27633 +rls_hash_decl_info (const void *x)
27634 +{
27635 + return htab_hash_pointer
27636 + ((const void *) ((const struct rls_decl_info *) x)->orig_var);
27637 +}
27638 +
27639 +static int
27640 +rls_eq_decl_info (const void *x, const void *y)
27641 +{
27642 + const struct rls_decl_info *a = (const struct rls_decl_info *) x;
27643 + const struct rls_decl_info *b = (const struct rls_decl_info *) y;
27644 +
27645 + return a->orig_var == b->orig_var;
27646 +}
27647 +
27648 +static void
27649 +rls_free_decl_info (void *info)
27650 +{
27651 + free (info);
27652 +}
27653 +
27654 +/* Parameters for the 'defuse_statements' hash table. */
27655 +
27656 +static hashval_t
27657 +rls_hash_use_info (const void *x)
27658 +{
27659 + return htab_hash_pointer
27660 + ((const void *) ((const struct rls_stmt_info *) x)->stmt);
27661 +}
27662 +
27663 +static int
27664 +rls_eq_use_info (const void *x, const void *y)
27665 +{
27666 + const struct rls_stmt_info *a = (const struct rls_stmt_info *) x;
27667 + const struct rls_stmt_info *b = (const struct rls_stmt_info *) y;
27668 +
27669 + return a->stmt == b->stmt;
27670 +}
27671 +
27672 +static void
27673 +rls_free_use_info (void *info)
27674 +{
27675 + struct rls_stmt_info *stmt_info = (struct rls_stmt_info *) info;
27676 +
27677 + if (stmt_info->defined)
27678 + sbitmap_free (stmt_info->defined);
27679 +
27680 + free (stmt_info);
27681 +}
27682 +
27683 +/* Initialize data structures and statistics. */
27684 +
27685 +static void
27686 +rls_init (void)
27687 +{
27688 + basic_block bb;
27689 +
27690 + /* We expect relatively few static variables, hence the small
27691 + initial size for the hash table. */
27692 + static_variables = htab_create (8, rls_hash_decl_info,
27693 + rls_eq_decl_info, rls_free_decl_info);
27694 +
27695 + /* We expect quite a few statements. */
27696 + defuse_statements = htab_create (128, rls_hash_use_info,
27697 + rls_eq_use_info, rls_free_use_info);
27698 +
27699 + FOR_ALL_BB (bb)
27700 + {
27701 + struct rls_block_dataflow_data *data;
27702 +
27703 + data = XNEW (struct rls_block_dataflow_data);
27704 + memset (data, 0, sizeof (*data));
27705 + bb->aux = data;
27706 + }
27707 +
27708 + stats.n_statics = 0;
27709 + stats.n_optimizable = 0;
27710 +}
27711 +
27712 +/* Free data structures. */
27713 +
27714 +static void
27715 +rls_done (void)
27716 +{
27717 + basic_block bb;
27718 +
27719 + htab_delete (static_variables);
27720 + htab_delete (defuse_statements);
27721 +
27722 + FOR_ALL_BB (bb)
27723 + {
27724 + struct rls_block_dataflow_data *data
27725 + = (struct rls_block_dataflow_data *) bb->aux;
27726 +
27727 + gcc_assert (data);
27728 +
27729 + if (data->defined_in)
27730 + sbitmap_free (data->defined_in);
27731 + if (data->defined_out)
27732 + sbitmap_free (data->defined_out);
27733 + free (data);
27734 + bb->aux = NULL;
27735 + }
27736 +}
27737 +
27738 +\f
27739 +/* Doing the initial work to find static variables. */
27740 +
27741 +/* Examine the defining statement for VAR and determine whether it is a
27742 + static variable we could potentially optimize. If so, stick in it
27743 + in the 'static_variables' hashtable.
27744 +
27745 + STMT is the statement in which a definition or use of VAR occurs.
27746 + USE_P indicates whether VAR is used or defined in STMT. Enter STMT
27747 + into 'defuse_statements' as well for use during dataflow
27748 + analysis. */
27749 +
27750 +static void
27751 +maybe_discover_new_declaration (tree var, gimple stmt, bool use_p)
27752 +{
27753 + tree def_stmt = SSA_NAME_VAR (var);
27754 +
27755 + if (TREE_CODE (def_stmt) == VAR_DECL
27756 + && DECL_CONTEXT (def_stmt) != NULL_TREE
27757 + && TREE_CODE (DECL_CONTEXT (def_stmt)) == FUNCTION_DECL
27758 + /* We cannot optimize away a static used in multiple functions (as
27759 + might happen in C++). */
27760 + && !DECL_NONLOCAL(def_stmt)
27761 + && TREE_STATIC (def_stmt)
27762 + /* We cannot optimize away aggregate statics, as we would have to
27763 + prove that definitions of every field of the aggregate dominate
27764 + uses. */
27765 + && !AGGREGATE_TYPE_P (TREE_TYPE (def_stmt))
27766 + /* GCC doesn't normally treat vectors as aggregates; we need to,
27767 + though, since a user could use intrinsics to read/write
27768 + particular fields of the vector, thereby treating it as an
27769 + array. */
27770 + && TREE_CODE (TREE_TYPE (def_stmt)) != VECTOR_TYPE
27771 + && !TREE_ADDRESSABLE (def_stmt)
27772 + && !TREE_THIS_VOLATILE (def_stmt))
27773 + {
27774 + struct rls_decl_info dummy;
27775 + void **slot;
27776 +
27777 + dummy.orig_var = def_stmt;
27778 + slot = htab_find_slot (static_variables, &dummy, INSERT);
27779 +
27780 + if (*slot == NULL)
27781 + {
27782 + /* Found a use or a def of a new declaration. */
27783 + struct rls_decl_info *info = XNEW (struct rls_decl_info);
27784 +
27785 + info->orig_var = def_stmt;
27786 + info->index = stats.n_statics++;
27787 + /* Optimistically assume that we can optimize. */
27788 + info->optimizable_p = true;
27789 + info->new_var = NULL_TREE;
27790 + *slot = (void *) info;
27791 + }
27792 +
27793 + /* Enter the statement into DEFUSE_STATEMENTS. */
27794 + {
27795 + struct rls_stmt_info dummy;
27796 + struct rls_stmt_info *info;
27797 +
27798 + dummy.stmt = stmt;
27799 + slot = htab_find_slot (defuse_statements, &dummy, INSERT);
27800 +
27801 + /* We should never insert the same statement into the
27802 + hashtable twice. */
27803 + gcc_assert (*slot == NULL
27804 + || ((struct rls_stmt_info *)(*slot))->stmt == stmt);
27805 +
27806 + if (*slot != NULL && ((struct rls_stmt_info *)(*slot))->stmt == stmt)
27807 + return;
27808 +
27809 + info = XNEW (struct rls_stmt_info);
27810 + info->var = def_stmt;
27811 + info->stmt = stmt;
27812 + if (dump_file)
27813 + {
27814 + fprintf (dump_file, "entering as %s ", use_p ? "use" : "def");
27815 + print_gimple_stmt (dump_file, stmt, 0, TDF_DETAILS | TDF_VOPS);
27816 + }
27817 + info->use_p = use_p;
27818 + /* We don't know how big to make the bitmap yet. */
27819 + info->defined = NULL;
27820 + *slot = (void *) info;
27821 + }
27822 + }
27823 +}
27824 +
27825 +/* Grovel through all the statements in the program, looking for
27826 + SSA_NAMEs whose SSA_NAME_VAR is a VAR_DECL. We look at both use and
27827 + def SSA_NAMEs. */
27828 +
27829 +static void
27830 +find_static_nonvolatile_declarations (void)
27831 +{
27832 + basic_block bb;
27833 +
27834 + FOR_EACH_BB (bb)
27835 + {
27836 + gimple_stmt_iterator i;
27837 +
27838 + for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
27839 + {
27840 + tree var;
27841 + ssa_op_iter iter;
27842 + gimple stmt = gsi_stmt (i);
27843 +
27844 + /* If there's a call expression in STMT, then previous passes
27845 + will have determined if the call transitively defines some
27846 + static variable. However, we need more precise
27847 + information--we need to know whether static variables are
27848 + live out after the call.
27849 +
27850 + Since we'll never see something like:
27851 +
27852 + staticvar = foo (bar, baz);
27853 +
27854 + in GIMPLE (the result of the call will be assigned to a
27855 + normal, non-static local variable which is then assigned to
27856 + STATICVAR in a subsequent statement), don't bother finding
27857 + new declarations if we see a GIMPLE_CALL.
27858 +
27859 + In a similar fashion, asm statements that clobber memory
27860 + will appear to define static variables. In general,
27861 + however, assuming that asm statements define static
27862 + variables would cause us to see that in the following
27863 + situation:
27864 +
27865 + static int foo = 0;
27866 +
27867 + __asm__ (... : "memory");
27868 + foo++;
27869 +
27870 + foo could be unstaticized because the asm has "defined"
27871 + foo. This is likely false. (Even if the asm does happen
27872 + to define foo--and only foo--that situation would be
27873 + sufficiently unusual that not optimizing it seems OK.) */
27874 + if (gimple_code (stmt) != GIMPLE_CALL
27875 + && gimple_code (stmt) != GIMPLE_ASM)
27876 + FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_VDEF)
27877 + {
27878 + maybe_discover_new_declaration (var, stmt, false);
27879 + }
27880 +
27881 + FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_VUSE)
27882 + {
27883 + maybe_discover_new_declaration (var, stmt, true);
27884 + }
27885 + }
27886 + }
27887 +}
27888 +
27889 +\f
27890 +/* Determining if we have anything to optimize. */
27891 +
27892 +/* Examine *SLOT (which is a 'struct rls_decl_info *') to see whether
27893 + the associated variable is optimizable. If it is, create a new,
27894 + non-static declaration for the variable; this new variable will be
27895 + used during a subsequent rewrite of the function. */
27896 +
27897 +#define NEW_VAR_PREFIX ".unstatic"
27898 +
27899 +static int
27900 +maybe_create_new_variable (void **slot, void *data ATTRIBUTE_UNUSED)
27901 +{
27902 + struct rls_decl_info *info = (struct rls_decl_info *) *slot;
27903 + tree id_node = DECL_NAME (info->orig_var);
27904 + size_t id_len = IDENTIFIER_LENGTH (id_node);
27905 + size_t name_len = id_len + strlen (NEW_VAR_PREFIX) + 1;
27906 + char *name;
27907 +
27908 + /* Don't create a new variable multiple times. */
27909 + gcc_assert (!info->new_var);
27910 +
27911 + /* Tie the new name to the old one to aid debugging dumps. */
27912 + name = (char *) alloca (name_len);
27913 + strcpy (name, IDENTIFIER_POINTER (id_node));
27914 + strcpy (name + id_len, NEW_VAR_PREFIX);
27915 + info->new_var = create_tmp_var (TREE_TYPE (info->orig_var), name);
27916 +
27917 + if (dump_file)
27918 + {
27919 + fprintf (dump_file, "new variable ");
27920 + print_generic_expr (dump_file, info->new_var, 0);
27921 + fprintf (dump_file, "\n");
27922 + }
27923 +
27924 + /* Inform SSA about this new variable. */
27925 + create_var_ann (info->new_var);
27926 + mark_sym_for_renaming (info->new_var);
27927 + /* We need to make sure we rebuild bits for the original variable,
27928 + such as virtual operands attached to statements. */
27929 + mark_sym_for_renaming (info->orig_var);
27930 + add_referenced_var (info->new_var);
27931 +
27932 + /* Always continue scanning. */
27933 + return 1;
27934 +}
27935 +
27936 +#undef NEW_VAR_PREFIX
27937 +
27938 +/* Traverse the 'defuse_statements' hash table. For every use,
27939 + determine if the associated variable is defined along all paths
27940 + leading to said use. Remove the associated variable from
27941 + 'static_variables' if it is not. */
27942 +
27943 +static int
27944 +check_definedness (void **slot, void *data ATTRIBUTE_UNUSED)
27945 +{
27946 + struct rls_stmt_info *info = (struct rls_stmt_info *) *slot;
27947 + struct rls_decl_info dummy;
27948 +
27949 + /* We don't need to look at definitions. Continue scanning. */
27950 + if (!info->use_p)
27951 + return 1;
27952 +
27953 + dummy.orig_var = info->var;
27954 + slot = htab_find_slot (static_variables, &dummy, INSERT);
27955 +
27956 + /* Might not be there because we deleted it already. */
27957 + if (*slot)
27958 + {
27959 + struct rls_decl_info *decl = (struct rls_decl_info *) *slot;
27960 +
27961 + if (!TEST_BIT (info->defined, decl->index))
27962 + {
27963 + if (dump_file)
27964 + {
27965 + fprintf (dump_file, "not optimizing ");
27966 + print_generic_expr (dump_file, decl->orig_var, 0);
27967 +	      fprintf (dump_file, " due to uncovered use in ");
27968 + print_gimple_stmt (dump_file, info->stmt, 0, 0);
27969 + fprintf (dump_file, "\n");
27970 + }
27971 +
27972 + htab_clear_slot (static_variables, slot);
27973 + stats.n_optimizable--;
27974 + }
27975 + }
27976 +
27977 + /* Continue scan. */
27978 + return 1;
27979 +}
27980 +
27981 +/* Check all statements in 'defuse_statements' to see if all the
27982 + statements that use a static variable have that variable defined
27983 + along all paths leading to the statement. Once that's done, go
27984 + through and create new, non-static variables for any static variables
27985 + that can be optimized. */
27986 +
27987 +static size_t
27988 +determine_optimizable_statics (void)
27989 +{
27990 + htab_traverse (defuse_statements, check_definedness, NULL);
27991 +
27992 + htab_traverse (static_variables, maybe_create_new_variable, NULL);
27993 +
27994 + return stats.n_optimizable;
27995 +}
27996 +
27997 +/* Look at STMT to see if we have uses or defs of a static variable.
27998 + STMT is passed in DATA. Definitions of a static variable are found
27999 + by the presence of a V_MUST_DEF, while uses are found by the presence
28000 + of a VUSE. */
28001 +
28002 +static int
28003 +unstaticize_variable (void **slot, void *data)
28004 +{
28005 + struct rls_decl_info *info = (struct rls_decl_info *) *slot;
28006 + gimple stmt = (gimple) data;
28007 + tree vdef;
28008 + tree vuse;
28009 + int continue_scan = 1;
28010 +
28011 + /* We should have removed unoptimizable variables during an earlier
28012 + traversal. */
28013 + gcc_assert (info->optimizable_p);
28014 +
28015 + /* Check for virtual definitions first. */
28016 + vdef = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_VDEF);
28017 +
28018 + if (vdef != NULL
28019 + && ZERO_SSA_OPERANDS (stmt, SSA_OP_DEF)
28020 + && gimple_code (stmt) == GIMPLE_ASSIGN
28021 + && TREE_CODE (gimple_assign_lhs (stmt)) == VAR_DECL
28022 + && gimple_assign_lhs(stmt) == info->orig_var)
28023 + {
28024 + /* Make the statement define the new name. The new name has
28025 + already been marked for renaming, so no need to do that
28026 + here. */
28027 + gimple_assign_set_lhs (stmt, info->new_var);
28028 + if (dump_file)
28029 + {
28030 + fprintf (dump_file, "found virtual definition!\n");
28031 + print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS | TDF_DETAILS);
28032 + fprintf (dump_file, "\n");
28033 + }
28034 + continue_scan = 0;
28035 + goto done;
28036 + }
28037 +
28038 + /* Check for virtual uses. */
28039 + vuse = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_VUSE);
28040 +
28041 + if (vuse != NULL
28042 + && gimple_code (stmt) == GIMPLE_ASSIGN
28043 + && gimple_assign_rhs_code (stmt) == VAR_DECL
28044 + && gimple_assign_rhs1 (stmt) == info->orig_var)
28045 + {
28046 + /* Make the statement use the new name. */
28047 + gimple_assign_set_rhs1 (stmt, info->new_var);
28048 + if (dump_file)
28049 + {
28050 + fprintf (dump_file, "found virtual use!\n");
28051 + print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS | TDF_DETAILS);
28052 + fprintf (dump_file, "\n");
28053 + }
28054 + continue_scan = 0;
28055 + goto done;
28056 + }
28057 +
28058 + done:
28059 + if (!continue_scan)
28060 + {
28061 + /* None of the other optimizable static variables can occur
28062 + in this statement. Stop the scan. */
28063 + update_stmt (stmt);
28064 +
28065 + if (dump_file)
28066 + {
28067 + fprintf (dump_file, "updated stmt\n");
28068 + print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS | TDF_DETAILS);
28069 + }
28070 + }
28071 +
28072 + return continue_scan;
28073 +}
28074 +
28075 +/* Determine if we have any static variables we can optimize. If so,
28076 + replace any defs or uses of those variables in their defining/using
28077 + statements. */
28078 +
28079 +static void
28080 +maybe_remove_static_from_declarations (void)
28081 +{
28082 + size_t n_optimizable = determine_optimizable_statics ();
28083 + basic_block bb;
28084 +
28085 + if (n_optimizable)
28086 + /* Replace any optimizable variables with new, non-static variables. */
28087 + FOR_EACH_BB (bb)
28088 + {
28089 + gimple_stmt_iterator gsi;
28090 +
28091 + for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
28092 + {
28093 + gimple stmt = gsi_stmt (gsi);
28094 +
28095 + htab_traverse (static_variables, unstaticize_variable, stmt);
28096 + }
28097 + }
28098 +}
28099 +
28100 +/* Callback for htab_traverse to initialize the bitmap for *SLOT, which
28101 + is a 'struct rls_stmt_info'. */
28102 +
28103 +static int
28104 +initialize_statement_dataflow (void **slot, void *data ATTRIBUTE_UNUSED)
28105 +{
28106 + struct rls_stmt_info *info = (struct rls_stmt_info *) *slot;
28107 +
28108 + gcc_assert (!info->defined);
28109 +
28110 + if (info->use_p)
28111 + {
28112 + info->defined = sbitmap_alloc (stats.n_statics);
28113 + /* Assume defined along all paths until otherwise informed. */
28114 + sbitmap_ones (info->defined);
28115 + }
28116 +
28117 + /* Continue traversal. */
28118 + return 1;
28119 +}
28120 +
28121 +/* We have N_STATICS static variables to consider. Go through all the
28122 + blocks and all the use statements to initialize their bitmaps. */
28123 +
28124 +static void
28125 +initialize_block_and_statement_dataflow (size_t n_statics)
28126 +{
28127 + basic_block bb;
28128 +
28129 + FOR_ALL_BB (bb)
28130 + {
28131 + struct rls_block_dataflow_data *data
28132 + = (struct rls_block_dataflow_data *) bb->aux;
28133 +
28134 + gcc_assert (data);
28135 +
28136 + data->defined_in = sbitmap_alloc (n_statics);
28137 + sbitmap_zero (data->defined_in);
28138 + data->defined_out = sbitmap_alloc (n_statics);
28139 + sbitmap_zero (data->defined_out);
28140 + }
28141 +
28142 + htab_traverse (defuse_statements, initialize_statement_dataflow, NULL);
28143 +}
28144 +
28145 +/* Apply the individual effects of the stmts in BB to update the
28146 + dataflow analysis information for BB. */
28147 +
28148 +static void
28149 +compute_definedness_for_block (basic_block bb)
28150 +{
28151 + bool changed_p = false;
28152 + struct rls_block_dataflow_data *data
28153 + = (struct rls_block_dataflow_data *) bb->aux;
28154 + gimple_stmt_iterator gsi;
28155 +
28156 + sbitmap_copy (data->defined_out, data->defined_in);
28157 +
28158 + for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
28159 + {
28160 + gimple stmt = gsi_stmt (gsi);
28161 +
28162 + if (gimple_code (stmt) == GIMPLE_CALL)
28163 + /* If there's a call expression in STMT, then previous passes
28164 + will have determined if the call transitively defines some
28165 + static variable. However, we need more precise
28166 + information--we need to know whether static variables are
28167 + live out after the call. In the absence of such information,
28168 + simply declare that all static variables are clobbered by the
28169 + call. A better analysis would be interprocedural and compute
28170 + the liveness information we require, but for now, we're being
28171 + pessimistic. */
28172 + sbitmap_zero (data->defined_out);
28173 + else
28174 + {
28175 + struct rls_stmt_info dummy;
28176 + void **slot;
28177 +
28178 + /* See if this statement uses or defines a static variable. */
28179 + dummy.stmt = stmt;
28180 + slot = htab_find_slot (defuse_statements, &dummy, INSERT);
28181 +
28182 + /* Check for uses. */
28183 + if (*slot != NULL)
28184 + {
28185 + struct rls_stmt_info *info = (struct rls_stmt_info *) *slot;
28186 +
28187 + if (info->use_p)
28188 + {
28189 + gcc_assert (info->defined);
28190 +
28191 + /* Found a statement that uses a function-local static
28192 + variable. Copy the current state of definedness. */
28193 + sbitmap_copy (info->defined, data->defined_out);
28194 + }
28195 + else
28196 + {
28197 + struct rls_decl_info dummy;
28198 + struct rls_decl_info *decl;
28199 +
28200 + gcc_assert (!info->defined);
28201 +
28202 + /* Found a statement that defines a function-local static
28203 + variable. Look up the associated variable's information
28204 + and mark it as defined in the block. */
28205 + dummy.orig_var = info->var;
28206 + slot = htab_find_slot (static_variables, &dummy, INSERT);
28207 +
28208 + gcc_assert (*slot);
28209 +
28210 + decl = (struct rls_decl_info *) *slot;
28211 +
28212 + SET_BIT (data->defined_out, decl->index);
28213 + changed_p |= true;
28214 + }
28215 + }
28216 + }
28217 + }
28218 +}
28219 +
28220 +/* Solve the dataflow equations:
28221 +
28222 + DEFINED_IN(b) = intersect DEFINED_OUT(p) for p in preds(b)
28223 + DEFINED_OUT(b) = VARIABLES_DEFINED (b, DEFINED_IN (b))
28224 +
28225 + via a simple iterative solver. VARIABLES_DEFINED is computed by
28226 + 'compute_definedness_for_block'. */
28227 +
28228 +static void
28229 +compute_definedness (void)
28230 +{
28231 + basic_block bb;
28232 + bool changed_p;
28233 + sbitmap tmp_bitmap = sbitmap_alloc (stats.n_statics);
28234 +
28235 + /* Compute initial sets. */
28236 + FOR_EACH_BB (bb)
28237 + {
28238 + compute_definedness_for_block (bb);
28239 + }
28240 +
28241 + /* Iterate. */
28242 + do {
28243 + changed_p = false;
28244 +
28245 + FOR_EACH_BB (bb)
28246 + {
28247 + edge e;
28248 + edge_iterator ei;
28249 + struct rls_block_dataflow_data *data
28250 + = (struct rls_block_dataflow_data *) bb->aux;
28251 + bool bitmap_changed_p = false;
28252 +
28253 + sbitmap_ones (tmp_bitmap);
28254 +
28255 + gcc_assert (data);
28256 +
28257 + /* We require information about whether a variable was defined
28258 + over all paths leading to a particular use. Therefore, we
28259 + intersect the DEFINED sets of all predecessors. */
28260 + FOR_EACH_EDGE (e, ei, bb->preds)
28261 + {
28262 + struct rls_block_dataflow_data *pred_data
28263 + = (struct rls_block_dataflow_data *) e->src->aux;
28264 +
28265 + gcc_assert (pred_data);
28266 +
28267 + sbitmap_a_and_b (tmp_bitmap, tmp_bitmap, pred_data->defined_out);
28268 + }
28269 +
28270 + bitmap_changed_p = !sbitmap_equal (tmp_bitmap, data->defined_in);
28271 +
28272 + if (bitmap_changed_p)
28273 + {
28274 + sbitmap_copy (data->defined_in, tmp_bitmap);
28275 + compute_definedness_for_block (bb);
28276 + }
28277 +
28278 + changed_p |= bitmap_changed_p;
28279 + }
28280 + } while (changed_p);
28281 +
28282 + sbitmap_free (tmp_bitmap);
28283 +}
28284 +
28285 +static unsigned int
28286 +execute_rls (void)
28287 +{
28288 + rls_init ();
28289 +
28290 + find_static_nonvolatile_declarations ();
28291 +
28292 + /* Can we optimize anything? */
28293 + if (stats.n_statics != 0)
28294 + {
28295 + stats.n_optimizable = stats.n_statics;
28296 +
28297 + if (dump_file)
28298 + fprintf (dump_file, "found %d static variables to consider\n",
28299 + stats.n_statics);
28300 +
28301 + initialize_block_and_statement_dataflow (stats.n_statics);
28302 +
28303 + compute_definedness ();
28304 +
28305 + maybe_remove_static_from_declarations ();
28306 +
28307 + if (dump_file)
28308 + fprintf (dump_file, "removed %d static variables\n",
28309 + stats.n_optimizable);
28310 + }
28311 +
28312 + rls_done ();
28313 +
28314 + return 0;
28315 +}
28316 +
28317 +static bool
28318 +gate_rls (void)
28319 +{
28320 + return (flag_remove_local_statics != 0
28321 + && !cfun->calls_setjmp
28322 + && !cgraph_node (current_function_decl)->ever_was_nested);
28323 +}
28324 +
28325 +struct gimple_opt_pass pass_remove_local_statics =
28326 +{
28327 + {
28328 + GIMPLE_PASS,
28329 + "remlocstatic", /* name */
28330 + gate_rls, /* gate */
28331 + execute_rls, /* execute */
28332 + NULL, /* sub */
28333 + NULL, /* next */
28334 + 0, /* static_pass_number */
28335 + TV_TREE_RLS, /* tv_id */
28336 + PROP_cfg | PROP_ssa, /* properties_required */
28337 + 0, /* properties_provided */
28338 + 0, /* properties_destroyed */
28339 + 0, /* todo_flags_start */
28340 + TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts
28341 + | TODO_rebuild_alias | TODO_update_ssa /* todo_flags_finish */
28342 + }
28343 +};
28344 --- a/gcc/tree-ssa-sink.c
28345 +++ b/gcc/tree-ssa-sink.c
28346 @@ -449,6 +449,47 @@
28347 last = false;
28348 continue;
28349 }
28350 +
28351 + /* We cannot move statements that contain references to block-scope
28352 + variables out of that block, as this may lead to incorrect aliasing
28353 + when we lay out the stack frame in cfgexpand.c.
28354 + In lieu of more sophisticated analysis, be very conservative here
28355 + and prohibit moving any statement that references memory out of a
28356 + block with variables. */
28357 + if (gimple_references_memory_p (stmt))
28358 + {
28359 + tree fromblock = gimple_block (stmt);
28360 + while (fromblock
28361 + && fromblock != current_function_decl
28362 + && !BLOCK_VARS (fromblock))
28363 + fromblock = BLOCK_SUPERCONTEXT (fromblock);
28364 + if (fromblock && fromblock != current_function_decl)
28365 + {
28366 + gimple tostmt;
28367 + tree toblock;
28368 +
28369 + if (gsi_end_p (togsi))
28370 + tostmt = gimple_seq_last_stmt (gsi_seq (togsi));
28371 + else
28372 + tostmt = gsi_stmt (togsi);
28373 + if (tostmt)
28374 + toblock = gimple_block (tostmt);
28375 + else
28376 + toblock = NULL;
28377 + while (toblock
28378 + && toblock != current_function_decl
28379 + && toblock != fromblock)
28380 + toblock = BLOCK_SUPERCONTEXT (toblock);
28381 + if (!toblock || toblock != fromblock)
28382 + {
28383 + if (!gsi_end_p (gsi))
28384 + gsi_prev (&gsi);
28385 + last = false;
28386 + continue;
28387 + }
28388 + }
28389 + }
28390 +
28391 if (dump_file)
28392 {
28393 fprintf (dump_file, "Sinking ");
28394 --- a/gcc/tree-ssa-structalias.c
28395 +++ b/gcc/tree-ssa-structalias.c
28396 @@ -2926,7 +2926,8 @@
28397 /* Some people like to do cute things like take the address of
28398 &0->a.b */
28399 forzero = t;
28400 - while (!SSA_VAR_P (forzero) && !CONSTANT_CLASS_P (forzero))
28401 + while (!SSA_VAR_P (forzero) && TREE_CODE (forzero) != FUNCTION_DECL
28402 + && !CONSTANT_CLASS_P (forzero))
28403 forzero = TREE_OPERAND (forzero, 0);
28404
28405 if (CONSTANT_CLASS_P (forzero) && integer_zerop (forzero))
28406 --- a/gcc/tree-vect-analyze.c
28407 +++ b/gcc/tree-vect-analyze.c
28408 @@ -1459,7 +1459,7 @@
28409 }
28410
28411 base = build_fold_indirect_ref (base_addr);
28412 - alignment = ssize_int (TYPE_ALIGN (vectype)/BITS_PER_UNIT);
28413 + alignment = ssize_int (targetm.vectorize.vector_min_alignment (vectype));
28414
28415 if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
28416 || !misalign)
28417 @@ -1510,7 +1510,8 @@
28418 /* At this point we assume that the base is aligned. */
28419 gcc_assert (base_aligned
28420 || (TREE_CODE (base) == VAR_DECL
28421 - && DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));
28422 + && (DECL_ALIGN (base)
28423 + >= targetm.vectorize.vector_min_alignment (vectype))));
28424
28425 /* Modulo alignment. */
28426 misalign = size_binop (TRUNC_MOD_EXPR, misalign, alignment);
28427 --- a/gcc/tree-vectorizer.c
28428 +++ b/gcc/tree-vectorizer.c
28429 @@ -2868,11 +2868,13 @@
28430 vnode = vnode->next_needed)
28431 {
28432 tree vectype, decl = vnode->decl;
28433 + tree t;
28434 unsigned int alignment;
28435
28436 - if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
28437 + t = TREE_TYPE(decl);
28438 + if (TREE_CODE (t) != ARRAY_TYPE)
28439 continue;
28440 - vectype = get_vectype_for_scalar_type (TREE_TYPE (TREE_TYPE (decl)));
28441 + vectype = get_vectype_for_scalar_type (strip_array_types (t));
28442 if (!vectype)
28443 continue;
28444 alignment = TYPE_ALIGN (vectype);
28445 @@ -2887,6 +2889,7 @@
28446 {
28447 fprintf (dump_file, "Increasing alignment of decl: ");
28448 print_generic_expr (dump_file, decl, TDF_SLIM);
28449 + fprintf (dump_file, "\n");
28450 }
28451 }
28452 }
28453 --- a/gcc/tree-vect-transform.c
28454 +++ b/gcc/tree-vect-transform.c
28455 @@ -5442,7 +5442,21 @@
28456 vect_permute_store_chain(). */
28457 vec_oprnd = VEC_index (tree, result_chain, i);
28458
28459 - data_ref = build_fold_indirect_ref (dataref_ptr);
28460 + if (alignment_support_scheme == dr_aligned
28461 + && !targetm.vectorize.always_misalign(vectype))
28462 + {
28463 + data_ref = build_fold_indirect_ref (dataref_ptr);
28464 + }
28465 + else
28466 + {
28467 + /* TODO: Record actual alignment in always_misalign case. */
28468 + int mis = DR_MISALIGNMENT (first_dr);
28469 + tree tmis;
28470 + tmis = (mis == -1 ? size_zero_node : size_int (mis));
28471 + tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
28472 + data_ref =
28473 + build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
28474 + }
28475
28476 /* Arguments are ready. Create the new vector stmt. */
28477 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
28478 @@ -6621,10 +6635,15 @@
28479 {
28480 case dr_aligned:
28481 gcc_assert (aligned_access_p (first_dr));
28482 - data_ref = build_fold_indirect_ref (dataref_ptr);
28483 - break;
28484 + if (!targetm.vectorize.always_misalign(vectype))
28485 + {
28486 + data_ref = build_fold_indirect_ref (dataref_ptr);
28487 + break;
28488 + }
28489 + /* Fall through... */
28490 case dr_unaligned_supported:
28491 {
28492 + /* TODO: Record actual alignment in always_misalign case. */
28493 int mis = DR_MISALIGNMENT (first_dr);
28494 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
28495
28496 @@ -7595,7 +7614,7 @@
28497 gimple dr_stmt = DR_STMT (dr);
28498 stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
28499 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
28500 - int vectype_align = TYPE_ALIGN (vectype) / BITS_PER_UNIT;
28501 + int vectype_align = targetm.vectorize.vector_min_alignment (vectype);
28502 tree niters_type = TREE_TYPE (loop_niters);
28503 int step = 1;
28504 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
28505 --- a/gcc/unwind-dw2.c
28506 +++ b/gcc/unwind-dw2.c
28507 @@ -1414,16 +1414,12 @@
28508 /* Fill in CONTEXT for top-of-stack. The only valid registers at this
28509 level will be the return address and the CFA. */
28510
28511 -#define uw_init_context(CONTEXT) \
28512 - do \
28513 - { \
28514 - /* Do any necessary initialization to access arbitrary stack frames. \
28515 - On the SPARC, this means flushing the register windows. */ \
28516 - __builtin_unwind_init (); \
28517 - uw_init_context_1 (CONTEXT, __builtin_dwarf_cfa (), \
28518 - __builtin_return_address (0)); \
28519 - } \
28520 - while (0)
28521 +#define uw_init_context(CONTEXT) \
28522 + /* Do any necessary initialization to access arbitrary stack frames. \
28523 + On the SPARC, this means flushing the register windows. */ \
28524 + (__builtin_unwind_init (), \
28525 + uw_init_context_1 ((CONTEXT), __builtin_dwarf_cfa (), \
28526 + __builtin_return_address (0)))
28527
28528 static inline void
28529 init_dwarf_reg_size_table (void)
28530 @@ -1431,7 +1427,7 @@
28531 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
28532 }
28533
28534 -static void
28535 +static _Unwind_Reason_Code
28536 uw_init_context_1 (struct _Unwind_Context *context,
28537 void *outer_cfa, void *outer_ra)
28538 {
28539 @@ -1445,7 +1441,8 @@
28540 context->flags = EXTENDED_CONTEXT_BIT;
28541
28542 code = uw_frame_state_for (context, &fs);
28543 - gcc_assert (code == _URC_NO_REASON);
28544 + if (code != _URC_NO_REASON)
28545 + return code;
28546
28547 #if __GTHREADS
28548 {
28549 @@ -1471,6 +1468,8 @@
28550 initialization context, then we can't see it in the given
28551 call frame data. So have the initialization context tell us. */
28552 context->ra = __builtin_extract_return_addr (outer_ra);
28553 +
28554 + return _URC_NO_REASON;
28555 }
28556
28557
28558 --- a/gcc/unwind.inc
28559 +++ b/gcc/unwind.inc
28560 @@ -85,7 +85,8 @@
28561 _Unwind_Reason_Code code;
28562
28563 /* Set up this_context to describe the current stack frame. */
28564 - uw_init_context (&this_context);
28565 + code = uw_init_context (&this_context);
28566 + gcc_assert (code == _URC_NO_REASON);
28567 cur_context = this_context;
28568
28569 /* Phase 1: Search. Unwind the stack, calling the personality routine
28570 @@ -198,7 +199,8 @@
28571 struct _Unwind_Context this_context, cur_context;
28572 _Unwind_Reason_Code code;
28573
28574 - uw_init_context (&this_context);
28575 + code = uw_init_context (&this_context);
28576 + gcc_assert (code == _URC_NO_REASON);
28577 cur_context = this_context;
28578
28579 exc->private_1 = (_Unwind_Ptr) stop;
28580 @@ -221,7 +223,8 @@
28581 struct _Unwind_Context this_context, cur_context;
28582 _Unwind_Reason_Code code;
28583
28584 - uw_init_context (&this_context);
28585 + code = uw_init_context (&this_context);
28586 + gcc_assert (code == _URC_NO_REASON);
28587 cur_context = this_context;
28588
28589 /* Choose between continuing to process _Unwind_RaiseException
28590 @@ -251,7 +254,8 @@
28591 if (exc->private_1 == 0)
28592 return _Unwind_RaiseException (exc);
28593
28594 - uw_init_context (&this_context);
28595 + code = uw_init_context (&this_context);
28596 + gcc_assert (code == _URC_NO_REASON);
28597 cur_context = this_context;
28598
28599 code = _Unwind_ForcedUnwind_Phase2 (exc, &cur_context);
28600 @@ -280,7 +284,9 @@
28601 struct _Unwind_Context context;
28602 _Unwind_Reason_Code code;
28603
28604 - uw_init_context (&context);
28605 + code = uw_init_context (&context);
28606 + if (code != _URC_NO_REASON)
28607 + return _URC_FATAL_PHASE1_ERROR;
28608
28609 while (1)
28610 {
28611 --- a/gcc/unwind-sjlj.c
28612 +++ b/gcc/unwind-sjlj.c
28613 @@ -292,10 +292,11 @@
28614 uw_update_context (context, fs);
28615 }
28616
28617 -static inline void
28618 +static inline _Unwind_Reason_Code
28619 uw_init_context (struct _Unwind_Context *context)
28620 {
28621 context->fc = _Unwind_SjLj_GetContext ();
28622 + return _URC_NO_REASON;
28623 }
28624
28625 static void __attribute__((noreturn))
28626 --- a/gcc/varasm.c
28627 +++ b/gcc/varasm.c
28628 @@ -1126,11 +1126,14 @@
28629 {
28630 #ifdef DATA_ALIGNMENT
28631 unsigned int data_align = DATA_ALIGNMENT (TREE_TYPE (decl), align);
28632 +#else
28633 + unsigned int data_align = align;
28634 +#endif
28635 + data_align = alignment_for_aligned_arrays (TREE_TYPE (decl), data_align);
28636 /* Don't increase alignment too much for TLS variables - TLS space
28637 is too precious. */
28638 if (! DECL_THREAD_LOCAL_P (decl) || data_align <= BITS_PER_WORD)
28639 align = data_align;
28640 -#endif
28641 #ifdef CONSTANT_ALIGNMENT
28642 if (DECL_INITIAL (decl) != 0 && DECL_INITIAL (decl) != error_mark_node)
28643 {
28644 @@ -3196,6 +3199,10 @@
28645 set_mem_alias_set (rtl, 0);
28646 set_mem_alias_set (rtl, const_alias_set);
28647
28648 + /* We cannot share RTX'es in pool entries.
28649 + Mark this piece of RTL as required for unsharing. */
28650 + RTX_FLAG (rtl, used) = 1;
28651 +
28652 /* Set flags or add text to the name to record information, such as
28653 that it is a local symbol. If the name is changed, the macro
28654 ASM_OUTPUT_LABELREF will have to know how to strip this
28655 --- a/gcc/vmsdbgout.c
28656 +++ b/gcc/vmsdbgout.c
28657 @@ -211,6 +211,7 @@
28658 debug_nothing_int, /* handle_pch */
28659 debug_nothing_rtx, /* var_location */
28660 debug_nothing_void, /* switch_text_section */
28661 + debug_nothing_tree_tree, /* set_name */
28662 0 /* start_end_main_source_file */
28663 };
28664
28665 --- a/libcpp/directives.c
28666 +++ b/libcpp/directives.c
28667 @@ -2299,13 +2299,6 @@
28668 run_directive (pfile, type, str, count);
28669 }
28670
28671 -/* The number of errors for a given reader. */
28672 -unsigned int
28673 -cpp_errors (cpp_reader *pfile)
28674 -{
28675 - return pfile->errors;
28676 -}
28677 -
28678 /* The options structure. */
28679 cpp_options *
28680 cpp_get_options (cpp_reader *pfile)
28681 --- a/libcpp/errors.c
28682 +++ b/libcpp/errors.c
28683 @@ -28,171 +28,69 @@
28684 #include "cpplib.h"
28685 #include "internal.h"
28686
28687 -static void print_location (cpp_reader *, source_location, unsigned int);
28688 -
28689 -/* Print the logical file location (LINE, COL) in preparation for a
28690 - diagnostic. Outputs the #include chain if it has changed. A line
28691 - of zero suppresses the include stack, and outputs the program name
28692 - instead. */
28693 -static void
28694 -print_location (cpp_reader *pfile, source_location line, unsigned int col)
28695 -{
28696 - if (line == 0)
28697 - fprintf (stderr, "%s: ", progname);
28698 - else
28699 - {
28700 - const struct line_map *map;
28701 - linenum_type lin;
28702 -
28703 - map = linemap_lookup (pfile->line_table, line);
28704 - linemap_print_containing_files (pfile->line_table, map);
28705 -
28706 - lin = SOURCE_LINE (map, line);
28707 - if (col == 0)
28708 - {
28709 - col = SOURCE_COLUMN (map, line);
28710 - if (col == 0)
28711 - col = 1;
28712 - }
28713 -
28714 - if (lin == 0)
28715 - fprintf (stderr, "%s:", map->to_file);
28716 - else if (CPP_OPTION (pfile, show_column) == 0)
28717 - fprintf (stderr, "%s:%u:", map->to_file, lin);
28718 - else
28719 - fprintf (stderr, "%s:%u:%u:", map->to_file, lin, col);
28720 -
28721 - fputc (' ', stderr);
28722 - }
28723 -}
28724 -
28725 -/* Set up for a diagnostic: print the file and line, bump the error
28726 - counter, etc. SRC_LOC is the logical line number; zero means to print
28727 - at the location of the previously lexed token, which tends to be
28728 - the correct place by default. The column number can be specified either
28729 - using COLUMN or (if COLUMN==0) extracting SOURCE_COLUMN from SRC_LOC.
28730 - (This may seem redundant, but is useful when pre-scanning (cleaning) a line,
28731 - when we haven't yet verified whether the current line_map has a
28732 - big enough max_column_hint.)
28733 -
28734 - Returns 0 if the error has been suppressed. */
28735 -static int
28736 -_cpp_begin_message (cpp_reader *pfile, int code,
28737 - source_location src_loc, unsigned int column)
28738 -{
28739 - int level = CPP_DL_EXTRACT (code);
28740 -
28741 - switch (level)
28742 - {
28743 - case CPP_DL_WARNING:
28744 - case CPP_DL_PEDWARN:
28745 - if (cpp_in_system_header (pfile)
28746 - && ! CPP_OPTION (pfile, warn_system_headers))
28747 - return 0;
28748 - /* Fall through. */
28749 -
28750 - case CPP_DL_WARNING_SYSHDR:
28751 - if (CPP_OPTION (pfile, warnings_are_errors)
28752 - || (level == CPP_DL_PEDWARN && CPP_OPTION (pfile, pedantic_errors)))
28753 - {
28754 - if (CPP_OPTION (pfile, inhibit_errors))
28755 - return 0;
28756 - level = CPP_DL_ERROR;
28757 - pfile->errors++;
28758 - }
28759 - else if (CPP_OPTION (pfile, inhibit_warnings))
28760 - return 0;
28761 - break;
28762 -
28763 - case CPP_DL_ERROR:
28764 - if (CPP_OPTION (pfile, inhibit_errors))
28765 - return 0;
28766 - /* ICEs cannot be inhibited. */
28767 - case CPP_DL_ICE:
28768 - pfile->errors++;
28769 - break;
28770 - }
28771 -
28772 - print_location (pfile, src_loc, column);
28773 - if (CPP_DL_WARNING_P (level))
28774 - fputs (_("warning: "), stderr);
28775 - else if (level == CPP_DL_ICE)
28776 - fputs (_("internal error: "), stderr);
28777 - else
28778 - fputs (_("error: "), stderr);
28779 -
28780 - return 1;
28781 -}
28782 -
28783 -/* Don't remove the blank before do, as otherwise the exgettext
28784 - script will mistake this as a function definition */
28785 -#define v_message(msgid, ap) \
28786 - do { vfprintf (stderr, _(msgid), ap); putc ('\n', stderr); } while (0)
28787 -
28788 -/* Exported interface. */
28789 -
28790 /* Print an error at the location of the previously lexed token. */
28791 -void
28792 +bool
28793 cpp_error (cpp_reader * pfile, int level, const char *msgid, ...)
28794 {
28795 source_location src_loc;
28796 va_list ap;
28797 -
28798 + bool ret;
28799 +
28800 va_start (ap, msgid);
28801
28802 - if (CPP_OPTION (pfile, client_diagnostic))
28803 - pfile->cb.error (pfile, level, _(msgid), &ap);
28804 - else
28805 + if (CPP_OPTION (pfile, traditional))
28806 {
28807 - if (CPP_OPTION (pfile, traditional))
28808 - {
28809 - if (pfile->state.in_directive)
28810 - src_loc = pfile->directive_line;
28811 - else
28812 - src_loc = pfile->line_table->highest_line;
28813 - }
28814 - /* We don't want to refer to a token before the beginning of the
28815 - current run -- that is invalid. */
28816 - else if (pfile->cur_token == pfile->cur_run->base)
28817 - {
28818 - if (pfile->cur_run->prev != NULL)
28819 - src_loc = pfile->cur_run->prev->limit->src_loc;
28820 - else
28821 - src_loc = 0;
28822 - }
28823 + if (pfile->state.in_directive)
28824 + src_loc = pfile->directive_line;
28825 else
28826 - {
28827 - src_loc = pfile->cur_token[-1].src_loc;
28828 - }
28829 -
28830 - if (_cpp_begin_message (pfile, level, src_loc, 0))
28831 - v_message (msgid, ap);
28832 + src_loc = pfile->line_table->highest_line;
28833 + }
28834 + /* We don't want to refer to a token before the beginning of the
28835 + current run -- that is invalid. */
28836 + else if (pfile->cur_token == pfile->cur_run->base)
28837 + {
28838 + if (pfile->cur_run->prev != NULL)
28839 + src_loc = pfile->cur_run->prev->limit->src_loc;
28840 + else
28841 + src_loc = 0;
28842 }
28843 + else
28844 + {
28845 + src_loc = pfile->cur_token[-1].src_loc;
28846 + }
28847 +
28848 + if (!pfile->cb.error)
28849 + abort ();
28850 + ret = pfile->cb.error (pfile, level, src_loc, 0, _(msgid), &ap);
28851
28852 va_end (ap);
28853 + return ret;
28854 }
28855
28856 /* Print an error at a specific location. */
28857 -void
28858 +bool
28859 cpp_error_with_line (cpp_reader *pfile, int level,
28860 source_location src_loc, unsigned int column,
28861 const char *msgid, ...)
28862 {
28863 va_list ap;
28864 + bool ret;
28865
28866 va_start (ap, msgid);
28867
28868 - if (_cpp_begin_message (pfile, level, src_loc, column))
28869 - v_message (msgid, ap);
28870 + if (!pfile->cb.error)
28871 + abort ();
28872 + ret = pfile->cb.error (pfile, level, src_loc, column, _(msgid), &ap);
28873
28874 va_end (ap);
28875 + return ret;
28876 }
28877
28878 -void
28879 +bool
28880 cpp_errno (cpp_reader *pfile, int level, const char *msgid)
28881 {
28882 if (msgid[0] == '\0')
28883 msgid = _("stdout");
28884
28885 - cpp_error (pfile, level, "%s: %s", msgid, xstrerror (errno));
28886 + return cpp_error (pfile, level, "%s: %s", msgid, xstrerror (errno));
28887 }
28888 --- a/libcpp/files.c
28889 +++ b/libcpp/files.c
28890 @@ -488,7 +488,6 @@
28891 return file;
28892 }
28893
28894 - open_file_failed (pfile, file, angle_brackets);
28895 if (invalid_pch)
28896 {
28897 cpp_error (pfile, CPP_DL_ERROR,
28898 @@ -497,6 +496,7 @@
28899 cpp_error (pfile, CPP_DL_ERROR,
28900 "use -Winvalid-pch for more information");
28901 }
28902 + open_file_failed (pfile, file, angle_brackets);
28903 break;
28904 }
28905
28906 @@ -934,15 +934,28 @@
28907
28908 errno = file->err_no;
28909 if (print_dep && CPP_OPTION (pfile, deps.missing_files) && errno == ENOENT)
28910 - deps_add_dep (pfile->deps, file->name);
28911 + {
28912 + deps_add_dep (pfile->deps, file->name);
28913 + /* If the preprocessor output (other than dependency information) is
28914 + being used, we must also flag an error. */
28915 + if (CPP_OPTION (pfile, deps.need_preprocessor_output))
28916 + cpp_errno (pfile, CPP_DL_FATAL, file->path);
28917 + }
28918 else
28919 {
28920 - /* If we are outputting dependencies but not for this file then
28921 - don't error because we can still produce correct output. */
28922 - if (CPP_OPTION (pfile, deps.style) && ! print_dep)
28923 - cpp_errno (pfile, CPP_DL_WARNING, file->path);
28924 + /* If we are not outputting dependencies, or if we are and dependencies
28925 + were requested for this file, or if preprocessor output is needed
28926 + in addition to dependency information, this is an error.
28927 +
28928 + Otherwise (outputting dependencies but not for this file, and not
28929 + using the preprocessor output), we can still produce correct output
28930 + so it's only a warning. */
28931 + if (CPP_OPTION (pfile, deps.style) == DEPS_NONE
28932 + || print_dep
28933 + || CPP_OPTION (pfile, deps.need_preprocessor_output))
28934 + cpp_errno (pfile, CPP_DL_FATAL, file->path);
28935 else
28936 - cpp_errno (pfile, CPP_DL_ERROR, file->path);
28937 + cpp_errno (pfile, CPP_DL_WARNING, file->path);
28938 }
28939 }
28940
28941 --- a/libcpp/include/cpplib.h
28942 +++ b/libcpp/include/cpplib.h
28943 @@ -302,22 +302,9 @@
28944 /* Nonzero means print names of header files (-H). */
28945 unsigned char print_include_names;
28946
28947 - /* Nonzero means cpp_pedwarn causes a hard error. */
28948 - unsigned char pedantic_errors;
28949 -
28950 - /* Nonzero means don't print warning messages. */
28951 - unsigned char inhibit_warnings;
28952 -
28953 /* Nonzero means complain about deprecated features. */
28954 unsigned char warn_deprecated;
28955
28956 - /* Nonzero means don't suppress warnings from system headers. */
28957 - unsigned char warn_system_headers;
28958 -
28959 - /* Nonzero means don't print error messages. Has no option to
28960 - select it, but can be set by a user of cpplib (e.g. fix-header). */
28961 - unsigned char inhibit_errors;
28962 -
28963 /* Nonzero means warn if slash-star appears in a comment. */
28964 unsigned char warn_comments;
28965
28966 @@ -353,9 +340,6 @@
28967 explicitly undefined. */
28968 unsigned char warn_builtin_macro_redefined;
28969
28970 - /* Nonzero means turn warnings into errors. */
28971 - unsigned char warnings_are_errors;
28972 -
28973 /* Nonzero means we should look for header.gcc files that remap file
28974 names. */
28975 unsigned char remap;
28976 @@ -432,6 +416,10 @@
28977
28978 /* If true, no dependency is generated on the main file. */
28979 bool ignore_main_file;
28980 +
28981 + /* If true, intend to use the preprocessor output (e.g., for compilation)
28982 + in addition to the dependency info. */
28983 + bool need_preprocessor_output;
28984 } deps;
28985
28986 /* Target-specific features set by the front end or client. */
28987 @@ -450,9 +438,6 @@
28988 /* Nonzero means __STDC__ should have the value 0 in system headers. */
28989 unsigned char stdc_0_in_system_headers;
28990
28991 - /* True means error callback should be used for diagnostics. */
28992 - bool client_diagnostic;
28993 -
28994 /* True disables tokenization outside of preprocessing directives. */
28995 bool directives_only;
28996 };
28997 @@ -492,10 +477,11 @@
28998 be expanded. */
28999 cpp_hashnode * (*macro_to_expand) (cpp_reader *, const cpp_token *);
29000
29001 - /* Called to emit a diagnostic if client_diagnostic option is true.
29002 - This callback receives the translated message. */
29003 - void (*error) (cpp_reader *, int, const char *, va_list *)
29004 - ATTRIBUTE_FPTR_PRINTF(3,0);
29005 + /* Called to emit a diagnostic. This callback receives the
29006 + translated message. */
29007 + bool (*error) (cpp_reader *, int, source_location, unsigned int,
29008 + const char *, va_list *)
29009 + ATTRIBUTE_FPTR_PRINTF(5,0);
29010
29011 /* Callbacks for when a macro is expanded, or tested (whether
29012 defined or not at the time) in #ifdef, #ifndef or "defined". */
29013 @@ -697,19 +683,13 @@
29014
29015 /* Call this to finish preprocessing. If you requested dependency
29016 generation, pass an open stream to write the information to,
29017 - otherwise NULL. It is your responsibility to close the stream.
29018 -
29019 - Returns cpp_errors (pfile). */
29020 -extern int cpp_finish (cpp_reader *, FILE *deps_stream);
29021 + otherwise NULL. It is your responsibility to close the stream. */
29022 +extern void cpp_finish (cpp_reader *, FILE *deps_stream);
29023
29024 /* Call this to release the handle at the end of preprocessing. Any
29025 - use of the handle after this function returns is invalid. Returns
29026 - cpp_errors (pfile). */
29027 + use of the handle after this function returns is invalid. */
29028 extern void cpp_destroy (cpp_reader *);
29029
29030 -/* Error count. */
29031 -extern unsigned int cpp_errors (cpp_reader *);
29032 -
29033 extern unsigned int cpp_token_len (const cpp_token *);
29034 extern unsigned char *cpp_token_as_text (cpp_reader *, const cpp_token *);
29035 extern unsigned char *cpp_spell_token (cpp_reader *, const cpp_token *,
29036 @@ -835,24 +815,23 @@
29037 /* An internal consistency check failed. Prints "internal error: ",
29038 otherwise the same as CPP_DL_ERROR. */
29039 #define CPP_DL_ICE 0x04
29040 -/* Extracts a diagnostic level from an int. */
29041 -#define CPP_DL_EXTRACT(l) (l & 0xf)
29042 -/* Nonzero if a diagnostic level is one of the warnings. */
29043 -#define CPP_DL_WARNING_P(l) (CPP_DL_EXTRACT (l) >= CPP_DL_WARNING \
29044 - && CPP_DL_EXTRACT (l) <= CPP_DL_PEDWARN)
29045 +/* An informative note following a warning. */
29046 +#define CPP_DL_NOTE 0x05
29047 +/* A fatal error. */
29048 +#define CPP_DL_FATAL 0x06
29049
29050 /* Output a diagnostic of some kind. */
29051 -extern void cpp_error (cpp_reader *, int, const char *msgid, ...)
29052 +extern bool cpp_error (cpp_reader *, int, const char *msgid, ...)
29053 ATTRIBUTE_PRINTF_3;
29054
29055 /* Output a diagnostic with "MSGID: " preceding the
29056 error string of errno. No location is printed. */
29057 -extern void cpp_errno (cpp_reader *, int, const char *msgid);
29058 +extern bool cpp_errno (cpp_reader *, int, const char *msgid);
29059
29060 /* Same as cpp_error, except additionally specifies a position as a
29061 (translation unit) physical line and physical column. If the line is
29062 zero, then no location is printed. */
29063 -extern void cpp_error_with_line (cpp_reader *, int, source_location, unsigned,
29064 +extern bool cpp_error_with_line (cpp_reader *, int, source_location, unsigned,
29065 const char *msgid, ...) ATTRIBUTE_PRINTF_5;
29066
29067 /* In lex.c */
29068 --- a/libcpp/include/line-map.h
29069 +++ b/libcpp/include/line-map.h
29070 @@ -144,12 +144,6 @@
29071 extern const struct line_map *linemap_lookup
29072 (struct line_maps *, source_location);
29073
29074 -/* Print the file names and line numbers of the #include commands
29075 - which led to the map MAP, if any, to stderr. Nothing is output if
29076 - the most recently listed stack is the same as the current one. */
29077 -extern void linemap_print_containing_files (struct line_maps *,
29078 - const struct line_map *);
29079 -
29080 /* Converts a map and a source_location to source line. */
29081 #define SOURCE_LINE(MAP, LOC) \
29082 ((((LOC) - (MAP)->start_location) >> (MAP)->column_bits) + (MAP)->to_line)
29083 --- a/libcpp/init.c
29084 +++ b/libcpp/init.c
29085 @@ -616,12 +616,11 @@
29086 }
29087
29088 /* This is called at the end of preprocessing. It pops the last
29089 - buffer and writes dependency output, and returns the number of
29090 - errors.
29091 + buffer and writes dependency output.
29092
29093 Maybe it should also reset state, such that you could call
29094 cpp_start_read with a new filename to restart processing. */
29095 -int
29096 +void
29097 cpp_finish (cpp_reader *pfile, FILE *deps_stream)
29098 {
29099 /* Warn about unused macros before popping the final buffer. */
29100 @@ -636,9 +635,8 @@
29101 while (pfile->buffer)
29102 _cpp_pop_buffer (pfile);
29103
29104 - /* Don't write the deps file if there are errors. */
29105 if (CPP_OPTION (pfile, deps.style) != DEPS_NONE
29106 - && deps_stream && pfile->errors == 0)
29107 + && deps_stream)
29108 {
29109 deps_write (pfile->deps, deps_stream, 72);
29110
29111 @@ -649,8 +647,6 @@
29112 /* Report on headers that could use multiple include guards. */
29113 if (CPP_OPTION (pfile, print_include_names))
29114 _cpp_report_missing_guards (pfile);
29115 -
29116 - return pfile->errors;
29117 }
29118
29119 static void
29120 --- a/libcpp/internal.h
29121 +++ b/libcpp/internal.h
29122 @@ -388,9 +388,6 @@
29123 /* Nonzero prevents the lexer from re-using the token runs. */
29124 unsigned int keep_tokens;
29125
29126 - /* Error counter for exit code. */
29127 - unsigned int errors;
29128 -
29129 /* Buffer to hold macro definition string. */
29130 unsigned char *macro_buffer;
29131 unsigned int macro_buffer_len;
29132 --- a/libcpp/line-map.c
29133 +++ b/libcpp/line-map.c
29134 @@ -302,45 +302,6 @@
29135 return &set->maps[mn];
29136 }
29137
29138 -/* Print the file names and line numbers of the #include commands
29139 - which led to the map MAP, if any, to stderr. Nothing is output if
29140 - the most recently listed stack is the same as the current one. */
29141 -
29142 -void
29143 -linemap_print_containing_files (struct line_maps *set,
29144 - const struct line_map *map)
29145 -{
29146 - if (MAIN_FILE_P (map) || set->last_listed == map->included_from)
29147 - return;
29148 -
29149 - set->last_listed = map->included_from;
29150 - map = INCLUDED_FROM (set, map);
29151 -
29152 - fprintf (stderr, _("In file included from %s:%u"),
29153 - map->to_file, LAST_SOURCE_LINE (map));
29154 -
29155 - while (! MAIN_FILE_P (map))
29156 - {
29157 - map = INCLUDED_FROM (set, map);
29158 - /* Translators note: this message is used in conjunction
29159 - with "In file included from %s:%ld" and some other
29160 - tricks. We want something like this:
29161 -
29162 - | In file included from sys/select.h:123,
29163 - | from sys/types.h:234,
29164 - | from userfile.c:31:
29165 - | bits/select.h:45: <error message here>
29166 -
29167 - with all the "from"s lined up.
29168 - The trailing comma is at the beginning of this message,
29169 - and the trailing colon is not translated. */
29170 - fprintf (stderr, _(",\n from %s:%u"),
29171 - map->to_file, LAST_SOURCE_LINE (map));
29172 - }
29173 -
29174 - fputs (":\n", stderr);
29175 -}
29176 -
29177 /* Print an include trace, for e.g. the -H option of the preprocessor. */
29178
29179 static void
29180 --- a/libcpp/macro.c
29181 +++ b/libcpp/macro.c
29182 @@ -1833,11 +1833,13 @@
29183
29184 if (warn_of_redefinition (pfile, node, macro))
29185 {
29186 - cpp_error_with_line (pfile, CPP_DL_PEDWARN, pfile->directive_line, 0,
29187 - "\"%s\" redefined", NODE_NAME (node));
29188 + bool warned;
29189 + warned = cpp_error_with_line (pfile, CPP_DL_PEDWARN,
29190 + pfile->directive_line, 0,
29191 + "\"%s\" redefined", NODE_NAME (node));
29192
29193 - if (node->type == NT_MACRO && !(node->flags & NODE_BUILTIN))
29194 - cpp_error_with_line (pfile, CPP_DL_PEDWARN,
29195 + if (warned && node->type == NT_MACRO && !(node->flags & NODE_BUILTIN))
29196 + cpp_error_with_line (pfile, CPP_DL_NOTE,
29197 node->value.macro->line, 0,
29198 "this is the location of the previous definition");
29199 }
29200 --- /dev/null
29201 +++ b/libgcc/config/arm/t-divmod-ef
29202 @@ -0,0 +1,4 @@
29203 +# On ARM, specifying -fnon-call-exceptions will needlessly pull in
29204 +# the unwinder in simple programs which use 64-bit division. Omitting
29205 +# the option is safe.
29206 +LIB2_DIVMOD_EXCEPTION_FLAGS := -fexceptions
29207 --- /dev/null
29208 +++ b/libgcc/config/mips/t-crtfm
29209 @@ -0,0 +1,3 @@
29210 +crtfastmath.o: $(gcc_srcdir)/config/mips/crtfastmath.c
29211 + $(gcc_compile) -c $(gcc_srcdir)/config/mips/crtfastmath.c
29212 +
29213 --- a/libgcc/config/rs6000/t-ppccomm
29214 +++ b/libgcc/config/rs6000/t-ppccomm
29215 @@ -101,3 +101,63 @@
29216
29217 ncrtn$(objext): ncrtn.S
29218 $(crt_compile) -c ncrtn.S
29219 +
29220 +crtsavres$(objext): crtsavres.S
29221 + $(crt_compile) -c crtsavres.S
29222 +
29223 +crtsavfpr$(objext): crtsavfpr.S
29224 + $(crt_compile) -c crtsavfpr.S
29225 +
29226 +crtresfpr$(objext): crtresfpr.S
29227 + $(crt_compile) -c crtresfpr.S
29228 +
29229 +crtsavgpr$(objext): crtsavgpr.S
29230 + $(crt_compile) -c crtsavgpr.S
29231 +
29232 +crtresgpr$(objext): crtresgpr.S
29233 + $(crt_compile) -c crtresgpr.S
29234 +
29235 +crtresxfpr$(objext): crtresxfpr.S
29236 + $(crt_compile) -c crtresxfpr.S
29237 +
29238 +crtresxgpr$(objext): crtresxgpr.S
29239 + $(crt_compile) -c crtresxgpr.S
29240 +
29241 +e500crtres32gpr$(objext): e500crtres32gpr.S
29242 + $(crt_compile) -c e500crtres32gpr.S
29243 +
29244 +e500crtres64gpr$(objext): e500crtres64gpr.S
29245 + $(crt_compile) -c e500crtres64gpr.S
29246 +
29247 +e500crtres64gprctr$(objext): e500crtres64gprctr.S
29248 + $(crt_compile) -c e500crtres64gprctr.S
29249 +
29250 +e500crtrest32gpr$(objext): e500crtrest32gpr.S
29251 + $(crt_compile) -c e500crtrest32gpr.S
29252 +
29253 +e500crtrest64gpr$(objext): e500crtrest64gpr.S
29254 + $(crt_compile) -c e500crtrest64gpr.S
29255 +
29256 +e500crtresx32gpr$(objext): e500crtresx32gpr.S
29257 + $(crt_compile) -c e500crtresx32gpr.S
29258 +
29259 +e500crtresx64gpr$(objext): e500crtresx64gpr.S
29260 + $(crt_compile) -c e500crtresx64gpr.S
29261 +
29262 +e500crtsav32gpr$(objext): e500crtsav32gpr.S
29263 + $(crt_compile) -c e500crtsav32gpr.S
29264 +
29265 +e500crtsav64gpr$(objext): e500crtsav64gpr.S
29266 + $(crt_compile) -c e500crtsav64gpr.S
29267 +
29268 +e500crtsav64gprctr$(objext): e500crtsav64gprctr.S
29269 + $(crt_compile) -c e500crtsav64gprctr.S
29270 +
29271 +e500crtsavg32gpr$(objext): e500crtsavg32gpr.S
29272 + $(crt_compile) -c e500crtsavg32gpr.S
29273 +
29274 +e500crtsavg64gpr$(objext): e500crtsavg64gpr.S
29275 + $(crt_compile) -c e500crtsavg64gpr.S
29276 +
29277 +e500crtsavg64gprctr$(objext): e500crtsavg64gprctr.S
29278 + $(crt_compile) -c e500crtsavg64gprctr.S
29279 --- a/libgcc/config.host
29280 +++ b/libgcc/config.host
29281 @@ -203,12 +203,15 @@
29282 arm*-*-netbsd*)
29283 ;;
29284 arm*-*-linux*) # ARM GNU/Linux with ELF
29285 + tmake_file="${tmake_file} arm/t-divmod-ef"
29286 ;;
29287 arm*-*-uclinux*) # ARM ucLinux
29288 + tmake_file="${tmake_file} arm/t-divmod-ef"
29289 ;;
29290 arm*-*-ecos-elf)
29291 ;;
29292 arm*-*-eabi* | arm*-*-symbianelf* )
29293 + tmake_file="${tmake_file} arm/t-divmod-ef"
29294 ;;
29295 arm*-*-rtems*)
29296 ;;
29297 @@ -394,8 +397,12 @@
29298 mips*-*-netbsd*) # NetBSD/mips, either endian.
29299 ;;
29300 mips64*-*-linux*)
29301 + extra_parts="$extra_parts crtfastmath.o"
29302 + tmake_file="${tmake_file} mips/t-crtfm"
29303 ;;
29304 mips*-*-linux*) # Linux MIPS, either endian.
29305 + extra_parts="$extra_parts crtfastmath.o"
29306 + tmake_file="${tmake_file} mips/t-crtfm"
29307 ;;
29308 mips*-*-openbsd*)
29309 ;;
29310 @@ -419,6 +426,10 @@
29311 ;;
29312 mips64orion-*-elf* | mips64orionel-*-elf*)
29313 ;;
29314 +mips64octeon-wrs-elf* | mips64octeonel-wrs-elf*)
29315 + ;;
29316 +mips64octeon-montavista-elf*)
29317 + ;;
29318 mips*-*-rtems*)
29319 ;;
29320 mips-wrs-vxworks)
29321 --- a/libgcc/Makefile.in
29322 +++ b/libgcc/Makefile.in
29323 @@ -389,18 +389,24 @@
29324 endif
29325 endif
29326
29327 +ifeq ($(LIB2_DIVMOD_EXCEPTION_FLAGS),)
29328 +# Provide default flags for compiling divmod functions, if they haven't been
29329 +# set already by a target-specific Makefile fragment.
29330 +LIB2_DIVMOD_EXCEPTION_FLAGS := -fexceptions -fnon-call-exceptions
29331 +endif
29332 +
29333 # Build LIB2_DIVMOD_FUNCS.
29334 lib2-divmod-o = $(patsubst %,%$(objext),$(LIB2_DIVMOD_FUNCS))
29335 $(lib2-divmod-o): %$(objext): $(gcc_srcdir)/libgcc2.c
29336 $(gcc_compile) -DL$* -c $(gcc_srcdir)/libgcc2.c \
29337 - -fexceptions -fnon-call-exceptions $(vis_hide)
29338 + $(LIB2_DIVMOD_EXCEPTION_FLAGS) $(vis_hide)
29339 libgcc-objects += $(lib2-divmod-o)
29340
29341 ifeq ($(enable_shared),yes)
29342 lib2-divmod-s-o = $(patsubst %,%_s$(objext),$(LIB2_DIVMOD_FUNCS))
29343 $(lib2-divmod-s-o): %_s$(objext): $(gcc_srcdir)/libgcc2.c
29344 $(gcc_s_compile) -DL$* -c $(gcc_srcdir)/libgcc2.c \
29345 - -fexceptions -fnon-call-exceptions
29346 + $(LIB2_DIVMOD_EXCEPTION_FLAGS)
29347 libgcc-s-objects += $(lib2-divmod-s-o)
29348 endif
29349
29350 --- a/libgcc/shared-object.mk
29351 +++ b/libgcc/shared-object.mk
29352 @@ -8,11 +8,13 @@
29353
29354 ifeq ($(suffix $o),.c)
29355
29356 +c_flags-$(base)$(objext) := $(c_flags)
29357 $(base)$(objext): $o
29358 - $(gcc_compile) $(c_flags) -c $< $(vis_hide)
29359 + $(gcc_compile) $(c_flags-$@) -c $< $(vis_hide)
29360
29361 +c_flags-$(base)_s$(objext) := $(c_flags)
29362 $(base)_s$(objext): $o
29363 - $(gcc_s_compile) $(c_flags) -c $<
29364 + $(gcc_s_compile) $(c_flags-$@) -c $<
29365
29366 else
29367
29368 --- a/libgcc/static-object.mk
29369 +++ b/libgcc/static-object.mk
29370 @@ -8,8 +8,9 @@
29371
29372 ifeq ($(suffix $o),.c)
29373
29374 +c_flags-$(base)$(objext) := $(c_flags)
29375 $(base)$(objext): $o
29376 - $(gcc_compile) $(c_flags) -c $< $(vis_hide)
29377 + $(gcc_compile) $(c_flags-$@) -c $< $(vis_hide)
29378
29379 else
29380
29381 --- a/libiberty/argv.c
29382 +++ b/libiberty/argv.c
29383 @@ -119,6 +119,24 @@
29384 }
29385 }
29386
29387 +static void
29388 +consume_whitespace (const char **input)
29389 +{
29390 + while (ISSPACE (**input))
29391 + {
29392 + (*input)++;
29393 + }
29394 +}
29395 +
29396 +static int
29397 +only_whitespace (const char* input)
29398 +{
29399 + while (*input != EOS && ISSPACE (*input))
29400 + input++;
29401 +
29402 + return (*input == EOS);
29403 +}
29404 +
29405 /*
29406
29407 @deftypefn Extension char** buildargv (char *@var{sp})
29408 @@ -179,10 +197,8 @@
29409 do
29410 {
29411 /* Pick off argv[argc] */
29412 - while (ISBLANK (*input))
29413 - {
29414 - input++;
29415 - }
29416 + consume_whitespace (&input);
29417 +
29418 if ((maxargc == 0) || (argc >= (maxargc - 1)))
29419 {
29420 /* argv needs initialization, or expansion */
29421 @@ -278,10 +294,7 @@
29422 argc++;
29423 argv[argc] = NULL;
29424
29425 - while (ISSPACE (*input))
29426 - {
29427 - input++;
29428 - }
29429 + consume_whitespace (&input);
29430 }
29431 while (*input != EOS);
29432 }
29433 @@ -420,8 +433,17 @@
29434 goto error;
29435 /* Add a NUL terminator. */
29436 buffer[len] = '\0';
29437 - /* Parse the string. */
29438 - file_argv = buildargv (buffer);
29439 + /* If the file is empty or contains only whitespace, buildargv would
29440 + return a single empty argument. In this context we want no arguments,
29441 + instead. */
29442 + if (only_whitespace (buffer))
29443 + {
29444 + file_argv = (char **) xmalloc (sizeof (char *));
29445 + file_argv[0] = NULL;
29446 + }
29447 + else
29448 + /* Parse the string. */
29449 + file_argv = buildargv (buffer);
29450 /* If *ARGVP is not already dynamically allocated, copy it. */
29451 if (!argv_dynamic)
29452 {
29453 @@ -434,7 +456,7 @@
29454 }
29455 /* Count the number of arguments. */
29456 file_argc = 0;
29457 - while (file_argv[file_argc] && *file_argv[file_argc])
29458 + while (file_argv[file_argc])
29459 ++file_argc;
29460 /* Now, insert FILE_ARGV into ARGV. The "+1" below handles the
29461 NULL terminator at the end of ARGV. */
29462 --- a/libstdc++-v3/config/cpu/sh/atomicity.h
29463 +++ b/libstdc++-v3/config/cpu/sh/atomicity.h
29464 @@ -25,47 +25,48 @@
29465
29466 #ifdef __SH4A__
29467
29468 -#ifndef _GLIBCXX_ATOMICITY_H
29469 -#define _GLIBCXX_ATOMICITY_H 1
29470 +#include <ext/atomicity.h>
29471
29472 -typedef int _Atomic_word;
29473 +_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
29474
29475 -static inline _Atomic_word
29476 -__attribute__ ((__unused__))
29477 -__exchange_and_add (volatile _Atomic_word* __mem, int __val)
29478 -{
29479 - _Atomic_word __result;
29480 + typedef int _Atomic_word;
29481
29482 - __asm__ __volatile__
29483 - ("0:\n"
29484 - "\tmovli.l\t@%2,r0\n"
29485 - "\tmov\tr0,%1\n"
29486 - "\tadd\t%3,r0\n"
29487 - "\tmovco.l\tr0,@%2\n"
29488 - "\tbf\t0b"
29489 - : "+m" (*__mem), "=r" (__result)
29490 - : "r" (__mem), "rI08" (__val)
29491 - : "r0");
29492 -
29493 - return __result;
29494 -}
29495 -
29496 -
29497 -static inline void
29498 -__attribute__ ((__unused__))
29499 -__atomic_add (volatile _Atomic_word* __mem, int __val)
29500 -{
29501 - asm("0:\n"
29502 - "\tmovli.l\t@%1,r0\n"
29503 - "\tadd\t%2,r0\n"
29504 - "\tmovco.l\tr0,@%1\n"
29505 - "\tbf\t0b"
29506 - : "+m" (*__mem)
29507 - : "r" (__mem), "rI08" (__val)
29508 - : "r0");
29509 -}
29510 + _Atomic_word
29511 + __attribute__ ((__unused__))
29512 + __exchange_and_add (volatile _Atomic_word* __mem, int __val)
29513 + {
29514 + _Atomic_word __result;
29515
29516 -#endif
29517 + __asm__ __volatile__
29518 + ("0:\n"
29519 + "\tmovli.l\t@%2,r0\n"
29520 + "\tmov\tr0,%1\n"
29521 + "\tadd\t%3,r0\n"
29522 + "\tmovco.l\tr0,@%2\n"
29523 + "\tbf\t0b"
29524 + : "+m" (*__mem), "=&r" (__result)
29525 + : "r" (__mem), "rI08" (__val)
29526 + : "r0");
29527 +
29528 + return __result;
29529 + }
29530 +
29531 +
29532 + void
29533 + __attribute__ ((__unused__))
29534 + __atomic_add (volatile _Atomic_word* __mem, int __val)
29535 + {
29536 + asm("0:\n"
29537 + "\tmovli.l\t@%1,r0\n"
29538 + "\tadd\t%2,r0\n"
29539 + "\tmovco.l\tr0,@%1\n"
29540 + "\tbf\t0b"
29541 + : "+m" (*__mem)
29542 + : "r" (__mem), "rI08" (__val)
29543 + : "r0");
29544 + }
29545 +
29546 +_GLIBCXX_END_NAMESPACE
29547
29548 #else /* !__SH4A__ */
29549
29550 --- a/libstdc++-v3/libsupc++/eh_arm.cc
29551 +++ b/libstdc++-v3/libsupc++/eh_arm.cc
29552 @@ -38,7 +38,7 @@
29553 extern "C" __cxa_type_match_result
29554 __cxa_type_match(_Unwind_Exception* ue_header,
29555 const std::type_info* catch_type,
29556 - bool is_reference __attribute__((__unused__)),
29557 + bool is_reference,
29558 void** thrown_ptr_p)
29559 {
29560 bool forced_unwind = __is_gxx_forced_unwind_class(ue_header->exception_class);
29561 @@ -68,11 +68,11 @@
29562 if (throw_type->__is_pointer_p())
29563 thrown_ptr = *(void**) thrown_ptr;
29564
29565 - if (catch_type->__do_catch(throw_type, &thrown_ptr, 1))
29566 + if (catch_type->__do_catch (throw_type, &thrown_ptr, 1 + is_reference * 2))
29567 {
29568 *thrown_ptr_p = thrown_ptr;
29569
29570 - if (typeid(*catch_type) == typeid (typeid(void*)))
29571 + if (typeid (*catch_type) == typeid (typeid(void*)))
29572 {
29573 const __pointer_type_info *catch_pointer_type =
29574 static_cast<const __pointer_type_info *> (catch_type);
29575 --- a/libstdc++-v3/libsupc++/eh_personality.cc
29576 +++ b/libstdc++-v3/libsupc++/eh_personality.cc
29577 @@ -89,20 +89,22 @@
29578 // Return an element from a type table.
29579
29580 static const std::type_info*
29581 -get_ttype_entry(lsda_header_info* info, _uleb128_t i)
29582 +get_ttype_entry(lsda_header_info* info, _uleb128_t i, bool &is_ref)
29583 {
29584 _Unwind_Ptr ptr;
29585
29586 ptr = (_Unwind_Ptr) (info->TType - (i * 4));
29587 ptr = _Unwind_decode_target2(ptr);
29588
29589 - return reinterpret_cast<const std::type_info *>(ptr);
29590 + is_ref = ptr & 1;
29591 +
29592 + return reinterpret_cast<const std::type_info *>(ptr & ~1);
29593 }
29594
29595 // The ABI provides a routine for matching exception object types.
29596 typedef _Unwind_Control_Block _throw_typet;
29597 -#define get_adjusted_ptr(catch_type, throw_type, thrown_ptr_p) \
29598 - (__cxa_type_match (throw_type, catch_type, false, thrown_ptr_p) \
29599 +#define get_adjusted_ptr(catch_type, throw_type, is_ref, thrown_ptr_p) \
29600 + (__cxa_type_match (throw_type, catch_type, is_ref, thrown_ptr_p) \
29601 != ctm_failed)
29602
29603 // Return true if THROW_TYPE matches one if the filter types.
29604 @@ -118,6 +120,7 @@
29605 {
29606 const std::type_info* catch_type;
29607 _uleb128_t tmp;
29608 + bool is_ref;
29609
29610 tmp = *e;
29611
29612 @@ -129,13 +132,14 @@
29613 tmp = _Unwind_decode_target2((_Unwind_Word) e);
29614
29615 // Match a ttype entry.
29616 - catch_type = reinterpret_cast<const std::type_info*>(tmp);
29617 + is_ref = tmp & 1;
29618 + catch_type = reinterpret_cast<const std::type_info*>(tmp & ~1);
29619
29620 // ??? There is currently no way to ask the RTTI code about the
29621 // relationship between two types without reference to a specific
29622 // object. There should be; then we wouldn't need to mess with
29623 // thrown_ptr here.
29624 - if (get_adjusted_ptr(catch_type, throw_type, &thrown_ptr))
29625 + if (get_adjusted_ptr(catch_type, throw_type, is_ref, &thrown_ptr))
29626 return true;
29627
29628 // Advance to the next entry.
29629 @@ -207,7 +211,7 @@
29630 // Return an element from a type table.
29631
29632 static const std::type_info *
29633 -get_ttype_entry (lsda_header_info *info, _uleb128_t i)
29634 +get_ttype_entry (lsda_header_info *info, _uleb128_t i, bool &is_ref)
29635 {
29636 _Unwind_Ptr ptr;
29637
29638 @@ -215,7 +219,9 @@
29639 read_encoded_value_with_base (info->ttype_encoding, info->ttype_base,
29640 info->TType - i, &ptr);
29641
29642 - return reinterpret_cast<const std::type_info *>(ptr);
29643 + is_ref = ptr & 1;
29644 +
29645 + return reinterpret_cast<const std::type_info *>(ptr & ~1);
29646 }
29647
29648 // Given the thrown type THROW_TYPE, pointer to a variable containing a
29649 @@ -226,6 +232,7 @@
29650 static bool
29651 get_adjusted_ptr (const std::type_info *catch_type,
29652 const std::type_info *throw_type,
29653 + bool is_ref,
29654 void **thrown_ptr_p)
29655 {
29656 void *thrown_ptr = *thrown_ptr_p;
29657 @@ -237,7 +244,7 @@
29658 if (throw_type->__is_pointer_p ())
29659 thrown_ptr = *(void **) thrown_ptr;
29660
29661 - if (catch_type->__do_catch (throw_type, &thrown_ptr, 1))
29662 + if (catch_type->__do_catch (throw_type, &thrown_ptr, 1 + is_ref * 2))
29663 {
29664 *thrown_ptr_p = thrown_ptr;
29665 return true;
29666 @@ -267,13 +274,15 @@
29667 return false;
29668
29669 // Match a ttype entry.
29670 - catch_type = get_ttype_entry (info, tmp);
29671 + bool is_ref;
29672 +
29673 + catch_type = get_ttype_entry (info, tmp, is_ref);
29674
29675 // ??? There is currently no way to ask the RTTI code about the
29676 // relationship between two types without reference to a specific
29677 // object. There should be; then we wouldn't need to mess with
29678 // thrown_ptr here.
29679 - if (get_adjusted_ptr (catch_type, throw_type, &thrown_ptr))
29680 + if (get_adjusted_ptr (catch_type, throw_type, is_ref, &thrown_ptr))
29681 return true;
29682 }
29683 }
29684 @@ -582,14 +591,16 @@
29685 else if (ar_filter > 0)
29686 {
29687 // Positive filter values are handlers.
29688 - catch_type = get_ttype_entry (&info, ar_filter);
29689 + bool is_ref;
29690 +
29691 + catch_type = get_ttype_entry (&info, ar_filter, is_ref);
29692
29693 // Null catch type is a catch-all handler; we can catch foreign
29694 // exceptions with this. Otherwise we must match types.
29695 if (! catch_type
29696 || (throw_type
29697 && get_adjusted_ptr (catch_type, throw_type,
29698 - &thrown_ptr)))
29699 + is_ref, &thrown_ptr)))
29700 {
29701 saw_handler = true;
29702 break;
29703 --- a/libcpp/Makefile.in
29704 +++ b/libcpp/Makefile.in
29705 @@ -72,13 +72,12 @@
29706 libcpp_a_OBJS = charset.o directives.o directives-only.o errors.o \
29707 expr.o files.o identifiers.o init.o lex.o line-map.o macro.o \
29708 mkdeps.o pch.o symtab.o traditional.o
29709 -makedepend_OBJS = makedepend.o
29710
29711 libcpp_a_SOURCES = charset.c directives.c directives-only.c errors.c \
29712 expr.c files.c identifiers.c init.c lex.c line-map.c macro.c \
29713 mkdeps.c pch.c symtab.c traditional.c
29714
29715 -all: libcpp.a makedepend$(EXEEXT) $(USED_CATALOGS)
29716 +all: libcpp.a $(USED_CATALOGS)
29717
29718 .SUFFIXES:
29719 .SUFFIXES: .c .gmo .o .obj .po .pox
29720 @@ -88,12 +87,6 @@
29721 $(AR) $(ARFLAGS) libcpp.a $(libcpp_a_OBJS)
29722 $(RANLIB) libcpp.a
29723
29724 -makedepend$(EXEEXT): $(makedepend_OBJS) libcpp.a ../libiberty/libiberty.a
29725 - @rm -f makedepend$(EXEEXT)
29726 - $(CC) $(CFLAGS) $(LDFLAGS) -o makedepend$(EXEEXT) \
29727 - $(makedepend_OBJS) libcpp.a ../libiberty/libiberty.a \
29728 - $(LIBINTL) $(LIBICONV)
29729 -
29730 # Rules to rebuild the configuration
29731
29732 Makefile: $(srcdir)/Makefile.in config.status
29733 @@ -165,7 +158,7 @@
29734 -rm -f *.o
29735
29736 clean: mostlyclean
29737 - -rm -rf makedepend$(EXEEXT) libcpp.a $(srcdir)/autom4te.cache
29738 + -rm -rf libcpp.a $(srcdir)/autom4te.cache
29739
29740 distclean: clean
29741 -rm -f config.h stamp-h1 config.status config.cache config.log \
29742 @@ -247,7 +240,7 @@
29743 sed 's:$(srcdir)/::g' <po/$(PACKAGE).pot.tmp >po/$(PACKAGE).pot
29744 rm po/$(PACKAGE).pot.tmp
29745
29746 -TAGS_SOURCES = $(libcpp_a_SOURCES) makedepend.c internal.h ucnid.h \
29747 +TAGS_SOURCES = $(libcpp_a_SOURCES) internal.h ucnid.h \
29748 include/line-map.h include/symtab.h include/cpp-id-data.h \
29749 include/cpplib.h include/mkdeps.h system.h
29750
29751 @@ -259,7 +252,7 @@
29752 .NOEXPORT:
29753
29754 # Dependencies
29755 --include $(patsubst %.o, $(DEPDIR)/%.Po, $(libcpp_a_OBJS) $(makedepend_OBJS))
29756 +-include $(patsubst %.o, $(DEPDIR)/%.Po, $(libcpp_a_OBJS))
29757
29758 # Dependencies on generated headers have to be explicit.
29759 init.o: localedir.h