3 @@ -10779,7 +10779,7 @@ validate_arglist (const_tree callexpr, .
7 - code = va_arg (ap, enum tree_code);
8 + code = va_arg (ap, int);
14 @@ -3496,7 +3496,7 @@ emit_library_call_value_1 (int retval, r
15 for (; count < nargs; count++)
17 rtx val = va_arg (p, rtx);
18 - enum machine_mode mode = va_arg (p, enum machine_mode);
19 + enum machine_mode mode = va_arg (p, int);
21 /* We cannot convert the arg value to the mode the library wants here;
22 must do it earlier where we know the signedness of the arg. */
24 +++ b/gcc/config/avr32/avr32.c
27 + Target hooks and helper functions for AVR32.
28 + Copyright 2003-2006 Atmel Corporation.
30 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
31 + Initial porting by Anders Ådland.
33 + This file is part of GCC.
35 + This program is free software; you can redistribute it and/or modify
36 + it under the terms of the GNU General Public License as published by
37 + the Free Software Foundation; either version 2 of the License, or
38 + (at your option) any later version.
40 + This program is distributed in the hope that it will be useful,
41 + but WITHOUT ANY WARRANTY; without even the implied warranty of
42 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
43 + GNU General Public License for more details.
45 + You should have received a copy of the GNU General Public License
46 + along with this program; if not, write to the Free Software
47 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
51 +#include "coretypes.h"
57 +#include "hard-reg-set.h"
59 +#include "insn-config.h"
60 +#include "conditions.h"
62 +#include "insn-attr.h"
65 +#include "function.h"
72 +#include "c-pragma.h"
73 +#include "integrate.h"
75 +#include "langhooks.h"
80 +#include "target-def.h"
84 +/* Forward definitions of types. */
85 +typedef struct minipool_node Mnode;
86 +typedef struct minipool_fixup Mfix;
88 +/* Obstack for minipool constant handling. */
89 +static struct obstack minipool_obstack;
90 +static char *minipool_startobj;
91 +static rtx minipool_vector_label;
93 +/* True if we are currently building a constant table. */
94 +int making_const_table;
96 +/* Some forward function declarations */
97 +static unsigned long avr32_isr_value (tree);
98 +static unsigned long avr32_compute_func_type (void);
99 +static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
100 +static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
101 +static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
102 + int flags, bool * no_add_attrs);
103 +static void avr32_reorg (void);
104 +bool avr32_return_in_msb (tree type);
105 +bool avr32_vector_mode_supported (enum machine_mode mode);
106 +static void avr32_init_libfuncs (void);
110 +avr32_add_gc_roots (void)
112 + gcc_obstack_init (&minipool_obstack);
113 + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
117 +/* List of all known AVR32 parts */
118 +static const struct part_type_s avr32_part_types[] = {
119 + /* name, part_type, architecture type, macro */
120 + {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
121 + {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
122 + {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
123 + {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
124 + {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
125 + {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0128__"},
126 + {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0256__"},
127 + {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0512__"},
128 + {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A0512ES__"},
129 + {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1128__"},
130 + {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1256__"},
131 + {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1512__"},
132 + {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A1512ES__"},
133 + {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL, "__AVR32_UC3A3256S__"},
134 + {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364__"},
135 + {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364S__"},
136 + {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128__"},
137 + {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128S__"},
138 + {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256__"},
139 + {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256S__"},
140 + {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B064__"},
141 + {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0128__"},
142 + {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256__"},
143 + {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256ES__"},
144 + {"uc3b0512revc", PART_TYPE_AVR32_UC3B0512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512REVC__"},
145 + {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B164__"},
146 + {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1128__"},
147 + {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256__"},
148 + {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256ES__"},
149 + {"uc3b1512revc", PART_TYPE_AVR32_UC3B1512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512REVC__"},
150 + {"uc3c0512c", PART_TYPE_AVR32_UC3C0512C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0512C__"},
151 + {"uc3c0256c", PART_TYPE_AVR32_UC3C0256C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0256C__"},
152 + {"uc3c0128c", PART_TYPE_AVR32_UC3C0128C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0128C__"},
153 + {"uc3c064c", PART_TYPE_AVR32_UC3C064C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C064C__"},
154 + {"uc3c1512c", PART_TYPE_AVR32_UC3C1512C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1512C__"},
155 + {"uc3c1256c", PART_TYPE_AVR32_UC3C1256C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1256C__"},
156 + {"uc3c1128c", PART_TYPE_AVR32_UC3C1128C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1128C__"},
157 + {"uc3c164c", PART_TYPE_AVR32_UC3C164C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C164C__"},
158 + {"uc3c2512c", PART_TYPE_AVR32_UC3C2512C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2512C__"},
159 + {"uc3c2256c", PART_TYPE_AVR32_UC3C2256C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2256C__"},
160 + {"uc3c2128c", PART_TYPE_AVR32_UC3C2128C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2128C__"},
161 + {"uc3c264c", PART_TYPE_AVR32_UC3C264C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C264C__"},
162 + {"uc3l064", PART_TYPE_AVR32_UC3L064, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064__"},
163 + {"uc3l032", PART_TYPE_AVR32_UC3L032, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L032__"},
164 + {"uc3l016", PART_TYPE_AVR32_UC3L016, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L016__"},
168 +/* List of all known AVR32 architectures */
169 +static const struct arch_type_s avr32_arch_types[] = {
170 + /* name, architecture type, microarchitecture type, feature flags, macro */
171 + {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
172 + (FLAG_AVR32_HAS_DSP
173 + | FLAG_AVR32_HAS_SIMD
174 + | FLAG_AVR32_HAS_UNALIGNED_WORD
175 + | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
176 + | FLAG_AVR32_HAS_CACHES),
178 + {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
179 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
181 + {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
182 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
183 + | FLAG_AVR32_HAS_V2_INSNS),
185 + {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
186 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
187 + | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
189 + {"ucr3", ARCH_TYPE_AVR32_UCR3, UARCH_TYPE_AVR32A,
190 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
191 + | FLAG_AVR32_HAS_V2_INSNS),
193 + {NULL, 0, 0, 0, NULL}
196 +/* Default arch name */
197 +const char *avr32_arch_name = "none";
198 +const char *avr32_part_name = "none";
200 +const struct part_type_s *avr32_part;
201 +const struct arch_type_s *avr32_arch;
204 +/* Set default target_flags. */
205 +#undef TARGET_DEFAULT_TARGET_FLAGS
206 +#define TARGET_DEFAULT_TARGET_FLAGS \
207 + (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
210 +avr32_optimization_options (int level,
212 + if (AVR32_ALWAYS_PIC)
215 + /* Enable section anchors if optimization is enabled. */
216 + if (level > 0 || size)
217 + flag_section_anchors = 1;
220 +/* Override command line options */
222 +avr32_override_options (void)
224 + const struct part_type_s *part;
225 + const struct arch_type_s *arch;
227 + /* Add backward compatibility. */
228 + if (strcmp ("uc", avr32_arch_name)== 0)
230 + fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
231 + "Please use '-march=ucr1' instead. "
232 + "Converting to arch 'ucr1'\n",
234 + avr32_arch_name="ucr1";
237 + /* Check if arch type is set. */
238 + for (arch = avr32_arch_types; arch->name; arch++)
240 + if (strcmp (arch->name, avr32_arch_name) == 0)
245 + if (!arch->name && strcmp("none", avr32_arch_name) != 0)
247 + fprintf (stderr, "Unknown arch `%s' specified\n"
248 + "Known arch names:\n"
249 + "\tuc (deprecated)\n",
251 + for (arch = avr32_arch_types; arch->name; arch++)
252 + fprintf (stderr, "\t%s\n", arch->name);
253 + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
256 + /* Check if part type is set. */
257 + for (part = avr32_part_types; part->name; part++)
258 + if (strcmp (part->name, avr32_part_name) == 0)
264 + fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
266 + for (part = avr32_part_types; part->name; part++)
268 + if (strcmp("none", part->name) != 0)
269 + fprintf (stderr, "\t%s\n", part->name);
271 + /* Set default to NONE*/
272 + avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
275 + /* NB! option -march= overrides option -mpart
276 + * if both are used at the same time */
278 + avr32_arch = &avr32_arch_types[avr32_part->arch_type];
280 + /* If optimization level is two or greater, then align start of loops to a
281 + word boundary since this will allow folding the first insn of the loop.
282 + Do this only for targets supporting branch prediction. */
283 + if (optimize >= 2 && TARGET_BRANCH_PRED)
287 + /* Enable fast-float library if unsafe math optimizations
289 + if (flag_unsafe_math_optimizations)
290 + target_flags |= MASK_FAST_FLOAT;
292 + /* Check if we should set avr32_imm_in_const_pool
293 + based on if caches are present or not. */
294 + if ( avr32_imm_in_const_pool == -1 )
296 + if ( TARGET_CACHES )
297 + avr32_imm_in_const_pool = 1;
299 + avr32_imm_in_const_pool = 0;
305 + avr32_add_gc_roots ();
310 +If defined, a function that outputs the assembler code for entry to a
311 +function. The prologue is responsible for setting up the stack frame,
312 +initializing the frame pointer register, saving registers that must be
313 +saved, and allocating size additional bytes of storage for the
314 +local variables. size is an integer. file is a stdio
315 +stream to which the assembler code should be output.
317 +The label for the beginning of the function need not be output by this
318 +macro. That has already been done when the macro is run.
320 +To determine which registers to save, the macro can refer to the array
321 +regs_ever_live: element r is nonzero if hard register
322 +r is used anywhere within the function. This implies the function
323 +prologue should save register r, provided it is not one of the
324 +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
327 +On machines that have ``register windows'', the function entry code does
328 +not save on the stack the registers that are in the windows, even if
329 +they are supposed to be preserved by function calls; instead it takes
330 +appropriate steps to ``push'' the register stack, if any non-call-used
331 +registers are used in the function.
333 +On machines where functions may or may not have frame-pointers, the
334 +function entry code must vary accordingly; it must set up the frame
335 +pointer if one is wanted, and not otherwise. To determine whether a
336 +frame pointer is wanted, the macro can refer to the variable
337 +frame_pointer_needed. The variable's value will be 1 at run
338 +time in a function that needs a frame pointer. (see Elimination).
340 +The function entry code is responsible for allocating any stack space
341 +required for the function. This stack space consists of the regions
342 +listed below. In most cases, these regions are allocated in the
343 +order listed, with the last listed region closest to the top of the
344 +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
345 +the highest address if it is not defined). You can use a different order
346 +for a machine if doing so is more convenient or required for
347 +compatibility reasons. Except in cases where required by standard
348 +or by a debugger, there is no reason why the stack layout used by GCC
349 +need agree with that used by other compilers for a machine.
352 +#undef TARGET_ASM_FUNCTION_PROLOGUE
353 +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
356 +#undef TARGET_DEFAULT_SHORT_ENUMS
357 +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
359 +#undef TARGET_PROMOTE_FUNCTION_ARGS
360 +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
362 +#undef TARGET_PROMOTE_FUNCTION_RETURN
363 +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
365 +#undef TARGET_PROMOTE_PROTOTYPES
366 +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
368 +#undef TARGET_MUST_PASS_IN_STACK
369 +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
371 +#undef TARGET_PASS_BY_REFERENCE
372 +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
374 +#undef TARGET_STRICT_ARGUMENT_NAMING
375 +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
377 +#undef TARGET_VECTOR_MODE_SUPPORTED_P
378 +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
380 +#undef TARGET_RETURN_IN_MEMORY
381 +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
383 +#undef TARGET_RETURN_IN_MSB
384 +#define TARGET_RETURN_IN_MSB avr32_return_in_msb
386 +#undef TARGET_ARG_PARTIAL_BYTES
387 +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
389 +#undef TARGET_STRIP_NAME_ENCODING
390 +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
392 +#define streq(string1, string2) (strcmp (string1, string2) == 0)
394 +#undef TARGET_NARROW_VOLATILE_BITFIELD
395 +#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
397 +#undef TARGET_ATTRIBUTE_TABLE
398 +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
400 +#undef TARGET_COMP_TYPE_ATTRIBUTES
401 +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
404 +#undef TARGET_RTX_COSTS
405 +#define TARGET_RTX_COSTS avr32_rtx_costs
407 +#undef TARGET_CANNOT_FORCE_CONST_MEM
408 +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
410 +#undef TARGET_ASM_INTEGER
411 +#define TARGET_ASM_INTEGER avr32_assemble_integer
413 +#undef TARGET_FUNCTION_VALUE
414 +#define TARGET_FUNCTION_VALUE avr32_function_value
416 +#undef TARGET_MIN_ANCHOR_OFFSET
417 +#define TARGET_MIN_ANCHOR_OFFSET (0)
419 +#undef TARGET_MAX_ANCHOR_OFFSET
420 +#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
424 + * Switches to the appropriate section for output of constant pool
425 + * entry x in mode. You can assume that x is some kind of constant in
426 + * RTL. The argument mode is redundant except in the case of a
427 + * const_int rtx. Select the section by calling readonly_data_section
428 + * or one of the alternatives for other sections. align is the
429 + * constant alignment in bits.
431 + * The default version of this function takes care of putting symbolic
432 + * constants in flag_pic mode in data_section and everything else in
433 + * readonly_data_section.
435 +//#undef TARGET_ASM_SELECT_RTX_SECTION
436 +//#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
440 + * If non-null, this hook performs a target-specific pass over the
441 + * instruction stream. The compiler will run it at all optimization
442 + * levels, just before the point at which it normally does
443 + * delayed-branch scheduling.
445 + * The exact purpose of the hook varies from target to target. Some
446 + * use it to do transformations that are necessary for correctness,
447 + * such as laying out in-function constant pools or avoiding hardware
448 + * hazards. Others use it as an opportunity to do some
449 + * machine-dependent optimizations.
451 + * You need not implement the hook if it has nothing to do. The
452 + * default definition is null.
454 +#undef TARGET_MACHINE_DEPENDENT_REORG
455 +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
457 +/* Target hook for assembling integer objects.
458 + Need to handle integer vectors */
460 +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
462 + if (avr32_vector_mode_supported (GET_MODE (x)))
466 + if (GET_CODE (x) != CONST_VECTOR)
469 + units = CONST_VECTOR_NUNITS (x);
471 + switch (GET_MODE (x))
483 + for (i = 0; i < units; i++)
487 + elt = CONST_VECTOR_ELT (x, i);
488 + assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
494 + return default_assemble_integer (x, size, aligned_p);
498 + * This target hook describes the relative costs of RTL expressions.
500 + * The cost may depend on the precise form of the expression, which is
501 + * available for examination in x, and the rtx code of the expression
502 + * in which it is contained, found in outer_code. code is the
503 + * expression code--redundant, since it can be obtained with GET_CODE
506 + * In implementing this hook, you can use the construct COSTS_N_INSNS
507 + * (n) to specify a cost equal to n fast instructions.
509 + * On entry to the hook, *total contains a default estimate for the
510 + * cost of the expression. The hook should modify this value as
511 + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
512 + * for multiplications, COSTS_N_INSNS (7) for division and modulus
513 + * operations, and COSTS_N_INSNS (1) for all other operations.
515 + * When optimizing for code size, i.e. when optimize_size is non-zero,
516 + * this target hook should be used to estimate the relative size cost
517 + * of an expression, again relative to COSTS_N_INSNS.
519 + * The hook returns true when all subexpressions of x have been
520 + * processed, and false when rtx_cost should recurse.
523 +/* Worker routine for avr32_rtx_costs. */
525 +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
526 + enum rtx_code outer ATTRIBUTE_UNUSED)
528 + enum machine_mode mode = GET_MODE (x);
530 + switch (GET_CODE (x))
533 + /* Using pre decrement / post increment memory operations on the
534 + avr32_uc architecture means that two writebacks must be performed
535 + and hence two cycles are needed. */
537 + && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
539 + && (GET_CODE (XEXP (x, 0)) == PRE_DEC
540 + || GET_CODE (XEXP (x, 0)) == POST_INC))
541 + return COSTS_N_INSNS (5);
543 + /* Memory costs quite a lot for the first word, but subsequent words
544 + load at the equivalent of a single insn each. */
545 + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
546 + return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
548 + return COSTS_N_INSNS (4);
551 + /* These are valid for the pseudo insns: lda.w and call which operates
552 + on direct addresses. We assume that the cost of a lda.w is the same
553 + as the cost of a ld.w insn. */
554 + return (outer == SET) ? COSTS_N_INSNS (4) : COSTS_N_INSNS (1);
559 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
563 + if (mode == TImode)
564 + return COSTS_N_INSNS (100);
566 + if (mode == DImode)
567 + return COSTS_N_INSNS (10);
568 + return COSTS_N_INSNS (4);
573 + if (mode == TImode)
574 + return COSTS_N_INSNS (10);
576 + if (mode == DImode)
577 + return COSTS_N_INSNS (4);
578 + return COSTS_N_INSNS (1);
584 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
585 + return COSTS_N_INSNS (100);
587 + if (mode == TImode)
588 + return COSTS_N_INSNS (50);
590 + if (mode == DImode)
591 + return COSTS_N_INSNS (2);
592 + return COSTS_N_INSNS (1);
596 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
597 + return COSTS_N_INSNS (300);
599 + if (mode == TImode)
600 + return COSTS_N_INSNS (16);
602 + if (mode == DImode)
603 + return COSTS_N_INSNS (4);
605 + if (mode == HImode)
606 + return COSTS_N_INSNS (2);
608 + return COSTS_N_INSNS (3);
611 + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
612 + return COSTS_N_INSNS (4);
613 + return COSTS_N_INSNS (1);
616 + /* Sign/Zero extensions of registers cost quite a lot since these
617 + instructions only take one register operand, which means that gcc
618 + often must insert some move instructions */
619 + if (mode == QImode || mode == HImode)
620 + return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
621 + return COSTS_N_INSNS (4);
623 + /* divmod operations */
624 + if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
625 + || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
627 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
631 + return COSTS_N_INSNS (1);
636 +avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
638 + *total = avr32_rtx_costs_1 (x, code, outer_code);
644 +avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
646 + /* Do not want symbols in the constant pool when compiling pic or if using
647 + address pseudo instructions. */
648 + return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
649 + && avr32_find_symbol (x) != NULL_RTX);
653 +/* Table of machine attributes. */
654 +const struct attribute_spec avr32_attribute_table[] = {
655 + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
656 + /* Interrupt Service Routines have special prologue and epilogue
658 + {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
659 + {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
660 + {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
661 + {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
662 + {NULL, 0, 0, false, false, false, NULL}
668 + const char *const arg;
669 + const unsigned long return_value;
673 +static const isr_attribute_arg isr_attribute_args[] = {
674 + {"FULL", AVR32_FT_ISR_FULL},
675 + {"full", AVR32_FT_ISR_FULL},
676 + {"HALF", AVR32_FT_ISR_HALF},
677 + {"half", AVR32_FT_ISR_HALF},
678 + {"NONE", AVR32_FT_ISR_NONE},
679 + {"none", AVR32_FT_ISR_NONE},
680 + {"UNDEF", AVR32_FT_ISR_NONE},
681 + {"undef", AVR32_FT_ISR_NONE},
682 + {"SWI", AVR32_FT_ISR_NONE},
683 + {"swi", AVR32_FT_ISR_NONE},
684 + {NULL, AVR32_FT_ISR_NONE}
687 +/* Returns the (interrupt) function type of the current
688 + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */
690 +static unsigned long
691 +avr32_isr_value (tree argument)
693 + const isr_attribute_arg *ptr;
696 + /* No argument - default to ISR_NONE. */
697 + if (argument == NULL_TREE)
698 + return AVR32_FT_ISR_NONE;
700 + /* Get the value of the argument. */
701 + if (TREE_VALUE (argument) == NULL_TREE
702 + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
703 + return AVR32_FT_UNKNOWN;
705 + arg = TREE_STRING_POINTER (TREE_VALUE (argument));
707 + /* Check it against the list of known arguments. */
708 + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
709 + if (streq (arg, ptr->arg))
710 + return ptr->return_value;
712 + /* An unrecognized interrupt type. */
713 + return AVR32_FT_UNKNOWN;
719 +These hooks specify assembly directives for creating certain kinds
720 +of integer object. The TARGET_ASM_BYTE_OP directive creates a
721 +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
722 +aligned two-byte object, and so on. Any of the hooks may be
723 +NULL, indicating that no suitable directive is available.
725 +The compiler will print these strings at the start of a new line,
726 +followed immediately by the object's initial value. In most cases,
727 +the string should contain a tab, a pseudo-op, and then another tab.
729 +#undef TARGET_ASM_BYTE_OP
730 +#define TARGET_ASM_BYTE_OP "\t.byte\t"
731 +#undef TARGET_ASM_ALIGNED_HI_OP
732 +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
733 +#undef TARGET_ASM_ALIGNED_SI_OP
734 +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
735 +#undef TARGET_ASM_ALIGNED_DI_OP
736 +#define TARGET_ASM_ALIGNED_DI_OP NULL
737 +#undef TARGET_ASM_ALIGNED_TI_OP
738 +#define TARGET_ASM_ALIGNED_TI_OP NULL
739 +#undef TARGET_ASM_UNALIGNED_HI_OP
740 +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
741 +#undef TARGET_ASM_UNALIGNED_SI_OP
742 +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
743 +#undef TARGET_ASM_UNALIGNED_DI_OP
744 +#define TARGET_ASM_UNALIGNED_DI_OP NULL
745 +#undef TARGET_ASM_UNALIGNED_TI_OP
746 +#define TARGET_ASM_UNALIGNED_TI_OP NULL
748 +#undef TARGET_ASM_OUTPUT_MI_THUNK
749 +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
751 +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
752 +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
755 +avr32_output_mi_thunk (FILE * file,
756 + tree thunk ATTRIBUTE_UNUSED,
757 + HOST_WIDE_INT delta,
758 + HOST_WIDE_INT vcall_offset, tree function)
760 + int mi_delta = delta;
762 + (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ?
763 + INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
766 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
769 + fputs ("\tpushm\tlr\n", file);
775 + if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
777 + fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
781 + /* Immediate is larger than k21; we must create a temp register by
782 + pushing a register to the stack. */
783 + fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
784 + fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
785 + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
790 + if (vcall_offset != 0)
792 + fprintf (file, "\tld.w\tlr, %s[0]\n", reg_names[this_regno]);
793 + fprintf (file, "\tld.w\tlr, lr[%i]\n", (int) vcall_offset);
794 + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
798 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
801 + fputs ("\tpopm\tlr\n", file);
804 + /* Jump to the function. We assume that we can use an rjmp since the
805 + function to jump to is local and probably not too far away from
806 + the thunk. If this assumption proves to be wrong we could implement
807 + this jump by calculating the offset between the jump source and destination
808 + and put this in the constant pool and then perform an add to pc.
809 + This would also be legitimate PIC code. But for now we hope that an rjmp
810 + will be sufficient...
812 + fputs ("\trjmp\t", file);
813 + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
814 + fputc ('\n', file);
818 +/* Implements target hook vector_mode_supported. */
820 +avr32_vector_mode_supported (enum machine_mode mode)
822 + if ((mode == V2HImode) || (mode == V4QImode))
829 +#undef TARGET_INIT_LIBFUNCS
830 +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
832 +#undef TARGET_INIT_BUILTINS
833 +#define TARGET_INIT_BUILTINS avr32_init_builtins
835 +#undef TARGET_EXPAND_BUILTIN
836 +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
838 +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
839 + void_ftype_ptr_int;
840 +tree void_ftype_int, void_ftype_void, int_ftype_ptr_int;
841 +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
842 + short_ftype_short_short;
843 +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
844 +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
845 +tree longlong_ftype_int_int, void_ftype_int_int_longlong;
846 +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
847 +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
849 +#define def_builtin(NAME, TYPE, CODE) \
850 + add_builtin_function ((NAME), (TYPE), (CODE), \
851 + BUILT_IN_MD, NULL, NULL_TREE)
853 +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
857 + add_builtin_function ((NAME), (TYPE), (CODE), \
858 + BUILT_IN_MD, NULL, NULL_TREE); \
862 +struct builtin_description
864 + const unsigned int mask;
865 + const enum insn_code icode;
866 + const char *const name;
868 + const enum rtx_code comparison;
869 + const unsigned int flag;
873 +static const struct builtin_description bdesc_2arg[] = {
874 +#define DSP_BUILTIN(code, builtin, ftype) \
875 + { 1, CODE_FOR_##code, "__builtin_" #code , \
876 + AVR32_BUILTIN_##builtin, 0, 0, ftype }
878 + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
879 + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
880 + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
881 + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
882 + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
883 + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
884 + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
885 + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
886 + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
887 + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
888 + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
893 +avr32_init_builtins (void)
896 + const struct builtin_description *d;
897 + tree endlink = void_list_node;
898 + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
899 + tree longlong_endlink =
900 + tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
901 + tree short_endlink =
902 + tree_cons (NULL_TREE, short_integer_type_node, endlink);
903 + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
905 + /* int func (int) */
906 + int_ftype_int = build_function_type (integer_type_node, int_endlink);
908 + /* short func (short) */
910 + = build_function_type (short_integer_type_node, short_endlink);
912 + /* short func (short, short) */
913 + short_ftype_short_short
914 + = build_function_type (short_integer_type_node,
915 + tree_cons (NULL_TREE, short_integer_type_node,
918 + /* long long func (long long, short, short) */
919 + longlong_ftype_longlong_short_short
920 + = build_function_type (long_long_integer_type_node,
921 + tree_cons (NULL_TREE, long_long_integer_type_node,
922 + tree_cons (NULL_TREE,
923 + short_integer_type_node,
926 + /* long long func (short, short) */
927 + longlong_ftype_short_short
928 + = build_function_type (long_long_integer_type_node,
929 + tree_cons (NULL_TREE, short_integer_type_node,
932 + /* int func (int, int) */
934 + = build_function_type (integer_type_node,
935 + tree_cons (NULL_TREE, integer_type_node,
938 + /* long long func (int, int) */
939 + longlong_ftype_int_int
940 + = build_function_type (long_long_integer_type_node,
941 + tree_cons (NULL_TREE, integer_type_node,
944 + /* long long int func (long long, int, short) */
945 + longlong_ftype_longlong_int_short
946 + = build_function_type (long_long_integer_type_node,
947 + tree_cons (NULL_TREE, long_long_integer_type_node,
948 + tree_cons (NULL_TREE, integer_type_node,
951 + /* long long int func (int, short) */
952 + longlong_ftype_int_short
953 + = build_function_type (long_long_integer_type_node,
954 + tree_cons (NULL_TREE, integer_type_node,
957 + /* int func (int, short, short) */
958 + int_ftype_int_short_short
959 + = build_function_type (integer_type_node,
960 + tree_cons (NULL_TREE, integer_type_node,
961 + tree_cons (NULL_TREE,
962 + short_integer_type_node,
965 + /* int func (short, short) */
966 + int_ftype_short_short
967 + = build_function_type (integer_type_node,
968 + tree_cons (NULL_TREE, short_integer_type_node,
971 + /* int func (int, short) */
972 + int_ftype_int_short
973 + = build_function_type (integer_type_node,
974 + tree_cons (NULL_TREE, integer_type_node,
977 + /* void func (int, int) */
979 + = build_function_type (void_type_node,
980 + tree_cons (NULL_TREE, integer_type_node,
983 + /* void func (int, int, int) */
984 + void_ftype_int_int_int
985 + = build_function_type (void_type_node,
986 + tree_cons (NULL_TREE, integer_type_node,
987 + tree_cons (NULL_TREE, integer_type_node,
990 + /* void func (int, int, long long) */
991 + void_ftype_int_int_longlong
992 + = build_function_type (void_type_node,
993 + tree_cons (NULL_TREE, integer_type_node,
994 + tree_cons (NULL_TREE, integer_type_node,
995 + longlong_endlink)));
997 + /* void func (int, int, int, int, int) */
998 + void_ftype_int_int_int_int_int
999 + = build_function_type (void_type_node,
1000 + tree_cons (NULL_TREE, integer_type_node,
1001 + tree_cons (NULL_TREE, integer_type_node,
1002 + tree_cons (NULL_TREE,
1003 + integer_type_node,
1006 + integer_type_node,
1009 + /* void func (void *, int) */
1010 + void_ftype_ptr_int
1011 + = build_function_type (void_type_node,
1012 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1014 + /* void func (int) */
1015 + void_ftype_int = build_function_type (void_type_node, int_endlink);
1017 + /* void func (void) */
1018 + void_ftype_void = build_function_type (void_type_node, void_endlink);
1020 + /* int func (void) */
1021 + int_ftype_void = build_function_type (integer_type_node, void_endlink);
1023 + /* int func (void *, int) */
1025 + = build_function_type (integer_type_node,
1026 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1028 + /* int func (int, int, int) */
1029 + int_ftype_int_int_int
1030 + = build_function_type (integer_type_node,
1031 + tree_cons (NULL_TREE, integer_type_node,
1032 + tree_cons (NULL_TREE, integer_type_node,
1035 + /* Initialize avr32 builtins. */
1036 + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
1037 + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
1038 + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
1039 + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
1040 + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
1041 + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
1042 + def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
1043 + def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
1044 + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
1045 + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
1046 + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
1047 + def_builtin ("__builtin_breakpoint", void_ftype_void,
1048 + AVR32_BUILTIN_BREAKPOINT);
1049 + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
1050 + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
1051 + def_builtin ("__builtin_bswap_16", short_ftype_short,
1052 + AVR32_BUILTIN_BSWAP16);
1053 + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
1054 + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
1055 + AVR32_BUILTIN_COP);
1056 + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
1057 + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
1058 + AVR32_BUILTIN_MVRC_W);
1059 + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
1060 + AVR32_BUILTIN_MVCR_D);
1061 + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
1062 + AVR32_BUILTIN_MVRC_D);
1063 + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
1064 + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
1065 + def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
1066 + AVR32_BUILTIN_SATRNDS);
1067 + def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
1068 + AVR32_BUILTIN_SATRNDU);
1069 + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
1070 + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
1071 + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
1072 + AVR32_BUILTIN_MACSATHH_W);
1073 + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
1074 + AVR32_BUILTIN_MACWH_D);
1075 + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
1076 + AVR32_BUILTIN_MACHH_D);
1078 + /* Add all builtins that are more or less simple operations on two
1080 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1082 + /* Use one of the operands; the target can have a different mode for
1083 + mask-generating compares. */
1088 + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
1093 +/* Subroutine of avr32_expand_builtin to take care of binop insns. */
1096 +avr32_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
1099 + tree arg0 = CALL_EXPR_ARG (exp,0);
1100 + tree arg1 = CALL_EXPR_ARG (exp,1);
1101 + rtx op0 = expand_normal (arg0);
1102 + rtx op1 = expand_normal (arg1);
1103 + enum machine_mode tmode = insn_data[icode].operand[0].mode;
1104 + enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1105 + enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1108 + || GET_MODE (target) != tmode
1109 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1110 + target = gen_reg_rtx (tmode);
1112 + /* In case the insn wants input operands in modes different from the
1114 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1116 + /* If op0 is already a reg we must cast it to the correct mode. */
1118 + op0 = convert_to_mode (mode0, op0, 1);
1120 + op0 = copy_to_mode_reg (mode0, op0);
1122 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1124 + /* If op1 is already a reg we must cast it to the correct mode. */
1126 + op1 = convert_to_mode (mode1, op1, 1);
1128 + op1 = copy_to_mode_reg (mode1, op1);
1130 + pat = GEN_FCN (icode) (target, op0, op1);
1137 +/* Expand an expression EXP that calls a built-in function,
1138 + with result going to TARGET if that's convenient
1139 + (and in mode MODE if that's convenient).
1140 + SUBTARGET may be used as the target for computing one of EXP's operands.
1141 + IGNORE is nonzero if the value is to be ignored. */
1144 +avr32_expand_builtin (tree exp,
1146 + rtx subtarget ATTRIBUTE_UNUSED,
1147 + enum machine_mode mode ATTRIBUTE_UNUSED,
1148 + int ignore ATTRIBUTE_UNUSED)
1150 + const struct builtin_description *d;
1152 + enum insn_code icode = 0;
1153 + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
1154 + tree arg0, arg1, arg2;
1155 + rtx op0, op1, op2, pat;
1156 + enum machine_mode tmode, mode0, mode1;
1157 + enum machine_mode arg0_mode;
1158 + int fcode = DECL_FUNCTION_CODE (fndecl);
1165 + case AVR32_BUILTIN_SATS:
1166 + case AVR32_BUILTIN_SATU:
1167 + case AVR32_BUILTIN_SATRNDS:
1168 + case AVR32_BUILTIN_SATRNDU:
1170 + const char *fname;
1174 + case AVR32_BUILTIN_SATS:
1175 + icode = CODE_FOR_sats;
1178 + case AVR32_BUILTIN_SATU:
1179 + icode = CODE_FOR_satu;
1182 + case AVR32_BUILTIN_SATRNDS:
1183 + icode = CODE_FOR_satrnds;
1184 + fname = "satrnds";
1186 + case AVR32_BUILTIN_SATRNDU:
1187 + icode = CODE_FOR_satrndu;
1188 + fname = "satrndu";
1192 + arg0 = CALL_EXPR_ARG (exp,0);
1193 + arg1 = CALL_EXPR_ARG (exp,1);
1194 + arg2 = CALL_EXPR_ARG (exp,2);
1195 + op0 = expand_normal (arg0);
1196 + op1 = expand_normal (arg1);
1197 + op2 = expand_normal (arg2);
1199 + tmode = insn_data[icode].operand[0].mode;
1203 + || GET_MODE (target) != tmode
1204 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1205 + target = gen_reg_rtx (tmode);
1208 + if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
1210 + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
1213 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1215 + error ("Parameter 2 to __builtin_%s should be a constant number.",
1220 + if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
1222 + error ("Parameter 3 to __builtin_%s should be a constant number.",
1227 + emit_move_insn (target, op0);
1228 + pat = GEN_FCN (icode) (target, op1, op2);
1235 + case AVR32_BUILTIN_MUSTR:
1236 + icode = CODE_FOR_mustr;
1237 + tmode = insn_data[icode].operand[0].mode;
1240 + || GET_MODE (target) != tmode
1241 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1242 + target = gen_reg_rtx (tmode);
1243 + pat = GEN_FCN (icode) (target);
1249 + case AVR32_BUILTIN_MFSR:
1250 + icode = CODE_FOR_mfsr;
1251 + arg0 = CALL_EXPR_ARG (exp,0);
1252 + op0 = expand_normal (arg0);
1253 + tmode = insn_data[icode].operand[0].mode;
1254 + mode0 = insn_data[icode].operand[1].mode;
1256 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1258 + error ("Parameter 1 to __builtin_mfsr must be a constant number");
1262 + || GET_MODE (target) != tmode
1263 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1264 + target = gen_reg_rtx (tmode);
1265 + pat = GEN_FCN (icode) (target, op0);
1270 + case AVR32_BUILTIN_MTSR:
1271 + icode = CODE_FOR_mtsr;
1272 + arg0 = CALL_EXPR_ARG (exp,0);
1273 + arg1 = CALL_EXPR_ARG (exp,1);
1274 + op0 = expand_normal (arg0);
1275 + op1 = expand_normal (arg1);
1276 + mode0 = insn_data[icode].operand[0].mode;
1277 + mode1 = insn_data[icode].operand[1].mode;
1279 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1281 + error ("Parameter 1 to __builtin_mtsr must be a constant number");
1282 + return gen_reg_rtx (mode0);
1284 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1285 + op1 = copy_to_mode_reg (mode1, op1);
1286 + pat = GEN_FCN (icode) (op0, op1);
1291 + case AVR32_BUILTIN_MFDR:
1292 + icode = CODE_FOR_mfdr;
1293 + arg0 = CALL_EXPR_ARG (exp,0);
1294 + op0 = expand_normal (arg0);
1295 + tmode = insn_data[icode].operand[0].mode;
1296 + mode0 = insn_data[icode].operand[1].mode;
1298 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1300 + error ("Parameter 1 to __builtin_mfdr must be a constant number");
1304 + || GET_MODE (target) != tmode
1305 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1306 + target = gen_reg_rtx (tmode);
1307 + pat = GEN_FCN (icode) (target, op0);
1312 + case AVR32_BUILTIN_MTDR:
1313 + icode = CODE_FOR_mtdr;
1314 + arg0 = CALL_EXPR_ARG (exp,0);
1315 + arg1 = CALL_EXPR_ARG (exp,1);
1316 + op0 = expand_normal (arg0);
1317 + op1 = expand_normal (arg1);
1318 + mode0 = insn_data[icode].operand[0].mode;
1319 + mode1 = insn_data[icode].operand[1].mode;
1321 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1323 + error ("Parameter 1 to __builtin_mtdr must be a constant number");
1324 + return gen_reg_rtx (mode0);
1326 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1327 + op1 = copy_to_mode_reg (mode1, op1);
1328 + pat = GEN_FCN (icode) (op0, op1);
1333 + case AVR32_BUILTIN_CACHE:
1334 + icode = CODE_FOR_cache;
1335 + arg0 = CALL_EXPR_ARG (exp,0);
1336 + arg1 = CALL_EXPR_ARG (exp,1);
1337 + op0 = expand_normal (arg0);
1338 + op1 = expand_normal (arg1);
1339 + mode0 = insn_data[icode].operand[0].mode;
1340 + mode1 = insn_data[icode].operand[1].mode;
1342 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1344 + error ("Parameter 2 to __builtin_cache must be a constant number");
1345 + return gen_reg_rtx (mode1);
1348 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1349 + op0 = copy_to_mode_reg (mode0, op0);
1351 + pat = GEN_FCN (icode) (op0, op1);
1356 + case AVR32_BUILTIN_SYNC:
1357 + case AVR32_BUILTIN_MUSFR:
1358 + case AVR32_BUILTIN_SSRF:
1359 + case AVR32_BUILTIN_CSRF:
1361 + const char *fname;
1365 + case AVR32_BUILTIN_SYNC:
1366 + icode = CODE_FOR_sync;
1369 + case AVR32_BUILTIN_MUSFR:
1370 + icode = CODE_FOR_musfr;
1373 + case AVR32_BUILTIN_SSRF:
1374 + icode = CODE_FOR_ssrf;
1377 + case AVR32_BUILTIN_CSRF:
1378 + icode = CODE_FOR_csrf;
1383 + arg0 = CALL_EXPR_ARG (exp,0);
1384 + op0 = expand_normal (arg0);
1385 + mode0 = insn_data[icode].operand[0].mode;
1387 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1389 + if (icode == CODE_FOR_musfr)
1390 + op0 = copy_to_mode_reg (mode0, op0);
1393 + error ("Parameter to __builtin_%s is illegal.", fname);
1394 + return gen_reg_rtx (mode0);
1397 + pat = GEN_FCN (icode) (op0);
1403 + case AVR32_BUILTIN_TLBR:
1404 + icode = CODE_FOR_tlbr;
1405 + pat = GEN_FCN (icode) (NULL_RTX);
1410 + case AVR32_BUILTIN_TLBS:
1411 + icode = CODE_FOR_tlbs;
1412 + pat = GEN_FCN (icode) (NULL_RTX);
1417 + case AVR32_BUILTIN_TLBW:
1418 + icode = CODE_FOR_tlbw;
1419 + pat = GEN_FCN (icode) (NULL_RTX);
1424 + case AVR32_BUILTIN_BREAKPOINT:
1425 + icode = CODE_FOR_breakpoint;
1426 + pat = GEN_FCN (icode) (NULL_RTX);
1431 + case AVR32_BUILTIN_XCHG:
1432 + icode = CODE_FOR_sync_lock_test_and_setsi;
1433 + arg0 = CALL_EXPR_ARG (exp,0);
1434 + arg1 = CALL_EXPR_ARG (exp,1);
1435 + op0 = expand_normal (arg0);
1436 + op1 = expand_normal (arg1);
1437 + tmode = insn_data[icode].operand[0].mode;
1438 + mode0 = insn_data[icode].operand[1].mode;
1439 + mode1 = insn_data[icode].operand[2].mode;
1441 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1443 + op1 = copy_to_mode_reg (mode1, op1);
1446 + op0 = force_reg (GET_MODE (op0), op0);
1447 + op0 = gen_rtx_MEM (GET_MODE (op0), op0);
1448 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1451 + ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
1455 + || GET_MODE (target) != tmode
1456 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1457 + target = gen_reg_rtx (tmode);
1458 + pat = GEN_FCN (icode) (target, op0, op1);
1463 + case AVR32_BUILTIN_LDXI:
1464 + icode = CODE_FOR_ldxi;
1465 + arg0 = CALL_EXPR_ARG (exp,0);
1466 + arg1 = CALL_EXPR_ARG (exp,1);
1467 + arg2 = CALL_EXPR_ARG (exp,2);
1468 + op0 = expand_normal (arg0);
1469 + op1 = expand_normal (arg1);
1470 + op2 = expand_normal (arg2);
1471 + tmode = insn_data[icode].operand[0].mode;
1472 + mode0 = insn_data[icode].operand[1].mode;
1473 + mode1 = insn_data[icode].operand[2].mode;
1475 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1477 + op0 = copy_to_mode_reg (mode0, op0);
1480 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1482 + op1 = copy_to_mode_reg (mode1, op1);
1485 + if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
1488 + ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
1489 + return gen_reg_rtx (mode0);
1493 + || GET_MODE (target) != tmode
1494 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1495 + target = gen_reg_rtx (tmode);
1496 + pat = GEN_FCN (icode) (target, op0, op1, op2);
1501 + case AVR32_BUILTIN_BSWAP16:
1503 + icode = CODE_FOR_bswap_16;
1504 + arg0 = CALL_EXPR_ARG (exp,0);
1505 + arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
1506 + mode0 = insn_data[icode].operand[1].mode;
1507 + if (arg0_mode != mode0)
1508 + arg0 = build1 (NOP_EXPR,
1509 + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
1511 + op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
1512 + tmode = insn_data[icode].operand[0].mode;
1515 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1517 + if ( CONST_INT_P (op0) )
1519 + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
1520 + ((INTVAL (op0)&0xff00) >> 8) );
1521 + /* Sign extend 16-bit value to host wide int */
1522 + val <<= (HOST_BITS_PER_WIDE_INT - 16);
1523 + val >>= (HOST_BITS_PER_WIDE_INT - 16);
1524 + op0 = GEN_INT(val);
1526 + || GET_MODE (target) != tmode
1527 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1528 + target = gen_reg_rtx (tmode);
1529 + emit_move_insn(target, op0);
1533 + op0 = copy_to_mode_reg (mode0, op0);
1537 + || GET_MODE (target) != tmode
1538 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1540 + target = gen_reg_rtx (tmode);
1544 + pat = GEN_FCN (icode) (target, op0);
1551 + case AVR32_BUILTIN_BSWAP32:
1553 + icode = CODE_FOR_bswap_32;
1554 + arg0 = CALL_EXPR_ARG (exp,0);
1555 + op0 = expand_normal (arg0);
1556 + tmode = insn_data[icode].operand[0].mode;
1557 + mode0 = insn_data[icode].operand[1].mode;
1559 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1561 + if ( CONST_INT_P (op0) )
1563 + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
1564 + ((INTVAL (op0)&0x0000ff00) << 8) |
1565 + ((INTVAL (op0)&0x00ff0000) >> 8) |
1566 + ((INTVAL (op0)&0xff000000) >> 24) );
1567 + /* Sign extend 32-bit value to host wide int */
1568 + val <<= (HOST_BITS_PER_WIDE_INT - 32);
1569 + val >>= (HOST_BITS_PER_WIDE_INT - 32);
1570 + op0 = GEN_INT(val);
1572 + || GET_MODE (target) != tmode
1573 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1574 + target = gen_reg_rtx (tmode);
1575 + emit_move_insn(target, op0);
1579 + op0 = copy_to_mode_reg (mode0, op0);
1583 + || GET_MODE (target) != tmode
1584 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1585 + target = gen_reg_rtx (tmode);
1588 + pat = GEN_FCN (icode) (target, op0);
1595 + case AVR32_BUILTIN_MVCR_W:
1596 + case AVR32_BUILTIN_MVCR_D:
1598 + arg0 = CALL_EXPR_ARG (exp,0);
1599 + arg1 = CALL_EXPR_ARG (exp,1);
1600 + op0 = expand_normal (arg0);
1601 + op1 = expand_normal (arg1);
1603 + if (fcode == AVR32_BUILTIN_MVCR_W)
1604 + icode = CODE_FOR_mvcrsi;
1606 + icode = CODE_FOR_mvcrdi;
1608 + tmode = insn_data[icode].operand[0].mode;
1611 + || GET_MODE (target) != tmode
1612 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1613 + target = gen_reg_rtx (tmode);
1615 + if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
1618 + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1619 + error ("Number should be between 0 and 7.");
1623 + if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
1626 + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1627 + error ("Number should be between 0 and 15.");
1631 + pat = GEN_FCN (icode) (target, op0, op1);
1638 + case AVR32_BUILTIN_MACSATHH_W:
1639 + case AVR32_BUILTIN_MACWH_D:
1640 + case AVR32_BUILTIN_MACHH_D:
1642 + arg0 = CALL_EXPR_ARG (exp,0);
1643 + arg1 = CALL_EXPR_ARG (exp,1);
1644 + arg2 = CALL_EXPR_ARG (exp,2);
1645 + op0 = expand_normal (arg0);
1646 + op1 = expand_normal (arg1);
1647 + op2 = expand_normal (arg2);
1649 + icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
1650 + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
1651 + CODE_FOR_machh_d);
1653 + tmode = insn_data[icode].operand[0].mode;
1654 + mode0 = insn_data[icode].operand[1].mode;
1655 + mode1 = insn_data[icode].operand[2].mode;
1659 + || GET_MODE (target) != tmode
1660 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1661 + target = gen_reg_rtx (tmode);
1663 + if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
1665 + /* If op0 is already a reg we must cast it to the correct mode. */
1667 + op0 = convert_to_mode (tmode, op0, 1);
1669 + op0 = copy_to_mode_reg (tmode, op0);
1672 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
1674 + /* If op1 is already a reg we must cast it to the correct mode. */
1676 + op1 = convert_to_mode (mode0, op1, 1);
1678 + op1 = copy_to_mode_reg (mode0, op1);
1681 + if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
 1683 +	  /* If op2 is already a reg we must cast it to the correct mode. */
1685 + op2 = convert_to_mode (mode1, op2, 1);
1687 + op2 = copy_to_mode_reg (mode1, op2);
1690 + emit_move_insn (target, op0);
1692 + pat = GEN_FCN (icode) (target, op1, op2);
1698 + case AVR32_BUILTIN_MVRC_W:
1699 + case AVR32_BUILTIN_MVRC_D:
1701 + arg0 = CALL_EXPR_ARG (exp,0);
1702 + arg1 = CALL_EXPR_ARG (exp,1);
1703 + arg2 = CALL_EXPR_ARG (exp,2);
1704 + op0 = expand_normal (arg0);
1705 + op1 = expand_normal (arg1);
1706 + op2 = expand_normal (arg2);
1708 + if (fcode == AVR32_BUILTIN_MVRC_W)
1709 + icode = CODE_FOR_mvrcsi;
1711 + icode = CODE_FOR_mvrcdi;
1713 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1715 + error ("Parameter 1 is not a valid coprocessor number.");
1716 + error ("Number should be between 0 and 7.");
1720 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1722 + error ("Parameter 2 is not a valid coprocessor register number.");
1723 + error ("Number should be between 0 and 15.");
1727 + if (GET_CODE (op2) == CONST_INT
1728 + || GET_CODE (op2) == CONST
1729 + || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
1731 + op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
1734 + if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
1735 + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
1738 + pat = GEN_FCN (icode) (op0, op1, op2);
1745 + case AVR32_BUILTIN_COP:
1749 + icode = CODE_FOR_cop;
1750 + arg0 = CALL_EXPR_ARG (exp,0);
1751 + arg1 = CALL_EXPR_ARG (exp,1);
1752 + arg2 = CALL_EXPR_ARG (exp,2);
1753 + arg3 = CALL_EXPR_ARG (exp,3);
1754 + arg4 = CALL_EXPR_ARG (exp,4);
1755 + op0 = expand_normal (arg0);
1756 + op1 = expand_normal (arg1);
1757 + op2 = expand_normal (arg2);
1758 + op3 = expand_normal (arg3);
1759 + op4 = expand_normal (arg4);
1761 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1764 + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1765 + error ("Number should be between 0 and 7.");
1769 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1772 + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1773 + error ("Number should be between 0 and 15.");
1777 + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
1780 + ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
1781 + error ("Number should be between 0 and 15.");
1785 + if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
1788 + ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
1789 + error ("Number should be between 0 and 15.");
1793 + if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
1796 + ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
1797 + error ("Number should be between 0 and 127.");
1801 + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
1811 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1812 + if (d->code == fcode)
1813 + return avr32_expand_binop_builtin (d->icode, exp, target);
1816 + /* @@@ Should really do something sensible here. */
1821 +/* Handle an "interrupt" or "isr" attribute;
1822 + arguments as in struct attribute_spec.handler. */
1825 +avr32_handle_isr_attribute (tree * node, tree name, tree args,
1826 + int flags, bool * no_add_attrs)
1828 + if (DECL_P (*node))
1830 + if (TREE_CODE (*node) != FUNCTION_DECL)
1832 + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
1833 + IDENTIFIER_POINTER (name));
1834 + *no_add_attrs = true;
 1836 +      /* FIXME: the argument, if any, is checked for type attributes; should it
 1837 +         be checked for decl ones? */
1841 + if (TREE_CODE (*node) == FUNCTION_TYPE
1842 + || TREE_CODE (*node) == METHOD_TYPE)
1844 + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
1846 + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
1847 + *no_add_attrs = true;
1850 + else if (TREE_CODE (*node) == POINTER_TYPE
1851 + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
1852 + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
1853 + && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
1855 + *node = build_variant_type_copy (*node);
1856 + TREE_TYPE (*node) = build_type_attribute_variant
1857 + (TREE_TYPE (*node),
1858 + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
1859 + *no_add_attrs = true;
1863 + /* Possibly pass this attribute on from the type to a decl. */
1864 + if (flags & ((int) ATTR_FLAG_DECL_NEXT
1865 + | (int) ATTR_FLAG_FUNCTION_NEXT
1866 + | (int) ATTR_FLAG_ARRAY_NEXT))
1868 + *no_add_attrs = true;
1869 + return tree_cons (name, args, NULL_TREE);
1873 + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
1881 +/* Handle an attribute requiring a FUNCTION_DECL;
1882 + arguments as in struct attribute_spec.handler. */
1884 +avr32_handle_fndecl_attribute (tree * node, tree name,
1885 + tree args ATTRIBUTE_UNUSED,
1886 + int flags ATTRIBUTE_UNUSED,
1887 + bool * no_add_attrs)
1889 + if (TREE_CODE (*node) != FUNCTION_DECL)
1891 + warning (OPT_Wattributes,"%qs attribute only applies to functions",
1892 + IDENTIFIER_POINTER (name));
1893 + *no_add_attrs = true;
1900 +/* Handle an acall attribute;
1901 + arguments as in struct attribute_spec.handler. */
1904 +avr32_handle_acall_attribute (tree * node, tree name,
1905 + tree args ATTRIBUTE_UNUSED,
1906 + int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
1908 + if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
1910 + warning (OPT_Wattributes,"`%s' attribute not yet supported...",
1911 + IDENTIFIER_POINTER (name));
1912 + *no_add_attrs = true;
1916 + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
1917 + IDENTIFIER_POINTER (name));
1918 + *no_add_attrs = true;
1923 +/* Return 0 if the attributes for two types are incompatible, 1 if they
1924 + are compatible, and 2 if they are nearly compatible (which causes a
1925 + warning to be generated). */
1928 +avr32_comp_type_attributes (tree type1, tree type2)
1930 + int acall1, acall2, isr1, isr2, naked1, naked2;
1932 + /* Check for mismatch of non-default calling convention. */
1933 + if (TREE_CODE (type1) != FUNCTION_TYPE)
1936 + /* Check for mismatched call attributes. */
1937 + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
1938 + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
1939 + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
1940 + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
1941 + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
1943 + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
1945 + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
1947 + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
1949 + if ((acall1 && isr2)
1950 + || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
1957 +/* Computes the type of the current function. */
1959 +static unsigned long
1960 +avr32_compute_func_type (void)
1962 + unsigned long type = AVR32_FT_UNKNOWN;
1966 + if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1969 + /* Decide if the current function is volatile. Such functions never
1970 + return, and many memory cycles can be saved by not storing register
1971 + values that will never be needed again. This optimization was added to
1972 + speed up context switching in a kernel application. */
1974 + && TREE_NOTHROW (current_function_decl)
1975 + && TREE_THIS_VOLATILE (current_function_decl))
1976 + type |= AVR32_FT_VOLATILE;
1978 + if (cfun->static_chain_decl != NULL)
1979 + type |= AVR32_FT_NESTED;
1981 + attr = DECL_ATTRIBUTES (current_function_decl);
1983 + a = lookup_attribute ("isr", attr);
1984 + if (a == NULL_TREE)
1985 + a = lookup_attribute ("interrupt", attr);
1987 + if (a == NULL_TREE)
1988 + type |= AVR32_FT_NORMAL;
1990 + type |= avr32_isr_value (TREE_VALUE (a));
1993 + a = lookup_attribute ("acall", attr);
1994 + if (a != NULL_TREE)
1995 + type |= AVR32_FT_ACALL;
1997 + a = lookup_attribute ("naked", attr);
1998 + if (a != NULL_TREE)
1999 + type |= AVR32_FT_NAKED;
2004 +/* Returns the type of the current function. */
2006 +static unsigned long
2007 +avr32_current_func_type (void)
2009 + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
2010 + cfun->machine->func_type = avr32_compute_func_type ();
2012 + return cfun->machine->func_type;
2016 + This target hook should return true if we should not pass type solely
2017 + in registers. The file expr.h defines a definition that is usually appropriate,
2018 + refer to expr.h for additional documentation.
2021 +avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
2023 + if (type && AGGREGATE_TYPE_P (type)
2024 + /* If the alignment is less than the size then pass in the struct on
2026 + && ((unsigned int) TYPE_ALIGN_UNIT (type) <
2027 + (unsigned int) int_size_in_bytes (type))
2028 + /* If we support unaligned word accesses then structs of size 4 and 8
2029 + can have any alignment and still be passed in registers. */
2030 + && !(TARGET_UNALIGNED_WORD
2031 + && (int_size_in_bytes (type) == 4
2032 + || int_size_in_bytes (type) == 8))
2033 + /* Double word structs need only a word alignment. */
2034 + && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
2037 + if (type && AGGREGATE_TYPE_P (type)
2038 + /* Structs of size 3,5,6,7 are always passed in registers. */
2039 + && (int_size_in_bytes (type) == 3
2040 + || int_size_in_bytes (type) == 5
2041 + || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
2045 + return (type && TREE_ADDRESSABLE (type));
2050 +avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
2056 + This target hook should return true if an argument at the position indicated
2057 + by cum should be passed by reference. This predicate is queried after target
2058 + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
2060 + If the hook returns true, a copy of that argument is made in memory and a
2061 + pointer to the argument is passed instead of the argument itself. The pointer
2062 + is passed in whatever way is appropriate for passing a pointer to that type.
2065 +avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
2066 + enum machine_mode mode ATTRIBUTE_UNUSED,
2067 + tree type, bool named ATTRIBUTE_UNUSED)
2069 + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
2073 +avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
2074 + enum machine_mode mode ATTRIBUTE_UNUSED,
2075 + tree type ATTRIBUTE_UNUSED,
2076 + bool named ATTRIBUTE_UNUSED)
2082 +struct gcc_target targetm = TARGET_INITIALIZER;
2085 + Table used to convert from register number in the assembler instructions and
2086 + the register numbers used in gcc.
2088 +const int avr32_function_arg_reglist[] = {
2089 + INTERNAL_REGNUM (12),
2090 + INTERNAL_REGNUM (11),
2091 + INTERNAL_REGNUM (10),
2092 + INTERNAL_REGNUM (9),
2093 + INTERNAL_REGNUM (8)
2096 +rtx avr32_compare_op0 = NULL_RTX;
2097 +rtx avr32_compare_op1 = NULL_RTX;
2098 +rtx avr32_compare_operator = NULL_RTX;
2099 +rtx avr32_acc_cache = NULL_RTX;
2102 + Returns nonzero if it is allowed to store a value of mode mode in hard
2103 + register number regno.
2106 +avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
2108 + /* We allow only float modes in the fp-registers */
2109 + if (regnr >= FIRST_FP_REGNUM
2110 + && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
2117 + case DImode: /* long long */
2118 + case DFmode: /* double */
2119 + case SCmode: /* __complex__ float */
2120 + case CSImode: /* __complex__ int */
2122 + { /* long long int not supported in r12, sp, lr
 2128 +	if (regnr % 2)		/* long long int has to be referred in even
2134 + case CDImode: /* __complex__ long long */
2135 + case DCmode: /* __complex__ double */
2136 + case TImode: /* 16 bytes */
2139 + else if (regnr % 2)
2150 +avr32_rnd_operands (rtx add, rtx shift)
2152 + if (GET_CODE (shift) == CONST_INT &&
2153 + GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
2155 + if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
2165 +avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
2172 + HOST_WIDE_INT min_value = 0, max_value = 0;
2176 + size_str[0] = str[2];
2177 + size_str[1] = str[3];
2178 + size_str[2] = '\0';
2179 + const_size = atoi (size_str);
2181 + if (toupper (str[1]) == 'U')
2184 + max_value = (1 << const_size) - 1;
2186 + else if (toupper (str[1]) == 'S')
2188 + min_value = -(1 << (const_size - 1));
2189 + max_value = (1 << (const_size - 1)) - 1;
2197 + if (value >= min_value && value <= max_value)
2204 + return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
2206 + return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
2213 +/*Compute mask of which floating-point registers need saving upon
2214 + entry to this function*/
2215 +static unsigned long
2216 +avr32_compute_save_fp_reg_mask (void)
2218 + unsigned long func_type = avr32_current_func_type ();
2219 + unsigned int save_reg_mask = 0;
2221 + unsigned int max_reg = 7;
2222 + int save_all_call_used_regs = FALSE;
2224 + /* This only applies for hardware floating-point implementation. */
2225 + if (!TARGET_HARD_FLOAT)
2228 + if (IS_INTERRUPT (func_type))
2231 + /* Interrupt functions must not corrupt any registers, even call
2232 + clobbered ones. If this is a leaf function we can just examine the
2233 + registers used by the RTL, but otherwise we have to assume that
2234 + whatever function is called might clobber anything, and so we have
2235 + to save all the call-clobbered registers as well. */
2237 + save_all_call_used_regs = !current_function_is_leaf;
2240 + /* All used registers used must be saved */
2241 + for (reg = 0; reg <= max_reg; reg++)
2242 + if (df_regs_ever_live_p (INTERNAL_FP_REGNUM (reg))
2243 + || (save_all_call_used_regs
2244 + && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
2245 + save_reg_mask |= (1 << reg);
2247 + return save_reg_mask;
2250 +/*Compute mask of registers which needs saving upon function entry */
2251 +static unsigned long
2252 +avr32_compute_save_reg_mask (int push)
2254 + unsigned long func_type;
2255 + unsigned int save_reg_mask = 0;
2258 + func_type = avr32_current_func_type ();
2260 + if (IS_INTERRUPT (func_type))
2262 + unsigned int max_reg = 12;
2265 + /* Get the banking scheme for the interrupt */
2266 + switch (func_type)
2268 + case AVR32_FT_ISR_FULL:
2271 + case AVR32_FT_ISR_HALF:
2274 + case AVR32_FT_ISR_NONE:
2279 + /* Interrupt functions must not corrupt any registers, even call
2280 + clobbered ones. If this is a leaf function we can just examine the
2281 + registers used by the RTL, but otherwise we have to assume that
2282 + whatever function is called might clobber anything, and so we have
2283 + to save all the call-clobbered registers as well. */
2285 + /* Need not push the registers r8-r12 for AVR32A architectures, as this
2286 +         is automatically done in hardware. We also do not have any shadow
2288 + if (TARGET_UARCH_AVR32A)
2291 + func_type = AVR32_FT_ISR_NONE;
2294 + /* All registers which are used and is not shadowed must be saved */
2295 + for (reg = 0; reg <= max_reg; reg++)
2296 + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
2297 + || (!current_function_is_leaf
2298 + && call_used_regs[INTERNAL_REGNUM (reg)]))
2299 + save_reg_mask |= (1 << reg);
2302 + if ((df_regs_ever_live_p (LR_REGNUM)
2303 + || !current_function_is_leaf || frame_pointer_needed)
2304 + /* Only non-shadowed register models */
2305 + && (func_type == AVR32_FT_ISR_NONE))
2306 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2308 + /* Make sure that the GOT register is pushed. */
2309 + if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
2310 + && current_function_uses_pic_offset_table)
2311 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2316 + int use_pushm = optimize_size;
2318 + /* In the normal case we only need to save those registers which are
2319 + call saved and which are used by this function. */
2320 + for (reg = 0; reg <= 7; reg++)
2321 + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
2322 + && !call_used_regs[INTERNAL_REGNUM (reg)])
2323 + save_reg_mask |= (1 << reg);
2325 + /* Make sure that the GOT register is pushed. */
2326 + if (current_function_uses_pic_offset_table)
2327 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2330 + /* If we optimize for size and do not have anonymous arguments: use
2331 + popm/pushm always */
2334 + if ((save_reg_mask & (1 << 0))
2335 + || (save_reg_mask & (1 << 1))
2336 + || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
2337 + save_reg_mask |= 0xf;
2339 + if ((save_reg_mask & (1 << 4))
2340 + || (save_reg_mask & (1 << 5))
2341 + || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
2342 + save_reg_mask |= 0xf0;
2344 + if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
2345 + save_reg_mask |= 0x300;
2350 + if ((df_regs_ever_live_p (LR_REGNUM)
2351 + || !current_function_is_leaf
2354 + && !current_function_calls_eh_return) || frame_pointer_needed))
2357 + /* Never pop LR into PC for functions which
2358 + calls __builtin_eh_return, since we need to
2359 + fix the SP after the restoring of the registers
2360 + and before returning. */
2361 + || current_function_calls_eh_return)
2364 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2369 + save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
2375 + /* Save registers so the exception handler can modify them. */
2376 + if (current_function_calls_eh_return)
2382 + reg = EH_RETURN_DATA_REGNO (i);
2383 + if (reg == INVALID_REGNUM)
2385 + save_reg_mask |= 1 << ASM_REGNUM (reg);
2389 + return save_reg_mask;
2392 +/*Compute total size in bytes of all saved registers */
2394 +avr32_get_reg_mask_size (int reg_mask)
2399 + for (reg = 0; reg <= 15; reg++)
2400 + if (reg_mask & (1 << reg))
2406 +/*Get a register from one of the registers which are saved onto the stack
2407 + upon function entry */
2410 +avr32_get_saved_reg (int save_reg_mask)
2414 + /* Find the first register which is saved in the saved_reg_mask */
2415 + for (reg = 0; reg <= 15; reg++)
2416 + if (save_reg_mask & (1 << reg))
2422 +/* Return 1 if it is possible to return using a single instruction. */
2424 +avr32_use_return_insn (int iscond)
2426 + unsigned int func_type = avr32_current_func_type ();
2427 + unsigned long saved_int_regs;
2428 + unsigned long saved_fp_regs;
2430 + /* Never use a return instruction before reload has run. */
2431 + if (!reload_completed)
2434 + /* Must adjust the stack for vararg functions. */
2435 + if (current_function_args_info.uses_anonymous_args)
2438 +  /* If there is a stack adjustment. */
2439 + if (get_frame_size ())
2442 + saved_int_regs = avr32_compute_save_reg_mask (TRUE);
2443 + saved_fp_regs = avr32_compute_save_fp_reg_mask ();
2445 + /* Functions which have saved fp-regs on the stack can not be performed in
2446 + one instruction */
2447 + if (saved_fp_regs)
2450 + /* Conditional returns can not be performed in one instruction if we need
2451 + to restore registers from the stack */
2452 + if (iscond && saved_int_regs)
2455 + /* Conditional return can not be used for interrupt handlers. */
2456 + if (iscond && IS_INTERRUPT (func_type))
2459 + /* For interrupt handlers which needs to pop registers */
2460 + if (saved_int_regs && IS_INTERRUPT (func_type))
2464 + /* If there are saved registers but the LR isn't saved, then we need two
2465 + instructions for the return. */
2466 + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
2474 +/*Generate some function prologue info in the assembly file*/
2477 +avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
2479 + if (IS_NAKED (avr32_current_func_type ()))
2481 + "\t# Function is naked: Prologue and epilogue provided by programmer\n");
2483 + if (IS_INTERRUPT (avr32_current_func_type ()))
2485 + switch (avr32_current_func_type ())
2487 + case AVR32_FT_ISR_FULL:
2489 + "\t# Interrupt Function: Fully shadowed register file\n");
2491 + case AVR32_FT_ISR_HALF:
2493 + "\t# Interrupt Function: Half shadowed register file\n");
2496 + case AVR32_FT_ISR_NONE:
2497 + fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
2503 + fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
2504 + current_function_args_size, frame_size,
2505 + current_function_pretend_args_size);
2507 + fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
2508 + frame_pointer_needed, current_function_is_leaf);
2510 + fprintf (f, "\t# uses_anonymous_args = %i\n",
2511 + current_function_args_info.uses_anonymous_args);
2512 + if (current_function_calls_eh_return)
2513 + fprintf (f, "\t# Calls __builtin_eh_return.\n");
2518 +/* Generate and emit an insn that we will recognize as a pushm or stm.
2519 + Unfortunately, since this insn does not reflect very well the actual
2520 + semantics of the operation, we need to annotate the insn for the benefit
2521 + of DWARF2 frame unwind information. */
2523 +int avr32_convert_to_reglist16 (int reglist8_vect);
2526 +emit_multi_reg_push (int reglist, int usePUSHM)
2538 + insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
2539 + reglist = avr32_convert_to_reglist16 (reglist);
2543 + insn = emit_insn (gen_stm (stack_pointer_rtx,
2544 + gen_rtx_CONST_INT (SImode, reglist),
2545 + gen_rtx_CONST_INT (SImode, 1)));
2548 + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
2549 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
2551 + for (i = 15; i >= 0; i--)
2553 + if (reglist & (1 << i))
2555 + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
2556 + tmp = gen_rtx_SET (VOIDmode,
2557 + gen_rtx_MEM (SImode,
2558 + plus_constant (stack_pointer_rtx,
2559 + 4 * index)), reg);
2560 + RTX_FRAME_RELATED_P (tmp) = 1;
2561 + XVECEXP (dwarf, 0, 1 + index++) = tmp;
2565 + tmp = gen_rtx_SET (SImode,
2566 + stack_pointer_rtx,
2567 + gen_rtx_PLUS (SImode,
2568 + stack_pointer_rtx,
2569 + GEN_INT (-4 * nr_regs)));
2570 + RTX_FRAME_RELATED_P (tmp) = 1;
2571 + XVECEXP (dwarf, 0, 0) = tmp;
2572 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2573 + REG_NOTES (insn));
2579 +emit_multi_fp_reg_push (int reglist)
2589 + insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
2590 + gen_rtx_CONST_INT (SImode, reglist),
2591 + gen_rtx_CONST_INT (SImode, 1)));
2593 + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
2594 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
2596 + for (i = 15; i >= 0; i--)
2598 + if (reglist & (1 << i))
2600 + reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
2601 + tmp = gen_rtx_SET (VOIDmode,
2602 + gen_rtx_MEM (SImode,
2603 + plus_constant (stack_pointer_rtx,
2604 + 4 * index)), reg);
2605 + RTX_FRAME_RELATED_P (tmp) = 1;
2606 + XVECEXP (dwarf, 0, 1 + index++) = tmp;
2610 + tmp = gen_rtx_SET (SImode,
2611 + stack_pointer_rtx,
2612 + gen_rtx_PLUS (SImode,
2613 + stack_pointer_rtx,
2614 + GEN_INT (-4 * nr_regs)));
2615 + RTX_FRAME_RELATED_P (tmp) = 1;
2616 + XVECEXP (dwarf, 0, 0) = tmp;
2617 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2618 + REG_NOTES (insn));
2623 +avr32_gen_load_multiple (rtx * regs, int count, rtx from,
2624 + int write_back, int in_struct_p, int scalar_p)
2631 + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
2635 + XVECEXP (result, 0, 0)
2636 + = gen_rtx_SET (GET_MODE (from), from,
2637 + plus_constant (from, count * 4));
2643 + for (j = 0; i < count; i++, j++)
2646 + rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
2647 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2648 + MEM_SCALAR_P (mem) = scalar_p;
2649 + unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
2650 + XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
2658 +avr32_gen_store_multiple (rtx * regs, int count, rtx to,
2659 + int in_struct_p, int scalar_p)
2664 + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
2666 + for (j = 0; i < count; i++, j++)
2668 + rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
2669 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2670 + MEM_SCALAR_P (mem) = scalar_p;
2671 + XVECEXP (result, 0, i)
2672 + = gen_rtx_SET (VOIDmode, mem,
2673 + gen_rtx_UNSPEC (VOIDmode,
2674 + gen_rtvec (1, regs[j]),
2675 + UNSPEC_STORE_MULTIPLE));
2682 +/* Move a block of memory if it is word aligned or we support unaligned
2683 + word memory accesses. The size must be maximum 64 bytes. */
2686 +avr32_gen_movmemsi (rtx * operands)
2688 + HOST_WIDE_INT bytes_to_go;
2690 + rtx st_src, st_dst;
2691 + int src_offset = 0, dst_offset = 0;
2693 + int dst_in_struct_p, src_in_struct_p;
2694 + int dst_scalar_p, src_scalar_p;
2697 + if (GET_CODE (operands[2]) != CONST_INT
2698 + || GET_CODE (operands[3]) != CONST_INT
2699 + || INTVAL (operands[2]) > 64
2700 + || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
2703 + unaligned = (INTVAL (operands[3]) & 3) != 0;
2707 + st_dst = XEXP (operands[0], 0);
2708 + st_src = XEXP (operands[1], 0);
2710 + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
2711 + dst_scalar_p = MEM_SCALAR_P (operands[0]);
2712 + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
2713 + src_scalar_p = MEM_SCALAR_P (operands[1]);
2715 + dst = copy_to_mode_reg (SImode, st_dst);
2716 + src = copy_to_mode_reg (SImode, st_src);
2718 + bytes_to_go = INTVAL (operands[2]);
2720 + while (bytes_to_go)
2722 + enum machine_mode move_mode;
2723 + /* (Seems to be a problem with reloads for the movti pattern so this is
2724 + disabled until that problem is resolved)
2725 + UPDATE: Problem seems to be solved now.... */
2726 + if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
2727 + /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
2728 + && !TARGET_ARCH_UC)
2729 + move_mode = TImode;
2730 + else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
2731 + move_mode = DImode;
2732 + else if (bytes_to_go >= GET_MODE_SIZE (SImode))
2733 + move_mode = SImode;
2735 + move_mode = QImode;
2739 + rtx dst_mem = gen_rtx_MEM (move_mode,
2740 + gen_rtx_PLUS (SImode, dst,
2741 + GEN_INT (dst_offset)));
2742 + dst_offset += GET_MODE_SIZE (move_mode);
2743 + if ( 0 /* This causes an error in GCC. Think there is
2744 + something wrong in the gcse pass which causes REQ_EQUIV notes
2745 + to be wrong so disabling it for now. */
2746 + && move_mode == TImode
2747 + && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
2749 + src_mem = gen_rtx_MEM (move_mode,
2750 + gen_rtx_POST_INC (SImode, src));
2754 + src_mem = gen_rtx_MEM (move_mode,
2755 + gen_rtx_PLUS (SImode, src,
2756 + GEN_INT (src_offset)));
2757 + src_offset += GET_MODE_SIZE (move_mode);
2760 + bytes_to_go -= GET_MODE_SIZE (move_mode);
2762 + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
2763 + MEM_SCALAR_P (dst_mem) = dst_scalar_p;
2765 + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
2766 + MEM_SCALAR_P (src_mem) = src_scalar_p;
2767 + emit_move_insn (dst_mem, src_mem);
2777 +/*Expand the prologue instruction*/
2779 +avr32_expand_prologue (void)
2782 + unsigned long saved_reg_mask, saved_fp_reg_mask;
2785 +  /* Naked functions do not have a prologue */
2786 + if (IS_NAKED (avr32_current_func_type ()))
2789 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
2791 + if (saved_reg_mask)
2793 + /* Must push used registers */
2795 + /* Should we use POPM or LDM? */
2796 + int usePUSHM = TRUE;
2798 + if (((saved_reg_mask & (1 << 0)) ||
2799 + (saved_reg_mask & (1 << 1)) ||
2800 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
2802 + /* One of R0-R3 should at least be pushed */
2803 + if (((saved_reg_mask & (1 << 0)) &&
2804 + (saved_reg_mask & (1 << 1)) &&
2805 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
2807 + /* All should be pushed */
2816 + if (((saved_reg_mask & (1 << 4)) ||
2817 + (saved_reg_mask & (1 << 5)) ||
2818 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
2820 + /* One of R4-R7 should at least be pushed */
2821 + if (((saved_reg_mask & (1 << 4)) &&
2822 + (saved_reg_mask & (1 << 5)) &&
2823 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
2826 + /* All should be pushed */
2835 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
2837 + /* One of R8-R9 should at least be pushed */
2838 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
2841 + /* All should be pushed */
2850 + if (saved_reg_mask & (1 << 10))
2853 + if (saved_reg_mask & (1 << 11))
2856 + if (saved_reg_mask & (1 << 12))
2859 + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
2867 + insn = emit_multi_reg_push (reglist8, TRUE);
2871 + insn = emit_multi_reg_push (saved_reg_mask, FALSE);
2873 + RTX_FRAME_RELATED_P (insn) = 1;
2875 + /* Prevent this instruction from being scheduled after any other
2877 + emit_insn (gen_blockage ());
2880 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
2881 + if (saved_fp_reg_mask)
2883 + insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
2884 + RTX_FRAME_RELATED_P (insn) = 1;
2886 + /* Prevent this instruction from being scheduled after any other
2888 + emit_insn (gen_blockage ());
2891 + /* Set frame pointer */
2892 + if (frame_pointer_needed)
2894 + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
2895 + RTX_FRAME_RELATED_P (insn) = 1;
2898 + if (get_frame_size () > 0)
2900 + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
2902 + insn = emit_insn (gen_rtx_SET (SImode,
2903 + stack_pointer_rtx,
2904 + gen_rtx_PLUS (SImode,
2905 + stack_pointer_rtx,
2910 + RTX_FRAME_RELATED_P (insn) = 1;
2914 + /* Immediate is larger than k21 We must either check if we can use
2915 +             one of the pushed registers as temporary storage or we must
2916 + make us a temp register by pushing a register to the stack. */
2917 + rtx temp_reg, const_pool_entry, insn;
2918 + if (saved_reg_mask)
2921 + gen_rtx_REG (SImode,
2922 + INTERNAL_REGNUM (avr32_get_saved_reg
2923 + (saved_reg_mask)));
2927 + temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
2928 + emit_move_insn (gen_rtx_MEM
2930 + gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
2934 + const_pool_entry =
2935 + force_const_mem (SImode,
2936 + gen_rtx_CONST_INT (SImode, get_frame_size ()));
2937 + emit_move_insn (temp_reg, const_pool_entry);
2939 + insn = emit_insn (gen_rtx_SET (SImode,
2940 + stack_pointer_rtx,
2941 + gen_rtx_MINUS (SImode,
2942 + stack_pointer_rtx,
2945 + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
2946 + gen_rtx_PLUS (SImode, stack_pointer_rtx,
2947 + GEN_INT (-get_frame_size ())));
2948 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2949 + dwarf, REG_NOTES (insn));
2950 + RTX_FRAME_RELATED_P (insn) = 1;
2952 + if (!saved_reg_mask)
2955 + emit_move_insn (temp_reg,
2956 + gen_rtx_MEM (SImode,
2957 + gen_rtx_POST_INC (SImode,
2963 + /* Mark the temp register as dead */
2964 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
2965 + REG_NOTES (insn));
2970 +      /* Prevent the stack adjustment from being scheduled after any
2971 + instructions using the frame pointer. */
2972 + emit_insn (gen_blockage ());
2978 + avr32_load_pic_register ();
2980 + /* gcc does not know that load or call instructions might use the pic
2981 + register so it might schedule these instructions before the loading
2982 + of the pic register. To avoid this emit a barrier for now. TODO!
2983 + Find out a better way to let gcc know which instructions might use
2984 + the pic register. */
2985 + emit_insn (gen_blockage ());
2991 +avr32_set_return_address (rtx source, rtx scratch)
2994 + unsigned long saved_regs;
2996 + saved_regs = avr32_compute_save_reg_mask (TRUE);
2998 + if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
2999 + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
3002 + if (frame_pointer_needed)
3003 + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
3005 + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
3007 + addr = plus_constant (stack_pointer_rtx, get_frame_size ());
3011 + emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
3014 + emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
3020 +/* Return the length of INSN. LENGTH is the initial length computed by
3021 + attributes in the machine-description file. */
3024 +avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
3025 + int length ATTRIBUTE_UNUSED)
3031 +avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
3032 + int iscond ATTRIBUTE_UNUSED,
3033 + rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
3036 + unsigned long saved_reg_mask, saved_fp_reg_mask;
3037 + int insert_ret = TRUE;
3039 + int stack_adjustment = get_frame_size ();
3040 + unsigned int func_type = avr32_current_func_type ();
3041 + FILE *f = asm_out_file;
3043 +  /* Naked functions do not have an epilogue */
3044 + if (IS_NAKED (func_type))
3047 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
3049 + saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
3051 + /* Reset frame pointer */
3052 + if (stack_adjustment > 0)
3054 + if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
3056 + fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
3057 + -stack_adjustment);
3061 + /* TODO! Is it safe to use r8 as scratch?? */
3062 + fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
3063 + -stack_adjustment);
3064 + fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
3065 + -stack_adjustment);
3066 + fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
3070 + if (saved_fp_reg_mask)
3072 + char reglist[64]; /* 64 bytes should be enough... */
3073 + avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
3074 + fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
3075 + if (saved_fp_reg_mask & ~0xff)
3077 + saved_fp_reg_mask &= ~0xff;
3078 + avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
3079 + fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
3083 + if (saved_reg_mask)
3085 + /* Must pop used registers */
3087 + /* Should we use POPM or LDM? */
3088 + int usePOPM = TRUE;
3089 + if (((saved_reg_mask & (1 << 0)) ||
3090 + (saved_reg_mask & (1 << 1)) ||
3091 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
3093 + /* One of R0-R3 should at least be popped */
3094 + if (((saved_reg_mask & (1 << 0)) &&
3095 + (saved_reg_mask & (1 << 1)) &&
3096 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
3098 + /* All should be popped */
3107 + if (((saved_reg_mask & (1 << 4)) ||
3108 + (saved_reg_mask & (1 << 5)) ||
3109 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
3111 +        /* One of R4-R7 should at least be popped */
3112 + if (((saved_reg_mask & (1 << 4)) &&
3113 + (saved_reg_mask & (1 << 5)) &&
3114 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
3117 + /* All should be popped */
3126 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
3128 +        /* One of R8-R9 should at least be popped */
3129 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
3132 +          /* All should be popped */
3141 + if (saved_reg_mask & (1 << 10))
3144 + if (saved_reg_mask & (1 << 11))
3147 + if (saved_reg_mask & (1 << 12))
3150 + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
3154 + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
3155 + /* Pop LR into PC. */
3160 + char reglist[64]; /* 64 bytes should be enough... */
3161 + avr32_make_reglist8 (reglist8, (char *) reglist);
3163 + if (reglist8 & 0x80)
3164 + /* This instruction is also a return */
3165 + insert_ret = FALSE;
3167 + if (r12_imm && !insert_ret)
3168 + fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
3170 + fprintf (f, "\tpopm\t%s\n", reglist);
3175 + char reglist[64]; /* 64 bytes should be enough... */
3176 + avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
3177 + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
3178 + /* This instruction is also a return */
3179 + insert_ret = FALSE;
3181 + if (r12_imm && !insert_ret)
3182 + fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
3183 + INTVAL (r12_imm));
3185 + fprintf (f, "\tldm\tsp++, %s\n", reglist);
3191 + /* Stack adjustment for exception handler. */
3192 + if (current_function_calls_eh_return)
3193 + fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
3196 + if (IS_INTERRUPT (func_type))
3198 + fprintf (f, "\trete\n");
3200 + else if (insert_ret)
3203 + fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
3205 + fprintf (f, "\tretal\tr12\n");
3209 +/* Function for converting a fp-register mask to a
3210 + reglistCPD8 register list string. */
3212 +avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string)
3216 + /* Make sure reglist_string is empty */
3217 + reglist_string[0] = '\0';
3219 + for (i = 0; i < NUM_FP_REGS; i += 2)
3221 + if (reglist_mask & (1 << i))
3223 + strlen (reglist_string) ?
3224 + sprintf (reglist_string, "%s, %s-%s", reglist_string,
3225 + reg_names[INTERNAL_FP_REGNUM (i)],
3226 + reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
3227 + sprintf (reglist_string, "%s-%s",
3228 + reg_names[INTERNAL_FP_REGNUM (i)],
3229 + reg_names[INTERNAL_FP_REGNUM (i + 1)]);
3234 +/* Function for converting a fp-register mask to a
3235 + reglistCP8 register list string. */
3237 +avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string)
3241 + /* Make sure reglist_string is empty */
3242 + reglist_string[0] = '\0';
3244 + for (i = 0; i < NUM_FP_REGS; ++i)
3246 + if (reglist_mask & (1 << i))
3248 + strlen (reglist_string) ?
3249 + sprintf (reglist_string, "%s, %s", reglist_string,
3250 + reg_names[INTERNAL_FP_REGNUM (i)]) :
3251 + sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
3257 +avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
3261 + /* Make sure reglist16_string is empty */
3262 + reglist16_string[0] = '\0';
3264 + for (i = 0; i < 16; ++i)
3266 + if (reglist16_vect & (1 << i))
3268 + strlen (reglist16_string) ?
3269 + sprintf (reglist16_string, "%s, %s", reglist16_string,
3270 + reg_names[INTERNAL_REGNUM (i)]) :
3271 + sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
3277 +avr32_convert_to_reglist16 (int reglist8_vect)
3279 + int reglist16_vect = 0;
3280 + if (reglist8_vect & 0x1)
3281 + reglist16_vect |= 0xF;
3282 + if (reglist8_vect & 0x2)
3283 + reglist16_vect |= 0xF0;
3284 + if (reglist8_vect & 0x4)
3285 + reglist16_vect |= 0x300;
3286 + if (reglist8_vect & 0x8)
3287 + reglist16_vect |= 0x400;
3288 + if (reglist8_vect & 0x10)
3289 + reglist16_vect |= 0x800;
3290 + if (reglist8_vect & 0x20)
3291 + reglist16_vect |= 0x1000;
3292 + if (reglist8_vect & 0x40)
3293 + reglist16_vect |= 0x4000;
3294 + if (reglist8_vect & 0x80)
3295 + reglist16_vect |= 0x8000;
3297 + return reglist16_vect;
3301 +avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
3303 + /* Make sure reglist8_string is empty */
3304 + reglist8_string[0] = '\0';
3306 + if (reglist8_vect & 0x1)
3307 + sprintf (reglist8_string, "r0-r3");
3308 + if (reglist8_vect & 0x2)
3309 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
3310 + reglist8_string) :
3311 + sprintf (reglist8_string, "r4-r7");
3312 + if (reglist8_vect & 0x4)
3313 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
3314 + reglist8_string) :
3315 + sprintf (reglist8_string, "r8-r9");
3316 + if (reglist8_vect & 0x8)
3317 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
3318 + reglist8_string) :
3319 + sprintf (reglist8_string, "r10");
3320 + if (reglist8_vect & 0x10)
3321 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
3322 + reglist8_string) :
3323 + sprintf (reglist8_string, "r11");
3324 + if (reglist8_vect & 0x20)
3325 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
3326 + reglist8_string) :
3327 + sprintf (reglist8_string, "r12");
3328 + if (reglist8_vect & 0x40)
3329 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
3330 + reglist8_string) :
3331 + sprintf (reglist8_string, "lr");
3332 + if (reglist8_vect & 0x80)
3333 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
3334 + reglist8_string) :
3335 + sprintf (reglist8_string, "pc");
3339 +avr32_eh_return_data_regno (int n)
3341 + if (n >= 0 && n <= 3)
3344 + return INVALID_REGNUM;
3347 +/* Compute the distance from register FROM to register TO.
3348 + These can be the arg pointer, the frame pointer or
3349 + the stack pointer.
3350 + Typical stack layout looks like this:
3352 + old stack pointer -> | |
3355 + | | saved arguments for
3356 + | | vararg functions
3357 + arg_pointer -> | | /
3367 + stack ptr --> | | /
3375 +   For a given function some or all of these stack components
3376 + may not be needed, giving rise to the possibility of
3377 + eliminating some of the registers.
3379 + The values returned by this function must reflect the behaviour
3380 + of avr32_expand_prologue() and avr32_compute_save_reg_mask().
3382 + The sign of the number returned reflects the direction of stack
3383 + growth, so the values are positive for all eliminations except
3384 + from the soft frame pointer to the hard frame pointer. */
3388 +avr32_initial_elimination_offset (int from, int to)
3391 + int call_saved_regs = 0;
3392 + unsigned long saved_reg_mask, saved_fp_reg_mask;
3393 + unsigned int local_vars = get_frame_size ();
3395 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
3396 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
3398 + for (i = 0; i < 16; ++i)
3400 + if (saved_reg_mask & (1 << i))
3401 + call_saved_regs += 4;
3404 + for (i = 0; i < NUM_FP_REGS; ++i)
3406 + if (saved_fp_reg_mask & (1 << i))
3407 + call_saved_regs += 4;
3412 + case ARG_POINTER_REGNUM:
3415 + case STACK_POINTER_REGNUM:
3416 + return call_saved_regs + local_vars;
3417 + case FRAME_POINTER_REGNUM:
3418 + return call_saved_regs;
3422 + case FRAME_POINTER_REGNUM:
3425 + case STACK_POINTER_REGNUM:
3426 + return local_vars;
3437 + Returns a rtx used when passing the next argument to a function.
3438 +   avr32_init_cumulative_args() and avr32_function_arg_advance() set which
3442 +avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
3443 + tree type, int named)
3447 + HOST_WIDE_INT arg_size, arg_rsize;
3450 + arg_size = int_size_in_bytes (type);
3454 + arg_size = GET_MODE_SIZE (mode);
3456 + arg_rsize = PUSH_ROUNDING (arg_size);
3459 + The last time this macro is called, it is called with mode == VOIDmode,
3460 + and its result is passed to the call or call_value pattern as operands 2
3461 + and 3 respectively. */
3462 + if (mode == VOIDmode)
3464 + return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
3467 + if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
3472 + if (arg_rsize == 8)
3474 + /* use r11:r10 or r9:r8. */
3475 + if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
3477 + else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
3482 + else if (arg_rsize == 4)
3483 + { /* Use first available register */
3485 + while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index))
3487 + if (index > LAST_CUM_REG_INDEX)
3491 + SET_REG_INDEX (cum, index);
3493 + if (GET_REG_INDEX (cum) >= 0)
3494 + return gen_rtx_REG (mode,
3495 + avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
3501 + Set the register used for passing the first argument to a function.
3504 +avr32_init_cumulative_args (CUMULATIVE_ARGS * cum,
3505 + tree fntype ATTRIBUTE_UNUSED,
3506 + rtx libname ATTRIBUTE_UNUSED,
3507 + tree fndecl ATTRIBUTE_UNUSED)
3509 + /* Set all registers as unused. */
3510 + SET_INDEXES_UNUSED (cum);
3512 + /* Reset uses_anonymous_args */
3513 + cum->uses_anonymous_args = 0;
3515 + /* Reset size of stack pushed arguments */
3516 + cum->stack_pushed_args_size = 0;
3520 + Set register used for passing the next argument to a function. Only the
3521 + Scratch Registers are used.
3526 + 13 r13 _SP_________
3527 + FIRST_CUM_REG 12 r12 _||_
3529 + 11 r10 _||_ Scratch Registers
3531 + LAST_SCRATCH_REG 9 r8 _\/_________
3543 +avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
3544 + tree type, int named ATTRIBUTE_UNUSED)
3546 + HOST_WIDE_INT arg_size, arg_rsize;
3550 + arg_size = int_size_in_bytes (type);
3554 + arg_size = GET_MODE_SIZE (mode);
3556 + arg_rsize = PUSH_ROUNDING (arg_size);
3558 +  /* If the argument had to be passed on the stack, no register is used. */
3559 + if ((*targetm.calls.must_pass_in_stack) (mode, type))
3561 + cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
3565 + /* Mark the used registers as "used". */
3566 + if (GET_REG_INDEX (cum) >= 0)
3568 + SET_USED_INDEX (cum, GET_REG_INDEX (cum));
3569 + if (arg_rsize == 8)
3571 + SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
3576 + /* Had to use stack */
3577 + cum->stack_pushed_args_size += arg_rsize;
3582 +   Defines which direction to go to find the next register to use if the
3583 +   argument is larger than one register or for arguments shorter than an
3584 + int which is not promoted, such as the last part of structures with
3585 + size not a multiple of 4. */
3587 +avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
3590 + /* Pad upward for all aggregates except byte and halfword sized aggregates
3591 + which can be passed in registers. */
3593 + && AGGREGATE_TYPE_P (type)
3594 + && (int_size_in_bytes (type) != 1)
3595 + && !((int_size_in_bytes (type) == 2)
3596 + && TYPE_ALIGN_UNIT (type) >= 2)
3597 + && (int_size_in_bytes (type) & 0x3))
3606 + Return a rtx used for the return value from a function call.
3609 +avr32_function_value (tree type, tree func, bool outgoing ATTRIBUTE_UNUSED)
3611 + if (avr32_return_in_memory (type, func))
3614 + if (int_size_in_bytes (type) <= 4)
3616 + enum machine_mode mode = TYPE_MODE (type);
3617 + int unsignedp = 0;
3618 + PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
3619 + return gen_rtx_REG (mode, RET_REGISTER);
3621 + else if (int_size_in_bytes (type) <= 8)
3622 + return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
3628 + Return a rtx used for the return value from a library function call.
3631 +avr32_libcall_value (enum machine_mode mode)
3634 + if (GET_MODE_SIZE (mode) <= 4)
3635 + return gen_rtx_REG (mode, RET_REGISTER);
3636 + else if (GET_MODE_SIZE (mode) <= 8)
3637 + return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
3642 +/* Return TRUE if X references a SYMBOL_REF. */
3644 +symbol_mentioned_p (rtx x)
3649 + if (GET_CODE (x) == SYMBOL_REF)
3652 + fmt = GET_RTX_FORMAT (GET_CODE (x));
3654 + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3656 + if (fmt[i] == 'E')
3660 + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3661 + if (symbol_mentioned_p (XVECEXP (x, i, j)))
3664 + else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
3671 +/* Return TRUE if X references a LABEL_REF. */
3673 +label_mentioned_p (rtx x)
3678 + if (GET_CODE (x) == LABEL_REF)
3681 + fmt = GET_RTX_FORMAT (GET_CODE (x));
3682 + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3684 + if (fmt[i] == 'E')
3688 + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3689 + if (label_mentioned_p (XVECEXP (x, i, j)))
3692 + else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
3699 +/* Return TRUE if X contains a MEM expression. */
3701 +mem_mentioned_p (rtx x)
3709 + fmt = GET_RTX_FORMAT (GET_CODE (x));
3710 + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3712 + if (fmt[i] == 'E')
3716 + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3717 + if (mem_mentioned_p (XVECEXP (x, i, j)))
3720 + else if (fmt[i] == 'e' && mem_mentioned_p (XEXP (x, i)))
3728 +avr32_legitimate_pic_operand_p (rtx x)
3731 + /* We can't have const, this must be broken down to a symbol. */
3732 + if (GET_CODE (x) == CONST)
3735 + /* Can't access symbols or labels via the constant pool either */
3736 + if ((GET_CODE (x) == SYMBOL_REF
3737 + && CONSTANT_POOL_ADDRESS_P (x)
3738 + && (symbol_mentioned_p (get_pool_constant (x))
3739 + || label_mentioned_p (get_pool_constant (x)))))
3747 +legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3751 + if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
3757 + if (!can_create_pseudo_p ())
3760 + reg = gen_reg_rtx (Pmode);
3765 + emit_move_insn (reg, orig);
3767 + /* Only set current function as using pic offset table if flag_pic is
3768 + set. This is because this function is also used if
3769 + TARGET_HAS_ASM_ADDR_PSEUDOS is set. */
3771 + current_function_uses_pic_offset_table = 1;
3773 + /* Put a REG_EQUAL note on this insn, so that it can be optimized by
3777 + else if (GET_CODE (orig) == CONST)
3782 + && GET_CODE (XEXP (orig, 0)) == PLUS
3783 + && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3788 + if (!can_create_pseudo_p ())
3791 + reg = gen_reg_rtx (Pmode);
3794 + if (GET_CODE (XEXP (orig, 0)) == PLUS)
3797 + legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3799 + legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3800 + base == reg ? 0 : reg);
3805 + if (GET_CODE (offset) == CONST_INT)
3807 + /* The base register doesn't really matter, we only want to test
3808 + the index for the appropriate mode. */
3809 + if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
3811 + if (can_create_pseudo_p ())
3812 + offset = force_reg (Pmode, offset);
3817 + if (GET_CODE (offset) == CONST_INT)
3818 + return plus_constant (base, INTVAL (offset));
3821 + return gen_rtx_PLUS (Pmode, base, offset);
3827 +/* Generate code to load the PIC register. */
3829 +avr32_load_pic_register (void)
3832 + rtx global_offset_table;
3834 + if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
3840 + l1 = gen_label_rtx ();
3842 + global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3844 + gen_rtx_CONST (Pmode,
3845 + gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
3846 + global_offset_table));
3847 + emit_insn (gen_pic_load_addr
3848 + (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
3849 + emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
3851 + /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
3852 + can cause life info to screw up. */
3853 + emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3858 +/* This hook should return true if values of type type are returned at the most
3859 + significant end of a register (in other words, if they are padded at the
3860 + least significant end). You can assume that type is returned in a register;
3861 + the caller is required to check this. Note that the register provided by
3862 + FUNCTION_VALUE must be able to hold the complete return value. For example,
3863 + if a 1-, 2- or 3-byte structure is returned at the most significant end of a
3864 + 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */
3866 +avr32_return_in_msb (tree type ATTRIBUTE_UNUSED)
3868 + /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
3869 + ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return
3870 + false; else return true; */
3877 + Returns one if a certain function value is going to be returned in memory
3878 + and zero if it is going to be returned in a register.
3880 + BLKmode and all other modes that is larger than 64 bits are returned in
3884 +avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
3886 + if (TYPE_MODE (type) == VOIDmode)
3889 + if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
3890 + || int_size_in_bytes (type) == -1)
3895 + /* If we have an aggregate then use the same mechanism as when checking if
3896 + it should be passed on the stack. */
3898 + && AGGREGATE_TYPE_P (type)
3899 + && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
3906 +/* Output the constant part of the trampoline.
3907 + lddpc r0, pc[0x8:e] ; load static chain register
3908 +   lddpc pc, pc[0x8:e] ; jump to subroutine
3909 + .long 0 ; Address to static chain,
3910 + ; filled in by avr32_initialize_trampoline()
3911 +   .long 0             ; Address to subroutine,
3912 + ; filled in by avr32_initialize_trampoline()
3915 +avr32_trampoline_template (FILE * file)
3917 + fprintf (file, "\tlddpc r0, pc[8]\n");
3918 + fprintf (file, "\tlddpc pc, pc[8]\n");
3919 + /* make room for the address of the static chain. */
3920 + fprintf (file, "\t.long\t0\n");
3921 +  /* make room for the address to the subroutine. */
3922 + fprintf (file, "\t.long\t0\n");
3927 + Initialize the variable parts of a trampoline.
3930 +avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3932 + /* Store the address to the static chain. */
3933 + emit_move_insn (gen_rtx_MEM
3934 + (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
3937 + /* Store the address to the function. */
3938 + emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
3941 + emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
3942 + gen_rtx_CONST_INT (SImode,
3943 + AVR32_CACHE_INVALIDATE_ICACHE)));
3946 +/* Return nonzero if X is valid as an addressing register. */
3948 +avr32_address_register_rtx_p (rtx x, int strict_p)
3952 + if (!register_operand(x, GET_MODE(x)))
3955 + /* If strict we require the register to be a hard register. */
3960 + regno = REGNO (x);
3963 + return REGNO_OK_FOR_BASE_P (regno);
3965 + return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
3968 +/* Return nonzero if INDEX is valid for an address index operand. */
3970 +avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
3972 + enum rtx_code code = GET_CODE (index);
3974 + if (GET_MODE_SIZE (mode) > 8)
3977 + /* Standard coprocessor addressing modes. */
3978 + if (code == CONST_INT)
3980 + if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
3981 + /* Coprocessor mem insns has a smaller reach than ordinary mem insns */
3982 + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14");
3984 + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
3987 + if (avr32_address_register_rtx_p (index, strict_p))
3992 + rtx xiop0 = XEXP (index, 0);
3993 + rtx xiop1 = XEXP (index, 1);
3994 + return ((avr32_address_register_rtx_p (xiop0, strict_p)
3995 + && power_of_two_operand (xiop1, SImode)
3996 + && (INTVAL (xiop1) <= 8))
3997 + || (avr32_address_register_rtx_p (xiop1, strict_p)
3998 + && power_of_two_operand (xiop0, SImode)
3999 + && (INTVAL (xiop0) <= 8)));
4001 + else if (code == ASHIFT)
4003 + rtx op = XEXP (index, 1);
4005 + return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
4006 + && GET_CODE (op) == CONST_INT
4007 + && INTVAL (op) > 0 && INTVAL (op) <= 3);
4014 + Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if
4015 + the RTX x is a legitimate memory address.
4017 +   Returns NO_REGS if the address is not legitimate, GENERAL_REGS or ALL_REGS
4021 +/* Forward declaration*/
4022 +int is_minipool_label (rtx label);
4025 +avr32_legitimate_address (enum machine_mode mode, rtx x, int strict)
4028 + switch (GET_CODE (x))
4031 + return avr32_address_register_rtx_p (x, strict);
4034 + rtx label = avr32_find_symbol (x);
4038 + If we enable (const (plus (symbol_ref ...))) type constant
4039 + pool entries we must add support for it in the predicates and
4040 + in the minipool generation in avr32_reorg().
4041 + (CONSTANT_POOL_ADDRESS_P (label)
4043 + && (symbol_mentioned_p (get_pool_constant (label))
4044 + || label_mentioned_p (get_pool_constant (label)))))
4046 + ((GET_CODE (label) == LABEL_REF)
4047 + && GET_CODE (XEXP (label, 0)) == CODE_LABEL
4048 + && is_minipool_label (XEXP (label, 0)))))
4055 + if (GET_CODE (XEXP (x, 0)) == CODE_LABEL
4056 + && is_minipool_label (XEXP (x, 0)))
4063 + if (CONSTANT_POOL_ADDRESS_P (x)
4065 + && (symbol_mentioned_p (get_pool_constant (x))
4066 + || label_mentioned_p (get_pool_constant (x)))))
4069 + A symbol_ref is only legal if it is a function. If all of them are
4070 + legal, a pseudo reg that is a constant will be replaced by a
4071 +         symbol_ref and make illegal code. SYMBOL_REF_FLAG is set by
4072 + ENCODE_SECTION_INFO. */
4073 + else if (SYMBOL_REF_RCALL_FUNCTION_P (x))
4077 + case PRE_DEC: /* (pre_dec (...)) */
4078 + case POST_INC: /* (post_inc (...)) */
4079 + return avr32_address_register_rtx_p (XEXP (x, 0), strict);
4080 + case PLUS: /* (plus (...) (...)) */
4082 + rtx xop0 = XEXP (x, 0);
4083 + rtx xop1 = XEXP (x, 1);
4085 + return ((avr32_address_register_rtx_p (xop0, strict)
4086 + && avr32_legitimate_index_p (mode, xop1, strict))
4087 + || (avr32_address_register_rtx_p (xop1, strict)
4088 + && avr32_legitimate_index_p (mode, xop0, strict)));
4099 +avr32_const_ok_for_move (HOST_WIDE_INT c)
4101 + if ( TARGET_V2_INSNS )
4102 + return ( avr32_const_ok_for_constraint_p (c, 'K', "Ks21")
4103 + /* movh instruction */
4104 + || avr32_hi16_immediate_operand (GEN_INT(c), VOIDmode) );
4106 + return avr32_const_ok_for_constraint_p (c, 'K', "Ks21");
4110 +avr32_const_double_immediate (rtx value)
4112 + HOST_WIDE_INT hi, lo;
4114 + if (GET_CODE (value) != CONST_DOUBLE)
4117 + if (SCALAR_FLOAT_MODE_P (GET_MODE (value)))
4119 + HOST_WIDE_INT target_float[2];
4121 + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
4122 + GET_MODE (value));
4123 + lo = target_float[0];
4124 + hi = target_float[1];
4128 + hi = CONST_DOUBLE_HIGH (value);
4129 + lo = CONST_DOUBLE_LOW (value);
4132 + if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
4133 + && (GET_MODE (value) == SFmode
4134 + || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
4144 +avr32_legitimate_constant_p (rtx x)
4146 + switch (GET_CODE (x))
4149 + /* Check if we should put large immediate into constant pool
4150 + or load them directly with mov/orh.*/
4151 + if (!avr32_imm_in_const_pool)
4154 + return avr32_const_ok_for_move (INTVAL (x));
4155 + case CONST_DOUBLE:
4156 + /* Check if we should put large immediate into constant pool
4157 + or load them directly with mov/orh.*/
4158 + if (!avr32_imm_in_const_pool)
4161 + if (GET_MODE (x) == SFmode
4162 + || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
4163 + return avr32_const_double_immediate (x);
4167 + return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
4169 + return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
4172 + case CONST_VECTOR:
4175 + printf ("%s():\n", __FUNCTION__);
4182 +/* Strip any special encoding from labels */
4184 +avr32_strip_name_encoding (const char *name)
4186 + const char *stripped = name;
4190 + switch (stripped[0])
4193 + stripped = strchr (name + 1, '#') + 1;
4196 + stripped = &stripped[1];
4206 +/* Do anything needed before RTL is emitted for each function. */
4207 +static struct machine_function *
4208 +avr32_init_machine_status (void)
4210 + struct machine_function *machine;
4212 + (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
4214 +#if AVR32_FT_UNKNOWN != 0
4215 + machine->func_type = AVR32_FT_UNKNOWN;
4218 + machine->minipool_label_head = 0;
4219 + machine->minipool_label_tail = 0;
4220 + machine->ifcvt_after_reload = 0;
4225 +avr32_init_expanders (void)
4227 + /* Arrange to initialize and mark the machine per-function status. */
4228 + init_machine_status = avr32_init_machine_status;
4232 +/* Return an RTX indicating where the return address to the
4233 + calling function can be found. */
4236 +avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4241 + return get_hard_reg_initial_val (Pmode, LR_REGNUM);
4246 +avr32_encode_section_info (tree decl, rtx rtl, int first)
4249 + if (first && DECL_P (decl))
4251 + /* Set SYMBOL_REG_FLAG for local functions */
4252 + if (!TREE_PUBLIC (decl) && TREE_CODE (decl) == FUNCTION_DECL)
4254 + if ((*targetm.binds_local_p) (decl))
4256 + SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
4263 +avr32_asm_output_label (FILE * stream, const char *name)
4265 + name = avr32_strip_name_encoding (name);
4267 + /* Print the label. */
4268 + assemble_name (stream, name);
4269 + fprintf (stream, ":\n");
4275 +avr32_asm_weaken_label (FILE * stream, const char *name)
4277 + fprintf (stream, "\t.weak ");
4278 + assemble_name (stream, name);
4279 + fprintf (stream, "\n");
4283 + Checks if a labelref is equal to a reserved word in the assembler. If it is,
4284 + insert a '_' before the label name.
4287 +avr32_asm_output_labelref (FILE * stream, const char *name)
4289 + int verbatim = FALSE;
4290 + const char *stripped = name;
4291 + int strip_finished = FALSE;
4293 + while (!strip_finished)
4295 + switch (stripped[0])
4298 + stripped = strchr (name + 1, '#') + 1;
4301 + stripped = &stripped[1];
4305 + strip_finished = TRUE;
4311 + fputs (stripped, stream);
4313 + asm_fprintf (stream, "%U%s", stripped);
4319 + Check if the comparison in compare_exp is redundant
4320 + for the condition given in next_cond given that the
4321 + needed flags are already set by an earlier instruction.
4322 + Uses cc_prev_status to check this.
4324 + Returns NULL_RTX if the compare is not redundant
4325 + or the new condition to use in the conditional
4326 + instruction if the compare is redundant.
4329 +is_compare_redundant (rtx compare_exp, rtx next_cond)
4331 + int z_flag_valid = FALSE;
4332 + int n_flag_valid = FALSE;
4335 + if (GET_CODE (compare_exp) != COMPARE
4336 + && GET_CODE (compare_exp) != AND)
4340 + if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
4342 + /* cc0 already contains the correct comparison -> delete cmp insn */
4346 + if (GET_MODE (compare_exp) != SImode)
4349 + switch (cc_prev_status.mdep.flags)
4353 + n_flag_valid = TRUE;
4356 + z_flag_valid = TRUE;
4359 + if (cc_prev_status.mdep.value
4360 + && GET_CODE (compare_exp) == COMPARE
4361 + && REG_P (XEXP (compare_exp, 0))
4362 + && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
4363 + && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
4364 + && next_cond != NULL_RTX)
4366 + if (INTVAL (XEXP (compare_exp, 1)) == 0
4368 + && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
4369 +	/* We can skip comparison since the Z flag is already reflecting ops[0] */
4371 + else if (n_flag_valid
4372 + && ((INTVAL (XEXP (compare_exp, 1)) == 0
4373 + && (GET_CODE (next_cond) == GE
4374 + || GET_CODE (next_cond) == LT))
4375 + || (INTVAL (XEXP (compare_exp, 1)) == -1
4376 + && (GET_CODE (next_cond) == GT
4377 + || GET_CODE (next_cond) == LE))))
4379 +	  /* We can skip comparison since the N flag is already reflecting ops[0],
4380 + which means that we can use the mi/pl conditions to check if
4381 + ops[0] is GE or LT 0. */
4382 + if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
4384 + gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
4388 + gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
4396 +/* Updates cc_status. */
4398 +avr32_notice_update_cc (rtx exp, rtx insn)
4400 + enum attr_cc attr_cc = get_attr_cc (insn);
4402 + if ( attr_cc == CC_SET_Z_IF_NOT_V2 )
4404 + if (TARGET_V2_INSNS)
4405 + attr_cc = CC_NONE;
4407 + attr_cc = CC_SET_Z;
4415 + /* Check if the function call returns a value in r12 */
4416 + if (REG_P (recog_data.operand[0])
4417 + && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
4419 + cc_status.flags = 0;
4420 + cc_status.mdep.value =
4421 + gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
4422 + cc_status.mdep.flags = CC_SET_VNCZ;
4423 + cc_status.mdep.cond_exec_cmp_clobbered = 0;
4429 + /* Check that compare will not be optimized away if so nothing should
4431 + rtx compare_exp = SET_SRC (exp);
4432 + /* Check if we have a tst expression. If so convert it to a
4433 + compare with 0. */
4434 + if ( REG_P (SET_SRC (exp)) )
4435 + compare_exp = gen_rtx_COMPARE (GET_MODE (SET_SRC (exp)),
4439 + if (!next_insn_emits_cmp (insn)
4440 + && (is_compare_redundant (compare_exp, get_next_insn_cond (insn)) == NULL_RTX))
4443 + /* Reset the nonstandard flag */
4445 + cc_status.flags = 0;
4446 + cc_status.mdep.value = compare_exp;
4447 + cc_status.mdep.flags = CC_SET_VNCZ;
4448 + cc_status.mdep.cond_exec_cmp_clobbered = 0;
4452 + case CC_CMP_COND_INSN:
4454 + /* Conditional insn that emit the compare itself. */
4456 + rtx cmp_op0, cmp_op1;
4459 + rtx next_insn = next_nonnote_insn (insn);
4461 + if ( GET_CODE (exp) == COND_EXEC )
4463 + cmp_op0 = XEXP (COND_EXEC_TEST (exp), 0);
4464 + cmp_op1 = XEXP (COND_EXEC_TEST (exp), 1);
4465 + cond = COND_EXEC_TEST (exp);
4466 + dest = SET_DEST (COND_EXEC_CODE (exp));
4470 + /* If then else conditional. compare operands are in operands
4472 + cmp_op0 = recog_data.operand[4];
4473 + cmp_op1 = recog_data.operand[5];
4474 + cond = recog_data.operand[1];
4475 + dest = SET_DEST (exp);
4478 + if ( GET_CODE (cmp_op0) == AND )
4481 + cmp = gen_rtx_COMPARE (GET_MODE (cmp_op0),
4485 + /* Check if the conditional insns updates a register present
4486 + in the comparison, if so then we must reset the cc_status. */
4488 + && (reg_mentioned_p (dest, cmp_op0)
4489 + || reg_mentioned_p (dest, cmp_op1))
4490 + && GET_CODE (exp) != COND_EXEC )
4494 + else if (is_compare_redundant (cmp, cond) == NULL_RTX)
4496 + /* Reset the nonstandard flag */
4498 + if ( GET_CODE (cmp_op0) == AND )
4500 + cc_status.flags = CC_INVERTED;
4501 + cc_status.mdep.flags = CC_SET_Z;
4505 + cc_status.flags = 0;
4506 + cc_status.mdep.flags = CC_SET_VNCZ;
4508 + cc_status.mdep.value = cmp;
4509 + cc_status.mdep.cond_exec_cmp_clobbered = 0;
4513 + /* Check if we have a COND_EXEC insn which updates one
4514 + of the registers in the compare status. */
4516 + && (reg_mentioned_p (dest, cmp_op0)
4517 + || reg_mentioned_p (dest, cmp_op1))
4518 + && GET_CODE (exp) == COND_EXEC )
4519 + cc_status.mdep.cond_exec_cmp_clobbered = 1;
4521 + if ( cc_status.mdep.cond_exec_cmp_clobbered
4522 + && GET_CODE (exp) == COND_EXEC
4523 + && next_insn != NULL
4524 + && INSN_P (next_insn)
4525 + && !(GET_CODE (PATTERN (next_insn)) == COND_EXEC
4526 + && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0), cmp_op0)
4527 + && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1), cmp_op1)
4528 + && (GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == GET_CODE (cond)
4529 + || GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == reverse_condition (GET_CODE (cond)))) )
4531 + /* We have a sequence of conditional insns where the compare status has been clobbered
4532 + since the compare no longer reflects the content of the values to compare. */
4534 + cc_status.mdep.cond_exec_cmp_clobbered = 1;
4539 + case CC_FPCOMPARE:
4540 +    /* Check that floating-point compare will not be optimized away; if so,
4541 +       nothing should be done */
4542 + if (!rtx_equal_p (cc_prev_status.mdep.fpvalue, SET_SRC (exp)))
4544 + /* cc0 already contains the correct comparison -> delete cmp insn */
4545 + /* Reset the nonstandard flag */
4546 + cc_status.mdep.fpvalue = SET_SRC (exp);
4547 + cc_status.mdep.fpflags = CC_SET_CZ;
4550 + case CC_FROM_FPCC:
4551 + /* Flags are updated with flags from Floating-point coprocessor, set
4552 + CC_NOT_SIGNED flag since the flags are set so that unsigned
4553 +       condition codes can be used directly. */
4555 + cc_status.flags = CC_NOT_SIGNED;
4556 + cc_status.mdep.value = cc_status.mdep.fpvalue;
4557 + cc_status.mdep.flags = cc_status.mdep.fpflags;
4560 + /* Bit load is kind of like an inverted testsi, because the Z flag is
4563 + cc_status.flags = CC_INVERTED;
4564 + cc_status.mdep.value = SET_SRC (exp);
4565 + cc_status.mdep.flags = CC_SET_Z;
4566 + cc_status.mdep.cond_exec_cmp_clobbered = 0;
4569 + /* Insn does not affect CC at all. Check if the instruction updates
4570 + some of the register currently reflected in cc0 */
4572 + if ((GET_CODE (exp) == SET)
4573 + && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
4574 + && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
4575 + || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
4576 + || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
4581 + /* If this is a parallel we must step through each of the parallel
4583 + if (GET_CODE (exp) == PARALLEL)
4586 + for (i = 0; i < XVECLEN (exp, 0); ++i)
4588 + rtx vec_exp = XVECEXP (exp, 0, i);
4589 + if ((GET_CODE (vec_exp) == SET)
4590 + && (cc_status.value1 || cc_status.value2
4591 + || cc_status.mdep.value)
4592 + && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
4593 + || reg_mentioned_p (SET_DEST (vec_exp),
4595 + || reg_mentioned_p (SET_DEST (vec_exp),
4596 + cc_status.mdep.value)))
4603 +      /* Check if we have memory operations with post_inc or pre_dec on the
4604 + register currently reflected in cc0 */
4605 + if (GET_CODE (exp) == SET
4606 + && GET_CODE (SET_SRC (exp)) == MEM
4607 + && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
4608 + || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
4611 + (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
4612 + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
4614 + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
4615 + cc_status.mdep.value)))
4618 + if (GET_CODE (exp) == SET
4619 + && GET_CODE (SET_DEST (exp)) == MEM
4620 + && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
4621 + || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
4624 + (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
4625 + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
4627 + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
4628 + cc_status.mdep.value)))
4634 + cc_status.mdep.value = recog_data.operand[0];
4635 + cc_status.mdep.flags = CC_SET_VNCZ;
4636 + cc_status.mdep.cond_exec_cmp_clobbered = 0;
4641 + cc_status.mdep.value = recog_data.operand[0];
4642 + cc_status.mdep.flags = CC_SET_NCZ;
4643 + cc_status.mdep.cond_exec_cmp_clobbered = 0;
4648 + cc_status.mdep.value = recog_data.operand[0];
4649 + cc_status.mdep.flags = CC_SET_CZ;
4650 + cc_status.mdep.cond_exec_cmp_clobbered = 0;
4655 + cc_status.mdep.value = recog_data.operand[0];
4656 + cc_status.mdep.flags = CC_SET_Z;
4657 + cc_status.mdep.cond_exec_cmp_clobbered = 0;
4671 + Outputs to stdio stream stream the assembler syntax for an instruction
4672 + operand x. x is an RTL expression.
4675 +avr32_print_operand (FILE * stream, rtx x, int code)
4679 + if ( code == '?' )
4681 + /* Predicable instruction, print condition code */
4683 + /* If the insn should not be conditional then do nothing. */
4684 + if ( current_insn_predicate == NULL_RTX )
4687 + /* Set x to the predicate to force printing
4688 + the condition later on. */
4689 + x = current_insn_predicate;
4691 +      /* Reverse condition if using bld insn. */
4692 + if ( GET_CODE (XEXP(current_insn_predicate,0)) == AND )
4693 + x = reversed_condition (current_insn_predicate);
4695 + else if ( code == '!' )
4697 + /* Output compare for conditional insn if needed. */
4699 + gcc_assert ( current_insn_predicate != NULL_RTX );
4700 + new_cond = avr32_output_cmp(current_insn_predicate,
4701 + GET_MODE(XEXP(current_insn_predicate,0)),
4702 + XEXP(current_insn_predicate,0),
4703 + XEXP(current_insn_predicate,1));
4705 + /* Check if the new condition is a special avr32 condition
4706 + specified using UNSPECs. If so we must handle it differently. */
4707 + if ( GET_CODE (new_cond) == UNSPEC )
4709 + current_insn_predicate =
4710 + gen_rtx_UNSPEC (CCmode,
4712 + XEXP(current_insn_predicate,0),
4713 + XEXP(current_insn_predicate,1)),
4714 + XINT (new_cond, 1));
4718 + PUT_CODE(current_insn_predicate, GET_CODE(new_cond));
4723 + switch (GET_CODE (x))
4726 + switch (XINT (x, 1))
4728 + case UNSPEC_COND_PL:
4730 + fputs ("mi", stream);
4732 + fputs ("pl", stream);
4734 + case UNSPEC_COND_MI:
4736 + fputs ("pl", stream);
4738 + fputs ("mi", stream);
4746 + fputs ("ne", stream);
4748 + fputs ("eq", stream);
4752 + fputs ("eq", stream);
4754 + fputs ("ne", stream);
4758 + fputs ("le", stream);
4760 + fputs ("gt", stream);
4764 + fputs ("ls", stream);
4766 + fputs ("hi", stream);
4770 + fputs ("ge", stream);
4772 + fputs ("lt", stream);
4776 + fputs ("hs", stream);
4778 + fputs ("lo", stream);
4782 + fputs ("lt", stream);
4784 + fputs ("ge", stream);
4788 + fputs ("lo", stream);
4790 + fputs ("hs", stream);
4794 + fputs ("gt", stream);
4796 + fputs ("le", stream);
4800 + fputs ("hi", stream);
4802 + fputs ("ls", stream);
4806 + HOST_WIDE_INT value = INTVAL (x);
4811 + if ( HOST_BITS_PER_WIDE_INT > BITS_PER_WORD )
4813 + /* A const_int can be used to represent DImode constants. */
4814 + value >>= BITS_PER_WORD;
4816 + /* We might get a const_int immediate for setting a DI register,
4817 + we then must then return the correct sign extended DI. The most
4818 + significant word is just a sign extension. */
4819 + else if (value < 0)
4829 + /* Set to bit position of first bit set in immediate */
4830 + int i, bitpos = 32;
4831 + for (i = 0; i < 32; i++)
4832 + if (value & (1 << i))
4847 + sprintf (op, "r0-r3");
4849 + strlen (op) ? sprintf (op, "%s, r4-r7", op) : sprintf (op,
4852 + strlen (op) ? sprintf (op, "%s, r8-r9", op) : sprintf (op,
4855 + strlen (op) ? sprintf (op, "%s, r10", op) : sprintf (op,
4858 + strlen (op) ? sprintf (op, "%s, r11", op) : sprintf (op,
4861 + strlen (op) ? sprintf (op, "%s, r12", op) : sprintf (op,
4864 + strlen (op) ? sprintf (op, "%s, lr", op) : sprintf (op, "lr");
4866 + strlen (op) ? sprintf (op, "%s, pc", op) : sprintf (op, "pc");
4868 + fputs (op, stream);
4874 + char reglist16_string[100];
4876 + reglist16_string[0] = '\0';
4878 + for (i = 0; i < 16; ++i)
4880 + if (value & (1 << i))
4882 + strlen (reglist16_string) ? sprintf (reglist16_string,
4888 + sprintf (reglist16_string, "%s",
4889 + reg_names[INTERNAL_REGNUM (i)]);
4892 + fputs (reglist16_string, stream);
4898 + char reglist_string[100];
4899 + avr32_make_fp_reglist_w (value, (char *) reglist_string);
4900 + fputs (reglist_string, stream);
4906 + char reglist_string[100];
4907 + avr32_make_fp_reglist_d (value, (char *) reglist_string);
4908 + fputs (reglist_string, stream);
4912 + /* Print halfword part of word */
4913 + fputs (value ? "b" : "t", stream);
4918 + fprintf (stream, "%d", value);
4921 + case CONST_DOUBLE:
4923 + HOST_WIDE_INT hi, lo;
4924 + if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
4926 + HOST_WIDE_INT target_float[2];
4928 + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
4930 + /* For doubles the most significant part starts at index 0. */
4931 + if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
4933 + hi = target_float[0];
4934 + lo = target_float[1];
4938 + lo = target_float[0];
4943 + hi = CONST_DOUBLE_HIGH (x);
4944 + lo = CONST_DOUBLE_LOW (x);
4948 + fprintf (stream, "%ld", hi);
4950 + fprintf (stream, "%ld", lo);
4955 + output_addr_const (stream, XEXP (XEXP (x, 0), 0));
4956 + fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1)));
4959 + /* Swap register name if the register is DImode or DFmode. */
4960 + if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
4962 + /* Double register must have an even numbered address */
4963 + gcc_assert (!(REGNO (x) % 2));
4965 + fputs (reg_names[true_regnum (x)], stream);
4967 + fputs (reg_names[true_regnum (x) + 1], stream);
4969 + else if (GET_MODE (x) == TImode)
4974 + fputs (reg_names[true_regnum (x)], stream);
4977 + fputs (reg_names[true_regnum (x) + 1], stream);
4980 + fputs (reg_names[true_regnum (x) + 2], stream);
4983 + fputs (reg_names[true_regnum (x) + 3], stream);
4986 + fprintf (stream, "%s, %s, %s, %s",
4987 + reg_names[true_regnum (x) + 3],
4988 + reg_names[true_regnum (x) + 2],
4989 + reg_names[true_regnum (x) + 1],
4990 + reg_names[true_regnum (x)]);
4996 + fputs (reg_names[true_regnum (x)], stream);
5002 + output_addr_const (stream, x);
5005 + switch (GET_CODE (XEXP (x, 0)))
5009 + output_addr_const (stream, XEXP (x, 0));
5012 + switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5015 + output_addr_const (stream, XEXP (XEXP (x, 0), 0));
5023 + avr32_print_operand (stream, XEXP (x, 0), 0);
5025 + fputs ("[0]", stream);
5028 + fputs ("--", stream);
5029 + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
5032 + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
5033 + fputs ("++", stream);
5037 + rtx op0 = XEXP (XEXP (x, 0), 0);
5038 + rtx op1 = XEXP (XEXP (x, 0), 1);
5039 + rtx base = NULL_RTX, offset = NULL_RTX;
5041 + if (avr32_address_register_rtx_p (op0, 1))
5046 + else if (avr32_address_register_rtx_p (op1, 1))
5048 + /* Operands are switched. */
5053 + gcc_assert (base && offset
5054 + && avr32_address_register_rtx_p (base, 1)
5055 + && avr32_legitimate_index_p (GET_MODE (x), offset,
5058 + avr32_print_operand (stream, base, 0);
5059 + fputs ("[", stream);
5060 + avr32_print_operand (stream, offset, 0);
5061 + fputs ("]", stream);
5065 + output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
5066 + fprintf (stream, " + %ld",
5067 + INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
5075 + int value = INTVAL (XEXP (x, 1));
5077 + /* Convert immediate in multiplication into a shift immediate */
5092 + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
5097 + if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5098 + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
5099 + (int) INTVAL (XEXP (x, 1)));
5100 + else if (REG_P (XEXP (x, 1)))
5101 + fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
5102 + reg_names[true_regnum (XEXP (x, 1))]);
5109 + if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5110 + fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
5111 + (int) INTVAL (XEXP (x, 1)));
5112 + else if (REG_P (XEXP (x, 1)))
5113 + fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
5114 + reg_names[true_regnum (XEXP (x, 1))]);
5119 + fprintf (stream, ">>");
5123 + /* Load store multiple */
5125 + int count = XVECLEN (x, 0);
5126 + int reglist16 = 0;
5127 + char reglist16_string[100];
5129 + for (i = 0; i < count; ++i)
5131 + rtx vec_elm = XVECEXP (x, 0, i);
5132 + if (GET_MODE (vec_elm) != SET)
5134 + debug_rtx (vec_elm);
5135 + internal_error ("Unknown element in parallel expression!");
5137 + if (GET_MODE (XEXP (vec_elm, 0)) == REG)
5139 + /* Load multiple */
5140 + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
5144 + /* Store multiple */
5145 + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
5149 + avr32_make_reglist16 (reglist16, reglist16_string);
5150 + fputs (reglist16_string, stream);
5157 + rtx op0 = XEXP (x, 0);
5158 + rtx op1 = XEXP (x, 1);
5159 + rtx base = NULL_RTX, offset = NULL_RTX;
5161 + if (avr32_address_register_rtx_p (op0, 1))
5166 + else if (avr32_address_register_rtx_p (op1, 1))
5168 + /* Operands are switched. */
5173 + gcc_assert (base && offset
5174 + && avr32_address_register_rtx_p (base, 1)
5175 + && avr32_legitimate_index_p (GET_MODE (x), offset, 1));
5177 + avr32_print_operand (stream, base, 0);
5178 + fputs ("[", stream);
5179 + avr32_print_operand (stream, offset, 0);
5180 + fputs ("]", stream);
5191 + internal_error ("Illegal expression for avr32_print_operand");
5196 +avr32_get_note_reg_equiv (rtx insn)
5200 + note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
5202 + if (note != NULL_RTX)
5203 + return XEXP (note, 0);
5209 + Outputs to stdio stream stream the assembler syntax for an instruction
5210 + operand that is a memory reference whose address is x. x is an RTL
5216 +avr32_print_operand_address (FILE * stream, rtx x)
5218 + fprintf (stream, "(%d) /* address */", REGNO (x));
5221 +/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */
5223 +avr32_got_mentioned_p (rtx addr)
5225 + if (GET_CODE (addr) == MEM)
5226 + addr = XEXP (addr, 0);
5227 + while (GET_CODE (addr) == CONST)
5228 + addr = XEXP (addr, 0);
5229 + if (GET_CODE (addr) == SYMBOL_REF)
5231 + return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
5233 + if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
5237 + l1 = avr32_got_mentioned_p (XEXP (addr, 0));
5238 + l2 = avr32_got_mentioned_p (XEXP (addr, 1));
5245 +/* Find the symbol in an address expression. */
5248 +avr32_find_symbol (rtx addr)
5250 + if (GET_CODE (addr) == MEM)
5251 + addr = XEXP (addr, 0);
5253 + while (GET_CODE (addr) == CONST)
5254 + addr = XEXP (addr, 0);
5256 + if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
5258 + if (GET_CODE (addr) == PLUS)
5262 + l1 = avr32_find_symbol (XEXP (addr, 0));
5263 + l2 = avr32_find_symbol (XEXP (addr, 1));
5264 + if (l1 != NULL_RTX && l2 == NULL_RTX)
5266 + else if (l1 == NULL_RTX && l2 != NULL_RTX)
5274 +/* Routines for manipulation of the constant pool. */
5276 +/* AVR32 instructions cannot load a large constant directly into a
5277 + register; they have to come from a pc relative load. The constant
5278 + must therefore be placed in the addressable range of the pc
5279 + relative load. Depending on the precise pc relative load
5280 + instruction the range is somewhere between 256 bytes and 4k. This
5281 + means that we often have to dump a constant inside a function, and
5282 + generate code to branch around it.
5284 + It is important to minimize this, since the branches will slow
5285 + things down and make the code larger.
5287 + Normally we can hide the table after an existing unconditional
5288 + branch so that there is no interruption of the flow, but in the
5289 + worst case the code looks like this:
5307 + We fix this by performing a scan after scheduling, which notices
5308 + which instructions need to have their operands fetched from the
5309 + constant table and builds the table.
5311 + The algorithm starts by building a table of all the constants that
5312 + need fixing up and all the natural barriers in the function (places
5313 + where a constant table can be dropped without breaking the flow).
5314 + For each fixup we note how far the pc-relative replacement will be
5315 + able to reach and the offset of the instruction into the function.
5317 + Having built the table we then group the fixes together to form
5318 + tables that are as large as possible (subject to addressing
5319 + constraints) and emit each table of constants after the last
5320 + barrier that is within range of all the instructions in the group.
5321 + If a group does not contain a barrier, then we forcibly create one
5322 + by inserting a jump instruction into the flow. Once the table has
5323 + been inserted, the insns are then modified to reference the
5324 + relevant entry in the pool.
5326 + Possible enhancements to the algorithm (not implemented) are:
5328 + 1) For some processors and object formats, there may be benefit in
5329 + aligning the pools to the start of cache lines; this alignment
5330 + would need to be taken into account when calculating addressability
5333 +/* These typedefs are located at the start of this file, so that
5334 + they can be used in the prototypes there. This comment is to
5335 + remind readers of that fact so that the following structures
5336 + can be understood more easily.
5338 + typedef struct minipool_node Mnode;
5339 + typedef struct minipool_fixup Mfix; */
5341 +struct minipool_node
5343 + /* Doubly linked chain of entries. */
5346 + /* The maximum offset into the code that this entry can be placed. While
5347 + pushing fixes for forward references, all entries are sorted in order of
5348 + increasing max_address. */
5349 + HOST_WIDE_INT max_address;
5350 + /* Similarly for an entry inserted for a backwards ref. */
5351 + HOST_WIDE_INT min_address;
5352 + /* The number of fixes referencing this entry. This can become zero if we
5353 + "unpush" an entry. In this case we ignore the entry when we come to
5356 + /* The offset from the start of the minipool. */
5357 + HOST_WIDE_INT offset;
5358 + /* The value in table. */
5360 + /* The mode of value. */
5361 + enum machine_mode mode;
5362 + /* The size of the value. */
5366 +struct minipool_fixup
5370 + HOST_WIDE_INT address;
5372 + enum machine_mode mode;
5376 + HOST_WIDE_INT forwards;
5377 + HOST_WIDE_INT backwards;
5381 +/* Fixes less than a word need padding out to a word boundary. */
5382 +#define MINIPOOL_FIX_SIZE(mode, value) \
5383 + (IS_FORCE_MINIPOOL(value) ? 0 : \
5384 + (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
5386 +#define IS_FORCE_MINIPOOL(x) \
5387 + (GET_CODE(x) == UNSPEC && \
5388 + XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
5390 +static Mnode *minipool_vector_head;
5391 +static Mnode *minipool_vector_tail;
5393 +/* The linked list of all minipool fixes required for this function. */
5394 +Mfix *minipool_fix_head;
5395 +Mfix *minipool_fix_tail;
5396 +/* The fix entry for the current minipool, once it has been placed. */
5397 +Mfix *minipool_barrier;
5399 +/* Determines if INSN is the start of a jump table. Returns the end
5400 + of the TABLE or NULL_RTX. */
5402 +is_jump_table (rtx insn)
5406 + if (GET_CODE (insn) == JUMP_INSN
5407 + && JUMP_LABEL (insn) != NULL
5408 + && ((table = next_real_insn (JUMP_LABEL (insn)))
5409 + == next_real_insn (insn))
5411 + && GET_CODE (table) == JUMP_INSN
5412 + && (GET_CODE (PATTERN (table)) == ADDR_VEC
5413 + || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
5419 +static HOST_WIDE_INT
5420 +get_jump_table_size (rtx insn)
5422 + /* ADDR_VECs only take room if read-only data goes into the text section. */
5423 + if (JUMP_TABLES_IN_TEXT_SECTION
5424 +#if !defined(READONLY_DATA_SECTION_ASM_OP)
5429 + rtx body = PATTERN (insn);
5430 + int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
5432 + return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
5438 +/* Move a minipool fix MP from its current location to before MAX_MP.
5439 + If MAX_MP is NULL, then MP doesn't need moving, but the addressing
5440 + constraints may need updating. */
5442 +move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp,
5443 + HOST_WIDE_INT max_address)
5445 + /* This should never be true and the code below assumes these are
5450 + if (max_mp == NULL)
5452 + if (max_address < mp->max_address)
5453 + mp->max_address = max_address;
5457 + if (max_address > max_mp->max_address - mp->fix_size)
5458 + mp->max_address = max_mp->max_address - mp->fix_size;
5460 + mp->max_address = max_address;
5462 + /* Unlink MP from its current position. Since max_mp is non-null,
5463 + mp->prev must be non-null. */
5464 + mp->prev->next = mp->next;
5465 + if (mp->next != NULL)
5466 + mp->next->prev = mp->prev;
5468 + minipool_vector_tail = mp->prev;
5470 + /* Re-insert it before MAX_MP. */
5471 + mp->next = max_mp;
5472 + mp->prev = max_mp->prev;
5473 + max_mp->prev = mp;
5475 + if (mp->prev != NULL)
5476 + mp->prev->next = mp;
5478 + minipool_vector_head = mp;
5481 + /* Save the new entry. */
5484 + /* Scan over the preceding entries and adjust their addresses as required.
5486 + while (mp->prev != NULL
5487 + && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5489 + mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5496 +/* Add a constant to the minipool for a forward reference. Returns the
5497 + node added or NULL if the constant will not fit in this pool. */
5499 +add_minipool_forward_ref (Mfix * fix)
5501 + /* If set, max_mp is the first pool_entry that has a lower constraint than
5502 + the one we are trying to add. */
5503 + Mnode *max_mp = NULL;
5504 + HOST_WIDE_INT max_address = fix->address + fix->forwards;
5507 + /* If this fix's address is greater than the address of the first entry,
5508 + then we can't put the fix in this pool. We subtract the size of the
5509 + current fix to ensure that if the table is fully packed we still have
5510 + enough room to insert this value by shuffling the other fixes forwards. */
5511 + if (minipool_vector_head &&
5512 + fix->address >= minipool_vector_head->max_address - fix->fix_size)
5515 + /* Scan the pool to see if a constant with the same value has already been
5516 + added. While we are doing this, also note the location where we must
5517 + insert the constant if it doesn't already exist. */
5518 + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5520 + if (GET_CODE (fix->value) == GET_CODE (mp->value)
5521 + && fix->mode == mp->mode
5522 + && (GET_CODE (fix->value) != CODE_LABEL
5523 + || (CODE_LABEL_NUMBER (fix->value)
5524 + == CODE_LABEL_NUMBER (mp->value)))
5525 + && rtx_equal_p (fix->value, mp->value))
5527 + /* More than one fix references this entry. */
5529 + return move_minipool_fix_forward_ref (mp, max_mp, max_address);
5532 + /* Note the insertion point if necessary. */
5533 + if (max_mp == NULL && mp->max_address > max_address)
5538 + /* The value is not currently in the minipool, so we need to create a new
5539 + entry for it. If MAX_MP is NULL, the entry will be put on the end of
5540 + the list since the placement is less constrained than any existing
5541 + entry. Otherwise, we insert the new fix before MAX_MP and, if
5542 + necessary, adjust the constraints on the other entries. */
5543 + mp = xmalloc (sizeof (*mp));
5544 + mp->fix_size = fix->fix_size;
5545 + mp->mode = fix->mode;
5546 + mp->value = fix->value;
5548 + /* Not yet required for a backwards ref. */
5549 + mp->min_address = -65536;
5551 + if (max_mp == NULL)
5553 + mp->max_address = max_address;
5555 + mp->prev = minipool_vector_tail;
5557 + if (mp->prev == NULL)
5559 + minipool_vector_head = mp;
5560 + minipool_vector_label = gen_label_rtx ();
5563 + mp->prev->next = mp;
5565 + minipool_vector_tail = mp;
5569 + if (max_address > max_mp->max_address - mp->fix_size)
5570 + mp->max_address = max_mp->max_address - mp->fix_size;
5572 + mp->max_address = max_address;
5574 + mp->next = max_mp;
5575 + mp->prev = max_mp->prev;
5576 + max_mp->prev = mp;
5577 + if (mp->prev != NULL)
5578 + mp->prev->next = mp;
5580 + minipool_vector_head = mp;
5583 + /* Save the new entry. */
5586 + /* Scan over the preceding entries and adjust their addresses as required.
5588 + while (mp->prev != NULL
5589 + && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5591 + mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5599 +move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp,
5600 + HOST_WIDE_INT min_address)
5602 + HOST_WIDE_INT offset;
5604 + /* This should never be true, and the code below assumes these are
5609 + if (min_mp == NULL)
5611 + if (min_address > mp->min_address)
5612 + mp->min_address = min_address;
5616 + /* We will adjust this below if it is too loose. */
5617 + mp->min_address = min_address;
5619 + /* Unlink MP from its current position. Since min_mp is non-null,
5620 + mp->next must be non-null. */
5621 + mp->next->prev = mp->prev;
5622 + if (mp->prev != NULL)
5623 + mp->prev->next = mp->next;
5625 + minipool_vector_head = mp->next;
5627 + /* Reinsert it after MIN_MP. */
5628 + mp->prev = min_mp;
5629 + mp->next = min_mp->next;
5630 + min_mp->next = mp;
5631 + if (mp->next != NULL)
5632 + mp->next->prev = mp;
5634 + minipool_vector_tail = mp;
5640 + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5642 + mp->offset = offset;
5643 + if (mp->refcount > 0)
5644 + offset += mp->fix_size;
5646 + if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
5647 + mp->next->min_address = mp->min_address + mp->fix_size;
5653 +/* Add a constant to the minipool for a backward reference. Returns the
5654 + node added or NULL if the constant will not fit in this pool.
5656 + Note that the code for insertion for a backwards reference can be
5657 + somewhat confusing because the calculated offsets for each fix do
5658 + not take into account the size of the pool (which is still under
5661 +add_minipool_backward_ref (Mfix * fix)
5663 + /* If set, min_mp is the last pool_entry that has a lower constraint than
5664 + the one we are trying to add. */
5665 + Mnode *min_mp = NULL;
5666 + /* This can be negative, since it is only a constraint. */
5667 + HOST_WIDE_INT min_address = fix->address - fix->backwards;
5670 + /* If we can't reach the current pool from this insn, or if we can't insert
5671 + this entry at the end of the pool without pushing other fixes out of
5672 + range, then we don't try. This ensures that we can't fail later on. */
5673 + if (min_address >= minipool_barrier->address
5674 + || (minipool_vector_tail->min_address + fix->fix_size
5675 + >= minipool_barrier->address))
5678 + /* Scan the pool to see if a constant with the same value has already been
5679 + added. While we are doing this, also note the location where we must
5680 + insert the constant if it doesn't already exist. */
5681 + for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
5683 + if (GET_CODE (fix->value) == GET_CODE (mp->value)
5684 + && fix->mode == mp->mode
5685 + && (GET_CODE (fix->value) != CODE_LABEL
5686 + || (CODE_LABEL_NUMBER (fix->value)
5687 + == CODE_LABEL_NUMBER (mp->value)))
5688 + && rtx_equal_p (fix->value, mp->value)
5689 + /* Check that there is enough slack to move this entry to the end
5690 + of the table (this is conservative). */
5691 + && (mp->max_address
5692 + > (minipool_barrier->address
5693 + + minipool_vector_tail->offset
5694 + + minipool_vector_tail->fix_size)))
5697 + return move_minipool_fix_backward_ref (mp, min_mp, min_address);
5700 + if (min_mp != NULL)
5701 + mp->min_address += fix->fix_size;
5704 + /* Note the insertion point if necessary. */
5705 + if (mp->min_address < min_address)
5709 + else if (mp->max_address
5710 + < minipool_barrier->address + mp->offset + fix->fix_size)
5712 + /* Inserting before this entry would push the fix beyond its
5713 + maximum address (which can happen if we have re-located a
5714 + forwards fix); force the new fix to come after it. */
5716 + min_address = mp->min_address + fix->fix_size;
5721 + /* We need to create a new entry. */
5722 + mp = xmalloc (sizeof (*mp));
5723 + mp->fix_size = fix->fix_size;
5724 + mp->mode = fix->mode;
5725 + mp->value = fix->value;
5727 + mp->max_address = minipool_barrier->address + 65536;
5729 + mp->min_address = min_address;
5731 + if (min_mp == NULL)
5734 + mp->next = minipool_vector_head;
5736 + if (mp->next == NULL)
5738 + minipool_vector_tail = mp;
5739 + minipool_vector_label = gen_label_rtx ();
5742 + mp->next->prev = mp;
5744 + minipool_vector_head = mp;
5748 + mp->next = min_mp->next;
5749 + mp->prev = min_mp;
5750 + min_mp->next = mp;
5752 + if (mp->next != NULL)
5753 + mp->next->prev = mp;
5755 + minipool_vector_tail = mp;
5758 + /* Save the new entry. */
5766 + /* Scan over the following entries and adjust their offsets. */
5767 + while (mp->next != NULL)
5769 + if (mp->next->min_address < mp->min_address + mp->fix_size)
5770 + mp->next->min_address = mp->min_address + mp->fix_size;
5773 + mp->next->offset = mp->offset + mp->fix_size;
5775 + mp->next->offset = mp->offset;
5784 +assign_minipool_offsets (Mfix * barrier)
5786 + HOST_WIDE_INT offset = 0;
5789 + minipool_barrier = barrier;
5791 + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5793 + mp->offset = offset;
5795 + if (mp->refcount > 0)
5796 + offset += mp->fix_size;
5800 +/* Print a symbolic form of X to the debug file, F. */
5802 +avr32_print_value (FILE * f, rtx x)
5804 + switch (GET_CODE (x))
5807 + fprintf (f, "0x%x", (int) INTVAL (x));
5810 + case CONST_DOUBLE:
5811 + fprintf (f, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3));
5814 + case CONST_VECTOR:
5819 + for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
5821 + fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i)));
5822 + if (i < (CONST_VECTOR_NUNITS (x) - 1))
5829 + case CONST_STRING:
5830 + fprintf (f, "\"%s\"", XSTR (x, 0));
5834 + fprintf (f, "`%s'", XSTR (x, 0));
5838 + fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
5842 + avr32_print_value (f, XEXP (x, 0));
5846 + avr32_print_value (f, XEXP (x, 0));
5848 + avr32_print_value (f, XEXP (x, 1));
5852 + fprintf (f, "pc");
5856 + fprintf (f, "????");
5862 +is_minipool_label (rtx label)
5864 + minipool_labels *cur_mp_label = cfun->machine->minipool_label_head;
5866 + if (GET_CODE (label) != CODE_LABEL)
5869 + while (cur_mp_label)
5871 + if (CODE_LABEL_NUMBER (label)
5872 + == CODE_LABEL_NUMBER (cur_mp_label->label))
5874 + cur_mp_label = cur_mp_label->next;
5880 +new_minipool_label (rtx label)
5882 + if (!cfun->machine->minipool_label_head)
5884 + cfun->machine->minipool_label_head =
5885 + ggc_alloc (sizeof (minipool_labels));
5886 + cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head;
5887 + cfun->machine->minipool_label_head->label = label;
5888 + cfun->machine->minipool_label_head->next = 0;
5889 + cfun->machine->minipool_label_head->prev = 0;
5893 + cfun->machine->minipool_label_tail->next =
5894 + ggc_alloc (sizeof (minipool_labels));
5895 + cfun->machine->minipool_label_tail->next->label = label;
5896 + cfun->machine->minipool_label_tail->next->next = 0;
5897 + cfun->machine->minipool_label_tail->next->prev =
5898 + cfun->machine->minipool_label_tail;
5899 + cfun->machine->minipool_label_tail =
5900 + cfun->machine->minipool_label_tail->next;
5904 +/* Output the literal table */
5906 +dump_minipool (rtx scan)
5912 + fprintf (dump_file,
5913 + ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
5914 + INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
5916 + scan = emit_insn_after (gen_consttable_start (), scan);
5917 + scan = emit_insn_after (gen_align_4 (), scan);
5918 + scan = emit_label_after (minipool_vector_label, scan);
5919 + new_minipool_label (minipool_vector_label);
5921 + for (mp = minipool_vector_head; mp != NULL; mp = nmp)
5923 + if (mp->refcount > 0)
5927 + fprintf (dump_file,
5928 + ";; Offset %u, min %ld, max %ld ",
5929 + (unsigned) mp->offset, (unsigned long) mp->min_address,
5930 + (unsigned long) mp->max_address);
5931 + avr32_print_value (dump_file, mp->value);
5932 + fputc ('\n', dump_file);
5935 + switch (mp->fix_size)
5937 +#ifdef HAVE_consttable_4
5939 + scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
5943 +#ifdef HAVE_consttable_8
5945 + scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
5949 +#ifdef HAVE_consttable_16
5951 + scan = emit_insn_after (gen_consttable_16 (mp->value), scan);
5956 + /* This can happen for force-minipool entries which just are
5957 + there to force the minipool to be generated. */
5969 + minipool_vector_head = minipool_vector_tail = NULL;
5970 + scan = emit_insn_after (gen_consttable_end (), scan);
5971 + scan = emit_barrier_after (scan);
5974 +/* Return the cost of forcibly inserting a barrier after INSN. */
5976 +avr32_barrier_cost (rtx insn)
5978 + /* Basing the location of the pool on the loop depth is preferable, but at
5979 + the moment, the basic block information seems to be corrupted by this
5980 + stage of the compilation. */
5981 + int base_cost = 50;
5982 + rtx next = next_nonnote_insn (insn);
5984 + if (next != NULL && GET_CODE (next) == CODE_LABEL)
5987 + switch (GET_CODE (insn))
5990 + /* It will always be better to place the table before the label, rather
5999 + return base_cost - 10;
6002 + return base_cost + 10;
6006 +/* Find the best place in the insn stream in the range
6007 + (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
6008 + Create the barrier by inserting a jump and add a new fix entry for
6011 +create_fix_barrier (Mfix * fix, HOST_WIDE_INT max_address)
6013 + HOST_WIDE_INT count = 0;
6015 + rtx from = fix->insn;
6016 + rtx selected = from;
6017 + int selected_cost;
6018 + HOST_WIDE_INT selected_address;
6020 + HOST_WIDE_INT max_count = max_address - fix->address;
6021 + rtx label = gen_label_rtx ();
6023 + selected_cost = avr32_barrier_cost (from);
6024 + selected_address = fix->address;
6026 + while (from && count < max_count)
6031 + /* This code shouldn't have been called if there was a natural barrier
6033 + if (GET_CODE (from) == BARRIER)
6036 + /* Count the length of this insn. */
6037 + count += get_attr_length (from);
6039 + /* If there is a jump table, add its length. */
6040 + tmp = is_jump_table (from);
6043 + count += get_jump_table_size (tmp);
6045 + /* Jump tables aren't in a basic block, so base the cost on the
6046 + dispatch insn. If we select this location, we will still put
6047 + the pool after the table. */
6048 + new_cost = avr32_barrier_cost (from);
6050 + if (count < max_count && new_cost <= selected_cost)
6053 + selected_cost = new_cost;
6054 + selected_address = fix->address + count;
6057 + /* Continue after the dispatch table. */
6058 + from = NEXT_INSN (tmp);
6062 + new_cost = avr32_barrier_cost (from);
6064 + if (count < max_count && new_cost <= selected_cost)
6067 + selected_cost = new_cost;
6068 + selected_address = fix->address + count;
6071 + from = NEXT_INSN (from);
6074 + /* Create a new JUMP_INSN that branches around a barrier. */
6075 + from = emit_jump_insn_after (gen_jump (label), selected);
6076 + JUMP_LABEL (from) = label;
6077 + barrier = emit_barrier_after (from);
6078 + emit_label_after (label, barrier);
6080 + /* Create a minipool barrier entry for the new barrier. */
6081 + new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*new_fix));
6082 + new_fix->insn = barrier;
6083 + new_fix->address = selected_address;
6084 + new_fix->next = fix->next;
6085 + fix->next = new_fix;
6090 +/* Record that there is a natural barrier in the insn stream at
6093 +push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
6095 + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
6098 + fix->address = address;
6101 + if (minipool_fix_head != NULL)
6102 + minipool_fix_tail->next = fix;
6104 + minipool_fix_head = fix;
6106 + minipool_fix_tail = fix;
6109 +/* Record INSN, which will need fixing up to load a value from the
6110 + minipool. ADDRESS is the offset of the insn since the start of the
6111 + function; LOC is a pointer to the part of the insn which requires
6112 + fixing; VALUE is the constant that must be loaded, which is of type
6115 +push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc,
6116 + enum machine_mode mode, rtx value)
6118 + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
6119 + rtx body = PATTERN (insn);
6122 + fix->address = address;
6125 + fix->fix_size = MINIPOOL_FIX_SIZE (mode, value);
6126 + fix->value = value;
6128 + if (GET_CODE (body) == PARALLEL)
6130 + /* Mcall : Ks16 << 2 */
6131 + fix->forwards = ((1 << 15) - 1) << 2;
6132 + fix->backwards = (1 << 15) << 2;
6134 + else if (GET_CODE (body) == SET
6135 + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4)
6138 + if (TARGET_HARD_FLOAT
6139 + && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
6141 + /* Ldc0.w : Ku12 << 2 */
6142 + fix->forwards = ((1 << 12) - 1) << 2;
6143 + fix->backwards = 0;
6147 + if (optimize_size)
6149 + /* Lddpc : Ku7 << 2 */
6150 + fix->forwards = ((1 << 7) - 1) << 2;
6151 + fix->backwards = 0;
6156 + fix->forwards = ((1 << 15) - 4);
6157 + fix->backwards = (1 << 15);
6161 + else if (GET_CODE (body) == SET
6162 + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8)
6164 + /* Double word load */
6165 + if (TARGET_HARD_FLOAT
6166 + && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
6168 + /* Ldc0.d : Ku12 << 2 */
6169 + fix->forwards = ((1 << 12) - 1) << 2;
6170 + fix->backwards = 0;
6175 + fix->forwards = ((1 << 15) - 4);
6176 + fix->backwards = (1 << 15);
6179 + else if (GET_CODE (body) == UNSPEC_VOLATILE
6180 + && XINT (body, 1) == VUNSPEC_MVRC)
6182 + /* Coprocessor load */
6183 + /* Ldc : Ku8 << 2 */
6184 + fix->forwards = ((1 << 8) - 1) << 2;
6185 + fix->backwards = 0;
6189 + /* Assume worst case which is lddpc insn. */
6190 + fix->forwards = ((1 << 7) - 1) << 2;
6191 + fix->backwards = 0;
6194 + fix->minipool = NULL;
6196 + /* If an insn doesn't have a range defined for it, then it isn't expecting
6197 + to be reworked by this code. Better to abort now than to generate duff
6199 + if (fix->forwards == 0 && fix->backwards == 0)
6204 + fprintf (dump_file,
6205 + ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
6206 + GET_MODE_NAME (mode),
6207 + INSN_UID (insn), (unsigned long) address,
6208 + -1 * (long) fix->backwards, (long) fix->forwards);
6209 + avr32_print_value (dump_file, fix->value);
6210 + fprintf (dump_file, "\n");
6213 + /* Add it to the chain of fixes. */
6216 + if (minipool_fix_head != NULL)
6217 + minipool_fix_tail->next = fix;
6219 + minipool_fix_head = fix;
6221 + minipool_fix_tail = fix;
6224 +/* Scan INSN and note any of its operands that need fixing.
6225 + If DO_PUSHES is false we do not actually push any of the fixups
6226 + needed. The function returns TRUE if any fixups were needed/pushed.
6227 + This is used by avr32_memory_load_p() which needs to know about loads
6228 + of constants that will be converted into minipool loads. */
6230 +note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
6232 + bool result = false;
6235 + extract_insn (insn);
6237 + if (!constrain_operands (1))
6238 + fatal_insn_not_found (insn);
6240 + if (recog_data.n_alternatives == 0)
6243 + /* Fill in recog_op_alt with information about the constraints of this
6245 + preprocess_constraints ();
6247 + for (opno = 0; opno < recog_data.n_operands; opno++)
6251 + /* Things we need to fix can only occur in inputs. */
6252 + if (recog_data.operand_type[opno] != OP_IN)
6255 + op = recog_data.operand[opno];
6257 + if (avr32_const_pool_ref_operand (op, GET_MODE (op)))
6261 + rtx cop = avoid_constant_pool_reference (op);
6263 + /* Casting the address of something to a mode narrower than a
6264 + word can cause avoid_constant_pool_reference() to return the
6265 + pool reference itself. That's no good to us here. Lets
6266 + just hope that we can use the constant pool value directly.
6269 + cop = get_pool_constant (XEXP (op, 0));
6271 + push_minipool_fix (insn, address,
6272 + recog_data.operand_loc[opno],
6273 + recog_data.operand_mode[opno], cop);
6278 + else if (TARGET_HAS_ASM_ADDR_PSEUDOS
6279 + && avr32_address_operand (op, GET_MODE (op)))
6281 + /* Handle pseudo instructions using a direct address. These pseudo
6282 + instructions might need entries in the constant pool and we must
6283 + therefore create a constant pool for them, in case the
6284 + assembler/linker needs to insert entries. */
6287 + /* Push a dummy constant pool entry so that the .cpool
6288 + directive should be inserted on the appropriate place in the
6289 + code even if there are no real constant pool entries. This
6290 + is used by the assembler and linker to know where to put
6291 + generated constant pool entries. */
6292 + push_minipool_fix (insn, address,
6293 + recog_data.operand_loc[opno],
6294 + recog_data.operand_mode[opno],
6295 + gen_rtx_UNSPEC (VOIDmode,
6296 + gen_rtvec (1, const0_rtx),
6297 + UNSPEC_FORCE_MINIPOOL));
6307 +avr32_insn_is_cast (rtx insn)
6310 + if (NONJUMP_INSN_P (insn)
6311 + && GET_CODE (PATTERN (insn)) == SET
6312 + && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND
6313 + || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND)
6314 + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0))
6315 + && REG_P (SET_DEST (PATTERN (insn))))
6321 + Replace all occurrences of reg FROM with reg TO in X */
6324 +avr32_replace_reg (rtx x, rtx from, rtx to)
6329 + gcc_assert ( REG_P (from) && REG_P (to) );
6331 + /* Allow this function to make replacements in EXPR_LISTs. */
6335 + if (rtx_equal_p (x, from))
6338 + if (GET_CODE (x) == SUBREG)
6340 + rtx new = avr32_replace_reg (SUBREG_REG (x), from, to);
6342 + if (GET_CODE (new) == CONST_INT)
6344 + x = simplify_subreg (GET_MODE (x), new,
6345 + GET_MODE (SUBREG_REG (x)),
6350 + SUBREG_REG (x) = new;
6354 + else if (GET_CODE (x) == ZERO_EXTEND)
6356 + rtx new = avr32_replace_reg (XEXP (x, 0), from, to);
6358 + if (GET_CODE (new) == CONST_INT)
6360 + x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
6361 + new, GET_MODE (XEXP (x, 0)));
6365 + XEXP (x, 0) = new;
6370 + fmt = GET_RTX_FORMAT (GET_CODE (x));
6371 + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6373 + if (fmt[i] == 'e')
6374 + XEXP (x, i) = avr32_replace_reg (XEXP (x, i), from, to);
6375 + else if (fmt[i] == 'E')
6376 + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6377 + XVECEXP (x, i, j) = avr32_replace_reg (XVECEXP (x, i, j), from, to);
6384 +/* FIXME: The level of nesting in this function is way too deep. It needs to be
6387 +avr32_reorg_optimization (void)
6389 + rtx first = get_first_nonnote_insn ();
6392 + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
6395 + /* Scan through all insns looking for cast operations. */
6398 + fprintf (dump_file, ";; Deleting redundant cast operations:\n");
6400 + for (insn = first; insn; insn = NEXT_INSN (insn))
6402 + rtx reg, src_reg, scan;
6403 + enum machine_mode mode;
6407 + if (avr32_insn_is_cast (insn)
6408 + && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode
6409 + || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode))
6411 + mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0));
6412 + reg = SET_DEST (PATTERN (insn));
6413 + src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
6420 + unused_cast = false;
6421 + label_ref = NULL_RTX;
6422 + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
6424 + /* Check if we have reached the destination of a simple
6425 + conditional jump which we have already scanned past. If so,
6426 + we can safely continue scanning. */
6427 + if (LABEL_P (scan) && label_ref != NULL_RTX)
6429 + if (CODE_LABEL_NUMBER (scan) ==
6430 + CODE_LABEL_NUMBER (XEXP (label_ref, 0)))
6431 + label_ref = NULL_RTX;
6436 + if (!INSN_P (scan))
6439 + /* For conditional jumps we can manage to keep on scanning if
6440 + we meet the destination label later on before any new jump
6442 + if (GET_CODE (scan) == JUMP_INSN)
6444 + if (any_condjump_p (scan) && label_ref == NULL_RTX)
6445 + label_ref = condjump_label (scan);
6450 + /* Check if we have a call and the register is used as an argument. */
6452 + && find_reg_fusage (scan, USE, reg) )
6455 + if (!reg_mentioned_p (reg, PATTERN (scan)))
6458 + /* Check if casted register is used in this insn */
6459 + if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX)
6460 + && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) ==
6463 + /* If not used in the source to the set or in a memory
6464 + expression in the destination then the register is used
6465 + as a destination and is really dead. */
6466 + if (single_set (scan)
6467 + && GET_CODE (PATTERN (scan)) == SET
6468 + && REG_P (SET_DEST (PATTERN (scan)))
6469 + && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan)))
6470 + && label_ref == NULL_RTX)
6472 + unused_cast = true;
6477 + /* Check if register is dead or set in this insn */
6478 + if (dead_or_set_p (scan, reg))
6480 + unused_cast = true;
6485 + /* Check if we have unresolved conditional jumps */
6486 + if (label_ref != NULL_RTX)
6491 + if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0)))
6493 + /* One operand cast, safe to delete */
6496 + fprintf (dump_file,
6497 + ";; INSN %i removed, casted register %i value not used.\n",
6498 + INSN_UID (insn), REGNO (reg));
6500 + SET_INSN_DELETED (insn);
6501 + /* Force the instruction to be recognized again */
6502 + INSN_CODE (insn) = -1;
6506 + /* Two operand cast, which really could be substituted with
6507 + a move, if the source register is dead after the cast
6508 + insn and then the insn which sets the source register
6509 + could instead directly set the destination register for
6510 + the cast. As long as there are no insns in between which
6511 + uses the register. */
6512 + rtx link = NULL_RTX;
6514 + rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
6515 + unused_cast = false;
6517 + if (!find_reg_note (insn, REG_DEAD, src_reg))
6520 + /* Search for the insn which sets the source register */
6521 + for (scan = PREV_INSN (insn);
6522 + scan && GET_CODE (scan) != CODE_LABEL;
6523 + scan = PREV_INSN (scan))
6525 + if (! INSN_P (scan))
6528 + set = single_set (scan);
6529 + // Fix for bug #11763 : the following if condition
6530 + // has been modified and else part is included to
6531 + // set the link to NULL_RTX.
6532 + // if (set && rtx_equal_p (src_reg, SET_DEST (set)))
6533 + if (set && (REGNO(src_reg) == REGNO(SET_DEST(set))))
6535 + if (rtx_equal_p (src_reg, SET_DEST (set)))
6549 + /* Found no link or link is a call insn where we can not
6550 + change the destination register */
6551 + if (link == NULL_RTX || CALL_P (link))
6554 + /* Scan through all insn between link and insn */
6555 + for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
6557 + /* Don't try to trace forward past a CODE_LABEL if we
6558 + haven't seen INSN yet. Ordinarily, we will only
6559 + find the setting insn in LOG_LINKS if it is in the
6560 + same basic block. However, cross-jumping can insert
6561 + code labels in between the load and the call, and
6562 + can result in situations where a single call insn
6563 + may have two targets depending on where we came
6566 + if (GET_CODE (scan) == CODE_LABEL)
6569 + if (!INSN_P (scan))
6572 + /* Don't try to trace forward past a JUMP. To optimize
6573 + safely, we would have to check that all the
6574 + instructions at the jump destination did not use REG.
6577 + if (GET_CODE (scan) == JUMP_INSN)
6582 + if (!reg_mentioned_p (src_reg, PATTERN (scan)))
6585 + /* We have reached the cast insn */
6588 + /* We can remove cast and replace the destination
6589 + register of the link insn with the destination
6593 + fprintf (dump_file,
6594 + ";; INSN %i removed, casted value unused. "
6595 + "Destination of removed cast operation: register %i, folded into INSN %i.\n",
6596 + INSN_UID (insn), REGNO (reg),
6599 + /* Update link insn */
6600 + SET_DEST (PATTERN (link)) =
6601 + gen_rtx_REG (mode, REGNO (reg));
6602 + /* Force the instruction to be recognized again */
6603 + INSN_CODE (link) = -1;
6606 + SET_INSN_DELETED (insn);
6607 + /* Force the instruction to be recognized again */
6608 + INSN_CODE (insn) = -1;
6617 + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
6620 + /* Scan through all insns looking for shifted add operations */
6623 + fprintf (dump_file,
6624 + ";; Deleting redundant shifted add operations:\n");
6626 + for (insn = first; insn; insn = NEXT_INSN (insn))
6628 + rtx reg, mem_expr, scan, op0, op1;
6629 + int add_only_used_as_pointer;
6632 + && GET_CODE (PATTERN (insn)) == SET
6633 + && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
6634 + && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT
6635 + || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT)
6636 + && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) ==
6637 + CONST_INT && REG_P (SET_DEST (PATTERN (insn)))
6638 + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1))
6639 + && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0)))
6641 + reg = SET_DEST (PATTERN (insn));
6642 + mem_expr = SET_SRC (PATTERN (insn));
6643 + op0 = XEXP (XEXP (mem_expr, 0), 0);
6644 + op1 = XEXP (mem_expr, 1);
6651 + /* Scan forward to check if the result of the shifted add
6652 + operation is only used as an address in memory operations and
6653 + that the operands to the shifted add are not clobbered. */
6654 + add_only_used_as_pointer = false;
6655 + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
6657 + if (!INSN_P (scan))
6660 + /* Don't try to trace forward past a JUMP or CALL. To optimize
6661 + safely, we would have to check that all the instructions at
6662 + the jump destination did not use REG. */
6664 + if (GET_CODE (scan) == JUMP_INSN)
6669 + /* If used in a call insn then we cannot optimize it away */
6670 + if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg)))
6673 + /* If any of the operands of the shifted add are clobbered we
6674 + cannot optimize the shifted add away */
6675 + if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg)))
6676 + || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg))))
6679 + if (!reg_mentioned_p (reg, PATTERN (scan)))
6682 + /* If used any other place than as a pointer or as the
6683 + destination register we failed */
6684 + if (!(single_set (scan)
6685 + && GET_CODE (PATTERN (scan)) == SET
6686 + && ((MEM_P (SET_DEST (PATTERN (scan)))
6687 + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
6688 + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg))
6689 + || (MEM_P (SET_SRC (PATTERN (scan)))
6690 + && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0))
6692 + (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg))))
6693 + && !(GET_CODE (PATTERN (scan)) == SET
6694 + && REG_P (SET_DEST (PATTERN (scan)))
6695 + && !regno_use_in (REGNO (reg),
6696 + SET_SRC (PATTERN (scan)))))
6699 + /* We cannot replace the pointer in TImode insns
6700 + as these have a different addressing mode than the other
6702 + if ( GET_MODE (SET_DEST (PATTERN (scan))) == TImode )
6705 + /* Check if register is dead or set in this insn */
6706 + if (dead_or_set_p (scan, reg))
6708 + add_only_used_as_pointer = true;
6713 + if (add_only_used_as_pointer)
6715 + /* Lets delete the add insn and replace all memory references
6716 + which uses the pointer with the full expression. */
6719 + fprintf (dump_file,
6720 + ";; Deleting INSN %i since address expression can be folded into all "
6721 + "memory references using this expression\n",
6724 + SET_INSN_DELETED (insn);
6725 + /* Force the instruction to be recognized again */
6726 + INSN_CODE (insn) = -1;
6728 + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
6730 + if (!INSN_P (scan))
6733 + if (!reg_mentioned_p (reg, PATTERN (scan)))
6736 + /* If used any other place than as a pointer or as the
6737 + destination register we failed */
6738 + if ((single_set (scan)
6739 + && GET_CODE (PATTERN (scan)) == SET
6740 + && ((MEM_P (SET_DEST (PATTERN (scan)))
6741 + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
6742 + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
6743 + REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
6746 + (SET_SRC (PATTERN (scan)),
6750 + (SET_SRC (PATTERN (scan)),
6751 + 0)) == REGNO (reg)))))
6755 + fprintf (dump_file,
6756 + ";; Register %i replaced by indexed address in INSN %i\n",
6757 + REGNO (reg), INSN_UID (scan));
6759 + if (MEM_P (SET_DEST (PATTERN (scan))))
6760 + XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr;
6762 + XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr;
6765 + /* Check if register is dead or set in this insn */
6766 + if (dead_or_set_p (scan, reg))
6777 + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
6780 + /* Scan through all insns looking for conditional register to
6781 + register move operations */
6784 + fprintf (dump_file,
6785 + ";; Folding redundant conditional move operations:\n");
6787 + for (insn = first; insn; insn = next_nonnote_insn (insn))
6789 + rtx src_reg, dst_reg, scan, test;
6792 + && GET_CODE (PATTERN (insn)) == COND_EXEC
6793 + && GET_CODE (COND_EXEC_CODE (PATTERN (insn))) == SET
6794 + && REG_P (SET_SRC (COND_EXEC_CODE (PATTERN (insn))))
6795 + && REG_P (SET_DEST (COND_EXEC_CODE (PATTERN (insn))))
6796 + && find_reg_note (insn, REG_DEAD, SET_SRC (COND_EXEC_CODE (PATTERN (insn)))))
6798 + src_reg = SET_SRC (COND_EXEC_CODE (PATTERN (insn)));
6799 + dst_reg = SET_DEST (COND_EXEC_CODE (PATTERN (insn)));
6800 + test = COND_EXEC_TEST (PATTERN (insn));
6807 + /* Scan backward through the rest of insns in this if-then or if-else
6808 + block and check if we can fold the move into another of the conditional
6809 + insns in the same block. */
6810 + scan = prev_nonnote_insn (insn);
6811 + while (INSN_P (scan)
6812 + && GET_CODE (PATTERN (scan)) == COND_EXEC
6813 + && rtx_equal_p (COND_EXEC_TEST (PATTERN (scan)), test))
6815 + rtx pattern = COND_EXEC_CODE (PATTERN (scan));
6816 + if ( GET_CODE (pattern) == PARALLEL )
6817 + pattern = XVECEXP (pattern, 0, 0);
6819 + if ( reg_set_p (src_reg, pattern) )
6821 + /* Fold in the destination register for the cond. move
6822 + into this insn. */
6823 + SET_DEST (pattern) = dst_reg;
6826 + fprintf (dump_file,
6827 + ";; Deleting INSN %i since this operation can be folded into INSN %i\n",
6828 + INSN_UID (insn), INSN_UID (scan));
6831 + /* Scan and check if any of the insns in between uses the src_reg. We
6832 + must then replace it with the dst_reg. */
6833 + while ( (scan = next_nonnote_insn (scan)) != insn ){
6834 + avr32_replace_reg (scan, src_reg, dst_reg);
6836 + /* Delete the insn. */
6837 + SET_INSN_DELETED (insn);
6839 + /* Force the instruction to be recognized again */
6840 + INSN_CODE (insn) = -1;
6844 + /* If the destination register is used but not set in this insn
6845 + we cannot fold. */
6846 + if ( reg_mentioned_p (dst_reg, pattern) )
6849 + scan = prev_nonnote_insn (scan);
6856 +/* Exported to toplev.c.
6858 + Do a final pass over the function, just before delayed branch
6865 + HOST_WIDE_INT address = 0;
6868 + minipool_fix_head = minipool_fix_tail = NULL;
6870 + /* The first insn must always be a note, or the code below won't scan it
6872 + insn = get_insns ();
6873 + if (GET_CODE (insn) != NOTE)
6876 + /* Scan all the insns and record the operands that will need fixing. */
6877 + for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
6879 + if (GET_CODE (insn) == BARRIER)
6880 + push_minipool_barrier (insn, address);
6881 + else if (INSN_P (insn))
6885 + note_invalid_constants (insn, address, true);
6886 + address += get_attr_length (insn);
6888 + /* If the insn is a vector jump, add the size of the table and skip
6890 + if ((table = is_jump_table (insn)) != NULL)
6892 + address += get_jump_table_size (table);
6898 + fix = minipool_fix_head;
6900 + /* Now scan the fixups and perform the required changes. */
6905 + Mfix *last_added_fix;
6906 + Mfix *last_barrier = NULL;
6909 + /* Skip any further barriers before the next fix. */
6910 + while (fix && GET_CODE (fix->insn) == BARRIER)
6913 + /* No more fixes. */
6917 + last_added_fix = NULL;
6919 + for (ftmp = fix; ftmp; ftmp = ftmp->next)
6921 + if (GET_CODE (ftmp->insn) == BARRIER)
6923 + if (ftmp->address >= minipool_vector_head->max_address)
6926 + last_barrier = ftmp;
6928 + else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
6931 + last_added_fix = ftmp; /* Keep track of the last fix added.
6935 + /* If we found a barrier, drop back to that; any fixes that we could
6936 + have reached but come after the barrier will now go in the next
6938 + if (last_barrier != NULL)
6940 + /* Reduce the refcount for those fixes that won't go into this pool
6942 + for (fdel = last_barrier->next;
6943 + fdel && fdel != ftmp; fdel = fdel->next)
6945 + fdel->minipool->refcount--;
6946 + fdel->minipool = NULL;
6949 + ftmp = last_barrier;
6953 + /* ftmp is first fix that we can't fit into this pool and there are no
6954 + natural barriers that we could use. Insert a new barrier in the
6955 + code somewhere between the previous fix and this one, and
6956 + arrange to jump around it. */
6957 + HOST_WIDE_INT max_address;
6959 + /* The last item on the list of fixes must be a barrier, so we can
6960 + never run off the end of the list of fixes without last_barrier
6965 + max_address = minipool_vector_head->max_address;
6966 + /* Check that there isn't another fix that is in range that we
6967 + couldn't fit into this pool because the pool was already too
6968 + large: we need to put the pool before such an instruction. */
6969 + if (ftmp->address < max_address)
6970 + max_address = ftmp->address;
6972 + last_barrier = create_fix_barrier (last_added_fix, max_address);
6975 + assign_minipool_offsets (last_barrier);
6979 + if (GET_CODE (ftmp->insn) != BARRIER
6980 + && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
6984 + ftmp = ftmp->next;
6987 + /* Scan over the fixes we have identified for this pool, fixing them up
6988 + and adding the constants to the pool itself. */
6989 + for (this_fix = fix; this_fix && ftmp != this_fix;
6990 + this_fix = this_fix->next)
6991 + if (GET_CODE (this_fix->insn) != BARRIER
6992 + /* Do nothing for entries present just to force the insertion of
6994 + && !IS_FORCE_MINIPOOL (this_fix->value))
6996 + rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
6997 + minipool_vector_label),
6998 + this_fix->minipool->offset);
6999 + *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7002 + dump_minipool (last_barrier->insn);
7006 + /* Free the minipool memory. */
7007 + obstack_free (&minipool_obstack, minipool_startobj);
7009 + avr32_reorg_optimization ();
7014 + Hook for doing some final scanning of instructions. Does nothing yet...*/
7016 +avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
7017 + rtx * opvec ATTRIBUTE_UNUSED,
7018 + int noperands ATTRIBUTE_UNUSED)
7024 +/* Function for changing the condition on the next instruction,
7025 + should be used when emitting compare instructions and
7026 + the condition of the next instruction needs to change.
7029 +set_next_insn_cond (rtx cur_insn, rtx new_cond)
7031 + rtx next_insn = next_nonnote_insn (cur_insn);
7032 + if ((next_insn != NULL_RTX)
7033 + && (INSN_P (next_insn)))
7035 + if ((GET_CODE (PATTERN (next_insn)) == SET)
7036 + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
7038 + /* Branch instructions */
7039 + XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond;
7040 + /* Force the instruction to be recognized again */
7041 + INSN_CODE (next_insn) = -1;
7044 + else if ((GET_CODE (PATTERN (next_insn)) == SET)
7045 + && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
7046 + GET_MODE (SET_SRC (PATTERN (next_insn)))))
7048 + /* scc with no compare */
7049 + SET_SRC (PATTERN (next_insn)) = new_cond;
7050 + /* Force the instruction to be recognized again */
7051 + INSN_CODE (next_insn) = -1;
7054 + else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
7056 + if ( GET_CODE (new_cond) == UNSPEC )
7058 + COND_EXEC_TEST (PATTERN (next_insn)) =
7059 + gen_rtx_UNSPEC (CCmode,
7061 + XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0),
7062 + XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1)),
7063 + XINT (new_cond, 1));
7067 + PUT_CODE(COND_EXEC_TEST (PATTERN (next_insn)), GET_CODE(new_cond));
7075 +/* Function for obtaining the condition for the next instruction
7079 +get_next_insn_cond (rtx cur_insn)
7081 + rtx next_insn = next_nonnote_insn (cur_insn);
7082 + rtx cond = NULL_RTX;
7083 + if (next_insn != NULL_RTX
7084 + && INSN_P (next_insn))
7086 + if ((GET_CODE (PATTERN (next_insn)) == SET)
7087 + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
7089 + /* Branch and cond if then else instructions */
7090 + cond = XEXP (SET_SRC (PATTERN (next_insn)), 0);
7092 + else if ((GET_CODE (PATTERN (next_insn)) == SET)
7093 + && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
7094 + GET_MODE (SET_SRC (PATTERN (next_insn)))))
7096 + /* scc with no compare */
7097 + cond = SET_SRC (PATTERN (next_insn));
7099 + else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
7101 + cond = COND_EXEC_TEST (PATTERN (next_insn));
7108 +/* Check if the next insn is a conditional insn that will emit a compare
7112 +next_insn_emits_cmp (rtx cur_insn)
7114 + rtx next_insn = next_nonnote_insn (cur_insn);
7115 + rtx cond = NULL_RTX;
7116 + if (next_insn != NULL_RTX
7117 + && INSN_P (next_insn))
7119 + if ( ((GET_CODE (PATTERN (next_insn)) == SET)
7120 + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE)
7121 + && (XEXP (XEXP (SET_SRC (PATTERN (next_insn)), 0),0) != cc0_rtx))
7122 + || GET_CODE (PATTERN (next_insn)) == COND_EXEC )
7130 +avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1)
7133 + rtx new_cond = NULL_RTX;
7135 + rtx compare_pattern;
7139 + if ( GET_CODE (op0) == AND )
7140 + compare_pattern = op0;
7142 + compare_pattern = gen_rtx_COMPARE (mode, op0, op1);
7144 + new_cond = is_compare_redundant (compare_pattern, cond);
7146 + if (new_cond != NULL_RTX)
7149 + /* Check if we are inserting a bit-load instead of a compare. */
7150 + if ( GET_CODE (op0) == AND )
7152 + ops[0] = XEXP (op0, 0);
7153 + ops[1] = XEXP (op0, 1);
7154 + output_asm_insn ("bld\t%0, %p1", ops);
7158 + /* Insert compare */
7162 + output_asm_insn ("cp.b\t%0, %1", ops);
7165 + output_asm_insn ("cp.h\t%0, %1", ops);
7168 + output_asm_insn ("cp.w\t%0, %1", ops);
7171 + if (GET_CODE (op1) != REG)
7172 + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops);
7174 + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops);
7177 + internal_error ("Unknown comparison mode");
7185 +avr32_load_multiple_operation (rtx op,
7186 + enum machine_mode mode ATTRIBUTE_UNUSED)
7188 + int count = XVECLEN (op, 0);
7189 + unsigned int dest_regno;
7192 + int i = 1, base = 0;
7194 + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
7197 + /* Check to see if this might be a write-back. */
7198 + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
7203 + /* Now check it more carefully. */
7204 + if (GET_CODE (SET_DEST (elt)) != REG
7205 + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
7206 + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
7207 + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
7211 + /* Perform a quick check so we don't blow up below. */
7213 + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
7214 + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
7215 + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
7218 + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
7219 + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
7221 + for (; i < count; i++)
7223 + elt = XVECEXP (op, 0, i);
7225 + if (GET_CODE (elt) != SET
7226 + || GET_CODE (SET_DEST (elt)) != REG
7227 + || GET_MODE (SET_DEST (elt)) != SImode
7228 + || GET_CODE (SET_SRC (elt)) != UNSPEC)
7236 +avr32_store_multiple_operation (rtx op,
7237 + enum machine_mode mode ATTRIBUTE_UNUSED)
7239 + int count = XVECLEN (op, 0);
7245 + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
7248 + /* Perform a quick check so we don't blow up below. */
7250 + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
7251 + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
7252 + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
7255 + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
7256 + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
7258 + for (; i < count; i++)
7260 + elt = XVECEXP (op, 0, i);
7262 + if (GET_CODE (elt) != SET
7263 + || GET_CODE (SET_DEST (elt)) != MEM
7264 + || GET_MODE (SET_DEST (elt)) != SImode
7265 + || GET_CODE (SET_SRC (elt)) != UNSPEC)
7273 +avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in)
7275 + /* Check if they use the same accumulator */
7277 + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
7286 +avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in)
7289 + Check if the mul instruction produces the accumulator for the mac
7292 + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
7300 +avr32_store_bypass (rtx insn_out, rtx insn_in)
7302 + /* Only valid bypass if the output result is used as an src in the store
7303 + instruction, NOT if used as a pointer or base. */
7305 + (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in))))
7314 +avr32_mul_waw_bypass (rtx insn_out, rtx insn_in)
7316 + /* Check if the register holding the result from the mul instruction is
7317 + used as a result register in the input instruction. */
7319 + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
7328 +avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in)
7330 + /* Check if the first loaded word in insn_out is used in insn_in. */
7332 + rtx second_loaded_reg;
7334 + /* If this is a double alu operation then the bypass is not valid */
7335 + if ((get_attr_type (insn_in) == TYPE_ALU
7336 + || get_attr_type (insn_in) == TYPE_ALU2)
7337 + && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4))
7340 + /* Get the destination register in the load */
7341 + if (!REG_P (SET_DEST (PATTERN (insn_out))))
7344 + dst_reg = SET_DEST (PATTERN (insn_out));
7345 + second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1);
7347 + if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in)))
7355 +avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in)
7358 + Check if the two first loaded word in insn_out are used in insn_in. */
7360 + rtx third_loaded_reg, fourth_loaded_reg;
7362 + /* Get the destination register in the load */
7363 + if (!REG_P (SET_DEST (PATTERN (insn_out))))
7366 + dst_reg = SET_DEST (PATTERN (insn_out));
7367 + third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2);
7368 + fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3);
7370 + if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in))
7371 + && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in)))
7382 +avr32_ifcvt_modify_test (ce_if_block_t *ce_info,
7391 + || test == NULL_RTX
7392 + || !reg_mentioned_p (cc0_rtx, test))
7395 + branch_insn = BB_END (ce_info->test_bb);
7396 + cmp_test = PATTERN(prev_nonnote_insn (branch_insn));
7398 + if (GET_CODE(cmp_test) != SET
7399 + || !CC0_P(XEXP(cmp_test, 0)) )
7402 + if ( GET_CODE(SET_SRC(cmp_test)) == COMPARE ){
7403 + compare_op0 = XEXP(SET_SRC(cmp_test), 0);
7404 + compare_op1 = XEXP(SET_SRC(cmp_test), 1);
7406 + compare_op0 = SET_SRC(cmp_test);
7407 + compare_op1 = const0_rtx;
7410 + return gen_rtx_fmt_ee (GET_CODE(test), GET_MODE (compare_op0),
7411 + compare_op0, compare_op1);
7417 +avr32_ifcvt_modify_insn (ce_if_block_t *ce_info,
7420 + int *num_true_changes){
7421 + rtx test = COND_EXEC_TEST(pattern);
7422 + rtx op = COND_EXEC_CODE(pattern);
7424 + rtx cond_exec_insn;
7425 + int inputs_set_outside_ifblock = 1;
7426 + basic_block current_bb = BLOCK_FOR_INSN (insn);
7428 + enum machine_mode mode = GET_MODE (XEXP (op, 0));
7430 + if (CC0_P(XEXP(test, 0)))
7431 + test = avr32_ifcvt_modify_test (ce_info,
7434 + /* We do not support multiple tests. */
7436 + && ce_info->num_multiple_test_blocks > 0 )
7439 + pattern = gen_rtx_COND_EXEC (VOIDmode, test, op);
7441 + if ( !reload_completed )
7445 + int max_insns = MAX_CONDITIONAL_EXECUTE;
7450 + /* Check if the insn is not suitable for conditional
7452 + start_sequence ();
7453 + cond_exec_insn = emit_insn (pattern);
7454 + if ( recog_memoized (cond_exec_insn) < 0
7455 + && can_create_pseudo_p () )
7457 + /* Insn is not suitable for conditional execution, try
7458 + to fix it up by using an extra scratch register or
7459 + by pulling the operation outside the if-then-else
7460 + and then emitting a conditional move inside the if-then-else. */
7462 + if ( GET_CODE (op) != SET
7463 + || !REG_P (SET_DEST (op))
7464 + || GET_CODE (SET_SRC (op)) == IF_THEN_ELSE
7465 + || GET_MODE_SIZE (mode) > UNITS_PER_WORD )
7468 + /* Check if any of the input operands to the insn is set inside the
7470 + if ( current_bb->index == ce_info->then_bb->index )
7471 + start = PREV_INSN (BB_HEAD (ce_info->then_bb));
7473 + start = PREV_INSN (BB_HEAD (ce_info->else_bb));
7476 + for ( bb_insn = next_nonnote_insn (start); bb_insn != insn; bb_insn = next_nonnote_insn (bb_insn) )
7478 + rtx set = single_set (bb_insn);
7480 + if ( set && reg_mentioned_p (SET_DEST (set), SET_SRC (op)))
7482 + inputs_set_outside_ifblock = 0;
7487 + cmp_insn = prev_nonnote_insn (BB_END (ce_info->test_bb));
7490 + /* Check if we can insert more insns. */
7491 + num_insns = ( ce_info->num_then_insns +
7492 + ce_info->num_else_insns +
7493 + ce_info->num_cond_clobber_insns +
7494 + ce_info->num_extra_move_insns );
7496 + if ( ce_info->num_else_insns != 0 )
7499 + if ( num_insns >= max_insns )
7502 + /* Check if we have an instruction which might be converted to
7503 + conditional form if we give it a scratch register to clobber. */
7506 + rtx scratch_reg = gen_reg_rtx (mode);
7507 + rtx new_pattern = copy_rtx (pattern);
7508 + rtx set_src = SET_SRC (COND_EXEC_CODE (new_pattern));
7510 + rtx clobber = gen_rtx_CLOBBER (mode, scratch_reg);
7511 + rtx vec[2] = { COND_EXEC_CODE (new_pattern), clobber };
7512 + COND_EXEC_CODE (new_pattern) = gen_rtx_PARALLEL (mode, gen_rtvec_v (2, vec));
7514 + start_sequence ();
7515 + clobber_insn = emit_insn (new_pattern);
7517 + if ( recog_memoized (clobber_insn) >= 0
7518 + && ( ( GET_RTX_LENGTH (GET_CODE (set_src)) == 2
7519 + && CONST_INT_P (XEXP (set_src, 1))
7520 + && avr32_const_ok_for_constraint_p (INTVAL (XEXP (set_src, 1)), 'K', "Ks08") )
7521 + || !ce_info->else_bb
7522 + || current_bb->index == ce_info->else_bb->index ))
7525 + /* Force the insn to be recognized again. */
7526 + INSN_CODE (insn) = -1;
7528 + /* If this is the first change in this IF-block then
7529 + signal that we have made a change. */
7530 + if ( ce_info->num_cond_clobber_insns == 0
7531 + && ce_info->num_extra_move_insns == 0 )
7532 + *num_true_changes += 1;
7534 + ce_info->num_cond_clobber_insns++;
7537 + fprintf (dump_file,
7538 + "\nReplacing INSN %d with an insn using a scratch register for later ifcvt passes...\n",
7541 + return COND_EXEC_CODE (new_pattern);
7546 + if ( inputs_set_outside_ifblock )
7548 + /* Check if the insn before the cmp is an and which used
7549 + together with the cmp can be optimized into a bld. If
7550 + so then we should try to put the insn before the and
7551 + so that we can catch the bld peephole. */
7553 + rtx insn_before_cmp_insn = prev_nonnote_insn (cmp_insn);
7554 + if (insn_before_cmp_insn
7555 + && (set = single_set (insn_before_cmp_insn))
7556 + && GET_CODE (SET_SRC (set)) == AND
7557 + && one_bit_set_operand (XEXP (SET_SRC (set), 1), SImode)
7558 + /* Also make sure that the insn does not set any
7559 + of the input operands to the insn we are pulling out. */
7560 + && !reg_mentioned_p (SET_DEST (set), SET_SRC (op)) )
7561 + cmp_insn = prev_nonnote_insn (cmp_insn);
7563 + /* We can try to put the operation outside the if-then-else
7564 + blocks and insert a move. */
7565 + if ( !insn_invalid_p (insn)
7566 + /* Do not allow conditional insns to be moved outside the
7568 + && !reg_mentioned_p (cc0_rtx, insn)
7569 + /* We cannot move memory loads outside of the if-then-else
7570 + since the memory access should not be performed if the
7571 + condition is not met. */
7572 + && !mem_mentioned_p (SET_SRC (op)) )
7574 + rtx scratch_reg = gen_reg_rtx (mode);
7575 + rtx op_pattern = copy_rtx (op);
7576 + rtx new_insn, seq;
7577 + rtx link, prev_link;
7578 + op = copy_rtx (op);
7579 + /* Emit the operation to a temp reg before the compare,
7580 + and emit a move inside the if-then-else, hoping that the
7581 + whole if-then-else can be converted to conditional
7583 + SET_DEST (op_pattern) = scratch_reg;
7584 + start_sequence ();
7585 + new_insn = emit_insn (op_pattern);
7586 + seq = get_insns();
7589 + /* Check again that the insn is valid. For some insns the insn might
7590 + become invalid if the destination register is changed. Ie. for mulacc
7592 + if ( insn_invalid_p (new_insn) )
7595 + emit_insn_before_setloc (seq, cmp_insn, INSN_LOCATOR (insn));
7598 + fprintf (dump_file,
7599 + "\nMoving INSN %d out of IF-block by adding INSN %d...\n",
7600 + INSN_UID (insn), INSN_UID (new_insn));
7602 + ce_info->extra_move_insns[ce_info->num_extra_move_insns] = insn;
7603 + ce_info->moved_insns[ce_info->num_extra_move_insns] = new_insn;
7604 + XEXP (op, 1) = scratch_reg;
7605 + /* Force the insn to be recognized again. */
7606 + INSN_CODE (insn) = -1;
7608 + /* Move REG_DEAD notes to the moved insn. */
7609 + prev_link = NULL_RTX;
7610 + for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
7612 + if (REG_NOTE_KIND (link) == REG_DEAD)
7614 + /* Add the REG_DEAD note to the new insn. */
7615 + rtx dead_reg = XEXP (link, 0);
7616 + REG_NOTES (new_insn) = gen_rtx_EXPR_LIST (REG_DEAD, dead_reg, REG_NOTES (new_insn));
7617 + /* Remove the REG_DEAD note from the insn we convert to a move. */
7619 + XEXP (prev_link, 1) = XEXP (link, 1);
7621 + REG_NOTES (insn) = XEXP (link, 1);
7628 + /* Add a REG_DEAD note to signal that the scratch register is dead. */
7629 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, scratch_reg, REG_NOTES (insn));
7631 + /* If this is the first change in this IF-block then
7632 + signal that we have made a change. */
7633 + if ( ce_info->num_cond_clobber_insns == 0
7634 + && ce_info->num_extra_move_insns == 0 )
7635 + *num_true_changes += 1;
7637 + ce_info->num_extra_move_insns++;
7642 + /* We failed to fixup the insns, so this if-then-else can not be made
7643 + conditional. Just return NULL_RTX so that the if-then-else conversion
7644 + for this if-then-else will be cancelled. */
7651 + /* Signal that we have started if conversion after reload, which means
7652 + that it should be safe to split all the predicable clobber insns which
7653 + did not become cond_exec back into a simpler form if possible. */
7654 + cfun->machine->ifcvt_after_reload = 1;
7661 +avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info,
7662 + int *num_true_changes)
7666 + if ( ce_info->num_extra_move_insns > 0
7667 + && ce_info->num_cond_clobber_insns == 0)
7668 + /* Signal that we did not do any changes after all. */
7669 + *num_true_changes -= 1;
7671 + /* Remove any inserted move insns. */
7672 + for ( n = 0; n < ce_info->num_extra_move_insns; n++ )
7674 + rtx link, prev_link;
7676 + /* Remove REG_DEAD note since we are not needing the scratch register anyway. */
7677 + prev_link = NULL_RTX;
7678 + for (link = REG_NOTES (ce_info->extra_move_insns[n]); link; link = XEXP (link, 1))
7680 + if (REG_NOTE_KIND (link) == REG_DEAD)
7683 + XEXP (prev_link, 1) = XEXP (link, 1);
7685 + REG_NOTES (ce_info->extra_move_insns[n]) = XEXP (link, 1);
7693 + /* Revert all reg_notes for the moved insn. */
7694 + for (link = REG_NOTES (ce_info->moved_insns[n]); link; link = XEXP (link, 1))
7696 + REG_NOTES (ce_info->extra_move_insns[n]) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
7698 + REG_NOTES (ce_info->extra_move_insns[n]));
7701 + /* Remove the moved insn. */
7702 + remove_insn ( ce_info->moved_insns[n] );
7706 +/* Function returning TRUE if INSN with OPERANDS is a splittable
7707 + conditional immediate clobber insn. We assume that the insn is
7708 +   already a conditional immediate clobber insn and do not check
7711 +avr32_cond_imm_clobber_splittable (rtx insn,
7714 + if ( REGNO (operands[0]) == REGNO (operands[1]) )
7716 + if ( (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
7717 + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is21"))
7718 + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
7719 + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21")))
7722 + else if ( (logical_binary_operator (SET_SRC (XVECEXP (PATTERN (insn),0,0)), VOIDmode)
7723 + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
7724 + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is16"))
7725 + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
7726 + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks16"))) )
7732 +/* Function for getting an integer value from a const_int or const_double
7733 + expression regardless of the HOST_WIDE_INT size. Each target cpu word
7734 + will be put into the val array where the LSW will be stored at the lowest
7735 + address and so forth. Assumes that const_expr is either a const_int or
7736 + const_double. Only valid for modes which have sizes that are a multiple
7740 +avr32_get_intval (enum machine_mode mode,
7742 + HOST_WIDE_INT *val)
7744 + int words_in_mode = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
7745 + const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
7747 + if ( GET_CODE(const_expr) == CONST_DOUBLE ){
7748 + HOST_WIDE_INT hi = CONST_DOUBLE_HIGH(const_expr);
7749 + HOST_WIDE_INT lo = CONST_DOUBLE_LOW(const_expr);
7750 + /* Evaluate hi and lo values of const_double. */
7751 + avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
7754 + avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
7756 + &val[words_in_const_int]);
7757 + } else if ( GET_CODE(const_expr) == CONST_INT ){
7758 + HOST_WIDE_INT value = INTVAL(const_expr);
7760 + for ( word = 0; (word < words_in_mode) && (word < words_in_const_int); word++ ){
7761 + /* Shift word up to the MSW and shift down again to extract the
7762 + word and sign-extend. */
7763 + int lshift = (words_in_const_int - word - 1) * BITS_PER_WORD;
7764 + int rshift = (words_in_const_int-1) * BITS_PER_WORD;
7765 + val[word] = (value << lshift) >> rshift;
7768 + for ( ; word < words_in_mode; word++ ){
7769 + /* Just put the sign bits in the remaining words. */
7770 + val[word] = value < 0 ? -1 : 0;
7776 +avr32_split_const_expr (enum machine_mode mode,
7777 + enum machine_mode new_mode,
7782 + int words_in_intval = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
7783 + int words_in_split_values = GET_MODE_SIZE (new_mode)/UNITS_PER_WORD;
7784 + const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
7785 + HOST_WIDE_INT *val = alloca (words_in_intval * UNITS_PER_WORD);
7787 + avr32_get_intval (mode, expr, val);
7789 + for ( i=0; i < (words_in_intval/words_in_split_values); i++ )
7791 + HOST_WIDE_INT value_lo = 0, value_hi = 0;
7792 + for ( word = 0; word < words_in_split_values; word++ )
7794 + if ( word >= words_in_const_int )
7795 + value_hi |= ((val[i * words_in_split_values + word] &
7796 + (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
7797 + << (BITS_PER_WORD * (word - words_in_const_int)));
7799 + value_lo |= ((val[i * words_in_split_values + word] &
7800 + (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
7801 + << (BITS_PER_WORD * word));
7803 + split_expr[i] = immed_double_const(value_lo, value_hi, new_mode);
7808 +/* Set up library functions to comply to AVR32 ABI */
7811 +avr32_init_libfuncs (void)
7813 + /* Convert gcc run-time function names to AVR32 ABI names */
7815 + /* Double-precision floating-point arithmetic. */
7816 + set_optab_libfunc (neg_optab, DFmode, NULL);
7818 + /* Double-precision comparisons. */
7819 + set_optab_libfunc (eq_optab, DFmode, "__avr32_f64_cmp_eq");
7820 + set_optab_libfunc (ne_optab, DFmode, NULL);
7821 + set_optab_libfunc (lt_optab, DFmode, "__avr32_f64_cmp_lt");
7822 + set_optab_libfunc (le_optab, DFmode, NULL);
7823 + set_optab_libfunc (ge_optab, DFmode, "__avr32_f64_cmp_ge");
7824 + set_optab_libfunc (gt_optab, DFmode, NULL);
7826 + /* Single-precision floating-point arithmetic. */
7827 + set_optab_libfunc (smul_optab, SFmode, "__avr32_f32_mul");
7828 + set_optab_libfunc (neg_optab, SFmode, NULL);
7830 + /* Single-precision comparisons. */
7831 + set_optab_libfunc (eq_optab, SFmode, "__avr32_f32_cmp_eq");
7832 + set_optab_libfunc (ne_optab, SFmode, NULL);
7833 + set_optab_libfunc (lt_optab, SFmode, "__avr32_f32_cmp_lt");
7834 + set_optab_libfunc (le_optab, SFmode, NULL);
7835 + set_optab_libfunc (ge_optab, SFmode, "__avr32_f32_cmp_ge");
7836 + set_optab_libfunc (gt_optab, SFmode, NULL);
7838 + /* Floating-point to integer conversions. */
7839 + set_conv_libfunc (sfix_optab, SImode, DFmode, "__avr32_f64_to_s32");
7840 + set_conv_libfunc (ufix_optab, SImode, DFmode, "__avr32_f64_to_u32");
7841 + set_conv_libfunc (sfix_optab, DImode, DFmode, "__avr32_f64_to_s64");
7842 + set_conv_libfunc (ufix_optab, DImode, DFmode, "__avr32_f64_to_u64");
7843 + set_conv_libfunc (sfix_optab, SImode, SFmode, "__avr32_f32_to_s32");
7844 + set_conv_libfunc (ufix_optab, SImode, SFmode, "__avr32_f32_to_u32");
7845 + set_conv_libfunc (sfix_optab, DImode, SFmode, "__avr32_f32_to_s64");
7846 + set_conv_libfunc (ufix_optab, DImode, SFmode, "__avr32_f32_to_u64");
7848 + /* Conversions between floating types. */
7849 + set_conv_libfunc (trunc_optab, SFmode, DFmode, "__avr32_f64_to_f32");
7850 + set_conv_libfunc (sext_optab, DFmode, SFmode, "__avr32_f32_to_f64");
7852 + /* Integer to floating-point conversions. Table 8. */
7853 + set_conv_libfunc (sfloat_optab, DFmode, SImode, "__avr32_s32_to_f64");
7854 + set_conv_libfunc (sfloat_optab, DFmode, DImode, "__avr32_s64_to_f64");
7855 + set_conv_libfunc (sfloat_optab, SFmode, SImode, "__avr32_s32_to_f32");
7856 + set_conv_libfunc (sfloat_optab, SFmode, DImode, "__avr32_s64_to_f32");
7857 + set_conv_libfunc (ufloat_optab, DFmode, SImode, "__avr32_u32_to_f64");
7858 + set_conv_libfunc (ufloat_optab, SFmode, SImode, "__avr32_u32_to_f32");
7859 + /* TODO: Add these to gcc library functions */
7860 + //set_conv_libfunc (ufloat_optab, DFmode, DImode, NULL);
7861 + //set_conv_libfunc (ufloat_optab, SFmode, DImode, NULL);
7863 + /* Long long. Table 9. */
7864 + set_optab_libfunc (smul_optab, DImode, "__avr32_mul64");
7865 + set_optab_libfunc (sdiv_optab, DImode, "__avr32_sdiv64");
7866 + set_optab_libfunc (udiv_optab, DImode, "__avr32_udiv64");
7867 + set_optab_libfunc (smod_optab, DImode, "__avr32_smod64");
7868 + set_optab_libfunc (umod_optab, DImode, "__avr32_umod64");
7869 + set_optab_libfunc (ashl_optab, DImode, "__avr32_lsl64");
7870 + set_optab_libfunc (lshr_optab, DImode, "__avr32_lsr64");
7871 + set_optab_libfunc (ashr_optab, DImode, "__avr32_asr64");
7873 + /* Floating point library functions which have fast versions. */
7874 + if ( TARGET_FAST_FLOAT )
7876 + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div_fast");
7877 + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul_fast");
7878 + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add_fast");
7879 + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub_fast");
7880 + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add_fast");
7881 + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub_fast");
7882 + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div_fast");
7886 + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div");
7887 + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul");
7888 + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add");
7889 + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub");
7890 + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add");
7891 + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub");
7892 + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div");
7896 +++ b/gcc/config/avr32/avr32-elf.h
7899 + Elf specific definitions.
7900 + Copyright 2003-2006 Atmel Corporation.
7902 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
7904 + This file is part of GCC.
7906 + This program is free software; you can redistribute it and/or modify
7907 + it under the terms of the GNU General Public License as published by
7908 + the Free Software Foundation; either version 2 of the License, or
7909 + (at your option) any later version.
7911 + This program is distributed in the hope that it will be useful,
7912 + but WITHOUT ANY WARRANTY; without even the implied warranty of
7913 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7914 + GNU General Public License for more details.
7916 + You should have received a copy of the GNU General Public License
7917 + along with this program; if not, write to the Free Software
7918 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
7921 +/*****************************************************************************
7922 + * Controlling the Compilation Driver, 'gcc'
7923 + *****************************************************************************/
7925 +/* Run-time Target Specification. */
7926 +#undef TARGET_VERSION
7927 +#define TARGET_VERSION fputs (" (AVR32 GNU with ELF)", stderr);
7930 +Another C string constant used much like LINK_SPEC. The
7931 +difference between the two is that STARTFILE_SPEC is used at
7932 +the very beginning of the command given to the linker.
7934 +If this macro is not defined, a default is provided that loads the
7935 +standard C startup file from the usual place. See gcc.c.
7937 +#undef STARTFILE_SPEC
7938 +#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s"
7941 +#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=uc3a3revd:-mavr32elf_uc3a3256s;:%{mpart=*:-mavr32elf_%*}} %{mcpu=*:-mavr32elf_%*}"
7945 +Another C string constant used much like LINK_SPEC. The
7946 +difference between the two is that ENDFILE_SPEC is used at
7947 +the very end of the command given to the linker.
7949 +Do not define this macro if it does not need to do anything.
7951 +#undef ENDFILE_SPEC
7952 +#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
7955 +/* Target CPU builtins. */
7956 +#define TARGET_CPU_CPP_BUILTINS() \
7959 + builtin_define ("__avr32__"); \
7960 + builtin_define ("__AVR32__"); \
7961 + builtin_define ("__AVR32_ELF__"); \
7962 + builtin_define (avr32_part->macro); \
7963 + builtin_define (avr32_arch->macro); \
7964 + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
7965 + builtin_define ("__AVR32_AVR32A__"); \
7967 + builtin_define ("__AVR32_AVR32B__"); \
7968 + if (TARGET_UNALIGNED_WORD) \
7969 + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
7970 + if (TARGET_SIMD) \
7971 + builtin_define ("__AVR32_HAS_SIMD__"); \
7973 + builtin_define ("__AVR32_HAS_DSP__"); \
7975 + builtin_define ("__AVR32_HAS_RMW__"); \
7976 + if (TARGET_BRANCH_PRED) \
7977 + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
7978 + if (TARGET_FAST_FLOAT) \
7979 + builtin_define ("__AVR32_FAST_FLOAT__"); \
7980 + if (TARGET_NO_MUL_INSNS) \
7981 + builtin_define ("__AVR32_NO_MUL__"); \
7985 +++ b/gcc/config/avr32/avr32.h
7988 + Definitions of target machine for AVR32.
7989 + Copyright 2003-2006 Atmel Corporation.
7991 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
7992 +   Initial porting by Anders Ådland.
7994 + This file is part of GCC.
7996 + This program is free software; you can redistribute it and/or modify
7997 + it under the terms of the GNU General Public License as published by
7998 + the Free Software Foundation; either version 2 of the License, or
7999 + (at your option) any later version.
8001 + This program is distributed in the hope that it will be useful,
8002 + but WITHOUT ANY WARRANTY; without even the implied warranty of
8003 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8004 + GNU General Public License for more details.
8006 + You should have received a copy of the GNU General Public License
8007 + along with this program; if not, write to the Free Software
8008 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
8010 +#ifndef GCC_AVR32_H
8011 +#define GCC_AVR32_H
8014 +#ifndef OBJECT_FORMAT_ELF
8015 +#error avr32.h included before elfos.h
8018 +#ifndef LOCAL_LABEL_PREFIX
8019 +#define LOCAL_LABEL_PREFIX "."
8022 +#ifndef SUBTARGET_CPP_SPEC
8023 +#define SUBTARGET_CPP_SPEC "-D__ELF__"
8027 +extern struct rtx_def *avr32_compare_op0;
8028 +extern struct rtx_def *avr32_compare_op1;
8031 +extern struct rtx_def *avr32_acc_cache;
8033 +/* cache instruction op5 codes */
8034 +#define AVR32_CACHE_INVALIDATE_ICACHE 1
8036 +/* These bits describe the different types of function supported
8037 + by the AVR32 backend. They are exclusive. ie a function cannot be both a
8038 + normal function and an interworked function, for example. Knowing the
8039 + type of a function is important for determining its prologue and
8040 + epilogue sequences.
8041 + Note value 7 is currently unassigned. Also note that the interrupt
8042 + function types all have bit 2 set, so that they can be tested for easily.
8043 + Note that 0 is deliberately chosen for AVR32_FT_UNKNOWN so that when the
8044 + machine_function structure is initialized (to zero) func_type will
8045 + default to unknown. This will force the first use of avr32_current_func_type
8046 + to call avr32_compute_func_type. */
8047 +#define AVR32_FT_UNKNOWN 0 /* Type has not yet been determined.
8049 +#define AVR32_FT_NORMAL 1 /* Your normal, straightforward
8051 +#define AVR32_FT_ACALL 2 /* An acall function. */
8052 +#define AVR32_FT_EXCEPTION_HANDLER 3 /* A C++ exception handler. */
8053 +#define AVR32_FT_ISR_FULL 4 /* A fully shadowed interrupt mode. */
8054 +#define AVR32_FT_ISR_HALF 5 /* A half shadowed interrupt mode. */
8055 +#define AVR32_FT_ISR_NONE 6 /* No shadow registers. */
8057 +#define AVR32_FT_TYPE_MASK ((1 << 3) - 1)
8059 +/* In addition functions can have several type modifiers,
8060 + outlined by these bit masks: */
8061 +#define AVR32_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR
8063 +#define AVR32_FT_NAKED (1 << 3) /* No prologue or epilogue. */
8064 +#define AVR32_FT_VOLATILE (1 << 4) /* Does not return. */
8065 +#define AVR32_FT_NESTED (1 << 5) /* Embedded inside another
8068 +/* Some macros to test these flags. */
8069 +#define AVR32_FUNC_TYPE(t) (t & AVR32_FT_TYPE_MASK)
8070 +#define IS_INTERRUPT(t) (t & AVR32_FT_INTERRUPT)
8071 +#define IS_VOLATILE(t) (t & AVR32_FT_VOLATILE)
8072 +#define IS_NAKED(t) (t & AVR32_FT_NAKED)
8073 +#define IS_NESTED(t) (t & AVR32_FT_NESTED)
8076 +typedef struct minipool_labels
8077 +GTY ((chain_next ("%h.next"), chain_prev ("%h.prev")))
8080 + struct minipool_labels *prev;
8081 + struct minipool_labels *next;
8084 +/* A C structure for machine-specific, per-function data.
8085 + This is added to the cfun structure. */
8087 +typedef struct machine_function
8090 + /* Records the type of the current function. */
8091 + unsigned long func_type;
8092 + /* List of minipool labels, use for checking if code label is valid in a
8093 + memory expression */
8094 + minipool_labels *minipool_label_head;
8095 + minipool_labels *minipool_label_tail;
8096 + int ifcvt_after_reload;
8097 +} machine_function;
8099 +/* Initialize data used by insn expanders. This is called from insn_emit,
8100 + once for every function before code is generated. */
8101 +#define INIT_EXPANDERS avr32_init_expanders ()
8103 +/******************************************************************************
8105 + *****************************************************************************/
8108 +#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=ucr2nomul:-march=ucr2;:%{march=*:-march=%*}} %{mpart=uc3a3revd:-mpart=uc3a3256s;:%{mpart=*:-mpart=%*}}"
8111 +#ifndef MULTILIB_DEFAULTS
8112 +#define MULTILIB_DEFAULTS { "march=ap", "" }
8115 +/******************************************************************************
8116 + * Run-time Target Specification
8117 + *****************************************************************************/
8118 +#ifndef TARGET_VERSION
8119 +#define TARGET_VERSION fprintf(stderr, " (AVR32, GNU assembler syntax)");
8123 +/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c*/
8126 + PART_TYPE_AVR32_NONE,
8127 + PART_TYPE_AVR32_AP7000,
8128 + PART_TYPE_AVR32_AP7001,
8129 + PART_TYPE_AVR32_AP7002,
8130 + PART_TYPE_AVR32_AP7200,
8131 + PART_TYPE_AVR32_UC3A0128,
8132 + PART_TYPE_AVR32_UC3A0256,
8133 + PART_TYPE_AVR32_UC3A0512,
8134 + PART_TYPE_AVR32_UC3A0512ES,
8135 + PART_TYPE_AVR32_UC3A1128,
8136 + PART_TYPE_AVR32_UC3A1256,
8137 + PART_TYPE_AVR32_UC3A1512,
8138 + PART_TYPE_AVR32_UC3A1512ES,
8139 + PART_TYPE_AVR32_UC3A3REVD,
8140 + PART_TYPE_AVR32_UC3A364,
8141 + PART_TYPE_AVR32_UC3A364S,
8142 + PART_TYPE_AVR32_UC3A3128,
8143 + PART_TYPE_AVR32_UC3A3128S,
8144 + PART_TYPE_AVR32_UC3A3256,
8145 + PART_TYPE_AVR32_UC3A3256S,
8146 + PART_TYPE_AVR32_UC3B064,
8147 + PART_TYPE_AVR32_UC3B0128,
8148 + PART_TYPE_AVR32_UC3B0256,
8149 + PART_TYPE_AVR32_UC3B0256ES,
8150 + PART_TYPE_AVR32_UC3B0512REVC,
8151 + PART_TYPE_AVR32_UC3B164,
8152 + PART_TYPE_AVR32_UC3B1128,
8153 + PART_TYPE_AVR32_UC3B1256,
8154 + PART_TYPE_AVR32_UC3B1256ES,
8155 + PART_TYPE_AVR32_UC3B1512REVC,
8156 + PART_TYPE_AVR32_UC3C0512C,
8157 + PART_TYPE_AVR32_UC3C0256C,
8158 + PART_TYPE_AVR32_UC3C0128C,
8159 + PART_TYPE_AVR32_UC3C064C,
8160 + PART_TYPE_AVR32_UC3C1512C,
8161 + PART_TYPE_AVR32_UC3C1256C,
8162 + PART_TYPE_AVR32_UC3C1128C,
8163 + PART_TYPE_AVR32_UC3C164C,
8164 + PART_TYPE_AVR32_UC3C2512C,
8165 + PART_TYPE_AVR32_UC3C2256C,
8166 + PART_TYPE_AVR32_UC3C2128C,
8167 + PART_TYPE_AVR32_UC3C264C,
8168 + PART_TYPE_AVR32_UC3L064,
8169 + PART_TYPE_AVR32_UC3L032,
8170 + PART_TYPE_AVR32_UC3L016
8173 +/* Microarchitectures. */
8174 +enum microarchitecture_type
8176 + UARCH_TYPE_AVR32A,
8177 + UARCH_TYPE_AVR32B,
8181 +/* Architectures types which specifies the pipeline.
8182 + Keep this in sync with avr32_arch_types in avr32.c
8183 + and the pipeline attribute in avr32.md */
8184 +enum architecture_type
8186 + ARCH_TYPE_AVR32_AP,
8187 + ARCH_TYPE_AVR32_UCR1,
8188 + ARCH_TYPE_AVR32_UCR2,
8189 + ARCH_TYPE_AVR32_UCR2NOMUL,
8190 + ARCH_TYPE_AVR32_UCR3,
8191 + ARCH_TYPE_AVR32_NONE
8194 +/* Flag specifying if the cpu has support for DSP instructions.*/
8195 +#define FLAG_AVR32_HAS_DSP (1 << 0)
8196 +/* Flag specifying if the cpu has support for Read-Modify-Write
8198 +#define FLAG_AVR32_HAS_RMW (1 << 1)
8199 +/* Flag specifying if the cpu has support for SIMD instructions. */
8200 +#define FLAG_AVR32_HAS_SIMD (1 << 2)
8201 +/* Flag specifying if the cpu has support for unaligned memory word access. */
8202 +#define FLAG_AVR32_HAS_UNALIGNED_WORD (1 << 3)
8203 +/* Flag specifying if the cpu has support for branch prediction. */
8204 +#define FLAG_AVR32_HAS_BRANCH_PRED (1 << 4)
8205 +/* Flag specifying if the cpu has support for a return stack. */
8206 +#define FLAG_AVR32_HAS_RETURN_STACK (1 << 5)
8207 +/* Flag specifying if the cpu has caches. */
8208 +#define FLAG_AVR32_HAS_CACHES (1 << 6)
8209 +/* Flag specifying if the cpu has support for v2 insns. */
8210 +#define FLAG_AVR32_HAS_V2_INSNS (1 << 7)
8211 +/* Flag specifying that the cpu has buggy mul insns. */
8212 +#define FLAG_AVR32_HAS_NO_MUL_INSNS (1 << 8)
8214 +/* Structure for holding information about different avr32 CPUs/parts */
8217 + const char *const name;
8218 + enum part_type part_type;
8219 + enum architecture_type arch_type;
8220 + /* Must lie outside user's namespace. NULL == no macro. */
8221 + const char *const macro;
8224 +/* Structure for holding information about different avr32 pipeline
8228 + const char *const name;
8229 + enum architecture_type arch_type;
8230 + enum microarchitecture_type uarch_type;
8231 + const unsigned long feature_flags;
8232 + /* Must lie outside user's namespace. NULL == no macro. */
8233 + const char *const macro;
8236 +extern const struct part_type_s *avr32_part;
8237 +extern const struct arch_type_s *avr32_arch;
8239 +#define TARGET_SIMD (avr32_arch->feature_flags & FLAG_AVR32_HAS_SIMD)
8240 +#define TARGET_DSP (avr32_arch->feature_flags & FLAG_AVR32_HAS_DSP)
8241 +#define TARGET_RMW (avr32_arch->feature_flags & FLAG_AVR32_HAS_RMW)
8242 +#define TARGET_UNALIGNED_WORD (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD)
8243 +#define TARGET_BRANCH_PRED (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED)
8244 +#define TARGET_RETURN_STACK (avr32_arch->feature_flags & FLAG_AVR32_HAS_RETURN_STACK)
8245 +#define TARGET_V2_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_V2_INSNS)
8246 +#define TARGET_CACHES (avr32_arch->feature_flags & FLAG_AVR32_HAS_CACHES)
8247 +#define TARGET_NO_MUL_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_NO_MUL_INSNS)
8248 +#define TARGET_ARCH_AP (avr32_arch->arch_type == ARCH_TYPE_AVR32_AP)
8249 +#define TARGET_ARCH_UCR1 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR1)
8250 +#define TARGET_ARCH_UCR2 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR2)
8251 +#define TARGET_ARCH_UC (TARGET_ARCH_UCR1 || TARGET_ARCH_UCR2)
8252 +#define TARGET_UARCH_AVR32A (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
8253 +#define TARGET_UARCH_AVR32B (avr32_arch->uarch_type == UARCH_TYPE_AVR32B)
8255 +#define CAN_DEBUG_WITHOUT_FP
8260 +/******************************************************************************
8262 + *****************************************************************************/
8265 +Define this macro to have the value 1 if the most significant bit in a
8266 +byte has the lowest number; otherwise define it to have the value zero.
8267 +This means that bit-field instructions count from the most significant
8268 +bit. If the machine has no bit-field instructions, then this must still
8269 +be defined, but it doesn't matter which value it is defined to. This
8270 +macro need not be a constant.
8272 +This macro does not affect the way structure fields are packed into
8273 +bytes or words; that is controlled by BYTES_BIG_ENDIAN.
8275 +#define BITS_BIG_ENDIAN 0
8278 +Define this macro to have the value 1 if the most significant byte in a
8279 +word has the lowest number. This macro need not be a constant.
8282 +   Data is stored in a big-endian way.
8284 +#define BYTES_BIG_ENDIAN 1
8287 +Define this macro to have the value 1 if, in a multiword object, the
8288 +most significant word has the lowest number. This applies to both
8289 +memory locations and registers; GCC fundamentally assumes that the
8290 +order of words in memory is the same as the order in registers. This
8291 +macro need not be a constant.
8294 +   Data is stored in a big-endian way.
8296 +#define WORDS_BIG_ENDIAN 1
8299 +Define this macro if WORDS_BIG_ENDIAN is not constant. This must be a
8300 +constant value with the same meaning as WORDS_BIG_ENDIAN, which will be
8301 +used only when compiling libgcc2.c. Typically the value will be set
8302 +based on preprocessor defines.
8304 +#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
8307 +Define this macro to have the value 1 if DFmode, XFmode or
8308 +TFmode floating point numbers are stored in memory with the word
8309 +containing the sign bit at the lowest address; otherwise define it to
8310 +have the value 0. This macro need not be a constant.
8312 +You need not define this macro if the ordering is the same as for
8313 +multi-word integers.
8315 +/* #define FLOAT_WORDS_BIG_ENDIAN 1 */
8318 +Define this macro to be the number of bits in an addressable storage
8319 +unit (byte); normally 8.
8321 +#define BITS_PER_UNIT 8
8324 +Number of bits in a word; normally 32.
8326 +#define BITS_PER_WORD 32
8329 +Maximum number of bits in a word. If this is undefined, the default is
8330 +BITS_PER_WORD. Otherwise, it is the constant value that is the
8331 +largest value that BITS_PER_WORD can have at run-time.
8333 +/* MAX_BITS_PER_WORD not defined*/
8336 +Number of storage units in a word; normally 4.
8338 +#define UNITS_PER_WORD 4
8341 +Minimum number of units in a word. If this is undefined, the default is
8342 +UNITS_PER_WORD. Otherwise, it is the constant value that is the
8343 +smallest value that UNITS_PER_WORD can have at run-time.
8345 +/* MIN_UNITS_PER_WORD not defined */
8348 +Width of a pointer, in bits. You must specify a value no wider than the
8349 +width of Pmode. If it is not equal to the width of Pmode,
8350 +you must define POINTERS_EXTEND_UNSIGNED.
8352 +#define POINTER_SIZE 32
8355 +A C expression whose value is greater than zero if pointers that need to be
8356 +extended from being POINTER_SIZE bits wide to Pmode are to
8357 +be zero-extended and zero if they are to be sign-extended. If the value
8358 +is less than zero then there must be a "ptr_extend" instruction that
8359 +extends a pointer from POINTER_SIZE to Pmode.
8361 +You need not define this macro if the POINTER_SIZE is equal
8362 +to the width of Pmode.
8364 +/* #define POINTERS_EXTEND_UNSIGNED */
8367 +A Macro to update M and UNSIGNEDP when an object whose type
8368 +is TYPE and which has the specified mode and signedness is to be
8369 +stored in a register. This macro is only called when TYPE is a
8372 +On most RISC machines, which only have operations that operate on a full
8373 +register, define this macro to set M to word_mode if
8374 +M is an integer mode narrower than BITS_PER_WORD. In most
8375 +cases, only integer modes should be widened because wider-precision
8376 +floating-point operations are usually more expensive than their narrower
8379 +For most machines, the macro definition does not change UNSIGNEDP.
8380 +However, some machines, have instructions that preferentially handle
8381 +either signed or unsigned quantities of certain modes. For example, on
8382 +the DEC Alpha, 32-bit loads from memory and 32-bit add instructions
8383 +sign-extend the result to 64 bits. On such machines, set
8384 +UNSIGNEDP according to which kind of extension is more efficient.
8386 +Do not define this macro if it would never modify M.
8388 +#define PROMOTE_MODE(M, UNSIGNEDP, TYPE) \
8390 + if (!AGGREGATE_TYPE_P (TYPE) \
8391 + && GET_MODE_CLASS (mode) == MODE_INT \
8392 + && GET_MODE_SIZE (mode) < 4) \
8394 + if (M == QImode) \
8395 + (UNSIGNEDP) = 1; \
8396 + else if (M == HImode) \
8397 + (UNSIGNEDP) = 0; \
8402 +#define PROMOTE_FUNCTION_MODE(M, UNSIGNEDP, TYPE) \
8403 + PROMOTE_MODE(M, UNSIGNEDP, TYPE)
8405 +/* Define if operations between registers always perform the operation
8406 + on the full register even if a narrower mode is specified. */
8407 +#define WORD_REGISTER_OPERATIONS
8409 +/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
8410 + will either zero-extend or sign-extend. The value of this macro should
8411 + be the code that says which one of the two operations is implicitly
8412 + done, UNKNOWN if not known. */
8413 +#define LOAD_EXTEND_OP(MODE) \
8414 + (((MODE) == QImode) ? ZERO_EXTEND \
8415 + : ((MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)
8419 +Normal alignment required for function parameters on the stack, in
8420 +bits. All stack parameters receive at least this much alignment
8421 +regardless of data type. On most machines, this is the same as the
8422 +size of an integer.
8424 +#define PARM_BOUNDARY 32
8427 +Define this macro to the minimum alignment enforced by hardware for the
8428 +stack pointer on this machine. The definition is a C expression for the
8429 +desired alignment (measured in bits). This value is used as a default
8430 +if PREFERRED_STACK_BOUNDARY is not defined. On most machines,
8431 +this should be the same as PARM_BOUNDARY.
8433 +#define STACK_BOUNDARY 32
8436 +Define this macro if you wish to preserve a certain alignment for the
8437 +stack pointer, greater than what the hardware enforces. The definition
8438 +is a C expression for the desired alignment (measured in bits). This
8439 +macro must evaluate to a value equal to or larger than
8442 +#define PREFERRED_STACK_BOUNDARY (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
8445 +Alignment required for a function entry point, in bits.
8447 +#define FUNCTION_BOUNDARY 16
8450 +Biggest alignment that any data type can require on this machine, in bits.
8452 +#define BIGGEST_ALIGNMENT (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
8455 +If defined, the smallest alignment, in bits, that can be given to an
8456 +object that can be referenced in one operation, without disturbing any
8457 +nearby object. Normally, this is BITS_PER_UNIT, but may be larger
8458 +on machines that don't have byte or half-word store operations.
8460 +#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT
8464 +An integer expression for the size in bits of the largest integer machine mode that
8465 +should actually be used. All integer machine modes of this size or smaller can be
8466 +used for structures and unions with the appropriate sizes. If this macro is undefined,
8467 +GET_MODE_BITSIZE (DImode) is assumed.*/
8468 +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
8472 +If defined, a C expression to compute the alignment given to a constant
8473 +that is being placed in memory. CONSTANT is the constant and
8474 +BASIC_ALIGN is the alignment that the object would ordinarily
8475 +have. The value of this macro is used instead of that alignment to
8478 +If this macro is not defined, then BASIC_ALIGN is used.
8480 +The typical use of this macro is to increase alignment for string
8481 +constants to be word aligned so that strcpy calls that copy
8482 +constants can be done inline.
8484 +#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \
8485 + ((TREE_CODE(CONSTANT) == STRING_CST) ? BITS_PER_WORD : BASIC_ALIGN)
8487 +/* Try to align string to a word. */
8488 +#define DATA_ALIGNMENT(TYPE, ALIGN) \
8489 + ({(TREE_CODE (TYPE) == ARRAY_TYPE \
8490 + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
8491 + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
8493 +/* Try to align local store strings to a word. */
8494 +#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
8495 + ({(TREE_CODE (TYPE) == ARRAY_TYPE \
8496 + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
8497 + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
8500 +Define this macro to be the value 1 if instructions will fail to work
8501 +if given data not on the nominal alignment. If instructions will merely
8502 +go slower in that case, define this macro as 0.
8504 +#define STRICT_ALIGNMENT 1
8507 +Define this if you wish to imitate the way many other C compilers handle
8508 +alignment of bit-fields and the structures that contain them.
8510 +The behavior is that the type written for a bit-field (int,
8511 +short, or other integer type) imposes an alignment for the
8512 +entire structure, as if the structure really did contain an ordinary
8513 +field of that type. In addition, the bit-field is placed within the
8514 +structure so that it would fit within such a field, not crossing a
8517 +Thus, on most machines, a bit-field whose type is written as int
8518 +would not cross a four-byte boundary, and would force four-byte
8519 +alignment for the whole structure. (The alignment used may not be four
8520 +bytes; it is controlled by the other alignment parameters.)
8522 +If the macro is defined, its definition should be a C expression;
8523 +a nonzero value for the expression enables this behavior.
8525 +Note that if this macro is not defined, or its value is zero, some
8526 +bit-fields may cross more than one alignment boundary. The compiler can
8527 +support such references if there are insv, extv, and
8528 +extzv insns that can directly reference memory.
8530 +The other known way of making bit-fields work is to define
8531 +STRUCTURE_SIZE_BOUNDARY as large as BIGGEST_ALIGNMENT.
8532 +Then every structure can be accessed with fullwords.
8534 +Unless the machine has bit-field instructions or you define
8535 +STRUCTURE_SIZE_BOUNDARY that way, you must define
8536 +PCC_BITFIELD_TYPE_MATTERS to have a nonzero value.
8538 +If your aim is to make GCC use the same conventions for laying out
8539 +bit-fields as are used by another compiler, here is how to investigate
8540 +what the other compiler does. Compile and run this program:
8558 + printf ("Size of foo1 is %d\n",
8559 + sizeof (struct foo1));
8560 + printf ("Size of foo2 is %d\n",
8561 + sizeof (struct foo2));
8565 +If this prints 2 and 5, then the compiler's behavior is what you would
8566 +get from PCC_BITFIELD_TYPE_MATTERS.
8568 +#define PCC_BITFIELD_TYPE_MATTERS 1
8571 +/******************************************************************************
8572 + * Layout of Source Language Data Types
8573 + *****************************************************************************/
8576 +A C expression for the size in bits of the type int on the
8577 +target machine. If you don't define this, the default is one word.
8579 +#define INT_TYPE_SIZE 32
8582 +A C expression for the size in bits of the type short on the
8583 +target machine. If you don't define this, the default is half a word. (If
8584 +this would be less than one storage unit, it is rounded up to one unit.)
8586 +#define SHORT_TYPE_SIZE 16
8589 +A C expression for the size in bits of the type long on the
8590 +target machine. If you don't define this, the default is one word.
8592 +#define LONG_TYPE_SIZE 32
8596 +A C expression for the size in bits of the type long long on the
8597 +target machine. If you don't define this, the default is two
8598 +words. If you want to support GNU Ada on your machine, the value of this
8599 +macro must be at least 64.
8601 +#define LONG_LONG_TYPE_SIZE 64
8604 +A C expression for the size in bits of the type char on the
8605 +target machine. If you don't define this, the default is
8608 +#define CHAR_TYPE_SIZE 8
8612 +A C expression for the size in bits of the C++ type bool and
8613 +C99 type _Bool on the target machine. If you don't define
8614 +this, and you probably shouldn't, the default is CHAR_TYPE_SIZE.
8616 +#define BOOL_TYPE_SIZE 8
8620 +An expression whose value is 1 or 0, according to whether the type
8621 +char should be signed or unsigned by default. The user can
8622 +always override this default with the options -fsigned-char
8623 +and -funsigned-char.
8625 +/* We are using unsigned char */
8626 +#define DEFAULT_SIGNED_CHAR 0
8630 +A C expression for a string describing the name of the data type to use
8631 +for size values. The typedef name size_t is defined using the
8632 +contents of the string.
8634 +The string can contain more than one keyword. If so, separate them with
8635 +spaces, and write first any length keyword, then unsigned if
8636 +appropriate, and finally int. The string must exactly match one
8637 +of the data type names defined in the function
8638 +init_decl_processing in the file c-decl.c. You may not
8639 +omit int or change the order - that would cause the compiler to
8642 +If you don't define this macro, the default is "long unsigned int".
8644 +#define SIZE_TYPE "long unsigned int"
8647 +A C expression for a string describing the name of the data type to use
8648 +for the result of subtracting two pointers. The typedef name
8649 +ptrdiff_t is defined using the contents of the string. See
8650 +SIZE_TYPE above for more information.
8652 +If you don't define this macro, the default is "long int".
8654 +#define PTRDIFF_TYPE "long int"
8658 +A C expression for the size in bits of the data type for wide
8659 +characters. This is used in cpp, which cannot make use of
8662 +#define WCHAR_TYPE_SIZE 32
8666 +A C expression for a string describing the name of the data type to
8667 +use for wide characters passed to printf and returned from
8668 +getwc. The typedef name wint_t is defined using the
8669 +contents of the string. See SIZE_TYPE above for more
8672 +If you don't define this macro, the default is "unsigned int".
8674 +#define WINT_TYPE "unsigned int"
8677 +A C expression for a string describing the name of the data type that
8678 +can represent any value of any standard or extended signed integer type.
8679 +The typedef name intmax_t is defined using the contents of the
8680 +string. See SIZE_TYPE above for more information.
8682 +If you don't define this macro, the default is the first of
8683 +"int", "long int", or "long long int" that has as
8684 +much precision as long long int.
8686 +#define INTMAX_TYPE "long long int"
8689 +A C expression for a string describing the name of the data type that
8690 +can represent any value of any standard or extended unsigned integer
8691 +type. The typedef name uintmax_t is defined using the contents
8692 +of the string. See SIZE_TYPE above for more information.
8694 +If you don't define this macro, the default is the first of
8695 +"unsigned int", "long unsigned int", or "long long unsigned int"
8696 +that has as much precision as long long unsigned int.
8698 +#define UINTMAX_TYPE "long long unsigned int"
8701 +/******************************************************************************
8703 + *****************************************************************************/
8705 +/* Convert from gcc internal register number to register number
8706 + used in assembly code */
8707 +#define ASM_REGNUM(reg) (LAST_REGNUM - (reg))
8708 +#define ASM_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg))
8710 +/* Convert from register number used in assembly to gcc
8711 + internal register number */
8712 +#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg))
8713 +#define INTERNAL_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg))
8715 +/** Basic Characteristics of Registers **/
8718 +Number of hardware registers known to the compiler. They receive
8719 +numbers 0 through FIRST_PSEUDO_REGISTER-1; thus, the first
8720 +pseudo register's number really is assigned the number
8721 +FIRST_PSEUDO_REGISTER.
8723 +#define FIRST_PSEUDO_REGISTER (LAST_FP_REGNUM + 1)
8725 +#define FIRST_REGNUM 0
8726 +#define LAST_REGNUM 15
8727 +#define NUM_FP_REGS 16
8728 +#define FIRST_FP_REGNUM 16
8729 +#define LAST_FP_REGNUM (16+NUM_FP_REGS-1)
8732 +An initializer that says which registers are used for fixed purposes
8733 +all throughout the compiled code and are therefore not available for
8734 +general allocation. These would include the stack pointer, the frame
8735 +pointer (except on machines where that can be used as a general
8736 +register when no frame pointer is needed), the program counter on
8737 +machines where that is considered one of the addressable registers,
8738 +and any other numbered register with a standard use.
8740 +This information is expressed as a sequence of numbers, separated by
8741 +commas and surrounded by braces. The nth number is 1 if
8742 +register n is fixed, 0 otherwise.
8744 +The table initialized from this macro, and the table initialized by
8745 +the following one, may be overridden at run time either automatically,
8746 +by the actions of the macro CONDITIONAL_REGISTER_USAGE, or by
8747 +the user with the command options -ffixed-[reg],
8748 +-fcall-used-[reg] and -fcall-saved-[reg].
8751 +/* The internal gcc register numbers are reversed
8752 + compared to the real register numbers since
8753 + gcc expects data types stored over multiple
8754 + registers in the register file to be big endian
8755 + if the memory layout is big endian. But this
8756 + is not the case for avr32 so we fake a big
8757 + endian register file. */
8759 +#define FIXED_REGISTERS { \
8760 + 1, /* Program Counter */ \
8761 + 0, /* Link Register */ \
8762 + 1, /* Stack Pointer */ \
8795 +Like FIXED_REGISTERS but has 1 for each register that is
8796 +clobbered (in general) by function calls as well as for fixed
8797 +registers. This macro therefore identifies the registers that are not
8798 +available for general allocation of values that must live across
8801 +If a register has 0 in CALL_USED_REGISTERS, the compiler
8802 +automatically saves it on function entry and restores it on function
8803 +exit, if the register is used within the function.
8805 +#define CALL_USED_REGISTERS { \
8806 + 1, /* Program Counter */ \
8807 + 0, /* Link Register */ \
8808 + 1, /* Stack Pointer */ \
8840 +/* Interrupt functions can only use registers that have already been
8841 + saved by the prologue, even if they would normally be
8842 + call-clobbered. */
8843 +#define HARD_REGNO_RENAME_OK(SRC, DST) \
8844 + (! IS_INTERRUPT (cfun->machine->func_type) || \
8845 + df_regs_ever_live_p (DST))
8849 +Zero or more C statements that may conditionally modify five variables
8850 +fixed_regs, call_used_regs, global_regs,
8851 +reg_names, and reg_class_contents, to take into account
8852 +any dependence of these register sets on target flags. The first three
8853 +of these are of type char [] (interpreted as Boolean vectors).
8854 +global_regs is a const char *[], and
8855 +reg_class_contents is a HARD_REG_SET. Before the macro is
8856 +called, fixed_regs, call_used_regs,
8857 +reg_class_contents, and reg_names have been initialized
8858 +from FIXED_REGISTERS, CALL_USED_REGISTERS,
8859 +REG_CLASS_CONTENTS, and REGISTER_NAMES, respectively.
8860 +global_regs has been cleared, and any -ffixed-[reg],
8861 +-fcall-used-[reg] and -fcall-saved-[reg]
8862 +command options have been applied.
8864 +You need not define this macro if it has no work to do.
8866 +If the usage of an entire class of registers depends on the target
8867 +flags, you may indicate this to GCC by using this macro to modify
8868 +fixed_regs and call_used_regs to 1 for each of the
8869 +registers in the classes which should not be used by GCC. Also define
8870 +the macro REG_CLASS_FROM_LETTER to return NO_REGS if it
8871 +is called with a letter for a class that shouldn't be used.
8873 + (However, if this class is not included in GENERAL_REGS and all
8874 +of the insn patterns whose constraints permit this class are
8875 +controlled by target switches, then GCC will automatically avoid using
8876 +these registers when the target switches are opposed to them.)
8878 +#define CONDITIONAL_REGISTER_USAGE \
8883 + if (TARGET_SOFT_FLOAT) \
8885 + for (regno = FIRST_FP_REGNUM; \
8886 + regno <= LAST_FP_REGNUM; ++regno) \
8887 + fixed_regs[regno] = call_used_regs[regno] = 1; \
8891 + fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
8892 + call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
8899 +If the program counter has a register number, define this as that
8900 +register number. Otherwise, do not define it.
8903 +#define LAST_AVR32_REGNUM 16
8906 +/** Order of Allocation of Registers **/
8909 +If defined, an initializer for a vector of integers, containing the
8910 +numbers of hard registers in the order in which GCC should prefer
8911 +to use them (from most preferred to least).
8913 +If this macro is not defined, registers are used lowest numbered first
8914 +(all else being equal).
8916 +One use of this macro is on machines where the highest numbered
8917 +registers must always be saved and the save-multiple-registers
8918 +instruction supports only sequences of consecutive registers. On such
8919 +machines, define REG_ALLOC_ORDER to be an initializer that lists
8920 +the highest numbered allocable register first.
8922 +#define REG_ALLOC_ORDER \
8924 + INTERNAL_REGNUM(8), \
8925 + INTERNAL_REGNUM(9), \
8926 + INTERNAL_REGNUM(10), \
8927 + INTERNAL_REGNUM(11), \
8928 + INTERNAL_REGNUM(12), \
8930 + INTERNAL_REGNUM(7), \
8931 + INTERNAL_REGNUM(6), \
8932 + INTERNAL_REGNUM(5), \
8933 + INTERNAL_REGNUM(4), \
8934 + INTERNAL_REGNUM(3), \
8935 + INTERNAL_REGNUM(2), \
8936 + INTERNAL_REGNUM(1), \
8937 + INTERNAL_REGNUM(0), \
8938 + INTERNAL_FP_REGNUM(15), \
8939 + INTERNAL_FP_REGNUM(14), \
8940 + INTERNAL_FP_REGNUM(13), \
8941 + INTERNAL_FP_REGNUM(12), \
8942 + INTERNAL_FP_REGNUM(11), \
8943 + INTERNAL_FP_REGNUM(10), \
8944 + INTERNAL_FP_REGNUM(9), \
8945 + INTERNAL_FP_REGNUM(8), \
8946 + INTERNAL_FP_REGNUM(7), \
8947 + INTERNAL_FP_REGNUM(6), \
8948 + INTERNAL_FP_REGNUM(5), \
8949 + INTERNAL_FP_REGNUM(4), \
8950 + INTERNAL_FP_REGNUM(3), \
8951 + INTERNAL_FP_REGNUM(2), \
8952 + INTERNAL_FP_REGNUM(1), \
8953 + INTERNAL_FP_REGNUM(0), \
8959 +/** How Values Fit in Registers **/
8962 +A C expression for the number of consecutive hard registers, starting
8963 +at register number REGNO, required to hold a value of mode
8966 +On a machine where all registers are exactly one word, a suitable
8967 +definition of this macro is
8969 +#define HARD_REGNO_NREGS(REGNO, MODE) \
8970 + ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
8973 +#define HARD_REGNO_NREGS(REGNO, MODE) \
8974 + ((unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD -1 ) / UNITS_PER_WORD))
8977 +A C expression that is nonzero if it is permissible to store a value
8978 +of mode MODE in hard register number REGNO (or in several
8979 +registers starting with that one). For a machine where all registers
8980 +are equivalent, a suitable definition is
8982 + #define HARD_REGNO_MODE_OK(REGNO, MODE) 1
8984 +You need not include code to check for the numbers of fixed registers,
8985 +because the allocation mechanism considers them to be always occupied.
8987 +On some machines, double-precision values must be kept in even/odd
8988 +register pairs. You can implement that by defining this macro to reject
8989 +odd register numbers for such modes.
8991 +The minimum requirement for a mode to be OK in a register is that the
8992 +mov[mode] instruction pattern support moves between the
8993 +register and other hard register in the same class and that moving a
8994 +value into the register and back out not alter it.
8996 +Since the same instruction used to move word_mode will work for
8997 +all narrower integer modes, it is not necessary on any machine for
8998 +HARD_REGNO_MODE_OK to distinguish between these modes, provided
8999 +you define patterns movhi, etc., to take advantage of this. This
9000 +is useful because of the interaction between HARD_REGNO_MODE_OK
9001 +and MODES_TIEABLE_P; it is very desirable for all integer modes
9004 +Many machines have special registers for floating point arithmetic.
9005 +Often people assume that floating point machine modes are allowed only
9006 +in floating point registers. This is not true. Any registers that
9007 +can hold integers can safely hold a floating point machine
9008 +mode, whether or not floating arithmetic can be done on it in those
9009 +registers. Integer move instructions can be used to move the values.
9011 +On some machines, though, the converse is true: fixed-point machine
9012 +modes may not go in floating registers. This is true if the floating
9013 +registers normalize any value stored in them, because storing a
9014 +non-floating value there would garble it. In this case,
9015 +HARD_REGNO_MODE_OK should reject fixed-point machine modes in
9016 +floating registers. But if the floating registers do not automatically
9017 +normalize, if you can store any bit pattern in one and retrieve it
9018 +unchanged without a trap, then any machine mode may go in a floating
9019 +register, so you can define this macro to say so.
9021 +The primary significance of special floating registers is rather that
9022 +they are the registers acceptable in floating point arithmetic
9023 +instructions. However, this is of no concern to
9024 +HARD_REGNO_MODE_OK. You handle it by writing the proper
9025 +constraints for those instructions.
9027 +On some machines, the floating registers are especially slow to access,
9028 +so that it is better to store a value in a stack frame than in such a
9029 +register if floating point arithmetic is not being done. As long as the
9030 +floating registers are not in class GENERAL_REGS, they will not
9031 +be used unless some pattern's constraint asks for one.
9033 +#define HARD_REGNO_MODE_OK(REGNO, MODE) avr32_hard_regno_mode_ok(REGNO, MODE)
9036 +A C expression that is nonzero if a value of mode
9037 +MODE1 is accessible in mode MODE2 without copying.
9039 +If HARD_REGNO_MODE_OK(R, MODE1) and
9040 +HARD_REGNO_MODE_OK(R, MODE2) are always the same for
9041 +any R, then MODES_TIEABLE_P(MODE1, MODE2)
9042 +should be nonzero. If they differ for any R, you should define
9043 +this macro to return zero unless some other mechanism ensures the
9044 +accessibility of the value in a narrower mode.
9046 +You should define this macro to return nonzero in as many cases as
9047 +possible since doing so will allow GCC to perform better register
9050 +#define MODES_TIEABLE_P(MODE1, MODE2) \
9051 + (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
9055 +/******************************************************************************
9056 + * Register Classes
9057 + *****************************************************************************/
9060 +An enumeral type that must be defined with all the register class names
9061 +as enumeral values. NO_REGS must be first. ALL_REGS
9062 +must be the last register class, followed by one more enumeral value,
9063 +LIM_REG_CLASSES, which is not a register class but rather
9064 +tells how many classes there are.
9066 +Each register class has a number, which is the value of casting
9067 +the class name to type int. The number serves as an index
9068 +in many of the tables described below.
9080 +The number of distinct register classes, defined as follows:
9081 + #define N_REG_CLASSES (int) LIM_REG_CLASSES
9083 +#define N_REG_CLASSES (int)LIM_REG_CLASSES
9086 +An initializer containing the names of the register classes as C string
9087 +constants. These names are used in writing some of the debugging dumps.
9089 +#define REG_CLASS_NAMES \
9093 + "FLOATING_POINT_REGS", \
9098 +An initializer containing the contents of the register classes, as integers
9099 +which are bit masks. The nth integer specifies the contents of class
9100 +n. The way the integer mask is interpreted is that
9101 +register r is in the class if mask & (1 << r) is 1.
9103 +When the machine has more than 32 registers, an integer does not suffice.
9104 +Then the integers are replaced by sub-initializers, braced groupings containing
9105 +several integers. Each sub-initializer must be suitable as an initializer
9106 +for the type HARD_REG_SET which is defined in hard-reg-set.h.
9107 +In this situation, the first integer in each sub-initializer corresponds to
9108 +registers 0 through 31, the second integer to registers 32 through 63, and
9111 +#define REG_CLASS_CONTENTS { \
9112 + {0x00000000}, /* NO_REGS */ \
9113 + {0x0000FFFF}, /* GENERAL_REGS */ \
9114 + {0xFFFF0000}, /* FP_REGS */ \
9115 + {0x7FFFFFFF}, /* ALL_REGS */ \
9120 +A C expression whose value is a register class containing hard register
9121 +REGNO. In general there is more than one such class; choose a class
9122 +which is minimal, meaning that no smaller class also contains the
9125 +#define REGNO_REG_CLASS(REGNO) ((REGNO < 16) ? GENERAL_REGS : FP_REGS)
9128 +A macro whose definition is the name of the class to which a valid
9129 +base register must belong. A base register is one used in an address
9130 +which is the register value plus a displacement.
9132 +#define BASE_REG_CLASS GENERAL_REGS
9135 +This is a variation of the BASE_REG_CLASS macro which allows
9136 +the selection of a base register in a mode dependent manner. If
9137 +mode is VOIDmode then it should return the same value as
9140 +#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS
9143 +A macro whose definition is the name of the class to which a valid
9144 +index register must belong. An index register is one used in an
9145 +address where its value is either multiplied by a scale factor or
9146 +added to another register (as well as added to a displacement).
9148 +#define INDEX_REG_CLASS BASE_REG_CLASS
9151 +A C expression which defines the machine-dependent operand constraint
9152 +letters for register classes. If CHAR is such a letter, the
9153 +value should be the register class corresponding to it. Otherwise,
9154 +the value should be NO_REGS. The register letter r,
9155 +corresponding to class GENERAL_REGS, will not be passed
9156 +to this macro; you do not need to handle it.
9158 +#define REG_CLASS_FROM_LETTER(CHAR) ((CHAR) == 'f' ? FP_REGS : NO_REGS)
9161 +/* These assume that REGNO is a hard or pseudo reg number.
9162 + They give nonzero only if REGNO is a hard reg of the suitable class
9163 + or a pseudo reg currently allocated to a suitable hard reg.
9164 + Since they use reg_renumber, they are safe only once reg_renumber
9165 + has been allocated, which happens in local-alloc.c. */
9166 +#define TEST_REGNO(R, TEST, VALUE) \
9167 + ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE))
9170 +A C expression which is nonzero if register number num is suitable for use as a base
9171 +register in operand addresses. It may be either a suitable hard register or a pseudo
9172 +register that has been allocated such a hard register.
9174 +#define REGNO_OK_FOR_BASE_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
9177 +A C expression which is nonzero if register number NUM is
9178 +suitable for use as an index register in operand addresses. It may be
9179 +either a suitable hard register or a pseudo register that has been
9180 +allocated such a hard register.
9182 +The difference between an index register and a base register is that
9183 +the index register may be scaled. If an address involves the sum of
9184 +two registers, neither one of them scaled, then either one may be
9185 +labeled the ``base'' and the other the ``index''; but whichever
9186 +labeling is used must fit the machine's constraints of which registers
9187 +may serve in each capacity. The compiler will try both labelings,
9188 +looking for one that is valid, and will reload one or both registers
9189 +only if neither labeling works.
9191 +#define REGNO_OK_FOR_INDEX_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
9194 +A C expression that places additional restrictions on the register class
9195 +to use when it is necessary to copy value X into a register in class
9196 +CLASS. The value is a register class; perhaps CLASS, or perhaps
9197 +another, smaller class. On many machines, the following definition is
9198 +safe: #define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
9200 +Sometimes returning a more restrictive class makes better code. For
9201 +example, on the 68000, when X is an integer constant that is in range
9202 +for a 'moveq' instruction, the value of this macro is always
9203 +DATA_REGS as long as CLASS includes the data registers.
9204 +Requiring a data register guarantees that a 'moveq' will be used.
9206 +If X is a const_double, by returning NO_REGS
9207 +you can force X into a memory constant. This is useful on
9208 +certain machines where immediate floating values cannot be loaded into
9209 +certain kinds of registers.
9211 +#define PREFERRED_RELOAD_CLASS(X, CLASS) CLASS
9216 +A C expression for the maximum number of consecutive registers
9217 +of class CLASS needed to hold a value of mode MODE.
9219 +This is closely related to the macro HARD_REGNO_NREGS. In fact,
9220 +the value of the macro CLASS_MAX_NREGS(CLASS, MODE)
9221 +should be the maximum value of HARD_REGNO_NREGS(REGNO, MODE)
9222 +for all REGNO values in the class CLASS.
9224 +This macro helps control the handling of multiple-word values
9225 +in the reload pass.
9227 +#define CLASS_MAX_NREGS(CLASS, MODE) /* ToDo:fixme */ \
9228 + (unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
9232 + Using CONST_OK_FOR_CONSTRAINT_P instead of CONST_OK_FOR_LETTER_P
9233 + in order to support constraints with more than one letter.
9234 + Only two letters are then used for constant constraints,
9235 + the letter 'K' and the letter 'I'. The constraint starting with
9236 + these letters must consist of four characters. The character following
9237 + 'K' or 'I' must be either 'u' (unsigned) or 's' (signed) to specify
9238 + if the constant is zero or sign extended. The last two characters specify
9239 + the length in bits of the constant. The base constraint letter 'I' means
9240 + that this is a negated constant, meaning that actually -VAL should be
9241 + checked to lie within the valid range instead of VAL which is used when
9242 + 'K' is the base constraint letter.
9246 +#define CONSTRAINT_LEN(C, STR) \
9247 + ( ((C) == 'K' || (C) == 'I') ? 4 : \
9248 + ((C) == 'R') ? 5 : \
9249 + ((C) == 'N' || (C) == 'O' || \
9250 + (C) == 'P' || (C) == 'L') ? -1 : \
9251 + DEFAULT_CONSTRAINT_LEN((C), (STR)) )
9253 +#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR) \
9254 + avr32_const_ok_for_constraint_p(VALUE, C, STR)
9257 +A C expression that defines the machine-dependent operand constraint
9258 +letters that specify particular ranges of const_double values ('G' or 'H').
9260 +If C is one of those letters, the expression should check that
9261 +VALUE, an RTX of code const_double, is in the appropriate
9262 +range and return 1 if so, 0 otherwise. If C is not one of those
9263 +letters, the value should be 0 regardless of VALUE.
9265 +const_double is used for all floating-point constants and for
9266 +DImode fixed-point constants. A given letter can accept either
9267 +or both kinds of values. It can use GET_MODE to distinguish
9268 +between these kinds.
9270 +#define CONST_DOUBLE_OK_FOR_LETTER_P(OP, C) \
9271 + ((C) == 'G' ? avr32_const_double_immediate(OP) : 0)
9274 +A C expression that defines the optional machine-dependent constraint
9275 +letters that can be used to segregate specific types of operands, usually
9276 +memory references, for the target machine. Any letter that is not
9277 +elsewhere defined and not matched by REG_CLASS_FROM_LETTER
9278 +may be used. Normally this macro will not be defined.
9280 +If it is required for a particular target machine, it should return 1
9281 +if VALUE corresponds to the operand type represented by the
9282 +constraint letter C. If C is not defined as an extra
9283 +constraint, the value returned should be 0 regardless of VALUE.
9285 +For example, on the ROMP, load instructions cannot have their output
9286 +in r0 if the memory reference contains a symbolic address. Constraint
9287 +letter 'Q' is defined as representing a memory address that does
9288 +not contain a symbolic address. An alternative is specified with
9289 +a 'Q' constraint on the input and 'r' on the output. The next
9290 +alternative specifies 'm' on the input and a register class that
9291 +does not include r0 on the output.
9293 +#define EXTRA_CONSTRAINT_STR(OP, C, STR) \
9294 + ((C) == 'W' ? avr32_address_operand(OP, GET_MODE(OP)) : \
9295 + (C) == 'R' ? (avr32_indirect_register_operand(OP, GET_MODE(OP)) || \
9296 + (avr32_imm_disp_memory_operand(OP, GET_MODE(OP)) \
9297 + && avr32_const_ok_for_constraint_p( \
9298 + INTVAL(XEXP(XEXP(OP, 0), 1)), \
9299 + (STR)[1], &(STR)[1]))) : \
9300 + (C) == 'S' ? avr32_indexed_memory_operand(OP, GET_MODE(OP)) : \
9301 + (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) : \
9302 + (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) : \
9303 + (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) : \
9307 +#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') || \
9312 +/* Returns nonzero if op is a function SYMBOL_REF which
9313 + can be called using an rcall instruction */
9314 +#define SYMBOL_REF_RCALL_FUNCTION_P(op) \
9315 + ( GET_CODE(op) == SYMBOL_REF \
9316 + && SYMBOL_REF_FUNCTION_P(op) \
9317 + && SYMBOL_REF_LOCAL_P(op) \
9318 + && !SYMBOL_REF_EXTERNAL_P(op) \
9319 + && !TARGET_HAS_ASM_ADDR_PSEUDOS )
9321 +/******************************************************************************
9322 + * Stack Layout and Calling Conventions
9323 + *****************************************************************************/
9325 +/** Basic Stack Layout **/
9328 +Define this macro if pushing a word onto the stack moves the stack
9329 +pointer to a smaller address.
9331 +When we say, ``define this macro if ...,'' it means that the
9332 +compiler checks this macro only with #ifdef so the precise
9333 +definition used does not matter.
9335 +/* pushm decrements SP: *(--SP) <-- Rx */
9336 +#define STACK_GROWS_DOWNWARD
9339 +This macro defines the operation used when something is pushed
9340 +on the stack. In RTL, a push operation will be
9341 +(set (mem (STACK_PUSH_CODE (reg sp))) ...)
9343 +The choices are PRE_DEC, POST_DEC, PRE_INC,
9344 +and POST_INC. Which of these is correct depends on
9345 +the stack direction and on whether the stack pointer points
9346 +to the last item on the stack or whether it points to the
9347 +space for the next item on the stack.
9349 +The default is PRE_DEC when STACK_GROWS_DOWNWARD is
9350 +defined, which is almost always right, and PRE_INC otherwise,
9351 +which is often wrong.
9353 +/* pushm: *(--SP) <-- Rx */
9354 +#define STACK_PUSH_CODE PRE_DEC
9356 +/* Define this to nonzero if the nominal address of the stack frame
9357 + is at the high-address end of the local variables;
9358 + that is, each additional local variable allocated
9359 + goes at a more negative offset in the frame. */
9360 +#define FRAME_GROWS_DOWNWARD 1
9364 +Offset from the frame pointer to the first local variable slot to be allocated.
9366 +If FRAME_GROWS_DOWNWARD, find the next slot's offset by
9367 +subtracting the first slot's length from STARTING_FRAME_OFFSET.
9368 +Otherwise, it is found by adding the length of the first slot to the
9369 +value STARTING_FRAME_OFFSET.
9370 + (i'm not sure if the above is still correct.. had to change it to get
9371 + rid of an overfull. --mew 2feb93 )
9373 +#define STARTING_FRAME_OFFSET 0
9376 +Offset from the stack pointer register to the first location at which
9377 +outgoing arguments are placed. If not specified, the default value of
9378 +zero is used. This is the proper value for most machines.
9380 +If ARGS_GROW_DOWNWARD, this is the offset to the location above
9381 +the first location at which outgoing arguments are placed.
9383 +#define STACK_POINTER_OFFSET 0
9386 +Offset from the argument pointer register to the first argument's
9387 +address. On some machines it may depend on the data type of the
9390 +If ARGS_GROW_DOWNWARD, this is the offset to the location above
9391 +the first argument's address.
9393 +#define FIRST_PARM_OFFSET(FUNDECL) 0
9397 +A C expression whose value is RTL representing the address in a stack
9398 +frame where the pointer to the caller's frame is stored. Assume that
9399 +FRAMEADDR is an RTL expression for the address of the stack frame
9402 +If you don't define this macro, the default is to return the value
9403 +of FRAMEADDR - that is, the stack frame address is also the
9404 +address of the stack word that points to the previous frame.
9406 +#define DYNAMIC_CHAIN_ADDRESS(FRAMEADDR) plus_constant ((FRAMEADDR), 4)
9410 +A C expression whose value is RTL representing the value of the return
9411 +address for the frame COUNT steps up from the current frame, after
9412 +the prologue. FRAMEADDR is the frame pointer of the COUNT
9413 +frame, or the frame pointer of the COUNT - 1 frame if
9414 +RETURN_ADDR_IN_PREVIOUS_FRAME is defined.
9416 +The value of the expression must always be the correct address when
9417 +COUNT is zero, but may be NULL_RTX if there is no way to
9418 +determine the return address of other frames.
9420 +#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) avr32_return_addr(COUNT, FRAMEADDR)
9424 +A C expression whose value is RTL representing the location of the
9425 +incoming return address at the beginning of any function, before the
9426 +prologue. This RTL is either a REG, indicating that the return
9427 +value is saved in 'REG', or a MEM representing a location in
9430 +You only need to define this macro if you want to support call frame
9431 +debugging information like that provided by DWARF 2.
9433 +If this RTL is a REG, you should also define
9434 +DWARF_FRAME_RETURN_COLUMN to DWARF_FRAME_REGNUM (REGNO).
9436 +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
9441 +A C expression whose value is an integer giving the offset, in bytes,
9442 +from the value of the stack pointer register to the top of the stack
9443 +frame at the beginning of any function, before the prologue. The top of
9444 +the frame is defined to be the value of the stack pointer in the
9445 +previous frame, just before the call instruction.
9447 +You only need to define this macro if you want to support call frame
9448 +debugging information like that provided by DWARF 2.
9450 +#define INCOMING_FRAME_SP_OFFSET 0
9453 +/** Exception Handling Support **/
9455 +/* Use setjmp/longjmp for exception handling. */
9456 +#define DWARF2_UNWIND_INFO 0
9457 +#define MUST_USE_SJLJ_EXCEPTIONS 1
9460 +A C expression whose value is the Nth register number used for
9461 +data by exception handlers, or INVALID_REGNUM if fewer than
9462 +N registers are usable.
9464 +The exception handling library routines communicate with the exception
9465 +handlers via a set of agreed upon registers. Ideally these registers
9466 +should be call-clobbered; it is possible to use call-saved registers,
9467 +but may negatively impact code size. The target must support at least
9468 +2 data registers, but should define 4 if there are enough free registers.
9470 +You must define this macro if you want to support call frame exception
9471 +handling like that provided by DWARF 2.
9476 +#define EH_RETURN_DATA_REGNO(N) \
9477 + ((N<3) ? INTERNAL_REGNUM(N+9) : INVALID_REGNUM)
9480 +A C expression whose value is RTL representing a location in which
9481 +to store a stack adjustment to be applied before function return.
9482 +This is used to unwind the stack to an exception handler's call frame.
9483 +It will be assigned zero on code paths that return normally.
9485 +Typically this is a call-clobbered hard register that is otherwise
9486 +untouched by the epilogue, but could also be a stack slot.
9488 +You must define this macro if you want to support call frame exception
9489 +handling like that provided by DWARF 2.
9494 +#define EH_RETURN_STACKADJ_REGNO INTERNAL_REGNUM(8)
9495 +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO)
9498 +A C expression whose value is RTL representing a location in which
9499 +to store the address of an exception handler to which we should
9500 +return. It will not be assigned on code paths that return normally.
9502 +Typically this is the location in the call frame at which the normal
9503 +return address is stored. For targets that return by popping an
9504 +address off the stack, this might be a memory address just below
9505 +the target call frame rather than inside the current call
9506 +frame. EH_RETURN_STACKADJ_RTX will have already been assigned,
9507 +so it may be used to calculate the location of the target call frame.
9509 +Some targets have more complex requirements than storing to an
9510 +address calculable during initial code generation. In that case
9511 +the eh_return instruction pattern should be used instead.
9513 +If you want to support call frame exception handling, you must
9514 +define either this macro or the eh_return instruction pattern.
9517 + We define the eh_return instruction pattern, so this isn't needed.
9519 +/* #define EH_RETURN_HANDLER_RTX gen_rtx_REG(Pmode, RET_REGISTER) */
9522 + This macro chooses the encoding of pointers embedded in the
9523 + exception handling sections. If at all possible, this should be
9524 + defined such that the exception handling section will not require
9525 + dynamic relocations, and so may be read-only.
9527 + code is 0 for data, 1 for code labels, 2 for function
9528 + pointers. global is true if the symbol may be affected by dynamic
9529 + relocations. The macro should return a combination of the DW_EH_PE_*
9530 + defines as found in dwarf2.h.
9532 + If this macro is not defined, pointers will not be encoded but
9533 + represented directly.
9535 +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
9536 + ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0) \
9537 + | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr) \
9538 + | DW_EH_PE_sdata4)
9540 +/* ToDo: The rest of this subsection */
9542 +/** Specifying How Stack Checking is Done **/
9543 +/* ToDo: All in this subsection */
9545 +/** Registers That Address the Stack Frame **/
9548 +The register number of the stack pointer register, which must also be a
9549 +fixed register according to FIXED_REGISTERS. On most machines,
9550 +the hardware determines which register this is.
9552 +/* Using r13 as stack pointer. */
9553 +#define STACK_POINTER_REGNUM INTERNAL_REGNUM(13)
9556 +The register number of the frame pointer register, which is used to
9557 +access automatic variables in the stack frame. On some machines, the
9558 +hardware determines which register this is. On other machines, you can
9559 +choose any register you wish for this purpose.
9562 +#define FRAME_POINTER_REGNUM INTERNAL_REGNUM(7)
9567 +The register number of the arg pointer register, which is used to access
9568 +the function's argument list. On some machines, this is the same as the
9569 +frame pointer register. On some machines, the hardware determines which
9570 +register this is. On other machines, you can choose any register you
9571 +wish for this purpose. If this is not the same register as the frame
9572 +pointer register, then you must mark it as a fixed register according to
9573 +FIXED_REGISTERS, or arrange to be able to eliminate it (see Section
9574 +10.10.5 [Elimination], page 224).
9577 +#define ARG_POINTER_REGNUM INTERNAL_REGNUM(4)
9581 +Register numbers used for passing a function's static chain pointer. If
9582 +register windows are used, the register number as seen by the called
9583 +function is STATIC_CHAIN_INCOMING_REGNUM, while the register
9584 +number as seen by the calling function is STATIC_CHAIN_REGNUM. If
9585 +these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need
9588 +The static chain register need not be a fixed register.
9590 +If the static chain is passed in memory, these macros should not be
9591 +defined; instead, the next two macros should be defined.
9594 +#define STATIC_CHAIN_REGNUM INTERNAL_REGNUM(0)
9597 +/** Eliminating Frame Pointer and Arg Pointer **/
9600 +A C expression which is nonzero if a function must have and use a frame
9601 +pointer. This expression is evaluated in the reload pass. If its value is
9602 +nonzero the function will have a frame pointer.
9604 +The expression can in principle examine the current function and decide
9605 +according to the facts, but on most machines the constant 0 or the
9606 +constant 1 suffices. Use 0 when the machine allows code to be generated
9607 +with no frame pointer, and doing so saves some time or space. Use 1
9608 +when there is no possible advantage to avoiding a frame pointer.
9610 +In certain cases, the compiler does not know how to produce valid code
9611 +without a frame pointer. The compiler recognizes those cases and
9612 +automatically gives the function a frame pointer regardless of what
9613 +FRAME_POINTER_REQUIRED says. You don't need to worry about
9616 +In a function that does not require a frame pointer, the frame pointer
9617 +register can be allocated for ordinary usage, unless you mark it as a
9618 +fixed register. See FIXED_REGISTERS for more information.
9620 +/* We need the frame pointer when compiling for profiling */
9621 +#define FRAME_POINTER_REQUIRED (current_function_profile)
9624 +A C statement to store in the variable DEPTH_VAR the difference
9625 +between the frame pointer and the stack pointer values immediately after
9626 +the function prologue. The value would be computed from information
9627 +such as the result of get_frame_size () and the tables of
9628 +registers regs_ever_live and call_used_regs.
9630 +If ELIMINABLE_REGS is defined, this macro will be not be used and
9631 +need not be defined. Otherwise, it must be defined even if
9632 +FRAME_POINTER_REQUIRED is defined to always be true; in that
9633 +case, you may set DEPTH_VAR to anything.
9635 +#define INITIAL_FRAME_POINTER_OFFSET(DEPTH_VAR) ((DEPTH_VAR) = get_frame_size())
9638 +If defined, this macro specifies a table of register pairs used to
9639 +eliminate unneeded registers that point into the stack frame. If it is not
9640 +defined, the only elimination attempted by the compiler is to replace
9641 +references to the frame pointer with references to the stack pointer.
9643 +The definition of this macro is a list of structure initializations, each
9644 +of which specifies an original and replacement register.
9646 +On some machines, the position of the argument pointer is not known until
9647 +the compilation is completed. In such a case, a separate hard register
9648 +must be used for the argument pointer. This register can be eliminated by
9649 +replacing it with either the frame pointer or the argument pointer,
9650 +depending on whether or not the frame pointer has been eliminated.
9652 +In this case, you might specify:
9653 + #define ELIMINABLE_REGS \
9654 + {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
9655 + {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
9656 + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
9658 +Note that the elimination of the argument pointer with the stack pointer is
9659 +specified first since that is the preferred elimination.
9661 +#define ELIMINABLE_REGS \
9663 + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
9664 + { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
9665 + { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM } \
9669 +A C expression that returns nonzero if the compiler is allowed to try
9670 +to replace register number FROM with register number
9671 +TO. This macro need only be defined if ELIMINABLE_REGS
9672 +is defined, and will usually be the constant 1, since most of the cases
9673 +preventing register elimination are things that the compiler already
9676 +#define CAN_ELIMINATE(FROM, TO) 1
9679 +This macro is similar to INITIAL_FRAME_POINTER_OFFSET. It
9680 +specifies the initial difference between the specified pair of
9681 +registers. This macro must be defined if ELIMINABLE_REGS is
9684 +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
9685 + ((OFFSET) = avr32_initial_elimination_offset(FROM, TO))
9687 +/** Passing Function Arguments on the Stack **/
9691 +A C expression. If nonzero, push insns will be used to pass
9692 +outgoing arguments.
9693 +If the target machine does not have a push instruction, set it to zero.
9694 +That directs GCC to use an alternate strategy: to
9695 +allocate the entire argument block and then store the arguments into
9696 +it. When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too.
9698 +#define PUSH_ARGS 1
9702 +A C expression that is the number of bytes actually pushed onto the
9703 +stack when an instruction attempts to push NPUSHED bytes.
9705 +On some machines, the definition
9707 + #define PUSH_ROUNDING(BYTES) (BYTES)
9709 +will suffice. But on other machines, instructions that appear
9710 +to push one byte actually push two bytes in an attempt to maintain
9711 +alignment. Then the definition should be
9713 + #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1)
9715 +/* Push 4 bytes at the time. */
9716 +#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3)
9719 +A C expression. If nonzero, the maximum amount of space required for
9720 +outgoing arguments will be computed and placed into the variable
9721 +current_function_outgoing_args_size. No space will be pushed
9722 +onto the stack for each call; instead, the function prologue should
9723 +increase the stack frame size by this amount.
9725 +Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper.
9727 +#define ACCUMULATE_OUTGOING_ARGS 0
9733 +A C expression that should indicate the number of bytes of its own
9734 +arguments that a function pops on returning, or 0 if the
9735 +function pops no arguments and the caller must therefore pop them all
9736 +after the function returns.
9738 +FUNDECL is a C variable whose value is a tree node that describes
9739 +the function in question. Normally it is a node of type
9740 +FUNCTION_DECL that describes the declaration of the function.
9741 +From this you can obtain the DECL_ATTRIBUTES of the function.
9743 +FUNTYPE is a C variable whose value is a tree node that
9744 +describes the function in question. Normally it is a node of type
9745 +FUNCTION_TYPE that describes the data type of the function.
9746 +From this it is possible to obtain the data types of the value and
9747 +arguments (if known).
9749 +When a call to a library function is being considered, FUNDECL
9750 +will contain an identifier node for the library function. Thus, if
9751 +you need to distinguish among various library functions, you can do so
9752 +by their names. Note that ``library function'' in this context means
9753 +a function used to perform arithmetic, whose name is known specially
9754 +in the compiler and was not mentioned in the C code being compiled.
9756 +STACK_SIZE is the number of bytes of arguments passed on the
9757 +stack. If a variable number of bytes is passed, it is zero, and
9758 +argument popping will always be the responsibility of the calling function.
9760 +On the VAX, all functions always pop their arguments, so the definition
9761 +of this macro is STACK_SIZE. On the 68000, using the standard
9762 +calling convention, no functions pop their arguments, so the value of
9763 +the macro is always 0 in this case. But an alternative calling
9764 +convention is available in which functions that take a fixed number of
9765 +arguments pop them but other functions (such as printf) pop
9766 +nothing (the caller pops all). When this convention is in use,
9767 +FUNTYPE is examined to determine whether a function takes a fixed
9768 +number of arguments.
9770 +#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
9773 +/*Return true if this function can use a single return instruction*/
9774 +#define USE_RETURN_INSN(ISCOND) avr32_use_return_insn(ISCOND)
9777 +A C expression that should indicate the number of bytes a call sequence
9778 +pops off the stack. It is added to the value of RETURN_POPS_ARGS
9779 +when compiling a function call.
9781 +CUM is the variable in which all arguments to the called function
9782 +have been accumulated.
9784 +On certain architectures, such as the SH5, a call trampoline is used
9785 +that pops certain registers off the stack, depending on the arguments
9786 +that have been passed to the function. Since this is a property of the
9787 +call site, not of the called function, RETURN_POPS_ARGS is not
9790 +#define CALL_POPS_ARGS(CUM) 0
9792 +/* Passing Arguments in Registers */
9795 +A C expression that controls whether a function argument is passed
9796 +in a register, and which register.
9798 +The arguments are CUM, which summarizes all the previous
9799 +arguments; MODE, the machine mode of the argument; TYPE,
9800 +the data type of the argument as a tree node or 0 if that is not known
9801 +(which happens for C support library functions); and NAMED,
9802 +which is 1 for an ordinary argument and 0 for nameless arguments that
9803 +correspond to '...' in the called function's prototype.
9804 +TYPE can be an incomplete type if a syntax error has previously
9807 +The value of the expression is usually either a reg RTX for the
9808 +hard register in which to pass the argument, or zero to pass the
9809 +argument on the stack.
9811 +For machines like the VAX and 68000, where normally all arguments are
9812 +pushed, zero suffices as a definition.
9814 +The value of the expression can also be a parallel RTX. This is
9815 +used when an argument is passed in multiple locations. The mode of the
9816 +parallel should be the mode of the entire argument. The
9817 +parallel holds any number of expr_list pairs; each one
9818 +describes where part of the argument is passed. In each
9819 +expr_list the first operand must be a reg RTX for the hard
9820 +register in which to pass this part of the argument, and the mode of the
9821 +register RTX indicates how large this part of the argument is. The
9822 +second operand of the expr_list is a const_int which gives
9823 +the offset in bytes into the entire argument of where this part starts.
9824 +As a special exception the first expr_list in the parallel
9825 +RTX may have a first operand of zero. This indicates that the entire
9826 +argument is also stored on the stack.
9828 +The last time this macro is called, it is called with MODE == VOIDmode,
9829 +and its result is passed to the call or call_value
9830 +pattern as operands 2 and 3 respectively.
9832 +The usual way to make the ISO library 'stdarg.h' work on a machine
9833 +where some arguments are usually passed in registers, is to cause
9834 +nameless arguments to be passed on the stack instead. This is done
9835 +by making FUNCTION_ARG return 0 whenever NAMED is 0.
9837 +You may use the macro MUST_PASS_IN_STACK (MODE, TYPE)
9838 +in the definition of this macro to determine if this argument is of a
9839 +type that must be passed in the stack. If REG_PARM_STACK_SPACE
9840 +is not defined and FUNCTION_ARG returns nonzero for such an
9841 +argument, the compiler will abort. If REG_PARM_STACK_SPACE is
9842 +defined, the argument will be computed in the stack and then loaded into
9845 +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
9846 + avr32_function_arg(&(CUM), MODE, TYPE, NAMED)
9852 +A C type for declaring a variable that is used as the first argument of
9853 +FUNCTION_ARG and other related values. For some target machines,
9854 +the type int suffices and can hold the number of bytes of
9857 +There is no need to record in CUMULATIVE_ARGS anything about the
9858 +arguments that have been passed on the stack. The compiler has other
9859 +variables to keep track of that. For target machines on which all
9860 +arguments are passed on the stack, there is no need to store anything in
9861 +CUMULATIVE_ARGS; however, the data structure must exist and
9862 +should not be empty, so use int.
9864 +typedef struct avr32_args
9866 + /* Index representing the argument register the current function argument
9869 + /* A mask with bits representing the argument registers: if a bit is set
9870 + then this register is used for an argument */
9872 + /* TRUE if this function has anonymous arguments */
9873 + int uses_anonymous_args;
9874 + /* The size in bytes of the named arguments pushed on the stack */
9875 + int stack_pushed_args_size;
9876 + /* Set to true if this function needs a Return Value Pointer */
9882 +#define FIRST_CUM_REG_INDEX 0
9883 +#define LAST_CUM_REG_INDEX 4
9884 +#define GET_REG_INDEX(CUM) ((CUM)->index)
9885 +#define SET_REG_INDEX(CUM, INDEX) ((CUM)->index = (INDEX));
9886 +#define GET_USED_INDEX(CUM, INDEX) ((CUM)->used_index & (1 << (INDEX)))
9887 +#define SET_USED_INDEX(CUM, INDEX) \
9891 + (CUM)->used_index |= (1 << (INDEX)); \
9894 +#define SET_INDEXES_UNUSED(CUM) ((CUM)->used_index = 0)
9898 + A C statement (sans semicolon) for initializing the variable cum for the
9899 + state at the beginning of the argument list. The variable has type
9900 + CUMULATIVE_ARGS. The value of FNTYPE is the tree node for the data type of
9901 + the function which will receive the args, or 0 if the args are to a compiler
9902 + support library function. For direct calls that are not libcalls, FNDECL
9903 + contain the declaration node of the function. FNDECL is also set when
9904 + INIT_CUMULATIVE_ARGS is used to find arguments for the function being
9905 + compiled. N_NAMED_ARGS is set to the number of named arguments, including a
9906 + structure return address if it is passed as a parameter, when making a call.
9907 + When processing incoming arguments, N_NAMED_ARGS is set to -1.
9909 + When processing a call to a compiler support library function, LIBNAME
9910 + identifies which one. It is a symbol_ref rtx which contains the name of the
9911 + function, as a string. LIBNAME is 0 when an ordinary C function call is
9912 + being processed. Thus, each time this macro is called, either LIBNAME or
9913 + FNTYPE is nonzero, but never both of them at once.
9915 +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
9916 + avr32_init_cumulative_args(&(CUM), FNTYPE, LIBNAME, FNDECL)
9920 +A C statement (sans semicolon) to update the summarizer variable
9921 +CUM to advance past an argument in the argument list. The
9922 +values MODE, TYPE and NAMED describe that argument.
9923 +Once this is done, the variable CUM is suitable for analyzing
9924 +the following argument with FUNCTION_ARG, etc.
9926 +This macro need not do anything if the argument in question was passed
9927 +on the stack. The compiler knows how to track the amount of stack space
9928 +used for arguments without any special help.
9930 +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
9931 + avr32_function_arg_advance(&(CUM), MODE, TYPE, NAMED)
9934 +If defined, a C expression which determines whether, and in which direction,
9935 +to pad out an argument with extra space. The value should be of type
9936 +enum direction: either 'upward' to pad above the argument,
9937 +'downward' to pad below, or 'none' to inhibit padding.
9939 +The amount of padding is always just enough to reach the next
9940 +multiple of FUNCTION_ARG_BOUNDARY; this macro does not control
9943 +This macro has a default definition which is right for most systems.
9944 +For little-endian machines, the default is to pad upward. For
9945 +big-endian machines, the default is to pad downward for an argument of
9946 +constant size shorter than an int, and upward otherwise.
9948 +#define FUNCTION_ARG_PADDING(MODE, TYPE) \
9949 + avr32_function_arg_padding(MODE, TYPE)
9952 + Specify padding for the last element of a block move between registers
9953 + and memory. First is nonzero if this is the only element. Defining
9954 + this macro allows better control of register function parameters on
9955 + big-endian machines, without using PARALLEL rtl. In particular,
9956 + MUST_PASS_IN_STACK need not test padding and mode of types in registers,
9957 + as there is no longer a "wrong" part of a register; For example, a three
9958 + byte aggregate may be passed in the high part of a register if so required.
9960 +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
9961 + avr32_function_arg_padding(MODE, TYPE)
9964 +If defined, a C expression which determines whether the default
9965 +implementation of va_arg will attempt to pad down before reading the
9966 +next argument, if that argument is smaller than its aligned space as
9967 +controlled by PARM_BOUNDARY. If this macro is not defined, all such
9968 +arguments are padded down if BYTES_BIG_ENDIAN is true.
9970 +#define PAD_VARARGS_DOWN \
9971 + (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
9975 +A C expression that is nonzero if REGNO is the number of a hard
9976 +register in which function arguments are sometimes passed. This does
9977 +not include implicit arguments such as the static chain and
9978 +the structure-value address. On many machines, no registers can be
9979 +used for this purpose since all function arguments are pushed on the
9983 + Use r8 - r12 for function arguments.
9985 +#define FUNCTION_ARG_REGNO_P(REGNO) \
9986 + (REGNO >= 3 && REGNO <= 7)
9988 +/* Number of registers used for passing function arguments */
9989 +#define NUM_ARG_REGS 5
9992 +If defined, the order in which arguments are loaded into their
9993 +respective argument registers is reversed so that the last
9994 +argument is loaded first. This macro only affects arguments
9995 +passed in registers.
9997 +/* #define LOAD_ARGS_REVERSED */
9999 +/** How Scalar Function Values Are Returned **/
10001 +/* AVR32 is using r12 as return register. */
10002 +#define RET_REGISTER (15 - 12)
10006 +A C expression to create an RTX representing the place where a library
10007 +function returns a value of mode MODE. If the precise function
10008 +being called is known, FUNC is a tree node
10009 +(FUNCTION_DECL) for it; otherwise, func is a null
10010 +pointer. This makes it possible to use a different value-returning
10011 +convention for specific functions when all their calls are
10014 +Note that "library function" in this context means a compiler
10015 +support routine, used to perform arithmetic, whose name is known
10016 +specially by the compiler and was not mentioned in the C code being
10019 +The definition of LIBRARY_VALUE need not be concerned with aggregate
10020 +data types, because none of the library functions returns such types.
10022 +#define LIBCALL_VALUE(MODE) avr32_libcall_value(MODE)
10025 +A C expression that is nonzero if REGNO is the number of a hard
10026 +register in which the values of called function may come back.
10028 +A register whose use for returning values is limited to serving as the
10029 +second of a pair (for a value of type double, say) need not be
10030 +recognized by this macro. So for most machines, this definition
10032 + #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
10034 +If the machine has register windows, so that the caller and the called
10035 +function use different registers for the return value, this macro
10036 +should recognize only the caller's register numbers.
10039 + When returning a value of mode DImode, r11:r10 is used, else r12 is used.
10041 +#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RET_REGISTER \
10042 + || (REGNO) == INTERNAL_REGNUM(11))
10045 +/** How Large Values Are Returned **/
10049 +Define this macro to be 1 if all structure and union return values must be
10050 +in memory. Since this results in slower code, this should be defined
10051 +only if needed for compatibility with other compilers or with an ABI.
10052 +If you define this macro to be 0, then the conventions used for structure
10053 +and union return values are decided by the RETURN_IN_MEMORY macro.
10055 +If not defined, this defaults to the value 1.
10057 +#define DEFAULT_PCC_STRUCT_RETURN 0
10062 +/** Generating Code for Profiling **/
10065 +A C statement or compound statement to output to FILE some
10066 +assembler code to call the profiling subroutine mcount.
10068 +The details of how mcount expects to be called are determined by
10069 +your operating system environment, not by GCC. To figure them out,
10070 +compile a small program for profiling using the system's installed C
10071 +compiler and look at the assembler code that results.
10073 +Older implementations of mcount expect the address of a counter
10074 +variable to be loaded into some register. The name of this variable is
10075 +'LP' followed by the number LABELNO, so you would generate
10076 +the name using 'LP%d' in a fprintf.
10079 +#ifndef FUNCTION_PROFILER
10080 +#define FUNCTION_PROFILER(FILE, LABELNO) \
10081 + fprintf((FILE), "/* profiler %d */", (LABELNO))
10085 +/*****************************************************************************
10086 + * Trampolines for Nested Functions *
10087 + *****************************************************************************/
10090 +A C statement to output, on the stream FILE, assembler code for a
10091 +block of data that contains the constant parts of a trampoline. This
10092 +code should not include a label - the label is taken care of
10095 +If you do not define this macro, it means no template is needed
10096 +for the target. Do not define this macro on systems where the block move
10097 +code to copy the trampoline into place would be larger than the code
10098 +to generate it on the spot.
10100 +/* ToDo: correct? */
10101 +#define TRAMPOLINE_TEMPLATE(FILE) avr32_trampoline_template(FILE);
10105 +A C expression for the size in bytes of the trampoline, as an integer.
10108 +#define TRAMPOLINE_SIZE 0x0C
10111 +Alignment required for trampolines, in bits.
10113 +If you don't define this macro, the value of BIGGEST_ALIGNMENT
10114 +is used for aligning trampolines.
10116 +#define TRAMPOLINE_ALIGNMENT 16
10119 +A C statement to initialize the variable parts of a trampoline.
10120 +ADDR is an RTX for the address of the trampoline; FNADDR is
10121 +an RTX for the address of the nested function; STATIC_CHAIN is an
10122 +RTX for the static chain value that should be passed to the function
10123 +when it is called.
10125 +#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN) \
10126 + avr32_initialize_trampoline(ADDR, FNADDR, STATIC_CHAIN)
10129 +/******************************************************************************
10130 + * Implicit Calls to Library Routines
10131 + *****************************************************************************/
10133 +/* Tail calling. */
10135 +/* A C expression that evaluates to true if it is ok to perform a sibling
10137 +#define FUNCTION_OK_FOR_SIBCALL(DECL) 0
10139 +#define OVERRIDE_OPTIONS avr32_override_options ()
10141 +#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) avr32_optimization_options (LEVEL, SIZE)
10143 +/******************************************************************************
10144 + * Addressing Modes
10145 + *****************************************************************************/
10148 +A C expression that is nonzero if the machine supports pre-increment,
10149 +pre-decrement, post-increment, or post-decrement addressing respectively.
10152 + AVR32 supports Rp++ and --Rp
10154 +#define HAVE_PRE_INCREMENT 0
10155 +#define HAVE_PRE_DECREMENT 1
10156 +#define HAVE_POST_INCREMENT 1
10157 +#define HAVE_POST_DECREMENT 0
10160 +A C expression that is nonzero if the machine supports pre- or
10161 +post-address side-effect generation involving constants other than
10162 +the size of the memory operand.
10164 +#define HAVE_PRE_MODIFY_DISP 0
10165 +#define HAVE_POST_MODIFY_DISP 0
10168 +A C expression that is nonzero if the machine supports pre- or
10169 +post-address side-effect generation involving a register displacement.
10171 +#define HAVE_PRE_MODIFY_REG 0
10172 +#define HAVE_POST_MODIFY_REG 0
10175 +A C expression that is 1 if the RTX X is a constant which
10176 +is a valid address. On most machines, this can be defined as
10177 +CONSTANT_P (X), but a few machines are more restrictive
10178 +in which constant addresses are supported.
10180 +CONSTANT_P accepts integer-values expressions whose values are
10181 +not explicitly known, such as symbol_ref, label_ref, and
10182 +high expressions and const arithmetic expressions, in
10183 +addition to const_int and const_double expressions.
10185 +#define CONSTANT_ADDRESS_P(X) CONSTANT_P(X)
10188 +A number, the maximum number of registers that can appear in a valid
10189 +memory address. Note that it is up to you to specify a value equal to
10190 +the maximum number that GO_IF_LEGITIMATE_ADDRESS would ever
10193 +#define MAX_REGS_PER_ADDRESS 2
10196 +A C compound statement with a conditional goto LABEL;
10197 +executed if X (an RTX) is a legitimate memory address on the
10198 +target machine for a memory operand of mode MODE.
10200 +It usually pays to define several simpler macros to serve as
10201 +subroutines for this one. Otherwise it may be too complicated to
10204 +This macro must exist in two variants: a strict variant and a
10205 +non-strict one. The strict variant is used in the reload pass. It
10206 +must be defined so that any pseudo-register that has not been
10207 +allocated a hard register is considered a memory reference. In
10208 +contexts where some kind of register is required, a pseudo-register
10209 +with no hard register must be rejected.
10211 +The non-strict variant is used in other passes. It must be defined to
10212 +accept all pseudo-registers in every context where some kind of
10213 +register is required.
10215 +Compiler source files that want to use the strict variant of this
10216 +macro define the macro REG_OK_STRICT. You should use an
10217 +#ifdef REG_OK_STRICT conditional to define the strict variant
10218 +in that case and the non-strict variant otherwise.
10220 +Subroutines to check for acceptable registers for various purposes (one
10221 +for base registers, one for index registers, and so on) are typically
10222 +among the subroutines used to define GO_IF_LEGITIMATE_ADDRESS.
10223 +Then only these subroutine macros need have two variants; the higher
10224 +levels of macros may be the same whether strict or not.
10226 +Normally, constant addresses which are the sum of a symbol_ref
10227 +and an integer are stored inside a const RTX to mark them as
10228 +constant. Therefore, there is no need to recognize such sums
10229 +specifically as legitimate addresses. Normally you would simply
10230 +recognize any const as legitimate.
10232 +Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant
10233 +sums that are not marked with const. It assumes that a naked
10234 +plus indicates indexing. If so, then you must reject such
10235 +naked constant sums as illegitimate addresses, so that none of them will
10236 +be given to PRINT_OPERAND_ADDRESS.
10238 +On some machines, whether a symbolic address is legitimate depends on
10239 +the section that the address refers to. On these machines, define the
10240 +macro ENCODE_SECTION_INFO to store the information into the
10241 +symbol_ref, and then check for it here. When you see a
10242 +const, you will have to look inside it to find the
10243 +symbol_ref in order to determine the section.
10245 +The best way to modify the name string is by adding text to the
10246 +beginning, with suitable punctuation to prevent any ambiguity. Allocate
10247 +the new name in saveable_obstack. You will have to modify
10248 +ASM_OUTPUT_LABELREF to remove and decode the added text and
10249 +output the name accordingly, and define STRIP_NAME_ENCODING to
10250 +access the original name string.
10252 +You can check the information stored here into the symbol_ref in
10253 +the definitions of the macros GO_IF_LEGITIMATE_ADDRESS and
10254 +PRINT_OPERAND_ADDRESS.
10256 +#ifdef REG_OK_STRICT
10257 +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
10260 + if (avr32_legitimate_address(MODE, X, 1)) \
10265 +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
10268 + if (avr32_legitimate_address(MODE, X, 0)) \
10277 +A C compound statement that attempts to replace X with a valid
10278 +memory address for an operand of mode MODE. win will be a
10279 +C statement label elsewhere in the code; the macro definition may use
10281 + GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN);
10283 +to avoid further processing if the address has become legitimate.
10285 +X will always be the result of a call to break_out_memory_refs,
10286 +and OLDX will be the operand that was given to that function to produce
10289 +The code generated by this macro should not alter the substructure of
10290 +X. If it transforms X into a more legitimate form, it
10291 +should assign X (which will always be a C variable) a new value.
10293 +It is not necessary for this macro to come up with a legitimate
10294 +address. The compiler has standard ways of doing so in all cases. In
10295 +fact, it is safe for this macro to do nothing. But often a
10296 +machine-dependent strategy can generate better code.
10298 +#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
10301 + if (GET_CODE(X) == PLUS \
10302 + && GET_CODE(XEXP(X, 0)) == REG \
10303 + && GET_CODE(XEXP(X, 1)) == CONST_INT \
10304 + && !CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(X, 1)), \
10307 + rtx index = force_reg(SImode, XEXP(X, 1)); \
10308 + X = gen_rtx_PLUS( SImode, XEXP(X, 0), index); \
10310 + GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN); \
10316 +A C statement or compound statement with a conditional
10317 +goto LABEL; executed if memory address X (an RTX) can have
10318 +different meanings depending on the machine mode of the memory
10319 +reference it is used for or if the address is valid for some modes
10322 +Autoincrement and autodecrement addresses typically have mode-dependent
10323 +effects because the amount of the increment or decrement is the size
10324 +of the operand being addressed. Some machines have other mode-dependent
10325 +addresses. Many RISC machines have no mode-dependent addresses.
10327 +You may assume that ADDR is a valid address for the machine.
10329 +#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
10332 + if (GET_CODE (ADDR) == POST_INC \
10333 + || GET_CODE (ADDR) == PRE_DEC) \
10339 +A C expression that is nonzero if X is a legitimate constant for
10340 +an immediate operand on the target machine. You can assume that
10341 +X satisfies CONSTANT_P, so you need not check this. In fact,
10342 +'1' is a suitable definition for this macro on machines where
10343 +anything CONSTANT_P is valid.
10345 +#define LEGITIMATE_CONSTANT_P(X) avr32_legitimate_constant_p(X)
10348 +/******************************************************************************
10349 + * Condition Code Status
10350 + *****************************************************************************/
10353 +C code for a data type which is used for declaring the mdep
10354 +component of cc_status. It defaults to int.
10356 +This macro is not used on machines that do not use cc0.
10365 + int cond_exec_cmp_clobbered;
10366 +} avr32_status_reg;
10369 +#define CC_STATUS_MDEP avr32_status_reg
10372 +A C expression to initialize the mdep field to "empty".
10373 +The default definition does nothing, since most machines don't use
10374 +the field anyway. If you want to use the field, you should probably
10375 +define this macro to initialize it.
10377 +This macro is not used on machines that do not use cc0.
10380 +#define CC_STATUS_MDEP_INIT \
10381 + (cc_status.mdep.flags = CC_NONE , cc_status.mdep.cond_exec_cmp_clobbered = 0, cc_status.mdep.value = 0)
10383 +#define FPCC_STATUS_INIT \
10384 + (cc_status.mdep.fpflags = CC_NONE , cc_status.mdep.fpvalue = 0)
10387 +A C compound statement to set the components of cc_status
10388 +appropriately for an insn INSN whose body is EXP. It is
10389 +this macro's responsibility to recognize insns that set the condition
10390 +code as a byproduct of other activity as well as those that explicitly
10393 +This macro is not used on machines that do not use cc0.
10395 +If there are insns that do not set the condition code but do alter
10396 +other machine registers, this macro must check to see whether they
10397 +invalidate the expressions that the condition code is recorded as
10398 +reflecting. For example, on the 68000, insns that store in address
10399 +registers do not set the condition code, which means that usually
10400 +NOTICE_UPDATE_CC can leave cc_status unaltered for such
10401 +insns. But suppose that the previous insn set the condition code
10402 +based on location 'a4@@(102)' and the current insn stores a new
10403 +value in 'a4'. Although the condition code is not changed by
10404 +this, it will no longer be true that it reflects the contents of
10405 +'a4@@(102)'. Therefore, NOTICE_UPDATE_CC must alter
10406 +cc_status in this case to say that nothing is known about the
10407 +condition code value.
10409 +The definition of NOTICE_UPDATE_CC must be prepared to deal
10410 +with the results of peephole optimization: insns whose patterns are
10411 +parallel RTXs containing various reg, mem or
10412 +constants which are just the operands. The RTL structure of these
10413 +insns is not sufficient to indicate what the insns actually do. What
10414 +NOTICE_UPDATE_CC should do when it sees one is just to run
10417 +A possible definition of NOTICE_UPDATE_CC is to call a function
10418 +that looks at an attribute (see Insn Attributes) named, for example,
10419 +'cc'. This avoids having detailed information about patterns in
10420 +two places, the 'md' file and in NOTICE_UPDATE_CC.
10423 +#define NOTICE_UPDATE_CC(EXP, INSN) avr32_notice_update_cc(EXP, INSN)
10428 +/******************************************************************************
10429 + * Describing Relative Costs of Operations
10430 + *****************************************************************************/
10435 +A C expression for the cost of moving data of mode MODE from a
10436 +register in class FROM to one in class TO. The classes are
10437 +expressed using the enumeration values such as GENERAL_REGS. A
10438 +value of 2 is the default; other values are interpreted relative to
10441 +It is not required that the cost always equal 2 when FROM is the
10442 +same as TO; on some machines it is expensive to move between
10443 +registers if they are not general registers.
10445 +If reload sees an insn consisting of a single set between two
10446 +hard registers, and if REGISTER_MOVE_COST applied to their
10447 +classes returns a value of 2, reload does not check to ensure that the
10448 +constraints of the insn are met. Setting a cost of other than 2 will
10449 +allow reload to verify that the constraints are met. You should do this
10450 +if the movm pattern's constraints do not allow such copying.
10452 +#define REGISTER_MOVE_COST(MODE, FROM, TO) \
10453 + ((GET_MODE_SIZE(MODE) <= 4) ? 2: \
10454 + (GET_MODE_SIZE(MODE) <= 8) ? 3: \
10458 +A C expression for the cost of moving data of mode MODE between a
10459 +register of class CLASS and memory; IN is zero if the value
10460 +is to be written to memory, nonzero if it is to be read in. This cost
10461 +is relative to those in REGISTER_MOVE_COST. If moving between
10462 +registers and memory is more expensive than between two registers, you
10463 +should define this macro to express the relative cost.
10465 +If you do not define this macro, GCC uses a default cost of 4 plus
10466 +the cost of copying via a secondary reload register, if one is
10467 +needed. If your machine requires a secondary reload register to copy
10468 +between memory and a register of CLASS but the reload mechanism is
10469 +more complex than copying via an intermediate, define this macro to
10470 +reflect the actual cost of the move.
10472 +GCC defines the function memory_move_secondary_cost if
10473 +secondary reloads are needed. It computes the costs due to copying via
10474 +a secondary register. If your machine copies from memory using a
10475 +secondary register in the conventional way but the default base value of
10476 +4 is not correct for your machine, define this macro to add some other
10477 +value to the result of that function. The arguments to that function
10478 +are the same as to this macro.
10481 + Memory moves are costly
10483 +#define MEMORY_MOVE_COST(MODE, CLASS, IN) \
10484 + (((IN) ? ((GET_MODE_SIZE(MODE) < 4) ? 4 : \
10485 + (GET_MODE_SIZE(MODE) > 8) ? 6 : \
10487 + : ((GET_MODE_SIZE(MODE) > 8) ? 6 : 3)))
10490 +A C expression for the cost of a branch instruction. A value of 1 is
10491 +the default; other values are interpreted relative to that.
10493 + /* Try to use conditionals as much as possible */
10494 +#define BRANCH_COST (TARGET_BRANCH_PRED ? 3 : 4)
10496 +/*A C expression for the maximum number of instructions to execute via conditional
10497 + execution instructions instead of a branch. A value of BRANCH_COST+1 is the default
10498 + if the machine does not use cc0, and 1 if it does use cc0.*/
10499 +#define MAX_CONDITIONAL_EXECUTE 4
10502 +Define this macro as a C expression which is nonzero if accessing less
10503 +than a word of memory (i.e.: a char or a short) is no
10504 +faster than accessing a word of memory, i.e., if such access
10505 +require more than one instruction or if there is no difference in cost
10506 +between byte and (aligned) word loads.
10508 +When this macro is not defined, the compiler will access a field by
10509 +finding the smallest containing object; when it is defined, a fullword
10510 +load will be used if alignment permits. Unless byte accesses are
10511 +faster than word accesses, using word accesses is preferable since it
10512 +may eliminate subsequent memory access if subsequent accesses occur to
10513 +other fields in the same word of the structure, but to different bytes.
10515 +#define SLOW_BYTE_ACCESS 1
10519 +Define this macro if it is as good or better to call a constant
10520 +function address than to call an address kept in a register.
10522 +#define NO_FUNCTION_CSE
10525 +/******************************************************************************
10526 + * Adjusting the Instruction Scheduler
10527 + *****************************************************************************/
10529 +/*****************************************************************************
10530 + * Dividing the Output into Sections (Texts, Data, ...) *
10531 + *****************************************************************************/
10534 +A C expression whose value is a string, including spacing, containing the
10535 +assembler operation that should precede instructions and read-only data.
10536 +Normally "\t.text" is right.
10538 +#define TEXT_SECTION_ASM_OP "\t.text"
10540 +A C statement that switches to the default section containing instructions.
10541 +Normally this is not needed, as simply defining TEXT_SECTION_ASM_OP
10542 +is enough. The MIPS port uses this to sort all functions after all data
10545 +/* #define TEXT_SECTION */
10548 +A C expression whose value is a string, including spacing, containing the
10549 +assembler operation to identify the following data as writable initialized
10550 +data. Normally "\t.data" is right.
10552 +#define DATA_SECTION_ASM_OP "\t.data"
10555 +If defined, a C expression whose value is a string, including spacing,
10556 +containing the assembler operation to identify the following data as
10557 +shared data. If not defined, DATA_SECTION_ASM_OP will be used.
10561 +A C expression whose value is a string, including spacing, containing
10562 +the assembler operation to identify the following data as read-only
10565 +#undef READONLY_DATA_SECTION_ASM_OP
10566 +#define READONLY_DATA_SECTION_ASM_OP \
10567 + ((TARGET_USE_RODATA_SECTION) ? \
10568 + "\t.section\t.rodata" : \
10569 + TEXT_SECTION_ASM_OP )
10573 +If defined, a C expression whose value is a string, including spacing,
10574 +containing the assembler operation to identify the following data as
10575 +uninitialized global data. If not defined, and neither
10576 +ASM_OUTPUT_BSS nor ASM_OUTPUT_ALIGNED_BSS are defined,
10577 +uninitialized global data will be output in the data section if
10578 +-fno-common is passed, otherwise ASM_OUTPUT_COMMON will be
10581 +#define BSS_SECTION_ASM_OP "\t.section\t.bss"
10584 +If defined, a C expression whose value is a string, including spacing,
10585 +containing the assembler operation to identify the following data as
10586 +uninitialized global shared data. If not defined, and
10587 +BSS_SECTION_ASM_OP is, the latter will be used.
10589 +/*#define SHARED_BSS_SECTION_ASM_OP "\trseg\tshared_bbs_section:data:noroot(0)\n"*/
10591 +If defined, a C expression whose value is a string, including spacing,
10592 +containing the assembler operation to identify the following data as
10593 +initialization code. If not defined, GCC will assume such a section does
10596 +#undef INIT_SECTION_ASM_OP
10597 +#define INIT_SECTION_ASM_OP "\t.section\t.init"
10600 +If defined, a C expression whose value is a string, including spacing,
10601 +containing the assembler operation to identify the following data as
10602 +finalization code. If not defined, GCC will assume such a section does
10605 +#undef FINI_SECTION_ASM_OP
10606 +#define FINI_SECTION_ASM_OP "\t.section\t.fini"
10609 +If defined, an ASM statement that switches to a different section
10610 +via SECTION_OP, calls FUNCTION, and switches back to
10611 +the text section. This is used in crtstuff.c if
10612 +INIT_SECTION_ASM_OP or FINI_SECTION_ASM_OP to calls
10613 +to initialization and finalization functions from the init and fini
10614 +sections. By default, this macro uses a simple function call. Some
10615 +ports need hand-crafted assembly code to avoid dependencies on
10616 +registers initialized in the function prologue or to ensure that
10617 +constant pools don't end up too far away in the text section.
10619 +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
10620 + asm ( SECTION_OP "\n" \
10621 + "mcall r6[" USER_LABEL_PREFIX #FUNC "@got]\n" \
10622 + TEXT_SECTION_ASM_OP);
10626 +Define this macro to be an expression with a nonzero value if jump
10627 +tables (for tablejump insns) should be output in the text
10628 +section, along with the assembler instructions. Otherwise, the
10629 +readonly data section is used.
10631 +This macro is irrelevant if there is no separate readonly data section.
10633 +/* Put jump tables in text section if we have caches. Otherwise assume that
10634 + loading data from code memory is slow. */
10635 +#define JUMP_TABLES_IN_TEXT_SECTION \
10636 + (TARGET_CACHES ? 1 : 0)
10639 +/******************************************************************************
10640 + * Position Independent Code (PIC)
10641 + *****************************************************************************/
10643 +#ifndef AVR32_ALWAYS_PIC
10644 +#define AVR32_ALWAYS_PIC 0
10647 +/* GOT is set to r6 */
10648 +#define PIC_OFFSET_TABLE_REGNUM INTERNAL_REGNUM(6)
10651 +A C expression that is nonzero if X is a legitimate immediate
10652 +operand on the target machine when generating position independent code.
10653 +You can assume that X satisfies CONSTANT_P, so you need not
10654 +check this. You can also assume flag_pic is true, so you need not
10655 +check it either. You need not define this macro if all constants
10656 +(including SYMBOL_REF) can be immediate operands when generating
10657 +position independent code.
10659 +/* We can't directly access anything that contains a symbol,
10660 + nor can we indirect via the constant pool. */
10661 +#define LEGITIMATE_PIC_OPERAND_P(X) avr32_legitimate_pic_operand_p(X)
10664 +/* We need to know when we are making a constant pool; this determines
10665 + whether data needs to be in the GOT or can be referenced via a GOT
10667 +extern int making_const_table;
10669 +/******************************************************************************
10670 + * Defining the Output Assembler Language
10671 + *****************************************************************************/
10675 +A C string constant describing how to begin a comment in the target
10676 +assembler language. The compiler assumes that the comment will end at
10677 +the end of the line.
10679 +#define ASM_COMMENT_START "# "
10682 +A C string constant for text to be output before each asm
10683 +statement or group of consecutive ones. Normally this is
10684 +"#APP", which is a comment that has no effect on most
10685 +assemblers but tells the GNU assembler that it must check the lines
10686 +that follow for all valid assembler constructs.
10689 +#define ASM_APP_ON "#APP\n"
10692 +A C string constant for text to be output after each asm
10693 +statement or group of consecutive ones. Normally this is
10694 +"#NO_APP", which tells the GNU assembler to resume making the
10695 +time-saving assumptions that are valid for ordinary compiler output.
10697 +#undef ASM_APP_OFF
10698 +#define ASM_APP_OFF "#NO_APP\n"
10702 +#define FILE_ASM_OP "\t.file\n"
10703 +#define IDENT_ASM_OP "\t.ident\t"
10704 +#define SET_ASM_OP "\t.set\t"
10708 + * Output assembly directives to switch to section name. The section
10709 + * should have attributes as specified by flags, which is a bit mask
10710 + * of the SECTION_* flags defined in 'output.h'. If align is nonzero,
10711 + * it contains an alignment in bytes to be used for the section,
10712 + * otherwise some target default should be used. Only targets that
10713 + * must specify an alignment within the section directive need pay
10714 + * attention to align -- we will still use ASM_OUTPUT_ALIGN.
10716 + * NOTE: This one must not be moved to avr32.c
10718 +#undef TARGET_ASM_NAMED_SECTION
10719 +#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
10723 +You may define this macro as a C expression. You should define the
10724 +expression to have a nonzero value if GCC should output the constant
10725 +pool for a function before the code for the function, or a zero value if
10726 +GCC should output the constant pool after the function. If you do
10727 +not define this macro, the usual case, GCC will output the constant
10728 +pool before the function.
10730 +#define CONSTANT_POOL_BEFORE_FUNCTION 0
10734 +Define this macro as a C expression which is nonzero if the constant
10735 +EXP, of type tree, should be output after the code for a
10736 +function. The compiler will normally output all constants before the
10737 +function; you need not define this macro if this is OK.
10739 +#define CONSTANT_AFTER_FUNCTION_P(EXP) 1
10743 +Define this macro as a C expression which is nonzero if C is
10744 +used as a logical line separator by the assembler. STR points to the
10745 +position in the string where C was found; this can be used if a
10746 +line separator uses multiple characters.
10748 +If you do not define this macro, the default is that only
10749 +the character ';' is treated as a logical line separator.
10751 +#define IS_ASM_LOGICAL_LINE_SEPARATOR(C,STR) (((C) == '\n') || ((C) == ';'))
10754 +/** Output of Uninitialized Variables **/
10757 +A C statement (sans semicolon) to output to the stdio stream
10758 +STREAM the assembler definition of a common-label named
10759 +NAME whose size is SIZE bytes. The variable ROUNDED
10760 +is the size rounded up to whatever alignment the caller wants.
10762 +Use the expression assemble_name(STREAM, NAME) to
10763 +output the name itself; before and after that, output the additional
10764 +assembler syntax for defining the name, and a newline.
10766 +This macro controls how the assembler definitions of uninitialized
10767 +common global variables are output.
10770 +#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
10771 + avr32_asm_output_common(STREAM, NAME, SIZE, ROUNDED)
10774 +#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
10777 + fputs ("\t.comm ", (FILE)); \
10778 + assemble_name ((FILE), (NAME)); \
10779 + fprintf ((FILE), ",%d\n", (SIZE)); \
10784 + * Like ASM_OUTPUT_BSS except takes the required alignment as a
10785 + * separate, explicit argument. If you define this macro, it is used
10786 + * in place of ASM_OUTPUT_BSS, and gives you more flexibility in
10787 + * handling the required alignment of the variable. The alignment is
10788 + * specified as the number of bits.
10790 + * Try to use function asm_output_aligned_bss defined in file varasm.c
10791 + * when defining this macro.
10793 +#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGNMENT) \
10794 + asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGNMENT)
10797 +A C statement (sans semicolon) to output to the stdio stream
10798 +STREAM the assembler definition of a local-common-label named
10799 +NAME whose size is SIZE bytes. The variable ROUNDED
10800 +is the size rounded up to whatever alignment the caller wants.
10802 +Use the expression assemble_name(STREAM, NAME) to
10803 +output the name itself; before and after that, output the additional
10804 +assembler syntax for defining the name, and a newline.
10806 +This macro controls how the assembler definitions of uninitialized
10807 +static variables are output.
10809 +#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
10812 + fputs ("\t.lcomm ", (FILE)); \
10813 + assemble_name ((FILE), (NAME)); \
10814 + fprintf ((FILE), ",%d, %d\n", (SIZE), 2); \
10820 +A C statement (sans semicolon) to output to the stdio stream
10821 +STREAM the assembler definition of a label named NAME.
10822 +Use the expression assemble_name(STREAM, NAME) to
10823 +output the name itself; before and after that, output the additional
10824 +assembler syntax for defining the name, and a newline.
10826 +#define ASM_OUTPUT_LABEL(STREAM, NAME) avr32_asm_output_label(STREAM, NAME)
10828 +/* A C string containing the appropriate assembler directive to
10829 + * specify the size of a symbol, without any arguments. On systems
10830 + * that use ELF, the default (in 'config/elfos.h') is '"\t.size\t"';
10831 + * on other systems, the default is not to define this macro.
10833 + * Define this macro only if it is correct to use the default
10834 + * definitions of ASM_ OUTPUT_SIZE_DIRECTIVE and
10835 + * ASM_OUTPUT_MEASURED_SIZE for your system. If you need your own
10836 + * custom definitions of those macros, or if you do not need explicit
10837 + * symbol sizes at all, do not define this macro.
10839 +#define SIZE_ASM_OP "\t.size\t"
10843 +A C statement (sans semicolon) to output to the stdio stream
10844 +STREAM some commands that will make the label NAME global;
10845 +that is, available for reference from other files. Use the expression
10846 +assemble_name(STREAM, NAME) to output the name
10847 +itself; before and after that, output the additional assembler syntax
10848 +for making that name global, and a newline.
10850 +#define GLOBAL_ASM_OP "\t.globl\t"
10855 +A C expression which evaluates to true if the target supports weak symbols.
10857 +If you don't define this macro, defaults.h provides a default
10858 +definition. If either ASM_WEAKEN_LABEL or ASM_WEAKEN_DECL
10859 +is defined, the default definition is '1'; otherwise, it is
10860 +'0'. Define this macro if you want to control weak symbol support
10861 +with a compiler flag such as -melf.
10863 +#define SUPPORTS_WEAK 1
10866 +A C statement (sans semicolon) to output to the stdio stream
10867 +STREAM a reference in assembler syntax to a label named
10868 +NAME. This should add '_' to the front of the name, if that
10869 +is customary on your operating system, as it is in most Berkeley Unix
10870 +systems. This macro is used in assemble_name.
10872 +#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
10873 + avr32_asm_output_labelref(STREAM, NAME)
10878 +A C expression to assign to OUTVAR (which is a variable of type
10879 +char *) a newly allocated string made from the string
10880 +NAME and the number NUMBER, with some suitable punctuation
10881 +added. Use alloca to get space for the string.
10883 +The string will be used as an argument to ASM_OUTPUT_LABELREF to
10884 +produce an assembler label for an internal static variable whose name is
10885 +NAME. Therefore, the string must be such as to result in valid
10886 +assembler code. The argument NUMBER is different each time this
10887 +macro is executed; it prevents conflicts between similarly-named
10888 +internal static variables in different scopes.
10890 +Ideally this string should not be a valid C identifier, to prevent any
10891 +conflict with the user's own symbols. Most assemblers allow periods
10892 +or percent signs in assembler symbols; putting at least one of these
10893 +between the name and the number will suffice.
10895 +#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER) \
10898 + (OUTVAR) = (char *) alloca (strlen ((NAME)) + 10); \
10899 + sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)); \
10904 +/** Macros Controlling Initialization Routines **/
10908 +If defined, main will not call __main as described above.
10909 +This macro should be defined for systems that control start-up code
10910 +on a symbol-by-symbol basis, such as OSF/1, and should not
10911 +be defined explicitly for systems that support INIT_SECTION_ASM_OP.
10914 + __main is not defined when debugging.
10916 +#define HAS_INIT_SECTION
10919 +/** Output of Assembler Instructions **/
10922 +A C initializer containing the assembler's names for the machine
10923 +registers, each one as a C string constant. This is what translates
10924 +register numbers in the compiler into assembler language.
10927 +#define REGISTER_NAMES \
10948 +A C compound statement to output to stdio stream STREAM the
10949 +assembler syntax for an instruction operand X. X is an
10952 +CODE is a value that can be used to specify one of several ways
10953 +of printing the operand. It is used when identical operands must be
10954 +printed differently depending on the context. CODE comes from
10955 +the '%' specification that was used to request printing of the
10956 +operand. If the specification was just '%digit' then
10957 +CODE is 0; if the specification was '%ltr digit'
10958 +then CODE is the ASCII code for ltr.
10960 +If X is a register, this macro should print the register's name.
10961 +The names can be found in an array reg_names whose type is
10962 +char *[]. reg_names is initialized from REGISTER_NAMES.
10964 +When the machine description has a specification '%punct'
10965 +(a '%' followed by a punctuation character), this macro is called
10966 +with a null pointer for X and the punctuation character for
10969 +#define PRINT_OPERAND(STREAM, X, CODE) avr32_print_operand(STREAM, X, CODE)
10971 +/* A C statement to be executed just prior to the output of
10972 + assembler code for INSN, to modify the extracted operands so
10973 + they will be output differently.
10975 + Here the argument OPVEC is the vector containing the operands
10976 + extracted from INSN, and NOPERANDS is the number of elements of
10977 + the vector which contain meaningful data for this insn.
10978 + The contents of this vector are what will be used to convert the insn
10979 + template into assembler code, so you can change the assembler output
10980 + by changing the contents of the vector. */
10981 +#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
10982 + avr32_final_prescan_insn ((INSN), (OPVEC), (NOPERANDS))
10985 +A C expression which evaluates to true if CODE is a valid
10986 +punctuation character for use in the PRINT_OPERAND macro. If
10987 +PRINT_OPERAND_PUNCT_VALID_P is not defined, it means that no
10988 +punctuation characters (except for the standard one, '%') are used
10991 +#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
10992 + (((CODE) == '?') \
10993 + || ((CODE) == '!'))
10996 +A C compound statement to output to stdio stream STREAM the
10997 +assembler syntax for an instruction operand that is a memory reference
10998 +whose address is X. X is an RTL expression.
11000 +On some machines, the syntax for a symbolic address depends on the
11001 +section that the address refers to. On these machines, define the macro
11002 +ENCODE_SECTION_INFO to store the information into the
11003 +symbol_ref, and then check for it here. (see Assembler Format.)
11005 +#define PRINT_OPERAND_ADDRESS(STREAM, X) avr32_print_operand_address(STREAM, X)
11008 +/** Output of Dispatch Tables **/
11011 + * A C statement to output to the stdio stream stream an assembler
11012 + * pseudo-instruction to generate a difference between two
11013 + * labels. value and rel are the numbers of two internal labels. The
11014 + * definitions of these labels are output using
11015 + * (*targetm.asm_out.internal_label), and they must be printed in the
11016 + * same way here. For example,
11018 + * fprintf (stream, "\t.word L%d-L%d\n",
11021 + * You must provide this macro on machines where the addresses in a
11022 + * dispatch table are relative to the table's own address. If defined,
11023 + * GCC will also use this macro on all machines when producing
11024 + * PIC. body is the body of the ADDR_DIFF_VEC; it is provided so that
11025 + * the mode and flags can be read.
11027 +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
11028 + fprintf(STREAM, "\tbral\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
11031 +This macro should be provided on machines where the addresses
11032 +in a dispatch table are absolute.
11034 +The definition should be a C statement to output to the stdio stream
11035 +STREAM an assembler pseudo-instruction to generate a reference to
11036 +a label. VALUE is the number of an internal label whose
11037 +definition is output using ASM_OUTPUT_INTERNAL_LABEL.
11040 +fprintf(STREAM, "\t.word L%d\n", VALUE)
11043 +#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
11044 + fprintf(STREAM, "\t.long %sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
11046 +/** Assembler Commands for Exception Regions */
11048 +/* ToDo: All of this subsection */
11050 +/** Assembler Commands for Alignment */
11054 +A C statement to output to the stdio stream STREAM an assembler
11055 +command to advance the location counter to a multiple of 2 to the
11056 +POWER bytes. POWER will be a C expression of type int.
11058 +#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
11061 + if ((POWER) != 0) \
11062 + fprintf(STREAM, "\t.align\t%d\n", POWER); \
11067 +Like ASM_OUTPUT_ALIGN, except that the "nop" instruction is used for padding, if
11070 +#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, POWER) \
11071 + fprintf(STREAM, "\t.balignw\t%d, 0xd703\n", (1 << POWER))
11075 +/******************************************************************************
11076 + * Controlling Debugging Information Format
11077 + *****************************************************************************/
11079 +/* How to renumber registers for dbx and gdb. */
11080 +#define DBX_REGISTER_NUMBER(REGNO) ASM_REGNUM (REGNO)
11082 +/* The DWARF 2 CFA column which tracks the return address. */
11083 +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM(LR_REGNUM)
11086 +Define this macro if GCC should produce dwarf version 2 format
11087 +debugging output in response to the -g option.
11089 +To support optional call frame debugging information, you must also
11090 +define INCOMING_RETURN_ADDR_RTX and either set
11091 +RTX_FRAME_RELATED_P on the prologue insns if you use RTL for the
11092 +prologue, or call dwarf2out_def_cfa and dwarf2out_reg_save
11093 +as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if you don't.
11095 +#define DWARF2_DEBUGGING_INFO 1
11098 +#define DWARF2_ASM_LINE_DEBUG_INFO 1
11099 +#define DWARF2_FRAME_INFO 1
11102 +/******************************************************************************
11103 + * Miscellaneous Parameters
11104 + *****************************************************************************/
11109 +An alias for a machine mode name. This is the machine mode that
11110 +elements of a jump-table should have.
11112 +#define CASE_VECTOR_MODE SImode
11115 +Define this macro to be a C expression to indicate when jump-tables
11116 +should contain relative addresses. If jump-tables never contain
11117 +relative addresses, then you need not define this macro.
11119 +#define CASE_VECTOR_PC_RELATIVE 0
11121 +/* Increase the threshold for using table jumps on the UC arch. */
11122 +#define CASE_VALUES_THRESHOLD (TARGET_BRANCH_PRED ? 4 : 7)
11125 +The maximum number of bytes that a single instruction can move quickly
11126 +between memory and registers or between two memory locations.
11128 +#define MOVE_MAX (2*UNITS_PER_WORD)
11131 +/* A C expression that is nonzero if on this machine the number of bits actually used
11132 + for the count of a shift operation is equal to the number of bits needed to represent
11133 + the size of the object being shifted. When this macro is nonzero, the compiler will
11134 + assume that it is safe to omit a sign-extend, zero-extend, and certain bitwise 'and'
11135 + instructions that truncates the count of a shift operation. On machines that have
11136 + instructions that act on bit-fields at variable positions, which may include 'bit test'
11137 + 378 GNU Compiler Collection (GCC) Internals
11138 + instructions, a nonzero SHIFT_COUNT_TRUNCATED also enables deletion of truncations
11139 + of the values that serve as arguments to bit-field instructions.
11140 + If both types of instructions truncate the count (for shifts) and position (for bit-field
11141 + operations), or if no variable-position bit-field instructions exist, you should define
11143 + However, on some machines, such as the 80386 and the 680x0, truncation only applies
11144 + to shift operations and not the (real or pretended) bit-field operations. Define SHIFT_
11145 + COUNT_TRUNCATED to be zero on such machines. Instead, add patterns to the 'md' file
11146 + that include the implied truncation of the shift instructions.
11147 + You need not define this macro if it would always have the value of zero. */
11148 +#define SHIFT_COUNT_TRUNCATED 1
11151 +A C expression which is nonzero if on this machine it is safe to
11152 +convert an integer of INPREC bits to one of OUTPREC
11153 +bits (where OUTPREC is smaller than INPREC) by merely
11154 +operating on it as if it had only OUTPREC bits.
11156 +On many machines, this expression can be 1.
11158 +When TRULY_NOOP_TRUNCATION returns 1 for a pair of sizes for
11159 +modes for which MODES_TIEABLE_P is 0, suboptimal code can result.
11160 +If this is the case, making TRULY_NOOP_TRUNCATION return 0 in
11161 +such cases may improve things.
11163 +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
11166 +An alias for the machine mode for pointers. On most machines, define
11167 +this to be the integer mode corresponding to the width of a hardware
11168 +pointer; SImode on 32-bit machine or DImode on 64-bit machines.
11169 +On some machines you must define this to be one of the partial integer
11170 +modes, such as PSImode.
11172 +The width of Pmode must be at least as large as the value of
11173 +POINTER_SIZE. If it is not equal, you must define the macro
11174 +POINTERS_EXTEND_UNSIGNED to specify how pointers are extended
11177 +#define Pmode SImode
11180 +An alias for the machine mode used for memory references to functions
11181 +being called, in call RTL expressions. On most machines this
11184 +#define FUNCTION_MODE SImode
11187 +#define REG_S_P(x) \
11188 + (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0))))
11191 +/* If defined, modifies the length assigned to instruction INSN as a
11192 + function of the context in which it is used. LENGTH is an lvalue
11193 + that contains the initially computed length of the insn and should
11194 + be updated with the correct length of the insn. */
11195 +#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
11196 + ((LENGTH) = avr32_adjust_insn_length ((INSN), (LENGTH)))
11199 +#define CLZ_DEFINED_VALUE_AT_ZERO(mode, value) \
11200 + (value = 32, (mode == SImode))
11202 +#define CTZ_DEFINED_VALUE_AT_ZERO(mode, value) \
11203 + (value = 32, (mode == SImode))
11205 +#define UNITS_PER_SIMD_WORD UNITS_PER_WORD
11207 +#define STORE_FLAG_VALUE 1
11210 +/* IF-conversion macros. */
11211 +#define IFCVT_MODIFY_INSN( CE_INFO, PATTERN, INSN ) \
11213 + (PATTERN) = avr32_ifcvt_modify_insn (CE_INFO, PATTERN, INSN, &num_true_changes); \
11216 +#define IFCVT_EXTRA_FIELDS \
11217 + int num_cond_clobber_insns; \
11218 + int num_extra_move_insns; \
11219 + rtx extra_move_insns[MAX_CONDITIONAL_EXECUTE]; \
11220 + rtx moved_insns[MAX_CONDITIONAL_EXECUTE];
11222 +#define IFCVT_INIT_EXTRA_FIELDS( CE_INFO ) \
11224 + (CE_INFO)->num_cond_clobber_insns = 0; \
11225 + (CE_INFO)->num_extra_move_insns = 0; \
11229 +#define IFCVT_MODIFY_CANCEL( CE_INFO ) avr32_ifcvt_modify_cancel (CE_INFO, &num_true_changes)
11231 +#define IFCVT_ALLOW_MODIFY_TEST_IN_INSN 1
11232 +#define IFCVT_COND_EXEC_BEFORE_RELOAD (TARGET_COND_EXEC_BEFORE_RELOAD)
11234 +enum avr32_builtins
11236 + AVR32_BUILTIN_MTSR,
11237 + AVR32_BUILTIN_MFSR,
11238 + AVR32_BUILTIN_MTDR,
11239 + AVR32_BUILTIN_MFDR,
11240 + AVR32_BUILTIN_CACHE,
11241 + AVR32_BUILTIN_SYNC,
11242 + AVR32_BUILTIN_SSRF,
11243 + AVR32_BUILTIN_CSRF,
11244 + AVR32_BUILTIN_TLBR,
11245 + AVR32_BUILTIN_TLBS,
11246 + AVR32_BUILTIN_TLBW,
11247 + AVR32_BUILTIN_BREAKPOINT,
11248 + AVR32_BUILTIN_XCHG,
11249 + AVR32_BUILTIN_LDXI,
11250 + AVR32_BUILTIN_BSWAP16,
11251 + AVR32_BUILTIN_BSWAP32,
11252 + AVR32_BUILTIN_COP,
11253 + AVR32_BUILTIN_MVCR_W,
11254 + AVR32_BUILTIN_MVRC_W,
11255 + AVR32_BUILTIN_MVCR_D,
11256 + AVR32_BUILTIN_MVRC_D,
11257 + AVR32_BUILTIN_MULSATHH_H,
11258 + AVR32_BUILTIN_MULSATHH_W,
11259 + AVR32_BUILTIN_MULSATRNDHH_H,
11260 + AVR32_BUILTIN_MULSATRNDWH_W,
11261 + AVR32_BUILTIN_MULSATWH_W,
11262 + AVR32_BUILTIN_MACSATHH_W,
11263 + AVR32_BUILTIN_SATADD_H,
11264 + AVR32_BUILTIN_SATSUB_H,
11265 + AVR32_BUILTIN_SATADD_W,
11266 + AVR32_BUILTIN_SATSUB_W,
11267 + AVR32_BUILTIN_MULWH_D,
11268 + AVR32_BUILTIN_MULNWH_D,
11269 + AVR32_BUILTIN_MACWH_D,
11270 + AVR32_BUILTIN_MACHH_D,
11271 + AVR32_BUILTIN_MUSFR,
11272 + AVR32_BUILTIN_MUSTR,
11273 + AVR32_BUILTIN_SATS,
11274 + AVR32_BUILTIN_SATU,
11275 + AVR32_BUILTIN_SATRNDS,
11276 + AVR32_BUILTIN_SATRNDU
11280 +#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) \
11281 + ((MODE == SFmode) || (MODE == DFmode))
11283 +#define RENAME_LIBRARY_SET ".set"
11285 +/* Make ABI_NAME an alias for __GCC_NAME. */
11286 +#define RENAME_LIBRARY(GCC_NAME, ABI_NAME) \
11287 + __asm__ (".globl\t__avr32_" #ABI_NAME "\n" \
11288 + ".set\t__avr32_" #ABI_NAME \
11289 + ", __" #GCC_NAME "\n");
11291 +/* Give libgcc functions avr32 ABI name. */
11293 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mul64)
11296 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, sdiv64)
11299 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, udiv64)
11302 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, smod64)
11305 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, umod64)
11308 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, lsl64)
11311 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, lsr64)
11314 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, asr64)
11318 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f32_to_s64)
11320 +#ifdef L_fixunssfdi
11321 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f32_to_u64)
11323 +#ifdef L_floatdidf
11324 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, s64_to_f64)
11326 +#ifdef L_floatdisf
11327 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, s64_to_f32)
11332 +++ b/gcc/config/avr32/avr32.md
11334 +;; AVR32 machine description file.
11335 +;; Copyright 2003-2006 Atmel Corporation.
11337 +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
11339 +;; This file is part of GCC.
11341 +;; This program is free software; you can redistribute it and/or modify
11342 +;; it under the terms of the GNU General Public License as published by
11343 +;; the Free Software Foundation; either version 2 of the License, or
11344 +;; (at your option) any later version.
11346 +;; This program is distributed in the hope that it will be useful,
11347 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
11348 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11349 +;; GNU General Public License for more details.
11351 +;; You should have received a copy of the GNU General Public License
11352 +;; along with this program; if not, write to the Free Software
11353 +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
11355 +;; -*- Mode: Scheme -*-
11357 +(define_attr "type" "alu,alu2,alu_sat,mulhh,mulwh,mulww_w,mulww_d,div,machh_w,macww_w,macww_d,branch,call,load,load_rm,store,load2,load4,store2,store4,fmul,fcmps,fcmpd,fcast,fmv,fmvcpu,fldd,fstd,flds,fsts,fstm"
11358 + (const_string "alu"))
11361 +(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,set_z_if_not_v2,bld,compare,cmp_cond_insn,clobber,call_set,fpcompare,from_fpcc"
11362 + (const_string "none"))
11365 +; NB! Keep this in sync with enum architecture_type in avr32.h
11366 +(define_attr "pipeline" "ap,ucr1,ucr2,ucr2nomul,ucr3"
11367 + (const (symbol_ref "avr32_arch->arch_type")))
11369 +; Insn length in bytes
11370 +(define_attr "length" ""
11373 +; Signal if an insn is predicable and hence can be conditionally executed.
11374 +(define_attr "predicable" "no,yes" (const_string "no"))
11376 +;; Uses of UNSPEC in this file:
11378 + [(UNSPEC_PUSHM 0)
11380 + (UNSPEC_UDIVMODSI4_INTERNAL 2)
11381 + (UNSPEC_DIVMODSI4_INTERNAL 3)
11384 + (UNSPEC_MOVSICC 6)
11385 + (UNSPEC_ADDSICC 7)
11386 + (UNSPEC_COND_MI 8)
11387 + (UNSPEC_COND_PL 9)
11388 + (UNSPEC_PIC_SYM 10)
11389 + (UNSPEC_PIC_BASE 11)
11390 + (UNSPEC_STORE_MULTIPLE 12)
11391 + (UNSPEC_STMFP 13)
11392 + (UNSPEC_FPCC_TO_REG 14)
11393 + (UNSPEC_REG_TO_CC 15)
11394 + (UNSPEC_FORCE_MINIPOOL 16)
11397 + (UNSPEC_SATRNDS 19)
11398 + (UNSPEC_SATRNDU 20)
11402 + [(VUNSPEC_EPILOGUE 0)
11403 + (VUNSPEC_CACHE 1)
11406 + (VUNSPEC_BLOCKAGE 4)
11411 + (VUNSPEC_BREAKPOINT 9)
11412 + (VUNSPEC_MTDR 10)
11413 + (VUNSPEC_MFDR 11)
11414 + (VUNSPEC_MVCR 12)
11415 + (VUNSPEC_MVRC 13)
11417 + (VUNSPEC_ALIGN 15)
11418 + (VUNSPEC_POOL_START 16)
11419 + (VUNSPEC_POOL_END 17)
11420 + (VUNSPEC_POOL_4 18)
11421 + (VUNSPEC_POOL_8 19)
11422 + (VUNSPEC_POOL_16 20)
11423 + (VUNSPEC_MUSFR 21)
11424 + (VUNSPEC_MUSTR 22)
11425 + (VUNSPEC_SYNC_CMPXCHG 23)
11426 + (VUNSPEC_SYNC_SET_LOCK_AND_LOAD 24)
11427 + (VUNSPEC_SYNC_STORE_IF_LOCK 25)
11428 + (VUNSPEC_EH_RETURN 26)
11430 + (VUNSPEC_CSRF 28)
11431 + (VUNSPEC_SSRF 29)
11438 + ;; Return Register = R12 = 15 - 12 = 3
11439 + (RETVAL_REGNUM 3)
11440 + ;; SP = R13 = 15 - 13 = 2
11442 + ;; LR = R14 = 15 - 14 = 1
11444 + ;; PC = R15 = 15 - 15 = 0
11446 + ;; FPSR = GENERAL_REGS + 1 = 17
11453 +;;******************************************************************************
11455 +;;******************************************************************************
11457 +;; Integer Modes for basic alu insns
11458 +(define_mode_iterator INTM [SI HI QI])
11459 +(define_mode_attr alu_cc_attr [(SI "set_vncz") (HI "clobber") (QI "clobber")])
11461 +;; Move word modes
11462 +(define_mode_iterator MOVM [SI V2HI V4QI])
11464 +;; For mov/addcc insns
11465 +(define_mode_iterator ADDCC [SI HI QI])
11466 +(define_mode_iterator MOVCC [SF SI HI QI])
11467 +(define_mode_iterator CMP [DI SI HI QI])
11468 +(define_mode_attr store_postfix [(SF ".w") (SI ".w") (HI ".h") (QI ".b")])
11469 +(define_mode_attr load_postfix [(SF ".w") (SI ".w") (HI ".sh") (QI ".ub")])
11470 +(define_mode_attr load_postfix_s [(SI ".w") (HI ".sh") (QI ".sb")])
11471 +(define_mode_attr load_postfix_u [(SI ".w") (HI ".uh") (QI ".ub")])
11472 +(define_mode_attr pred_mem_constraint [(SF "RKu11") (SI "RKu11") (HI "RKu10") (QI "RKu09")])
11473 +(define_mode_attr cmp_constraint [(DI "rKu20") (SI "rKs21") (HI "r") (QI "r")])
11474 +(define_mode_attr cmp_predicate [(DI "register_immediate_operand")
11475 + (SI "register_const_int_operand")
11476 + (HI "register_operand")
11477 + (QI "register_operand")])
11478 +(define_mode_attr cmp_length [(DI "6")
11483 +;; For all conditional insns
11484 +(define_code_iterator any_cond [eq ne gt ge lt le gtu geu ltu leu])
11485 +(define_code_attr cond [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt") (le "le")
11486 + (gtu "hi") (geu "hs") (ltu "lo") (leu "ls")])
11487 +(define_code_attr invcond [(eq "ne") (ne "eq") (gt "le") (ge "lt") (lt "ge") (le "gt")
11488 + (gtu "ls") (geu "lo") (ltu "hs") (leu "hi")])
11490 +;; For logical operations
11491 +(define_code_iterator logical [and ior xor])
11492 +(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")])
11494 +;; Predicable operations with three register operands
11495 +(define_code_iterator predicable_op3 [and ior xor plus minus])
11496 +(define_code_attr predicable_insn3 [(and "and") (ior "or") (xor "eor") (plus "add") (minus "sub")])
11497 +(define_code_attr predicable_commutative3 [(and "%") (ior "%") (xor "%") (plus "%") (minus "")])
11499 +;; Load the predicates
11500 +(include "predicates.md")
11503 +;;******************************************************************************
11504 +;; Automaton pipeline description for avr32
11505 +;;******************************************************************************
11507 +(define_automaton "avr32_ap")
11510 +(define_cpu_unit "is" "avr32_ap")
11511 +(define_cpu_unit "a1,m1,da" "avr32_ap")
11512 +(define_cpu_unit "a2,m2,d" "avr32_ap")
11514 +;;Alu instructions
11515 +(define_insn_reservation "alu_op" 1
11516 + (and (eq_attr "pipeline" "ap")
11517 + (eq_attr "type" "alu"))
11520 +(define_insn_reservation "alu2_op" 2
11521 + (and (eq_attr "pipeline" "ap")
11522 + (eq_attr "type" "alu2"))
11523 + "is,is+a1,a1+a2,a2")
11525 +(define_insn_reservation "alu_sat_op" 2
11526 + (and (eq_attr "pipeline" "ap")
11527 + (eq_attr "type" "alu_sat"))
11531 +;;Mul instructions
11532 +(define_insn_reservation "mulhh_op" 2
11533 + (and (eq_attr "pipeline" "ap")
11534 + (eq_attr "type" "mulhh,mulwh"))
11537 +(define_insn_reservation "mulww_w_op" 3
11538 + (and (eq_attr "pipeline" "ap")
11539 + (eq_attr "type" "mulww_w"))
11540 + "is,m1,m1+m2,m2")
11542 +(define_insn_reservation "mulww_d_op" 5
11543 + (and (eq_attr "pipeline" "ap")
11544 + (eq_attr "type" "mulww_d"))
11545 + "is,m1,m1+m2,m1+m2,m2,m2")
11547 +(define_insn_reservation "div_op" 33
11548 + (and (eq_attr "pipeline" "ap")
11549 + (eq_attr "type" "div"))
11550 + "is,m1,m1*31 + m2*31,m2")
11552 +(define_insn_reservation "machh_w_op" 3
11553 + (and (eq_attr "pipeline" "ap")
11554 + (eq_attr "type" "machh_w"))
11558 +(define_insn_reservation "macww_w_op" 4
11559 + (and (eq_attr "pipeline" "ap")
11560 + (eq_attr "type" "macww_w"))
11564 +(define_insn_reservation "macww_d_op" 6
11565 + (and (eq_attr "pipeline" "ap")
11566 + (eq_attr "type" "macww_d"))
11567 + "is*2,m1,m1+m2,m1+m2,m2")
11569 +;;Bypasses for Mac instructions, because of accumulator cache.
11570 +;;Set latency as low as possible in order to let the compiler let
11571 +;;mul -> mac and mac -> mac combinations which use the same
11572 +;;accumulator cache be placed close together to avoid any
11573 +;;instructions which can ruin the accumulator cache come in between.
11574 +(define_bypass 4 "machh_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
11575 +(define_bypass 5 "macww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
11576 +(define_bypass 7 "macww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
11578 +(define_bypass 3 "mulhh_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
11579 +(define_bypass 4 "mulww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
11580 +(define_bypass 6 "mulww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
11583 +;;Bypasses for all mul/mac instructions followed by an instruction
11584 +;;which reads the output AND writes the result to the same register.
11585 +;;This will generate an Write After Write hazard which gives an
11586 +;;extra cycle before the result is ready.
11587 +(define_bypass 0 "machh_w_op" "machh_w_op" "avr32_valid_macmac_bypass")
11588 +(define_bypass 0 "macww_w_op" "macww_w_op" "avr32_valid_macmac_bypass")
11589 +(define_bypass 0 "macww_d_op" "macww_d_op" "avr32_valid_macmac_bypass")
11591 +(define_bypass 0 "mulhh_op" "machh_w_op" "avr32_valid_mulmac_bypass")
11592 +(define_bypass 0 "mulww_w_op" "macww_w_op" "avr32_valid_mulmac_bypass")
11593 +(define_bypass 0 "mulww_d_op" "macww_d_op" "avr32_valid_mulmac_bypass")
11595 +;;Branch and call instructions
11596 +;;We assume that all branches and rcalls are predicted correctly :-)
11597 +;;while calls use a lot of cycles.
11598 +(define_insn_reservation "branch_op" 0
11599 + (and (eq_attr "pipeline" "ap")
11600 + (eq_attr "type" "branch"))
11603 +(define_insn_reservation "call_op" 10
11604 + (and (eq_attr "pipeline" "ap")
11605 + (eq_attr "type" "call"))
11609 +;;Load store instructions
11610 +(define_insn_reservation "load_op" 2
11611 + (and (eq_attr "pipeline" "ap")
11612 + (eq_attr "type" "load"))
11615 +(define_insn_reservation "load_rm_op" 3
11616 + (and (eq_attr "pipeline" "ap")
11617 + (eq_attr "type" "load_rm"))
11621 +(define_insn_reservation "store_op" 0
11622 + (and (eq_attr "pipeline" "ap")
11623 + (eq_attr "type" "store"))
11627 +(define_insn_reservation "load_double_op" 3
11628 + (and (eq_attr "pipeline" "ap")
11629 + (eq_attr "type" "load2"))
11632 +(define_insn_reservation "load_quad_op" 4
11633 + (and (eq_attr "pipeline" "ap")
11634 + (eq_attr "type" "load4"))
11635 + "is,da,da+d,da+d,d")
11637 +(define_insn_reservation "store_double_op" 0
11638 + (and (eq_attr "pipeline" "ap")
11639 + (eq_attr "type" "store2"))
11643 +(define_insn_reservation "store_quad_op" 0
11644 + (and (eq_attr "pipeline" "ap")
11645 + (eq_attr "type" "store4"))
11646 + "is,da,da+d,da+d,d")
11648 +;;For store the operand to write to memory is read in d and
11649 +;;the real latency between any instruction and a store is therefore
11650 +;;one less than for the instructions which reads the operands in the first
11651 +;;execution stage
11652 +(define_bypass 2 "load_double_op" "store_double_op" "avr32_store_bypass")
11653 +(define_bypass 3 "load_quad_op" "store_quad_op" "avr32_store_bypass")
11654 +(define_bypass 1 "load_op" "store_op" "avr32_store_bypass")
11655 +(define_bypass 2 "load_rm_op" "store_op" "avr32_store_bypass")
11656 +(define_bypass 1 "alu_sat_op" "store_op" "avr32_store_bypass")
11657 +(define_bypass 1 "alu2_op" "store_op" "avr32_store_bypass")
11658 +(define_bypass 1 "mulhh_op" "store_op" "avr32_store_bypass")
11659 +(define_bypass 2 "mulww_w_op" "store_op" "avr32_store_bypass")
11660 +(define_bypass 4 "mulww_d_op" "store_op" "avr32_store_bypass" )
11661 +(define_bypass 2 "machh_w_op" "store_op" "avr32_store_bypass")
11662 +(define_bypass 3 "macww_w_op" "store_op" "avr32_store_bypass")
11663 +(define_bypass 5 "macww_d_op" "store_op" "avr32_store_bypass")
11666 +; Bypass for load double operation. If only the first loaded word is needed
11667 +; then the latency is 2
11668 +(define_bypass 2 "load_double_op"
11669 + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
11670 + mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
11671 + "avr32_valid_load_double_bypass")
11673 +; Bypass for load quad operation. If only the first or second loaded word is needed
11674 +; we set the latency to 2
11675 +(define_bypass 2 "load_quad_op"
11676 + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
11677 + mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
11678 + "avr32_valid_load_quad_bypass")
11681 +;;******************************************************************************
11682 +;; End of Automaton pipeline description for avr32
11683 +;;******************************************************************************
11686 + [(match_operator 0 "avr32_comparison_operator"
11687 + [(match_operand:CMP 1 "register_operand" "r")
11688 + (match_operand:CMP 2 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])]
11689 + "TARGET_V2_INSNS"
11694 + [(match_operator 0 "avr32_comparison_operator"
11695 + [(and:SI (match_operand:SI 1 "register_operand" "r")
11696 + (match_operand:SI 2 "one_bit_set_operand" "i"))
11698 + "TARGET_V2_INSNS"
11702 +;;=============================================================================
11704 +;;-----------------------------------------------------------------------------
11707 +;;== char - 8 bits ============================================================
11708 +(define_expand "movqi"
11709 + [(set (match_operand:QI 0 "nonimmediate_operand" "")
11710 + (match_operand:QI 1 "general_operand" ""))]
11713 + if ( can_create_pseudo_p () ){
11714 + if (GET_CODE (operands[1]) == MEM && optimize){
11715 + rtx reg = gen_reg_rtx (SImode);
11717 + emit_insn (gen_zero_extendqisi2 (reg, operands[1]));
11718 + operands[1] = gen_lowpart (QImode, reg);
11721 + /* One of the ops has to be in a register. */
11722 + if (GET_CODE (operands[0]) == MEM)
11723 + operands[1] = force_reg (QImode, operands[1]);
11728 +(define_insn "*movqi_internal"
11729 + [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
11730 + (match_operand:QI 1 "general_operand" "rKs08,m,r,i"))]
11731 + "register_operand (operands[0], QImode)
11732 + || register_operand (operands[1], QImode)"
11738 + [(set_attr "length" "2,4,4,4")
11739 + (set_attr "type" "alu,load_rm,store,alu")])
11743 +;;== short - 16 bits ==========================================================
11744 +(define_expand "movhi"
11745 + [(set (match_operand:HI 0 "nonimmediate_operand" "")
11746 + (match_operand:HI 1 "general_operand" ""))]
11749 + if ( can_create_pseudo_p () ){
11750 + if (GET_CODE (operands[1]) == MEM && optimize){
11751 + rtx reg = gen_reg_rtx (SImode);
11753 + emit_insn (gen_extendhisi2 (reg, operands[1]));
11754 + operands[1] = gen_lowpart (HImode, reg);
11757 + /* One of the ops has to be in a register. */
11758 + if (GET_CODE (operands[0]) == MEM)
11759 + operands[1] = force_reg (HImode, operands[1]);
11765 +(define_insn "*movhi_internal"
11766 + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
11767 + (match_operand:HI 1 "general_operand" "rKs08,m,r,i"))]
11768 + "register_operand (operands[0], HImode)
11769 + || register_operand (operands[1], HImode)"
11775 + [(set_attr "length" "2,4,4,4")
11776 + (set_attr "type" "alu,load_rm,store,alu")])
11779 +;;== int - 32 bits ============================================================
11781 +(define_expand "movmisalignsi"
11782 + [(set (match_operand:SI 0 "nonimmediate_operand" "")
11783 + (match_operand:SI 1 "nonimmediate_operand" ""))]
11784 + "TARGET_UNALIGNED_WORD"
11790 +(define_expand "mov<mode>"
11791 + [(set (match_operand:MOVM 0 "register_operand" "")
11792 + (match_operand:MOVM 1 "general_operand" ""))]
11796 + /* One of the ops has to be in a register. */
11797 + if (GET_CODE (operands[0]) == MEM)
11798 + operands[1] = force_reg (<MODE>mode, operands[1]);
11801 + /* Check for out of range immediate constants as these may
11802 + occur during reloading, since it seems like reload does
11803 + not check if the immediate is legitimate. Don't know if
11804 + this is a bug? */
11805 + if ( reload_in_progress
11806 + && avr32_imm_in_const_pool
11807 + && GET_CODE(operands[1]) == CONST_INT
11808 + && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){
11809 + operands[1] = force_const_mem(SImode, operands[1]);
11812 + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
11813 + && !avr32_legitimate_pic_operand_p(operands[1]) )
11814 + operands[1] = legitimize_pic_address (operands[1], <MODE>mode,
11815 + (can_create_pseudo_p () ? 0: operands[0]));
11816 + else if ( flag_pic && avr32_address_operand(operands[1], GET_MODE(operands[1])) )
11817 + /* If we have an address operand then this function uses the pic register. */
11818 + current_function_uses_pic_offset_table = 1;
11823 +(define_insn "mov<mode>_internal"
11824 + [(set (match_operand:MOVM 0 "nonimmediate_operand" "=r, r, r,r,r,m,r")
11825 + (match_operand:MOVM 1 "general_operand" "rKs08,Ks21,J,n,m,r,W"))]
11826 + "register_operand (operands[0], <MODE>mode)
11827 + || register_operand (operands[1], <MODE>mode)"
11829 + switch (which_alternative) {
11831 + case 1: return "mov\t%0, %1";
11833 + if ( TARGET_V2_INSNS )
11834 + return "movh\t%0, hi(%1)";
11835 + /* Fallthrough */
11836 + case 3: return "mov\t%0, lo(%1)\;orh\t%0,hi(%1)";
11838 + if ( (REG_P(XEXP(operands[1], 0))
11839 + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
11840 + || (GET_CODE(XEXP(operands[1], 0)) == PLUS
11841 + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
11842 + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
11843 + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
11844 + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
11845 + return "lddsp\t%0, %1";
11846 + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
11847 + return "lddpc\t%0, %1";
11849 + return "ld.w\t%0, %1";
11851 + if ( (REG_P(XEXP(operands[0], 0))
11852 + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
11853 + || (GET_CODE(XEXP(operands[0], 0)) == PLUS
11854 + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
11855 + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
11856 + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
11857 + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
11858 + return "stdsp\t%0, %1";
11860 + return "st.w\t%0, %1";
11862 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
11863 + return "lda.w\t%0, %1";
11865 + return "ld.w\t%0, r6[%1@got]";
11871 + [(set_attr "length" "2,4,4,8,4,4,8")
11872 + (set_attr "type" "alu,alu,alu,alu2,load,store,load")
11873 + (set_attr "cc" "none,none,set_z_if_not_v2,set_z,none,none,clobber")])
11878 +;; These instructions are for loading constants which cannot be loaded
11879 +;; directly from the constant pool because the offset is too large
11880 +;; high and lo_sum are used even though for our case it should be
11881 +;; low and high sum :-)
11882 +(define_insn "mov_symbol_lo"
11883 + [(set (match_operand:SI 0 "register_operand" "=r")
11884 + (high:SI (match_operand:SI 1 "immediate_operand" "i" )))]
11886 + "mov\t%0, lo(%1)"
11887 + [(set_attr "type" "alu")
11888 + (set_attr "length" "4")]
11891 +(define_insn "add_symbol_hi"
11892 + [(set (match_operand:SI 0 "register_operand" "=r")
11893 + (lo_sum:SI (match_dup 0)
11894 + (match_operand:SI 1 "immediate_operand" "i" )))]
11896 + "orh\t%0, hi(%1)"
11897 + [(set_attr "type" "alu")
11898 + (set_attr "length" "4")]
11903 +;; When generating pic, we need to load the symbol offset into a register.
11904 +;; So that the optimizer does not confuse this with a normal symbol load
11905 +;; we use an unspec. The offset will be loaded from a constant pool entry,
11906 +;; since that is the only type of relocation we can use.
11907 +(define_insn "pic_load_addr"
11908 + [(set (match_operand:SI 0 "register_operand" "=r")
11909 + (unspec:SI [(match_operand:SI 1 "" "")] UNSPEC_PIC_SYM))]
11910 + "flag_pic && CONSTANT_POOL_ADDRESS_P(XEXP(operands[1], 0))"
11912 + [(set_attr "type" "load")
11913 + (set_attr "length" "4")]
11916 +(define_insn "pic_compute_got_from_pc"
11917 + [(set (match_operand:SI 0 "register_operand" "+r")
11918 + (unspec:SI [(minus:SI (pc)
11919 + (match_dup 0))] UNSPEC_PIC_BASE))
11920 + (use (label_ref (match_operand 1 "" "")))]
11923 + (*targetm.asm_out.internal_label) (asm_out_file, "L",
11924 + CODE_LABEL_NUMBER (operands[1]));
11925 + return \"rsub\t%0, pc\";
11927 + [(set_attr "cc" "clobber")
11928 + (set_attr "length" "2")]
11931 +;;== long long int - 64 bits ==================================================
11933 +(define_expand "movdi"
11934 + [(set (match_operand:DI 0 "nonimmediate_operand" "")
11935 + (match_operand:DI 1 "general_operand" ""))]
11939 + /* One of the ops has to be in a register. */
11940 + if (GET_CODE (operands[0]) != REG)
11941 + operands[1] = force_reg (DImode, operands[1]);
11946 +(define_insn_and_split "*movdi_internal"
11947 + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r, r, r,r,r,m")
11948 + (match_operand:DI 1 "general_operand" "r, Ks08,Ks21,G,n,m,r"))]
11949 + "register_operand (operands[0], DImode)
11950 + || register_operand (operands[1], DImode)"
11952 + switch (which_alternative ){
11960 + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
11961 + return "ld.d\t%0, pc[%1 - .]";
11963 + return "ld.d\t%0, %1";
11965 + return "st.d\t%0, %1";
11970 +;; Lets split all reg->reg or imm->reg transfers into two SImode transfers
11971 + "reload_completed &&
11972 + (REG_P (operands[0]) &&
11973 + (REG_P (operands[1])
11974 + || GET_CODE (operands[1]) == CONST_INT
11975 + || GET_CODE (operands[1]) == CONST_DOUBLE))"
11976 + [(set (match_dup 0) (match_dup 1))
11977 + (set (match_dup 2) (match_dup 3))]
11979 + operands[2] = gen_highpart (SImode, operands[0]);
11980 + operands[0] = gen_lowpart (SImode, operands[0]);
11981 + if ( REG_P(operands[1]) ){
11982 + operands[3] = gen_highpart(SImode, operands[1]);
11983 + operands[1] = gen_lowpart(SImode, operands[1]);
11984 + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
11985 + || GET_CODE(operands[1]) == CONST_INT ){
11986 + rtx split_const[2];
11987 + avr32_split_const_expr (DImode, SImode, operands[1], split_const);
11988 + operands[3] = split_const[1];
11989 + operands[1] = split_const[0];
11991 + internal_error("Illegal operand[1] for movdi split!");
11995 + [(set_attr "length" "*,*,*,*,*,4,4")
11996 + (set_attr "type" "*,*,*,*,*,load2,store2")
11997 + (set_attr "cc" "*,*,*,*,*,none,none")])
12000 +;;== 128 bits ==================================================
;; movti expander: besides forcing one operand into a register, it rewrites
;; POST_INC destinations and PRE_DEC sources into a plain MEM move plus an
;; explicit addsi3 pointer adjustment (the multi-word insn below presumably
;; cannot encode those side-effect addressing modes -- elided lines, confirm).
12001 +(define_expand "movti"
12002 + [(set (match_operand:TI 0 "nonimmediate_operand" "")
12003 + (match_operand:TI 1 "nonimmediate_operand" ""))]
12007 + /* One of the ops has to be in a register. */
12008 + if (GET_CODE (operands[0]) != REG)
12009 + operands[1] = force_reg (TImode, operands[1]);
12011 + /* We must fix any pre_dec for loads and post_inc stores */
12012 + if ( GET_CODE (operands[0]) == MEM
12013 + && GET_CODE (XEXP(operands[0],0)) == POST_INC ){
12014 + emit_move_insn(gen_rtx_MEM(TImode, XEXP(XEXP(operands[0],0),0)), operands[1]);
12015 + emit_insn(gen_addsi3(XEXP(XEXP(operands[0],0),0), XEXP(XEXP(operands[0],0),0), GEN_INT(GET_MODE_SIZE(TImode))));
12019 + if ( GET_CODE (operands[1]) == MEM
12020 + && GET_CODE (XEXP(operands[1],0)) == PRE_DEC ){
12021 + emit_insn(gen_addsi3(XEXP(XEXP(operands[1],0),0), XEXP(XEXP(operands[1],0),0), GEN_INT(-GET_MODE_SIZE(TImode))));
12022 + emit_move_insn(operands[0], gen_rtx_MEM(TImode, XEXP(XEXP(operands[1],0),0)));
;; 128-bit move insn: ldm/stm for multi-register memory transfers, and a pair
;; of PC-relative ld.d instructions for constant-pool loads (%U0/%B0 are
;; presumably the upper/lower double-word halves -- output modifier, confirm).
12028 +(define_insn_and_split "*movti_internal"
12029 + [(set (match_operand:TI 0 "avr32_movti_dst_operand" "=r,&r, r, <RKu00,r,r")
12030 + (match_operand:TI 1 "avr32_movti_src_operand" " r,RKu00>,RKu00,r, n,T"))]
12031 + "(register_operand (operands[0], TImode)
12032 + || register_operand (operands[1], TImode))"
12034 + switch (which_alternative ){
12040 + return "ldm\t%p1, %0";
12042 + return "stm\t%p0, %1";
12044 + return "ld.d\t%U0, pc[%1 - .]\;ld.d\t%B0, pc[%1 - . + 8]";
;; Split after reload into two DImode moves. The extra MEM cases exist so a
;; load whose pointer register is inside the destination register list is
;; rewritten as two loads ordered to avoid clobbering the pointer.
12048 + "reload_completed &&
12049 + (REG_P (operands[0]) &&
12050 + (REG_P (operands[1])
12051 + /* If this is a load from the constant pool we split it into
12052 + two double loads. */
12053 + || (GET_CODE (operands[1]) == MEM
12054 + && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
12055 + && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
12056 + /* If this is a load where the pointer register is a part
12057 + of the register list, we must split it into two double
12058 + loads in order for it to be exception safe. */
12059 + || (GET_CODE (operands[1]) == MEM
12060 + && register_operand (XEXP (operands[1], 0), SImode)
12061 + && reg_overlap_mentioned_p (operands[0], XEXP (operands[1], 0)))
12062 + || GET_CODE (operands[1]) == CONST_INT
12063 + || GET_CODE (operands[1]) == CONST_DOUBLE))"
12064 + [(set (match_dup 0) (match_dup 1))
12065 + (set (match_dup 2) (match_dup 3))]
;; operands[0]/[1] become the first DImode half, operands[2]/[3] the second.
12067 + operands[2] = simplify_gen_subreg ( DImode, operands[0],
12069 + operands[0] = simplify_gen_subreg ( DImode, operands[0],
12071 + if ( REG_P(operands[1]) ){
12072 + operands[3] = simplify_gen_subreg ( DImode, operands[1],
12074 + operands[1] = simplify_gen_subreg ( DImode, operands[1],
12076 + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
12077 + || GET_CODE(operands[1]) == CONST_INT ){
12078 + rtx split_const[2];
12079 + avr32_split_const_expr (TImode, DImode, operands[1], split_const);
12080 + operands[3] = split_const[1];
12081 + operands[1] = split_const[0];
;; Constant-pool load: fetch the pool constant, split it, and re-pool each
;; DImode half so both halves are themselves constant-pool references.
12082 + } else if (avr32_const_pool_ref_operand (operands[1], GET_MODE(operands[1]))){
12083 + rtx split_const[2];
12084 + rtx cop = avoid_constant_pool_reference (operands[1]);
12085 + if (operands[1] == cop)
12086 + cop = get_pool_constant (XEXP (operands[1], 0));
12087 + avr32_split_const_expr (TImode, DImode, cop, split_const);
12088 + operands[3] = force_const_mem (DImode, split_const[1]);
12089 + operands[1] = force_const_mem (DImode, split_const[0]);
;; Plain register-pointer load: address the two halves via ptr and ptr+offset.
12091 + rtx ptr_reg = XEXP (operands[1], 0);
12092 + operands[1] = gen_rtx_MEM (DImode,
12093 + gen_rtx_PLUS ( SImode,
12096 + operands[3] = gen_rtx_MEM (DImode,
12099 + /* Check if the first load will clobber the pointer.
12100 + If so, we must switch the order of the operations. */
12101 + if ( reg_overlap_mentioned_p (operands[0], ptr_reg) )
12103 + /* We need to switch the order of the operations
12104 + so that the pointer register does not get clobbered
12105 + after the first double word load. */
12107 + tmp = operands[0];
12108 + operands[0] = operands[2];
12109 + operands[2] = tmp;
12110 + tmp = operands[1];
12111 + operands[1] = operands[3];
12112 + operands[3] = tmp;
12118 + [(set_attr "length" "*,*,4,4,*,8")
12119 + (set_attr "type" "*,*,load4,store4,*,load4")])
12122 +;;== float - 32 bits ==========================================================
;; movsf expander: same register-forcing policy as the integer movM expanders.
12123 +(define_expand "movsf"
12124 + [(set (match_operand:SF 0 "nonimmediate_operand" "")
12125 + (match_operand:SF 1 "general_operand" ""))]
12130 + /* One of the ops has to be in a register. */
12131 + if (GET_CODE (operands[0]) != REG)
12132 + operands[1] = force_reg (SFmode, operands[1]);
;; 32-bit float move. Float immediates are materialized from their target bit
;; pattern (real_to_target): a single movh when only the high half is set and
;; the V2 ISA is available, otherwise a mov/orh pair. SP-relative accesses
;; with a word-aligned offset <= 0x1FC use the short lddsp/stdsp forms, and
;; constant-pool references use PC-relative lddpc.
12136 +(define_insn "*movsf_internal"
12137 + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,r,m")
12138 + (match_operand:SF 1 "general_operand" "r, G,F,m,r"))]
12139 + "(register_operand (operands[0], SFmode)
12140 + || register_operand (operands[1], SFmode))"
12142 + switch (which_alternative) {
12144 + case 1: return "mov\t%0, %1";
12147 + HOST_WIDE_INT target_float[2];
12148 + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (operands[1]), SFmode);
12149 + if ( TARGET_V2_INSNS
12150 + && avr32_hi16_immediate_operand (GEN_INT (target_float[0]), VOIDmode) )
12151 + return "movh\t%0, hi(%1)";
12153 + return "mov\t%0, lo(%1)\;orh\t%0, hi(%1)";
;; Load: prefer lddsp for stack-pointer-relative slots, lddpc for the pool.
12156 + if ( (REG_P(XEXP(operands[1], 0))
12157 + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
12158 + || (GET_CODE(XEXP(operands[1], 0)) == PLUS
12159 + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
12160 + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
12161 + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
12162 + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
12163 + return "lddsp\t%0, %1";
12164 + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
12165 + return "lddpc\t%0, %1";
12167 + return "ld.w\t%0, %1";
;; Store: mirror of the load case, stdsp for short SP-relative stores.
12169 + if ( (REG_P(XEXP(operands[0], 0))
12170 + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
12171 + || (GET_CODE(XEXP(operands[0], 0)) == PLUS
12172 + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
12173 + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
12174 + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
12175 + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
12176 + return "stdsp\t%0, %1";
12178 + return "st.w\t%0, %1";
12184 + [(set_attr "length" "2,4,8,4,4")
12185 + (set_attr "type" "alu,alu,alu2,load,store")
12186 + (set_attr "cc" "none,none,clobber,none,none")])
12190 +;;== double - 64 bits =========================================================
;; movdf expander: same register-forcing policy as the other movM expanders.
12191 +(define_expand "movdf"
12192 + [(set (match_operand:DF 0 "nonimmediate_operand" "")
12193 + (match_operand:DF 1 "general_operand" ""))]
12196 + /* One of the ops has to be in a register. */
12197 + if (GET_CODE (operands[0]) != REG){
12198 + operands[1] = force_reg (DFmode, operands[1]);
;; 64-bit float move, soft-float only. Memory alternatives use ld.d/st.d
;; (PC-relative for constant-pool refs); reg/const alternatives are split
;; after reload into two SImode moves via gen_lowpart/gen_highpart.
12203 +(define_insn_and_split "*movdf_internal"
12204 + [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,r,r,m")
12205 + (match_operand:DF 1 "general_operand" " r,G,F,m,r"))]
12206 + "TARGET_SOFT_FLOAT
12207 + && (register_operand (operands[0], DFmode)
12208 + || register_operand (operands[1], DFmode))"
12210 + switch (which_alternative ){
12216 + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
12217 + return "ld.d\t%0, pc[%1 - .]";
12219 + return "ld.d\t%0, %1";
12221 + return "st.d\t%0, %1";
12226 + "TARGET_SOFT_FLOAT
12227 + && reload_completed
12228 + && (REG_P (operands[0])
12229 + && (REG_P (operands[1])
12230 + || GET_CODE (operands[1]) == CONST_DOUBLE))"
12231 + [(set (match_dup 0) (match_dup 1))
12232 + (set (match_dup 2) (match_dup 3))]
;; Low halves in operands[0]/[1], high halves in operands[2]/[3].
12235 + operands[2] = gen_highpart (SImode, operands[0]);
12236 + operands[0] = gen_lowpart (SImode, operands[0]);
12237 + operands[3] = gen_highpart(SImode, operands[1]);
12238 + operands[1] = gen_lowpart(SImode, operands[1]);
12242 + [(set_attr "length" "*,*,*,4,4")
12243 + (set_attr "type" "*,*,*,load2,store2")
12244 + (set_attr "cc" "*,*,*,none,none")])
12247 +;;=============================================================================
12248 +;; Conditional Moves
12249 +;;=============================================================================
;; Predicated load (V2 ISA): "%?" emits the condition suffix supplied by the
;; predication machinery; MOVCC is a mode iterator with per-mode constraint
;; and mnemonic-postfix attributes.
12250 +(define_insn "ld<mode>_predicable"
12251 + [(set (match_operand:MOVCC 0 "register_operand" "=r")
12252 + (match_operand:MOVCC 1 "memory_operand" "<MOVCC:pred_mem_constraint>"))]
12253 + "TARGET_V2_INSNS"
12254 + "ld<MOVCC:load_postfix>%?\t%0, %1"
12255 + [(set_attr "length" "4")
12256 + (set_attr "cc" "cmp_cond_insn")
12257 + (set_attr "type" "load")
12258 + (set_attr "predicable" "yes")]
;; Predicated store, mirror of the load above.
12262 +(define_insn "st<mode>_predicable"
12263 + [(set (match_operand:MOVCC 0 "memory_operand" "=<MOVCC:pred_mem_constraint>")
12264 + (match_operand:MOVCC 1 "register_operand" "r"))]
12265 + "TARGET_V2_INSNS"
12266 + "st<MOVCC:store_postfix>%?\t%0, %1"
12267 + [(set_attr "length" "4")
12268 + (set_attr "cc" "cmp_cond_insn")
12269 + (set_attr "type" "store")
12270 + (set_attr "predicable" "yes")]
;; Predicated register/small-immediate move (template line elided by the diff).
12273 +(define_insn "mov<mode>_predicable"
12274 + [(set (match_operand:MOVCC 0 "register_operand" "=r")
12275 + (match_operand:MOVCC 1 "avr32_cond_register_immediate_operand" "rKs08"))]
12278 + [(set_attr "length" "4")
12279 + (set_attr "cc" "cmp_cond_insn")
12280 + (set_attr "type" "alu")
12281 + (set_attr "predicable" "yes")]
12285 +;;=============================================================================
12286 +;; Move chunks of memory
12287 +;;=============================================================================
;; Block-move expander: delegates to avr32_gen_movmemsi; on success the
;; expander is DONE, otherwise it presumably FAILs so the middle end falls
;; back to a library call (elided lines -- confirm).
12289 +(define_expand "movmemsi"
12290 + [(match_operand:BLK 0 "general_operand" "")
12291 + (match_operand:BLK 1 "general_operand" "")
12292 + (match_operand:SI 2 "const_int_operand" "")
12293 + (match_operand:SI 3 "const_int_operand" "")]
12296 + if (avr32_gen_movmemsi (operands))
12305 +;;=============================================================================
12306 +;; Bit field instructions
12307 +;;-----------------------------------------------------------------------------
12308 +;; Instructions to insert or extract bit-fields
12309 +;;=============================================================================
;; Bit-field insert: bfins dest, src, position(%2), width(%1). Ku05 limits
;; both width and position to 5-bit unsigned immediates.
12311 +(define_insn "insv"
12312 + [ (set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
12313 + (match_operand:SI 1 "immediate_operand" "Ku05")
12314 + (match_operand:SI 2 "immediate_operand" "Ku05"))
12315 + (match_operand 3 "register_operand" "r"))]
12317 + "bfins\t%0, %3, %2, %1"
12318 + [(set_attr "type" "alu")
12319 + (set_attr "length" "4")
12320 + (set_attr "cc" "set_ncz")])
;; Sign-extract expander: rejects widths >= 32 (presumably via FAIL on the
;; elided line) so only the *_internal insn below handles valid widths.
12324 +(define_expand "extv"
12325 + [ (set (match_operand:SI 0 "register_operand" "")
12326 + (sign_extract:SI (match_operand:SI 1 "register_operand" "")
12327 + (match_operand:SI 2 "immediate_operand" "")
12328 + (match_operand:SI 3 "immediate_operand" "")))]
12331 + if ( INTVAL(operands[2]) >= 32 )
;; Zero-extract expander, same width guard as extv.
12336 +(define_expand "extzv"
12337 + [ (set (match_operand:SI 0 "register_operand" "")
12338 + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
12339 + (match_operand:SI 2 "immediate_operand" "")
12340 + (match_operand:SI 3 "immediate_operand" "")))]
12343 + if ( INTVAL(operands[2]) >= 32 )
;; Signed bit-field extract: bfexts dest, src, position(%3), width(%2).
12348 +(define_insn "extv_internal"
12349 + [ (set (match_operand:SI 0 "register_operand" "=r")
12350 + (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
12351 + (match_operand:SI 2 "immediate_operand" "Ku05")
12352 + (match_operand:SI 3 "immediate_operand" "Ku05")))]
12353 + "INTVAL(operands[2]) < 32"
12354 + "bfexts\t%0, %1, %3, %2"
12355 + [(set_attr "type" "alu")
12356 + (set_attr "length" "4")
12357 + (set_attr "cc" "set_ncz")])
;; Unsigned bit-field extract: bfextu, same operand order as bfexts.
12360 +(define_insn "extzv_internal"
12361 + [ (set (match_operand:SI 0 "register_operand" "=r")
12362 + (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
12363 + (match_operand:SI 2 "immediate_operand" "Ku05")
12364 + (match_operand:SI 3 "immediate_operand" "Ku05")))]
12365 + "INTVAL(operands[2]) < 32"
12366 + "bfextu\t%0, %1, %3, %2"
12367 + [(set_attr "type" "alu")
12368 + (set_attr "length" "4")
12369 + (set_attr "cc" "set_ncz")])
12373 +;;=============================================================================
12374 +;; Some peepholes for avoiding unnecessary cast instructions
12375 +;; followed by bfins.
12376 +;;-----------------------------------------------------------------------------
;; Peephole: drop a QI zero-extend feeding a bfins when the extended temp dies
;; and the inserted field is <= 8 bits (so the extend is redundant).
;; NOTE(review): the define_peephole2 header line itself is elided by the diff.
12379 + [(set (match_operand:SI 0 "register_operand" "")
12380 + (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
12381 + (set (zero_extract:SI (match_operand 2 "register_operand" "")
12382 + (match_operand:SI 3 "immediate_operand" "")
12383 + (match_operand:SI 4 "immediate_operand" ""))
12385 + "((peep2_reg_dead_p(2, operands[0]) &&
12386 + (INTVAL(operands[3]) <= 8)))"
12387 + [(set (zero_extract:SI (match_dup 2)
;; Same peephole for HI zero-extends with fields <= 16 bits.
12394 + [(set (match_operand:SI 0 "register_operand" "")
12395 + (zero_extend:SI (match_operand:HI 1 "register_operand" "")))
12396 + (set (zero_extract:SI (match_operand 2 "register_operand" "")
12397 + (match_operand:SI 3 "immediate_operand" "")
12398 + (match_operand:SI 4 "immediate_operand" ""))
12400 + "((peep2_reg_dead_p(2, operands[0]) &&
12401 + (INTVAL(operands[3]) <= 16)))"
12402 + [(set (zero_extract:SI (match_dup 2)
12408 +;;=============================================================================
12410 +;;-----------------------------------------------------------------------------
12411 +;; Implements the push instruction
12412 +;;=============================================================================
;; pushm: multi-register push, pre-decrementing SP. Operand 0 is the register
;; mask; "%r0" presumably prints it as a register list. An empty mask emits
;; nothing (the else branch is elided by the diff -- confirm).
12413 +(define_insn "pushm"
12414 + [(set (mem:BLK (pre_dec:BLK (reg:SI SP_REGNUM)))
12415 + (unspec:BLK [(match_operand 0 "const_int_operand" "")]
12419 + if (INTVAL(operands[0])) {
12420 + return "pushm\t%r0";
12425 + [(set_attr "type" "store")
12426 + (set_attr "length" "2")
12427 + (set_attr "cc" "none")])
;; stm: store-multiple at pointer %0 with register mask %1; a nonzero
;; operand 2 selects the pre-decrement form "stm --%0".
12429 +(define_insn "stm"
12430 + [(unspec [(match_operand 0 "register_operand" "r")
12431 + (match_operand 1 "const_int_operand" "")
12432 + (match_operand 2 "const_int_operand" "")]
12436 + if (INTVAL(operands[1])) {
12437 + if (INTVAL(operands[2]) != 0)
12438 + return "stm\t--%0, %s1";
12440 + return "stm\t%0, %s1";
12445 + [(set_attr "type" "store")
12446 + (set_attr "length" "4")
12447 + (set_attr "cc" "none")])
;; popm: multi-register pop, mirror of pushm.
12451 +(define_insn "popm"
12452 + [(unspec [(match_operand 0 "const_int_operand" "")]
12456 + if (INTVAL(operands[0])) {
12457 + return "popm %r0";
12462 + [(set_attr "type" "load")
12463 + (set_attr "length" "2")])
12467 +;;=============================================================================
12469 +;;-----------------------------------------------------------------------------
12470 +;; Adds reg1 with reg2 and puts the result in reg0.
12471 +;;=============================================================================
;; add<mode>3 over the INTM integer-mode iterator; five alternatives covering
;; 2-byte reg forms and 4-byte immediate forms (templates elided by the diff).
12472 +(define_insn "add<mode>3"
12473 + [(set (match_operand:INTM 0 "register_operand" "=r,r,r,r,r")
12474 + (plus:INTM (match_operand:INTM 1 "register_operand" "%0,r,0,r,0")
12475 + (match_operand:INTM 2 "avr32_add_operand" "r,r,Is08,Is16,Is21")))]
12484 + [(set_attr "length" "2,4,2,4,4")
12485 + (set_attr "cc" "<INTM:alu_cc_attr>")])
;; add with shifted operand: add %0, %2, %1 << %3 (shift amount <= 3, Ku02).
12487 +(define_insn "add<mode>3_lsl"
12488 + [(set (match_operand:INTM 0 "register_operand" "=r")
12489 + (plus:INTM (ashift:INTM (match_operand:INTM 1 "register_operand" "r")
12490 + (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))
12491 + (match_operand:INTM 2 "register_operand" "r")))]
12493 + "add %0, %2, %1 << %3"
12494 + [(set_attr "length" "4")
12495 + (set_attr "cc" "<INTM:alu_cc_attr>")])
;; Commuted form of add<mode>3_lsl (shift on the second addend).
12497 +(define_insn "add<mode>3_lsl2"
12498 + [(set (match_operand:INTM 0 "register_operand" "=r")
12499 + (plus:INTM (match_operand:INTM 1 "register_operand" "r")
12500 + (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
12501 + (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
12503 + "add %0, %1, %2 << %3"
12504 + [(set_attr "length" "4")
12505 + (set_attr "cc" "<INTM:alu_cc_attr>")])
;; add with multiplied operand: the power-of-two multipliers 0/2/4/8 are
;; rewritten as a shift via the %p3 (log2) output modifier.
12508 +(define_insn "add<mode>3_mul"
12509 + [(set (match_operand:INTM 0 "register_operand" "=r")
12510 + (plus:INTM (mult:INTM (match_operand:INTM 1 "register_operand" "r")
12511 + (match_operand:INTM 3 "immediate_operand" "Ku04" ))
12512 + (match_operand:INTM 2 "register_operand" "r")))]
12513 + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
12514 + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
12515 + "add %0, %2, %1 << %p3"
12516 + [(set_attr "length" "4")
12517 + (set_attr "cc" "<INTM:alu_cc_attr>")])
;; Commuted form of add<mode>3_mul.
12519 +(define_insn "add<mode>3_mul2"
12520 + [(set (match_operand:INTM 0 "register_operand" "=r")
12521 + (plus:INTM (match_operand:INTM 1 "register_operand" "r")
12522 + (mult:INTM (match_operand:INTM 2 "register_operand" "r")
12523 + (match_operand:INTM 3 "immediate_operand" "Ku04" ))))]
12524 + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
12525 + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
12526 + "add %0, %1, %2 << %p3"
12527 + [(set_attr "length" "4")
12528 + (set_attr "cc" "<INTM:alu_cc_attr>")])
;; Peephole: fold a dead shift temp into the combined add-with-shift insn
;; (shift amounts 1..3 only, matching the Ku02 constraint above).
12532 + [(set (match_operand:SI 0 "register_operand" "")
12533 + (ashift:SI (match_operand:SI 1 "register_operand" "")
12534 + (match_operand:SI 2 "immediate_operand" "")))
12535 + (set (match_operand:SI 3 "register_operand" "")
12536 + (plus:SI (match_dup 0)
12537 + (match_operand:SI 4 "register_operand" "")))]
12538 + "(peep2_reg_dead_p(2, operands[0]) &&
12539 + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
12540 + [(set (match_dup 3)
12541 + (plus:SI (ashift:SI (match_dup 1)
;; Same peephole with the shift as the second addend.
12547 + [(set (match_operand:SI 0 "register_operand" "")
12548 + (ashift:SI (match_operand:SI 1 "register_operand" "")
12549 + (match_operand:SI 2 "immediate_operand" "")))
12550 + (set (match_operand:SI 3 "register_operand" "")
12551 + (plus:SI (match_operand:SI 4 "register_operand" "")
12553 + "(peep2_reg_dead_p(2, operands[0]) &&
12554 + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
12555 + [(set (match_dup 3)
12556 + (plus:SI (ashift:SI (match_dup 1)
;; 64-bit add: add low halves, adc high halves (%m prints the high register).
12561 +(define_insn "adddi3"
12562 + [(set (match_operand:DI 0 "register_operand" "=r,r")
12563 + (plus:DI (match_operand:DI 1 "register_operand" "%r,0")
12564 + (match_operand:DI 2 "register_operand" "r,r")))]
12567 + add %0, %1, %2\;adc %m0, %m1, %m2
12568 + add %0, %2\;adc %m0, %m0, %m2"
12569 + [(set_attr "length" "8,6")
12570 + (set_attr "type" "alu2")
12571 + (set_attr "cc" "set_vncz")])
;; Predicated add-immediate (destination also a source, Is08 immediate).
12574 +(define_insn "add<mode>_imm_predicable"
12575 + [(set (match_operand:INTM 0 "register_operand" "+r")
12576 + (plus:INTM (match_dup 0)
12577 + (match_operand:INTM 1 "avr32_cond_immediate_operand" "%Is08")))]
12580 + [(set_attr "length" "4")
12581 + (set_attr "cc" "cmp_cond_insn")
12582 + (set_attr "predicable" "yes")]
12585 +;;=============================================================================
12587 +;;-----------------------------------------------------------------------------
12588 +;; Subtract reg2 or immediate value from reg0 and puts the result in reg0.
12589 +;;=============================================================================
;; sub<mode>3: seven alternatives mixing short/long reg forms, signed
;; immediates of several widths, and a reverse-subtract form (Ks08 as the
;; minuend). Output templates are elided by the diff.
12591 +(define_insn "sub<mode>3"
12592 + [(set (match_operand:INTM 0 "general_operand" "=r,r,r,r,r,r,r")
12593 + (minus:INTM (match_operand:INTM 1 "register_const_int_operand" "0,r,0,r,0,r,Ks08")
12594 + (match_operand:INTM 2 "register_const_int_operand" "r,r,Ks08,Ks16,Ks21,0,r")))]
12604 + [(set_attr "length" "2,4,2,4,4,2,4")
12605 + (set_attr "cc" "<INTM:alu_cc_attr>")])
;; Subtract a power-of-two multiple (0/2/4/8), emitted as a shift via %p3.
12607 +(define_insn "*sub<mode>3_mul"
12608 + [(set (match_operand:INTM 0 "register_operand" "=r,r,r")
12609 + (minus:INTM (match_operand:INTM 1 "register_operand" "r,0,r")
12610 + (mult:INTM (match_operand:INTM 2 "register_operand" "r,r,0")
12611 + (match_operand:SI 3 "immediate_operand" "Ku04,Ku04,Ku04" ))))]
12612 + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
12613 + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
12615 + sub %0, %1, %2 << %p3
12616 + sub %0, %0, %2 << %p3
12617 + sub %0, %1, %0 << %p3"
12618 + [(set_attr "length" "4,4,4")
12619 + (set_attr "cc" "<INTM:alu_cc_attr>")])
;; Subtract a shifted register (shift amount limited by Ku02).
12621 +(define_insn "*sub<mode>3_lsl"
12622 + [(set (match_operand:INTM 0 "register_operand" "=r")
12623 + (minus:INTM (match_operand:INTM 1 "register_operand" "r")
12624 + (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
12625 + (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
12627 + "sub %0, %1, %2 << %3"
12628 + [(set_attr "length" "4")
12629 + (set_attr "cc" "<INTM:alu_cc_attr>")])
;; 64-bit subtract: sub low halves, sbc high halves (%m = high register).
12632 +(define_insn "subdi3"
12633 + [(set (match_operand:DI 0 "register_operand" "=r,r")
12634 + (minus:DI (match_operand:DI 1 "register_operand" "%r,0")
12635 + (match_operand:DI 2 "register_operand" "r,r")))]
12638 + sub %0, %1, %2\;sbc %m0, %m1, %m2
12639 + sub %0, %2\;sbc %m0, %m0, %m2"
12640 + [(set_attr "length" "8,6")
12641 + (set_attr "type" "alu2")
12642 + (set_attr "cc" "set_vncz")])
;; Predicated subtract-immediate (template line elided by the diff).
12645 +(define_insn "sub<mode>_imm_predicable"
12646 + [(set (match_operand:INTM 0 "register_operand" "+r")
12647 + (minus:INTM (match_dup 0)
12648 + (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")))]
12651 + [(set_attr "length" "4")
12652 + (set_attr "cc" "cmp_cond_insn")
12653 + (set_attr "predicable" "yes")])
;; Predicated reverse subtract: immediate minuend, register subtrahend
;; (second operand line elided by the diff).
12655 +(define_insn "rsub<mode>_imm_predicable"
12656 + [(set (match_operand:INTM 0 "register_operand" "+r")
12657 + (minus:INTM (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")
12661 + [(set_attr "length" "4")
12662 + (set_attr "cc" "cmp_cond_insn")
12663 + (set_attr "predicable" "yes")])
12665 +;;=============================================================================
12667 +;;-----------------------------------------------------------------------------
12668 +;; Multiply op1 and op2 and put the value in op0.
12669 +;;=============================================================================
;; 8-bit multiply: short two-operand form when dest == src1, otherwise the
;; three-operand form; Ks08 immediate uses the same mnemonic.
12672 +(define_insn "mulqi3"
12673 + [(set (match_operand:QI 0 "register_operand" "=r,r,r")
12674 + (mult:QI (match_operand:QI 1 "register_operand" "%0,r,r")
12675 + (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))]
12676 + "!TARGET_NO_MUL_INSNS"
12678 + switch (which_alternative){
12680 + return "mul %0, %2";
12682 + return "mul %0, %1, %2";
12684 + return "mul %0, %1, %2";
12686 + gcc_unreachable();
12689 + [(set_attr "type" "mulww_w,mulww_w,mulwh")
12690 + (set_attr "length" "2,4,4")
12691 + (set_attr "cc" "none")])
;; 32-bit multiply, structurally identical to mulqi3.
12693 +(define_insn "mulsi3"
12694 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
12695 + (mult:SI (match_operand:SI 1 "register_operand" "%0,r,r")
12696 + (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))]
12697 + "!TARGET_NO_MUL_INSNS"
12699 + switch (which_alternative){
12701 + return "mul %0, %2";
12703 + return "mul %0, %1, %2";
12705 + return "mul %0, %1, %2";
12707 + gcc_unreachable();
12710 + [(set_attr "type" "mulww_w,mulww_w,mulwh")
12711 + (set_attr "length" "2,4,4")
12712 + (set_attr "cc" "none")])
;; 16x16 -> 32 signed multiply (DSP): ":b" selects the bottom halfword.
12715 +(define_insn "mulhisi3"
12716 + [(set (match_operand:SI 0 "register_operand" "=r")
12718 + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
12719 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
12720 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12721 + "mulhh.w %0, %1:b, %2:b"
12722 + [(set_attr "type" "mulhh")
12723 + (set_attr "length" "4")
12724 + (set_attr "cc" "none")])
;; Peephole: mulhisi3 followed by >>16 of the result becomes a widening
;; multiply into a scratch DI pair, taking the high SI part directly.
12727 + [(match_scratch:DI 6 "r")
12728 + (set (match_operand:SI 0 "register_operand" "")
12730 + (sign_extend:SI (match_operand:HI 1 "register_operand" ""))
12731 + (sign_extend:SI (match_operand:HI 2 "register_operand" ""))))
12732 + (set (match_operand:SI 3 "register_operand" "")
12733 + (ashiftrt:SI (match_dup 0)
12734 + (const_int 16)))]
12735 + "!TARGET_NO_MUL_INSNS && TARGET_DSP
12736 + && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))"
12737 + [(set (match_dup 4) (sign_extend:SI (match_dup 1)))
12738 + (set (match_dup 6)
12739 + (ashift:DI (mult:DI (sign_extend:DI (match_dup 4))
12740 + (sign_extend:DI (match_dup 2)))
12742 + (set (match_dup 3) (match_dup 5))]
12745 + operands[4] = gen_rtx_REG(SImode, REGNO(operands[1]));
12746 + operands[5] = gen_highpart (SImode, operands[4]);
;; 16x16 multiply with negated first operand (mulnhh.w).
12750 +(define_insn "mulnhisi3"
12751 + [(set (match_operand:SI 0 "register_operand" "=r")
12753 + (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r")))
12754 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
12755 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12756 + "mulnhh.w %0, %1:b, %2:b"
12757 + [(set_attr "type" "mulhh")
12758 + (set_attr "length" "4")
12759 + (set_attr "cc" "none")])
;; 16x16 multiply-accumulate into a 32-bit accumulator (machh.w).
12761 +(define_insn "machisi3"
12762 + [(set (match_operand:SI 0 "register_operand" "+r")
12763 + (plus:SI (mult:SI
12764 + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
12765 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
12767 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12768 + "machh.w %0, %1:b, %2:b"
12769 + [(set_attr "type" "machh_w")
12770 + (set_attr "length" "4")
12771 + (set_attr "cc" "none")])
;; 32x32 -> 64 signed widening multiply.
12775 +(define_insn "mulsidi3"
12776 + [(set (match_operand:DI 0 "register_operand" "=r")
12778 + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
12779 + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
12780 + "!TARGET_NO_MUL_INSNS"
12781 + "muls.d %0, %1, %2"
12782 + [(set_attr "type" "mulww_d")
12783 + (set_attr "length" "4")
12784 + (set_attr "cc" "none")])
;; 32x32 -> 64 unsigned widening multiply.
12786 +(define_insn "umulsidi3"
12787 + [(set (match_operand:DI 0 "register_operand" "=r")
12789 + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
12790 + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
12791 + "!TARGET_NO_MUL_INSNS"
12792 + "mulu.d %0, %1, %2"
12793 + [(set_attr "type" "mulww_d")
12794 + (set_attr "length" "4")
12795 + (set_attr "cc" "none")])
;; 32-bit multiply-accumulate (template line elided by the diff).
12797 +(define_insn "*mulaccsi3"
12798 + [(set (match_operand:SI 0 "register_operand" "+r")
12799 + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
12800 + (match_operand:SI 2 "register_operand" "r"))
12802 + "!TARGET_NO_MUL_INSNS"
12804 + [(set_attr "type" "macww_w")
12805 + (set_attr "length" "4")
12806 + (set_attr "cc" "none")])
;; 64-bit signed multiply-accumulate (macs.d).
12808 +(define_insn "mulaccsidi3"
12809 + [(set (match_operand:DI 0 "register_operand" "+r")
12810 + (plus:DI (mult:DI
12811 + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
12812 + (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
12814 + "!TARGET_NO_MUL_INSNS"
12815 + "macs.d %0, %1, %2"
12816 + [(set_attr "type" "macww_d")
12817 + (set_attr "length" "4")
12818 + (set_attr "cc" "none")])
;; 64-bit unsigned multiply-accumulate (macu.d).
12820 +(define_insn "umulaccsidi3"
12821 + [(set (match_operand:DI 0 "register_operand" "+r")
12822 + (plus:DI (mult:DI
12823 + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
12824 + (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
12826 + "!TARGET_NO_MUL_INSNS"
12827 + "macu.d %0, %1, %2"
12828 + [(set_attr "type" "macww_d")
12829 + (set_attr "length" "4")
12830 + (set_attr "cc" "none")])
12834 +;; Try to avoid Write-After-Write hazards for mul operations
12835 +;; if it can be done
;; Peephole: when a multiply result is immediately overwritten by an ALU op
;; on the same register and the other source dies, retarget the multiply to
;; the dead source register to break the WAW dependency.
12837 + [(set (match_operand:SI 0 "register_operand" "")
12839 + (sign_extend:SI (match_operand 1 "general_operand" ""))
12840 + (sign_extend:SI (match_operand 2 "general_operand" ""))))
12841 + (set (match_dup 0)
12842 + (match_operator:SI 3 "alu_operator" [(match_dup 0)
12843 + (match_operand 4 "general_operand" "")]))]
12844 + "peep2_reg_dead_p(1, operands[2])"
12845 + [(set (match_dup 5)
12847 + (sign_extend:SI (match_dup 1))
12848 + (sign_extend:SI (match_dup 2))))
12849 + (set (match_dup 0)
12850 + (match_op_dup 3 [(match_dup 5)
12851 + (match_dup 4)]))]
12852 + "{operands[5] = gen_rtx_REG(SImode, REGNO(operands[2]));}"
12857 +;;=============================================================================
12858 +;; DSP instructions
12859 +;;=============================================================================
12860 +(define_insn "mulsathh_h"
12861 + [(set (match_operand:HI 0 "register_operand" "=r")
12862 + (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
12863 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
12864 + (const_int 15))))]
12865 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12866 + "mulsathh.h\t%0, %1:b, %2:b"
12867 + [(set_attr "length" "4")
12868 + (set_attr "cc" "none")
12869 + (set_attr "type" "mulhh")])
12871 +(define_insn "mulsatrndhh_h"
12872 + [(set (match_operand:HI 0 "register_operand" "=r")
12873 + (ss_truncate:HI (ashiftrt:SI
12874 + (plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
12875 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
12876 + (const_int 1073741824))
12877 + (const_int 15))))]
12878 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12879 + "mulsatrndhh.h\t%0, %1:b, %2:b"
12880 + [(set_attr "length" "4")
12881 + (set_attr "cc" "none")
12882 + (set_attr "type" "mulhh")])
12884 +(define_insn "mulsathh_w"
12885 + [(set (match_operand:SI 0 "register_operand" "=r")
12886 + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
12887 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12888 + (const_int 1))))]
12889 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12890 + "mulsathh.w\t%0, %1:b, %2:b"
12891 + [(set_attr "length" "4")
12892 + (set_attr "cc" "none")
12893 + (set_attr "type" "mulhh")])
12895 +(define_insn "mulsatwh_w"
12896 + [(set (match_operand:SI 0 "register_operand" "=r")
12897 + (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
12898 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12899 + (const_int 15))))]
12900 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12901 + "mulsatwh.w\t%0, %1, %2:b"
12902 + [(set_attr "length" "4")
12903 + (set_attr "cc" "none")
12904 + (set_attr "type" "mulwh")])
12906 +(define_insn "mulsatrndwh_w"
12907 + [(set (match_operand:SI 0 "register_operand" "=r")
12908 + (ss_truncate:SI (ashiftrt:DI (plus:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
12909 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12910 + (const_int 1073741824))
12911 + (const_int 15))))]
12912 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12913 + "mulsatrndwh.w\t%0, %1, %2:b"
12914 + [(set_attr "length" "4")
12915 + (set_attr "cc" "none")
12916 + (set_attr "type" "mulwh")])
12918 +(define_insn "macsathh_w"
12919 + [(set (match_operand:SI 0 "register_operand" "+r")
12920 + (plus:SI (match_dup 0)
12921 + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
12922 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12923 + (const_int 1)))))]
12924 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12925 + "macsathh.w\t%0, %1:b, %2:b"
12926 + [(set_attr "length" "4")
12927 + (set_attr "cc" "none")
12928 + (set_attr "type" "mulhh")])
12931 +(define_insn "mulwh_d"
12932 + [(set (match_operand:DI 0 "register_operand" "=r")
12933 + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
12934 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12935 + (const_int 16)))]
12936 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12937 + "mulwh.d\t%0, %1, %2:b"
12938 + [(set_attr "length" "4")
12939 + (set_attr "cc" "none")
12940 + (set_attr "type" "mulwh")])
12943 +(define_insn "mulnwh_d"
12944 + [(set (match_operand:DI 0 "register_operand" "=r")
12945 + (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))
12946 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12947 + (const_int 16)))]
12948 + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
12949 + "mulnwh.d\t%0, %1, %2:b"
12950 + [(set_attr "length" "4")
12951 + (set_attr "cc" "none")
12952 + (set_attr "type" "mulwh")])
12954 +(define_insn "macwh_d" ;; DI multiply-accumulate: op0 += ((DI)(SI)op1 * (DI)(HI)op2) << 16
12955 + [(set (match_operand:DI 0 "register_operand" "+r")
12956 + (plus:DI (match_dup 0) ;; destination register doubles as the accumulator
12957 + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r")) ;; '%' marks operands 1 and 2 as a commutative pair
12958 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12959 + (const_int 16))))] ;; product shifted left 16 bits before accumulation
12960 + "!TARGET_NO_MUL_INSNS && TARGET_DSP" ;; needs hardware multiply and the DSP extension
12961 + "macwh.d\t%0, %1, %2:b" ;; ':b' modifier on the halfword operand -- presumably its bottom half; confirm in avr32 print_operand
12962 + [(set_attr "length" "4")
12963 + (set_attr "cc" "none") ;; condition codes not affected
12964 + (set_attr "type" "mulwh")])
12966 +(define_insn "machh_d" ;; DI multiply-accumulate of halfwords: op0 += (DI)(HI)op1 * (DI)(HI)op2
12967 + [(set (match_operand:DI 0 "register_operand" "+r")
12968 + (plus:DI (match_dup 0) ;; destination register doubles as the accumulator
12969 + (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r")) ;; '%' marks operands 1 and 2 as a commutative pair
12970 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))]
12971 + "!TARGET_NO_MUL_INSNS && TARGET_DSP" ;; needs hardware multiply and the DSP extension
12972 + "machh.d\t%0, %1:b, %2:b" ;; ':b' operand modifier on the HI sources -- presumably selects the bottom halfword; confirm in avr32 print_operand
12973 + [(set_attr "length" "4")
12974 + (set_attr "cc" "none") ;; condition codes not affected
12975 + (set_attr "type" "mulwh")]) ;; NOTE(review): type is "mulwh" although the insn is a halfword-only mac -- verify against the scheduling descriptions
12977 +(define_insn "satadd_w"
12978 + [(set (match_operand:SI 0 "register_operand" "=r")
12979 + (ss_plus:SI (match_operand:SI 1 "register_operand" "r")
12980 + (match_operand:SI 2 "register_operand" "r")))]
12982 + "satadd.w\t%0, %1, %2"
12983 + [(set_attr "length" "4")
12984 + (set_attr "cc" "none")
12985 + (set_attr "type" "alu_sat")])
12987 +(define_insn "satsub_w"
12988 + [(set (match_operand:SI 0 "register_operand" "=r")
12989 + (ss_minus:SI (match_operand:SI 1 "register_operand" "r")
12990 + (match_operand:SI 2 "register_operand" "r")))]
12992 + "satsub.w\t%0, %1, %2"
12993 + [(set_attr "length" "4")
12994 + (set_attr "cc" "none")
12995 + (set_attr "type" "alu_sat")])
12997 +(define_insn "satadd_h"
12998 + [(set (match_operand:HI 0 "register_operand" "=r")
12999 + (ss_plus:HI (match_operand:HI 1 "register_operand" "r")
13000 + (match_operand:HI 2 "register_operand" "r")))]
13002 + "satadd.h\t%0, %1, %2"
13003 + [(set_attr "length" "4")
13004 + (set_attr "cc" "none")
13005 + (set_attr "type" "alu_sat")])
13007 +(define_insn "satsub_h"
13008 + [(set (match_operand:HI 0 "register_operand" "=r")
13009 + (ss_minus:HI (match_operand:HI 1 "register_operand" "r")
13010 + (match_operand:HI 2 "register_operand" "r")))]
13012 + "satsub.h\t%0, %1, %2"
13013 + [(set_attr "length" "4")
13014 + (set_attr "cc" "none")
13015 + (set_attr "type" "alu_sat")])
13018 +;;=============================================================================
13020 +;;-----------------------------------------------------------------------------
13021 +;; Set reg0 to the smaller of reg1 and reg2, where the register contents
13022 +;; are interpreted as signed values.
13023 +;;=============================================================================
13024 +(define_insn "sminsi3"
13025 + [(set (match_operand:SI 0 "register_operand" "=r")
13026 + (smin:SI (match_operand:SI 1 "register_operand" "r")
13027 + (match_operand:SI 2 "register_operand" "r")))]
13030 + [(set_attr "length" "4")
13031 + (set_attr "cc" "none")])
13033 +;;=============================================================================
13035 +;;-----------------------------------------------------------------------------
13036 +;; Set reg0 to the larger of reg1 and reg2, where the register contents
13037 +;; are interpreted as signed values.
13038 +;;=============================================================================
13039 +(define_insn "smaxsi3"
13040 + [(set (match_operand:SI 0 "register_operand" "=r")
13041 + (smax:SI (match_operand:SI 1 "register_operand" "r")
13042 + (match_operand:SI 2 "register_operand" "r")))]
13045 + [(set_attr "length" "4")
13046 + (set_attr "cc" "none")])
13050 +;;=============================================================================
13051 +;; Logical operations
13052 +;;-----------------------------------------------------------------------------
13055 +;; Split up simple DImode logical operations. Simply perform the logical
13056 +;; operation on the upper and lower halves of the registers.
13058 + [(set (match_operand:DI 0 "register_operand" "")
13059 + (match_operator:DI 6 "logical_binary_operator"
13060 + [(match_operand:DI 1 "register_operand" "")
13061 + (match_operand:DI 2 "register_operand" "")]))]
13062 + "reload_completed"
13063 + [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
13064 + (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))]
13067 + operands[3] = gen_highpart (SImode, operands[0]);
13068 + operands[0] = gen_lowpart (SImode, operands[0]);
13069 + operands[4] = gen_highpart (SImode, operands[1]);
13070 + operands[1] = gen_lowpart (SImode, operands[1]);
13071 + operands[5] = gen_highpart (SImode, operands[2]);
13072 + operands[2] = gen_lowpart (SImode, operands[2]);
13076 +;;=============================================================================
13077 +;; Logical operations with shifted operand
13078 +;;=============================================================================
13079 +(define_insn "<code>si_lshift"
13080 + [(set (match_operand:SI 0 "register_operand" "=r")
13081 + (logical:SI (match_operator:SI 4 "logical_shift_operator"
13082 + [(match_operand:SI 2 "register_operand" "r")
13083 + (match_operand:SI 3 "immediate_operand" "Ku05")])
13084 + (match_operand:SI 1 "register_operand" "r")))]
13087 + if ( GET_CODE(operands[4]) == ASHIFT )
13088 + return "<logical_insn>\t%0, %1, %2 << %3";
13090 + return "<logical_insn>\t%0, %1, %2 >> %3";
13093 + [(set_attr "cc" "set_z")]
13097 +;;************************************************
13098 +;; Peepholes for detecting logical operations
13099 +;; with shifted operands
13100 +;;************************************************
13103 + [(set (match_operand:SI 3 "register_operand" "")
13104 + (match_operator:SI 5 "logical_shift_operator"
13105 + [(match_operand:SI 1 "register_operand" "")
13106 + (match_operand:SI 2 "immediate_operand" "")]))
13107 + (set (match_operand:SI 0 "register_operand" "")
13108 + (logical:SI (match_operand:SI 4 "register_operand" "")
13110 + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
13112 + if ( GET_CODE(operands[5]) == ASHIFT )
13113 + return "<logical_insn>\t%0, %4, %1 << %2";
13115 + return "<logical_insn>\t%0, %4, %1 >> %2";
13117 + [(set_attr "cc" "set_z")]
13121 + [(set (match_operand:SI 3 "register_operand" "")
13122 + (match_operator:SI 5 "logical_shift_operator"
13123 + [(match_operand:SI 1 "register_operand" "")
13124 + (match_operand:SI 2 "immediate_operand" "")]))
13125 + (set (match_operand:SI 0 "register_operand" "")
13126 + (logical:SI (match_dup 3)
13127 + (match_operand:SI 4 "register_operand" "")))]
13128 + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
13130 + if ( GET_CODE(operands[5]) == ASHIFT )
13131 + return "<logical_insn>\t%0, %4, %1 << %2";
13133 + return "<logical_insn>\t%0, %4, %1 >> %2";
13135 + [(set_attr "cc" "set_z")]
13140 + [(set (match_operand:SI 0 "register_operand" "")
13141 + (match_operator:SI 5 "logical_shift_operator"
13142 + [(match_operand:SI 1 "register_operand" "")
13143 + (match_operand:SI 2 "immediate_operand" "")]))
13144 + (set (match_operand:SI 3 "register_operand" "")
13145 + (logical:SI (match_operand:SI 4 "register_operand" "")
13147 + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
13149 + [(set (match_dup 3)
13150 + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
13157 + [(set (match_operand:SI 0 "register_operand" "")
13158 + (match_operator:SI 5 "logical_shift_operator"
13159 + [(match_operand:SI 1 "register_operand" "")
13160 + (match_operand:SI 2 "immediate_operand" "")]))
13161 + (set (match_operand:SI 3 "register_operand" "")
13162 + (logical:SI (match_dup 0)
13163 + (match_operand:SI 4 "register_operand" "")))]
13164 + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
13166 + [(set (match_dup 3)
13167 + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
13174 +;;=============================================================================
13176 +;;-----------------------------------------------------------------------------
13177 +;; Store the result after a bitwise logical-and between reg0 and reg2 in reg0.
13178 +;;=============================================================================
13180 +(define_insn "andnsi"
13181 + [(set (match_operand:SI 0 "register_operand" "+r")
13182 + (and:SI (match_dup 0)
13183 + (not:SI (match_operand:SI 1 "register_operand" "r"))))]
13186 + [(set_attr "cc" "set_z")
13187 + (set_attr "length" "2")]
13191 +(define_insn "andsi3"
13192 + [(set (match_operand:SI 0 "register_operand" "=r, r, r, r")
13193 + (and:SI (match_operand:SI 1 "register_operand" "%0, r, 0, r")
13194 + (match_operand:SI 2 "nonmemory_operand" "r, M, i, r")))]
13197 + switch (which_alternative){
13199 + return "and\t%0, %2";
13202 + int i, first_set = -1;
13203 + /* Search for first bit set in mask */
13204 + for ( i = 31; i >= 0; --i )
13205 + if ( INTVAL(operands[2]) & (1 << i) ){
13209 + operands[2] = gen_rtx_CONST_INT(SImode, first_set + 1);
13210 + return "bfextu\t%0, %1, 0, %2";
13213 + if ( one_bit_cleared_operand(operands[2], VOIDmode) ){
13215 + for ( bitpos = 0; bitpos < 32; bitpos++ )
13216 + if ( !(INTVAL(operands[2]) & (1 << bitpos)) )
13218 + operands[2] = gen_rtx_CONST_INT(SImode, bitpos);
13219 + return "cbr\t%0, %2";
13220 + } else if ( (INTVAL(operands[2]) >= 0) &&
13221 + (INTVAL(operands[2]) <= 65535) )
13222 + return "andl\t%0, %2, COH";
13223 + else if ( (INTVAL(operands[2]) < 0) &&
13224 + (INTVAL(operands[2]) >= -65536 ) )
13225 + return "andl\t%0, lo(%2)";
13226 + else if ( ((INTVAL(operands[2]) & 0xffff) == 0xffff) )
13227 + return "andh\t%0, hi(%2)";
13228 + else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
13229 + return "andh\t%0, hi(%2), COH";
13231 + return "andh\t%0, hi(%2)\;andl\t%0, lo(%2)";
13233 + return "and\t%0, %1, %2";
13239 + [(set_attr "length" "2,4,8,4")
13240 + (set_attr "cc" "set_z")])
13245 +(define_insn "anddi3"
13246 + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
13247 + (and:DI (match_operand:DI 1 "register_operand" "%0,r")
13248 + (match_operand:DI 2 "register_operand" "r,r")))]
13251 + [(set_attr "length" "8")
13252 + (set_attr "cc" "clobber")]
13255 +;;=============================================================================
13257 +;;-----------------------------------------------------------------------------
13258 +;; Store the result after a bitwise inclusive-or between reg0 and reg2 in reg0.
13259 +;;=============================================================================
13261 +(define_insn "iorsi3"
13262 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
13263 + (ior:SI (match_operand:SI 1 "register_operand" "%0,0,r" )
13264 + (match_operand:SI 2 "nonmemory_operand" "r ,i,r")))]
13267 + switch (which_alternative){
13269 + return "or\t%0, %2";
13271 + if ( one_bit_set_operand(operands[2], VOIDmode) ){
13273 + for (bitpos = 0; bitpos < 32; bitpos++)
13274 + if (INTVAL(operands[2]) & (1 << bitpos))
13276 + operands[2] = gen_rtx_CONST_INT( SImode, bitpos);
13277 + return "sbr\t%0, %2";
13278 + } else if ( (INTVAL(operands[2]) >= 0) &&
13279 + (INTVAL(operands[2]) <= 65535) )
13280 + return "orl\t%0, %2";
13281 + else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
13282 + return "orh\t%0, hi(%2)";
13284 + return "orh\t%0, hi(%2)\;orl\t%0, lo(%2)";
13286 + return "or\t%0, %1, %2";
13291 + [(set_attr "length" "2,8,4")
13292 + (set_attr "cc" "set_z")])
13295 +(define_insn "iordi3"
13296 + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
13297 + (ior:DI (match_operand:DI 1 "register_operand" "%0,r")
13298 + (match_operand:DI 2 "register_operand" "r,r")))]
13301 + [(set_attr "length" "8")
13302 + (set_attr "cc" "clobber")]
13305 +;;=============================================================================
13307 +;;-----------------------------------------------------------------------------
13308 +;; Store the result after a bitwise exclusive-or between reg0 and reg2 in reg0.
13309 +;;=============================================================================
13311 +(define_insn "xorsi3"
13312 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
13313 + (xor:SI (match_operand:SI 1 "register_operand" "0,0,r")
13314 + (match_operand:SI 2 "nonmemory_operand" "r,i,r")))]
13317 + switch (which_alternative){
13319 + return "eor %0, %2";
13321 + if ( (INTVAL(operands[2]) >= 0) &&
13322 + (INTVAL(operands[2]) <= 65535) )
13323 + return "eorl %0, %2";
13324 + else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
13325 + return "eorh %0, hi(%2)";
13327 + return "eorh %0, hi(%2)\;eorl %0, lo(%2)";
13329 + return "eor %0, %1, %2";
13335 + [(set_attr "length" "2,8,4")
13336 + (set_attr "cc" "set_z")])
13339 +(define_insn "xordi3"
13340 + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
13341 + (xor:DI (match_operand:DI 1 "register_operand" "%0,r")
13342 + (match_operand:DI 2 "register_operand" "r,r")))]
13345 + [(set_attr "length" "8")
13346 + (set_attr "cc" "clobber")]
13349 +;;=============================================================================
13350 +;; Three operand predicable insns
13351 +;;=============================================================================
13353 +(define_insn "<predicable_insn3><mode>_predicable"
13354 + [(set (match_operand:INTM 0 "register_operand" "=r")
13355 + (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
13356 + (match_operand:INTM 2 "register_operand" "r")))]
13357 + "TARGET_V2_INSNS"
13358 + "<predicable_insn3>%?\t%0, %1, %2"
13359 + [(set_attr "length" "4")
13360 + (set_attr "cc" "cmp_cond_insn")
13361 + (set_attr "predicable" "yes")]
13364 +(define_insn_and_split "<predicable_insn3><mode>_imm_clobber_predicable"
13366 + [(set (match_operand:INTM 0 "register_operand" "=r")
13367 + (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
13368 + (match_operand:INTM 2 "avr32_mov_immediate_operand" "JKs21")))
13369 + (clobber (match_operand:INTM 3 "register_operand" "=&r"))])]
13370 + "TARGET_V2_INSNS"
13372 + if ( current_insn_predicate != NULL_RTX )
13374 + if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
13375 + return "%! mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
13376 + else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
13377 + return "%! mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
13379 + return "%! movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
13383 + if ( !avr32_cond_imm_clobber_splittable (insn, operands) )
13385 + if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
13386 + return "mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
13387 + else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
13388 + return "mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
13390 + return "movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
13396 + ;; If we find out that we could not actually do if-conversion on the block
13397 + ;; containing this insn we convert it back to normal immediate format
13398 + ;; to avoid outputting a redundant move insn
13399 + ;; Do not split until after we have checked if we can make the insn
13401 + "(GET_CODE (PATTERN (insn)) != COND_EXEC
13402 + && cfun->machine->ifcvt_after_reload
13403 + && avr32_cond_imm_clobber_splittable (insn, operands))"
13404 + [(set (match_dup 0)
13405 + (predicable_op3:INTM (match_dup 1)
13408 + [(set_attr "length" "8")
13409 + (set_attr "cc" "cmp_cond_insn")
13410 + (set_attr "predicable" "yes")]
13414 +;;=============================================================================
13415 +;; Zero extend predicable insns
13416 +;;=============================================================================
13417 +(define_insn_and_split "zero_extendhisi_clobber_predicable"
13419 + [(set (match_operand:SI 0 "register_operand" "=r")
13420 + (zero_extend:SI (match_operand:HI 1 "register_operand" "r")))
13421 + (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
13422 + "TARGET_V2_INSNS"
13424 + if ( current_insn_predicate != NULL_RTX )
13426 + return "%! mov\t%2, 0xffff\;and%?\t%0, %1, %2";
13434 + ;; If we find out that we could not actually do if-conversion on the block
13435 + ;; containing this insn we convert it back to normal immediate format
13436 + ;; to avoid outputting a redundant move insn
13437 + ;; Do not split until after we have checked if we can make the insn
13439 + "(GET_CODE (PATTERN (insn)) != COND_EXEC
13440 + && cfun->machine->ifcvt_after_reload)"
13441 + [(set (match_dup 0)
13442 + (zero_extend:SI (match_dup 1)))]
13444 + [(set_attr "length" "8")
13445 + (set_attr "cc" "cmp_cond_insn")
13446 + (set_attr "predicable" "yes")]
13449 +(define_insn_and_split "zero_extendqisi_clobber_predicable"
13451 + [(set (match_operand:SI 0 "register_operand" "=r")
13452 + (zero_extend:SI (match_operand:QI 1 "register_operand" "r")))
13453 + (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
13454 + "TARGET_V2_INSNS"
13456 + if ( current_insn_predicate != NULL_RTX )
13458 + return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
13466 + ;; If we find out that we could not actually do if-conversion on the block
13467 + ;; containing this insn we convert it back to normal immediate format
13468 + ;; to avoid outputting a redundant move insn
13469 + ;; Do not split until after we have checked if we can make the insn
13471 + "(GET_CODE (PATTERN (insn)) != COND_EXEC
13472 + && cfun->machine->ifcvt_after_reload)"
13473 + [(set (match_dup 0)
13474 + (zero_extend:SI (match_dup 1)))]
13476 + [(set_attr "length" "8")
13477 + (set_attr "cc" "cmp_cond_insn")
13478 + (set_attr "predicable" "yes")]
13481 +(define_insn_and_split "zero_extendqihi_clobber_predicable"
13483 + [(set (match_operand:HI 0 "register_operand" "=r")
13484 + (zero_extend:HI (match_operand:QI 1 "register_operand" "r")))
13485 + (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
13486 + "TARGET_V2_INSNS"
13488 + if ( current_insn_predicate != NULL_RTX )
13490 + return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
13498 + ;; If we find out that we could not actually do if-conversion on the block
13499 + ;; containing this insn we convert it back to normal immediate format
13500 + ;; to avoid outputting a redundant move insn
13501 + ;; Do not split until after we have checked if we can make the insn
13503 + "(GET_CODE (PATTERN (insn)) != COND_EXEC
13504 + && cfun->machine->ifcvt_after_reload)"
13505 + [(set (match_dup 0)
13506 + (zero_extend:HI (match_dup 1)))]
13508 + [(set_attr "length" "8")
13509 + (set_attr "cc" "cmp_cond_insn")
13510 + (set_attr "predicable" "yes")]
13512 +;;=============================================================================
13514 +;;-----------------------------------------------------------------------------
13515 +;; Signed division that produces both a quotient and a remainder.
13516 +;;=============================================================================
13517 +(define_expand "divmodsi4"
13520 + (set (match_operand:SI 0 "register_operand" "=r")
13521 + (div:SI (match_operand:SI 1 "register_operand" "r")
13522 + (match_operand:SI 2 "register_operand" "r")))
13523 + (set (match_operand:SI 3 "register_operand" "=r")
13524 + (mod:SI (match_dup 1)
13525 + (match_dup 2)))])
13526 + (use (match_dup 4))])]
13529 + if (can_create_pseudo_p ()) {
13530 + operands[4] = gen_reg_rtx (DImode);
13532 + emit_insn(gen_divmodsi4_internal(operands[4],operands[1],operands[2]));
13533 + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
13534 + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
13544 +(define_insn "divmodsi4_internal"
13545 + [(set (match_operand:DI 0 "register_operand" "=r")
13546 + (unspec:DI [(match_operand:SI 1 "register_operand" "r")
13547 + (match_operand:SI 2 "register_operand" "r")]
13548 + UNSPEC_DIVMODSI4_INTERNAL))]
13550 + "divs %0, %1, %2"
13551 + [(set_attr "type" "div")
13552 + (set_attr "cc" "none")])
13555 +;;=============================================================================
13557 +;;-----------------------------------------------------------------------------
13558 +;; Unsigned division that produces both a quotient and a remainder.
13559 +;;=============================================================================
13560 +(define_expand "udivmodsi4"
13563 + (set (match_operand:SI 0 "register_operand" "=r")
13564 + (udiv:SI (match_operand:SI 1 "register_operand" "r")
13565 + (match_operand:SI 2 "register_operand" "r")))
13566 + (set (match_operand:SI 3 "register_operand" "=r")
13567 + (umod:SI (match_dup 1)
13568 + (match_dup 2)))])
13569 + (use (match_dup 4))])]
13572 + if (can_create_pseudo_p ()) {
13573 + operands[4] = gen_reg_rtx (DImode);
13575 + emit_insn(gen_udivmodsi4_internal(operands[4],operands[1],operands[2]));
13576 + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
13577 + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
13585 +(define_insn "udivmodsi4_internal"
13586 + [(set (match_operand:DI 0 "register_operand" "=r")
13587 + (unspec:DI [(match_operand:SI 1 "register_operand" "r")
13588 + (match_operand:SI 2 "register_operand" "r")]
13589 + UNSPEC_UDIVMODSI4_INTERNAL))]
13591 + "divu %0, %1, %2"
13592 + [(set_attr "type" "div")
13593 + (set_attr "cc" "none")])
13596 +;;=============================================================================
13597 +;; Arithmetic-shift left
13598 +;;-----------------------------------------------------------------------------
13599 +;; Arithmetic-shift reg0 left by reg2 or immediate value.
13600 +;;=============================================================================
13602 +(define_insn "ashlsi3"
13603 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
13604 + (ashift:SI (match_operand:SI 1 "register_operand" "r,0,r")
13605 + (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
13611 + [(set_attr "length" "4,2,4")
13612 + (set_attr "cc" "set_ncz")])
13614 +;;=============================================================================
13615 +;; Arithmetic-shift right
13616 +;;-----------------------------------------------------------------------------
13617 +;; Arithmetic-shift reg0 right by an immediate value.
13618 +;;=============================================================================
13620 +(define_insn "ashrsi3"
13621 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
13622 + (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
13623 + (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
13629 + [(set_attr "length" "4,2,4")
13630 + (set_attr "cc" "set_ncz")])
13632 +;;=============================================================================
13633 +;; Logical shift right
13634 +;;-----------------------------------------------------------------------------
13635 +;; Logical shift reg0 right by an immediate value.
13636 +;;=============================================================================
13638 +(define_insn "lshrsi3"
13639 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
13640 + (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
13641 + (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
13647 + [(set_attr "length" "4,2,4")
13648 + (set_attr "cc" "set_ncz")])
13651 +;;=============================================================================
13653 +;;-----------------------------------------------------------------------------
13654 +;; Negate operand 1 and store the result in operand 0.
13655 +;;=============================================================================
13656 +(define_insn "negsi2"
13657 + [(set (match_operand:SI 0 "register_operand" "=r,r")
13658 + (neg:SI (match_operand:SI 1 "register_operand" "0,r")))]
13663 + [(set_attr "length" "2,4")
13664 + (set_attr "cc" "set_vncz")])
13666 +(define_insn "negsi2_predicable"
13667 + [(set (match_operand:SI 0 "register_operand" "+r")
13668 + (neg:SI (match_dup 0)))]
13669 + "TARGET_V2_INSNS"
13671 + [(set_attr "length" "4")
13672 + (set_attr "cc" "cmp_cond_insn")
13673 + (set_attr "predicable" "yes")])
13675 +;;=============================================================================
13677 +;;-----------------------------------------------------------------------------
13678 +;; Store the absolute value of operand 1 into operand 0.
13679 +;;=============================================================================
13680 +(define_insn "abssi2"
13681 + [(set (match_operand:SI 0 "register_operand" "=r")
13682 + (abs:SI (match_operand:SI 1 "register_operand" "0")))]
13685 + [(set_attr "length" "2")
13686 + (set_attr "cc" "set_z")])
13689 +;;=============================================================================
13691 +;;-----------------------------------------------------------------------------
13692 +;; Store the bitwise-complement of operand 1 into operand 0.
13693 +;;=============================================================================
13695 +(define_insn "one_cmplsi2"
13696 + [(set (match_operand:SI 0 "register_operand" "=r,r")
13697 + (not:SI (match_operand:SI 1 "register_operand" "r,0")))]
13702 + [(set_attr "length" "4,2")
13703 + (set_attr "cc" "set_z")])
13706 +(define_insn "one_cmplsi2_predicable"
13707 + [(set (match_operand:SI 0 "register_operand" "+r")
13708 + (not:SI (match_dup 0)))]
13709 + "TARGET_V2_INSNS"
13711 + [(set_attr "length" "4")
13712 + (set_attr "cc" "cmp_cond_insn")
13713 + (set_attr "predicable" "yes")])
13716 +;;=============================================================================
13718 +;;-----------------------------------------------------------------------------
13719 +;; Load a bit into Z and C flags
13720 +;;=============================================================================
13721 +(define_insn "bldsi"
13723 + (and:SI (match_operand:SI 0 "register_operand" "r")
13724 + (match_operand:SI 1 "one_bit_set_operand" "i")))]
13727 + [(set_attr "length" "4")
13728 + (set_attr "cc" "bld")]
13732 +;;=============================================================================
13734 +;;-----------------------------------------------------------------------------
13735 +;; Compare reg0 with reg1 or an immediate value.
13736 +;;=============================================================================
13738 +(define_expand "cmp<mode>"
13741 + (match_operand:CMP 0 "register_operand" "")
13742 + (match_operand:CMP 1 "<CMP:cmp_predicate>" "")))]
13745 + avr32_compare_op0 = operands[0];
13746 + avr32_compare_op1 = operands[1];
13750 +(define_insn "cmp<mode>_internal"
13753 + (match_operand:CMP 0 "register_operand" "r")
13754 + (match_operand:CMP 1 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")))]
13757 + /* Check if the next insn already will output a compare. */
13758 + if (!next_insn_emits_cmp (insn))
13759 + set_next_insn_cond(insn,
13760 + avr32_output_cmp(get_next_insn_cond(insn), GET_MODE (operands[0]), operands[0], operands[1]));
13763 + [(set_attr "length" "4")
13764 + (set_attr "cc" "compare")])
13767 +;;;=============================================================================
13769 +;;-----------------------------------------------------------------------------
13770 +;; Compare reg against zero and set the condition codes.
13771 +;;=============================================================================
13774 +(define_expand "tstsi"
13776 + (match_operand:SI 0 "register_operand" ""))]
13779 + avr32_compare_op0 = operands[0];
13780 + avr32_compare_op1 = const0_rtx;
13784 +(define_insn "tstsi_internal"
13786 + (match_operand:SI 0 "register_operand" "r"))]
13789 + /* Check if the next insn already will output a compare. */
13790 + if (!next_insn_emits_cmp (insn))
13791 + set_next_insn_cond(insn,
13792 + avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], const0_rtx));
13796 + [(set_attr "length" "2")
13797 + (set_attr "cc" "compare")])
13800 +(define_expand "tstdi"
13802 + (match_operand:DI 0 "register_operand" ""))]
13805 + avr32_compare_op0 = operands[0];
13806 + avr32_compare_op1 = const0_rtx;
13810 +(define_insn "tstdi_internal"
13812 + (match_operand:DI 0 "register_operand" "r"))]
13815 + /* Check if the next insn already will output a compare. */
13816 + if (!next_insn_emits_cmp (insn))
13817 + set_next_insn_cond(insn,
13818 + avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], const0_rtx));
13821 + [(set_attr "length" "4")
13822 + (set_attr "type" "alu2")
13823 + (set_attr "cc" "compare")])
13827 +;;=============================================================================
13828 +;; Convert operands
13829 +;;-----------------------------------------------------------------------------
13831 +;;=============================================================================
13832 +(define_insn "truncdisi2"
13833 + [(set (match_operand:SI 0 "general_operand" "")
13834 + (truncate:SI (match_operand:DI 1 "general_operand" "")))]
13838 +;;=============================================================================
13840 +;;-----------------------------------------------------------------------------
13842 +;;=============================================================================
13845 +(define_insn "extendhisi2"
13846 + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
13847 + (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
13850 + switch ( which_alternative ){
13852 + return "casts.h\t%0";
13854 + return "bfexts\t%0, %1, 0, 16";
13857 + return "ld.sh\t%0, %1";
13862 + [(set_attr "length" "2,4,2,4")
13863 + (set_attr "cc" "set_ncz,set_ncz,none,none")
13864 + (set_attr "type" "alu,alu,load_rm,load_rm")])
13866 +(define_insn "extendqisi2"
13867 + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
13868 + (sign_extend:SI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
13871 + switch ( which_alternative ){
13873 + return "casts.b\t%0";
13875 + return "bfexts\t%0, %1, 0, 8";
13878 + return "ld.sb\t%0, %1";
13883 + [(set_attr "length" "2,4,2,4")
13884 + (set_attr "cc" "set_ncz,set_ncz,none,none")
13885 + (set_attr "type" "alu,alu,load_rm,load_rm")])
13887 +(define_insn "extendqihi2"
13888 + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
13889 + (sign_extend:HI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
13892 + switch ( which_alternative ){
13894 + return "casts.b\t%0";
13896 + return "bfexts\t%0, %1, 0, 8";
13899 + return "ld.sb\t%0, %1";
13904 + [(set_attr "length" "2,4,2,4")
13905 + (set_attr "cc" "set_ncz,set_ncz,none,none")
13906 + (set_attr "type" "alu,alu,load_rm,load_rm")])
13909 +;;=============================================================================
13911 +;;-----------------------------------------------------------------------------
13913 +;;=============================================================================
13915 +(define_insn "zero_extendhisi2"
13916 + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
13917 + (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
13920 + switch ( which_alternative ){
13922 + return "castu.h\t%0";
13924 + return "bfextu\t%0, %1, 0, 16";
13927 + return "ld.uh\t%0, %1";
13933 + [(set_attr "length" "2,4,2,4")
13934 + (set_attr "cc" "set_ncz,set_ncz,none,none")
13935 + (set_attr "type" "alu,alu,load_rm,load_rm")])
13937 +(define_insn "zero_extendqisi2"
13938 + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
13939 + (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
13942 + switch ( which_alternative ){
13944 + return "castu.b\t%0";
13946 + return "bfextu\t%0, %1, 0, 8";
13949 + return "ld.ub\t%0, %1";
13954 + [(set_attr "length" "2,4,2,4")
13955 + (set_attr "cc" "set_ncz, set_ncz, none, none")
13956 + (set_attr "type" "alu, alu, load_rm, load_rm")])
13958 +(define_insn "zero_extendqihi2"
13959 + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
13960 + (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
13963 + switch ( which_alternative ){
13965 + return "castu.b\t%0";
13967 + return "bfextu\t%0, %1, 0, 8";
13970 + return "ld.ub\t%0, %1";
13975 + [(set_attr "length" "2,4,2,4")
13976 + (set_attr "cc" "set_ncz, set_ncz, none, none")
13977 + (set_attr "type" "alu, alu, load_rm, load_rm")])
13980 +;;=============================================================================
13981 +;; Conditional load and extend insns
13982 +;;=============================================================================
13983 +(define_insn "ldsi<mode>_predicable_se"
13984 + [(set (match_operand:SI 0 "register_operand" "=r")
13986 + (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
13987 + "TARGET_V2_INSNS"
13988 + "ld<INTM:load_postfix_s>%?\t%0, %1"
13989 + [(set_attr "length" "4")
13990 + (set_attr "cc" "cmp_cond_insn")
13991 + (set_attr "type" "load")
13992 + (set_attr "predicable" "yes")]
13995 +(define_insn "ldsi<mode>_predicable_ze"
13996 + [(set (match_operand:SI 0 "register_operand" "=r")
13998 + (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
13999 + "TARGET_V2_INSNS"
14000 + "ld<INTM:load_postfix_u>%?\t%0, %1"
14001 + [(set_attr "length" "4")
14002 + (set_attr "cc" "cmp_cond_insn")
14003 + (set_attr "type" "load")
14004 + (set_attr "predicable" "yes")]
14007 +(define_insn "ldhi_predicable_ze"
14008 + [(set (match_operand:HI 0 "register_operand" "=r")
14010 + (match_operand:QI 1 "memory_operand" "RKs10")))]
14011 + "TARGET_V2_INSNS"
14012 + "ld.ub%?\t%0, %1"
14013 + [(set_attr "length" "4")
14014 + (set_attr "cc" "cmp_cond_insn")
14015 + (set_attr "type" "load")
14016 + (set_attr "predicable" "yes")]
14019 +(define_insn "ldhi_predicable_se"
14020 + [(set (match_operand:HI 0 "register_operand" "=r")
14022 + (match_operand:QI 1 "memory_operand" "RKs10")))]
14023 + "TARGET_V2_INSNS"
14024 + "ld.sb%?\t%0, %1"
14025 + [(set_attr "length" "4")
14026 + (set_attr "cc" "cmp_cond_insn")
14027 + (set_attr "type" "load")
14028 + (set_attr "predicable" "yes")]
14031 +;;=============================================================================
14032 +;; Conditional set register
14034 +;;-----------------------------------------------------------------------------
14036 +;;Because of the same issue as with conditional moves and adds we must
14037 +;;not separate the compare instruction from the scc instruction as
14038 +;;they might be scheduled "badly".
14040 +(define_insn "s<code>"
14041 + [(set (match_operand:SI 0 "register_operand" "=r")
14042 + (any_cond:SI (cc0)
14046 + [(set_attr "length" "2")
14047 + (set_attr "cc" "none")])
14049 +(define_insn "smi"
14050 + [(set (match_operand:SI 0 "register_operand" "=r")
14051 + (unspec:SI [(cc0)
14052 + (const_int 0)] UNSPEC_COND_MI))]
14055 + [(set_attr "length" "2")
14056 + (set_attr "cc" "none")])
14058 +(define_insn "spl"
14059 + [(set (match_operand:SI 0 "register_operand" "=r")
14060 + (unspec:SI [(cc0)
14061 + (const_int 0)] UNSPEC_COND_PL))]
14064 + [(set_attr "length" "2")
14065 + (set_attr "cc" "none")])
14068 +;;=============================================================================
14069 +;; Conditional branch
14070 +;;-----------------------------------------------------------------------------
14071 +;; Branch to label if the specified condition codes are set.
14072 +;;=============================================================================
14073 +; branch if negative
14074 +(define_insn "bmi"
14076 + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
14077 + (label_ref (match_operand 0 "" ""))
14081 + [(set_attr "type" "branch")
14082 + (set (attr "length")
14083 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
14084 + (le (minus (pc) (match_dup 0)) (const_int 256)))
14085 + (const_int 2)] ; use compact branch
14086 + (const_int 4))) ; use extended branch
14087 + (set_attr "cc" "none")])
14089 +(define_insn "*bmi-reverse"
14091 + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
14093 + (label_ref (match_operand 0 "" ""))))]
14096 + [(set_attr "type" "branch")
14097 + (set (attr "length")
14098 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
14099 + (le (minus (pc) (match_dup 0)) (const_int 256)))
14100 + (const_int 2)] ; use compact branch
14101 + (const_int 4))) ; use extended branch
14102 + (set_attr "cc" "none")])
14104 +; branch if positive
14105 +(define_insn "bpl"
14107 + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
14108 + (label_ref (match_operand 0 "" ""))
14112 + [(set_attr "type" "branch")
14113 + (set (attr "length")
14114 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
14115 + (le (minus (pc) (match_dup 0)) (const_int 256)))
14116 + (const_int 2)] ; use compact branch
14117 + (const_int 4))) ; use extended branch
14118 + (set_attr "cc" "none")])
14120 +(define_insn "*bpl-reverse"
14122 + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
14124 + (label_ref (match_operand 0 "" ""))))]
14127 + [(set_attr "type" "branch")
14128 + (set (attr "length")
14129 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
14130 + (le (minus (pc) (match_dup 0)) (const_int 256)))
14131 + (const_int 2)] ; use compact branch
14132 + (const_int 4))) ; use extended branch
14133 + (set_attr "cc" "none")])
14136 +(define_insn "b<code>"
14138 + (if_then_else (any_cond:CC (cc0)
14140 + (label_ref (match_operand 0 "" ""))
14144 + [(set_attr "type" "branch")
14145 + (set (attr "length")
14146 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
14147 + (le (minus (pc) (match_dup 0)) (const_int 256)))
14148 + (const_int 2)] ; use compact branch
14149 + (const_int 4))) ; use extended branch
14150 + (set_attr "cc" "none")])
14153 +(define_insn "*b<code>-reverse"
14155 + (if_then_else (any_cond:CC (cc0)
14158 + (label_ref (match_operand 0 "" ""))))]
14160 + "br<invcond> %0 "
14161 + [(set_attr "type" "branch")
14162 + (set (attr "length")
14163 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
14164 + (le (minus (pc) (match_dup 0)) (const_int 256)))
14165 + (const_int 2)] ; use compact branch
14166 + (const_int 4))) ; use extended branch
14167 + (set_attr "cc" "none")])
14171 +;=============================================================================
14172 +; Conditional Add/Subtract
14173 +;-----------------------------------------------------------------------------
14174 +; sub{cond4} Rd, imm
14175 +;=============================================================================
14178 +(define_expand "add<mode>cc"
14179 + [(set (match_operand:ADDCC 0 "register_operand" "")
14180 + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
14183 + (match_operand:ADDCC 2 "register_operand" "")
14186 + (match_operand:ADDCC 3 "" ""))))]
14189 + if ( !(GET_CODE (operands[3]) == CONST_INT
14190 + || (TARGET_V2_INSNS && REG_P(operands[3]))) ){
14194 + /* Delete compare instruction as it is merged into this instruction */
14195 + remove_insn (get_last_insn_anywhere ());
14197 + operands[4] = avr32_compare_op0;
14198 + operands[5] = avr32_compare_op1;
14200 + if ( TARGET_V2_INSNS
14201 + && REG_P(operands[3])
14202 + && REGNO(operands[0]) != REGNO(operands[2]) ){
14203 + emit_move_insn (operands[0], operands[2]);
14204 + operands[2] = operands[0];
14209 +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>_reg"
14210 + [(set (match_operand:ADDCC 0 "register_operand" "=r")
14211 + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
14212 + [(match_operand:CMP 4 "register_operand" "r")
14213 + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
14216 + (match_operand:ADDCC 2 "register_operand" "r")
14217 + (match_operand:ADDCC 3 "register_operand" "r"))))]
14218 + "TARGET_V2_INSNS"
14220 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
14221 + return "add%i1\t%0, %2, %3";
14223 + [(set_attr "length" "8")
14224 + (set_attr "cc" "cmp_cond_insn")])
14226 +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>"
14227 + [(set (match_operand:ADDCC 0 "register_operand" "=r")
14228 + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
14229 + [(match_operand:CMP 4 "register_operand" "r")
14230 + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
14231 + (match_operand:ADDCC 2 "register_operand" "0")
14234 + (match_operand:ADDCC 3 "avr32_cond_immediate_operand" "Is08"))))]
14237 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
14238 + return "sub%i1\t%0, -%3";
14240 + [(set_attr "length" "8")
14241 + (set_attr "cc" "cmp_cond_insn")])
14243 +;=============================================================================
14244 +; Conditional Move
14245 +;-----------------------------------------------------------------------------
14246 +; mov{cond4} Rd, (Rs/imm)
14247 +;=============================================================================
14248 +(define_expand "mov<mode>cc"
14249 + [(set (match_operand:MOVCC 0 "register_operand" "")
14250 + (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
14253 + (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "")
14254 + (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "")))]
14257 + /* Delete compare instruction as it is merged into this instruction */
14258 + remove_insn (get_last_insn_anywhere ());
14260 + operands[4] = avr32_compare_op0;
14261 + operands[5] = avr32_compare_op1;
14266 +(define_insn "mov<MOVCC:mode>cc_cmp<CMP:mode>"
14267 + [(set (match_operand:MOVCC 0 "register_operand" "=r,r,r")
14268 + (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
14269 + [(match_operand:CMP 4 "register_operand" "r,r,r")
14270 + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>,<CMP:cmp_constraint>,<CMP:cmp_constraint>")])
14271 + (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "0, rKs08,rKs08")
14272 + (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "rKs08,0,rKs08")))]
14275 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
14277 + switch( which_alternative ){
14279 + return "mov%i1 %0, %3";
14281 + return "mov%1 %0, %2";
14283 + return "mov%1 %0, %2\;mov%i1 %0, %3";
14289 + [(set_attr "length" "8,8,12")
14290 + (set_attr "cc" "cmp_cond_insn")])
14295 +;;=============================================================================
14297 +;;-----------------------------------------------------------------------------
14298 +;; Jump inside a function; an unconditional branch to a label.
14299 +;;=============================================================================
14300 +(define_insn "jump"
14302 + (label_ref (match_operand 0 "" "")))]
14305 + if (get_attr_length(insn) > 4)
14306 + return "Can't jump this far";
14307 + return (get_attr_length(insn) == 2 ?
14308 + "rjmp %0" : "bral %0");
14310 + [(set_attr "type" "branch")
14311 + (set (attr "length")
14312 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 1022))
14313 + (le (minus (pc) (match_dup 0)) (const_int 1024)))
14314 + (const_int 2) ; use rjmp
14315 + (le (match_dup 0) (const_int 1048575))
14316 + (const_int 4)] ; use bral
14317 + (const_int 8))) ; do something else
14318 + (set_attr "cc" "none")])
14320 +;;=============================================================================
14322 +;;-----------------------------------------------------------------------------
14323 +;; Subroutine call instruction returning no value.
14324 +;;=============================================================================
14325 +(define_insn "call_internal"
14326 + [(parallel [(call (mem:SI (match_operand:SI 0 "avr32_call_operand" "r,U,T,W"))
14327 + (match_operand 1 "" ""))
14328 + (clobber (reg:SI LR_REGNUM))])]
14331 + switch (which_alternative){
14333 + return "icall\t%0";
14335 + return "rcall\t%0";
14337 + return "mcall\t%0";
14339 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
14340 + return "call\t%0";
14342 + return "mcall\tr6[%0@got]";
14347 + [(set_attr "type" "call")
14348 + (set_attr "length" "2,4,4,10")
14349 + (set_attr "cc" "clobber")])
14352 +(define_expand "call"
14353 + [(parallel [(call (match_operand:SI 0 "" "")
14354 + (match_operand 1 "" ""))
14355 + (clobber (reg:SI LR_REGNUM))])]
14358 + rtx call_address;
14359 + if ( GET_CODE(operands[0]) != MEM )
14362 + call_address = XEXP(operands[0], 0);
14364 + /* If assembler supports call pseudo insn and the call
14365 + address is a symbol then nothing special needs to be done. */
14366 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS
14367 + && (GET_CODE(call_address) == SYMBOL_REF) ){
14368 + /* We must however mark the function as using the GOT if
14369 + flag_pic is set, since the call insn might turn into
14370 + a mcall using the GOT ptr register. */
14372 + current_function_uses_pic_offset_table = 1;
14373 + emit_call_insn(gen_call_internal(call_address, operands[1]));
14378 + GET_CODE(call_address) == SYMBOL_REF ){
14379 + current_function_uses_pic_offset_table = 1;
14380 + emit_call_insn(gen_call_internal(call_address, operands[1]));
14384 + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[0]) ){
14385 + if ( optimize_size &&
14386 + GET_CODE(call_address) == SYMBOL_REF ){
14387 + call_address = force_const_mem(SImode, call_address);
14389 + call_address = force_reg(SImode, call_address);
14393 + emit_call_insn(gen_call_internal(call_address, operands[1]));
14398 +;;=============================================================================
14400 +;;-----------------------------------------------------------------------------
14401 +;; Subroutine call instruction returning a value.
14402 +;;=============================================================================
14403 +(define_expand "call_value"
14404 + [(parallel [(set (match_operand:SI 0 "" "")
14405 + (call (match_operand:SI 1 "" "")
14406 + (match_operand 2 "" "")))
14407 + (clobber (reg:SI LR_REGNUM))])]
14410 + rtx call_address;
14411 + if ( GET_CODE(operands[1]) != MEM )
14414 + call_address = XEXP(operands[1], 0);
14416 + /* If assembler supports call pseudo insn and the call
14417 + address is a symbol then nothing special needs to be done. */
14418 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS
14419 + && (GET_CODE(call_address) == SYMBOL_REF) ){
14420 + /* We must however mark the function as using the GOT if
14421 + flag_pic is set, since the call insn might turn into
14422 + a mcall using the GOT ptr register. */
14423 + if ( flag_pic ) {
14424 + current_function_uses_pic_offset_table = 1;
14425 + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
14430 + GET_CODE(call_address) == SYMBOL_REF ){
14431 + current_function_uses_pic_offset_table = 1;
14432 + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
14436 + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[1]) ){
14437 + if ( optimize_size &&
14438 + GET_CODE(call_address) == SYMBOL_REF){
14439 + call_address = force_const_mem(SImode, call_address);
14441 + call_address = force_reg(SImode, call_address);
14445 + emit_call_insn(gen_call_value_internal(operands[0], call_address,
14451 +(define_insn "call_value_internal"
14452 + [(parallel [(set (match_operand 0 "register_operand" "=r,r,r,r")
14453 + (call (mem:SI (match_operand:SI 1 "avr32_call_operand" "r,U,T,W"))
14454 + (match_operand 2 "" "")))
14455 + (clobber (reg:SI LR_REGNUM))])]
14456 + ;; Operand 2 not used on the AVR32.
14459 + switch (which_alternative){
14461 + return "icall\t%1";
14463 + return "rcall\t%1";
14465 + return "mcall\t%1";
14467 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
14468 + return "call\t%1";
14470 + return "mcall\tr6[%1@got]";
14475 + [(set_attr "type" "call")
14476 + (set_attr "length" "2,4,4,10")
14477 + (set_attr "cc" "call_set")])
14480 +;;=============================================================================
14482 +;;-----------------------------------------------------------------------------
14483 +;; Subroutine call instruction returning a value of any type.
14484 +;; The code is copied from m68k.md (except gen_blockage is removed)
14486 +;;=============================================================================
14487 +(define_expand "untyped_call"
14488 + [(parallel [(call (match_operand 0 "avr32_call_operand" "")
14490 + (match_operand 1 "" "")
14491 + (match_operand 2 "" "")])]
14496 + emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
14498 + for (i = 0; i < XVECLEN (operands[2], 0); i++) {
14499 + rtx set = XVECEXP (operands[2], 0, i);
14500 + emit_move_insn (SET_DEST (set), SET_SRC (set));
14503 + /* The optimizer does not know that the call sets the function value
14504 + registers we stored in the result block. We avoid problems by
14505 + claiming that all hard registers are used and clobbered at this
14507 + emit_insn (gen_blockage ());
14513 +;;=============================================================================
14515 +;;=============================================================================
14517 +(define_insn "return"
14519 + "USE_RETURN_INSN (FALSE)"
14521 + avr32_output_return_instruction(TRUE, FALSE, NULL, NULL);
14524 + [(set_attr "length" "4")
14525 + (set_attr "type" "call")]
14529 +(define_insn "return_cond"
14531 + (if_then_else (match_operand 0 "avr32_comparison_operand" "")
14534 + "USE_RETURN_INSN (TRUE)"
14536 + [(set_attr "type" "call")])
14538 +(define_insn "return_cond_predicable"
14540 + "USE_RETURN_INSN (TRUE)"
14542 + [(set_attr "type" "call")
14543 + (set_attr "predicable" "yes")])
14546 +(define_insn "return_imm"
14547 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
14548 + (use (reg RETVAL_REGNUM))
14550 + "USE_RETURN_INSN (FALSE) &&
14551 + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
14553 + avr32_output_return_instruction(TRUE, FALSE, NULL, operands[0]);
14556 + [(set_attr "length" "4")
14557 + (set_attr "type" "call")]
14560 +(define_insn "return_imm_cond"
14561 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
14562 + (use (reg RETVAL_REGNUM))
14564 + (if_then_else (match_operand 1 "avr32_comparison_operand" "")
14567 + "USE_RETURN_INSN (TRUE) &&
14568 + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
14570 + [(set_attr "type" "call")]
14573 +(define_insn "return_imm_predicable"
14574 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
14575 + (use (reg RETVAL_REGNUM))
14577 + "USE_RETURN_INSN (TRUE) &&
14578 + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
14580 + [(set_attr "type" "call")
14581 + (set_attr "predicable" "yes")])
14583 +(define_insn "return_<mode>reg"
14584 + [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
14585 + (use (reg RETVAL_REGNUM))
14587 + "USE_RETURN_INSN (TRUE)"
14589 + [(set_attr "type" "call")
14590 + (set_attr "predicable" "yes")])
14592 +(define_insn "return_<mode>reg_cond"
14593 + [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
14594 + (use (reg RETVAL_REGNUM))
14596 + (if_then_else (match_operator 1 "avr32_comparison_operator"
14597 + [(cc0) (const_int 0)])
14600 + "USE_RETURN_INSN (TRUE)"
14602 + [(set_attr "type" "call")])
14604 +;;=============================================================================
14606 +;;-----------------------------------------------------------------------------
14607 +;; No-op instruction.
14608 +;;=============================================================================
14609 +(define_insn "nop"
14613 + [(set_attr "length" "2")
14614 + (set_attr "type" "alu")
14615 + (set_attr "cc" "none")])
14617 +;;=============================================================================
14618 +;; nonlocal_goto_receiver
14619 +;;-----------------------------------------------------------------------------
14620 +;; For targets with a return stack we must make sure to flush the return stack
14621 +;; since it will be corrupt after a nonlocal goto.
14622 +;;=============================================================================
14623 +(define_expand "nonlocal_goto_receiver"
14625 + "TARGET_RETURN_STACK"
14628 + emit_insn ( gen_frs() );
14635 +;;=============================================================================
14636 +;; builtin_setjmp_receiver
14637 +;;-----------------------------------------------------------------------------
14638 +;; For pic code we need to reload the pic register.
14639 +;; For targets with a return stack we must make sure to flush the return stack
14640 +;; since it will probably be corrupted.
14641 +;;=============================================================================
14642 +(define_expand "builtin_setjmp_receiver"
14643 + [(label_ref (match_operand 0 "" ""))]
14647 + if ( TARGET_RETURN_STACK )
14648 + emit_insn ( gen_frs() );
14650 + avr32_load_pic_register ();
14657 +;;=============================================================================
14659 +;;-----------------------------------------------------------------------------
14660 +;; Jump to an address in reg or memory.
14661 +;;=============================================================================
14662 +(define_expand "indirect_jump"
14664 + (match_operand:SI 0 "general_operand" ""))]
14667 + /* One of the ops has to be in a register. */
14668 + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS )
14669 + && !avr32_legitimate_pic_operand_p(operands[0]) )
14670 + operands[0] = legitimize_pic_address (operands[0], SImode, 0);
14671 + else if ( flag_pic && avr32_address_operand(operands[0], GET_MODE(operands[0])) )
14672 + /* If we have an address operand then this function uses the pic register. */
14673 + current_function_uses_pic_offset_table = 1;
14677 +(define_insn "indirect_jump_internal"
14679 + (match_operand:SI 0 "general_operand" "r,m,W"))]
14682 + switch( which_alternative ){
14684 + return "mov\tpc, %0";
14686 + if ( avr32_const_pool_ref_operand(operands[0], GET_MODE(operands[0])) )
14687 + return "lddpc\tpc, %0";
14689 + return "ld.w\tpc, %0";
14692 + return "ld.w\tpc, r6[%0@got]";
14694 + return "lda.w\tpc, %0";
14699 + [(set_attr "length" "2,4,8")
14700 + (set_attr "type" "call,call,call")
14701 + (set_attr "cc" "none,none,clobber")])
14705 +;;=============================================================================
14706 +;; casesi and tablejump
14707 +;;=============================================================================
14708 +(define_insn "tablejump_add"
14710 + (plus:SI (match_operand:SI 0 "register_operand" "r")
14711 + (mult:SI (match_operand:SI 1 "register_operand" "r")
14712 + (match_operand:SI 2 "immediate_operand" "Ku04" ))))
14713 + (use (label_ref (match_operand 3 "" "")))]
14715 + ((INTVAL(operands[2]) == 0) || (INTVAL(operands[2]) == 2) ||
14716 + (INTVAL(operands[2]) == 4) || (INTVAL(operands[2]) == 8))"
14717 + "add\tpc, %0, %1 << %p2"
14718 + [(set_attr "length" "4")
14719 + (set_attr "cc" "clobber")])
14721 +(define_insn "tablejump_insn"
14722 + [(set (pc) (match_operand:SI 0 "memory_operand" "m"))
14723 + (use (label_ref (match_operand 1 "" "")))]
14726 + [(set_attr "length" "4")
14727 + (set_attr "type" "call")
14728 + (set_attr "cc" "none")])
14730 +(define_expand "casesi"
14731 + [(match_operand:SI 0 "register_operand" "") ; index to jump on
14732 + (match_operand:SI 1 "const_int_operand" "") ; lower bound
14733 + (match_operand:SI 2 "const_int_operand" "") ; total range
14734 + (match_operand:SI 3 "" "") ; table label
14735 + (match_operand:SI 4 "" "")] ; Out of range label
14740 + rtx index = operands[0];
14741 + rtx low_bound = operands[1];
14742 + rtx range = operands[2];
14743 + rtx table_label = operands[3];
14744 + rtx oor_label = operands[4];
14746 + index = force_reg ( SImode, index );
14747 + if (low_bound != const0_rtx)
14749 + if (!avr32_const_ok_for_constraint_p(INTVAL (low_bound), 'I', \"Is21\")){
14750 + reg = force_reg(SImode, GEN_INT (INTVAL (low_bound)));
14751 + emit_insn (gen_subsi3 (reg, index,
14754 + reg = gen_reg_rtx (SImode);
14755 + emit_insn (gen_addsi3 (reg, index,
14756 + GEN_INT (-INTVAL (low_bound))));
14761 + if (!avr32_const_ok_for_constraint_p (INTVAL (range), 'K', \"Ks21\"))
14762 + range = force_reg (SImode, range);
14764 + emit_cmp_and_jump_insns ( index, range, GTU, NULL_RTX, SImode, 1, oor_label );
14765 + reg = gen_reg_rtx (SImode);
14766 + emit_move_insn ( reg, gen_rtx_LABEL_REF (VOIDmode, table_label));
14769 + emit_jump_insn ( gen_tablejump_add ( reg, index, GEN_INT(4), table_label));
14772 + gen_tablejump_insn ( gen_rtx_MEM ( SImode,
14773 + gen_rtx_PLUS ( SImode,
14775 + gen_rtx_MULT ( SImode,
14785 +(define_insn "prefetch"
14786 + [(prefetch (match_operand:SI 0 "avr32_ks16_address_operand" "p")
14787 + (match_operand 1 "const_int_operand" "")
14788 + (match_operand 2 "const_int_operand" ""))]
14791 + return "pref\t%0";
14794 + [(set_attr "length" "4")
14795 + (set_attr "type" "load")
14796 + (set_attr "cc" "none")])
14800 +;;=============================================================================
14802 +;;-----------------------------------------------------------------------------
14803 +;; This pattern, if defined, emits RTL for entry to a function. The function
14804 +;; entry is responsible for setting up the stack frame, initializing the frame
14805 +;; pointer register, saving callee saved registers, etc.
14806 +;;=============================================================================
14807 +(define_expand "prologue"
14808 + [(clobber (const_int 0))]
14811 + avr32_expand_prologue();
14816 +;;=============================================================================
14818 +;;-----------------------------------------------------------------------------
14819 +;; This pattern, if defined, affects the way __builtin_eh_return, and
14820 +;; thence the call frame exception handling library routines, are
14821 +;; built. It is intended to handle non-trivial actions needed along
14822 +;; the abnormal return path.
14824 +;; The address of the exception handler to which the function should
14825 +;; return is passed as operand to this pattern. It will normally need
14826 +;; to be copied by the pattern to some special register or memory
14827 +;; location. If the pattern needs to determine the location of the
14828 +;; target call frame in order to do so, it may use
14829 +;; EH_RETURN_STACKADJ_RTX, if defined; it will have already been
14832 +;; If this pattern is not defined, the default action will be to
14833 +;; simply copy the return address to EH_RETURN_HANDLER_RTX. Either
14834 +;; that macro or this pattern needs to be defined if call frame
14835 +;; exception handling is to be used.
14837 +;; We can't expand this before we know where the link register is stored.
14838 +(define_insn_and_split "eh_return"
14839 + [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")]
14840 + VUNSPEC_EH_RETURN)
14841 + (clobber (match_scratch:SI 1 "=&r"))]
14844 + "reload_completed"
14848 + avr32_set_return_address (operands[0], operands[1]);
14854 +;;=============================================================================
14856 +;;-----------------------------------------------------------------------------
14857 +(define_insn "ffssi2"
14858 + [ (set (match_operand:SI 0 "register_operand" "=r")
14859 + (ffs:SI (match_operand:SI 1 "register_operand" "r"))) ]
14867 + [(set_attr "length" "18")
14868 + (set_attr "cc" "clobber")]
14873 +;;=============================================================================
14875 +;;-----------------------------------------------------------------------------
14876 +(define_insn "*swap_h"
14877 + [ (set (match_operand:SI 0 "register_operand" "=r")
14878 + (ior:SI (ashift:SI (match_dup 0) (const_int 16))
14879 + (lshiftrt:SI (match_dup 0) (const_int 16))))]
14882 + [(set_attr "length" "2")]
14885 +(define_insn_and_split "bswap_16"
14886 + [ (set (match_operand:HI 0 "avr32_bswap_operand" "=r,RKs13,r")
14887 + (ior:HI (and:HI (lshiftrt:HI (match_operand:HI 1 "avr32_bswap_operand" "r,r,RKs13")
14890 + (ashift:HI (and:HI (match_dup 1)
14892 + (const_int 8))))]
14895 + switch ( which_alternative ){
14897 + if ( REGNO(operands[0]) == REGNO(operands[1]))
14898 + return "swap.bh\t%0";
14900 + return "mov\t%0, %1\;swap.bh\t%0";
14902 + return "stswp.h\t%0, %1";
14904 + return "ldswp.sh\t%0, %1";
14910 + "(reload_completed &&
14911 + REG_P(operands[0]) && REG_P(operands[1])
14912 + && (REGNO(operands[0]) != REGNO(operands[1])))"
14913 + [(set (match_dup 0) (match_dup 1))
14914 + (set (match_dup 0)
14915 + (ior:HI (and:HI (lshiftrt:HI (match_dup 0)
14918 + (ashift:HI (and:HI (match_dup 0)
14920 + (const_int 8))))]
14923 + [(set_attr "length" "4,4,4")
14924 + (set_attr "type" "alu,store,load_rm")]
14927 +(define_insn_and_split "bswap_32"
14928 + [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r")
14929 + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "r,r,RKs14")
14930 + (const_int -16777216))
14932 + (lshiftrt:SI (and:SI (match_dup 1)
14933 + (const_int 16711680))
14935 + (ior:SI (ashift:SI (and:SI (match_dup 1)
14936 + (const_int 65280))
14938 + (ashift:SI (and:SI (match_dup 1)
14940 + (const_int 24)))))]
14943 + switch ( which_alternative ){
14945 + if ( REGNO(operands[0]) == REGNO(operands[1]))
14946 + return "swap.b\t%0";
14950 + return "stswp.w\t%0, %1";
14952 + return "ldswp.w\t%0, %1";
14957 + "(reload_completed &&
14958 + REG_P(operands[0]) && REG_P(operands[1])
14959 + && (REGNO(operands[0]) != REGNO(operands[1])))"
14960 + [(set (match_dup 0) (match_dup 1))
14961 + (set (match_dup 0)
14962 + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0)
14963 + (const_int -16777216))
14965 + (lshiftrt:SI (and:SI (match_dup 0)
14966 + (const_int 16711680))
14968 + (ior:SI (ashift:SI (and:SI (match_dup 0)
14969 + (const_int 65280))
14971 + (ashift:SI (and:SI (match_dup 0)
14973 + (const_int 24)))))]
14976 + [(set_attr "length" "4,4,4")
14977 + (set_attr "type" "alu,store,load_rm")]
14981 +;;=============================================================================
14983 +;;-----------------------------------------------------------------------------
14984 +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
14985 +;; all of memory. This blocks insns from being moved across this point.
14987 +(define_insn "blockage"
14988 + [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
14991 + [(set_attr "length" "0")]
14994 +;;=============================================================================
14996 +;;-----------------------------------------------------------------------------
14997 +(define_insn "clzsi2"
14998 + [ (set (match_operand:SI 0 "register_operand" "=r")
14999 + (clz:SI (match_operand:SI 1 "register_operand" "r"))) ]
15002 + [(set_attr "length" "4")
15003 + (set_attr "cc" "set_z")]
15006 +;;=============================================================================
15008 +;;-----------------------------------------------------------------------------
15009 +(define_insn "ctzsi2"
15010 + [ (set (match_operand:SI 0 "register_operand" "=r,r")
15011 + (ctz:SI (match_operand:SI 1 "register_operand" "0,r"))) ]
15014 + brev\t%0\;clz\t%0, %0
15015 + mov\t%0, %1\;brev\t%0\;clz\t%0, %0"
15016 + [(set_attr "length" "8")
15017 + (set_attr "cc" "set_z")]
15020 +;;=============================================================================
15021 +;; cache instructions
15022 +;;-----------------------------------------------------------------------------
15023 +(define_insn "cache"
15024 + [ (unspec_volatile [(match_operand:SI 0 "avr32_ks11_address_operand" "p")
15025 + (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)]
15028 + [(set_attr "length" "4")]
15031 +(define_insn "sync"
15032 + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku08")] VUNSPEC_SYNC)]
15035 + [(set_attr "length" "4")]
15038 +;;=============================================================================
15039 +;; TLB instructions
15040 +;;-----------------------------------------------------------------------------
15041 +(define_insn "tlbr"
15042 + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBR)]
15045 + [(set_attr "length" "2")]
15048 +(define_insn "tlbw"
15049 + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBW)]
15052 + [(set_attr "length" "2")]
15055 +(define_insn "tlbs"
15056 + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBS)]
15059 + [(set_attr "length" "2")]
15062 +;;=============================================================================
15063 +;; Breakpoint instruction
15064 +;;-----------------------------------------------------------------------------
15065 +(define_insn "breakpoint"
15066 + [ (unspec_volatile [(const_int 0)] VUNSPEC_BREAKPOINT)]
15069 + [(set_attr "length" "2")]
15073 +;;=============================================================================
15074 +;; mtsr/mfsr instruction
15075 +;;-----------------------------------------------------------------------------
15076 +(define_insn "mtsr"
15077 + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
15078 + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTSR)]
15081 + [(set_attr "length" "4")]
15084 +(define_insn "mfsr"
15085 + [ (set (match_operand:SI 0 "register_operand" "=r")
15086 + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFSR)) ]
15089 + [(set_attr "length" "4")]
15092 +;;=============================================================================
15093 +;; mtdr/mfdr instruction
15094 +;;-----------------------------------------------------------------------------
15095 +(define_insn "mtdr"
15096 + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
15097 + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTDR)]
15100 + [(set_attr "length" "4")]
15103 +(define_insn "mfdr"
15104 + [ (set (match_operand:SI 0 "register_operand" "=r")
15105 + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFDR)) ]
15108 + [(set_attr "length" "4")]
15111 +;;=============================================================================
15113 +;;-----------------------------------------------------------------------------
15114 +(define_insn "musfr"
15115 + [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")] VUNSPEC_MUSFR)]
15118 + [(set_attr "length" "2")
15119 + (set_attr "cc" "clobber")]
15122 +(define_insn "mustr"
15123 + [ (set (match_operand:SI 0 "register_operand" "=r")
15124 + (unspec_volatile:SI [(const_int 0)] VUNSPEC_MUSTR)) ]
15127 + [(set_attr "length" "2")]
15130 +(define_insn "ssrf"
15131 + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_SSRF)]
15134 + [(set_attr "length" "2")
15135 + (set_attr "cc" "clobber")]
15138 +(define_insn "csrf"
15139 + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_CSRF)]
15142 + [(set_attr "length" "2")
15143 + (set_attr "cc" "clobber")]
15146 +;;=============================================================================
15147 +;; Flush Return Stack instruction
15148 +;;-----------------------------------------------------------------------------
15149 +(define_insn "frs"
15150 + [ (unspec_volatile [(const_int 0)] VUNSPEC_FRS)]
15153 + [(set_attr "length" "2")
15154 + (set_attr "cc" "none")]
15158 +;;=============================================================================
15159 +;; Saturation Round Scale instruction
15160 +;;-----------------------------------------------------------------------------
15161 +(define_insn "sats"
15162 + [ (set (match_operand:SI 0 "register_operand" "+r")
15163 + (unspec:SI [(match_dup 0)
15164 + (match_operand 1 "immediate_operand" "Ku05")
15165 + (match_operand 2 "immediate_operand" "Ku05")]
15168 + "sats\t%0 >> %1, %2"
15169 + [(set_attr "type" "alu_sat")
15170 + (set_attr "length" "4")]
15173 +(define_insn "satu"
15174 + [ (set (match_operand:SI 0 "register_operand" "+r")
15175 + (unspec:SI [(match_dup 0)
15176 + (match_operand 1 "immediate_operand" "Ku05")
15177 + (match_operand 2 "immediate_operand" "Ku05")]
15180 + "satu\t%0 >> %1, %2"
15181 + [(set_attr "type" "alu_sat")
15182 + (set_attr "length" "4")]
15185 +(define_insn "satrnds"
15186 + [ (set (match_operand:SI 0 "register_operand" "+r")
15187 + (unspec:SI [(match_dup 0)
15188 + (match_operand 1 "immediate_operand" "Ku05")
15189 + (match_operand 2 "immediate_operand" "Ku05")]
15190 + UNSPEC_SATRNDS)) ]
15192 + "satrnds\t%0 >> %1, %2"
15193 + [(set_attr "type" "alu_sat")
15194 + (set_attr "length" "4")]
15197 +(define_insn "satrndu"
15198 +  [ (set (match_operand:SI 0 "register_operand" "+r")
15199 +	 (unspec:SI [(match_dup 0)
15200 +                     (match_operand 1 "immediate_operand" "Ku05")
15201 +                     (match_operand 2 "immediate_operand" "Ku05")]
15202 +                    UNSPEC_SATRNDU)) ]
15204 +  "satrndu\t%0 >> %1, %2"
15205 +  [(set_attr "type" "alu_sat")
15206 +  (set_attr "length" "4")]
15209 +;; Special patterns for dealing with the constant pool
15211 +(define_insn "align_4"
15212 + [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)]
15215 + assemble_align (32);
15218 + [(set_attr "length" "2")]
15221 +(define_insn "consttable_start"
15222 + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_START)]
15227 + [(set_attr "length" "0")]
15230 +(define_insn "consttable_end"
15231 + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)]
15234 + making_const_table = FALSE;
15237 + [(set_attr "length" "0")]
15241 +(define_insn "consttable_4"
15242 + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)]
15245 + making_const_table = TRUE;
15246 + switch (GET_MODE_CLASS (GET_MODE (operands[0])))
15250 + REAL_VALUE_TYPE r;
15251 + char real_string[1024];
15252 + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
15253 + real_to_decimal(real_string, &r, 1024, 0, 1);
15254 + asm_fprintf (asm_out_file, "\t.float\t%s\n", real_string);
15258 + assemble_integer (operands[0], 4, 0, 1);
15263 + [(set_attr "length" "4")]
15266 +(define_insn "consttable_8"
15267 + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)]
15270 + making_const_table = TRUE;
15271 + switch (GET_MODE_CLASS (GET_MODE (operands[0])))
15275 + REAL_VALUE_TYPE r;
15276 + char real_string[1024];
15277 + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
15278 + real_to_decimal(real_string, &r, 1024, 0, 1);
15279 + asm_fprintf (asm_out_file, "\t.double\t%s\n", real_string);
15283 + assemble_integer(operands[0], 8, 0, 1);
15288 + [(set_attr "length" "8")]
15291 +(define_insn "consttable_16"
15292 + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_16)]
15295 + making_const_table = TRUE;
15296 + assemble_integer(operands[0], 16, 0, 1);
15299 + [(set_attr "length" "16")]
15302 +;;=============================================================================
15303 +;; coprocessor instructions
15304 +;;-----------------------------------------------------------------------------
15305 +(define_insn "cop"
15306 + [ (unspec_volatile [(match_operand 0 "immediate_operand" "Ku03")
15307 + (match_operand 1 "immediate_operand" "Ku04")
15308 + (match_operand 2 "immediate_operand" "Ku04")
15309 + (match_operand 3 "immediate_operand" "Ku04")
15310 + (match_operand 4 "immediate_operand" "Ku07")] VUNSPEC_COP)]
15312 + "cop\tcp%0, cr%1, cr%2, cr%3, %4"
15313 + [(set_attr "length" "4")]
15316 +(define_insn "mvcrsi"
15317 + [ (set (match_operand:SI 0 "avr32_cop_move_operand" "=r,<,Z")
15318 + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
15319 + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
15323 + mvcr.w\tcp%1, %0, cr%2
15324 + stcm.w\tcp%1, %0, cr%2
15325 + stc.w\tcp%1, %0, cr%2"
15326 + [(set_attr "length" "4")]
15329 +(define_insn "mvcrdi"
15330 + [ (set (match_operand:DI 0 "avr32_cop_move_operand" "=r,<,Z")
15331 + (unspec_volatile:DI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
15332 + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
15336 + mvcr.d\tcp%1, %0, cr%2
15337 + stcm.d\tcp%1, %0, cr%2-cr%i2
15338 + stc.d\tcp%1, %0, cr%2"
15339 + [(set_attr "length" "4")]
15342 +(define_insn "mvrcsi"
15343 + [ (unspec_volatile:SI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
15344 + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
15345 + (match_operand:SI 2 "avr32_cop_move_operand" "r,>,Z")]
15349 + switch (which_alternative){
15351 + return "mvrc.w\tcp%0, cr%1, %2";
15353 + return "ldcm.w\tcp%0, %2, cr%1";
15355 + return "ldc.w\tcp%0, cr%1, %2";
15360 + [(set_attr "length" "4")]
15363 +(define_insn "mvrcdi"
15364 + [ (unspec_volatile:DI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
15365 + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
15366 + (match_operand:DI 2 "avr32_cop_move_operand" "r,>,Z")]
15370 + switch (which_alternative){
15372 + return "mvrc.d\tcp%0, cr%1, %2";
15374 + return "ldcm.d\tcp%0, %2, cr%1-cr%i1";
15376 + return "ldc.d\tcp%0, cr%1, %2";
15381 + [(set_attr "length" "4")]
15384 +;;=============================================================================
15386 +;;-----------------------------------------------------------------------------
15387 +;; This pattern emits RTL for exit from a function. The function exit is
15388 +;; responsible for deallocating the stack frame, restoring callee saved
15389 +;; registers and emitting the return instruction.
15390 +;; ToDo: use TARGET_ASM_FUNCTION_EPILOGUE instead.
15391 +;;=============================================================================
15392 +(define_expand "epilogue"
15393 + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
15396 + if (USE_RETURN_INSN (FALSE)){
15397 + emit_jump_insn (gen_return ());
15400 + emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
15402 + gen_rtx_RETURN (VOIDmode)),
15403 + VUNSPEC_EPILOGUE));
15408 +(define_insn "*epilogue_insns"
15409 + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
15412 + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
15415 + ; Length is absolute worst case
15416 + [(set_attr "type" "branch")
15417 + (set_attr "length" "12")]
15420 +(define_insn "*epilogue_insns_ret_imm"
15421 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
15422 + (use (reg RETVAL_REGNUM))
15423 + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])]
15424 + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
15426 + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
15429 + ; Length is absolute worst case
15430 + [(set_attr "type" "branch")
15431 + (set_attr "length" "12")]
15434 +(define_insn "sibcall_epilogue"
15435 + [(unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)]
15438 + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
15441 +;; Length is absolute worst case
15442 + [(set_attr "type" "branch")
15443 + (set_attr "length" "12")]
15446 +(define_insn "*sibcall_epilogue_insns_ret_imm"
15447 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
15448 + (use (reg RETVAL_REGNUM))
15449 + (unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)])]
15450 + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
15452 + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
15455 + ; Length is absolute worst case
15456 + [(set_attr "type" "branch")
15457 + (set_attr "length" "12")]
15460 +(define_insn "ldxi"
15461 + [(set (match_operand:SI 0 "register_operand" "=r")
15463 + (match_operand:SI 1 "register_operand" "r")
15464 + (mult:SI (zero_extract:SI (match_operand:SI 2 "register_operand" "r")
15466 + (match_operand:SI 3 "immediate_operand" "Ku05"))
15467 + (const_int 4)))))]
15468 + "(INTVAL(operands[3]) == 24 || INTVAL(operands[3]) == 16 || INTVAL(operands[3]) == 8
15469 + || INTVAL(operands[3]) == 0)"
15471 + switch ( INTVAL(operands[3]) ){
15473 + return "ld.w %0, %1[%2:b << 2]";
15475 + return "ld.w %0, %1[%2:l << 2]";
15477 + return "ld.w %0, %1[%2:u << 2]";
15479 + return "ld.w %0, %1[%2:t << 2]";
15481 + internal_error("illegal operand for ldxi");
15484 + [(set_attr "type" "load")
15485 + (set_attr "length" "4")
15486 + (set_attr "cc" "none")])
15493 +;;=============================================================================
15494 +;; Peephole optimizing
15495 +;;-----------------------------------------------------------------------------
15498 +;; st.w r8[0x0], r12
15501 +;; st.w r7[-0x8], r12
15502 +;;=============================================================================
15503 +; (set (reg:SI 9 r8)
15504 +; (plus:SI (reg/f:SI 6 r7)
15505 +; (const_int ...)))
15506 +; (set (mem:SI (reg:SI 9 r8))
15507 +; (reg:SI 12 r12))
15509 + [(set (match_operand:SI 0 "register_operand" "")
15510 + (plus:SI (match_operand:SI 1 "register_operand" "")
15511 + (match_operand:SI 2 "immediate_operand" "")))
15512 + (set (mem:SI (match_dup 0))
15513 + (match_operand:SI 3 "register_operand" ""))]
15514 + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
15515 + [(set (match_dup 0)
15516 + (plus:SI (match_dup 1)
15518 + (set (mem:SI (plus:SI (match_dup 1)
15523 +;;=============================================================================
15524 +;; Peephole optimizing
15525 +;;-----------------------------------------------------------------------------
15528 +;; ld.w r6, r6[0x0]
15531 +;; ld.w r6, r7[-0x4]
15532 +;;=============================================================================
15533 +; (set (reg:SI 7 r6)
15534 +; (plus:SI (reg/f:SI 6 r7)
15535 +; (const_int -4 [0xfffffffc])))
15536 +; (set (reg:SI 7 r6)
15537 +; (mem:SI (reg:SI 7 r6)))
15539 + [(set (match_operand:SI 0 "register_operand" "")
15540 + (plus:SI (match_operand:SI 1 "register_operand" "")
15541 + (match_operand:SI 2 "immediate_operand" "")))
15542 + (set (match_operand:SI 3 "register_operand" "")
15543 + (mem:SI (match_dup 0)))]
15544 + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
15545 + [(set (match_dup 0)
15546 + (plus:SI (match_dup 1)
15548 + (set (match_dup 3)
15549 + (mem:SI (plus:SI (match_dup 1)
15550 + (match_dup 2))))]
15553 +;;=============================================================================
15554 +;; Peephole optimizing
15555 +;;-----------------------------------------------------------------------------
15557 +;; ld.sb r0, r7[-0x6]
15560 +;; ld.sb r0, r7[-0x6]
15561 +;;=============================================================================
15563 + [(set (match_operand:QI 0 "register_operand" "")
15564 + (match_operand:QI 1 "load_sb_memory_operand" ""))
15565 + (set (match_operand:SI 2 "register_operand" "")
15566 + (sign_extend:SI (match_dup 0)))]
15567 + "(REGNO(operands[0]) == REGNO(operands[2]) || peep2_reg_dead_p(2, operands[0]))"
15568 + [(set (match_dup 2)
15569 + (sign_extend:SI (match_dup 1)))]
15572 +;;=============================================================================
15573 +;; Peephole optimizing
15574 +;;-----------------------------------------------------------------------------
15576 +;; ld.ub r0, r7[-0x6]
15579 +;; ld.ub r0, r7[-0x6]
15580 +;;=============================================================================
15582 + [(set (match_operand:QI 0 "register_operand" "")
15583 + (match_operand:QI 1 "memory_operand" ""))
15584 + (set (match_operand:SI 2 "register_operand" "")
15585 + (zero_extend:SI (match_dup 0)))]
15586 + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
15587 + [(set (match_dup 2)
15588 + (zero_extend:SI (match_dup 1)))]
15591 +;;=============================================================================
15592 +;; Peephole optimizing
15593 +;;-----------------------------------------------------------------------------
15595 +;; ld.sh r0, r7[-0x6]
15598 +;; ld.sh r0, r7[-0x6]
15599 +;;=============================================================================
15601 + [(set (match_operand:HI 0 "register_operand" "")
15602 + (match_operand:HI 1 "memory_operand" ""))
15603 + (set (match_operand:SI 2 "register_operand" "")
15604 + (sign_extend:SI (match_dup 0)))]
15605 + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
15606 + [(set (match_dup 2)
15607 + (sign_extend:SI (match_dup 1)))]
15610 +;;=============================================================================
15611 +;; Peephole optimizing
15612 +;;-----------------------------------------------------------------------------
15614 +;; ld.uh r0, r7[-0x6]
15617 +;; ld.uh r0, r7[-0x6]
15618 +;;=============================================================================
15620 + [(set (match_operand:HI 0 "register_operand" "")
15621 + (match_operand:HI 1 "memory_operand" ""))
15622 + (set (match_operand:SI 2 "register_operand" "")
15623 + (zero_extend:SI (match_dup 0)))]
15624 + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
15625 + [(set (match_dup 2)
15626 + (zero_extend:SI (match_dup 1)))]
15629 +;;=============================================================================
15630 +;; Peephole optimizing
15631 +;;-----------------------------------------------------------------------------
15636 +;; add rd2, rd, rd2
15638 +;; mac rd2, rx, ry
15639 +;;=============================================================================
15641 + [(set (match_operand:SI 0 "register_operand" "")
15642 + (mult:SI (match_operand:SI 1 "register_operand" "")
15643 + (match_operand:SI 2 "register_operand" "")))
15644 + (set (match_operand:SI 3 "register_operand" "")
15645 + (plus:SI (match_dup 3)
15647 + "peep2_reg_dead_p(2, operands[0])"
15648 + [(set (match_dup 3)
15649 + (plus:SI (mult:SI (match_dup 1)
15655 + [(set (match_operand:SI 0 "register_operand" "")
15656 + (mult:SI (match_operand:SI 1 "register_operand" "")
15657 + (match_operand:SI 2 "register_operand" "")))
15658 + (set (match_operand:SI 3 "register_operand" "")
15659 + (plus:SI (match_dup 0)
15661 + "peep2_reg_dead_p(2, operands[0])"
15662 + [(set (match_dup 3)
15663 + (plus:SI (mult:SI (match_dup 1)
15669 +;;=============================================================================
15670 +;; Peephole optimizing
15671 +;;-----------------------------------------------------------------------------
15673 +;; bfextu rd, rs, k5, 1 or and(h/l) rd, one_bit_set_mask
15677 +;; If rd is dead after the operation.
15678 +;;=============================================================================
15680 + [ (set (match_operand:SI 0 "register_operand" "")
15681 + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
15683 + (match_operand:SI 2 "immediate_operand" "")))
15686 + "peep2_reg_dead_p(2, operands[0])"
15688 + (and:SI (match_dup 1)
15690 + "operands[2] = GEN_INT(1 << INTVAL(operands[2]));")
15693 + [ (set (match_operand:SI 0 "register_operand" "")
15694 + (and:SI (match_operand:SI 1 "register_operand" "")
15695 + (match_operand:SI 2 "one_bit_set_operand" "")))
15698 + "peep2_reg_dead_p(2, operands[0])"
15700 + (and:SI (match_dup 1)
15704 +;;=============================================================================
15705 +;; Peephole optimizing
15706 +;;-----------------------------------------------------------------------------
15707 +;; Load with extracted index: ld.w Rd, Rb[Ri:{t/u/b/l} << 2]
15709 +;;=============================================================================
15713 + [(set (match_operand:SI 0 "register_operand" "")
15714 + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
15716 + (match_operand:SI 2 "avr32_extract_shift_operand" "")))
15717 + (set (match_operand:SI 3 "register_operand" "")
15718 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
15719 + (match_operand:SI 4 "register_operand" ""))))]
15721 + "(dead_or_set_p(insn, operands[0]))"
15723 + switch ( INTVAL(operands[2]) ){
15725 + return "ld.w %3, %4[%1:b << 2]";
15727 + return "ld.w %3, %4[%1:l << 2]";
15729 + return "ld.w %3, %4[%1:u << 2]";
15731 + return "ld.w %3, %4[%1:t << 2]";
15733 + internal_error("illegal operand for ldxi");
15736 + [(set_attr "type" "load")
15737 + (set_attr "length" "4")
15738 + (set_attr "cc" "clobber")]
15744 + [(set (match_operand:SI 0 "register_operand" "")
15745 + (and:SI (match_operand:SI 1 "register_operand" "") (const_int 255)))
15746 + (set (match_operand:SI 2 "register_operand" "")
15747 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
15748 + (match_operand:SI 3 "register_operand" ""))))]
15750 + "(dead_or_set_p(insn, operands[0]))"
15752 + "ld.w %2, %3[%1:b << 2]"
15753 + [(set_attr "type" "load")
15754 + (set_attr "length" "4")
15755 + (set_attr "cc" "clobber")]
15760 + [(set (match_operand:SI 0 "register_operand" "")
15761 + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
15763 + (match_operand:SI 2 "avr32_extract_shift_operand" "")))
15764 + (set (match_operand:SI 3 "register_operand" "")
15765 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
15766 + (match_operand:SI 4 "register_operand" ""))))]
15768 + "(peep2_reg_dead_p(2, operands[0]))
15769 + || (REGNO(operands[0]) == REGNO(operands[3]))"
15770 + [(set (match_dup 3)
15773 + (mult:SI (zero_extract:SI (match_dup 1)
15776 + (const_int 4)))))]
15780 + [(set (match_operand:SI 0 "register_operand" "")
15781 + (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
15782 + (set (match_operand:SI 2 "register_operand" "")
15783 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
15784 + (match_operand:SI 3 "register_operand" ""))))]
15786 + "(peep2_reg_dead_p(2, operands[0]))
15787 + || (REGNO(operands[0]) == REGNO(operands[2]))"
15788 + [(set (match_dup 2)
15791 + (mult:SI (zero_extract:SI (match_dup 1)
15794 + (const_int 4)))))]
15795 + "operands[1] = gen_rtx_REG(SImode, REGNO(operands[1]));"
15800 + [(set (match_operand:SI 0 "register_operand" "")
15801 + (and:SI (match_operand:SI 1 "register_operand" "")
15802 + (const_int 255)))
15803 + (set (match_operand:SI 2 "register_operand" "")
15804 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
15805 + (match_operand:SI 3 "register_operand" ""))))]
15807 + "(peep2_reg_dead_p(2, operands[0]))
15808 + || (REGNO(operands[0]) == REGNO(operands[2]))"
15809 + [(set (match_dup 2)
15812 + (mult:SI (zero_extract:SI (match_dup 1)
15815 + (const_int 4)))))]
15822 + [(set (match_operand:SI 0 "register_operand" "")
15823 + (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
15825 + (set (match_operand:SI 2 "register_operand" "")
15826 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
15827 + (match_operand:SI 3 "register_operand" ""))))]
15829 + "(peep2_reg_dead_p(2, operands[0]))
15830 + || (REGNO(operands[0]) == REGNO(operands[2]))"
15831 + [(set (match_dup 2)
15834 + (mult:SI (zero_extract:SI (match_dup 1)
15837 + (const_int 4)))))]
15842 +;;************************************************
15845 +;;************************************************
15849 + [(set (match_operand:SI 0 "register_operand" "")
15850 + (not:SI (match_operand:SI 1 "register_operand" "")))
15851 + (set (match_operand:SI 2 "register_operand" "")
15852 + (and:SI (match_dup 2)
15854 + "peep2_reg_dead_p(2, operands[0])"
15856 + [(set (match_dup 2)
15857 + (and:SI (match_dup 2)
15858 + (not:SI (match_dup 1))
15864 + [(set (match_operand:SI 0 "register_operand" "")
15865 + (not:SI (match_operand:SI 1 "register_operand" "")))
15866 + (set (match_operand:SI 2 "register_operand" "")
15867 + (and:SI (match_dup 0)
15870 + "peep2_reg_dead_p(2, operands[0])"
15872 + [(set (match_dup 2)
15873 + (and:SI (match_dup 2)
15874 + (not:SI (match_dup 1))
15881 +;;=================================================================
15882 +;; Addabs peephole
15883 +;;=================================================================
15886 + [(set (match_operand:SI 2 "register_operand" "=r")
15887 + (abs:SI (match_operand:SI 1 "register_operand" "r")))
15888 + (set (match_operand:SI 0 "register_operand" "=r")
15889 + (plus:SI (match_operand:SI 3 "register_operand" "r")
15891 + "dead_or_set_p(insn, operands[2])"
15892 + "addabs %0, %3, %1"
15893 + [(set_attr "length" "4")
15894 + (set_attr "cc" "set_z")])
15897 + [(set (match_operand:SI 2 "register_operand" "=r")
15898 + (abs:SI (match_operand:SI 1 "register_operand" "r")))
15899 + (set (match_operand:SI 0 "register_operand" "=r")
15900 + (plus:SI (match_dup 2)
15901 + (match_operand:SI 3 "register_operand" "r")))]
15902 + "dead_or_set_p(insn, operands[2])"
15903 + "addabs %0, %3, %1"
15904 + [(set_attr "length" "4")
15905 + (set_attr "cc" "set_z")])
15908 +;;=================================================================
15909 +;; Detect roundings
15910 +;;=================================================================
15912 +(define_insn "*round"
15913 + [(set (match_operand:SI 0 "register_operand" "+r")
15914 + (ashiftrt:SI (plus:SI (match_dup 0)
15915 + (match_operand:SI 1 "immediate_operand" "i"))
15916 + (match_operand:SI 2 "immediate_operand" "i")))]
15917 + "avr32_rnd_operands(operands[1], operands[2])"
15919 + "satrnds %0 >> %2, 31"
15921 + [(set_attr "type" "alu_sat")
15922 + (set_attr "length" "4")]
15928 + [(set (match_operand:SI 0 "register_operand" "")
15929 + (plus:SI (match_dup 0)
15930 + (match_operand:SI 1 "immediate_operand" "")))
15931 + (set (match_dup 0)
15932 + (ashiftrt:SI (match_dup 0)
15933 + (match_operand:SI 2 "immediate_operand" "")))]
15934 + "avr32_rnd_operands(operands[1], operands[2])"
15936 + [(set (match_dup 0)
15937 + (ashiftrt:SI (plus:SI (match_dup 0)
15943 + [(set (match_operand:SI 0 "register_operand" "r")
15944 + (plus:SI (match_dup 0)
15945 + (match_operand:SI 1 "immediate_operand" "i")))
15946 + (set (match_dup 0)
15947 + (ashiftrt:SI (match_dup 0)
15948 + (match_operand:SI 2 "immediate_operand" "i")))]
15949 + "avr32_rnd_operands(operands[1], operands[2])"
15951 + "satrnds %0 >> %2, 31"
15953 + [(set_attr "type" "alu_sat")
15954 + (set_attr "length" "4")
15955 + (set_attr "cc" "clobber")]
15960 +;;=================================================================
15962 +;;=================================================================
15964 + [(set (match_operand:SI 0 "register_operand" "")
15965 + (match_operand 1 "avr32_const_pool_ref_operand" ""))
15966 + (parallel [(call (mem:SI (match_dup 0))
15967 + (match_operand 2 "" ""))
15968 + (clobber (reg:SI LR_REGNUM))])]
15969 + "dead_or_set_p(insn, operands[0])"
15971 + [(set_attr "type" "call")
15972 + (set_attr "length" "4")
15973 + (set_attr "cc" "clobber")]
15977 + [(set (match_operand:SI 2 "register_operand" "")
15978 + (match_operand 1 "avr32_const_pool_ref_operand" ""))
15979 + (parallel [(set (match_operand 0 "register_operand" "")
15980 + (call (mem:SI (match_dup 2))
15981 + (match_operand 3 "" "")))
15982 + (clobber (reg:SI LR_REGNUM))])]
15983 + "dead_or_set_p(insn, operands[2])"
15985 + [(set_attr "type" "call")
15986 + (set_attr "length" "4")
15987 + (set_attr "cc" "call_set")]
15992 + [(set (match_operand:SI 0 "register_operand" "")
15993 + (match_operand 1 "avr32_const_pool_ref_operand" ""))
15994 + (parallel [(call (mem:SI (match_dup 0))
15995 + (match_operand 2 "" ""))
15996 + (clobber (reg:SI LR_REGNUM))])]
15997 + "peep2_reg_dead_p(2, operands[0])"
15998 + [(parallel [(call (mem:SI (match_dup 1))
16000 + (clobber (reg:SI LR_REGNUM))])]
16005 + [(set (match_operand:SI 0 "register_operand" "")
16006 + (match_operand 1 "avr32_const_pool_ref_operand" ""))
16007 + (parallel [(set (match_operand 2 "register_operand" "")
16008 + (call (mem:SI (match_dup 0))
16009 + (match_operand 3 "" "")))
16010 + (clobber (reg:SI LR_REGNUM))])]
16011 + "(peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[2]) == REGNO(operands[0])))"
16012 + [(parallel [(set (match_dup 2)
16013 + (call (mem:SI (match_dup 1))
16015 + (clobber (reg:SI LR_REGNUM))])]
16019 +;;=================================================================
16020 +;; Returning a value
16021 +;;=================================================================
16025 + [(set (match_operand 0 "register_operand" "")
16026 + (match_operand 1 "register_operand" ""))
16028 + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)
16029 + && (REGNO(operands[1]) != LR_REGNUM)
16030 + && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS)"
16032 + [(set_attr "type" "call")
16033 + (set_attr "length" "2")]
16038 + [(set (match_operand 0 "register_operand" "r")
16039 + (match_operand 1 "immediate_operand" "i"))
16041 + "(USE_RETURN_INSN (FALSE) && (REGNO(operands[0]) == RETVAL_REGNUM) &&
16042 + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1)))"
16044 + avr32_output_return_instruction (TRUE, FALSE, NULL, operands[1]);
16047 + [(set_attr "type" "call")
16048 + (set_attr "length" "4")]
16052 + [(set (match_operand 0 "register_operand" "r")
16053 + (match_operand 1 "immediate_operand" "i"))
16054 + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
16055 + "(REGNO(operands[0]) == RETVAL_REGNUM) &&
16056 + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1))"
16058 + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[1]);
16061 + ; Length is absolute worst case
16062 + [(set_attr "type" "branch")
16063 + (set_attr "length" "12")]
16067 + [(set (match_operand 0 "register_operand" "=r")
16068 + (if_then_else (match_operator 1 "avr32_comparison_operator"
16069 + [(match_operand 4 "register_operand" "r")
16070 + (match_operand 5 "register_immediate_operand" "rKs21")])
16071 + (match_operand 2 "avr32_cond_register_immediate_operand" "rKs08")
16072 + (match_operand 3 "avr32_cond_register_immediate_operand" "rKs08")))
16074 + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)"
16076 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
16078 + if ( GET_CODE(operands[2]) == REG
16079 + && GET_CODE(operands[3]) == REG
16080 + && REGNO(operands[2]) != LR_REGNUM
16081 + && REGNO(operands[3]) != LR_REGNUM ){
16082 + return "ret%1 %2\;ret%i1 %3";
16083 + } else if ( GET_CODE(operands[2]) == REG
16084 + && GET_CODE(operands[3]) == CONST_INT ){
16085 + if ( INTVAL(operands[3]) == -1
16086 + || INTVAL(operands[3]) == 0
16087 + || INTVAL(operands[3]) == 1 ){
16088 + return "ret%1 %2\;ret%i1 %d3";
16090 + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
16092 + } else if ( GET_CODE(operands[2]) == CONST_INT
16093 + && GET_CODE(operands[3]) == REG ){
16094 + if ( INTVAL(operands[2]) == -1
16095 + || INTVAL(operands[2]) == 0
16096 + || INTVAL(operands[2]) == 1 ){
16097 + return "ret%1 %d2\;ret%i1 %3";
16099 + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
16102 + if ( (INTVAL(operands[2]) == -1
16103 + || INTVAL(operands[2]) == 0
16104 + || INTVAL(operands[2]) == 1 )
16105 + && (INTVAL(operands[3]) == -1
16106 + || INTVAL(operands[3]) == 0
16107 + || INTVAL(operands[3]) == 1 )){
16108 + return "ret%1 %d2\;ret%i1 %d3";
16110 + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
16115 + [(set_attr "length" "10")
16116 + (set_attr "cc" "none")
16117 + (set_attr "type" "call")])
16121 +;;=================================================================
16123 +;;=================================================================
16126 + [(set (match_operand:HI 0 "register_operand" "")
16127 + (neg:HI (match_operand:HI 1 "register_operand" "")))
16128 + (set (match_operand:SI 2 "register_operand" "")
16130 + (sign_extend:SI (match_dup 0))
16131 + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))))]
16132 + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
16133 + [ (set (match_dup 2)
16135 + (sign_extend:SI (neg:HI (match_dup 1)))
16136 + (sign_extend:SI (match_dup 3))))]
16141 + [(set (match_operand:HI 0 "register_operand" "")
16142 + (neg:HI (match_operand:HI 1 "register_operand" "")))
16143 + (set (match_operand:SI 2 "register_operand" "")
16145 + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))
16146 + (sign_extend:SI (match_dup 0))))]
16147 + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
16148 + [ (set (match_dup 2)
16150 + (sign_extend:SI (neg:HI (match_dup 1)))
16151 + (sign_extend:SI (match_dup 3))))]
16157 +;;=================================================================
16158 +;; Vector set and extract operations
16159 +;;=================================================================
16160 +(define_insn "vec_setv2hi_hi"
16161 + [(set (match_operand:V2HI 0 "register_operand" "=r")
16164 + (vec_duplicate:V2HI
16165 + (match_operand:HI 1 "register_operand" "r"))
16168 + "bfins\t%0, %1, 16, 16"
16169 + [(set_attr "type" "alu")
16170 + (set_attr "length" "4")
16171 + (set_attr "cc" "clobber")])
16173 +(define_insn "vec_setv2hi_lo"
16174 + [(set (match_operand:V2HI 0 "register_operand" "+r")
16177 + (vec_duplicate:V2HI
16178 + (match_operand:HI 1 "register_operand" "r"))
16181 + "bfins\t%0, %1, 0, 16"
16182 + [(set_attr "type" "alu")
16183 + (set_attr "length" "4")
16184 + (set_attr "cc" "clobber")])
16186 +(define_expand "vec_setv2hi"
16187 + [(set (match_operand:V2HI 0 "register_operand" "")
16190 + (vec_duplicate:V2HI
16191 + (match_operand:HI 1 "register_operand" ""))
16192 + (match_operand 2 "immediate_operand" "")))]
16194 + { operands[2] = GEN_INT(INTVAL(operands[2]) + 1); }
16197 +(define_insn "vec_extractv2hi"
16198 + [(set (match_operand:HI 0 "register_operand" "=r")
16200 + (match_operand:V2HI 1 "register_operand" "r")
16201 + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
16204 + if ( INTVAL(operands[2]) == 0 )
16205 + return "bfextu\t%0, %1, 16, 16";
16207 + return "bfextu\t%0, %1, 0, 16";
16209 + [(set_attr "type" "alu")
16210 + (set_attr "length" "4")
16211 + (set_attr "cc" "clobber")])
16213 +(define_insn "vec_extractv4qi"
16214 + [(set (match_operand:QI 0 "register_operand" "=r")
16216 + (match_operand:V4QI 1 "register_operand" "r")
16217 + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
16220 + switch ( INTVAL(operands[2]) ){
16222 + return "bfextu\t%0, %1, 24, 8";
16224 + return "bfextu\t%0, %1, 16, 8";
16226 + return "bfextu\t%0, %1, 8, 8";
16228 + return "bfextu\t%0, %1, 0, 8";
16233 + [(set_attr "type" "alu")
16234 + (set_attr "length" "4")
16235 + (set_attr "cc" "clobber")])
16238 +(define_insn "concatv2hi"
16239 + [(set (match_operand:V2HI 0 "register_operand" "=r, r, r")
16241 + (match_operand:HI 1 "register_operand" "r, r, 0")
16242 + (match_operand:HI 2 "register_operand" "r, 0, r")))]
16245 + mov\t%0, %1\;bfins\t%0, %2, 0, 16
16246 + bfins\t%0, %2, 0, 16
16247 + bfins\t%0, %1, 16, 16"
16248 + [(set_attr "length" "6, 4, 4")
16249 + (set_attr "type" "alu")])
16252 +;; Load the atomic operation description
16253 +(include "sync.md")
16255 +;; Load the SIMD description
16256 +(include "simd.md")
16258 +;; Load the FP coprocessor patterns
16259 +(include "fpcp.md")
16261 +++ b/gcc/config/avr32/avr32-modes.def
16263 +VECTOR_MODES (INT, 4); /* V4QI V2HI */
16265 +++ b/gcc/config/avr32/avr32.opt
16267 +; Options for the ATMEL AVR32 port of the compiler.
16269 +; Copyright 2007 Atmel Corporation.
16271 +; This file is part of GCC.
16273 +; GCC is free software; you can redistribute it and/or modify it under
16274 +; the terms of the GNU General Public License as published by the Free
16275 +; Software Foundation; either version 2, or (at your option) any later
16278 +; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16279 +; WARRANTY; without even the implied warranty of MERCHANTABILITY or
16280 +; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16281 +; for more details.
16283 +; You should have received a copy of the GNU General Public License
16284 +; along with GCC; see the file COPYING. If not, write to the Free
16285 +; Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16286 +; 02110-1301, USA.
16288 +muse-rodata-section
16289 +Target Report Mask(USE_RODATA_SECTION)
16290 +Use section .rodata for read-only data instead of .text.
16293 +Target Report Undocumented Mask(HARD_FLOAT)
16294 +Use floating point coprocessor instructions.
16297 +Target Report Undocumented InverseMask(HARD_FLOAT, SOFT_FLOAT)
16298 +Use software floating-point library for floating-point operations.
16300 +mforce-double-align
16301 +Target Report RejectNegative Mask(FORCE_DOUBLE_ALIGN)
16302 +Force double-word alignment for double-word memory accesses.
16305 +Target Report RejectNegative Mask(NO_INIT_GOT)
16306 +Do not initialize GOT register before using it when compiling PIC code.
16309 +Target Report Mask(RELAX)
16310 +Let invoked assembler and linker do relaxing (Enabled by default when optimization level is >1).
16313 +Target Report Undocumented Mask(MD_REORG_OPTIMIZATION)
16314 +Perform machine dependent optimizations in reorg stage.
16317 +Target Report Mask(HAS_ASM_ADDR_PSEUDOS)
16318 +Use assembler pseudo-instructions lda.w and call for handling direct addresses. (Enabled by default)
16321 +Target Report RejectNegative Joined Var(avr32_part_name)
16322 +Specify the AVR32 part name
16325 +Target Report RejectNegative Joined Undocumented Var(avr32_part_name)
16326 +Specify the AVR32 part name (deprecated)
16329 +Target Report RejectNegative Joined Var(avr32_arch_name)
16330 +Specify the AVR32 architecture name
16333 +Target Report Mask(FAST_FLOAT)
16334 +Enable fast floating-point library. Enabled by default if the -funsafe-math-optimizations switch is specified.
16336 +mimm-in-const-pool
16337 +Target Report Var(avr32_imm_in_const_pool) Init(-1)
16338 +Put large immediates in constant pool. This is enabled by default for archs with insn-cache.
16341 +Target Report RejectNegative Mask(NO_PIC)
16342 +Do not generate position-independent code. (deprecated, use -fno-pic instead)
16344 +mcond-exec-before-reload
16345 +Target Report Undocumented Mask(COND_EXEC_BEFORE_RELOAD)
16346 +Enable experimental conditional execution preparation before the reload stage.
16349 +++ b/gcc/config/avr32/avr32-protos.h
16352 + Prototypes for exported functions defined in avr32.c
16353 + Copyright 2003-2006 Atmel Corporation.
16355 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
16356 +   Initial porting by Anders Ådland.
16358 + This file is part of GCC.
16360 + This program is free software; you can redistribute it and/or modify
16361 + it under the terms of the GNU General Public License as published by
16362 + the Free Software Foundation; either version 2 of the License, or
16363 + (at your option) any later version.
16365 + This program is distributed in the hope that it will be useful,
16366 + but WITHOUT ANY WARRANTY; without even the implied warranty of
16367 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16368 + GNU General Public License for more details.
16370 + You should have received a copy of the GNU General Public License
16371 + along with this program; if not, write to the Free Software
16372 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
16375 +#ifndef AVR32_PROTOS_H
16376 +#define AVR32_PROTOS_H
16378 +extern const int swap_reg[];
16380 +extern int avr32_valid_macmac_bypass (rtx, rtx);
16381 +extern int avr32_valid_mulmac_bypass (rtx, rtx);
16383 +extern int avr32_decode_lcomm_symbol_offset (rtx, int *);
16384 +extern void avr32_encode_lcomm_symbol_offset (tree, char *, int);
16386 +extern const char *avr32_strip_name_encoding (const char *);
16388 +extern rtx avr32_get_note_reg_equiv (rtx insn);
16390 +extern int avr32_use_return_insn (int iscond);
16392 +extern void avr32_make_reglist16 (int reglist16_vect, char *reglist16_string);
16394 +extern void avr32_make_reglist8 (int reglist8_vect, char *reglist8_string);
16395 +extern void avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string);
16396 +extern void avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string);
16398 +extern void avr32_output_return_instruction (int single_ret_inst,
16399 + int iscond, rtx cond,
16401 +extern void avr32_expand_prologue (void);
16402 +extern void avr32_set_return_address (rtx source, rtx scratch);
16404 +extern int avr32_hard_regno_mode_ok (int regno, enum machine_mode mode);
16405 +extern int avr32_extra_constraint_s (rtx value, const int strict);
16406 +extern int avr32_eh_return_data_regno (const int n);
16407 +extern int avr32_initial_elimination_offset (const int from, const int to);
16408 +extern rtx avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
16409 + tree type, int named);
16410 +extern void avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
16411 + rtx libname, tree fndecl);
16412 +extern void avr32_function_arg_advance (CUMULATIVE_ARGS * cum,
16413 + enum machine_mode mode,
16414 + tree type, int named);
16415 +#ifdef ARGS_SIZE_RTX
16416 +/* expr.h defines ARGS_SIZE_RTX and `enum direction'. */
16417 +extern enum direction avr32_function_arg_padding (enum machine_mode mode,
16419 +#endif /* ARGS_SIZE_RTX */
16420 +extern rtx avr32_function_value (tree valtype, tree func, bool outgoing);
16421 +extern rtx avr32_libcall_value (enum machine_mode mode);
16422 +extern int avr32_sched_use_dfa_pipeline_interface (void);
16423 +extern bool avr32_return_in_memory (tree type, tree fntype);
16424 +extern void avr32_regs_to_save (char *operand);
16425 +extern void avr32_target_asm_function_prologue (FILE * file,
16426 + HOST_WIDE_INT size);
16427 +extern void avr32_target_asm_function_epilogue (FILE * file,
16428 + HOST_WIDE_INT size);
16429 +extern void avr32_trampoline_template (FILE * file);
16430 +extern void avr32_initialize_trampoline (rtx addr, rtx fnaddr,
16431 + rtx static_chain);
16432 +extern int avr32_legitimate_address (enum machine_mode mode, rtx x,
16434 +extern int avr32_legitimate_constant_p (rtx x);
16436 +extern int avr32_legitimate_pic_operand_p (rtx x);
16438 +extern rtx avr32_find_symbol (rtx x);
16439 +extern void avr32_select_section (rtx exp, int reloc, int align);
16440 +extern void avr32_encode_section_info (tree decl, rtx rtl, int first);
16441 +extern void avr32_asm_file_end (FILE * stream);
16442 +extern void avr32_asm_output_ascii (FILE * stream, char *ptr, int len);
16443 +extern void avr32_asm_output_common (FILE * stream, const char *name,
16444 + int size, int rounded);
16445 +extern void avr32_asm_output_label (FILE * stream, const char *name);
16446 +extern void avr32_asm_declare_object_name (FILE * stream, char *name,
16448 +extern void avr32_asm_globalize_label (FILE * stream, const char *name);
16449 +extern void avr32_asm_weaken_label (FILE * stream, const char *name);
16450 +extern void avr32_asm_output_external (FILE * stream, tree decl,
16451 + const char *name);
16452 +extern void avr32_asm_output_external_libcall (FILE * stream, rtx symref);
16453 +extern void avr32_asm_output_labelref (FILE * stream, const char *name);
16454 +extern void avr32_notice_update_cc (rtx exp, rtx insn);
16455 +extern void avr32_print_operand (FILE * stream, rtx x, int code);
16456 +extern void avr32_print_operand_address (FILE * stream, rtx x);
16458 +extern int avr32_symbol (rtx x);
16460 +extern void avr32_select_rtx_section (enum machine_mode mode, rtx x,
16461 + unsigned HOST_WIDE_INT align);
16463 +extern int avr32_load_multiple_operation (rtx op, enum machine_mode mode);
16464 +extern int avr32_store_multiple_operation (rtx op, enum machine_mode mode);
16466 +extern int avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c,
16467 + const char *str);
16469 +extern bool avr32_cannot_force_const_mem (rtx x);
16471 +extern void avr32_init_builtins (void);
16473 +extern rtx avr32_expand_builtin (tree exp, rtx target, rtx subtarget,
16474 + enum machine_mode mode, int ignore);
16476 +extern bool avr32_must_pass_in_stack (enum machine_mode mode, tree type);
16478 +extern bool avr32_strict_argument_naming (CUMULATIVE_ARGS * ca);
16480 +extern bool avr32_pass_by_reference (CUMULATIVE_ARGS * cum,
16481 + enum machine_mode mode,
16482 + tree type, bool named);
16484 +extern rtx avr32_gen_load_multiple (rtx * regs, int count, rtx from,
16485 + int write_back, int in_struct_p,
16487 +extern rtx avr32_gen_store_multiple (rtx * regs, int count, rtx to,
16488 + int in_struct_p, int scalar_p);
16489 +extern int avr32_gen_movmemsi (rtx * operands);
16491 +extern int avr32_rnd_operands (rtx add, rtx shift);
16492 +extern int avr32_adjust_insn_length (rtx insn, int length);
16494 +extern int symbol_mentioned_p (rtx x);
16495 +extern int label_mentioned_p (rtx x);
16496 +extern rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg);
16497 +extern int avr32_address_register_rtx_p (rtx x, int strict_p);
16498 +extern int avr32_legitimate_index_p (enum machine_mode mode, rtx index,
16501 +extern int avr32_const_double_immediate (rtx value);
16502 +extern void avr32_init_expanders (void);
16503 +extern rtx avr32_return_addr (int count, rtx frame);
16504 +extern bool avr32_got_mentioned_p (rtx addr);
16506 +extern void avr32_final_prescan_insn (rtx insn, rtx * opvec, int noperands);
16508 +extern int avr32_expand_movcc (enum machine_mode mode, rtx operands[]);
16509 +extern int avr32_expand_addcc (enum machine_mode mode, rtx operands[]);
16511 +extern int avr32_expand_scc (RTX_CODE cond, rtx * operands);
16514 +extern int avr32_store_bypass (rtx insn_out, rtx insn_in);
16515 +extern int avr32_mul_waw_bypass (rtx insn_out, rtx insn_in);
16516 +extern int avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in);
16517 +extern int avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in);
16518 +extern rtx avr32_output_cmp (rtx cond, enum machine_mode mode,
16519 + rtx op0, rtx op1);
16521 +rtx get_next_insn_cond (rtx cur_insn);
16522 +int set_next_insn_cond (rtx cur_insn, rtx cond);
16523 +rtx next_insn_emits_cmp (rtx cur_insn);
16524 +void avr32_override_options (void);
16525 +void avr32_load_pic_register (void);
16526 +#ifdef GCC_BASIC_BLOCK_H
16527 +rtx avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn,
16528 + int *num_true_changes);
16529 +rtx avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test );
16530 +void avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, int *num_true_changes);
16532 +void avr32_optimization_options (int level, int size);
16533 +int avr32_const_ok_for_move (HOST_WIDE_INT c);
16535 +void avr32_split_const_expr (enum machine_mode mode,
16536 + enum machine_mode new_mode,
16538 + rtx *split_expr);
16539 +void avr32_get_intval (enum machine_mode mode,
16541 + HOST_WIDE_INT *val);
16543 +int avr32_cond_imm_clobber_splittable (rtx insn,
16547 +#endif /* AVR32_PROTOS_H */
16549 +++ b/gcc/config/avr32/crti.asm
16552 + Init/fini stuff for AVR32.
16553 + Copyright 2003-2006 Atmel Corporation.
16555 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
16557 + This file is part of GCC.
16559 + This program is free software; you can redistribute it and/or modify
16560 + it under the terms of the GNU General Public License as published by
16561 + the Free Software Foundation; either version 2 of the License, or
16562 + (at your option) any later version.
16564 + This program is distributed in the hope that it will be useful,
16565 + but WITHOUT ANY WARRANTY; without even the implied warranty of
16566 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16567 + GNU General Public License for more details.
16569 + You should have received a copy of the GNU General Public License
16570 + along with this program; if not, write to the Free Software
16571 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
16574 +/* The code in sections .init and .fini is supposed to be a single
16575 + regular function. The function in .init is called directly from
16576 + start in crt1.asm. The function in .fini is atexit()ed in crt1.asm
16579 + crti.asm contributes the prologue of a function to these sections,
16580 + and crtn.asm comes up the epilogue. STARTFILE_SPEC should list
16581 + crti.o before any other object files that might add code to .init
16582 + or .fini sections, and ENDFILE_SPEC should list crtn.o after any
16583 + such object files. */
16588 +/* Just load the GOT */
16598 +1: .long 0b - _GLOBAL_OFFSET_TABLE_
16602 +/* Just load the GOT */
16612 +1: .long 0b - _GLOBAL_OFFSET_TABLE_
16616 +++ b/gcc/config/avr32/crtn.asm
16618 +/* Copyright (C) 2001 Free Software Foundation, Inc.
16619 + Written By Nick Clifton
16621 + This file is free software; you can redistribute it and/or modify it
16622 + under the terms of the GNU General Public License as published by the
16623 + Free Software Foundation; either version 2, or (at your option) any
16626 + In addition to the permissions in the GNU General Public License, the
16627 + Free Software Foundation gives you unlimited permission to link the
16628 + compiled version of this file with other programs, and to distribute
16629 + those programs without any restriction coming from the use of this
16630 + file. (The General Public License restrictions do apply in other
16631 + respects; for example, they cover modification of the file, and
16632 + distribution when not linked into another program.)
16634 + This file is distributed in the hope that it will be useful, but
16635 + WITHOUT ANY WARRANTY; without even the implied warranty of
16636 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16637 + General Public License for more details.
16639 + You should have received a copy of the GNU General Public License
16640 + along with this program; see the file COPYING. If not, write to
16641 + the Free Software Foundation, 59 Temple Place - Suite 330,
16642 + Boston, MA 02111-1307, USA.
16644 + As a special exception, if you link this library with files
16645 + compiled with GCC to produce an executable, this does not cause
16646 + the resulting executable to be covered by the GNU General Public License.
16647 + This exception does not however invalidate any other reasons why
16648 + the executable file might be covered by the GNU General Public License.
16663 +++ b/gcc/config/avr32/fpcp.md
16665 +;; AVR32 machine description file for Floating-Point instructions.
16666 +;; Copyright 2003-2006 Atmel Corporation.
16668 +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
16670 +;; This file is part of GCC.
16672 +;; This program is free software; you can redistribute it and/or modify
16673 +;; it under the terms of the GNU General Public License as published by
16674 +;; the Free Software Foundation; either version 2 of the License, or
16675 +;; (at your option) any later version.
16677 +;; This program is distributed in the hope that it will be useful,
16678 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
16679 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16680 +;; GNU General Public License for more details.
16682 +;; You should have received a copy of the GNU General Public License
16683 +;; along with this program; if not, write to the Free Software
16684 +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16686 +;; -*- Mode: Scheme -*-
16688 +;;******************************************************************************
16689 +;; Automaton pipeline description for floating-point coprocessor insns
16690 +;;******************************************************************************
16691 +(define_cpu_unit "fid,fm1,fm2,fm3,fm4,fwb,fcmp,fcast" "avr32_ap")
16693 +(define_insn_reservation "fmv_op" 1
16694 + (and (eq_attr "pipeline" "ap")
16695 + (eq_attr "type" "fmv"))
16696 + "is,da,d,fid,fwb")
16698 +(define_insn_reservation "fmul_op" 5
16699 + (and (eq_attr "pipeline" "ap")
16700 + (eq_attr "type" "fmul"))
16701 + "is,da,d,fid,fm1,fm2,fm3,fm4,fwb")
16703 +(define_insn_reservation "fcmps_op" 1
16704 + (and (eq_attr "pipeline" "ap")
16705 + (eq_attr "type" "fcmps"))
16706 + "is,da,d,fid,fcmp")
16708 +(define_insn_reservation "fcmpd_op" 2
16709 + (and (eq_attr "pipeline" "ap")
16710 + (eq_attr "type" "fcmpd"))
16711 + "is,da,d,fid*2,fcmp")
16713 +(define_insn_reservation "fcast_op" 3
16714 + (and (eq_attr "pipeline" "ap")
16715 + (eq_attr "type" "fcast"))
16716 + "is,da,d,fid,fcmp,fcast,fwb")
16718 +(define_insn_reservation "fmvcpu_op" 2
16719 + (and (eq_attr "pipeline" "ap")
16720 + (eq_attr "type" "fmvcpu"))
16723 +(define_insn_reservation "fldd_op" 1
16724 + (and (eq_attr "pipeline" "ap")
16725 + (eq_attr "type" "fldd"))
16728 +(define_insn_reservation "flds_op" 1
16729 + (and (eq_attr "pipeline" "ap")
16730 + (eq_attr "type" "flds"))
16733 +(define_insn_reservation "fsts_op" 0
16734 + (and (eq_attr "pipeline" "ap")
16735 + (eq_attr "type" "fsts"))
16738 +(define_insn_reservation "fstd_op" 0
16739 + (and (eq_attr "pipeline" "ap")
16740 + (eq_attr "type" "fstd"))
16744 +(define_insn "*movsf_fpcp"
16745 + [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,r,m")
16746 + (match_operand:SF 1 "general_operand" " f,r,f,m,f,r,G,m,r"))]
16747 + "TARGET_HARD_FLOAT"
16758 + [(set_attr "length" "4,4,4,4,4,2,4,4,4")
16759 + (set_attr "type" "fmv,flds,fmvcpu,flds,fsts,alu,alu,load,store")])
16761 +(define_insn_and_split "*movdf_fpcp"
16762 + [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,m")
16763 + (match_operand:DF 1 "general_operand" " f,r,f,m,f,r,m,r"))]
16764 + "TARGET_HARD_FLOAT"
16771 + mov\t%0, %1\;mov\t%m0, %m1
16775 + "TARGET_HARD_FLOAT
16776 + && reload_completed
16777 + && (REG_P(operands[0]) && (REGNO_REG_CLASS(REGNO(operands[0])) == GENERAL_REGS))
16778 + && (REG_P(operands[1]) && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS))"
16779 + [(set (match_dup 0) (match_dup 1))
16780 + (set (match_dup 2) (match_dup 3))]
16783 + operands[2] = gen_highpart (SImode, operands[0]);
16784 + operands[0] = gen_lowpart (SImode, operands[0]);
16785 + operands[3] = gen_highpart(SImode, operands[1]);
16786 + operands[1] = gen_lowpart(SImode, operands[1]);
16790 + [(set_attr "length" "4,4,4,4,4,4,4,4")
16791 + (set_attr "type" "fmv,fldd,fmvcpu,fldd,fstd,alu2,load2,store2")])
16794 +(define_insn "mulsf3"
16795 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16796 + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16797 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
16798 + "TARGET_HARD_FLOAT"
16799 + "fmul.s\t%0, %1, %2"
16800 + [(set_attr "length" "4")
16801 + (set_attr "type" "fmul")])
16803 +(define_insn "nmulsf3"
16804 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16805 + (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16806 + (match_operand:SF 2 "avr32_fp_register_operand" "f"))))]
16807 + "TARGET_HARD_FLOAT"
16808 + "fnmul.s\t%0, %1, %2"
16809 + [(set_attr "length" "4")
16810 + (set_attr "type" "fmul")])
16813 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "")
16814 + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
16815 + (match_operand:SF 2 "avr32_fp_register_operand" "")))
16816 + (set (match_operand:SF 3 "avr32_fp_register_operand" "")
16817 + (neg:SF (match_dup 0)))]
16818 + "TARGET_HARD_FLOAT &&
16819 + (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))"
16820 + [(set (match_dup 3)
16821 + (neg:SF (mult:SF (match_dup 1)
16822 + (match_dup 2))))]
16826 +(define_insn "macsf3"
16827 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16828 + (plus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16829 + (match_operand:SF 2 "avr32_fp_register_operand" "f"))
16830 + (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
16831 + "TARGET_HARD_FLOAT"
16832 + "fmac.s\t%0, %1, %2"
16833 + [(set_attr "length" "4")
16834 + (set_attr "type" "fmul")])
16836 +(define_insn "nmacsf3"
16837 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16838 + (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16839 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))
16840 + (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
16841 + "TARGET_HARD_FLOAT"
16842 + "fnmac.s\t%0, %1, %2"
16843 + [(set_attr "length" "4")
16844 + (set_attr "type" "fmul")])
16847 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "")
16848 + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
16849 + (match_operand:SF 2 "avr32_fp_register_operand" "")))
16850 + (set (match_operand:SF 3 "avr32_fp_register_operand" "")
16854 + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
16855 + [(set (match_dup 3)
16856 + (plus:SF (neg:SF (mult:SF (match_dup 1)
16862 +(define_insn "msubacsf3"
16863 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16864 + (minus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16865 + (match_operand:SF 2 "avr32_fp_register_operand" "f"))
16866 + (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
16867 + "TARGET_HARD_FLOAT"
16868 + "fmsc.s\t%0, %1, %2"
16869 + [(set_attr "length" "4")
16870 + (set_attr "type" "fmul")])
16873 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "")
16874 + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
16875 + (match_operand:SF 2 "avr32_fp_register_operand" "")))
16876 + (set (match_operand:SF 3 "avr32_fp_register_operand" "")
16880 + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
16881 + [(set (match_dup 3)
16882 + (minus:SF (mult:SF (match_dup 1)
16887 +(define_insn "nmsubacsf3"
16888 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16889 + (minus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16890 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))
16891 + (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
16892 + "TARGET_HARD_FLOAT"
16893 + "fnmsc.s\t%0, %1, %2"
16894 + [(set_attr "length" "4")
16895 + (set_attr "type" "fmul")])
16899 +(define_insn "addsf3"
16900 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16901 + (plus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16902 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
16903 + "TARGET_HARD_FLOAT"
16904 + "fadd.s\t%0, %1, %2"
16905 + [(set_attr "length" "4")
16906 + (set_attr "type" "fmul")])
16908 +(define_insn "subsf3"
16909 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16910 + (minus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16911 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
16912 + "TARGET_HARD_FLOAT"
16913 + "fsub.s\t%0, %1, %2"
16914 + [(set_attr "length" "4")
16915 + (set_attr "type" "fmul")])
16918 +(define_insn "negsf2"
16919 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16920 + (neg:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
16921 + "TARGET_HARD_FLOAT"
16923 + [(set_attr "length" "4")
16924 + (set_attr "type" "fmv")])
16926 +(define_insn "abssf2"
16927 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16928 + (abs:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
16929 + "TARGET_HARD_FLOAT"
16931 + [(set_attr "length" "4")
16932 + (set_attr "type" "fmv")])
16934 +(define_insn "truncdfsf2"
16935 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16936 + (float_truncate:SF
16937 + (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
16938 + "TARGET_HARD_FLOAT"
16939 + "fcastd.s\t%0, %1"
16940 + [(set_attr "length" "4")
16941 + (set_attr "type" "fcast")])
16943 +(define_insn "extendsfdf2"
16944 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16946 + (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
16947 + "TARGET_HARD_FLOAT"
16948 + "fcasts.d\t%0, %1"
16949 + [(set_attr "length" "4")
16950 + (set_attr "type" "fcast")])
16952 +(define_insn "muldf3"
16953 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16954 + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16955 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
16956 + "TARGET_HARD_FLOAT"
16957 + "fmul.d\t%0, %1, %2"
16958 + [(set_attr "length" "4")
16959 + (set_attr "type" "fmul")])
16961 +(define_insn "nmuldf3"
16962 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16963 + (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16964 + (match_operand:DF 2 "avr32_fp_register_operand" "f"))))]
16965 + "TARGET_HARD_FLOAT"
16966 + "fnmul.d\t%0, %1, %2"
16967 + [(set_attr "length" "4")
16968 + (set_attr "type" "fmul")])
16971 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "")
16972 + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
16973 + (match_operand:DF 2 "avr32_fp_register_operand" "")))
16974 + (set (match_operand:DF 3 "avr32_fp_register_operand" "")
16975 + (neg:DF (match_dup 0)))]
16976 + "TARGET_HARD_FLOAT &&
16977 + (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))"
16978 + [(set (match_dup 3)
16979 + (neg:DF (mult:DF (match_dup 1)
16980 + (match_dup 2))))]
16983 +(define_insn "macdf3"
16984 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16985 + (plus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16986 + (match_operand:DF 2 "avr32_fp_register_operand" "f"))
16987 + (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
16988 + "TARGET_HARD_FLOAT"
16989 + "fmac.d\t%0, %1, %2"
16990 + [(set_attr "length" "4")
16991 + (set_attr "type" "fmul")])
16993 +(define_insn "msubacdf3"
16994 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16995 + (minus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16996 + (match_operand:DF 2 "avr32_fp_register_operand" "f"))
16997 + (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
16998 + "TARGET_HARD_FLOAT"
16999 + "fmsc.d\t%0, %1, %2"
17000 + [(set_attr "length" "4")
17001 + (set_attr "type" "fmul")])
17004 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "")
17005 + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
17006 + (match_operand:DF 2 "avr32_fp_register_operand" "")))
17007 + (set (match_operand:DF 3 "avr32_fp_register_operand" "")
17011 + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
17012 + [(set (match_dup 3)
17013 + (minus:DF (mult:DF (match_dup 1)
17018 +(define_insn "nmsubacdf3"
17019 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
17020 + (minus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
17021 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))
17022 + (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
17023 + "TARGET_HARD_FLOAT"
17024 + "fnmsc.d\t%0, %1, %2"
17025 + [(set_attr "length" "4")
17026 + (set_attr "type" "fmul")])
17028 +(define_insn "nmacdf3"
17029 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
17030 + (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
17031 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))
17032 + (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
17033 + "TARGET_HARD_FLOAT"
17034 + "fnmac.d\t%0, %1, %2"
17035 + [(set_attr "length" "4")
17036 + (set_attr "type" "fmul")])
17039 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "")
17040 + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
17041 + (match_operand:DF 2 "avr32_fp_register_operand" "")))
17042 + (set (match_operand:DF 3 "avr32_fp_register_operand" "")
17046 + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
17047 + [(set (match_dup 3)
17048 + (plus:DF (neg:DF (mult:DF (match_dup 1)
17053 +(define_insn "adddf3"
17054 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
17055 + (plus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
17056 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
17057 + "TARGET_HARD_FLOAT"
17058 + "fadd.d\t%0, %1, %2"
17059 + [(set_attr "length" "4")
17060 + (set_attr "type" "fmul")])
17062 +(define_insn "subdf3"
17063 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
17064 + (minus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
17065 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
17066 + "TARGET_HARD_FLOAT"
17067 + "fsub.d\t%0, %1, %2"
17068 + [(set_attr "length" "4")
17069 + (set_attr "type" "fmul")])
17071 +(define_insn "negdf2"
17072 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
17073 + (neg:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
17074 + "TARGET_HARD_FLOAT"
17076 + [(set_attr "length" "4")
17077 + (set_attr "type" "fmv")])
17079 +(define_insn "absdf2"
17080 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
17081 + (abs:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
17082 + "TARGET_HARD_FLOAT"
17084 + [(set_attr "length" "4")
17085 + (set_attr "type" "fmv")])
17088 +(define_expand "cmpdf"
17091 + (match_operand:DF 0 "general_operand" "")
17092 + (match_operand:DF 1 "general_operand" "")))]
17093 + "TARGET_HARD_FLOAT"
17096 + if ( !REG_P(operands[0]) )
17097 + operands[0] = force_reg(DFmode, operands[0]);
17099 + if ( !REG_P(operands[1]) )
17100 + operands[1] = force_reg(DFmode, operands[1]);
17102 + avr32_compare_op0 = operands[0];
17103 + avr32_compare_op1 = operands[1];
17105 + emit_insn(gen_cmpdf_internal(operands[0], operands[1]));
17107 + tmpreg = gen_reg_rtx(SImode);
17108 + emit_insn(gen_fpcc_to_reg(tmpreg));
17109 + emit_insn(gen_reg_to_cc(tmpreg));
17115 +(define_insn "cmpdf_internal"
17116 + [(set (reg:CC FPCC_REGNUM)
17118 + (match_operand:DF 0 "avr32_fp_register_operand" "f")
17119 + (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
17120 + "TARGET_HARD_FLOAT"
17122 + if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) )
17123 + return "fcmp.d\t%0, %1";
17126 + [(set_attr "length" "4")
17127 + (set_attr "type" "fcmpd")
17128 + (set_attr "cc" "fpcompare")])
17130 +(define_expand "cmpsf"
17133 + (match_operand:SF 0 "general_operand" "")
17134 + (match_operand:SF 1 "general_operand" "")))]
17135 + "TARGET_HARD_FLOAT"
17138 + if ( !REG_P(operands[0]) )
17139 + operands[0] = force_reg(SFmode, operands[0]);
17141 + if ( !REG_P(operands[1]) )
17142 + operands[1] = force_reg(SFmode, operands[1]);
17144 + avr32_compare_op0 = operands[0];
17145 + avr32_compare_op1 = operands[1];
17147 + emit_insn(gen_cmpsf_internal(operands[0], operands[1]));
17149 + tmpreg = gen_reg_rtx(SImode);
17150 + emit_insn(gen_fpcc_to_reg(tmpreg));
17151 + emit_insn(gen_reg_to_cc(tmpreg));
17157 +(define_insn "cmpsf_internal"
17158 + [(set (reg:CC FPCC_REGNUM)
17160 + (match_operand:SF 0 "avr32_fp_register_operand" "f")
17161 + (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
17162 + "TARGET_HARD_FLOAT"
17164 + if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) )
17165 + return "fcmp.s\t%0, %1";
17168 + [(set_attr "length" "4")
17169 + (set_attr "type" "fcmps")
17170 + (set_attr "cc" "fpcompare")])
17172 +(define_insn "fpcc_to_reg"
17173 + [(set (match_operand:SI 0 "register_operand" "=r")
17174 + (unspec:SI [(reg:CC FPCC_REGNUM)]
17175 + UNSPEC_FPCC_TO_REG))]
17176 + "TARGET_HARD_FLOAT"
17177 + "fmov.s\t%0, fsr"
17178 + [(set_attr "length" "4")
17179 + (set_attr "type" "fmvcpu")])
17181 +(define_insn "reg_to_cc"
17183 + (unspec:SI [(match_operand:SI 0 "register_operand" "r")]
17184 + UNSPEC_REG_TO_CC))]
17185 + "TARGET_HARD_FLOAT"
17187 + [(set_attr "length" "2")
17188 + (set_attr "type" "alu")
17189 + (set_attr "cc" "from_fpcc")])
17191 +(define_insn "stm_fp"
17192 + [(unspec [(match_operand 0 "register_operand" "r")
17193 + (match_operand 1 "const_int_operand" "")
17194 + (match_operand 2 "const_int_operand" "")]
17196 + "TARGET_HARD_FLOAT"
17198 + int cop_reglist = INTVAL(operands[1]);
17200 + if (INTVAL(operands[2]) != 0)
17201 + return "stcm.w\tcp0, --%0, %C1";
17203 + return "stcm.w\tcp0, %0, %C1";
17205 + if ( cop_reglist & ~0xff ){
17206 + operands[1] = GEN_INT(cop_reglist & ~0xff);
17207 + if (INTVAL(operands[2]) != 0)
17208 + return "stcm.d\tcp0, --%0, %D1";
17210 + return "stcm.d\tcp0, %0, %D1";
17213 + [(set_attr "type" "fstm")
17214 + (set_attr "length" "4")
17215 + (set_attr "cc" "none")])
17217 +++ b/gcc/config/avr32/lib1funcs.S
17219 +/* Macro for moving immediate value to register. */
17220 +.macro mov_imm reg, imm
17221 +.if (((\imm & 0xfffff) == \imm) || ((\imm | 0xfff00000) == \imm))
17223 +#if __AVR32_UC__ >= 2
17224 +.elseif ((\imm & 0xffff) == 0)
17225 + movh \reg, hi(\imm)
17229 + mov \reg, lo(\imm)
17230 + orh \reg, hi(\imm)
17236 +/* Adjust the unpacked double number if it is a subnormal number.
17237 + The exponent and mantissa pair are stored
17238 + in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in
17239 + the MSB is passed in [sign]. Needs two scratch
17240 + registers [scratch1] and [scratch2]. An adjusted and packed double float
17241 + is present in [mant_hi,mant_lo] after macro has executed */
17242 +.macro adjust_subnormal_df exp, mant_lo, mant_hi, sign, scratch1, scratch2
17243 + /* We have an exponent which is <=0 indicating a subnormal number
17244 + As it should be stored as if the exponent was 1 (although the
17245 + exponent field is all zeros to indicate a subnormal number)
17246 + we have to shift down the mantissa to its correct position. */
17248 + sub \exp,-1 /* amount to shift down */
17250 + brlo 50f /* if more than 53 shift steps, the
17251 + entire mantissa will disappear
17252 + without any rounding to occur */
17257 + sub \exp,-10 /* do the shift to position the
17258 + mantissa at the same time
17259 + note! this does not include the
17260 + final 1 step shift to add the sign */
17262 + /* when shifting, save all shifted out bits in [scratch2]. we may need to
17263 + look at them to make correct rounding. */
17265 + rsub \scratch1,\exp,32 /* get inverted shift count */
17266 + cp.w \exp,32 /* handle shifts >= 32 separately */
17269 + /* small (<32) shift amount, both words are part of the shift */
17270 + lsl \scratch2,\mant_lo,\scratch1 /* save bits to shift out from lsw*/
17271 + lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/
17272 + lsr \mant_lo,\mant_lo,\exp /* shift down lsw */
17273 + lsr \mant_hi,\mant_hi,\exp /* shift down msw */
17274 + or \mant_hi,\scratch1 /* add bits from msw with prepared lsw */
17277 + /* large (>=32) shift amount, only lsw will have bits left after shift.
17278 + note that shift operations will use ((shift count) mod 32) so
17279 + we do not need to subtract 32 from shift count. */
17281 + lsl \scratch2,\mant_hi,\scratch1 /* save bits to shift out from msw */
17282 + or \scratch2,\mant_lo /* also save all bits from lsw */
17283 + mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first") */
17284 + mov \mant_hi,0 /* clear msw */
17285 + lsr \mant_lo,\mant_lo,\exp /* make rest of shift inside lsw */
17288 + /* result is almost ready to return, except that least significant bit
17289 + and the part we already shifted out may cause the result to be
17291 + bld \mant_lo,0 /* get bit to be shifted out */
17292 + brcc 51f /* if bit was 0, no rounding */
17294 + /* msb of part to remove is 1, so rounding depends on rest of bits */
17295 + tst \scratch2,\scratch2 /* get shifted out tail */
17296 + brne 50f /* if rest > 0, do round */
17297 + bld \mant_lo,1 /* we have to look at lsb in result */
17298 + brcc 51f /* if lsb is 0, don't round */
17301 + /* subnormal result requires rounding
17302 + rounding may cause subnormal to become smallest normal number
17303 + luckily, smallest normal number has exactly the representation
17304 + we got by rippling a one bit up from mantissa into exponent field. */
17306 + subcc \mant_hi,-1
17309 + /* shift and return packed double with correct sign */
17317 +/* Adjust subnormal single float number with exponent [exp]
17318 + and mantissa [mant] and round. */
17319 +.macro adjust_subnormal_sf sf, exp, mant, sign, scratch
17320 + /* subnormal number */
17321 + rsub \exp,\exp, 1 /* shift amount */
17324 + brhs 90f /* Return zero */
17325 + rsub \scratch, \exp, 32
17326 + lsl \scratch, \mant,\scratch/* Check if there are any bits set
17327 + in the bits discarded in the mantissa */
17328 + srne \scratch /* If so set the lsb of the shifted mantissa */
17329 + lsr \mant,\mant,\exp /* Shift the mantissa */
17330 + or \mant, \scratch /* Round lsb if any bits were shifted out */
17331 + /* Rounding : For explanation, see round_sf. */
17332 + mov \scratch, 0x7f /* Set rounding constant */
17334 + subeq \scratch, -1 /* For odd numbers use rounding constant 0x80 */
17335 + add \mant, \scratch /* Add rounding constant to mantissa */
17336 + /* We can't overflow because mantissa is at least shifted one position
17337 + to the right so the implicit bit is zero. We can however get the implicit
17338 + bit set after rounding which means that we have the lowest normal number
17339 + but this is ok since this bit has the same position as the LSB of the
17341 + lsr \sf, \mant, 7
17342 + /* Rotate in sign */
17349 +/* Round the unpacked df number with exponent [exp] and
17350 + mantissa [mant_hi, mant_lo]. Uses scratch register
17352 +.macro round_df exp, mant_lo, mant_hi, scratch
17353 + mov \scratch, 0x3ff /* Rounding constant */
17354 + bld \mant_lo,11 /* Check if lsb in the final result is
17356 + subeq \scratch, -1 /* Adjust rounding constant to 0x400
17357 + if rounding 0.5 upwards */
17358 + add \mant_lo, \scratch /* Round */
17359 + acr \mant_hi /* If overflowing we know that
17360 + we have all zeros in the bits not
17361 + scaled out so we can leave them
17362 + but we must increase the exponent with
17363 + two since we had an implicit bit
17364 + which is lost + the extra overflow bit */
17365 + subcs \exp, -2 /* Update exponent */
17368 +/* Round single float number stored in [mant] and [exp] */
17369 +.macro round_sf exp, mant, scratch
17371 + For 0.5 we round to nearest even integer
17372 + for all other cases we round to nearest integer.
17373 + This means that if the digit left of the "point" (.)
17374 + is 1 we can add 0x80 to the mantissa since the
17375 + corner case 0x180 will round up to 0x200. If the
17376 + digit left of the "point" is 0 we will have to
17377 + add 0x7f since this will give 0xff and hence a
17378 + truncation/rounding downwards for the corner
17379 + case when the 9 lowest bits are 0x080 */
17380 + mov \scratch, 0x7f /* Set rounding constant */
17381 + /* Check if the mantissa is even or odd */
17383 + subeq \scratch, -1 /* Rounding constant should be 0x80 */
17384 + add \mant, \scratch
17385 + subcs \exp, -2 /* Adjust exponent if we overflowed */
17390 +/* Pack a single float number stored in [mant] and [exp]
17391 + into a single float number in [sf] */
17392 +.macro pack_sf sf, exp, mant
17393 + bld \mant,31 /* implicit bit to z */
17394 + subne \exp,1 /* if subnormal (implicit bit 0)
17395 + adjust exponent to storage format */
17397 + lsr \sf, \mant, 7
17398 + bfins \sf, \exp, 24, 8
17401 +/* Pack exponent [exp] and mantissa [mant_hi, mant_lo]
17402 + into [df_hi, df_lo]. [df_hi] is shifted
17403 + one bit up so the sign bit can be shifted into it */
17405 +.macro pack_df exp, mant_lo, mant_hi, df_lo, df_hi
17406 + bld \mant_hi,31 /* implicit bit to z */
17407 + subne \exp,1 /* if subnormal (implicit bit 0)
17408 + adjust exponent to storage format */
17410 + lsr \mant_lo,11 /* shift back lsw */
17411 + or \df_lo,\mant_lo,\mant_hi<<21 /* combine with low bits from msw */
17412 + lsl \mant_hi,1 /* get rid of implicit bit */
17413 + lsr \mant_hi,11 /* shift back msw except for one step*/
17414 + or \df_hi,\mant_hi,\exp<<21 /* combine msw with exponent */
17417 +/* Normalize single float number stored in [mant] and [exp]
17418 + using scratch register [scratch] */
17419 +.macro normalize_sf exp, mant, scratch
17420 + /* Adjust exponent and mantissa */
17421 + clz \scratch, \mant
17422 + sub \exp, \scratch
17423 + lsl \mant, \mant, \scratch
17426 +/* Normalize the exponent and mantissa pair stored
17427 + in [mant_hi,mant_lo] and [exp]. Needs two scratch
17428 + registers [scratch1] and [scratch2]. */
17429 +.macro normalize_df exp, mant_lo, mant_hi, scratch1, scratch2
17430 + clz \scratch1,\mant_hi /* Check if we have zeros in high bits */
17431 + breq 80f /* No need for scaling if no zeros in high bits */
17432 + brcs 81f /* Check for all zeros */
17434 + /* shift amount is smaller than 32, and involves both msw and lsw*/
17435 + rsub \scratch2,\scratch1,32 /* shift mantissa */
17436 + lsl \mant_hi,\mant_hi,\scratch1
17437 + lsr \scratch2,\mant_lo,\scratch2
17438 + or \mant_hi,\scratch2
17439 + lsl \mant_lo,\mant_lo,\scratch1
17440 + sub \exp,\scratch1 /* adjust exponent */
17441 + rjmp 80f /* Finished */
17443 + /* shift amount is greater than 32 */
17444 + clz \scratch1,\mant_lo /* shift mantissa */
17445 + movcs \scratch1, 0
17446 + subcc \scratch1,-32
17447 + lsl \mant_hi,\mant_lo,\scratch1
17449 + sub \exp,\scratch1 /* adjust exponent */
17454 +/* Fast but approximate multiply of two 64-bit numbers to give a 64 bit result.
17455 + The multiplication of [al]x[bl] is discarded.
17456 + Operands in [ah], [al], [bh], [bl].
17457 + Scratch registers in [sh], [sl].
17458 + Returns results in registers [rh], [rl].*/
17459 +.macro mul_approx_df ah, al, bh, bl, rh, rl, sh, sl
17460 + mulu.d \sl, \ah, \bl
17461 + macu.d \sl, \al, \bh
17462 + mulu.d \rl, \ah, \bh
17469 +#if defined(L_avr32_f64_mul) || defined(L_avr32_f64_mul_fast)
17471 +#if defined(L_avr32_f64_mul)
17472 + .global __avr32_f64_mul
17473 + .type __avr32_f64_mul,@function
17476 + .global __avr32_f64_mul_fast
17477 + .type __avr32_f64_mul_fast,@function
17478 +__avr32_f64_mul_fast:
17480 + or r12, r10, r11 << 1
17481 + breq __avr32_f64_mul_op1_zero
17483 +#if defined(L_avr32_f64_mul)
17486 + stm --sp, r5,r6,r7,lr
17489 +#define AVR32_F64_MUL_OP1_INT_BITS 1
17490 +#define AVR32_F64_MUL_OP2_INT_BITS 10
17491 +#define AVR32_F64_MUL_RES_INT_BITS 11
17493 + /* op1 in {r11,r10}*/
17494 + /* op2 in {r9,r8}*/
17495 + eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
17497 + /* Unpack op1 to 1.63 format*/
17499 + /* sf: r11, r10 */
17500 + bfextu r7, r11, 20, 11 /* Extract exponent */
17504 + /* Check if normalization is needed */
17505 + breq __avr32_f64_mul_op1_subnormal /*If number is subnormal, normalize it */
17507 + lsl r11, (12-AVR32_F64_MUL_OP1_INT_BITS-1) /* Extract mantissa, leave room for implicit bit */
17508 + or r11, r11, r10>>(32-(12-AVR32_F64_MUL_OP1_INT_BITS-1))
17509 + lsl r10, (12-AVR32_F64_MUL_OP1_INT_BITS-1)
17510 + bfins r11, r5, 32 - (1 + AVR32_F64_MUL_OP1_INT_BITS), 1 + AVR32_F64_MUL_OP1_INT_BITS /* Insert implicit bit */
17514 + /* Unpack op2 to 10.54 format */
17517 + bfextu r6, r9, 20, 11 /* Extract exponent */
17519 + /* Check if normalization is needed */
17520 + breq __avr32_f64_mul_op2_subnormal /*If number is subnormal, normalize it */
17522 + lsl r8, 1 /* Extract mantissa, leave room for implicit bit */
17524 + bfins r9, r5, 32 - (1 + AVR32_F64_MUL_OP2_INT_BITS), 1 + AVR32_F64_MUL_OP2_INT_BITS /* Insert implicit bit */
17528 + /* Check if any operands are NaN or INF */
17530 + breq __avr32_f64_mul_op_nan_or_inf /* Check op1 for NaN or Inf */
17532 + breq __avr32_f64_mul_op_nan_or_inf /* Check op2 for NaN or Inf */
17535 + /* Calculate new exponent in r12*/
17537 + sub r12, (1023-1)
17539 +#if defined(L_avr32_f64_mul)
17540 + /* Do the multiplication.
17541 + Place result in [r11, r10, r7, r6]. The result is in 11.117 format. */
17542 + mulu.d r4, r11, r8
17543 + macu.d r4, r10, r9
17544 + mulu.d r6, r10, r8
17545 + mulu.d r10, r11, r9
17550 + /* Do the multiplication using approximate calculation. discard the al x bl
17552 + Place result in [r11, r10, r7]. The result is in 11.85 format. */
17554 + /* Do the multiplication using approximate calculation.
17555 + Place result in r11, r10. Use r7, r6 as scratch registers */
17556 + mulu.d r6, r11, r8
17557 + macu.d r6, r10, r9
17558 + mulu.d r10, r11, r9
17562 + /* Adjust exponent and mantissa */
17563 + /* [r12]:exp, [r11, r10]:mant [r7, r6]:sticky bits */
17564 + /* Mantissa may be of the format 00000000000.0xxx or 00000000000.1xxx. */
17565 + /* In the first case, shift one pos to left.*/
17566 + bld r11, 32-AVR32_F64_MUL_RES_INT_BITS-1
17574 + brle __avr32_f64_mul_res_subnormal /*Result was subnormal.*/
17576 + /* Check for Inf. */
17578 + brge __avr32_f64_mul_res_inf
17580 + /* Insert exponent. */
17581 + bfins r11, r12, 20, 11
17583 + /* Result was not subnormal. Perform rounding. */
17584 + /* For the fast version we discard the sticky bits and always round
17585 + the halfway case up. */
17587 +#if defined(L_avr32_f64_mul)
17588 + or r6, r6, r10 << 31 /* Or in parity bit into stickybits */
17589 + or r7, r7, r6 >> 1 /* Or together sticky and still make the msb
17590 + of r7 represent the halfway bit. */
17591 + eorh r7, 0x8000 /* Toggle halfway bit. */
17592 + /* We should now round up by adding one for the following cases:
17594 + halfway sticky|parity round-up
17599 + Since we have inverted the halfway bit we can use the satu instruction
17600 + by saturating to 1 bit to implement this.
17609 + /* Insert sign bit*/
17613 + /* Return result in [r11,r10] */
17614 +#if defined(L_avr32_f64_mul)
17617 + ldm sp++, r5, r6, r7,pc
17621 +__avr32_f64_mul_op1_subnormal:
17622 + andh r11, 0x000f /* Remove sign bit and exponent */
17623 + clz r12, r10 /* Count leading zeros in lsw */
17624 + clz r6, r11 /* Count leading zeros in msw */
17625 + subcs r12, -32 + AVR32_F64_MUL_OP1_INT_BITS
17627 + subcc r6, AVR32_F64_MUL_OP1_INT_BITS
17631 + /* shifting involves both msw and lsw*/
17632 + rsub r12, r6, 32 /* shift mantissa */
17634 + lsr r12, r10, r12
17637 + sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
17638 + sub r7, r6 /* adjust exponent */
17639 + rjmp 22b /* Finished */
17641 + /* msw is zero so only need to consider lsw */
17643 + breq __avr32_f64_mul_res_zero
17645 + sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
17646 + sub r7, r6 /* adjust exponent */
17650 +__avr32_f64_mul_op2_subnormal:
17651 + andh r9, 0x000f /* Remove sign bit and exponent */
17652 + clz r12, r8 /* Count leading zeros in lsw */
17653 + clz r5, r9 /* Count leading zeros in msw */
17654 + subcs r12, -32 + AVR32_F64_MUL_OP2_INT_BITS
17656 + subcc r5, AVR32_F64_MUL_OP2_INT_BITS
17660 + /* shifting involves both msw and lsw*/
17661 + rsub r12, r5, 32 /* shift mantissa */
17666 + sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
17667 + sub r6, r5 /* adjust exponent */
17668 + rjmp 23b /* Finished */
17670 + /* msw is zero so only need to consider lsw */
17672 + breq __avr32_f64_mul_res_zero
17674 + sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
17675 + sub r6, r5 /* adjust exponent */
17679 +__avr32_f64_mul_op_nan_or_inf:
17680 + /* Same code for OP1 and OP2*/
17681 + /* Since we are here, at least one of the OPs were NaN or INF*/
17682 + andh r9, 0x000f /* Remove sign bit and exponent */
17683 + andh r11, 0x000f /* Remove sign bit and exponent */
17684 + /* Merge the regs in each operand to check for zero*/
17685 + or r11, r10 /* op1 */
17686 + or r9, r8 /* op2 */
17687 + /* Check if op1 is NaN or INF */
17689 + brne __avr32_f64_mul_op1_not_naninf
17690 + /* op1 was NaN or INF.*/
17692 + brne __avr32_f64_mul_res_nan /* op1 was NaN. Result will be NaN*/
17693 + /*op1 was INF. check if op2 is NaN or INF*/
17695 + brne __avr32_f64_mul_res_inf /*op1 was INF, op2 was neither NaN nor INF*/
17696 + /* op1 is INF, op2 is either NaN or INF*/
17698 + breq __avr32_f64_mul_res_inf /*op2 was also INF*/
17699 + rjmp __avr32_f64_mul_res_nan /*op2 was NaN*/
17701 +__avr32_f64_mul_op1_not_naninf:
17702 + /* op1 was not NaN nor INF. Then op2 must be NaN or INF*/
17704 + breq __avr32_f64_mul_res_inf /*op2 was INF, return INF*/
17705 + rjmp __avr32_f64_mul_res_nan /*else return NaN*/
17707 +__avr32_f64_mul_res_subnormal:/* Multiply result was subnormal. */
17708 +#if defined(L_avr32_f64_mul)
17709 + /* Check how much we must scale down the mantissa. */
17711 + sub r12, -1 /* We do no longer have an implicit bit. */
17712 + satu r12 >> 0, 6 /* Saturate shift amount to max 63. */
17715 + /* Shift amount <32 */
17721 + lsr r10, r10, r12
17724 + lsr r11, r11, r12
17727 + /* Shift amount >=32 */
17734 + or r6, r6, r10 << 1
17735 + lsr r10, r10, r12
17737 + lsr r10, r11, r12
17741 + /* Flush to zero for the fast version. */
17742 + mov r11, lr /*Get correct sign*/
17743 + andh r11, 0x8000, COH
17745 + ldm sp++, r5, r6, r7,pc
17748 +__avr32_f64_mul_res_zero:/* Multiply result is zero. */
17749 + mov r11, lr /*Get correct sign*/
17750 + andh r11, 0x8000, COH
17752 +#if defined(L_avr32_f64_mul)
17755 + ldm sp++, r5, r6, r7,pc
17758 +__avr32_f64_mul_res_nan: /* Return NaN. */
17761 +#if defined(L_avr32_f64_mul)
17764 + ldm sp++, r5, r6, r7,pc
17767 +__avr32_f64_mul_res_inf: /* Return INF. */
17768 + mov r11, 0xfff00000
17772 +#if defined(L_avr32_f64_mul)
17775 + ldm sp++, r5, r6, r7,pc
17778 +__avr32_f64_mul_op1_zero:
17781 + andh r11, 0x8000, COH
17782 + /* Check if op2 is Inf or NaN. */
17783 + bfextu r12, r9, 20, 11
17785 + retne r12 /* Return 0.0 */
17796 +#if defined(L_avr32_f64_addsub) || defined(L_avr32_f64_addsub_fast)
17799 +__avr32_f64_sub_from_add:
17800 + /* Switch sign on op2 */
17803 +#if defined(L_avr32_f64_addsub_fast)
17804 + .global __avr32_f64_sub_fast
17805 + .type __avr32_f64_sub_fast,@function
17806 +__avr32_f64_sub_fast:
17808 + .global __avr32_f64_sub
17809 + .type __avr32_f64_sub,@function
17813 + /* op1 in {r11,r10}*/
17814 + /* op2 in {r9,r8}*/
17816 +#if defined(L_avr32_f64_addsub_fast)
17817 + /* If op2 is zero just return op1 */
17818 + or r12, r8, r9 << 1
17822 + /* Check signs */
17824 + /* Different signs, use addition. */
17825 + brmi __avr32_f64_add_from_sub
17827 + stm --sp, r5, r6, r7, lr
17829 + /* Get sign of op1 into r12 */
17831 + andh r12, 0x8000, COH
17833 + /* Remove sign from operands */
17837 + /* Put the largest number in [r11, r10]
17838 + and the smallest number in [r9, r8] */
17841 + brhs 1f /* Skip swap if operands already correctly ordered*/
17842 + /* Operands were not correctly ordered, swap them*/
17849 + eorh r12, 0x8000 /* Invert sign in r12*/
17851 + /* Unpack largest operand - opH */
17853 + /* sf: r11, r10 */
17854 + lsr r7, r11, 20 /* Extract exponent */
17855 + lsl r11, 11 /* Extract mantissa, leave room for implicit bit */
17856 + or r11, r11, r10>>21
17858 + sbr r11, 31 /* Insert implicit bit */
17861 + /* Unpack smallest operand - opL */
17864 + lsr r6, r9, 20 /* Extract exponent */
17865 + breq __avr32_f64_sub_opL_subnormal /* If either zero or subnormal */
17866 + lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
17867 + or r9, r9, r8>>21
17869 + sbr r9, 31 /* Insert implicit bit */
17872 +__avr32_f64_sub_opL_subnormal_done:
17873 + /* opH is NaN or Inf. */
17875 + breq __avr32_f64_sub_opH_nan_or_inf
17877 + /* Get shift amount to scale mantissa of op2. */
17879 + breq __avr32_f64_sub_shift_done /* No need to shift, exponents are equal*/
17881 + /* Scale mantissa [r9, r8] with amount [r6].
17882 + Uses scratch registers [r5] and [lr].
17883 + In IEEE mode:Must not forget the sticky bits we intend to shift out. */
17885 + rsub r5,r6,32 /* get (32 - shift count)
17886 + (if shift count > 32 we get a
17887 + negative value, but that will
17888 + work as well in the code below.) */
17890 + cp.w r6,32 /* handle shifts >= 32 separately */
17891 + brhs __avr32_f64_sub_longshift
17893 + /* small (<32) shift amount, both words are part of the shift
17894 + first remember whether part that is lost contains any 1 bits ... */
17895 + lsl lr,r8,r5 /* shift away bits that are part of
17896 + final mantissa. only part that goes
17897 + to lr are bits that will be lost */
17899 + /* ... and now to the actual shift */
17900 + lsl r5,r9,r5 /* get bits from msw destined for lsw*/
17901 + lsr r8,r8,r6 /* shift down lsw of mantissa */
17902 + lsr r9,r9,r6 /* shift down msw of mantissa */
17903 + or r8,r5 /* combine these bits with prepared lsw*/
17904 +#if defined(L_avr32_f64_addsub)
17905 + cp.w lr,0 /* if any '1' bit in part we lost ...*/
17907 + or r8, lr /* ... we need to set sticky bit*/
17910 +__avr32_f64_sub_shift_done:
17911 + /* Now subtract the mantissas. */
17915 + /* Normalize the exponent and mantissa pair stored in
17916 + [r11,r10] and exponent in [r7]. Needs two scratch registers [r6] and [lr]. */
17917 + clz r6,r11 /* Check if we have zeros in high bits */
17918 + breq __avr32_f64_sub_longnormalize_done /* No need for scaling if no zeros in high bits */
17919 + brcs __avr32_f64_sub_longnormalize
17922 + /* shift amount is smaller than 32, and involves both msw and lsw*/
17923 + rsub lr,r6,32 /* shift mantissa */
17929 + sub r7,r6 /* adjust exponent */
17930 + brle __avr32_f64_sub_subnormal_result
17931 +__avr32_f64_sub_longnormalize_done:
17933 +#if defined(L_avr32_f64_addsub)
17934 + /* Insert the bits we will remove from the mantissa r9[31:21] */
17935 + lsl r9, r10, (32 - 11)
17937 + /* Keep the last bit shifted out. */
17938 + bfextu r9, r10, 10, 1
17941 + /* Pack final result*/
17942 + /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
17943 + /* Result in [r11,r10] */
17944 + /* Insert mantissa */
17946 + or r10, r10, r11<<21
17948 + /* Insert exponent and sign bit*/
17949 + bfins r11, r7, 20, 11
17953 +__avr32_f64_sub_round:
17954 +#if defined(L_avr32_f64_addsub)
17955 + mov_imm r7, 0x80000000
17965 + /* Return result in [r11,r10] */
17966 + ldm sp++, r5, r6, r7,pc
17970 +__avr32_f64_sub_opL_subnormal:
17971 + /* Extract the mantissa */
17972 + lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
17973 + or r9, r9, r8>>21
17976 + /* Set exponent to 1 if we do not have a zero. */
17980 + /* Check if opH is also subnormal. If so, clear implicit bit in r11*/
17985 + /* Check if op1 is zero, if so set exponent to 0. */
17989 + rjmp __avr32_f64_sub_opL_subnormal_done
17991 +__avr32_f64_sub_opH_nan_or_inf:
17992 + /* Check if opH is NaN, if so return NaN */
17995 + brne __avr32_f64_sub_return_nan
17997 + /* opH is Inf. */
17998 + /* Check if opL is Inf. or NaN */
18000 + breq __avr32_f64_sub_return_nan
18001 + /* Return infinity with correct sign. */
18002 + or r11, r12, r7 << 20
18003 + ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
18004 +__avr32_f64_sub_return_nan:
18005 + mov r10, -1 /* Generate NaN in r11, r10 */
18007 + ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
18010 +__avr32_f64_sub_subnormal_result:
18011 +#if defined(L_avr32_f64_addsub)
18012 + /* Check how much we must scale down the mantissa. */
18014 + sub r7, -1 /* We do no longer have an implicit bit. */
18015 + satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
18018 + /* Shift amount <32 */
18023 + or r10, r6 /* Sticky bit from the
18024 + part that was shifted out. */
18028 + /* Set exponent */
18030 + rjmp __avr32_f64_sub_longnormalize_done
18032 + /* Shift amount >=32 */
18038 + or r10, r6 /* Sticky bit from the
18039 + part that was shifted out. */
18041 + /* Set exponent */
18043 + rjmp __avr32_f64_sub_longnormalize_done
18045 + /* Just flush subnormals to zero. */
18049 + ldm sp++, r5, r6, r7, pc
18051 +__avr32_f64_sub_longshift:
18052 + /* large (>=32) shift amount, only lsw will have bits left after shift.
18053 + note that shift operations will use ((shift count=r6) mod 32) so
18054 + we do not need to subtract 32 from shift count. */
18055 + /* Saturate the shift amount to 63. If the amount
18056 + is any larger op2 is insignificant. */
18059 +#if defined(L_avr32_f64_addsub)
18060 + /* first remember whether part that is lost contains any 1 bits ... */
18061 + moveq lr, r8 /* If shift amount is 32, no bits from msw are lost. */
18063 + lsl lr,r9,r5 /* save all lost bits from msw */
18064 + or lr,r8 /* also save lost bits (all) from lsw
18065 + now lr != 0 if we lose any bits */
18068 + /* ... and now to the actual shift */
18069 + lsr r8,r9,r6 /* Move msw to lsw and shift. */
18070 + mov r9,0 /* clear msw */
18071 +#if defined(L_avr32_f64_addsub)
18072 + cp.w lr,0 /* if any '1' bit in part we lost ...*/
18074 + or r8, lr /* ... we need to set sticky bit*/
18076 + rjmp __avr32_f64_sub_shift_done
18078 +__avr32_f64_sub_longnormalize:
18079 + /* shift amount is greater than 32 */
18080 + clz r6,r10 /* shift mantissa */
18081 + /* If the resulting mantissa is zero the result is
18082 + zero so force exponent to zero. */
18085 + movcs r12, 0 /* Also clear sign bit. A zero result from subtraction
18086 + always is +0.0 */
18090 + sub r7,r6 /* adjust exponent */
18091 + brle __avr32_f64_sub_subnormal_result
18092 + rjmp __avr32_f64_sub_longnormalize_done
18097 +__avr32_f64_add_from_sub:
18098 + /* Switch sign on op2 */
18101 +#if defined(L_avr32_f64_addsub_fast)
18102 + .global __avr32_f64_add_fast
18103 + .type __avr32_f64_add_fast,@function
18104 +__avr32_f64_add_fast:
18106 + .global __avr32_f64_add
18107 + .type __avr32_f64_add,@function
18111 + /* op1 in {r11,r10}*/
18112 + /* op2 in {r9,r8}*/
18114 +#if defined(L_avr32_f64_addsub_fast)
18115 + /* If op2 is zero just return op1 */
18116 + or r12, r8, r9 << 1
18120 + /* Check signs */
18122 + /* Different signs, use subtraction. */
18123 + brmi __avr32_f64_sub_from_add
18125 + stm --sp, r5, r6, r7, lr
18127 + /* Get sign of op1 into r12 */
18129 + andh r12, 0x8000, COH
18131 + /* Remove sign from operands */
18135 + /* Put the number with the largest exponent in [r11, r10]
18136 + and the number with the smallest exponent in [r9, r8] */
18138 + brhs 1f /* Skip swap if operands already correctly ordered */
18139 + /* Operands were not correctly ordered, swap them */
18147 + mov lr, 0 /* Set sticky bits to zero */
18148 + /* Unpack largest operand - opH */
18150 + /* sf: r11, r10 */
18151 + bfextu R7, R11, 20, 11 /* Extract exponent */
18152 + bfextu r11, r11, 0, 20 /* Extract mantissa */
18153 + sbr r11, 20 /* Insert implicit bit */
18155 + /* Unpack smallest operand - opL */
18158 + bfextu R6, R9, 20, 11 /* Extract exponent */
18159 + breq __avr32_f64_add_op2_subnormal
18160 + bfextu r9, r9, 0, 20 /* Extract mantissa */
18161 + sbr r9, 20 /* Insert implicit bit */
18164 + /* opH is NaN or Inf. */
18166 + breq __avr32_f64_add_opH_nan_or_inf
18168 + /* Get shift amount to scale mantissa of op2. */
18170 + breq __avr32_f64_add_shift_done /* No need to shift, exponents are equal*/
18172 + /* Scale mantissa [r9, r8] with amount [r6].
18173 + Uses scratch registers [r5] and [lr].
18174 + In IEEE mode:Must not forget the sticky bits we intend to shift out. */
18175 + rsub r5,r6,32 /* get (32 - shift count)
18176 + (if shift count > 32 we get a
18177 + negative value, but that will
18178 + work as well in the code below.) */
18180 + cp.w r6,32 /* handle shifts >= 32 separately */
18181 + brhs __avr32_f64_add_longshift
18183 + /* small (<32) shift amount, both words are part of the shift
18184 + first remember whether part that is lost contains any 1 bits ... */
18185 + lsl lr,r8,r5 /* shift away bits that are part of
18186 + final mantissa. only part that goes
18187 + to lr are bits that will be lost */
18189 + /* ... and now to the actual shift */
18190 + lsl r5,r9,r5 /* get bits from msw destined for lsw*/
18191 + lsr r8,r8,r6 /* shift down lsw of mantissa */
18192 + lsr r9,r9,r6 /* shift down msw of mantissa */
18193 + or r8,r5 /* combine these bits with prepared lsw*/
18195 +__avr32_f64_add_shift_done:
18196 + /* Now add the mantissas. */
18200 + /* Check if we overflowed. */
18202 + breq __avr32_f64_add_res_of:
18204 +__avr32_f64_add_res_of_done:
18206 + /* Pack final result*/
18207 + /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
18208 + /* Result in [r11,r10] */
18209 + /* Insert exponent and sign bit*/
18210 + bfins r11, r7, 20, 11
18214 +__avr32_f64_add_round:
18215 +#if defined(L_avr32_f64_addsub)
18216 + bfextu r12, r10, 0, 1 /* Extract parity bit.*/
18217 + or lr, r12 /* or it together with the sticky bits. */
18218 + eorh lr, 0x8000 /* Toggle round bit. */
18219 + /* We should now round up by adding one for the following cases:
18221 + halfway sticky|parity round-up
18226 + Since we have inverted the halfway bit we can use the satu instruction
18227 + by saturating to 1 bit to implement this.
18236 + /* Return result in [r11,r10] */
18237 + ldm sp++, r5, r6, r7,pc
18240 +__avr32_f64_add_opH_nan_or_inf:
18241 + /* Check if opH is NaN, if so return NaN */
18244 + brne __avr32_f64_add_return_nan
18246 + /* opH is Inf. */
18247 + /* Check if opL is Inf. or NaN */
18249 + breq __avr32_f64_add_opL_nan_or_inf
18250 + ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
18251 +__avr32_f64_add_opL_nan_or_inf:
18254 + brne __avr32_f64_add_return_nan
18255 + mov r10, 0 /* Generate Inf in r11, r10 */
18256 + mov_imm r11, 0x7ff00000
18257 + or r11, r12 /* Put sign bit back */
18258 + ldm sp++, r5, r6, r7, pc/* opL Inf, return Inf */
18259 +__avr32_f64_add_return_nan:
18260 + mov r10, -1 /* Generate NaN in r11, r10 */
18262 + ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
18265 +__avr32_f64_add_longshift:
18266 + /* large (>=32) shift amount, only lsw will have bits left after shift.
18267 + note that shift operations will use ((shift count=r6) mod 32) so
18268 + we do not need to subtract 32 from shift count. */
18269 + /* Saturate the shift amount to 63. If the amount
18270 + is any larger op2 is insignificant. */
18272 + /* If shift amount is 32 there are no bits from the msw that are lost. */
18275 + /* first remember whether part that is lost contains any 1 bits ... */
18276 + lsl lr,r9,r5 /* save all lost bits from msw */
18277 +#if defined(L_avr32_f64_addsub)
18280 + or lr,r8 /* also save lost bits (all) from lsw
18281 + now lr != 0 if we lose any bits */
18284 + /* ... and now to the actual shift */
18285 + lsr r8,r9,r6 /* msw -> lsw and make rest of shift inside lsw*/
18286 + mov r9,0 /* clear msw */
18287 + rjmp __avr32_f64_add_shift_done
18289 +__avr32_f64_add_res_of:
18290 + /* We overflowed. Scale down mantissa by shifting right one position. */
18291 + or lr, lr, lr << 1 /* Remember stickybits*/
18295 + sub r7, -1 /* Increment exponent */
18297 + /* Clear mantissa to set result to Inf if the exponent is 255. */
18302 + rjmp __avr32_f64_add_res_of_done
18304 +__avr32_f64_add_op2_subnormal:
18305 +        /* Set exponent to 1 */
18308 + /* Check if op2 is also subnormal. */
18313 +        /* Both operands are subnormal. Just add the mantissas
18314 + and the exponent will automatically be set to 1 if
18315 + we overflow into a normal number. */
18319 + /* Add sign bit */
18322 + /* Return result in [r11,r10] */
18323 + ldm sp++, r5, r6, r7,pc
18329 +#ifdef L_avr32_f64_to_u32
18330 + /* This goes into L_fixdfsi */
18334 +#ifdef L_avr32_f64_to_s32
18335 + .global __avr32_f64_to_u32
18336 + .type __avr32_f64_to_u32,@function
18337 +__avr32_f64_to_u32:
18339 + retmi 0 /* Negative returns 0 */
18341 + /* Fallthrough to df to signed si conversion */
18342 + .global __avr32_f64_to_s32
18343 + .type __avr32_f64_to_s32,@function
18344 +__avr32_f64_to_s32:
18346 + lsr r12,21 /* extract exponent*/
18347 + sub r12,1023 /* convert to unbiased exponent.*/
18348 + retlo 0 /* too small exponent implies zero. */
18351 + rsub r12,r12,31 /* shift count = 31 - exponent */
18352 + mov r9,r11 /* save sign for later...*/
18353 + lsl r11,11 /* remove exponent and sign*/
18354 + sbr r11,31 /* add implicit bit*/
18355 + or r11,r11,r10>>21 /* get rest of bits from lsw of double */
18356 + lsr r11,r11,r12 /* shift down mantissa to final place */
18357 + lsl r9,1 /* sign -> carry */
18358 + retcc r11 /* if positive, we are done */
18359 + neg r11 /* if negative float, negate result */
18362 +#endif /* L_fixdfsi*/
18364 +#ifdef L_avr32_f64_to_u64
18365 + /* Actual function is in L_fixdfdi */
18368 +#ifdef L_avr32_f64_to_s64
18369 + .global __avr32_f64_to_u64
18370 + .type __avr32_f64_to_u64,@function
18371 +__avr32_f64_to_u64:
18373 + /* Negative numbers return zero */
18380 + /* Fallthrough */
18381 + .global __avr32_f64_to_s64
18382 + .type __avr32_f64_to_s64,@function
18383 +__avr32_f64_to_s64:
18385 + lsr r9,21 /* get exponent*/
18386 + sub r9,1023 /* convert to correct range*/
18387 +        /* Return zero if exponent too small */
18392 + mov r8,r11 /* save sign for later...*/
18394 + lsl r11,11 /* remove exponent */
18395 + sbr r11,31 /* add implicit bit*/
18396 + or r11,r11,r10>>21 /* get rest of bits from lsw of double*/
18397 + lsl r10,11 /* align lsw correctly as well */
18398 + rsub r9,r9,63 /* shift count = 63 - exponent */
18401 + cp.w r9,32 /* is shift count more than one reg? */
18404 + mov r12,r11 /* save msw */
18405 + lsr r10,r10,r9 /* small shift count, shift down lsw */
18406 + lsr r11,r11,r9 /* small shift count, shift down msw */
18407 + rsub r9,r9,32 /* get 32-size of shifted out tail */
18408 + lsl r12,r12,r9 /* align part to move from msw to lsw */
18409 + or r10,r12 /* combine to get new lsw */
18413 + lsr r10,r11,r9 /* large shift count,only lsw get bits
18414 + note that shift count is modulo 32*/
18415 + mov r11,0 /* msw will be 0 */
18418 + lsl r8,1 /* sign -> carry */
18419 + retcc r11 /* if positive, we are done */
18421 + neg r11 /* if negative float, negate result */
18428 +#ifdef L_avr32_u32_to_f64
18429 + /* Code located in L_floatsidf */
18432 +#ifdef L_avr32_s32_to_f64
18433 + .global __avr32_u32_to_f64
18434 + .type __avr32_u32_to_f64,@function
18435 +__avr32_u32_to_f64:
18436 + sub r11, r12, 0 /* Move to r11 and force Z flag to be updated */
18437 + mov r12, 0 /* always positive */
18438 + rjmp 0f /* Jump to common code for floatsidf */
18440 + .global __avr32_s32_to_f64
18441 + .type __avr32_s32_to_f64,@function
18442 +__avr32_s32_to_f64:
18443 + mov r11, r12 /* Keep original value in r12 for sign */
18444 +        abs     r11             /* Absolute value of r12 */
18446 + mov r10,0 /* let remaining bits be zero */
18447 + reteq r11 /* zero long will return zero float */
18450 + mov r9,31+1023 /* set exponent */
18452 + normalize_df r9 /*exp*/, r10, r11 /* mantissa */, r8, lr /* scratch */
18454 + /* Check if a subnormal result was created */
18458 + adjust_subnormal_df r9 /* exp */, r10, r11 /* Mantissa */, r12 /*sign*/, r8, lr /* scratch */
18462 + /* Round result */
18463 + round_df r9 /*exp*/, r10, r11 /* Mantissa */, r8 /*scratch*/
18466 + /*Return infinity */
18468 + mov_imm r11, 0xffe00000
18469 + rjmp __floatsidf_return_op1
18474 + pack_df r9 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
18475 +__floatsidf_return_op1:
18476 + lsl r12,1 /* shift in sign bit */
18483 +#ifdef L_avr32_f32_cmp_eq
18484 + .global __avr32_f32_cmp_eq
18485 + .type __avr32_f32_cmp_eq,@function
18486 +__avr32_f32_cmp_eq:
18489 + /* If not equal check for +/-0 */
18490 + /* Or together the two values and shift out the sign bit.
18491 + If the result is zero, then the two values are both zero. */
18497 + /* Numbers were equal. Check for NaN or Inf */
18498 + mov_imm r11, 0xff000000
18501 + retls 1 /* 0 if NaN, 1 otherwise */
18505 +#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt)
18506 +#ifdef L_avr32_f32_cmp_ge
18507 + .global __avr32_f32_cmp_ge
18508 + .type __avr32_f32_cmp_ge,@function
18509 +__avr32_f32_cmp_ge:
18511 +#ifdef L_avr32_f32_cmp_lt
18512 + .global __avr32_f32_cmp_lt
18513 + .type __avr32_f32_cmp_lt,@function
18514 +__avr32_f32_cmp_lt:
18516 + lsl r10, r12, 1 /* Remove sign bits */
18519 +#ifdef L_avr32_f32_cmp_ge
18520 + reteq 1 /* Both number are zero. Return true. */
18522 +#ifdef L_avr32_f32_cmp_lt
18523 + reteq 0 /* Both number are zero. Return false. */
18525 + mov_imm r8, 0xff000000
18527 + rethi 0 /* Op0 is NaN */
18529 +        rethi   0               /* Op1 is NaN */
18533 +#ifdef L_avr32_f32_cmp_ge
18534 + srcc r8 /* Set result to true if op0 is positive*/
18536 +#ifdef L_avr32_f32_cmp_lt
18537 + srcs r8 /* Set result to true if op0 is negative*/
18539 + retmi r8 /* Return if signs are different */
18540 + brcs 0f /* Both signs negative? */
18542 + /* Both signs positive */
18544 +#ifdef L_avr32_f32_cmp_ge
18548 +#ifdef L_avr32_f32_cmp_lt
18553 + /* Both signs negative */
18555 +#ifdef L_avr32_f32_cmp_ge
18559 +#ifdef L_avr32_f32_cmp_lt
18566 +#ifdef L_avr32_f64_cmp_eq
18567 + .global __avr32_f64_cmp_eq
18568 + .type __avr32_f64_cmp_eq,@function
18569 +__avr32_f64_cmp_eq:
18574 + /* Args were not equal*/
18575 + /* Both args could be zero with different sign bits */
18576 + lsl r11,1 /* get rid of sign bits */
18578 + or r11,r10 /* Check if all bits are zero */
18581 + reteq 1 /* If all zeros the arguments are equal
18582 + so return 1 else return 0 */
18585 + /* check for NaN */
18587 + mov_imm r12, 0xffe00000
18589 + cpc r11,r12 /* check if nan or inf */
18590 +        retls   1               /* Return 0 if arg is NaN, else return 1 */
18591 + ret 0 /* Return */
18596 +#if defined(L_avr32_f64_cmp_ge) || defined(L_avr32_f64_cmp_lt)
18598 +#ifdef L_avr32_f64_cmp_ge
18599 + .global __avr32_f64_cmp_ge
18600 + .type __avr32_f64_cmp_ge,@function
18601 +__avr32_f64_cmp_ge:
18603 +#ifdef L_avr32_f64_cmp_lt
18604 + .global __avr32_f64_cmp_lt
18605 + .type __avr32_f64_cmp_lt,@function
18606 +__avr32_f64_cmp_lt:
18609 + /* compare magnitude of op1 and op2 */
18612 + lsl r11,1 /* Remove sign bit of op1 */
18613 + srcs r12 /* Sign op1 to lsb of r12*/
18614 + lsl r9,1 /* Remove sign bit of op2 */
18616 + rol r12 /* Sign op2 to lsb of lr, sign bit op1 bit 1 of r12*/
18619 + /* Check for Nan */
18620 + mov_imm lr, 0xffe00000
18623 + brhi 0f /* We have NaN */
18626 + brhi 0f /* We have NaN */
18630 + breq 3f /* op1 zero */
18634 + cp.w r12,3 /* both operands negative ?*/
18637 + cp.w r12,1 /* both operands positive? */
18640 + /* Different signs. If sign of op1 is negative the difference
18641 + between op1 and op2 will always be negative, and if op1 is
18642 + positive the difference will always be positive */
18643 +#ifdef L_avr32_f64_cmp_ge
18647 +#ifdef L_avr32_f64_cmp_lt
18653 + /* Both operands positive. Just compute the difference */
18656 +#ifdef L_avr32_f64_cmp_ge
18660 +#ifdef L_avr32_f64_cmp_lt
18666 + /* Both operands negative. Compute the difference with operands switched */
18669 +#ifdef L_avr32_f64_cmp_ge
18673 +#ifdef L_avr32_f64_cmp_lt
18684 + cp.w r7, 1 /* Check sign bit from r9 */
18685 +#ifdef L_avr32_f64_cmp_ge
18686 + sreq r12 /* If op2 is negative then op1 >= op2. */
18688 +#ifdef L_avr32_f64_cmp_lt
18689 +        srne    r12             /* If op2 is positive then op1 <= op2. */
18695 +#ifdef L_avr32_f64_cmp_ge
18696 + reteq 1 /* Both operands are zero. Return true. */
18698 +#ifdef L_avr32_f64_cmp_lt
18699 + reteq 0 /* Both operands are zero. Return false. */
18704 +#if defined(L_avr32_f64_div) || defined(L_avr32_f64_div_fast)
18707 +#if defined(L_avr32_f64_div_fast)
18708 + .global __avr32_f64_div_fast
18709 + .type __avr32_f64_div_fast,@function
18710 +__avr32_f64_div_fast:
18712 + .global __avr32_f64_div
18713 + .type __avr32_f64_div,@function
18716 + stm --sp, r0, r1, r2, r3, r4, r5, r6, r7,lr
18717 + /* op1 in {r11,r10}*/
18718 + /* op2 in {r9,r8}*/
18719 + eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
18722 + /* Unpack op1 to 2.62 format*/
18724 + /* sf: r11, r10 */
18725 + lsr r7, r11, 20 /* Extract exponent */
18727 + lsl r11, 9 /* Extract mantissa, leave room for implicit bit */
18728 + or r11, r11, r10>>23
18730 + sbr r11, 29 /* Insert implicit bit */
18731 + andh r11, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
18733 + cbr r7, 11 /* Clear sign bit */
18734 + /* Check if normalization is needed */
18735 + breq 11f /*If number is subnormal, normalize it */
18738 + brge 2f /* Check op1 for NaN or Inf */
18740 + /* Unpack op2 to 2.62 format*/
18743 + lsr r6, r9, 20 /* Extract exponent */
18745 + lsl r9, 9 /* Extract mantissa, leave room for implicit bit */
18746 + or r9, r9, r8>>23
18748 + sbr r9, 29 /* Insert implicit bit */
18749 + andh r9, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
18751 + cbr r6, 11 /* Clear sign bit */
18752 + /* Check if normalization is needed */
18753 + breq 13f /*If number is subnormal, normalize it */
18756 + brge 3f /* Check op2 for NaN or Inf */
18758 + /* Calculate new exponent */
18763 + /* Approximating 1/d with the following recurrence: */
18764 + /* R[j+1] = R[j]*(2-R[j]*d) */
18765 + /* Using 2.62 format */
18767 + /* d = op2 = divisor (2.62 format): r9,r8 */
18768 + /* Multiply result : r5, r4 */
18769 + /* Initial guess : r3, r2 */
18770 + /* New approximations : r3, r2 */
18771 + /* op1 = Dividend (2.62 format) : r11, r10 */
18773 + mov_imm r12, 0x80000000
18775 + /* Load initial guess, using look-up table */
18776 + /* Initial guess is of format 01.XY, where XY is constructed as follows: */
18777 + /* Let d be of following format: 00.1xy....., then XY=~xy */
18778 + /* For d=00.100 = 0,5 -> initial guess=01.11 = 1,75 */
18779 +        /* For d=00.101 = 0,625 -> initial guess=01.10 = 1,5 */
18780 +        /* For d=00.110 = 0,75 -> initial guess=01.01 = 1,25 */
18781 +        /* For d=00.111 = 0,875 -> initial guess=01.00 = 1,0 */
18782 + /* r2 is also part of the reg pair forming initial guess, but it*/
18783 + /* is kept uninitialized to save one cycle since it has so low significance*/
18786 + bfextu r4, r9, 27, 2
18788 + bfins r3, r4, 28, 2
18790 + /* First approximation */
18791 + /* Approximating to 32 bits */
18792 + /* r5 = R[j]*d */
18793 + mulu.d r4, r3, r9
18794 + /* r5 = 2-R[j]*d */
18795 + sub r5, r12, r5<<2
18796 + /* r3 = R[j]*(2-R[j]*d) */
18797 + mulu.d r4, r3, r5
18800 + /* Second approximation */
18801 + /* Approximating to 32 bits */
18802 + /* r5 = R[j]*d */
18803 + mulu.d r4, r3, r9
18804 + /* r5 = 2-R[j]*d */
18805 + sub r5, r12, r5<<2
18806 + /* r3 = R[j]*(2-R[j]*d) */
18807 + mulu.d r4, r3, r5
18810 + /* Third approximation */
18811 + /* Approximating to 32 bits */
18812 + /* r5 = R[j]*d */
18813 + mulu.d r4, r3, r9
18814 + /* r5 = 2-R[j]*d */
18815 + sub r5, r12, r5<<2
18816 + /* r3 = R[j]*(2-R[j]*d) */
18817 + mulu.d r4, r3, r5
18820 + /* Fourth approximation */
18821 + /* Approximating to 64 bits */
18822 + /* r5,r4 = R[j]*d */
18823 + mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
18825 + or r5, r5, r4>>30
18827 + /* r5,r4 = 2-R[j]*d */
18830 + /* r3,r2 = R[j]*(2-R[j]*d) */
18831 + mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
18833 + or r3, r3, r4>>30
18837 + /* Fifth approximation */
18838 + /* Approximating to 64 bits */
18839 + /* r5,r4 = R[j]*d */
18840 + mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
18842 + or r5, r5, r4>>30
18844 + /* r5,r4 = 2-R[j]*d */
18847 + /* r3,r2 = R[j]*(2-R[j]*d) */
18848 + mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
18850 + or r3, r3, r4>>30
18854 + /* Multiply with dividend to get quotient */
18855 + mul_approx_df r3 /*ah*/, r2 /*al*/, r11 /*bh*/, r10 /*bl*/, r3 /*rh*/, r2 /*rl*/, r1 /*sh*/, r0 /*sl*/
18858 + /* To increase speed, this result is not corrected before final rounding.*/
18859 + /* This may give a difference to IEEE compliant code of 1 ULP.*/
18862 + /* Adjust exponent and mantissa */
18863 + /* r7:exp, [r3, r2]:mant, [r5, r4]:scratch*/
18864 + /* Mantissa may be of the format 0.xxxx or 1.xxxx. */
18865 + /* In the first case, shift one pos to left.*/
18871 +#if defined(L_avr32_f64_div)
18872 + /* We must scale down the dividend to 5.59 format. */
18874 + or r10, r10, r11 << 29
18879 +#if defined(L_avr32_f64_div)
18880 + /* We must scale down the dividend to 6.58 format. */
18882 + or r10, r10, r11 << 28
18887 + brle __avr32_f64_div_res_subnormal /* Result was subnormal. */
18890 +#if defined(L_avr32_f64_div)
18891 + /* In order to round correctly we calculate the remainder:
18892 + Remainder = dividend[11:r10] - divisor[r9:r8]*quotient[r3:r2]
18893 + for the case when the quotient is halfway between the round-up
18894 + value and the round down value. If the remainder then is negative
18895 +           it means that the quotient was too big and that it should not be
18896 +           rounded up, if the remainder is positive the quotient was too small
18897 + and we need to round up. If the remainder is zero it means that the
18898 + quotient is exact but since we need to remove the guard bit we should
18899 + round to even. */
18901 + /* Truncate and add guard bit. */
18906 + /* Now do the multiplication. The quotient has the format 4.60
18907 + while the divisor has the format 2.62 which gives a result
18909 + mulu.d r0, r3, r8
18910 + macu.d r0, r2, r9
18911 + mulu.d r4, r2, r8
18912 + mulu.d r8, r3, r9
18918 + /* Check if remainder is positive, negative or equal. */
18919 +        bfextu  r12, r2, 8, 1   /* Get parity bit into bit 0 of r12 */
18922 +__avr32_f64_div_round_subnormal:
18925 + srlo r6 /* Remainder positive: we need to round up.*/
18926 + moveq r6, r12 /* Remainder zero: round up if mantissa odd. */
18928 + bfextu r6, r2, 7, 1 /* Get guard bit */
18930 + /* Final packing, scale down mantissa. */
18932 + or r10, r10, r3<<24
18934 + /* Insert exponent and sign bit*/
18935 + bfins r11, r7, 20, 11
18939 + /* Final rounding */
18943 + /* Return result in [r11,r10] */
18944 + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
18948 + /* Op1 is NaN or inf */
18949 + andh r11, 0x000f /* Extract mantissa */
18951 + brne 16f /* Return NaN if op1 is NaN */
18952 + /* Op1 is inf check op2 */
18953 + lsr r6, r9, 20 /* Extract exponent */
18954 + cbr r6, 11 /* Clear sign bit */
18956 + brne 17f /* Inf/number gives inf, return inf */
18957 + rjmp 16f /* The rest gives NaN*/
18960 + /* Op1 is a valid number. Op 2 is NaN or inf */
18961 + andh r9, 0x000f /* Extract mantissa */
18963 + brne 16f /* Return NaN if op2 is NaN */
18964 + rjmp 15f /* Op2 was inf, return zero*/
18966 +11: /* Op1 was denormal. Fix it. */
18968 + or r11, r11, r10 >> 29
18970 + /* Check if op1 is zero. */
18972 + breq __avr32_f64_div_op1_zero
18973 + normalize_df r7 /*exp*/, r10, r11 /*Mantissa*/, r4, r5 /*scratch*/
18975 + or r10, r10, r11 << 30
18980 +13: /* Op2 was denormal. Fix it */
18982 + or r9, r9, r8 >> 29
18984 + /* Check if op2 is zero. */
18986 + breq 17f /* Divisor is zero -> return Inf */
18987 + normalize_df r6 /*exp*/, r8, r9 /*Mantissa*/, r4, r5 /*scratch*/
18989 + or r8, r8, r9 << 30
18994 +__avr32_f64_div_res_subnormal:/* Divide result was subnormal. */
18995 +#if defined(L_avr32_f64_div)
18996 + /* Check how much we must scale down the mantissa. */
18998 + sub r7, -1 /* We do no longer have an implicit bit. */
18999 + satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
19002 + /* Shift amount <32 */
19003 + /* Scale down quotient */
19009 + /* Scale down the dividend to match the scaling of the quotient. */
19018 + /* Shift amount >=32 */
19028 + /* Scale down the dividend to match the scaling of the quotient. */
19034 + /* Start performing the same rounding as done for normal numbers
19035 + but this time we have scaled the quotient and dividend and hence
19036 + need a little different comparison. */
19037 + /* Truncate and add guard bit. */
19041 + /* Now do the multiplication. */
19042 + mulu.d r6, r3, r8
19043 + macu.d r6, r2, r9
19044 + mulu.d r4, r2, r8
19045 + mulu.d r8, r3, r9
19050 + /* Set exponent to 0 */
19053 + /* Check if remainder is positive, negative or equal. */
19054 +        bfextu  r12, r2, 8, 1   /* Get parity bit into bit 0 of r12 */
19057 + /* Now the rest of the rounding is the same as for normals. */
19058 + rjmp __avr32_f64_div_round_subnormal
19062 + /* Flush to zero for the fast version. */
19063 + mov r11, lr /*Get correct sign*/
19064 + andh r11, 0x8000, COH
19066 + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
19068 +16: /* Return NaN. */
19071 + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
19074 + /* Check if op1 is zero. */
19076 + breq __avr32_f64_div_op1_zero
19077 + /* Return INF. */
19078 + mov r11, lr /*Get correct sign*/
19079 + andh r11, 0x8000, COH
19082 + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
19084 +__avr32_f64_div_op1_zero:
19085 + or r5, r8, r9 << 1
19086 + breq 16b /* 0.0/0.0 -> NaN */
19087 + bfextu r4, r9, 20, 11
19089 + brne 15b /* Return zero */
19090 + /* Check if divisor is Inf or NaN */
19091 + or r5, r8, r9 << 12
19092 + breq 15b /* Divisor is inf -> return zero */
19093 + rjmp 16b /* Return NaN */
19100 +#if defined(L_avr32_f32_addsub) || defined(L_avr32_f32_addsub_fast)
19103 +__avr32_f32_sub_from_add:
19104 + /* Switch sign on op2 */
19107 +#if defined(L_avr32_f32_addsub_fast)
19108 + .global __avr32_f32_sub_fast
19109 + .type __avr32_f32_sub_fast,@function
19110 +__avr32_f32_sub_fast:
19112 + .global __avr32_f32_sub
19113 + .type __avr32_f32_sub,@function
19117 + /* Check signs */
19119 + /* Different signs, use subtraction. */
19120 + brmi __avr32_f32_add_from_sub
19122 + /* Get sign of op1 */
19124 + andh r12, 0x8000, COH
19126 + /* Remove sign from operands */
19128 +#if defined(L_avr32_f32_addsub_fast)
19129 + reteq r8 /* If op2 is zero return op1 */
19133 + /* Put the number with the largest exponent in r10
19134 + and the number with the smallest exponent in r9 */
19137 + cp r10, r8 /*If largest operand (in R10) is not equal to op1*/
19138 + subne r12, 1 /* Subtract 1 from sign, which will invert MSB of r12*/
19139 + andh r12, 0x8000, COH /*Mask all but MSB*/
19141 + /* Unpack exponent and mantissa of op1 */
19143 + sbr r8, 31 /* Set implicit bit. */
19146 + /* op1 is NaN or Inf. */
19148 + breq __avr32_f32_sub_op1_nan_or_inf
19150 + /* Unpack exponent and mantissa of op2 */
19152 + sbr r11, 31 /* Set implicit bit. */
19155 +#if defined(L_avr32_f32_addsub)
19156 + /* Keep sticky bit for correct IEEE rounding */
19159 + /* op2 is either zero or subnormal. */
19160 + breq __avr32_f32_sub_op2_subnormal
19162 + /* Get shift amount to scale mantissa of op2. */
19165 + breq __avr32_f32_sub_shift_done
19167 + /* Saturate the shift amount to 31. If the amount
19168 + is any larger op2 is insignificant. */
19171 + /* Put the remaining bits into r9.*/
19175 + /* If the remaining bits are non-zero then we must subtract one
19176 + more from opL. */
19178 + srne r9 /* LSB of r9 represents sticky bits. */
19180 + /* Shift mantissa of op2 to same decimal point as the mantissa
19182 + lsr r11, r11, r12
19185 +__avr32_f32_sub_shift_done:
19186 + /* Now subtract the mantissas. */
19191 + /* Normalize resulting mantissa. */
19197 + brle __avr32_f32_sub_subnormal_result
19199 + /* Insert the bits we will remove from the mantissa into r9[31:24] */
19200 + or r9, r9, r8 << 24
19202 + /* Ignore sticky bit to simplify and speed up rounding */
19203 + /* op2 is either zero or subnormal. */
19204 + breq __avr32_f32_sub_op2_subnormal
19206 + /* Get shift amount to scale mantissa of op2. */
19209 + /* Saturate the shift amount to 31. If the amount
19210 + is any larger op2 is insignificant. */
19213 + /* Shift mantissa of op2 to same decimal point as the mantissa
19217 + /* Now subtract the mantissas. */
19220 + /* Normalize resulting mantissa. */
19225 + brle __avr32_f32_sub_subnormal_result
19228 + /* Pack result. */
19229 + or r12, r12, r8 >> 8
19230 + bfins r12, r10, 23, 8
19233 +__avr32_f32_sub_round:
19234 +#if defined(L_avr32_f32_addsub)
19235 + mov_imm r10, 0x80000000
19248 +__avr32_f32_sub_op2_subnormal:
19249 + /* Fix implicit bit and adjust exponent of subnormals. */
19251 + /* Set exponent to 1 if we do not have a zero. */
19254 + /* Check if op1 is also subnormal. */
19259 + /* If op1 is not zero set exponent to 1. */
19264 +__avr32_f32_sub_op1_nan_or_inf:
19265 + /* Check if op1 is NaN, if so return NaN */
19269 + /* op1 is Inf. */
19270 + bfins r12, r10, 23, 8 /* Generate Inf in r12 */
19272 + /* Check if op2 is Inf. or NaN */
19275 + retne r12 /* op2 not Inf or NaN, return op1 */
19277 + ret -1 /* op2 Inf or NaN, return NaN */
19279 +__avr32_f32_sub_subnormal_result:
19280 + /* Check if the number is so small that
19281 + it will be represented with zero. */
19283 + rsub r11, r10, 32
19286 + /* Shift the mantissa into the correct position.*/
19288 + /* Add sign bit. */
19291 + /* Put the shifted out bits in the most significant part
19295 +#if defined(L_avr32_f32_addsub)
19296 + /* Add all the remainder bits used for rounding into r9 */
19301 + rjmp __avr32_f32_sub_round
19306 +__avr32_f32_add_from_sub:
19307 + /* Switch sign on op2 */
19310 +#if defined(L_avr32_f32_addsub_fast)
19311 + .global __avr32_f32_add_fast
19312 + .type __avr32_f32_add_fast,@function
19313 +__avr32_f32_add_fast:
19315 + .global __avr32_f32_add
19316 + .type __avr32_f32_add,@function
19320 + /* Check signs */
19322 + /* Different signs, use subtraction. */
19323 + brmi __avr32_f32_sub_from_add
19325 + /* Get sign of op1 */
19327 + andh r12, 0x8000, COH
19329 + /* Remove sign from operands */
19331 +#if defined(L_avr32_f32_addsub_fast)
19332 + reteq r8 /* If op2 is zero return op1 */
19336 + /* Put the number with the largest exponent in r10
19337 + and the number with the smallest exponent in r9 */
19341 + /* Unpack exponent and mantissa of op1 */
19343 + sbr r8, 31 /* Set implicit bit. */
19346 + /* op1 is NaN or Inf. */
19348 + breq __avr32_f32_add_op1_nan_or_inf
19350 + /* Unpack exponent and mantissa of op2 */
19352 + sbr r11, 31 /* Set implicit bit. */
19355 +#if defined(L_avr32_f32_addsub)
19356 + /* op2 is either zero or subnormal. */
19357 + breq __avr32_f32_add_op2_subnormal
19359 + /* Keep sticky bit for correct IEEE rounding */
19362 + /* Get shift amount to scale mantissa of op2. */
19365 + /* Saturate the shift amount to 31. If the amount
19366 + is any larger op2 is insignificant. */
19369 + /* Shift mantissa of op2 to same decimal point as the mantissa
19373 +        /* Put the remaining bits into r11[23:..].*/
19374 + rsub r9, r9, (32-8)
19376 + /* Insert the bits we will remove from the mantissa into r11[31:24] */
19377 + bfins r11, r12, 24, 8
19379 + /* Now add the mantissas. */
19384 + /* Ignore sticky bit to simplify and speed up rounding */
19385 + /* op2 is either zero or subnormal. */
19386 + breq __avr32_f32_add_op2_subnormal
19388 + /* Get shift amount to scale mantissa of op2. */
19391 + /* Saturate the shift amount to 31. If the amount
19392 + is any larger op2 is insignificant. */
19395 + /* Shift mantissa of op2 to same decimal point as the mantissa
19399 + /* Now add the mantissas. */
19403 + /* Check if we overflowed. */
19404 + brcs __avr32_f32_add_res_of
19406 + /* Pack result. */
19407 + or r12, r12, r8 >> 8
19408 + bfins r12, r10, 23, 8
19411 +#if defined(L_avr32_f32_addsub)
19412 + mov_imm r10, 0x80000000
19424 +__avr32_f32_add_op2_subnormal:
19425 + /* Fix implicit bit and adjust exponent of subnormals. */
19427 + /* Set exponent to 1 if we do not have a zero. */
19430 + /* Check if op1 is also subnormal. */
19433 + /* Both operands subnormal, just add the mantissas and
19434 + pack. If the addition of the subnormal numbers results
19435 + in a normal number then the exponent will automatically
19436 + be set to 1 by the addition. */
19439 + or r12, r12, r11 >> 8
19442 +__avr32_f32_add_op1_nan_or_inf:
19443 + /* Check if op1 is NaN, if so return NaN */
19447 + /* op1 is Inf. */
19448 + bfins r12, r10, 23, 8 /* Generate Inf in r12 */
19450 + /* Check if op2 is Inf. or NaN */
19453 + retne r12 /* op2 not Inf or NaN, return op1 */
19456 + reteq r12 /* op2 Inf return op1 */
19457 + ret -1 /* op2 is NaN, return NaN */
19459 +__avr32_f32_add_res_of:
19460 + /* We overflowed. Increase exponent and shift mantissa.*/
19464 + /* Clear mantissa to set result to Inf if the exponent is 255. */
19474 +#if defined(L_avr32_f32_div) || defined(L_avr32_f32_div_fast)
19477 +#if defined(L_avr32_f32_div_fast)
19478 + .global __avr32_f32_div_fast
19479 + .type __avr32_f32_div_fast,@function
19480 +__avr32_f32_div_fast:
19482 + .global __avr32_f32_div
19483 + .type __avr32_f32_div,@function
19487 + eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
19492 + breq 4f /* Check op2 for zero */
19502 + breq 11f /*If number is subnormal*/
19504 + brhs 2f /* Check op1 for NaN or Inf */
19506 + sbr r12, 31 /*Implicit bit*/
19513 + breq 13f /*If number is subnormal*/
19515 + brhs 3f /* Check op2 for NaN or Inf */
19517 + sbr r11, 31 /*Implicit bit*/
19521 + reteq 0 /* op1 is zero and op2 is not zero */
19522 + /* or NaN so return zero */
19526 + /* For UC3, store with predecrement is faster than stm */
19530 + /* Calculate new exponent */
19535 + /* Approximating 1/d with the following recurrence: */
19536 + /* R[j+1] = R[j]*(2-R[j]*d) */
19537 + /* Using 2.30 format */
19540 + /* Multiply result : r6, r7 */
19541 + /* Initial guess : r11 */
19542 + /* New approximations : r11 */
19543 + /* Dividend : r12 */
19546 + mov_imm r10, 0x80000000
19548 + lsr r12, 2 /* Get significand of Op1 in 2.30 format */
19549 + lsr r5, r11, 2 /* Get significand of Op2 (=d) in 2.30 format */
19551 + /* Load initial guess, using look-up table */
19552 + /* Initial guess is of format 01.XY, where XY is constructed as follows: */
19553 + /* Let d be of following format: 00.1xy....., then XY=~xy */
19554 + /* For d=00.100 = 0,5 -> initial guess=01.11 = 1,75 */
19555 +        /* For d=00.101 = 0,625 -> initial guess=01.10 = 1,5 */
19556 +        /* For d=00.110 = 0,75 -> initial guess=01.01 = 1,25 */
19557 +        /* For d=00.111 = 0,875 -> initial guess=01.00 = 1,0 */
19560 + bfextu r6, r5, 27, 2
19562 + bfins r11, r6, 28, 2
19564 + /* First approximation */
19565 + /* r7 = R[j]*d */
19566 + mulu.d r6, r11, r5
19567 + /* r7 = 2-R[j]*d */
19568 + sub r7, r10, r7<<2
19569 + /* r11 = R[j]*(2-R[j]*d) */
19570 + mulu.d r6, r11, r7
19573 + /* Second approximation */
19574 + /* r7 = R[j]*d */
19575 + mulu.d r6, r11, r5
19576 + /* r7 = 2-R[j]*d */
19577 + sub r7, r10, r7<<2
19578 + /* r11 = R[j]*(2-R[j]*d) */
19579 + mulu.d r6, r11, r7
19582 + /* Third approximation */
19583 + /* r7 = R[j]*d */
19584 + mulu.d r6, r11, r5
19585 + /* r7 = 2-R[j]*d */
19586 + sub r7, r10, r7<<2
19587 + /* r11 = R[j]*(2-R[j]*d) */
19588 + mulu.d r6, r11, r7
19591 + /* Fourth approximation */
19592 + /* r7 = R[j]*d */
19593 + mulu.d r6, r11, r5
19594 + /* r7 = 2-R[j]*d */
19595 + sub r7, r10, r7<<2
19596 + /* r11 = R[j]*(2-R[j]*d) */
19597 + mulu.d r6, r11, r7
19601 + /* Multiply with dividend to get quotient, r7 = sf(op1)/sf(op2) */
19602 + mulu.d r6, r11, r12
19604 + /* Shift by 3 to get result in 1.31 format, as required by the exponent. */
19605 + /* Note that 1.31 format is already used by the exponent in r9, since */
19606 + /* a bias of 127 was added to the result exponent, even though the implicit */
19607 + /* bit was inserted. This gives the exponent an additional bias of 1, which */
19608 + /* supports 1.31 format. */
19611 + /* Adjust exponent and mantissa in case the result is of format
19612 + 0000.1xxx to 0001.xxx*/
19613 +#if defined(L_avr32_f32_div)
19614 + lsr r12, 4 /* Scale dividend to 6.26 format to match the
19615 + result of the multiplication of the divisor and
19616 + quotient to get the remainder. */
19622 +#if defined(L_avr32_f32_div)
19623 + lsl r12, 1 /* Scale dividend to 5.27 format to match the
19624 + result of the multiplication of the divisor and
19625 + quotient to get the remainder. */
19629 + brle __avr32_f32_div_res_subnormal /* Result was subnormal. */
19632 +#if defined(L_avr32_f32_div)
19633 + /* In order to round correctly we calculate the remainder:
19634 + Remainder = dividend[r12] - divisor[r5]*quotient[r7]
19635 + for the case when the quotient is halfway between the round-up
19636 + value and the round down value. If the remainder then is negative
19637 +           it means that the quotient was too big and that it should not be
19638 +           rounded up, if the remainder is positive the quotient was too small
19639 + and we need to round up. If the remainder is zero it means that the
19640 + quotient is exact but since we need to remove the guard bit we should
19641 + round to even. */
19645 + /* Now do the multiplication. The quotient has the format 4.28
19646 + while the divisor has the format 2.30 which gives a result
19648 + mulu.d r10, r5, r7
19650 + /* Check if remainder is positive, negative or equal. */
19651 + bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
19653 +__avr32_f32_div_round_subnormal:
19655 + srlo r11 /* Remainder positive: we need to round up.*/
19656 + moveq r11, r5 /* Remainder zero: round up if mantissa odd. */
19658 + bfextu r11, r7, 4, 1 /* Get guard bit */
19661 + /* Pack final result*/
19663 + bfins r12, r9, 23, 8
19664 + /* For UC3, load with postincrement is faster than ldm */
19669 + /* Rounding add. */
19673 +__divsf_return_op1:
19680 + /* Op1 is NaN or inf */
19681 + retne -1 /* Return NaN if op1 is NaN */
19682 + /* Op1 is inf check op2 */
19683 + mov_imm r9, 0xff000000
19685 + brlo __divsf_return_op1 /* inf/number gives inf */
19686 + ret -1 /* The rest gives NaN*/
19688 + /* Op2 is NaN or inf */
19689 + reteq 0 /* Return zero if number/inf*/
19690 + ret -1 /* Return NaN*/
19692 + /* Op1 is zero ? */
19694 + reteq -1 /* 0.0/0.0 is NaN */
19695 + /* Op1 is Nan? */
19697 + breq 11f /*If number is subnormal*/
19699 + brhs 2b /* Check op1 for NaN or Inf */
19700 + /* Nonzero/0.0 is Inf. Sign bit will be shifted in before returning*/
19701 + mov_imm r12, 0xff000000
19702 + rjmp __divsf_return_op1
19704 +11: /* Op1 was denormal. Fix it. */
19711 +13: /* Op2 was denormal. Fix it. */
19719 +__avr32_f32_div_res_subnormal: /* Divide result was subnormal */
19720 +#if defined(L_avr32_f32_div)
19721 + /* Check how much we must scale down the mantissa. */
19723 + sub r9, -1 /* We do no longer have an implicit bit. */
19724 + satu r9 >> 0, 5 /* Saturate shift amount to max 32. */
19725 + /* Scale down quotient */
19728 + /* Scale down the dividend to match the scaling of the quotient. */
19729 + lsl r6, r12, r10 /* Make the dividend 64-bit and put the lsw in r6 */
19732 + /* Start performing the same rounding as done for normal numbers
19733 + but this time we have scaled the quotient and dividend and hence
19734 + need a little different comparison. */
19738 + /* Now do the multiplication. The quotient has the format 4.28
19739 + while the divisor has the format 2.30 which gives a result
19741 + mulu.d r10, r5, r7
19743 + /* Set exponent to 0 */
19746 + /* Check if remainder is positive, negative or equal. */
19747 + bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
19749 + rjmp __avr32_f32_div_round_subnormal
19754 + /*Flush to zero*/
19759 +#ifdef L_avr32_f32_mul
19760 + .global __avr32_f32_mul
19761 + .type __avr32_f32_mul,@function
19766 + eor r12, r11 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
19767 + andh r12, 0x8000, COH
19769 + /* arrange operands so that op1 >= op2 */
19771 + breq __avr32_f32_mul_op1_zero
19774 + /* Put the number with the largest exponent in r10
19775 + and the number with the smallest exponent in r9 */
19779 + /* Unpack exponent and mantissa of op1 */
19781 + sbr r8, 31 /* Set implicit bit. */
19784 + /* op1 is NaN or Inf. */
19786 + breq __avr32_f32_mul_op1_nan_or_inf
19788 + /* Unpack exponent and mantissa of op2 */
19790 + sbr r11, 31 /* Set implicit bit. */
19793 + /* op2 is either zero or subnormal. */
19794 + breq __avr32_f32_mul_op2_subnormal
19796 + /* Calculate new exponent */
19799 + /* Do the multiplication */
19800 + mulu.d r10,r8,r11
19802 + /* We might need to scale up by two if the MSB of the result is
19808 + /* Put the shifted out bits of the mantissa into r10 */
19810 + bfins r10, r11, 24, 8
19812 + sub r9,(127-1) /* remove extra exponent bias */
19813 + brle __avr32_f32_mul_res_subnormal
19815 + /* Check for Inf. */
19819 + /* Pack result. */
19820 + or r12, r12, r11 >> 8
19821 + bfins r12, r9, 23, 8
19824 +__avr32_f32_mul_round:
19825 + mov_imm r8, 0x80000000
19839 +__avr32_f32_mul_op2_subnormal:
19842 + retcs 0 /* op2 is zero. Return 0 */
19846 + /* Check if op2 is subnormal. */
19850 + /* op2 is subnormal */
19853 + retcs 0 /* op1 is zero. Return 0 */
19860 +__avr32_f32_mul_op1_nan_or_inf:
19861 + /* Check if op1 is NaN, if so return NaN */
19865 + /* op1 is Inf. */
19867 + reteq -1 /* Inf * 0 -> NaN */
19869 + bfins r12, r10, 23, 8 /* Generate Inf in r12 */
19871 + /* Check if op2 is Inf. or NaN */
19874 + retne r12 /* op2 not Inf or NaN, return Inf */
19877 + reteq r12 /* op2 Inf return Inf */
19878 + ret -1 /* op2 is NaN, return NaN */
19880 +__avr32_f32_mul_res_subnormal:
19881 + /* Check if the number is so small that
19882 + it will be represented with zero. */
19887 + /* Shift the mantissa into the correct position.*/
19889 + /* Add sign bit. */
19891 + /* Put the shifted out bits in the most significant part
19895 + /* Add all the remainder bits used for rounding into r11 */
19898 + rjmp __avr32_f32_mul_round
19900 +__avr32_f32_mul_op1_zero:
19901 + bfextu r10, r11, 23, 8
19909 +#ifdef L_avr32_s32_to_f32
19910 + .global __avr32_s32_to_f32
19911 + .type __avr32_s32_to_f32,@function
19912 +__avr32_s32_to_f32:
19914 + reteq r12 /* If zero then return zero float */
19915 + mov r11, r12 /* Keep the sign */
19916 + abs r12 /* Compute the absolute value */
19917 + mov r10, 31 + 127 /* Set the correct exponent */
19920 + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
19922 + /* Check for subnormal result */
19924 + brle __avr32_s32_to_f32_subnormal
19926 + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
19927 + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
19932 +__avr32_s32_to_f32_subnormal:
19933 + /* Adjust a subnormal result */
19934 + adjust_subnormal_sf r12/*sf*/, r10 /*exp*/, r12 /*mant*/, r11/*sign*/, r9 /*scratch*/
19939 +#ifdef L_avr32_u32_to_f32
19940 + .global __avr32_u32_to_f32
19941 + .type __avr32_u32_to_f32,@function
19942 +__avr32_u32_to_f32:
19944 + reteq r12 /* If zero then return zero float */
19945 + mov r10, 31 + 127 /* Set the correct exponent */
19948 + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
19950 + /* Check for subnormal result */
19952 + brle __avr32_u32_to_f32_subnormal
19954 + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
19955 + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
19956 + lsr r12,1 /* Sign bit is 0 for unsigned int */
19959 +__avr32_u32_to_f32_subnormal:
19960 + /* Adjust a subnormal result */
19962 + adjust_subnormal_sf r12/*sf*/,r10 /*exp*/, r12 /*mant*/,r8/*sign*/, r9 /*scratch*/
19969 +#ifdef L_avr32_f32_to_s32
19970 + .global __avr32_f32_to_s32
19971 + .type __avr32_f32_to_s32,@function
19972 +__avr32_f32_to_s32:
19973 + bfextu r11, r12, 23, 8
19974 + sub r11,127 /* Fix bias */
19975 + retlo 0 /* Negative exponent yields zero integer */
19977 + /* Shift mantissa into correct position */
19978 + rsub r11,r11,31 /* Shift amount */
19979 + lsl r10,r12,8 /* Get mantissa */
19980 + sbr r10,31 /* Add implicit bit */
19981 + lsr r10,r10,r11 /* Perform shift */
19982 + lsl r12,1 /* Check sign */
19983 + retcc r10 /* if positive, we are done */
19984 + neg r10 /* if negative float, negate result */
19989 +#ifdef L_avr32_f32_to_u32
19990 + .global __avr32_f32_to_u32
19991 + .type __avr32_f32_to_u32,@function
19992 +__avr32_f32_to_u32:
19994 + retmi 0 /* Negative numbers gives 0 */
19995 + bfextu r11, r12, 23, 8 /* Extract exponent */
19996 + sub r11,127 /* Fix bias */
19997 + retlo 0 /* Negative exponent yields zero integer */
19999 + /* Shift mantissa into correct position */
20000 + rsub r11,r11,31 /* Shift amount */
20001 + lsl r12,8 /* Get mantissa */
20002 + sbr r12,31 /* Add implicit bit */
20003 + lsr r12,r12,r11 /* Perform shift */
20008 +#ifdef L_avr32_f32_to_f64
20009 + .global __avr32_f32_to_f64
20010 + .type __avr32_f32_to_f64,@function
20012 +__avr32_f32_to_f64:
20013 + lsl r11,r12,1 /* Remove sign bit, keep original value in r12*/
20015 + reteq r11 /* Return zero if input is zero */
20017 + bfextu r9,r11,24,8 /* Get exponent */
20018 + cp.w r9,0xff /* check for NaN or inf */
20021 + lsl r11,7 /* Convert sf mantissa to df format */
20024 + /* Check if implicit bit should be set */
20026 + subeq r9,-1 /* Adjust exponent if it was 0 */
20028 + or r11, r11, r8 << 31 /* Set implicit bit if needed */
20029 + sub r9,(127-0x3ff) /* Convert exponent to df format exponent */
20031 + /*We know that low register of mantissa is 0, and will be unaffected by normalization.*/
20032 + /*We can therefore use the faster normalize_sf function instead of normalize_df.*/
20033 + normalize_sf r9 /*exp*/, r11 /*mantissa*/, r8 /*scratch*/
20034 + pack_df r9 /*exp*/, r10, r11 /*mantissa*/, r10, r11 /*df*/
20036 +__extendsfdf_return_op1:
20037 + /* Rotate in sign bit */
20044 + mov_imm r10, 0xffe00000
20045 + lsl r11,8 /* check mantissa */
20046 + movne r11, -1 /* Return NaN */
20047 + moveq r11, r10 /* Return inf */
20049 + rjmp __extendsfdf_return_op1
20053 +#ifdef L_avr32_f64_to_f32
20054 + .global __avr32_f64_to_f32
20055 + .type __avr32_f64_to_f32,@function
20057 +__avr32_f64_to_f32:
20059 + lsl r9,r11,1 /* Unpack exponent */
20062 + reteq 0 /* If exponent is 0 the number is so small
20063 + that the conversion to single float gives
20066 + lsl r8,r11,10 /* Adjust mantissa */
20067 + or r12,r8,r10>>22
20069 + lsl r10,10 /* Check if there are any remaining bits
20070 + in the low part of the mantissa.*/
20072 + rol r12 /* If there were remaining bits then set lsb
20073 + of mantissa to 1 */
20076 + breq 2f /* Check for NaN or inf */
20078 + sub r9,(0x3ff-127) /* Adjust bias of exponent */
20079 + sbr r12,31 /* set the implicit bit.*/
20081 + cp.w r9, 0 /* Check for subnormal number */
20084 + round_sf r9 /*exp*/, r12 /*mant*/, r10 /*scratch*/
20085 + pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/
20086 +__truncdfsf_return_op1:
20087 + /* Rotate in sign bit */
20094 + cbr r12,31 /* clear implicit bit */
20095 + retne -1 /* Return NaN if mantissa not zero */
20096 + mov_imm r12, 0x7f800000
20097 + ret r12 /* Return inf */
20099 +3: /* Result is subnormal. Adjust it.*/
20100 + adjust_subnormal_sf r12/*sf*/,r9 /*exp*/, r12 /*mant*/, r11/*sign*/, r10 /*scratch*/
20106 +#if defined(L_mulsi3) && defined(__AVR32_NO_MUL__)
20108 + .type __mulsi3,@function
20114 + addcs r9, r9, r12
20122 +++ b/gcc/config/avr32/lib2funcs.S
20125 + .global __nonlocal_goto
20126 + .type __nonlocal_goto,@function
20128 +/* __nonlocal_goto: This function handles nonlocal_goto's in gcc.
20130 + parameter 0 (r12) = New Frame Pointer
20131 + parameter 1 (r11) = Address to goto
20132 + parameter 2 (r10) = New Stack Pointer
20134 + This function invalidates the return stack, since it returns from a
20135 + function without using a return instruction.
20140 + frs # Flush return stack
20146 +++ b/gcc/config/avr32/linux-elf.h
20149 + Linux/Elf specific definitions.
20150 + Copyright 2003-2006 Atmel Corporation.
20152 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
20153 + and Håvard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
20155 + This file is part of GCC.
20157 + This program is free software; you can redistribute it and/or modify
20158 + it under the terms of the GNU General Public License as published by
20159 + the Free Software Foundation; either version 2 of the License, or
20160 + (at your option) any later version.
20162 + This program is distributed in the hope that it will be useful,
20163 + but WITHOUT ANY WARRANTY; without even the implied warranty of
20164 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20165 + GNU General Public License for more details.
20167 + You should have received a copy of the GNU General Public License
20168 + along with this program; if not, write to the Free Software
20169 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
20173 +/* elfos.h should have already been included. Now just override
20174 + any conflicting definitions and add any extras. */
20176 +/* Run-time Target Specification. */
20177 +#undef TARGET_VERSION
20178 +#define TARGET_VERSION fputs (" (AVR32 GNU/Linux with ELF)", stderr);
20180 +/* Do not assume anything about header files. */
20181 +#define NO_IMPLICIT_EXTERN_C
20183 +/* The GNU C++ standard library requires that these macros be defined. */
20184 +#undef CPLUSPLUS_CPP_SPEC
20185 +#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
20187 +/* Now we define the strings used to build the spec file. */
20189 +#define LIB_SPEC \
20190 + "%{pthread:-lpthread} \
20192 + %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
20194 +/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
20195 + the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
20196 + provides part of the support for getting C++ file-scope static
20197 + object constructed before entering `main'. */
20199 +#undef STARTFILE_SPEC
20200 +#define STARTFILE_SPEC \
20202 + %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
20203 + %{!p:%{profile:gcrt1.o%s} \
20204 + %{!profile:crt1.o%s}}}} \
20205 + crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
20207 +/* Provide a ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
20208 + the GNU/Linux magical crtend.o file (see crtstuff.c) which
20209 + provides part of the support for getting C++ file-scope static
20210 + object constructed before entering `main', followed by a normal
20211 + GNU/Linux "finalizer" file, `crtn.o'. */
20213 +#undef ENDFILE_SPEC
20214 +#define ENDFILE_SPEC \
20215 + "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
20218 +#define ASM_SPEC "%{!mno-pic:%{!fno-pic:--pic}} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}"
20221 +#define LINK_SPEC "%{version:-v} \
20222 + %{static:-Bstatic} \
20223 + %{shared:-shared} \
20224 + %{symbolic:-Bsymbolic} \
20225 + %{rdynamic:-export-dynamic} \
20226 + %{!dynamic-linker:-dynamic-linker /lib/ld-uClibc.so.0} \
20227 + %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}}"
20229 +#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
20231 +/* This is how we tell the assembler that two symbols have the same value. */
20232 +#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
20235 + assemble_name (FILE, NAME1); \
20236 + fputs (" = ", FILE); \
20237 + assemble_name (FILE, NAME2); \
20238 + fputc ('\n', FILE); \
20245 +#define CC1_SPEC "%{profile:-p}"
20247 +/* Target CPU builtins. */
20248 +#define TARGET_CPU_CPP_BUILTINS() \
20251 + builtin_define ("__avr32__"); \
20252 + builtin_define ("__AVR32__"); \
20253 + builtin_define ("__AVR32_LINUX__"); \
20254 + builtin_define (avr32_part->macro); \
20255 + builtin_define (avr32_arch->macro); \
20256 + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
20257 + builtin_define ("__AVR32_AVR32A__"); \
20259 + builtin_define ("__AVR32_AVR32B__"); \
20260 + if (TARGET_UNALIGNED_WORD) \
20261 + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
20262 + if (TARGET_SIMD) \
20263 + builtin_define ("__AVR32_HAS_SIMD__"); \
20264 + if (TARGET_DSP) \
20265 + builtin_define ("__AVR32_HAS_DSP__"); \
20266 + if (TARGET_RMW) \
20267 + builtin_define ("__AVR32_HAS_RMW__"); \
20268 + if (TARGET_BRANCH_PRED) \
20269 + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
20270 + if (TARGET_FAST_FLOAT) \
20271 + builtin_define ("__AVR32_FAST_FLOAT__"); \
20277 +/* Call the function profiler with a given profile label. */
20278 +#undef FUNCTION_PROFILER
20279 +#define FUNCTION_PROFILER(STREAM, LABELNO) \
20282 + fprintf (STREAM, "\tmov\tlr, lo(mcount)\n\torh\tlr, hi(mcount)\n"); \
20283 + fprintf (STREAM, "\ticall lr\n"); \
20287 +#define NO_PROFILE_COUNTERS 1
20289 +/* For dynamic libraries to work */
20290 +/* #define PLT_REG_CALL_CLOBBERED 1 */
20291 +#define AVR32_ALWAYS_PIC 1
20293 +/* uclibc does not implement sinf, cosf etc. */
20294 +#undef TARGET_C99_FUNCTIONS
20295 +#define TARGET_C99_FUNCTIONS 0
20297 +#define LINK_GCC_C_SEQUENCE_SPEC \
20298 + "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
20300 +++ b/gcc/config/avr32/predicates.md
20302 +;; AVR32 predicates file.
20303 +;; Copyright 2003-2006 Atmel Corporation.
20305 +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
20307 +;; This file is part of GCC.
20309 +;; This program is free software; you can redistribute it and/or modify
20310 +;; it under the terms of the GNU General Public License as published by
20311 +;; the Free Software Foundation; either version 2 of the License, or
20312 +;; (at your option) any later version.
20314 +;; This program is distributed in the hope that it will be useful,
20315 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
20316 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20317 +;; GNU General Public License for more details.
20319 +;; You should have received a copy of the GNU General Public License
20320 +;; along with this program; if not, write to the Free Software
20321 +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20324 +;; True if the operand is a memory reference which contains an
20325 +;; Address consisting of a single pointer register
20326 +(define_predicate "avr32_indirect_register_operand"
20327 + (and (match_code "mem")
20328 + (match_test "register_operand(XEXP(op, 0), SImode)")))
20332 +;; Address expression with a base pointer offset with
20333 +;; a register displacement
20334 +(define_predicate "avr32_indexed_memory_operand"
20335 + (and (match_code "mem")
20336 + (match_test "GET_CODE(XEXP(op, 0)) == PLUS"))
20339 + rtx op0 = XEXP(XEXP(op, 0), 0);
20340 + rtx op1 = XEXP(XEXP(op, 0), 1);
20342 + return ((avr32_address_register_rtx_p (op0, 0)
20343 + && avr32_legitimate_index_p (GET_MODE(op), op1, 0))
20344 + || (avr32_address_register_rtx_p (op1, 0)
20345 + && avr32_legitimate_index_p (GET_MODE(op), op0, 0)));
20349 +;; Operand suitable for the ld.sb instruction
20350 +(define_predicate "load_sb_memory_operand"
20351 + (ior (match_operand 0 "avr32_indirect_register_operand")
20352 + (match_operand 0 "avr32_indexed_memory_operand")))
20355 +;; Operand suitable as operand to insns sign extending QI values
20356 +(define_predicate "extendqi_operand"
20357 + (ior (match_operand 0 "load_sb_memory_operand")
20358 + (match_operand 0 "register_operand")))
20360 +(define_predicate "post_inc_memory_operand"
20361 + (and (match_code "mem")
20362 + (match_test "(GET_CODE(XEXP(op, 0)) == POST_INC)
20363 + && REG_P(XEXP(XEXP(op, 0), 0))")))
20365 +(define_predicate "pre_dec_memory_operand"
20366 + (and (match_code "mem")
20367 + (match_test "(GET_CODE(XEXP(op, 0)) == PRE_DEC)
20368 + && REG_P(XEXP(XEXP(op, 0), 0))")))
20370 +;; Operand suitable for add instructions
20371 +(define_predicate "avr32_add_operand"
20372 + (ior (match_operand 0 "register_operand")
20373 + (and (match_operand 0 "immediate_operand")
20374 + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is21\")"))))
20376 +;; Operand is a power of two immediate
20377 +(define_predicate "power_of_two_operand"
20378 + (match_code "const_int")
20380 + HOST_WIDE_INT value = INTVAL (op);
20382 + return value != 0 && (value & (value - 1)) == 0;
20385 +;; Operand is a multiple of 8 immediate
20386 +(define_predicate "multiple_of_8_operand"
20387 + (match_code "const_int")
20389 + HOST_WIDE_INT value = INTVAL (op);
20391 + return (value & 0x7) == 0 ;
20394 +;; Operand is a multiple of 16 immediate
20395 +(define_predicate "multiple_of_16_operand"
20396 + (match_code "const_int")
20398 + HOST_WIDE_INT value = INTVAL (op);
20400 + return (value & 0xf) == 0 ;
20403 +;; Operand is a mask used for masking away upper bits of a reg
20404 +(define_predicate "avr32_mask_upper_bits_operand"
20405 + (match_code "const_int")
20407 + HOST_WIDE_INT value = INTVAL (op) + 1;
20409 + return value != 1 && value != 0 && (value & (value - 1)) == 0;
20413 +;; Operand suitable for mul instructions
20414 +(define_predicate "avr32_mul_operand"
20415 + (ior (match_operand 0 "register_operand")
20416 + (and (match_operand 0 "immediate_operand")
20417 + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
20419 +;; True for logical binary operators.
20420 +(define_predicate "logical_binary_operator"
20421 + (match_code "ior,xor,and"))
20423 +;; True for logical shift operators
20424 +(define_predicate "logical_shift_operator"
20425 + (match_code "ashift,lshiftrt"))
20427 +;; True for shift operand for logical and, or and eor insns
20428 +(define_predicate "avr32_logical_shift_operand"
20429 + (and (match_code "ashift,lshiftrt")
20430 + (ior (and (match_test "GET_CODE(XEXP(op, 1)) == CONST_INT")
20431 + (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))"))
20432 + (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT")
20433 + (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))"))))
20437 +;; Predicate for second operand to and, ior and xor insn patterns
20438 +(define_predicate "avr32_logical_insn_operand"
20439 + (ior (match_operand 0 "register_operand")
20440 + (match_operand 0 "avr32_logical_shift_operand"))
20444 +;; True for avr32 comparison operators
20445 +(define_predicate "avr32_comparison_operator"
20446 + (ior (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
20447 + (and (match_code "unspec")
20448 + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
20449 + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
20451 +(define_predicate "avr32_cond3_comparison_operator"
20452 + (ior (match_code "eq, ne, ge, lt, geu, ltu")
20453 + (and (match_code "unspec")
20454 + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
20455 + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
20457 +;; True for avr32 comparison operand
20458 +(define_predicate "avr32_comparison_operand"
20459 + (ior (and (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
20460 + (match_test "(CC0_P (XEXP(op,0)) && rtx_equal_p (XEXP(op,1), const0_rtx))"))
20461 + (and (match_code "unspec")
20462 + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
20463 + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
20465 +;; True if this is a const_int with one bit set
20466 +(define_predicate "one_bit_set_operand"
20467 + (match_code "const_int")
20473 + value = INTVAL(op);
20474 + for ( i = 0 ; i < 32; i++ ){
20475 + if ( value & ( 1 << i ) ){
20480 + return ( ones == 1 );
20484 +;; True if this is a const_int with one bit cleared
20485 +(define_predicate "one_bit_cleared_operand"
20486 + (match_code "const_int")
20492 + value = INTVAL(op);
20493 + for ( i = 0 ; i < 32; i++ ){
20494 + if ( !(value & ( 1 << i )) ){
20499 + return ( zeroes == 1 );
20503 +;; Immediate all the low 16-bits cleared
20504 +(define_predicate "avr32_hi16_immediate_operand"
20505 + (match_code "const_int")
20507 + /* If the low 16-bits are zero then this
20508 + is a hi16 immediate. */
20509 + return ((INTVAL(op) & 0xffff) == 0);
20513 +;; True if this is a register or immediate operand
20514 +(define_predicate "register_immediate_operand"
20515 + (ior (match_operand 0 "register_operand")
20516 + (match_operand 0 "immediate_operand")))
20518 +;; True if this is a register or const_int operand
20519 +(define_predicate "register_const_int_operand"
20520 + (ior (match_operand 0 "register_operand")
20521 + (and (match_operand 0 "const_int_operand")
20522 + (match_operand 0 "immediate_operand"))))
20524 +;; True if this is a register or const_double operand
20525 +(define_predicate "register_const_double_operand"
20526 + (ior (match_operand 0 "register_operand")
20527 + (match_operand 0 "const_double_operand")))
20529 +;; True if this is an operand containing a label_ref
20530 +(define_predicate "avr32_label_ref_operand"
20531 + (and (match_code "mem")
20532 + (match_test "avr32_find_symbol(op)
20533 + && (GET_CODE(avr32_find_symbol(op)) == LABEL_REF)")))
20535 +;; True if this is a valid symbol pointing to the constant pool
20536 +(define_predicate "avr32_const_pool_operand"
20537 + (and (match_code "symbol_ref")
20538 + (match_test "CONSTANT_POOL_ADDRESS_P(op)"))
20540 + return (flag_pic ? (!(symbol_mentioned_p (get_pool_constant (op))
20541 + || label_mentioned_p (get_pool_constant (op)))
20542 + || avr32_got_mentioned_p(get_pool_constant (op)))
20547 +;; True if this is a memory reference to the constant or mini pool
20548 +(define_predicate "avr32_const_pool_ref_operand"
20549 + (ior (match_operand 0 "avr32_label_ref_operand")
20550 + (and (match_code "mem")
20551 + (match_test "avr32_const_pool_operand(XEXP(op,0), GET_MODE(XEXP(op,0)))"))))
20554 +;; Legal source operand for movti insns
20555 +(define_predicate "avr32_movti_src_operand"
20556 + (ior (match_operand 0 "avr32_const_pool_ref_operand")
20557 + (ior (ior (match_operand 0 "register_immediate_operand")
20558 + (match_operand 0 "avr32_indirect_register_operand"))
20559 + (match_operand 0 "post_inc_memory_operand"))))
20561 +;; Legal destination operand for movti insns
20562 +(define_predicate "avr32_movti_dst_operand"
20563 + (ior (ior (match_operand 0 "register_operand")
20564 + (match_operand 0 "avr32_indirect_register_operand"))
20565 + (match_operand 0 "pre_dec_memory_operand")))
20568 +;; True if this is a k12 offset memory operand
20569 +(define_predicate "avr32_k12_memory_operand"
20570 + (and (match_code "mem")
20571 + (ior (match_test "REG_P(XEXP(op, 0))")
20572 + (match_test "GET_CODE(XEXP(op, 0)) == PLUS
20573 + && REG_P(XEXP(XEXP(op, 0), 0))
20574 + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
20575 + && (CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 0)),
20576 + 'K', (mode == SImode) ? \"Ks14\" : ((mode == HImode) ? \"Ks13\" : \"Ks12\")))"))))
20578 +;; True if this is a memory operand with an immediate displacement
20579 +(define_predicate "avr32_imm_disp_memory_operand"
20580 + (and (match_code "mem")
20581 + (match_test "GET_CODE(XEXP(op, 0)) == PLUS
20582 + && REG_P(XEXP(XEXP(op, 0), 0))
20583 + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)")))
20585 +;; True if this is a bswap operand
20586 +(define_predicate "avr32_bswap_operand"
20587 + (ior (match_operand 0 "avr32_k12_memory_operand")
20588 + (match_operand 0 "register_operand")))
20590 +;; True if this is a valid coprocessor insn memory operand
20591 +(define_predicate "avr32_cop_memory_operand"
20592 + (and (match_operand 0 "memory_operand")
20593 + (not (match_test "GET_CODE(XEXP(op, 0)) == PLUS
20594 + && REG_P(XEXP(XEXP(op, 0), 0))
20595 + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
20596 + && !(CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 0)), 'K', \"Ku10\"))"))))
20598 +;; True if this is a valid source/destination operand
20599 +;; for moving values to/from a coprocessor
20600 +(define_predicate "avr32_cop_move_operand"
20601 + (ior (match_operand 0 "register_operand")
20602 + (match_operand 0 "avr32_cop_memory_operand")))
20605 +;; True if this is a valid extract byte offset for use in
20606 +;; load extracted index insns
20607 +(define_predicate "avr32_extract_shift_operand"
20608 + (and (match_operand 0 "const_int_operand")
20609 + (match_test "(INTVAL(op) == 0) || (INTVAL(op) == 8)
20610 + || (INTVAL(op) == 16) || (INTVAL(op) == 24)")))
20612 +;; True if this is a floating-point register
20613 +(define_predicate "avr32_fp_register_operand"
20614 + (and (match_operand 0 "register_operand")
20615 + (match_test "REGNO_REG_CLASS(REGNO(op)) == FP_REGS")))
20617 +;; True if this is a valid avr32 symbol operand
20618 +(define_predicate "avr32_symbol_operand"
20619 + (ior (match_code "label_ref, symbol_ref")
20620 + (and (match_code "const")
20621 + (match_test "avr32_find_symbol(op)"))))
20623 +;; True if this is a valid operand for the lda.w and call pseudo insns
20624 +(define_predicate "avr32_address_operand"
20625 + (and (match_code "label_ref, symbol_ref")
20626 + (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS")
20627 + (match_test "flag_pic")) ))
20629 +;; An immediate k16 address operand
20630 +(define_predicate "avr32_ks16_address_operand"
20631 + (and (match_operand 0 "address_operand")
20632 + (ior (match_test "REG_P(op)")
20633 + (match_test "GET_CODE(op) == PLUS
20634 + && ((GET_CODE(XEXP(op,0)) == CONST_INT)
20635 + || (GET_CODE(XEXP(op,1)) == CONST_INT))")) ))
20637 +;; An offset k16 memory operand
20638 +(define_predicate "avr32_ks16_memory_operand"
20639 + (and (match_code "mem")
20640 + (match_test "avr32_ks16_address_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
20642 +;; An immediate k11 address operand
20643 +(define_predicate "avr32_ks11_address_operand"
20644 + (and (match_operand 0 "address_operand")
20645 + (ior (match_test "REG_P(op)")
20646 + (match_test "GET_CODE(op) == PLUS
20647 + && (((GET_CODE(XEXP(op,0)) == CONST_INT)
20648 + && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,0)), 'K', \"Ks11\"))
20649 + || ((GET_CODE(XEXP(op,1)) == CONST_INT)
20650 + && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,1)), 'K', \"Ks11\")))")) ))
20652 +;; True if this is a avr32 call operand
20653 +(define_predicate "avr32_call_operand"
20654 + (ior (ior (match_operand 0 "register_operand")
20655 + (ior (match_operand 0 "avr32_const_pool_ref_operand")
20656 + (match_operand 0 "avr32_address_operand")))
20657 + (match_test "SYMBOL_REF_RCALL_FUNCTION_P(op)")))
20659 +;; Return true for operators performing ALU operations
20661 +(define_predicate "alu_operator"
20662 + (match_code "ior, xor, and, plus, minus, ashift, lshiftrt, ashiftrt"))
20664 +(define_predicate "avr32_add_shift_immediate_operand"
20665 + (and (match_operand 0 "immediate_operand")
20666 + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ku02\")")))
20668 +(define_predicate "avr32_cond_register_immediate_operand"
20669 + (ior (match_operand 0 "register_operand")
20670 + (and (match_operand 0 "immediate_operand")
20671 + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
20673 +(define_predicate "avr32_cond_immediate_operand"
20674 + (and (match_operand 0 "immediate_operand")
20675 + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is08\")")))
20678 +(define_predicate "avr32_cond_move_operand"
20679 + (ior (ior (match_operand 0 "register_operand")
20680 + (and (match_operand 0 "immediate_operand")
20681 + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")")))
20682 + (and (match_test "TARGET_V2_INSNS")
20683 + (match_operand 0 "memory_operand"))))
20685 +(define_predicate "avr32_mov_immediate_operand"
20686 + (and (match_operand 0 "immediate_operand")
20687 + (match_test "avr32_const_ok_for_move(INTVAL(op))")))
20689 +++ b/gcc/config/avr32/simd.md
20691 +;; AVR32 machine description file for SIMD instructions.
20692 +;; Copyright 2003-2006 Atmel Corporation.
20694 +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
20696 +;; This file is part of GCC.
20698 +;; This program is free software; you can redistribute it and/or modify
20699 +;; it under the terms of the GNU General Public License as published by
20700 +;; the Free Software Foundation; either version 2 of the License, or
20701 +;; (at your option) any later version.
20703 +;; This program is distributed in the hope that it will be useful,
20704 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
20705 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20706 +;; GNU General Public License for more details.
20708 +;; You should have received a copy of the GNU General Public License
20709 +;; along with this program; if not, write to the Free Software
20710 +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20712 +;; -*- Mode: Scheme -*-
20716 +(define_mode_iterator VECM [V2HI V4QI])
20717 +(define_mode_attr size [(V2HI "h") (V4QI "b")])
20719 +(define_insn "add<mode>3"
20720 + [(set (match_operand:VECM 0 "register_operand" "=r")
20721 + (plus:VECM (match_operand:VECM 1 "register_operand" "r")
20722 + (match_operand:VECM 2 "register_operand" "r")))]
20724 + "padd.<size>\t%0, %1, %2"
20725 + [(set_attr "length" "4")
20726 + (set_attr "type" "alu")])
20729 +(define_insn "sub<mode>3"
20730 + [(set (match_operand:VECM 0 "register_operand" "=r")
20731 + (minus:VECM (match_operand:VECM 1 "register_operand" "r")
20732 + (match_operand:VECM 2 "register_operand" "r")))]
20734 + "psub.<size>\t%0, %1, %2"
20735 + [(set_attr "length" "4")
20736 + (set_attr "type" "alu")])
20739 +(define_insn "abs<mode>2"
20740 + [(set (match_operand:VECM 0 "register_operand" "=r")
20741 + (abs:VECM (match_operand:VECM 1 "register_operand" "r")))]
20743 + "pabs.s<size>\t%0, %1"
20744 + [(set_attr "length" "4")
20745 + (set_attr "type" "alu")])
20747 +(define_insn "ashl<mode>3"
20748 + [(set (match_operand:VECM 0 "register_operand" "=r")
20749 + (ashift:VECM (match_operand:VECM 1 "register_operand" "r")
20750 + (match_operand:SI 2 "immediate_operand" "Ku04")))]
20752 + "plsl.<size>\t%0, %1, %2"
20753 + [(set_attr "length" "4")
20754 + (set_attr "type" "alu")])
20756 +(define_insn "ashr<mode>3"
20757 + [(set (match_operand:VECM 0 "register_operand" "=r")
20758 + (ashiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
20759 + (match_operand:SI 2 "immediate_operand" "Ku04")))]
20761 + "pasr.<size>\t%0, %1, %2"
20762 + [(set_attr "length" "4")
20763 + (set_attr "type" "alu")])
20765 +(define_insn "lshr<mode>3"
20766 + [(set (match_operand:VECM 0 "register_operand" "=r")
20767 + (lshiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
20768 + (match_operand:SI 2 "immediate_operand" "Ku04")))]
20770 + "plsr.<size>\t%0, %1, %2"
20771 + [(set_attr "length" "4")
20772 + (set_attr "type" "alu")])
20774 +(define_insn "smaxv2hi3"
20775 + [(set (match_operand:V2HI 0 "register_operand" "=r")
20776 + (smax:V2HI (match_operand:V2HI 1 "register_operand" "r")
20777 + (match_operand:V2HI 2 "register_operand" "r")))]
20780 + "pmax.sh\t%0, %1, %2"
20781 + [(set_attr "length" "4")
20782 + (set_attr "type" "alu")])
20784 +(define_insn "sminv2hi3"
20785 + [(set (match_operand:V2HI 0 "register_operand" "=r")
20786 + (smin:V2HI (match_operand:V2HI 1 "register_operand" "r")
20787 + (match_operand:V2HI 2 "register_operand" "r")))]
20790 + "pmin.sh\t%0, %1, %2"
20791 + [(set_attr "length" "4")
20792 + (set_attr "type" "alu")])
20794 +(define_insn "umaxv4qi3"
20795 + [(set (match_operand:V4QI 0 "register_operand" "=r")
20796 + (umax:V4QI (match_operand:V4QI 1 "register_operand" "r")
20797 + (match_operand:V4QI 2 "register_operand" "r")))]
20800 + "pmax.ub\t%0, %1, %2"
20801 + [(set_attr "length" "4")
20802 + (set_attr "type" "alu")])
20804 +(define_insn "uminv4qi3"
20805 + [(set (match_operand:V4QI 0 "register_operand" "=r")
20806 + (umin:V4QI (match_operand:V4QI 1 "register_operand" "r")
20807 + (match_operand:V4QI 2 "register_operand" "r")))]
20810 + "pmin.ub\t%0, %1, %2"
20811 + [(set_attr "length" "4")
20812 + (set_attr "type" "alu")])
20815 +(define_insn "addsubv2hi"
20816 + [(set (match_operand:V2HI 0 "register_operand" "=r")
20818 + (plus:HI (match_operand:HI 1 "register_operand" "r")
20819 + (match_operand:HI 2 "register_operand" "r"))
20820 + (minus:HI (match_dup 1) (match_dup 2))))]
20822 + "paddsub.h\t%0, %1:b, %2:b"
20823 + [(set_attr "length" "4")
20824 + (set_attr "type" "alu")])
20826 +(define_insn "subaddv2hi"
20827 + [(set (match_operand:V2HI 0 "register_operand" "=r")
20829 + (minus:HI (match_operand:HI 1 "register_operand" "r")
20830 + (match_operand:HI 2 "register_operand" "r"))
20831 + (plus:HI (match_dup 1) (match_dup 2))))]
20833 + "psubadd.h\t%0, %1:b, %2:b"
20834 + [(set_attr "length" "4")
20835 + (set_attr "type" "alu")])
20837 +++ b/gcc/config/avr32/sync.md
20839 +;;=================================================================
20840 +;; Atomic operations
20841 +;;=================================================================
20844 +(define_insn "sync_compare_and_swapsi"
20845 + [(set (match_operand:SI 0 "register_operand" "=&r,&r")
20846 + (match_operand:SI 1 "memory_operand" "+RKs16,+RKs16"))
20847 + (set (match_dup 1)
20848 + (unspec_volatile:SI
20850 + (match_operand:SI 2 "register_immediate_operand" "r,Ks21")
20851 + (match_operand:SI 3 "register_operand" "r,r")]
20852 + VUNSPEC_SYNC_CMPXCHG)) ]
20863 + [(set_attr "length" "16,18")
20864 + (set_attr "cc" "clobber")]
20868 +(define_code_iterator atomic_op [plus minus and ior xor])
20869 +(define_code_attr atomic_asm_insn [(plus "add") (minus "sub") (and "and") (ior "or") (xor "eor")])
20870 +(define_code_attr atomic_insn [(plus "add") (minus "sub") (and "and") (ior "ior") (xor "xor")])
20872 +(define_insn "sync_loadsi"
20873 + ; NB! Put an early clobber on the destination operand to
20874 + ; avoid gcc using the same register in the source and
20875 +	; destination. This is done in order to prevent gcc from
20876 +	; clobbering the source operand since these instructions
20877 + ; are actually inside a "loop".
20878 + [(set (match_operand:SI 0 "register_operand" "=&r")
20879 + (unspec_volatile:SI
20880 + [(match_operand:SI 1 "avr32_ks16_memory_operand" "RKs16")
20881 + (label_ref (match_operand 2 "" ""))]
20882 + VUNSPEC_SYNC_SET_LOCK_AND_LOAD) )]
20887 + [(set_attr "length" "6")
20888 + (set_attr "cc" "clobber")]
20891 +(define_insn "sync_store_if_lock"
20892 + [(set (match_operand:SI 0 "avr32_ks16_memory_operand" "=RKs16")
20893 + (unspec_volatile:SI
20894 + [(match_operand:SI 1 "register_operand" "r")
20895 + (label_ref (match_operand 2 "" ""))]
20896 + VUNSPEC_SYNC_STORE_IF_LOCK) )]
20900 + [(set_attr "length" "6")
20901 + (set_attr "cc" "clobber")]
20905 +(define_expand "sync_<atomic_insn>si"
20906 + [(set (match_dup 2)
20907 + (unspec_volatile:SI
20908 + [(match_operand:SI 0 "avr32_ks16_memory_operand" "")
20910 + VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
20911 + (set (match_dup 2)
20912 + (atomic_op:SI (match_dup 2)
20913 + (match_operand:SI 1 "register_immediate_operand" "")))
20914 + (set (match_dup 0)
20915 + (unspec_volatile:SI
20918 + VUNSPEC_SYNC_STORE_IF_LOCK) )
20919 + (use (match_dup 1))
20920 + (use (match_dup 4))]
20923 + rtx *mem_expr = &operands[0];
20925 + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
20927 + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
20928 + XEXP (*mem_expr, 0) = ptr_reg;
20932 + rtx address = XEXP (*mem_expr, 0);
20933 + if ( REG_P (address) )
20934 + ptr_reg = address;
20935 + else if ( REG_P (XEXP (address, 0)) )
20936 + ptr_reg = XEXP (address, 0);
20938 + ptr_reg = XEXP (address, 1);
20941 + operands[2] = gen_reg_rtx (SImode);
20942 + operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
20943 + operands[4] = ptr_reg;
20950 +(define_expand "sync_old_<atomic_insn>si"
20951 + [(set (match_operand:SI 0 "register_operand" "")
20952 + (unspec_volatile:SI
20953 + [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
20955 + VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
20956 + (set (match_dup 3)
20957 + (atomic_op:SI (match_dup 0)
20958 + (match_operand:SI 2 "register_immediate_operand" "")))
20959 + (set (match_dup 1)
20960 + (unspec_volatile:SI
20963 + VUNSPEC_SYNC_STORE_IF_LOCK) )
20964 + (use (match_dup 2))
20965 + (use (match_dup 5))]
20968 + rtx *mem_expr = &operands[1];
20970 + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
20972 + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
20973 + XEXP (*mem_expr, 0) = ptr_reg;
20977 + rtx address = XEXP (*mem_expr, 0);
20978 + if ( REG_P (address) )
20979 + ptr_reg = address;
20980 + else if ( REG_P (XEXP (address, 0)) )
20981 + ptr_reg = XEXP (address, 0);
20983 + ptr_reg = XEXP (address, 1);
20986 + operands[3] = gen_reg_rtx (SImode);
20987 + operands[4] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
20988 + operands[5] = ptr_reg;
20992 +(define_expand "sync_new_<atomic_insn>si"
20993 + [(set (match_operand:SI 0 "register_operand" "")
20994 + (unspec_volatile:SI
20995 + [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
20997 + VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
20998 + (set (match_dup 0)
20999 + (atomic_op:SI (match_dup 0)
21000 + (match_operand:SI 2 "register_immediate_operand" "")))
21001 + (set (match_dup 1)
21002 + (unspec_volatile:SI
21005 + VUNSPEC_SYNC_STORE_IF_LOCK) )
21006 + (use (match_dup 2))
21007 + (use (match_dup 4))]
21010 + rtx *mem_expr = &operands[1];
21012 + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
21014 + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
21015 + XEXP (*mem_expr, 0) = ptr_reg;
21019 + rtx address = XEXP (*mem_expr, 0);
21020 + if ( REG_P (address) )
21021 + ptr_reg = address;
21022 + else if ( REG_P (XEXP (address, 0)) )
21023 + ptr_reg = XEXP (address, 0);
21025 + ptr_reg = XEXP (address, 1);
21028 + operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
21029 + operands[4] = ptr_reg;
21034 +;(define_insn "sync_<atomic_insn>si"
21035 +; [(set (match_operand:SI 0 "memory_operand" "+RKs16")
21036 +; (unspec_volatile:SI
21037 +; [(atomic_op:SI (match_dup 0)
21038 +; (match_operand:SI 1 "register_operand" "r"))]
21039 +; VUNSPEC_SYNC_CMPXCHG))
21040 +; (clobber (match_scratch:SI 2 "=&r"))]
21045 +; <atomic_asm_insn>\t%2,%1
21049 +; [(set_attr "length" "14")
21050 +; (set_attr "cc" "clobber")]
21053 +;(define_insn "sync_new_<atomic_insn>si"
21054 +; [(set (match_operand:SI 1 "memory_operand" "+RKs16")
21055 +; (unspec_volatile:SI
21056 +; [(atomic_op:SI (match_dup 1)
21057 +; (match_operand:SI 2 "register_operand" "r"))]
21058 +; VUNSPEC_SYNC_CMPXCHG))
21059 +; (set (match_operand:SI 0 "register_operand" "=&r")
21060 +; (atomic_op:SI (match_dup 1)
21061 +; (match_dup 2)))]
21066 +; <atomic_asm_insn>\t%0,%2
21070 +; [(set_attr "length" "14")
21071 +; (set_attr "cc" "clobber")]
21074 +(define_insn "sync_lock_test_and_setsi"
21075 + [ (set (match_operand:SI 0 "register_operand" "=&r")
21076 + (match_operand:SI 1 "memory_operand" "+RKu00"))
21077 + (set (match_dup 1)
21078 + (match_operand:SI 2 "register_operand" "r")) ]
21080 + "xchg\t%0, %p1, %2"
21081 + [(set_attr "length" "4")]
21084 +++ b/gcc/config/avr32/t-avr32
21087 +MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
21088 + $(srcdir)/config/avr32/sync.md \
21089 + $(srcdir)/config/avr32/fpcp.md \
21090 + $(srcdir)/config/avr32/simd.md \
21091 + $(srcdir)/config/avr32/predicates.md
21093 +s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
21094 + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
21096 +# We want fine grained libraries, so use the new code
21097 +# to build the floating point emulation libraries.
21101 +LIB1ASMSRC = avr32/lib1funcs.S
21102 +LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
21103 + _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
21104 + _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
21105 + _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
21106 + _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
21107 + _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
21108 + _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
21110 +#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
21112 +MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul/march=ucr3
21113 +MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul ucr3
21114 +MULTILIB_EXCEPTIONS =
21115 +MULTILIB_MATCHES += march?ap=mpart?ap7000
21116 +MULTILIB_MATCHES += march?ap=mpart?ap7001
21117 +MULTILIB_MATCHES += march?ap=mpart?ap7002
21118 +MULTILIB_MATCHES += march?ap=mpart?ap7200
21119 +MULTILIB_MATCHES += march?ucr1=march?uc
21120 +MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
21121 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
21122 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
21123 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
21124 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
21125 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
21126 +MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
21127 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
21128 +MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
21129 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
21130 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
21131 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
21132 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
21133 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
21134 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
21135 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
21136 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
21137 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
21138 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
21139 +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512revc
21140 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
21141 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
21142 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
21143 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
21144 +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512revc
21145 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0512c
21146 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0256c
21147 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0128c
21148 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c064c
21149 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1512c
21150 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1256c
21151 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1128c
21152 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c164c
21153 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2512c
21154 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2256c
21155 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2128c
21156 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c264c
21157 +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064
21158 +MULTILIB_MATCHES += march?ucr3=mpart?uc3l032
21159 +MULTILIB_MATCHES += march?ucr3=mpart?uc3l016
21162 +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
21164 +CRTSTUFF_T_CFLAGS = -mrelax
21165 +CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
21166 +TARGET_LIBGCC2_CFLAGS += -mrelax
21168 +LIBGCC = stmp-multilib
21169 +INSTALL_LIBGCC = install-multilib
21171 +fp-bit.c: $(srcdir)/config/fp-bit.c
21172 + echo '#define FLOAT' > fp-bit.c
21173 + cat $(srcdir)/config/fp-bit.c >> fp-bit.c
21175 +dp-bit.c: $(srcdir)/config/fp-bit.c
21176 + cat $(srcdir)/config/fp-bit.c > dp-bit.c
21181 +++ b/gcc/config/avr32/t-avr32-linux
21184 +MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
21185 + $(srcdir)/config/avr32/sync.md \
21186 + $(srcdir)/config/avr32/fpcp.md \
21187 + $(srcdir)/config/avr32/simd.md \
21188 + $(srcdir)/config/avr32/predicates.md
21190 +s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
21191 + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
21193 +# We want fine grained libraries, so use the new code
21194 +# to build the floating point emulation libraries.
21198 +LIB1ASMSRC = avr32/lib1funcs.S
21199 +LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
21200 + _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
21201 + _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
21202 + _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
21203 + _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
21204 + _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
21205 + _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
21207 +#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
21209 +MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul/march=ucr3
21210 +MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul ucr3
21211 +MULTILIB_EXCEPTIONS =
21212 +MULTILIB_MATCHES += march?ap=mpart?ap7000
21213 +MULTILIB_MATCHES += march?ap=mpart?ap7001
21214 +MULTILIB_MATCHES += march?ap=mpart?ap7002
21215 +MULTILIB_MATCHES += march?ap=mpart?ap7200
21216 +MULTILIB_MATCHES += march?ucr1=march?uc
21217 +MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
21218 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
21219 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
21220 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
21221 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
21222 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
21223 +MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
21224 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
21225 +MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
21226 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
21227 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
21228 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
21229 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
21230 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
21231 +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
21232 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
21233 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
21234 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
21235 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
21236 +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512revc
21237 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
21238 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
21239 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
21240 +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
21241 +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512revc
21242 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0512c
21243 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0256c
21244 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0128c
21245 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c064c
21246 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1512c
21247 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1256c
21248 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1128c
21249 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c164c
21250 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2512c
21251 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2256c
21252 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2128c
21253 +MULTILIB_MATCHES += march?ucr3=mpart?uc3c264c
21254 +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064
21255 +MULTILIB_MATCHES += march?ucr3=mpart?uc3l032
21256 +MULTILIB_MATCHES += march?ucr3=mpart?uc3l016
21259 +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o
21261 +CRTSTUFF_T_CFLAGS = -mrelax
21262 +CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
21263 +TARGET_LIBGCC2_CFLAGS += -mrelax
21265 +LIBGCC = stmp-multilib
21266 +INSTALL_LIBGCC = install-multilib
21268 +fp-bit.c: $(srcdir)/config/fp-bit.c
21269 + echo '#define FLOAT' > fp-bit.c
21270 + cat $(srcdir)/config/fp-bit.c >> fp-bit.c
21272 +dp-bit.c: $(srcdir)/config/fp-bit.c
21273 + cat $(srcdir)/config/fp-bit.c > dp-bit.c
21278 +++ b/gcc/config/avr32/t-elf
21281 +# Assemble startup files.
21282 +$(T)crti.o: $(srcdir)/config/avr32/crti.asm $(GCC_PASSES)
21283 + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
21284 + -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/avr32/crti.asm
21286 +$(T)crtn.o: $(srcdir)/config/avr32/crtn.asm $(GCC_PASSES)
21287 + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
21288 + -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/avr32/crtn.asm
21291 +# Build the libraries for both hard and soft floating point
21292 +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
21294 +LIBGCC = stmp-multilib
21295 +INSTALL_LIBGCC = install-multilib
21297 +++ b/gcc/config/avr32/uclinux-elf.h
21300 +/* Run-time Target Specification. */
21301 +#undef TARGET_VERSION
21302 +#define TARGET_VERSION fputs (" (AVR32 uClinux with ELF)", stderr)
21304 +/* We don't want a .jcr section on uClinux. As if this makes a difference... */
21305 +#define TARGET_USE_JCR_SECTION 0
21307 +/* Here we go. Drop the crtbegin/crtend stuff completely. */
21308 +#undef STARTFILE_SPEC
21309 +#define STARTFILE_SPEC \
21310 + "%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s}" \
21311 + " %{!p:%{profile:gcrt1.o%s}" \
21312 + " %{!profile:crt1.o%s}}}} crti.o%s"
21314 +#undef ENDFILE_SPEC
21315 +#define ENDFILE_SPEC "crtn.o%s"
21317 +#undef TARGET_DEFAULT
21318 +#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT)
21319 --- a/gcc/config/host-linux.c
21320 +++ b/gcc/config/host-linux.c
21322 #include "hosthooks.h"
21323 #include "hosthooks-def.h"
21326 +#define SSIZE_MAX LONG_MAX
21329 /* Linux has a feature called exec-shield-randomize that perturbs the
21330 address of non-fixed mapped segments by a (relatively) small amount.
21331 --- a/gcc/config.gcc
21332 +++ b/gcc/config.gcc
21333 @@ -834,6 +834,24 @@ avr-*-*)
21334 tm_file="avr/avr.h dbxelf.h"
21338 + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/avr32.h "
21339 + tmake_file="t-linux avr32/t-avr32-linux"
21340 + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
21341 + extra_modes=avr32/avr32-modes.def
21344 +avr32*-*-uclinux*)
21345 + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/uclinux-elf.h avr32/avr32.h"
21346 + tmake_file="t-linux avr32/t-avr32-linux"
21347 + extra_modes=avr32/avr32-modes.def
21351 + tm_file="dbxelf.h elfos.h avr32/avr32.h avr32/avr32-elf.h"
21352 + tmake_file="avr32/t-avr32 avr32/t-elf"
21353 + extra_modes=avr32/avr32-modes.def
21356 tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
21357 tmake_file=bfin/t-bfin-elf
21358 @@ -2950,6 +2968,32 @@ case "${target}" in
21363 + supported_defaults="part arch"
21365 + case "$with_part" in
21367 + | "ap7000" | "ap7010" | "ap7020" | "uc3a0256" | "uc3a0512" | "uc3a1128" | "uc3a1256" | "uc3a1512" )
21371 + echo "Unknown part used in --with-part=$with_part" 1>&2
21376 + case "$with_arch" in
21382 + echo "Unknown arch used in --with-arch=$with_arch" 1>&2
21389 supported_defaults=cpu
21390 case "$with_cpu" in
21391 --- a/gcc/configure.ac
21392 +++ b/gcc/configure.ac
21393 @@ -2174,10 +2174,9 @@ L2:],
21394 as_ver=`$gcc_cv_as --version 2>/dev/null | sed 1q`
21395 if echo "$as_ver" | grep GNU > /dev/null; then
21397 - as_vers=`echo $as_ver | sed -n \
21398 - -e 's,^.*[ ]\([0-9][0-9]*\.[0-9][0-9]*.*\)$,\1,p'`
21399 - as_major=`expr "$as_vers" : '\([0-9]*\)'`
21400 - as_minor=`expr "$as_vers" : '[0-9]*\.\([0-9]*\)'`
21401 + as_ver=`echo $as_ver | sed -e 's/GNU assembler\( (GNU Binutils)\)\? \([0-9.][0-9.]*\).*/\2/'`
21402 + as_major=`echo $as_ver | sed 's/\..*//'`
21403 + as_minor=`echo $as_ver | sed 's/[^.]*\.\([0-9]*\).*/\1/'`
21404 changequote([,])dnl
21405 if test $as_major -eq 2 && test $as_minor -lt 11
21407 @@ -3077,7 +3076,7 @@ esac
21409 i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \
21410 | x86_64*-*-* | hppa*-*-* | arm*-*-* | strongarm*-*-* | xscale*-*-* \
21411 - | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | spu-*-*)
21412 + | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | spu-*-* | avr32-*-*)
21415 ia64*-*-* | s390*-*-*)
21416 --- a/gcc/doc/extend.texi
21417 +++ b/gcc/doc/extend.texi
21418 @@ -2336,7 +2336,7 @@ This attribute is ignored for R8C target
21421 @cindex interrupt handler functions
21422 -Use this attribute on the ARM, AVR, CRX, M32C, M32R/D, m68k, MS1,
21423 +Use this attribute on the ARM, AVR, AVR32, CRX, M32C, M32R/D, m68k, MS1,
21424 and Xstormy16 ports to indicate that the specified function is an
21425 interrupt handler. The compiler will generate function entry and exit
21426 sequences suitable for use in an interrupt handler when this attribute
21427 @@ -2356,6 +2356,15 @@ void f () __attribute__ ((interrupt ("IR
21429 Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@.
21431 +Note, for the AVR32, you can specify which banking scheme is used for
21432 +the interrupt mode this interrupt handler is used in like this:
21435 +void f () __attribute__ ((interrupt ("FULL")));
21438 +Permissible values for this parameter are: FULL, HALF, NONE and UNDEF.
21440 On ARMv7-M the interrupt type is ignored, and the attribute means the function
21441 may be called with a word aligned stack pointer.
21443 @@ -3925,6 +3934,23 @@ placed in either the @code{.bss_below100
21447 +@subsection AVR32 Variable Attributes
21449 +One attribute is currently defined for AVR32 configurations:
21450 +@code{rmw_addressable}
21453 +@item rmw_addressable
21454 +@cindex @code{rmw_addressable} attribute
21456 +This attribute can be used to signal that a variable can be accessed
21457 +with the addressing mode of the AVR32 Atomic Read-Modify-Write memory
21458 +instructions and hence make it possible for gcc to generate these
21459 +instructions without using built-in functions or inline assembly statements.
21460 +Variables used within the AVR32 Atomic Read-Modify-Write built-in
21461 +functions will automatically get the @code{rmw_addressable} attribute.
21464 @subsection AVR Variable Attributes
21467 @@ -6708,6 +6734,7 @@ instructions, but allow the compiler to
21468 * Alpha Built-in Functions::
21469 * ARM iWMMXt Built-in Functions::
21470 * ARM NEON Intrinsics::
21471 +* AVR32 Built-in Functions::
21472 * Blackfin Built-in Functions::
21473 * FR-V Built-in Functions::
21474 * X86 Built-in Functions::
21475 @@ -6955,6 +6982,74 @@ when the @option{-mfpu=neon} switch is u
21477 @include arm-neon-intrinsics.texi
21479 +@node AVR32 Built-in Functions
21480 +@subsection AVR32 Built-in Functions
21482 +Built-in functions for atomic memory (RMW) instructions. Note that these
21483 +built-ins will fail for targets where the RMW instructions are not
21484 +implemented. Also note that these instructions only accept a Ks15 << 2
21485 +memory address and will therefore not work with any runtime-computed
21486 +memory addresses. The user is responsible for making sure that any
21487 +pointers used within these functions point to a valid memory address.
21490 +void __builtin_mems(int */*ptr*/, int /*bit*/)
21491 +void __builtin_memc(int */*ptr*/, int /*bit*/)
21492 +void __builtin_memt(int */*ptr*/, int /*bit*/)
21495 +Built-in functions for DSP instructions. Note that these built-ins will
21496 +fail for targets where the DSP instructions are not implemented.
21499 +int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/)
21500 +int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/)
21501 +int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/)
21502 +int __builtin_satrndu (int /*Rd*/,int /*sa*/, int /*bn*/)
21503 +short __builtin_mulsathh_h (short, short)
21504 +int __builtin_mulsathh_w (short, short)
21505 +short __builtin_mulsatrndhh_h (short, short)
21506 +int __builtin_mulsatrndwh_w (int, short)
21507 +int __builtin_mulsatwh_w (int, short)
21508 +int __builtin_macsathh_w (int, short, short)
21509 +short __builtin_satadd_h (short, short)
21510 +short __builtin_satsub_h (short, short)
21511 +int __builtin_satadd_w (int, int)
21512 +int __builtin_satsub_w (int, int)
21513 +long long __builtin_mulwh_d(int, short)
21514 +long long __builtin_mulnwh_d(int, short)
21515 +long long __builtin_macwh_d(long long, int, short)
21516 +long long __builtin_machh_d(long long, short, short)
21519 +Other built-in functions for instructions that cannot easily be
21520 +generated by the compiler.
21523 +void __builtin_ssrf(int);
21524 +void __builtin_csrf(int);
21525 +void __builtin_musfr(int);
21526 +int __builtin_mustr(void);
21527 +int __builtin_mfsr(int /*Status Register Address*/)
21528 +void __builtin_mtsr(int /*Status Register Address*/, int /*Value*/)
21529 +int __builtin_mfdr(int /*Debug Register Address*/)
21530 +void __builtin_mtdr(int /*Debug Register Address*/, int /*Value*/)
21531 +void __builtin_cache(void * /*Address*/, int /*Cache Operation*/)
21532 +void __builtin_sync(int /*Sync Operation*/)
21533 +void __builtin_tlbr(void)
21534 +void __builtin_tlbs(void)
21535 +void __builtin_tlbw(void)
21536 +void __builtin_breakpoint(void)
21537 +int __builtin_xchg(void * /*Address*/, int /*Value*/ )
21538 +short __builtin_bswap_16(short)
21539 +int __builtin_bswap_32(int)
21540 +void __builtin_cop(int/*cpnr*/, int/*crd*/, int/*crx*/, int/*cry*/, int/*op*/)
21541 +int __builtin_mvcr_w(int/*cpnr*/, int/*crs*/)
21542 +void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/)
21543 +long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/)
21544 +void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/)
21547 @node Blackfin Built-in Functions
21548 @subsection Blackfin Built-in Functions
21550 --- a/gcc/doc/invoke.texi
21551 +++ b/gcc/doc/invoke.texi
21552 @@ -195,7 +195,7 @@ in the following sections.
21553 -fvisibility-ms-compat @gol
21554 -Wabi -Wctor-dtor-privacy @gol
21555 -Wnon-virtual-dtor -Wreorder @gol
21556 --Weffc++ -Wno-deprecated -Wstrict-null-sentinel @gol
21557 +-Weffc++ -Wno-deprecated @gol
21558 -Wno-non-template-friend -Wold-style-cast @gol
21559 -Woverloaded-virtual -Wno-pmf-conversions @gol
21561 @@ -609,6 +609,12 @@ Objective-C and Objective-C++ Dialects}.
21562 -mauto-incdec -minmax -mlong-calls -mshort @gol
21563 -msoft-reg-count=@var{count}}
21565 +@emph{AVR32 Options}
21566 +@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol
21567 +-mforce-double-align -mno-init-got -mrelax -mmd-reorg-opt -masm-addr-pseudos @gol
21568 +-mpart=@var{part} -mcpu=@var{cpu} -march=@var{arch} @gol
21569 +-mfast-float -mimm-in-const-pool}
21571 @emph{MCore Options}
21572 @gccoptlist{-mhardlit -mno-hardlit -mdiv -mno-div -mrelax-immediates @gol
21573 -mno-relax-immediates -mwide-bitfields -mno-wide-bitfields @gol
21574 @@ -3163,13 +3169,11 @@ requiring @option{-O}.
21575 If you want to warn about code which uses the uninitialized value of the
21576 variable in its own initializer, use the @option{-Winit-self} option.
21578 -These warnings occur for individual uninitialized or clobbered
21579 -elements of structure, union or array variables as well as for
21580 -variables which are uninitialized or clobbered as a whole. They do
21581 -not occur for variables or elements declared @code{volatile}. Because
21582 -these warnings depend on optimization, the exact variables or elements
21583 -for which there are warnings will depend on the precise optimization
21584 -options and version of GCC used.
21585 +These warnings occur only for variables that are candidates for
21586 +register allocation. Therefore, they do not occur for a variable that
21587 +is declared @code{volatile}, or whose address is taken, or whose size
21588 +is other than 1, 2, 4 or 8 bytes. Also, they do not occur for
21589 +structures, unions or arrays, even when they are in registers.
21591 Note that there may be no warning about a variable that is used only
21592 to compute a value that itself is never used, because such
21593 @@ -7034,10 +7038,6 @@ If number of candidates in the set is sm
21594 we always try to remove unnecessary ivs from the set during its
21595 optimization when a new iv is added to the set.
21597 -@item scev-max-expr-size
21598 -Bound on size of expressions used in the scalar evolutions analyzer.
21599 -Large expressions slow the analyzer.
21601 @item omega-max-vars
21602 The maximum number of variables in an Omega constraint system.
21603 The default value is 128.
21604 @@ -8363,6 +8363,7 @@ platform.
21609 * Blackfin Options::
21612 @@ -8834,6 +8835,120 @@ comply to the C standards, but it will p
21616 +@node AVR32 Options
21617 +@subsection AVR32 Options
21618 +@cindex AVR32 Options
21620 +These options are defined for AVR32 implementations:
21623 +@item -muse-rodata-section
21624 +@opindex muse-rodata-section
21625 +Use section @samp{.rodata} for read-only data instead of @samp{.text}.
21627 +@item -mhard-float
21628 +@opindex mhard-float
21629 +Use floating point coprocessor instructions.
21631 +@item -msoft-float
21632 +@opindex msoft-float
21633 +Use software floating-point library for floating-point operations.
21635 +@item -mforce-double-align
21636 +@opindex mforce-double-align
21637 +Force double-word alignment for double-word memory accesses.
21639 +@item -masm-addr-pseudos
21640 +@opindex masm-addr-pseudos
21641 +Use assembler pseudo-instructions lda.w and call for handling direct
21642 +addresses. (Enabled by default)
21644 +@item -mno-init-got
21645 +@opindex mno-init-got
21646 +Do not initialize the GOT register before using it when compiling PIC
21651 +Let the invoked assembler and linker do relaxing
21652 +(Enabled by default when optimization level is >1).
21653 +This means that when the address of symbols are known at link time,
21654 +the linker can optimize @samp{icall} and @samp{mcall}
21655 +instructions into a @samp{rcall} instruction if possible.
21656 +Loading the address of a symbol can also be optimized.
21658 +@item -mmd-reorg-opt
21659 +@opindex mmd-reorg-opt
21660 +Perform machine dependent optimizations in reorg stage.
21662 +@item -mpart=@var{part}
21664 +Generate code for the specified part. Permissible parts are:
21672 +@samp{uc3a0512es},
21676 +@samp{uc3a1512es},
21687 +@samp{uc3b0256es},
21688 +@samp{uc3b0512revc},
21692 +@samp{uc3b1256es},
21693 +@samp{uc3b1512revc}
21710 +@item -mcpu=@var{cpu-type}
21712 +Same as -mpart. Obsolete.
21714 +@item -march=@var{arch}
21716 +Generate code for the specified architecture. Permissible architectures are:
21717 +@samp{ap}, @samp{uc} and @samp{ucr2}.
21719 +@item -mfast-float
21720 +@opindex mfast-float
21721 +Enable fast floating-point library that does not conform to IEEE-754 but is still good enough
21722 +for most applications. The fast floating-point library does not round to the nearest even
21723 +but away from zero. Enabled by default if the -funsafe-math-optimizations switch is specified.
21725 +@item -mimm-in-const-pool
21726 +@opindex mimm-in-const-pool
21727 +Put large immediates in constant pool. This is enabled by default for archs with insn-cache.
21730 @node Blackfin Options
21731 @subsection Blackfin Options
21732 @cindex Blackfin Options
21733 @@ -8889,29 +9004,12 @@ When enabled, the compiler will ensure t
21734 contain speculative loads after jump instructions. If this option is used,
21735 @code{__WORKAROUND_SPECULATIVE_LOADS} is defined.
21737 -@item -mno-specld-anomaly
21738 -@opindex mno-specld-anomaly
21739 -Don't generate extra code to prevent speculative loads from occurring.
21741 @item -mcsync-anomaly
21742 @opindex mcsync-anomaly
21743 When enabled, the compiler will ensure that the generated code does not
21744 contain CSYNC or SSYNC instructions too soon after conditional branches.
21745 If this option is used, @code{__WORKAROUND_SPECULATIVE_SYNCS} is defined.
21747 -@item -mno-csync-anomaly
21748 -@opindex mno-csync-anomaly
21749 -Don't generate extra code to prevent CSYNC or SSYNC instructions from
21750 -occurring too soon after a conditional branch.
21754 -When enabled, the compiler is free to take advantage of the knowledge that
21755 -the entire program fits into the low 64k of memory.
21757 -@item -mno-low-64k
21758 -@opindex mno-low-64k
21759 -Assume that the program is arbitrarily large. This is the default.
21761 @item -mstack-check-l1
21762 @opindex mstack-check-l1
21763 @@ -8925,11 +9023,6 @@ This allows for execute in place and sha
21764 without virtual memory management. This option implies @option{-fPIC}.
21765 With a @samp{bfin-elf} target, this option implies @option{-msim}.
21767 -@item -mno-id-shared-library
21768 -@opindex mno-id-shared-library
21769 -Generate code that doesn't assume ID based shared libraries are being used.
21770 -This is the default.
21772 @item -mleaf-id-shared-library
21773 @opindex mleaf-id-shared-library
21774 Generate code that supports shared libraries via the library ID method,
21775 @@ -8971,11 +9064,6 @@ call on this register. This switch is n
21776 will lie outside of the 24 bit addressing range of the offset based
21777 version of subroutine call instruction.
21779 -This feature is not enabled by default. Specifying
21780 -@option{-mno-long-calls} will restore the default behavior. Note these
21781 -switches have no effect on how the compiler generates code to handle
21782 -function calls via function pointers.
21786 Link with the fast floating-point library. This library relaxes some of
21787 --- a/gcc/doc/md.texi
21788 +++ b/gcc/doc/md.texi
21789 @@ -1681,6 +1681,58 @@ A memory reference suitable for iWMMXt l
21790 A memory reference suitable for the ARMv4 ldrsb instruction.
21793 +@item AVR32 family---@file{avr32.h}
21796 +Floating-point registers (f0 to f15)
21798 +@item Ku@var{bits}
21799 +Unsigned constant representable with @var{bits} number of bits (Must be
21800 +two digits). I.e: An unsigned 8-bit constant is written as @samp{Ku08}
21802 +@item Ks@var{bits}
21803 +Signed constant representable with @var{bits} number of bits (Must be
21804 +two digits). I.e: A signed 12-bit constant is written as @samp{Ks12}
21806 +@item Is@var{bits}
21807 +The negated range of a signed constant representable with @var{bits}
21808 +number of bits. The same as @samp{Ks@var{bits}} with a negated range.
21809 +This means that the constant must be in the range @math{-2^{bits-1}+1} to @math{2^{bits-1}}
21812 +A single/double precision floating-point immediate or 64-bit integer
21813 +immediate where the least and most significant words both can be
21814 +loaded with a move instruction. That is the integer form of the
21815 +values in the least and most significant words both are in the range
21816 +@math{-2^{20}} to @math{2^{20}-1}.
21818 +@item RKs@var{bits}
21819 +A memory reference where the address consists of a base register
21820 +plus a signed immediate displacement with range given by @samp{Ks@var{bits}}
21821 +which has the same format as for the signed immediate integer constraint
21824 +@item RKu@var{bits}
21825 +A memory reference where the address consists of a base register
21826 +plus an unsigned immediate displacement with range given by @samp{Ku@var{bits}}
21827 +which has the same format as for the unsigned immediate integer constraint
21831 +A memory reference with an immediate or register offset
21834 +A memory reference to a constant pool entry
21837 +A valid operand for use in the @samp{lda.w} instruction macro when
21838 +relaxing is enabled
21841 +A memory reference valid for coprocessor memory instructions
21845 @item AVR family---@file{config/avr/constraints.md}
21850 @@ -463,9 +463,9 @@ store_bit_field_1 (rtx str_rtx, unsigned
21851 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
21852 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
21853 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
21854 - : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
21855 + : ( (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
21856 || (offset * BITS_PER_UNIT % bitsize == 0
21857 - && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
21858 + && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0)))))
21861 op0 = adjust_address (op0, fieldmode, offset);
21864 @@ -52,6 +52,7 @@ along with GCC; see the file COPYING3.
21865 #include "tree-flow.h"
21866 #include "target.h"
21867 #include "timevar.h"
21868 +#include "c-common.h"
21870 #include "diagnostic.h"
21872 @@ -3620,16 +3621,17 @@ emit_single_push_insn (enum machine_mode
21876 + emit_move_insn (stack_pointer_rtx,
21877 + expand_binop (Pmode,
21878 #ifdef STACK_GROWS_DOWNWARD
21879 - /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
21880 - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
21881 - GEN_INT (-(HOST_WIDE_INT) rounded_size));
21884 - /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
21885 - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
21886 - GEN_INT (rounded_size));
21889 - dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
21890 + stack_pointer_rtx,
21891 + GEN_INT (rounded_size),
21892 + NULL_RTX, 0, OPTAB_LIB_WIDEN));
21893 + dest_addr = stack_pointer_rtx;
21896 dest = gen_rtx_MEM (mode, dest_addr);
21897 @@ -5739,7 +5741,8 @@ store_field (rtx target, HOST_WIDE_INT b
21898 is a bit field, we cannot use addressing to access it.
21899 Use bit-field techniques or SUBREG to store in it. */
21901 - if (mode == VOIDmode
21904 || (mode != BLKmode && ! direct_store[(int) mode]
21905 && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
21906 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
21907 @@ -5896,7 +5899,19 @@ get_inner_reference (tree exp, HOST_WIDE
21909 tree field = TREE_OPERAND (exp, 1);
21910 size_tree = DECL_SIZE (field);
21911 - if (!DECL_BIT_FIELD (field))
21912 + if (!DECL_BIT_FIELD (field)
21913 + /* Added for AVR32:
21914 + Bitfields with a size equal to a target storage
21915 + type might not cause DECL_BIT_FIELD to return
21916 + true since it can be optimized into a normal array
21917 + access operation. But for volatile bitfields we do
21918 + not allow this when targetm.narrow_volatile_bitfield ()
21919 + is false. We can use DECL_C_BIT_FIELD to check if this
21920 + really is a c-bitfield. */
21921 + && !(TREE_THIS_VOLATILE (exp)
21922 + && !targetm.narrow_volatile_bitfield ()
21923 + && DECL_C_BIT_FIELD (field)) )
21925 mode = DECL_MODE (field);
21926 else if (DECL_MODE (field) == BLKmode)
21927 blkmode_bitfield = true;
21928 @@ -7889,7 +7904,8 @@ expand_expr_real_1 (tree exp, rtx target
21929 by doing the extract into an object as wide as the field
21930 (which we know to be the width of a basic mode), then
21931 storing into memory, and changing the mode to BLKmode. */
21932 - if (mode1 == VOIDmode
21934 + mode1 == VOIDmode
21935 || REG_P (op0) || GET_CODE (op0) == SUBREG
21936 || (mode1 != BLKmode && ! direct_load[(int) mode1]
21937 && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
21938 --- a/gcc/function.c
21939 +++ b/gcc/function.c
21940 @@ -2715,7 +2715,11 @@ assign_parm_setup_reg (struct assign_par
21941 SET_DECL_RTL (parm, parmreg);
21943 /* Copy the value into the register. */
21944 - if (data->nominal_mode != data->passed_mode
21945 + if ( (data->nominal_mode != data->passed_mode
21946 + /* Added for AVR32: If passed_mode is equal
21947 + to promoted nominal mode why should we convert?
21948 + The conversion should make no difference. */
21949 + && data->passed_mode != promoted_nominal_mode)
21950 || promoted_nominal_mode != data->promoted_mode)
21952 int save_tree_used;
21953 --- a/gcc/genemit.c
21954 +++ b/gcc/genemit.c
21955 @@ -121,6 +121,24 @@ max_operand_vec (rtx insn, int arg)
21959 +gen_vararg_prologue(int operands)
21963 + if (operands > 1)
21965 + for (i = 1; i < operands; i++)
21966 + printf(" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
21968 + printf(" va_list args;\n\n");
21969 + printf(" va_start(args, operand0);\n");
21970 + for (i = 1; i < operands; i++)
21971 + printf(" operand%d = va_arg(args, rtx);\n", i);
21972 + printf(" va_end(args);\n\n");
21977 print_code (RTX_CODE code)
21980 @@ -406,18 +424,16 @@ gen_insn (rtx insn, int lineno)
21981 fatal ("match_dup operand number has no match_operand");
21983 /* Output the function name and argument declarations. */
21984 - printf ("rtx\ngen_%s (", XSTR (insn, 0));
21985 + printf ("rtx\ngen_%s ", XSTR (insn, 0));
21988 - for (i = 0; i < operands; i++)
21990 - printf (",\n\trtx operand%d ATTRIBUTE_UNUSED", i);
21991 + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
21993 - printf ("rtx operand%d ATTRIBUTE_UNUSED", i);
21997 + printf("(void)\n");
22000 + gen_vararg_prologue(operands);
22002 /* Output code to construct and return the rtl for the instruction body. */
22004 if (XVECLEN (insn, 1) == 1)
22005 @@ -461,16 +477,12 @@ gen_expand (rtx expand)
22006 operands = max_operand_vec (expand, 1);
22008 /* Output the function name and argument declarations. */
22009 - printf ("rtx\ngen_%s (", XSTR (expand, 0));
22010 + printf ("rtx\ngen_%s ", XSTR (expand, 0));
22012 - for (i = 0; i < operands; i++)
22014 - printf (",\n\trtx operand%d", i);
22016 - printf ("rtx operand%d", i);
22017 + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
22021 + printf("(void)\n");
22025 /* If we don't have any C code to write, only one insn is being written,
22026 @@ -480,6 +492,8 @@ gen_expand (rtx expand)
22027 && operands > max_dup_opno
22028 && XVECLEN (expand, 1) == 1)
22030 + gen_vararg_prologue(operands);
22032 printf (" return ");
22033 gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL);
22034 printf (";\n}\n\n");
22035 @@ -493,6 +507,7 @@ gen_expand (rtx expand)
22036 for (; i <= max_scratch_opno; i++)
22037 printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
22038 printf (" rtx _val = 0;\n");
22039 + gen_vararg_prologue(operands);
22040 printf (" start_sequence ();\n");
22042 /* The fourth operand of DEFINE_EXPAND is some code to be executed
22043 --- a/gcc/genflags.c
22044 +++ b/gcc/genflags.c
22045 @@ -127,7 +127,6 @@ static void
22046 gen_proto (rtx insn)
22048 int num = num_operands (insn);
22050 const char *name = XSTR (insn, 0);
22051 int truth = maybe_eval_c_test (XSTR (insn, 2));
22053 @@ -158,12 +157,7 @@ gen_proto (rtx insn)
22055 fputs ("void", stdout);
22058 - for (i = 1; i < num; i++)
22059 - fputs ("rtx, ", stdout);
22061 - fputs ("rtx", stdout);
22063 + fputs("rtx, ...", stdout);
22067 @@ -173,12 +167,7 @@ gen_proto (rtx insn)
22069 printf ("static inline rtx\ngen_%s", name);
22073 - for (i = 0; i < num-1; i++)
22074 - printf ("rtx ARG_UNUSED (%c), ", 'a' + i);
22075 - printf ("rtx ARG_UNUSED (%c))\n", 'a' + i);
22077 + puts("(rtx ARG_UNUSED(a), ...)");
22080 puts ("{\n return 0;\n}");
22081 --- a/gcc/genoutput.c
22082 +++ b/gcc/genoutput.c
22083 @@ -386,7 +386,7 @@ output_insn_data (void)
22086 if (d->name && d->name[0] != '*')
22087 - printf (" (insn_gen_fn) gen_%s,\n", d->name);
22088 + printf (" gen_%s,\n", d->name);
22094 @@ -81,7 +81,7 @@ static int num_possible_if_blocks;
22095 static int num_updated_if_blocks;
22097 /* # of changes made. */
22098 -static int num_true_changes;
22099 +int num_true_changes;
22101 /* Whether conditional execution changes were made. */
22102 static int cond_exec_changed_p;
22103 @@ -286,6 +286,9 @@ cond_exec_process_insns (ce_if_block_t *
22107 +#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
22108 + if ( !IFCVT_ALLOW_MODIFY_TEST_IN_INSN )
22110 if (modified_in_p (test, insn))
22113 @@ -566,15 +569,18 @@ cond_exec_process_if_block (ce_if_block_
22114 IFCVT_MODIFY_FINAL (ce_info);
22117 + /* Merge the blocks! */
22118 + if ( reload_completed ){
22119 /* Conversion succeeded. */
22121 fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
22122 n_insns, (n_insns == 1) ? " was" : "s were");
22124 - /* Merge the blocks! */
22125 merge_if_block (ce_info);
22126 cond_exec_changed_p = TRUE;
22132 #ifdef IFCVT_MODIFY_CANCEL
22133 @@ -1080,7 +1086,11 @@ noce_try_addcc (struct noce_if_info *if_
22136 rtx cond = if_info->cond;
22137 - enum rtx_code code = reversed_comparison_code (cond, if_info->jump);
22138 + /* This generates wrong code for AVR32. The cond code need not be reversed
22139 + since the addmodecc patterns add if the condition is NOT met. */
22140 + /* enum rtx_code code = reversed_comparison_code (cond, if_info->jump);*/
22141 + enum rtx_code code = GET_CODE(cond);
22144 /* First try to use addcc pattern. */
22145 if (general_operand (XEXP (cond, 0), VOIDmode)
22146 @@ -3017,7 +3027,12 @@ find_if_header (basic_block test_bb, int
22147 && noce_find_if_block (test_bb, then_edge, else_edge, pass))
22150 - if (HAVE_conditional_execution && reload_completed
22151 + if (HAVE_conditional_execution &&
22152 +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
22153 + (reload_completed || IFCVT_COND_EXEC_BEFORE_RELOAD)
22157 && cond_exec_find_if_block (&ce_info))
22160 @@ -3132,7 +3147,11 @@ cond_exec_find_if_block (struct ce_if_bl
22162 /* We only ever should get here after reload,
22163 and only if we have conditional execution. */
22164 +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
22165 + gcc_assert (HAVE_conditional_execution && (reload_completed||IFCVT_COND_EXEC_BEFORE_RELOAD));
22167 gcc_assert (HAVE_conditional_execution && reload_completed);
22170 /* Discover if any fall through predecessors of the current test basic block
22171 were && tests (which jump to the else block) or || tests (which jump to
22172 @@ -4226,6 +4245,14 @@ gate_handle_if_after_reload (void)
22173 static unsigned int
22174 rest_of_handle_if_after_reload (void)
22176 + /* Hack for the AVR32 experimental ifcvt processing before reload.
22177 + The AVR32 specific ifcvt code needs to know when ifcvt after reload
22179 +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
22180 + if ( IFCVT_COND_EXEC_BEFORE_RELOAD )
22181 + cfun->machine->ifcvt_after_reload = 1;
22187 --- a/gcc/longlong.h
22188 +++ b/gcc/longlong.h
22189 @@ -239,6 +239,41 @@ UDItype __umulsidi3 (USItype, USItype);
22190 #define UDIV_TIME 100
22191 #endif /* __arm__ */
22193 +#if defined (__avr32__) && W_TYPE_SIZE == 32
22194 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
22195 + __asm__ ("add\t%1, %4, %5\n\tadc\t%0, %2, %3" \
22196 + : "=r" ((USItype) (sh)), \
22197 + "=&r" ((USItype) (sl)) \
22198 + : "r" ((USItype) (ah)), \
22199 + "r" ((USItype) (bh)), \
22200 + "r" ((USItype) (al)), \
22201 + "r" ((USItype) (bl)) __CLOBBER_CC)
22202 +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
22203 + __asm__ ("sub\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
22204 + : "=r" ((USItype) (sh)), \
22205 + "=&r" ((USItype) (sl)) \
22206 + : "r" ((USItype) (ah)), \
22207 + "r" ((USItype) (bh)), \
22208 + "r" ((USItype) (al)), \
22209 + "r" ((USItype) (bl)) __CLOBBER_CC)
22211 +#if !defined (__AVR32_NO_MUL__)
22212 +#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b))
22214 +#define umul_ppmm(w1, w0, u, v) \
22217 + __w.ll = __umulsidi3 (u, v); \
22218 + w1 = __w.s.high; \
22219 + w0 = __w.s.low; \
22223 +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
22224 +#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X))
22225 +#define COUNT_LEADING_ZEROS_0 32
22228 #if defined (__CRIS__) && __CRIS_arch_version >= 3
22229 #define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz (X))
22230 #if __CRIS_arch_version >= 8
22233 @@ -586,7 +586,7 @@ extern enum insn_code reload_out_optab[N
22234 extern optab code_to_optab[NUM_RTX_CODE + 1];
22237 -typedef rtx (*rtxfun) (rtx);
22238 +typedef rtx (*rtxfun) (rtx, ...);
22240 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
22241 gives the gen_function to make a branch to test that condition. */
22242 --- a/gcc/sched-deps.c
22243 +++ b/gcc/sched-deps.c
22244 @@ -1406,7 +1406,14 @@ fixup_sched_groups (rtx insn)
22246 prev_nonnote = prev_nonnote_insn (insn);
22247 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
22248 - && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
22249 + /* Modification for AVR32 by RP: Why is this here, this will
22250 + cause instruction to be without any dependencies which might
22251 + cause it to be moved anywhere. For the AVR32 we try to keep
22252 + a group of conditionals together even if they are mutual exclusive.
22254 + && (! sched_insns_conditions_mutex_p (insn, prev_nonnote)
22255 + || GET_CODE (PATTERN (insn)) == COND_EXEC )
22257 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
22260 @@ -1905,8 +1912,29 @@ sched_analyze_insn (struct deps *deps, r
22262 if (code == COND_EXEC)
22264 +#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
22265 + if (IFCVT_ALLOW_MODIFY_TEST_IN_INSN)
22267 + /* Check if we have a group of conditional instructions with the same test.
22268 + If so we must make sure that they are not scheduled apart in order to
22269 + avoid unnecessary tests and if one of the registers in the test is modified
22270 + in the instruction this is needed to ensure correct code. */
22271 + if ( prev_nonnote_insn (insn)
22272 + && INSN_P (prev_nonnote_insn (insn))
22273 + && GET_CODE (PATTERN (prev_nonnote_insn (insn))) == COND_EXEC
22274 + && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 0), XEXP (COND_EXEC_TEST (x), 0))
22275 + && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 1), XEXP (COND_EXEC_TEST (x), 1))
22276 + && ( GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == GET_CODE (COND_EXEC_TEST (x))
22277 + || GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == reversed_comparison_code (COND_EXEC_TEST (x), insn)))
22279 + SCHED_GROUP_P (insn) = 1;
22280 + //CANT_MOVE (prev_nonnote_insn (insn)) = 1;
22284 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
22287 /* ??? Should be recording conditions so we reduce the number of
22288 false dependencies. */
22289 x = COND_EXEC_CODE (x);
22290 --- a/gcc/testsuite/gcc.dg/sibcall-3.c
22291 +++ b/gcc/testsuite/gcc.dg/sibcall-3.c
22293 Copyright (C) 2002 Free Software Foundation Inc.
22294 Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
22296 -/* { dg-do run { xfail arc-*-* avr-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */
22297 +/* { dg-do run { xfail arc-*-* avr-*-* avr32-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */
22298 /* -mlongcall disables sibcall patterns. */
22299 /* { dg-skip-if "" { powerpc*-*-* } { "-mlongcall" } { "" } } */
22300 /* { dg-options "-O2 -foptimize-sibling-calls" } */
22301 --- a/gcc/testsuite/gcc.dg/sibcall-4.c
22302 +++ b/gcc/testsuite/gcc.dg/sibcall-4.c
22304 Copyright (C) 2002 Free Software Foundation Inc.
22305 Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
22307 -/* { dg-do run { xfail arc-*-* avr-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */
22308 +/* { dg-do run { xfail arc-*-* avr-*-* avr32-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */
22309 /* -mlongcall disables sibcall patterns. */
22310 /* { dg-skip-if "" { powerpc*-*-* } { "-mlongcall" } { "" } } */
22311 /* { dg-options "-O2 -foptimize-sibling-calls" } */
22312 --- a/gcc/testsuite/gcc.dg/trampoline-1.c
22313 +++ b/gcc/testsuite/gcc.dg/trampoline-1.c
22314 @@ -46,6 +46,8 @@ void foo (void)
22318 +#ifndef NO_TRAMPOLINES
22323 --- a/gcc/testsuite/g++.old-deja/g++.pt/static11.C
22324 +++ b/gcc/testsuite/g++.old-deja/g++.pt/static11.C
22326 // in their dejagnu baseboard description) require that the status is
22327 // final when exit is entered (or main returns), and not "overruled" by a
22328 // destructor calling _exit. It's not really worth it to handle that.
22329 -// { dg-do run { xfail mmix-knuth-mmixware arm*-*-elf arm*-*-eabi m68k-*-elf } }
22330 +// { dg-do run { xfail mmix-knuth-mmixware avr32-*-elf arm*-*-elf arm*-*-eabi m68k-*-elf } }
22332 // Bug: g++ was failing to destroy C<int>::a because it was using two
22333 // different sentry variables for construction and destruction.
22334 --- a/libgcc/config.host
22335 +++ b/libgcc/config.host
22336 @@ -240,6 +240,13 @@ arm-*-pe*)
22341 + # No need to build crtbeginT.o on uClibc systems. Should probably be
22342 + # moved to the OS specific section above.
22343 + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
22350 --- a/libstdc++-v3/config/os/gnu-linux/ctype_base.h
22351 +++ b/libstdc++-v3/config/os/gnu-linux/ctype_base.h
22354 // ISO C++ 14882: 22.1 Locales
22356 +#include <features.h>
22357 +#include <ctype.h>
22359 /** @file ctype_base.h
22360 * This is an internal header file, included by other library headers.
22361 @@ -45,7 +47,11 @@ _GLIBCXX_BEGIN_NAMESPACE(std)
22364 // Non-standard typedefs.
22366 + typedef const __ctype_touplow_t* __to_type;
22368 typedef const int* __to_type;
22371 // NB: Offsets into ctype<char>::_M_table force a particular size
22372 // on the mask type. Because of this, we don't use an enum.
22373 --- a/libstdc++-v3/include/Makefile.in
22374 +++ b/libstdc++-v3/include/Makefile.in
22375 @@ -36,6 +36,7 @@ POST_UNINSTALL = :
22376 build_triplet = @build@
22377 host_triplet = @host@
22378 target_triplet = @target@
22380 DIST_COMMON = $(top_srcdir)/fragment.am $(srcdir)/Makefile.in \
22381 $(srcdir)/Makefile.am
22383 --- a/libstdc++-v3/libmath/Makefile.in
22384 +++ b/libstdc++-v3/libmath/Makefile.in
22385 @@ -37,6 +37,7 @@ POST_UNINSTALL = :
22386 build_triplet = @build@
22387 host_triplet = @host@
22388 target_triplet = @target@
22391 DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
22392 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
22393 --- a/libstdc++-v3/libsupc++/Makefile.in
22394 +++ b/libstdc++-v3/libsupc++/Makefile.in
22395 @@ -38,6 +38,7 @@ POST_UNINSTALL = :
22396 build_triplet = @build@
22397 host_triplet = @host@
22398 target_triplet = @target@
22400 DIST_COMMON = $(top_srcdir)/fragment.am $(srcdir)/Makefile.in \
22401 $(srcdir)/Makefile.am $(glibcxxinstall_HEADERS)
22403 --- a/libstdc++-v3/Makefile.in
22404 +++ b/libstdc++-v3/Makefile.in
22405 @@ -36,6 +36,7 @@ POST_UNINSTALL = :
22406 build_triplet = @build@
22407 host_triplet = @host@
22408 target_triplet = @target@
22410 DIST_COMMON = $(top_srcdir)/fragment.am $(srcdir)/../config.guess \
22411 $(srcdir)/../config.sub README ChangeLog $(srcdir)/Makefile.in \
22412 $(srcdir)/Makefile.am $(top_srcdir)/configure \
22413 --- a/libstdc++-v3/po/Makefile.in
22414 +++ b/libstdc++-v3/po/Makefile.in
22415 @@ -36,6 +36,7 @@ POST_UNINSTALL = :
22416 build_triplet = @build@
22417 host_triplet = @host@
22418 target_triplet = @target@
22420 DIST_COMMON = $(top_srcdir)/fragment.am $(srcdir)/Makefile.in \
22421 $(srcdir)/Makefile.am
22423 --- a/libstdc++-v3/src/Makefile.in
22424 +++ b/libstdc++-v3/src/Makefile.in
22425 @@ -37,6 +37,7 @@ POST_UNINSTALL = :
22426 build_triplet = @build@
22427 host_triplet = @host@
22428 target_triplet = @target@
22430 DIST_COMMON = $(top_srcdir)/fragment.am $(srcdir)/Makefile.in \
22431 $(srcdir)/Makefile.am